| | | |
|---|---|---|
| author | Junio C Hamano <gitster@pobox.com> | 2017-10-03 15:42:50 +0900 |
| committer | Junio C Hamano <gitster@pobox.com> | 2017-10-03 15:42:50 +0900 |
| commit | 1a2e1a76ec2cbbafe60ffd124f673f62045fb0d3 (patch) | |
| tree | 91595ea0e1c159a7422b484c698e258de1579d72 /refs/ref-cache.c | |
| parent | Merge branch 'mr/doc-negative-pathspec' (diff) | |
| parent | packed-backend.c: rename a bunch of things and update comments (diff) | |
Merge branch 'mh/mmap-packed-refs'
Operations that do not touch (the majority of) the packed refs have been
optimized by making accesses to the packed-refs file lazy; we no longer
pre-parse everything up front, and an access to a single ref in the
packed-refs file no longer touches the majority of irrelevant refs either.
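To illustrate the idea, here is a minimal sketch in plain C of mmap-based lazy lookup in a sorted, line-oriented packed-refs-style file. This is not git's actual packed-backend.c code; the helper name `find_packed_ref_line()`, the command-line wrapper, and the fixed "40-hex-oid SP refname" line format are assumptions made for illustration only.

```c
/*
 * Sketch only (not git's implementation): mmap a line-oriented
 * "packed-refs"-style file and scan lazily for a single refname
 * instead of parsing every entry up front.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Return 1 (and print the matching line) if refname is present, else 0. */
static int find_packed_ref_line(const char *path, const char *refname)
{
	struct stat st;
	size_t want = strlen(refname);
	int fd, found = 0;
	char *buf, *p, *end;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;
	if (fstat(fd, &st) < 0 || st.st_size == 0) {
		close(fd);
		return 0;
	}
	buf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);
	if (buf == MAP_FAILED)
		return 0;

	/* Assumed format: each non-comment line is "<40-hex oid> <refname>". */
	for (p = buf, end = buf + st.st_size; p < end; ) {
		char *eol = memchr(p, '\n', end - p);
		size_t len = eol ? (size_t)(eol - p) : (size_t)(end - p);

		if (len == 41 + want && p[0] != '#' && p[40] == ' ' &&
		    !memcmp(p + 41, refname, want)) {
			printf("%.*s\n", (int)len, p);
			found = 1;
			break;
		}
		p = eol ? eol + 1 : end;
	}
	munmap(buf, st.st_size);
	return found;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <packed-refs-file> <refname>\n", argv[0]);
		return 2;
	}
	return find_packed_ref_line(argv[1], argv[2]) ? 0 : 1;
}
```

Because the file is mmapped and scanned only until a match is found, the kernel pages in just the portions that are actually touched, which is the gist of the lazy access described above. The real series also relies on the entries being sorted (see "read_packed_refs(): ensure that references are ordered when read" below), so a single lookup can avoid a full scan.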
* mh/mmap-packed-refs: (21 commits)
packed-backend.c: rename a bunch of things and update comments
mmapped_ref_iterator: inline into `packed_ref_iterator`
ref_cache: remove support for storing peeled values
packed_ref_store: get rid of the `ref_cache` entirely
ref_store: implement `refs_peel_ref()` generically
packed_read_raw_ref(): read the reference from the mmapped buffer
packed_ref_iterator_begin(): iterate using `mmapped_ref_iterator`
read_packed_refs(): ensure that references are ordered when read
packed_ref_cache: keep the `packed-refs` file mmapped if possible
packed-backend.c: reorder some definitions
mmapped_ref_iterator_advance(): no peeled value for broken refs
mmapped_ref_iterator: add iterator over a packed-refs file
packed_ref_cache: remember the file-wide peeling state
read_packed_refs(): read references with minimal copying
read_packed_refs(): make parsing of the header line more robust
read_packed_refs(): only check for a header at the top of the file
read_packed_refs(): use mmap to read the `packed-refs` file
die_unterminated_line(), die_invalid_line(): new functions
packed_ref_cache: add a backlink to the associated `packed_ref_store`
prefix_ref_iterator: break when we leave the prefix
...
Diffstat (limited to 'refs/ref-cache.c')
| | | |
|---|---|---|
| -rw-r--r-- | refs/ref-cache.c | 44 |

1 file changed, 2 insertions(+), 42 deletions(-)
```diff
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index 76bb723c86..4f850e1b5c 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -38,7 +38,6 @@ struct ref_entry *create_ref_entry(const char *refname,
 
 	FLEX_ALLOC_STR(ref, name, refname);
 	oidcpy(&ref->u.value.oid, oid);
-	oidclr(&ref->u.value.peeled);
 	ref->flag = flag;
 	return ref;
 }
@@ -491,49 +490,10 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
 	}
 }
 
-enum peel_status peel_entry(struct ref_entry *entry, int repeel)
-{
-	enum peel_status status;
-
-	if (entry->flag & REF_KNOWS_PEELED) {
-		if (repeel) {
-			entry->flag &= ~REF_KNOWS_PEELED;
-			oidclr(&entry->u.value.peeled);
-		} else {
-			return is_null_oid(&entry->u.value.peeled) ?
-				PEEL_NON_TAG : PEEL_PEELED;
-		}
-	}
-	if (entry->flag & REF_ISBROKEN)
-		return PEEL_BROKEN;
-	if (entry->flag & REF_ISSYMREF)
-		return PEEL_IS_SYMREF;
-
-	status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
-	if (status == PEEL_PEELED || status == PEEL_NON_TAG)
-		entry->flag |= REF_KNOWS_PEELED;
-	return status;
-}
-
 static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
 				   struct object_id *peeled)
 {
-	struct cache_ref_iterator *iter =
-		(struct cache_ref_iterator *)ref_iterator;
-	struct cache_ref_iterator_level *level;
-	struct ref_entry *entry;
-
-	level = &iter->levels[iter->levels_nr - 1];
-
-	if (level->index == -1)
-		die("BUG: peel called before advance for cache iterator");
-
-	entry = level->dir->entries[level->index];
-
-	if (peel_entry(entry, 0))
-		return -1;
-	oidcpy(peeled, &entry->u.value.peeled);
-	return 0;
+	return peel_object(ref_iterator->oid->hash, peeled->hash);
 }
 
 static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
@@ -574,7 +534,7 @@ struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
 
 	iter = xcalloc(1, sizeof(*iter));
 	ref_iterator = &iter->base;
-	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
 	ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
 
 	iter->levels_nr = 1;
```
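For context on the removal above: the per-entry `peeled` value and `peel_entry()` go away because peeling is now done on demand from whatever object the iterator currently points at. Below is a tiny, self-contained sketch of that shape using hypothetical types; `struct cache_iterator` and `peel_object_sketch()` are stand-ins, not git's API, and real peeling would dereference a tag object rather than copy the hash.

```c
/*
 * Hypothetical sketch: no stored "peeled" field per entry; the
 * iterator's peel operation delegates to an on-demand peel of the
 * object it currently points at.
 */
#include <stdio.h>
#include <string.h>

struct object_id { unsigned char hash[20]; };

/* Stand-in for on-demand peeling; faked here as an identity copy. */
static int peel_object_sketch(const unsigned char *sha1, unsigned char *peeled)
{
	memcpy(peeled, sha1, 20);
	return 0;	/* 0 == peeled successfully */
}

struct cache_iterator {
	const char *refname;
	struct object_id oid;	/* only the pointed-at object is kept */
};

/* The peel method reduces to a one-line delegation, mirroring the new
 * cache_ref_iterator_peel() in the diff above. */
static int cache_iterator_peel(struct cache_iterator *iter,
			       struct object_id *peeled)
{
	return peel_object_sketch(iter->oid.hash, peeled->hash);
}

int main(void)
{
	struct cache_iterator it = { "refs/tags/v1.0", { { 0xab } } };
	struct object_id peeled;

	if (!cache_iterator_peel(&it, &peeled))
		printf("peeled %s -> %02x...\n", it.refname, peeled.hash[0]);
	return 0;
}
```

Dropping the stored peeled value is what lets the `oidclr(&ref->u.value.peeled)` call and the `REF_KNOWS_PEELED` bookkeeping disappear from the cache in the diff.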
