Diffstat (limited to 'refs/packed-backend.c')
 -rw-r--r--  refs/packed-backend.c | 810
 1 file changed, 667 insertions(+), 143 deletions(-)
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 27bd6339ff..b4289a7d9c 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -1,6 +1,10 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
#include "../git-compat-util.h"
-#include "../alloc.h"
#include "../config.h"
+#include "../dir.h"
+#include "../fsck.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
@@ -11,8 +15,10 @@
#include "../lockfile.h"
#include "../chdir-notify.h"
#include "../statinfo.h"
+#include "../worktree.h"
#include "../wrapper.h"
#include "../write-or-die.h"
+#include "../trace2.h"
enum mmap_strategy {
/*
@@ -200,9 +206,14 @@ static int release_snapshot(struct snapshot *snapshot)
}
}
-struct ref_store *packed_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int store_flags)
+static size_t snapshot_hexsz(const struct snapshot *snapshot)
+{
+ return snapshot->refs->base.repo->hash_algo->hexsz;
+}
+
+struct ref_store *packed_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags)
{
struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
struct ref_store *ref_store = (struct ref_store *)refs;
@@ -252,6 +263,15 @@ static void clear_snapshot(struct packed_ref_store *refs)
}
}
+static void packed_ref_store_release(struct ref_store *ref_store)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, 0, "release");
+ clear_snapshot(refs);
+ rollback_lock_file(&refs->lock);
+ delete_tempfile(&refs->tempfile);
+ free(refs->path);
+}
+
static NORETURN void die_unterminated_line(const char *path,
const char *p, size_t len)
{
@@ -280,12 +300,9 @@ struct snapshot_record {
size_t len;
};
-static int cmp_packed_ref_records(const void *v1, const void *v2)
-{
- const struct snapshot_record *e1 = v1, *e2 = v2;
- const char *r1 = e1->start + the_hash_algo->hexsz + 1;
- const char *r2 = e2->start + the_hash_algo->hexsz + 1;
+static int cmp_packed_refname(const char *r1, const char *r2)
+{
while (1) {
if (*r1 == '\n')
return *r2 == '\n' ? 0 : -1;
@@ -300,20 +317,32 @@ static int cmp_packed_ref_records(const void *v1, const void *v2)
}
}
+static int cmp_packed_ref_records(const void *v1, const void *v2,
+ void *cb_data)
+{
+ const struct snapshot *snapshot = cb_data;
+ const struct snapshot_record *e1 = v1, *e2 = v2;
+ const char *r1 = e1->start + snapshot_hexsz(snapshot) + 1;
+ const char *r2 = e2->start + snapshot_hexsz(snapshot) + 1;
+
+ return cmp_packed_refname(r1, r2);
+}
+
/*
* Compare a snapshot record at `rec` to the specified NUL-terminated
* refname.
*/
-static int cmp_record_to_refname(const char *rec, const char *refname)
+static int cmp_record_to_refname(const char *rec, const char *refname,
+ int start, const struct snapshot *snapshot)
{
- const char *r1 = rec + the_hash_algo->hexsz + 1;
+ const char *r1 = rec + snapshot_hexsz(snapshot) + 1;
const char *r2 = refname;
while (1) {
if (*r1 == '\n')
return *r2 ? -1 : 0;
if (!*r2)
- return 1;
+ return start ? 1 : -1;
if (*r1 != *r2)
return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
r1++;
@@ -353,7 +382,7 @@ static void sort_snapshot(struct snapshot *snapshot)
if (!eol)
/* The safety check should prevent this. */
BUG("unterminated line found in packed-refs");
- if (eol - pos < the_hash_algo->hexsz + 2)
+ if (eol - pos < snapshot_hexsz(snapshot) + 2)
die_invalid_line(snapshot->refs->path,
pos, eof - pos);
eol++;
@@ -379,7 +408,7 @@ static void sort_snapshot(struct snapshot *snapshot)
if (sorted &&
nr > 1 &&
cmp_packed_ref_records(&records[nr - 2],
- &records[nr - 1]) >= 0)
+ &records[nr - 1], snapshot) >= 0)
sorted = 0;
pos = eol;
@@ -389,7 +418,7 @@ static void sort_snapshot(struct snapshot *snapshot)
goto cleanup;
/* We need to sort the memory. First we sort the records array: */
- QSORT(records, nr, cmp_packed_ref_records);
+ QSORT_S(records, nr, cmp_packed_ref_records, snapshot);
/*
* Allocate a new chunk of memory, and copy the old memory to
@@ -465,11 +494,27 @@ static void verify_buffer_safe(struct snapshot *snapshot)
return;
last_line = find_start_of_record(start, eof - 1);
- if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
+ if (*(eof - 1) != '\n' ||
+ eof - last_line < snapshot_hexsz(snapshot) + 2)
die_invalid_line(snapshot->refs->path,
last_line, eof - last_line);
}
+/*
+ * When parsing the "packed-refs" file, we will parse it line by line.
+ * Because we know the start pointer of the refname and the next
+ * newline pointer, we could calculate the length of the refname by
+ * subtracting the two pointers. However, there is a corner case where
+ * the refname contains corrupted embedded NUL characters. And
+ * `check_refname_format()` will not catch this when the truncated
+ * refname is still a valid refname. To prevent this, we need to check
+ * whether the refname contains the NUL characters.
+ */
+static int refname_contains_nul(struct strbuf *refname)
+{
+ return !!memchr(refname->buf, '\0', refname->len);
+}
+
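
Illustrative aside, not part of the patch: the corner case described in the comment above arises because a length-delimited refname with an embedded NUL looks like a shorter (and possibly valid) refname to any C-string based check, so the helper scans the full length with memchr(). A minimal standalone demonstration, using a hypothetical refname:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Length-delimited refname with a corrupt embedded NUL. */
	const char refname[] = "refs/heads/ma\0in";
	size_t len = sizeof(refname) - 1;	/* 16 bytes, not 13 */

	/* A C-string view silently stops at the NUL... */
	printf("as C string: \"%s\" (%zu chars)\n", refname, strlen(refname));

	/* ...whereas scanning the full length detects the corruption. */
	if (memchr(refname, '\0', len))
		printf("embedded NUL detected in %zu-byte refname\n", len);
	return 0;
}
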
#define SMALL_FILE_SIZE (32*1024)
/*
@@ -528,22 +573,9 @@ static int load_contents(struct snapshot *snapshot)
return 1;
}
-/*
- * Find the place in `snapshot->buf` where the start of the record for
- * `refname` starts. If `mustexist` is true and the reference doesn't
- * exist, then return NULL. If `mustexist` is false and the reference
- * doesn't exist, then return the point where that reference would be
- * inserted, or `snapshot->eof` (which might be NULL) if it would be
- * inserted at the end of the file. In the latter mode, `refname`
- * doesn't have to be a proper reference name; for example, one could
- * search for "refs/replace/" to find the start of any replace
- * references.
- *
- * The record is sought using a binary search, so `snapshot->buf` must
- * be sorted.
- */
-static const char *find_reference_location(struct snapshot *snapshot,
- const char *refname, int mustexist)
+static const char *find_reference_location_1(struct snapshot *snapshot,
+ const char *refname, int mustexist,
+ int start)
{
/*
* This is not *quite* a garden-variety binary search, because
@@ -573,7 +605,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
mid = lo + (hi - lo) / 2;
rec = find_start_of_record(lo, mid);
- cmp = cmp_record_to_refname(rec, refname);
+ cmp = cmp_record_to_refname(rec, refname, start, snapshot);
if (cmp < 0) {
lo = find_end_of_record(mid, hi);
} else if (cmp > 0) {
@@ -590,6 +622,41 @@ static const char *find_reference_location(struct snapshot *snapshot,
}
/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+static const char *find_reference_location(struct snapshot *snapshot,
+ const char *refname, int mustexist)
+{
+ return find_reference_location_1(snapshot, refname, mustexist, 1);
+}
+
+/*
+ * Find the place in `snapshot->buf` after the end of the record for
+ * `refname`. In other words, find the location of first thing *after*
+ * `refname`.
+ *
+ * Other semantics are identical to the ones in
+ * `find_reference_location()`.
+ */
+static const char *find_reference_location_end(struct snapshot *snapshot,
+ const char *refname,
+ int mustexist)
+{
+ return find_reference_location_1(snapshot, refname, mustexist, 0);
+}
+
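
Illustrative aside, not part of the patch: the `start` flag turns the same binary search into either a lower-bound lookup (first record at or after the query) or an upper-bound lookup (first record past everything the query is a prefix of). A rough standalone analogue on a sorted array of C strings, with hypothetical names:

#include <stdio.h>
#include <string.h>

/*
 * Sketch of the lower/upper-bound trick: when an entry starts with the
 * prefix, pretend it compares greater (start == 1) or less (start == 0),
 * so the search converges on either edge of the matching region.
 */
static size_t prefix_bound(const char **refs, size_t nr,
			   const char *prefix, int start)
{
	size_t lo = 0, hi = nr;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		int cmp = strncmp(refs[mid], prefix, strlen(prefix));

		if (!cmp)
			cmp = start ? 1 : -1;
		if (cmp < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int main(void)
{
	const char *refs[] = {
		"refs/heads/main", "refs/tags/v1.0", "refs/tags/v2.0", "refs/x",
	};
	size_t lo = prefix_bound(refs, 4, "refs/tags/", 1);
	size_t hi = prefix_bound(refs, 4, "refs/tags/", 0);
	printf("refs/tags/* spans indices [%zu, %zu)\n", lo, hi);	/* [1, 3) */
	return 0;
}

With start == 1 the search lands on the first "refs/tags/" entry; with start == 0 it lands just past the last one, which is how find_reference_location() and find_reference_location_end() bracket an excluded prefix in populate_excluded_jump_list() further down.
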
+/*
* Create a newly-allocated `snapshot` of the `packed-refs` file in
* its current state and return it. The return value will already have
* its reference count incremented.
@@ -648,7 +715,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
- if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
+ if (!skip_prefix(tmp, "# pack-refs with: ", (const char **)&p))
die_invalid_line(refs->path,
snapshot->buf,
snapshot->eof - snapshot->buf);
@@ -751,7 +818,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
return -1;
}
- if (get_oid_hex(rec, oid))
+ if (get_oid_hex_algop(rec, oid, ref_store->repo->hash_algo))
die_invalid_line(refs->path, rec, snapshot->eof - rec);
*type = REF_ISPACKED;
@@ -774,12 +841,21 @@ struct packed_ref_iterator {
struct snapshot *snapshot;
+ char *prefix;
+
/* The current position in the snapshot's buffer: */
const char *pos;
/* The end of the part of the buffer that will be iterated over: */
const char *eof;
+ struct jump_list_entry {
+ const char *start;
+ const char *end;
+ } *jump;
+ size_t jump_nr, jump_alloc;
+ size_t jump_cur;
+
/* Scratch space for current values: */
struct object_id oid, peeled;
struct strbuf refname_buf;
@@ -789,25 +865,45 @@ struct packed_ref_iterator {
};
/*
- * Move the iterator to the next record in the snapshot, without
- * respect for whether the record is actually required by the current
- * iteration. Adjust the fields in `iter` and return `ITER_OK` or
- * `ITER_DONE`. This function does not free the iterator in the case
- * of `ITER_DONE`.
+ * Move the iterator to the next record in the snapshot. Adjust the fields in
+ * `iter` and return `ITER_OK` or `ITER_DONE`. This function does not free the
+ * iterator in the case of `ITER_DONE`.
*/
static int next_record(struct packed_ref_iterator *iter)
{
- const char *p = iter->pos, *eol;
+ const char *p, *eol;
strbuf_reset(&iter->refname_buf);
+ /*
+ * If iter->pos is contained within a skipped region, jump past
+ * it.
+ *
+ * Note that each skipped region is considered at most once,
+ * since they are ordered based on their starting position.
+ */
+ while (iter->jump_cur < iter->jump_nr) {
+ struct jump_list_entry *curr = &iter->jump[iter->jump_cur];
+ if (iter->pos < curr->start)
+ break; /* not to the next jump yet */
+
+ iter->jump_cur++;
+ if (iter->pos < curr->end) {
+ iter->pos = curr->end;
+ trace2_counter_add(TRACE2_COUNTER_ID_PACKED_REFS_JUMPS, 1);
+ /* jumps are coalesced, so only one jump is necessary */
+ break;
+ }
+ }
+
if (iter->pos == iter->eof)
return ITER_DONE;
iter->base.flags = REF_ISPACKED;
+ p = iter->pos;
- if (iter->eof - p < the_hash_algo->hexsz + 2 ||
- parse_oid_hex(p, &iter->oid, &p) ||
+ if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 2 ||
+ parse_oid_hex_algop(p, &iter->oid, &p, iter->repo->hash_algo) ||
!isspace(*p++))
die_invalid_line(iter->snapshot->refs->path,
iter->pos, iter->eof - iter->pos);
@@ -820,11 +916,14 @@ static int next_record(struct packed_ref_iterator *iter)
strbuf_add(&iter->refname_buf, p, eol - p);
iter->base.refname = iter->refname_buf.buf;
+ if (refname_contains_nul(&iter->refname_buf))
+ die("packed refname contains embedded NULL: %s", iter->base.refname);
+
if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
if (!refname_is_safe(iter->base.refname))
die("packed refname is dangerous: %s",
iter->base.refname);
- oidclr(&iter->oid);
+ oidclr(&iter->oid, iter->repo->hash_algo);
iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
}
if (iter->snapshot->peeled == PEELED_FULLY ||
@@ -836,8 +935,8 @@ static int next_record(struct packed_ref_iterator *iter)
if (iter->pos < iter->eof && *iter->pos == '^') {
p = iter->pos + 1;
- if (iter->eof - p < the_hash_algo->hexsz + 1 ||
- parse_oid_hex(p, &iter->peeled, &p) ||
+ if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 1 ||
+ parse_oid_hex_algop(p, &iter->peeled, &p, iter->repo->hash_algo) ||
*p++ != '\n')
die_invalid_line(iter->snapshot->refs->path,
iter->pos, iter->eof - iter->pos);
@@ -849,13 +948,13 @@ static int next_record(struct packed_ref_iterator *iter)
* we suppress it if the reference is broken:
*/
if ((iter->base.flags & REF_ISBROKEN)) {
- oidclr(&iter->peeled);
+ oidclr(&iter->peeled, iter->repo->hash_algo);
iter->base.flags &= ~REF_KNOWS_PEELED;
} else {
iter->base.flags |= REF_KNOWS_PEELED;
}
} else {
- oidclr(&iter->peeled);
+ oidclr(&iter->peeled, iter->repo->hash_algo);
}
return ITER_OK;
@@ -868,6 +967,9 @@ static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
int ok;
while ((ok = next_record(iter)) == ITER_OK) {
+ const char *refname = iter->base.refname;
+ const char *prefix = iter->prefix;
+
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
!is_per_worktree_ref(iter->base.refname))
continue;
@@ -877,59 +979,183 @@ static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
&iter->oid, iter->flags))
continue;
+ while (prefix && *prefix) {
+ if (*refname < *prefix)
+ BUG("packed-refs backend yielded reference preceding its prefix");
+ else if (*refname > *prefix)
+ return ITER_DONE;
+ prefix++;
+ refname++;
+ }
+
return ITER_OK;
}
- if (ref_iterator_abort(ref_iterator) != ITER_DONE)
- ok = ITER_ERROR;
-
return ok;
}
+static int packed_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct packed_ref_iterator *iter =
+ (struct packed_ref_iterator *)ref_iterator;
+ const char *start;
+
+ if (prefix && *prefix)
+ start = find_reference_location(iter->snapshot, prefix, 0);
+ else
+ start = iter->snapshot->start;
+
+ free(iter->prefix);
+ iter->prefix = xstrdup_or_null(prefix);
+ iter->pos = start;
+ iter->eof = iter->snapshot->eof;
+
+ return 0;
+}
+
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- if (iter->repo != the_repository)
- BUG("peeling for non-the_repository is not supported");
-
if ((iter->base.flags & REF_KNOWS_PEELED)) {
oidcpy(peeled, &iter->peeled);
return is_null_oid(&iter->peeled) ? -1 : 0;
} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
return -1;
} else {
- return peel_object(&iter->oid, peeled) ? -1 : 0;
+ return peel_object(iter->repo, &iter->oid, peeled) ? -1 : 0;
}
}
-static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void packed_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- int ok = ITER_DONE;
-
strbuf_release(&iter->refname_buf);
+ free(iter->jump);
+ free(iter->prefix);
release_snapshot(iter->snapshot);
- base_ref_iterator_free(ref_iterator);
- return ok;
}
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
.advance = packed_ref_iterator_advance,
+ .seek = packed_ref_iterator_seek,
.peel = packed_ref_iterator_peel,
- .abort = packed_ref_iterator_abort
+ .release = packed_ref_iterator_release,
};
+static int jump_list_entry_cmp(const void *va, const void *vb)
+{
+ const struct jump_list_entry *a = va;
+ const struct jump_list_entry *b = vb;
+
+ if (a->start < b->start)
+ return -1;
+ if (a->start > b->start)
+ return 1;
+ return 0;
+}
+
+static int has_glob_special(const char *str)
+{
+ const char *p;
+ for (p = str; *p; p++) {
+ if (is_glob_special(*p))
+ return 1;
+ }
+ return 0;
+}
+
+static void populate_excluded_jump_list(struct packed_ref_iterator *iter,
+ struct snapshot *snapshot,
+ const char **excluded_patterns)
+{
+ size_t i, j;
+ const char **pattern;
+ struct jump_list_entry *last_disjoint;
+
+ if (!excluded_patterns)
+ return;
+
+ for (pattern = excluded_patterns; *pattern; pattern++) {
+ struct jump_list_entry *e;
+ const char *start, *end;
+
+ /*
+ * We can't feed any excludes with globs in them to the
+ * refs machinery. It only understands prefix matching.
+ * We likewise can't even feed the string leading up to
+ * the first meta-character, as something like "foo[a]"
+ * should not exclude "foobar" (but the prefix "foo"
+ * would match that and mark it for exclusion).
+ */
+ if (has_glob_special(*pattern))
+ continue;
+
+ start = find_reference_location(snapshot, *pattern, 0);
+ end = find_reference_location_end(snapshot, *pattern, 0);
+
+ if (start == end)
+ continue; /* nothing to jump over */
+
+ ALLOC_GROW(iter->jump, iter->jump_nr + 1, iter->jump_alloc);
+
+ e = &iter->jump[iter->jump_nr++];
+ e->start = start;
+ e->end = end;
+ }
+
+ if (!iter->jump_nr) {
+ /*
+ * Every entry in exclude_patterns has a meta-character,
+ * nothing to do here.
+ */
+ return;
+ }
+
+ QSORT(iter->jump, iter->jump_nr, jump_list_entry_cmp);
+
+ /*
+ * As an optimization, merge adjacent entries in the jump list
+ * to jump forwards as far as possible when entering a skipped
+ * region.
+ *
+ * For example, if we have two skipped regions:
+ *
+ * [[A, B], [B, C]]
+ *
+ * we want to combine that into a single entry jumping from A to
+ * C.
+ */
+ last_disjoint = iter->jump;
+
+ for (i = 1, j = 1; i < iter->jump_nr; i++) {
+ struct jump_list_entry *ours = &iter->jump[i];
+ if (ours->start <= last_disjoint->end) {
+ /* overlapping regions extend the previous one */
+ last_disjoint->end = last_disjoint->end > ours->end
+ ? last_disjoint->end : ours->end;
+ } else {
+ /* otherwise, insert a new region */
+ iter->jump[j++] = *ours;
+ last_disjoint = ours;
+ }
+ }
+
+ iter->jump_nr = j;
+ iter->jump_cur = 0;
+}
+
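
Illustrative aside, not part of the patch: the merge step above collapses sorted, possibly overlapping skip regions into disjoint ones so the iterator only ever needs a single jump per region. A standalone sketch of the same interval coalescing, using offsets instead of buffer pointers and hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the jump regions: half-open [start, end) offsets. */
struct region { size_t start, end; };

static int region_cmp(const void *va, const void *vb)
{
	const struct region *a = va, *b = vb;
	if (a->start < b->start) return -1;
	if (a->start > b->start) return 1;
	return 0;
}

/* Sort by start, then merge touching or overlapping regions in place. */
static size_t coalesce(struct region *r, size_t nr)
{
	size_t i, j;

	if (!nr)
		return 0;
	qsort(r, nr, sizeof(*r), region_cmp);
	for (i = 1, j = 0; i < nr; i++) {
		if (r[i].start <= r[j].end) {
			if (r[i].end > r[j].end)
				r[j].end = r[i].end;	/* extend previous region */
		} else {
			r[++j] = r[i];			/* start a new disjoint region */
		}
	}
	return j + 1;
}

int main(void)
{
	struct region r[] = { { 10, 20 }, { 20, 30 }, { 40, 50 } };
	size_t nr = coalesce(r, 3);
	for (size_t i = 0; i < nr; i++)
		printf("[%zu, %zu)\n", r[i].start, r[i].end);
	return 0;	/* prints [10, 30) and [40, 50) */
}
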
static struct ref_iterator *packed_ref_iterator_begin(
struct ref_store *ref_store,
- const char *prefix, unsigned int flags)
+ const char *prefix, const char **exclude_patterns,
+ unsigned int flags)
{
struct packed_ref_store *refs;
struct snapshot *snapshot;
- const char *start;
struct packed_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
@@ -945,33 +1171,24 @@ static struct ref_iterator *packed_ref_iterator_begin(
*/
snapshot = get_snapshot(refs);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->start;
-
- if (start == snapshot->eof)
- return empty_ref_iterator_begin();
-
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);
+
+ if (exclude_patterns)
+ populate_excluded_jump_list(iter, snapshot, exclude_patterns);
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
-
- iter->pos = start;
- iter->eof = snapshot->eof;
strbuf_init(&iter->refname_buf, 0);
-
iter->base.oid = &iter->oid;
-
iter->repo = ref_store->repo;
iter->flags = flags;
- if (prefix && *prefix)
- /* Stop iteration after we've gone *past* prefix: */
- ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);
+ if (packed_ref_iterator_seek(&iter->base, prefix) < 0) {
+ ref_iterator_free(&iter->base);
+ return NULL;
+ }
return ref_iterator;
}
@@ -1074,6 +1291,24 @@ int packed_refs_is_locked(struct ref_store *ref_store)
return is_lock_file_locked(&refs->lock);
}
+int packed_refs_size(struct ref_store *ref_store,
+ size_t *out)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, REF_STORE_READ,
+ "packed_refs_size");
+ struct stat st;
+
+ if (stat(refs->path, &st) < 0) {
+ if (errno != ENOENT)
+ return -1;
+ *out = 0;
+ return 0;
+ }
+
+ *out = st.st_size;
+ return 0;
+}
+
/*
* The packed-refs header line that we write out. Perhaps other traits
* will be added later.
@@ -1085,13 +1320,27 @@ int packed_refs_is_locked(struct ref_store *ref_store)
static const char PACKED_REFS_HEADER[] =
"# pack-refs with: peeled fully-peeled sorted \n";
-static int packed_init_db(struct ref_store *ref_store UNUSED,
- struct strbuf *err UNUSED)
+static int packed_ref_store_create_on_disk(struct ref_store *ref_store UNUSED,
+ int flags UNUSED,
+ struct strbuf *err UNUSED)
{
/* Nothing to do. */
return 0;
}
+static int packed_ref_store_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, 0, "remove");
+
+ if (remove_path(refs->path) < 0) {
+ strbuf_addstr(err, "could not delete packed-refs");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Write the packed refs from the current snapshot to the packed-refs
* tempfile, incorporating any changes from `updates`. `updates` must
@@ -1150,10 +1399,12 @@ static int write_with_updates(struct packed_ref_store *refs,
* list of refs is exhausted, set iter to NULL. When the list
* of updates is exhausted, leave i set to updates->nr.
*/
- iter = packed_ref_iterator_begin(&refs->base, "",
+ iter = packed_ref_iterator_begin(&refs->base, "", NULL,
DO_FOR_EACH_INCLUDE_BROKEN);
- if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK) {
+ ref_iterator_free(iter);
iter = NULL;
+ }
i = 0;
@@ -1201,8 +1452,10 @@ static int write_with_updates(struct packed_ref_store *refs,
* the iterator over the unneeded
* value.
*/
- if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK) {
+ ref_iterator_free(iter);
iter = NULL;
+ }
cmp = +1;
} else {
/*
@@ -1239,8 +1492,10 @@ static int write_with_updates(struct packed_ref_store *refs,
peel_error ? NULL : &peeled))
goto write_error;
- if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK) {
+ ref_iterator_free(iter);
iter = NULL;
+ }
} else if (is_null_oid(&update->new_oid)) {
/*
* The update wants to delete the reference,
@@ -1252,7 +1507,8 @@ static int write_with_updates(struct packed_ref_store *refs,
i++;
} else {
struct object_id peeled;
- int peel_error = peel_object(&update->new_oid,
+ int peel_error = peel_object(refs->base.repo,
+ &update->new_oid,
&peeled);
if (write_packed_entry(out, update->refname,
@@ -1288,9 +1544,7 @@ write_error:
get_tempfile_path(refs->tempfile), strerror(errno));
error:
- if (iter)
- ref_iterator_abort(iter);
-
+ ref_iterator_free(iter);
delete_tempfile(&refs->tempfile);
return -1;
}
@@ -1521,91 +1775,359 @@ cleanup:
return ret;
}
-static int packed_initial_transaction_commit(struct ref_store *ref_store UNUSED,
- struct ref_transaction *transaction,
- struct strbuf *err)
+static int packed_pack_refs(struct ref_store *ref_store UNUSED,
+ struct pack_refs_opts *pack_opts UNUSED)
{
- return ref_transaction_commit(transaction, err);
+ /*
+ * Packed refs are already packed. It might be that loose refs
+ * are packed *into* a packed refs store, but that is done by
+ * updating the packed references via a transaction.
+ */
+ return 0;
}
-static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
- struct string_list *refnames, unsigned int flags)
+static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store UNUSED)
{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
- struct strbuf err = STRBUF_INIT;
- struct ref_transaction *transaction;
- struct string_list_item *item;
- int ret;
+ return empty_ref_iterator_begin();
+}
- (void)refs; /* We need the check above, but don't use the variable */
+static int packed_fsck_ref_next_line(struct fsck_options *o,
+ unsigned long line_number, const char *start,
+ const char *eof, const char **eol)
+{
+ int ret = 0;
- if (!refnames->nr)
- return 0;
+ *eol = memchr(start, '\n', eof - start);
+ if (!*eol) {
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_PACKED_REF_ENTRY_NOT_TERMINATED,
+ "'%.*s' is not terminated with a newline",
+ (int)(eof - start), start);
+
+ /*
+ * There is no newline but we still want to parse it to the end of
+ * the buffer.
+ */
+ *eol = eof;
+ strbuf_release(&packed_entry);
+ }
+
+ return ret;
+}
+
+static int packed_fsck_ref_header(struct fsck_options *o,
+ const char *start, const char *eol,
+ unsigned int *sorted)
+{
+ struct string_list traits = STRING_LIST_INIT_NODUP;
+ char *tmp_line;
+ int ret = 0;
+ char *p;
+
+ tmp_line = xmemdupz(start, eol - start);
+ if (!skip_prefix(tmp_line, "# pack-refs with: ", (const char **)&p)) {
+ struct fsck_ref_report report = { 0 };
+ report.path = "packed-refs.header";
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_HEADER,
+ "'%.*s' does not start with '# pack-refs with: '",
+ (int)(eol - start), start);
+ goto cleanup;
+ }
+
+ string_list_split_in_place(&traits, p, " ", -1);
+ *sorted = unsorted_string_list_has_string(&traits, "sorted");
+
+cleanup:
+ free(tmp_line);
+ string_list_clear(&traits, 0);
+ return ret;
+}
+
+static int packed_fsck_ref_peeled_line(struct fsck_options *o,
+ struct ref_store *ref_store,
+ unsigned long line_number,
+ const char *start, const char *eol)
+{
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ struct object_id peeled;
+ const char *p;
+ int ret = 0;
/*
- * Since we don't check the references' old_oids, the
- * individual updates can't fail, so we can pack all of the
- * updates into a single transaction.
+ * Skip the '^' and parse the peeled oid.
*/
+ start++;
+ if (parse_oid_hex_algop(start, &peeled, &p, ref_store->repo->hash_algo)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "'%.*s' has invalid peeled oid",
+ (int)(eol - start), start);
+ goto cleanup;
+ }
- transaction = ref_store_transaction_begin(ref_store, &err);
- if (!transaction)
- return -1;
+ if (p != eol) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
- for_each_string_list_item(item, refnames) {
- if (ref_transaction_delete(transaction, item->string, NULL,
- flags, msg, &err)) {
- warning(_("could not delete reference %s: %s"),
- item->string, err.buf);
- strbuf_reset(&err);
- }
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "has trailing garbage after peeled oid '%.*s'",
+ (int)(eol - p), p);
+ goto cleanup;
+ }
+
+cleanup:
+ strbuf_release(&packed_entry);
+ return ret;
+}
+
+static int packed_fsck_ref_main_line(struct fsck_options *o,
+ struct ref_store *ref_store,
+ unsigned long line_number,
+ struct strbuf *refname,
+ const char *start, const char *eol)
+{
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ struct object_id oid;
+ const char *p;
+ int ret = 0;
+
+ if (parse_oid_hex_algop(start, &oid, &p, ref_store->repo->hash_algo)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "'%.*s' has invalid oid",
+ (int)(eol - start), start);
+ goto cleanup;
+ }
+
+ if (p == eol || !isspace(*p)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "has no space after oid '%s' but with '%.*s'",
+ oid_to_hex(&oid), (int)(eol - p), p);
+ goto cleanup;
+ }
+
+ p++;
+ strbuf_reset(refname);
+ strbuf_add(refname, p, eol - p);
+ if (refname_contains_nul(refname)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "refname '%s' contains NULL binaries",
+ refname->buf);
}
- ret = ref_transaction_commit(transaction, &err);
+ if (check_refname_format(refname->buf, 0)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
- if (ret) {
- if (refnames->nr == 1)
- error(_("could not delete reference %s: %s"),
- refnames->items[0].string, err.buf);
- else
- error(_("could not delete references: %s"), err.buf);
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_NAME,
+ "has bad refname '%s'", refname->buf);
}
- ref_transaction_free(transaction);
- strbuf_release(&err);
+cleanup:
+ strbuf_release(&packed_entry);
return ret;
}
-static int packed_pack_refs(struct ref_store *ref_store UNUSED,
- struct pack_refs_opts *pack_opts UNUSED)
+static int packed_fsck_ref_sorted(struct fsck_options *o,
+ struct ref_store *ref_store,
+ const char *start, const char *eof)
{
- /*
- * Packed refs are already packed. It might be that loose refs
- * are packed *into* a packed refs store, but that is done by
- * updating the packed references via a transaction.
- */
- return 0;
+ size_t hexsz = ref_store->repo->hash_algo->hexsz;
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ struct strbuf refname1 = STRBUF_INIT;
+ struct strbuf refname2 = STRBUF_INIT;
+ unsigned long line_number = 1;
+ const char *former = NULL;
+ const char *current;
+ const char *eol;
+ int ret = 0;
+
+ if (*start == '#') {
+ eol = memchr(start, '\n', eof - start);
+ start = eol + 1;
+ line_number++;
+ }
+
+ for (; start < eof; line_number++, start = eol + 1) {
+ eol = memchr(start, '\n', eof - start);
+
+ if (*start == '^')
+ continue;
+
+ if (!former) {
+ former = start + hexsz + 1;
+ continue;
+ }
+
+ current = start + hexsz + 1;
+ if (cmp_packed_refname(former, current) >= 0) {
+ const char *err_fmt =
+ "refname '%s' is less than previous refname '%s'";
+
+ eol = memchr(former, '\n', eof - former);
+ strbuf_add(&refname1, former, eol - former);
+ eol = memchr(current, '\n', eof - current);
+ strbuf_add(&refname2, current, eol - current);
+
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_PACKED_REF_UNSORTED,
+ err_fmt, refname2.buf, refname1.buf);
+ goto cleanup;
+ }
+ former = current;
+ }
+
+cleanup:
+ strbuf_release(&packed_entry);
+ strbuf_release(&refname1);
+ strbuf_release(&refname2);
+ return ret;
}
-static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store UNUSED)
+static int packed_fsck_ref_content(struct fsck_options *o,
+ struct ref_store *ref_store,
+ unsigned int *sorted,
+ const char *start, const char *eof)
{
- return empty_ref_iterator_begin();
+ struct strbuf refname = STRBUF_INIT;
+ unsigned long line_number = 1;
+ const char *eol;
+ int ret = 0;
+
+ ret |= packed_fsck_ref_next_line(o, line_number, start, eof, &eol);
+ if (*start == '#') {
+ ret |= packed_fsck_ref_header(o, start, eol, sorted);
+
+ start = eol + 1;
+ line_number++;
+ }
+
+ while (start < eof) {
+ ret |= packed_fsck_ref_next_line(o, line_number, start, eof, &eol);
+ ret |= packed_fsck_ref_main_line(o, ref_store, line_number, &refname, start, eol);
+ start = eol + 1;
+ line_number++;
+ if (start < eof && *start == '^') {
+ ret |= packed_fsck_ref_next_line(o, line_number, start, eof, &eol);
+ ret |= packed_fsck_ref_peeled_line(o, ref_store, line_number,
+ start, eol);
+ start = eol + 1;
+ line_number++;
+ }
+ }
+
+ strbuf_release(&refname);
+ return ret;
+}
+
+static int packed_fsck(struct ref_store *ref_store,
+ struct fsck_options *o,
+ struct worktree *wt)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store,
+ REF_STORE_READ, "fsck");
+ struct strbuf packed_ref_content = STRBUF_INIT;
+ unsigned int sorted = 0;
+ struct stat st;
+ int ret = 0;
+ int fd = -1;
+
+ if (!is_main_worktree(wt))
+ goto cleanup;
+
+ if (o->verbose)
+ fprintf_ln(stderr, "Checking packed-refs file %s", refs->path);
+
+ fd = open_nofollow(refs->path, O_RDONLY);
+ if (fd < 0) {
+ /*
+ * If the packed-refs file doesn't exist, there's nothing
+ * to check.
+ */
+ if (errno == ENOENT)
+ goto cleanup;
+
+ if (errno == ELOOP) {
+ struct fsck_ref_report report = { 0 };
+ report.path = "packed-refs";
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_FILETYPE,
+ "not a regular file but a symlink");
+ goto cleanup;
+ }
+
+ ret = error_errno(_("unable to open '%s'"), refs->path);
+ goto cleanup;
+ } else if (fstat(fd, &st) < 0) {
+ ret = error_errno(_("unable to stat '%s'"), refs->path);
+ goto cleanup;
+ } else if (!S_ISREG(st.st_mode)) {
+ struct fsck_ref_report report = { 0 };
+ report.path = "packed-refs";
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_FILETYPE,
+ "not a regular file");
+ goto cleanup;
+ }
+
+ if (strbuf_read(&packed_ref_content, fd, 0) < 0) {
+ ret = error_errno(_("unable to read '%s'"), refs->path);
+ goto cleanup;
+ }
+
+ ret = packed_fsck_ref_content(o, ref_store, &sorted, packed_ref_content.buf,
+ packed_ref_content.buf + packed_ref_content.len);
+ if (!ret && sorted)
+ ret = packed_fsck_ref_sorted(o, ref_store, packed_ref_content.buf,
+ packed_ref_content.buf + packed_ref_content.len);
+
+cleanup:
+ if (fd >= 0)
+ close(fd);
+ strbuf_release(&packed_ref_content);
+ return ret;
}
struct ref_storage_be refs_be_packed = {
- .next = NULL,
.name = "packed",
- .init = packed_ref_store_create,
- .init_db = packed_init_db,
+ .init = packed_ref_store_init,
+ .release = packed_ref_store_release,
+ .create_on_disk = packed_ref_store_create_on_disk,
+ .remove_on_disk = packed_ref_store_remove_on_disk,
+
.transaction_prepare = packed_transaction_prepare,
.transaction_finish = packed_transaction_finish,
.transaction_abort = packed_transaction_abort,
- .initial_transaction_commit = packed_initial_transaction_commit,
.pack_refs = packed_pack_refs,
- .create_symref = NULL,
- .delete_refs = packed_delete_refs,
.rename_ref = NULL,
.copy_ref = NULL,
@@ -1620,4 +2142,6 @@ struct ref_storage_be refs_be_packed = {
.create_reflog = NULL,
.delete_reflog = NULL,
.reflog_expire = NULL,
+
+ .fsck = packed_fsck,
};