Diffstat (limited to 'refs')
 -rw-r--r--  refs/debug.c               43
 -rw-r--r--  refs/files-backend.c      801
 -rw-r--r--  refs/iterator.c            77
 -rw-r--r--  refs/packed-backend.c     125
 -rw-r--r--  refs/packed-backend.h      13
 -rw-r--r--  refs/ref-cache.c           14
 -rw-r--r--  refs/ref-cache.h            4
 -rw-r--r--  refs/refs-internal.h      151
 -rw-r--r--  refs/reftable-backend.c  2353
 9 files changed, 3207 insertions(+), 374 deletions(-)
diff --git a/refs/debug.c b/refs/debug.c
index 634681ca44..45e2e784a0 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -33,11 +33,18 @@ struct ref_store *maybe_debug_wrap_ref_store(const char *gitdir, struct ref_stor
return (struct ref_store *)res;
}
-static int debug_init_db(struct ref_store *refs, int flags, struct strbuf *err)
+static void debug_release(struct ref_store *refs)
{
struct debug_ref_store *drefs = (struct debug_ref_store *)refs;
- int res = drefs->refs->be->init_db(drefs->refs, flags, err);
- trace_printf_key(&trace_refs, "init_db: %d\n", res);
+ drefs->refs->be->release(drefs->refs);
+ trace_printf_key(&trace_refs, "release\n");
+}
+
+static int debug_create_on_disk(struct ref_store *refs, int flags, struct strbuf *err)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)refs;
+ int res = drefs->refs->be->create_on_disk(drefs->refs, flags, err);
+ trace_printf_key(&trace_refs, "create_on_disk: %d\n", res);
return res;
}
@@ -131,18 +138,6 @@ static int debug_pack_refs(struct ref_store *ref_store, struct pack_refs_opts *o
return res;
}
-static int debug_create_symref(struct ref_store *ref_store,
- const char *ref_name, const char *target,
- const char *logmsg)
-{
- struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
- int res = drefs->refs->be->create_symref(drefs->refs, ref_name, target,
- logmsg);
- trace_printf_key(&trace_refs, "create_symref: %s -> %s \"%s\": %d\n", ref_name,
- target, logmsg, res);
- return res;
-}
-
static int debug_rename_ref(struct ref_store *ref_store, const char *oldref,
const char *newref, const char *logmsg)
{
@@ -181,7 +176,6 @@ static int debug_ref_iterator_advance(struct ref_iterator *ref_iterator)
trace_printf_key(&trace_refs, "iterator_advance: %s (0)\n",
diter->iter->refname);
- diter->base.ordered = diter->iter->ordered;
diter->base.refname = diter->iter->refname;
diter->base.oid = diter->iter->oid;
diter->base.flags = diter->iter->flags;
@@ -222,7 +216,7 @@ debug_ref_iterator_begin(struct ref_store *ref_store, const char *prefix,
drefs->refs->be->iterator_begin(drefs->refs, prefix,
exclude_patterns, flags);
struct debug_ref_iterator *diter = xcalloc(1, sizeof(*diter));
- base_ref_iterator_init(&diter->base, &debug_ref_iterator_vtable, 1);
+ base_ref_iterator_init(&diter->base, &debug_ref_iterator_vtable);
diter->iter = res;
trace_printf_key(&trace_refs, "ref_iterator_begin: \"%s\" (0x%x)\n",
prefix, flags);
@@ -425,10 +419,20 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname,
return res;
}
+static int debug_fsck(struct ref_store *ref_store,
+ struct fsck_options *o)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
+ int res = drefs->refs->be->fsck(drefs->refs, o);
+ trace_printf_key(&trace_refs, "fsck: %d\n", res);
+ return res;
+}
+
struct ref_storage_be refs_be_debug = {
.name = "debug",
.init = NULL,
- .init_db = debug_init_db,
+ .release = debug_release,
+ .create_on_disk = debug_create_on_disk,
/*
* None of these should be NULL. If the "files" backend (in
@@ -442,7 +446,6 @@ struct ref_storage_be refs_be_debug = {
.initial_transaction_commit = debug_initial_transaction_commit,
.pack_refs = debug_pack_refs,
- .create_symref = debug_create_symref,
.rename_ref = debug_rename_ref,
.copy_ref = debug_copy_ref,
@@ -457,4 +460,6 @@ struct ref_storage_be refs_be_debug = {
.create_reflog = debug_create_reflog,
.delete_reflog = debug_delete_reflog,
.reflog_expire = debug_reflog_expire,
+
+ .fsck = debug_fsck,
};
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 75dcc21ecb..0824c0b8a9 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -1,10 +1,15 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
#include "../git-compat-util.h"
+#include "../config.h"
#include "../copy.h"
#include "../environment.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
+#include "../fsck.h"
#include "../refs.h"
+#include "../repo-settings.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
@@ -71,6 +76,8 @@ struct files_ref_store {
unsigned int store_flags;
char *gitcommondir;
+ enum log_refs_config log_all_ref_updates;
+ int prefer_symlink_refs;
struct ref_cache *loose;
@@ -89,9 +96,9 @@ static void clear_loose_ref_cache(struct files_ref_store *refs)
* Create a new submodule ref cache and add it to the internal
* set of caches.
*/
-static struct ref_store *files_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int flags)
+static struct ref_store *files_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int flags)
{
struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
struct ref_store *ref_store = (struct ref_store *)refs;
@@ -102,7 +109,9 @@ static struct ref_store *files_ref_store_create(struct repository *repo,
get_common_dir_noenv(&sb, gitdir);
refs->gitcommondir = strbuf_detach(&sb, NULL);
refs->packed_ref_store =
- packed_ref_store_create(repo, refs->gitcommondir, flags);
+ packed_ref_store_init(repo, refs->gitcommondir, flags);
+ refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
+ repo_config_get_bool(repo, "core.prefersymlinkrefs", &refs->prefer_symlink_refs);
chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir);
chdir_notify_reparent("files-backend $GIT_COMMONDIR",
@@ -149,6 +158,15 @@ static struct files_ref_store *files_downcast(struct ref_store *ref_store,
return refs;
}
+static void files_ref_store_release(struct ref_store *ref_store)
+{
+ struct files_ref_store *refs = files_downcast(ref_store, 0, "release");
+ free_ref_cache(refs->loose);
+ free(refs->gitcommondir);
+ ref_store_release(refs->packed_ref_store);
+ free(refs->packed_ref_store);
+}
+
static void files_reflog_path(struct files_ref_store *refs,
struct strbuf *sb,
const char *refname)
@@ -229,6 +247,45 @@ static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dir
}
}
+static void loose_fill_ref_dir_regular_file(struct files_ref_store *refs,
+ const char *refname,
+ struct ref_dir *dir)
+{
+ struct object_id oid;
+ int flag;
+ const char *referent = refs_resolve_ref_unsafe(&refs->base,
+ refname,
+ RESOLVE_REF_READING,
+ &oid, &flag);
+
+ if (!referent) {
+ oidclr(&oid, refs->base.repo->hash_algo);
+ flag |= REF_ISBROKEN;
+ } else if (is_null_oid(&oid)) {
+ /*
+ * It is so astronomically unlikely
+ * that null_oid is the OID of an
+ * actual object that we consider its
+ * appearance in a loose reference
+ * file to be repo corruption
+ * (probably due to a software bug).
+ */
+ flag |= REF_ISBROKEN;
+ }
+
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(refname))
+ die("loose refname is dangerous: %s", refname);
+ oidclr(&oid, refs->base.repo->hash_algo);
+ flag |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+
+ if (!(flag & REF_ISSYMREF))
+ referent = NULL;
+
+ add_entry_to_dir(dir, create_ref_entry(refname, referent, &oid, flag));
+}
+
/*
* Read the loose references from the namespace dirname into dir
* (without recursing). dirname must end with '/'. dir must be the
@@ -257,8 +314,6 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
strbuf_add(&refname, dirname, dirnamelen);
while ((de = readdir(d)) != NULL) {
- struct object_id oid;
- int flag;
unsigned char dtype;
if (de->d_name[0] == '.')
@@ -274,33 +329,7 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
create_dir_entry(dir->cache, refname.buf,
refname.len));
} else if (dtype == DT_REG) {
- if (!refs_resolve_ref_unsafe(&refs->base,
- refname.buf,
- RESOLVE_REF_READING,
- &oid, &flag)) {
- oidclr(&oid);
- flag |= REF_ISBROKEN;
- } else if (is_null_oid(&oid)) {
- /*
- * It is so astronomically unlikely
- * that null_oid is the OID of an
- * actual object that we consider its
- * appearance in a loose reference
- * file to be repo corruption
- * (probably due to a software bug).
- */
- flag |= REF_ISBROKEN;
- }
-
- if (check_refname_format(refname.buf,
- REFNAME_ALLOW_ONELEVEL)) {
- if (!refname_is_safe(refname.buf))
- die("loose refname is dangerous: %s", refname.buf);
- oidclr(&oid);
- flag |= REF_BAD_NAME | REF_ISBROKEN;
- }
- add_entry_to_dir(dir,
- create_ref_entry(refname.buf, &oid, flag));
+ loose_fill_ref_dir_regular_file(refs, refname.buf, dir);
}
strbuf_setlen(&refname, dirnamelen);
}
@@ -311,9 +340,89 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
add_per_worktree_entries_to_dir(dir, dirname);
}
-static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
+static int for_each_root_ref(struct files_ref_store *refs,
+ int (*cb)(const char *refname, void *cb_data),
+ void *cb_data)
+{
+ struct strbuf path = STRBUF_INIT, refname = STRBUF_INIT;
+ const char *dirname = refs->loose->root->name;
+ struct dirent *de;
+ size_t dirnamelen;
+ int ret;
+ DIR *d;
+
+ files_ref_path(refs, &path, dirname);
+
+ d = opendir(path.buf);
+ if (!d) {
+ strbuf_release(&path);
+ return -1;
+ }
+
+ strbuf_addstr(&refname, dirname);
+ dirnamelen = refname.len;
+
+ while ((de = readdir(d)) != NULL) {
+ unsigned char dtype;
+
+ if (de->d_name[0] == '.')
+ continue;
+ if (ends_with(de->d_name, ".lock"))
+ continue;
+ strbuf_addstr(&refname, de->d_name);
+
+ dtype = get_dtype(de, &path, 1);
+ if (dtype == DT_REG && is_root_ref(de->d_name)) {
+ ret = cb(refname.buf, cb_data);
+ if (ret)
+ goto done;
+ }
+
+ strbuf_setlen(&refname, dirnamelen);
+ }
+
+ ret = 0;
+
+done:
+ strbuf_release(&refname);
+ strbuf_release(&path);
+ closedir(d);
+ return ret;
+}
+
+struct fill_root_ref_data {
+ struct files_ref_store *refs;
+ struct ref_dir *dir;
+};
+
+static int fill_root_ref(const char *refname, void *cb_data)
+{
+ struct fill_root_ref_data *data = cb_data;
+ loose_fill_ref_dir_regular_file(data->refs, refname, data->dir);
+ return 0;
+}
+
+/*
+ * Add root refs to the ref dir by parsing the directory for any files which
+ * follow the root ref syntax.
+ */
+static void add_root_refs(struct files_ref_store *refs,
+ struct ref_dir *dir)
+{
+ struct fill_root_ref_data data = {
+ .refs = refs,
+ .dir = dir,
+ };
+
+ for_each_root_ref(refs, fill_root_ref, &data);
+}
+
+static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs,
+ unsigned int flags)
{
if (!refs->loose) {
+ struct ref_dir *dir;
+
/*
* Mark the top-level directory complete because we
* are about to read the only subdirectory that can
@@ -324,12 +433,16 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
/* We're going to fill the top level ourselves: */
refs->loose->root->flag &= ~REF_INCOMPLETE;
+ dir = get_ref_dir(refs->loose->root);
+
+ if (flags & DO_FOR_EACH_INCLUDE_ROOT_REFS)
+ add_root_refs(refs, dir);
+
/*
* Add an incomplete entry for "refs/" (to be filled
* lazily):
*/
- add_entry_to_dir(get_ref_dir(refs->loose->root),
- create_dir_entry(refs->loose, "refs/", 5));
+ add_entry_to_dir(dir, create_dir_entry(refs->loose, "refs/", 5));
}
return refs->loose;
}
@@ -454,7 +567,8 @@ stat_ref:
strbuf_rtrim(&sb_contents);
buf = sb_contents.buf;
- ret = parse_loose_ref_contents(buf, oid, referent, type, &myerr);
+ ret = parse_loose_ref_contents(ref_store->repo->hash_algo, buf,
+ oid, referent, type, &myerr);
out:
if (ret && !myerr)
@@ -488,7 +602,8 @@ static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refn
return !(type & REF_ISSYMREF);
}
-int parse_loose_ref_contents(const char *buf, struct object_id *oid,
+int parse_loose_ref_contents(const struct git_hash_algo *algop,
+ const char *buf, struct object_id *oid,
struct strbuf *referent, unsigned int *type,
int *failure_errno)
{
@@ -506,7 +621,7 @@ int parse_loose_ref_contents(const char *buf, struct object_id *oid,
/*
* FETCH_HEAD has additional data after the sha.
*/
- if (parse_oid_hex(buf, oid, &p) ||
+ if (parse_oid_hex_algop(buf, oid, &p, algop) ||
(*p != '\0' && !isspace(*p))) {
*type |= REF_ISBROKEN;
*failure_errno = EINVAL;
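
The widened signature threads the hash algorithm through explicitly rather than relying on the global the_hash_algo. A minimal caller-side sketch, assuming a repo and a trimmed buf are in scope (the error handling here is illustrative only, not from this patch):

    struct object_id oid;
    struct strbuf referent = STRBUF_INIT;
    unsigned int type = 0;
    int failure_errno = 0;

    /* buf holds the contents of a loose ref file, e.g.
     * "ref: refs/heads/main" or a plain hex object ID. */
    if (parse_loose_ref_contents(repo->hash_algo, buf, &oid,
                                 &referent, &type, &failure_errno))
        error("invalid loose ref contents: %s", strerror(failure_errno));
    strbuf_release(&referent);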
@@ -735,8 +850,10 @@ retry:
*/
if (refs_verify_refname_available(
refs->packed_ref_store, refname,
- extras, NULL, err))
+ extras, NULL, err)) {
+ ret = TRANSACTION_NAME_CONFLICT;
goto error_return;
+ }
}
ret = 0;
@@ -786,6 +903,8 @@ static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
iter->base.refname = iter->iter0->refname;
iter->base.oid = iter->iter0->oid;
iter->base.flags = iter->iter0->flags;
+ iter->base.referent = iter->iter0->referent;
+
return ITER_OK;
}
@@ -857,7 +976,7 @@ static struct ref_iterator *files_ref_iterator_begin(
* disk, and re-reads it if not.
*/
- loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
+ loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, flags),
prefix, ref_store->repo, 1);
/*
@@ -879,8 +998,7 @@ static struct ref_iterator *files_ref_iterator_begin(
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
- overlay_iter->ordered);
+ base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
iter->iter0 = overlay_iter;
iter->repo = ref_store->repo;
iter->flags = flags;
@@ -1053,7 +1171,7 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
&lock->old_oid, NULL))
- oidclr(&lock->old_oid);
+ oidclr(&lock->old_oid, refs->base.repo->hash_algo);
goto out;
error_return:
@@ -1140,7 +1258,7 @@ static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
ref_transaction_add_update(
transaction, r->name,
REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
- null_oid(), &r->oid, NULL);
+ null_oid(), &r->oid, NULL, NULL, NULL);
if (ref_transaction_commit(transaction, &err))
goto cleanup;
@@ -1171,7 +1289,8 @@ static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_
/*
* Return true if the specified reference should be packed.
*/
-static int should_pack_ref(const char *refname,
+static int should_pack_ref(struct files_ref_store *refs,
+ const char *refname,
const struct object_id *oid, unsigned int ref_flags,
struct pack_refs_opts *opts)
{
@@ -1187,7 +1306,7 @@ static int should_pack_ref(const char *refname,
return 0;
/* Do not pack broken refs: */
- if (!ref_resolves_to_object(refname, the_repository, oid, ref_flags))
+ if (!ref_resolves_to_object(refname, refs->base.repo, oid, ref_flags))
return 0;
if (ref_excluded(opts->exclusions, refname))
@@ -1200,6 +1319,68 @@ static int should_pack_ref(const char *refname,
return 0;
}
+static int should_pack_refs(struct files_ref_store *refs,
+ struct pack_refs_opts *opts)
+{
+ struct ref_iterator *iter;
+ size_t packed_size;
+ size_t refcount = 0;
+ size_t limit;
+ int ret;
+
+ if (!(opts->flags & PACK_REFS_AUTO))
+ return 1;
+
+ ret = packed_refs_size(refs->packed_ref_store, &packed_size);
+ if (ret < 0)
+ die("cannot determine packed-refs size");
+
+ /*
+ * Packing loose references into the packed-refs file scales with the
+ * number of references we're about to write. We thus decide whether we
+ * repack refs by weighing the current size of the packed-refs file
+ * against the number of loose references. This is done such that we do
+ * not repack too often on repositories with a huge number of
+ * references, where we can expect a lot of churn in the number of
+ * references.
+ *
+ * As a heuristic, we repack if the number of loose references in the
+ * repository exceeds `log2(nr_packed_refs) * 5`, where we estimate
+ * `nr_packed_refs = packed_size / 100`, which scales as following:
+ *
+ * - 1kB ~ 10 packed refs: 16 refs
+ * - 10kB ~ 100 packed refs: 33 refs
+ * - 100kB ~ 1k packed refs: 49 refs
+ * - 1MB ~ 10k packed refs: 66 refs
+ * - 10MB ~ 100k packed refs: 82 refs
+ * - 100MB ~ 1m packed refs: 99 refs
+ *
+ * We thus allow roughly 16 additional loose refs per factor of ten of
+ * packed refs. This heuristic may be tweaked in the future, but should
+ * serve as a sufficiently good first iteration.
+ */
+ limit = log2u(packed_size / 100) * 5;
+ if (limit < 16)
+ limit = 16;
+
+ iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL,
+ refs->base.repo, 0);
+ while ((ret = ref_iterator_advance(iter)) == ITER_OK) {
+ if (should_pack_ref(refs, iter->refname, iter->oid,
+ iter->flags, opts))
+ refcount++;
+ if (refcount >= limit) {
+ ref_iterator_abort(iter);
+ return 1;
+ }
+ }
+
+ if (ret != ITER_DONE)
+ die("error while iterating over references");
+
+ return 0;
+}
+
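To make the heuristic concrete: a 1MB packed-refs file is estimated to hold about 10k refs (one ref per 100 bytes), and log2(10k) * 5 is roughly 66, so packing in auto mode (PACK_REFS_AUTO) only triggers once about 66 loose refs have accumulated. The floor of 16 keeps small repositories from repacking after every couple of updates.
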
static int files_pack_refs(struct ref_store *ref_store,
struct pack_refs_opts *opts)
{
@@ -1212,21 +1393,24 @@ static int files_pack_refs(struct ref_store *ref_store,
struct strbuf err = STRBUF_INIT;
struct ref_transaction *transaction;
+ if (!should_pack_refs(refs, opts))
+ return 0;
+
transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
if (!transaction)
return -1;
packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
- iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL,
- the_repository, 0);
+ iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL,
+ refs->base.repo, 0);
while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
/*
* If the loose reference can be packed, add an entry
* in the packed ref cache. If the reference should be
* pruned, also add it to refs_to_prune.
*/
- if (!should_pack_ref(iter->refname, iter->oid, iter->flags, opts))
+ if (!should_pack_ref(refs, iter->refname, iter->oid, iter->flags, opts))
continue;
/*
@@ -1234,7 +1418,7 @@ static int files_pack_refs(struct ref_store *ref_store,
* packed-refs transaction:
*/
if (ref_transaction_update(transaction, iter->refname,
- iter->oid, NULL,
+ iter->oid, NULL, NULL, NULL,
REF_NO_DEREF, NULL, &err))
die("failure preparing to create packed reference %s: %s",
iter->refname, err.buf);
@@ -1323,12 +1507,14 @@ static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
return ret;
}
-static int write_ref_to_lockfile(struct ref_lock *lock,
+static int write_ref_to_lockfile(struct files_ref_store *refs,
+ struct ref_lock *lock,
const struct object_id *oid,
int skip_oid_verification, struct strbuf *err);
static int commit_ref_update(struct files_ref_store *refs,
struct ref_lock *lock,
const struct object_id *oid, const char *logmsg,
+ int flags,
struct strbuf *err);
/*
@@ -1471,8 +1657,8 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
}
oidcpy(&lock->old_oid, &orig_oid);
- if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
- commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
+ if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
+ commit_ref_update(refs, lock, &orig_oid, logmsg, 0, &err)) {
error("unable to write current sha1 into %s: %s", newrefname, err.buf);
strbuf_release(&err);
goto rollback;
@@ -1489,14 +1675,11 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
goto rollbacklog;
}
- flag = log_all_ref_updates;
- log_all_ref_updates = LOG_REFS_NONE;
- if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
- commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
+ if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
+ commit_ref_update(refs, lock, &orig_oid, NULL, REF_SKIP_CREATE_REFLOG, &err)) {
error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
strbuf_release(&err);
}
- log_all_ref_updates = flag;
rollbacklog:
if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
@@ -1591,13 +1774,17 @@ static int log_ref_setup(struct files_ref_store *refs,
const char *refname, int force_create,
int *logfd, struct strbuf *err)
{
+ enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
struct strbuf logfile_sb = STRBUF_INIT;
char *logfile;
+ if (log_refs_cfg == LOG_REFS_UNSET)
+ log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
+
files_reflog_path(refs, &logfile_sb, refname);
logfile = strbuf_detach(&logfile_sb, NULL);
- if (force_create || should_autocreate_reflog(refname)) {
+ if (force_create || should_autocreate_reflog(log_refs_cfg, refname)) {
if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
if (errno == ENOENT)
strbuf_addf(err, "unable to create directory for '%s': "
@@ -1683,8 +1870,8 @@ static int files_log_ref_write(struct files_ref_store *refs,
{
int logfd, result;
- if (log_all_ref_updates == LOG_REFS_UNSET)
- log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
+ if (flags & REF_SKIP_CREATE_REFLOG)
+ return 0;
result = log_ref_setup(refs, refname,
flags & REF_FORCE_CREATE_REFLOG,
@@ -1725,7 +1912,8 @@ static int files_log_ref_write(struct files_ref_store *refs,
* Write oid into the open lockfile, then close the lockfile. On
* errors, rollback the lockfile, fill in *err and return -1.
*/
-static int write_ref_to_lockfile(struct ref_lock *lock,
+static int write_ref_to_lockfile(struct files_ref_store *refs,
+ struct ref_lock *lock,
const struct object_id *oid,
int skip_oid_verification, struct strbuf *err)
{
@@ -1734,7 +1922,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
int fd;
if (!skip_oid_verification) {
- o = parse_object(the_repository, oid);
+ o = parse_object(refs->base.repo, oid);
if (!o) {
strbuf_addf(
err,
@@ -1753,7 +1941,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
}
}
fd = get_lock_file_fd(&lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
+ if (write_in_full(fd, oid_to_hex(oid), refs->base.repo->hash_algo->hexsz) < 0 ||
write_in_full(fd, &term, 1) < 0 ||
fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
close_ref_gently(lock) < 0) {
@@ -1773,6 +1961,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
static int commit_ref_update(struct files_ref_store *refs,
struct ref_lock *lock,
const struct object_id *oid, const char *logmsg,
+ int flags,
struct strbuf *err)
{
files_assert_main_repository(refs, "commit_ref_update");
@@ -1780,7 +1969,7 @@ static int commit_ref_update(struct files_ref_store *refs,
clear_loose_ref_cache(refs);
if (files_log_ref_write(refs, lock->ref_name,
&lock->old_oid, oid,
- logmsg, 0, err)) {
+ logmsg, flags, err)) {
char *old_msg = strbuf_detach(err, NULL);
strbuf_addf(err, "cannot update the ref '%s': %s",
lock->ref_name, old_msg);
@@ -1813,7 +2002,7 @@ static int commit_ref_update(struct files_ref_store *refs,
struct strbuf log_err = STRBUF_INIT;
if (files_log_ref_write(refs, "HEAD",
&lock->old_oid, oid,
- logmsg, 0, &log_err)) {
+ logmsg, flags, &log_err)) {
error("%s", log_err.buf);
strbuf_release(&log_err);
}
@@ -1830,10 +2019,13 @@ static int commit_ref_update(struct files_ref_store *refs,
return 0;
}
+#ifdef NO_SYMLINK_HEAD
+#define create_ref_symlink(a, b) (-1)
+#else
static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
int ret = -1;
-#ifndef NO_SYMLINK_HEAD
+
char *ref_path = get_locked_file_path(&lock->lk);
unlink(ref_path);
ret = symlink(target, ref_path);
@@ -1841,70 +2033,26 @@ static int create_ref_symlink(struct ref_lock *lock, const char *target)
if (ret)
fprintf(stderr, "no symlink - falling back to symbolic ref\n");
-#endif
return ret;
}
+#endif
-static void update_symref_reflog(struct files_ref_store *refs,
- struct ref_lock *lock, const char *refname,
- const char *target, const char *logmsg)
-{
- struct strbuf err = STRBUF_INIT;
- struct object_id new_oid;
-
- if (logmsg &&
- refs_resolve_ref_unsafe(&refs->base, target,
- RESOLVE_REF_READING, &new_oid, NULL) &&
- files_log_ref_write(refs, refname, &lock->old_oid,
- &new_oid, logmsg, 0, &err)) {
- error("%s", err.buf);
- strbuf_release(&err);
- }
-}
-
-static int create_symref_locked(struct files_ref_store *refs,
- struct ref_lock *lock, const char *refname,
- const char *target, const char *logmsg)
+static int create_symref_lock(struct ref_lock *lock, const char *target,
+ struct strbuf *err)
{
- if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
- update_symref_reflog(refs, lock, refname, target, logmsg);
- return 0;
+ if (!fdopen_lock_file(&lock->lk, "w")) {
+ strbuf_addf(err, "unable to fdopen %s: %s",
+ get_lock_file_path(&lock->lk), strerror(errno));
+ return -1;
}
- if (!fdopen_lock_file(&lock->lk, "w"))
- return error("unable to fdopen %s: %s",
+ if (fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target) < 0) {
+ strbuf_addf(err, "unable to write to %s: %s",
get_lock_file_path(&lock->lk), strerror(errno));
-
- update_symref_reflog(refs, lock, refname, target, logmsg);
-
- /* no error check; commit_ref will check ferror */
- fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target);
- if (commit_ref(lock) < 0)
- return error("unable to write symref for %s: %s", refname,
- strerror(errno));
- return 0;
-}
-
-static int files_create_symref(struct ref_store *ref_store,
- const char *refname, const char *target,
- const char *logmsg)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
- struct strbuf err = STRBUF_INIT;
- struct ref_lock *lock;
- int ret;
-
- lock = lock_ref_oid_basic(refs, refname, &err);
- if (!lock) {
- error("%s", err.buf);
- strbuf_release(&err);
return -1;
}
- ret = create_symref_locked(refs, lock, refname, target, logmsg);
- unlock_ref(lock);
- return ret;
+ return 0;
}
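
The lock file written by create_symref_lock() carries the standard loose-symref format, a single line naming the target ref, for example:

    ref: refs/heads/main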
static int files_reflog_exists(struct ref_store *ref_store,
@@ -1936,7 +2084,8 @@ static int files_delete_reflog(struct ref_store *ref_store,
return ret;
}
-static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
+static int show_one_reflog_ent(struct files_ref_store *refs, struct strbuf *sb,
+ each_reflog_ent_fn fn, void *cb_data)
{
struct object_id ooid, noid;
char *email_end, *message;
@@ -1946,8 +2095,8 @@ static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *c
/* old SP new SP name <email> SP time TAB msg LF */
if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
- parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
- parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
+ parse_oid_hex_algop(p, &ooid, &p, refs->base.repo->hash_algo) || *p++ != ' ' ||
+ parse_oid_hex_algop(p, &noid, &p, refs->base.repo->hash_algo) || *p++ != ' ' ||
!(email_end = strchr(p, '>')) ||
email_end[1] != ' ' ||
!(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
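
A concrete example of a reflog entry in the format parsed here, with a TAB separating the timestamp and zone from the message (OIDs illustrative):

    e83c5163316f89bfbde7d9ab23ca2e25604af290 d6602ec5194c87b0fc87103ca4d67251c76f233a A U Thor <author@example.com> 1112911993 -0700	checkout: moving from main to topic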
@@ -2046,7 +2195,7 @@ static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
scanp = bp;
endp = bp + 1;
- ret = show_one_reflog_ent(&sb, fn, cb_data);
+ ret = show_one_reflog_ent(refs, &sb, fn, cb_data);
strbuf_reset(&sb);
if (ret)
break;
@@ -2058,7 +2207,7 @@ static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
* Process it, and we can end the loop.
*/
strbuf_splice(&sb, 0, 0, buf, endp - buf);
- ret = show_one_reflog_ent(&sb, fn, cb_data);
+ ret = show_one_reflog_ent(refs, &sb, fn, cb_data);
strbuf_reset(&sb);
break;
}
@@ -2108,7 +2257,7 @@ static int files_for_each_reflog_ent(struct ref_store *ref_store,
return -1;
while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
- ret = show_one_reflog_ent(&sb, fn, cb_data);
+ ret = show_one_reflog_ent(refs, &sb, fn, cb_data);
fclose(logfp);
strbuf_release(&sb);
return ret;
@@ -2116,10 +2265,8 @@ static int files_for_each_reflog_ent(struct ref_store *ref_store,
struct files_reflog_iterator {
struct ref_iterator base;
-
struct ref_store *ref_store;
struct dir_iterator *dir_iterator;
- struct object_id oid;
};
static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
@@ -2130,25 +2277,13 @@ static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
int ok;
while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
- int flags;
-
if (!S_ISREG(diter->st.st_mode))
continue;
- if (diter->basename[0] == '.')
- continue;
- if (ends_with(diter->basename, ".lock"))
+ if (check_refname_format(diter->basename,
+ REFNAME_ALLOW_ONELEVEL))
continue;
- if (!refs_resolve_ref_unsafe(iter->ref_store,
- diter->relative_path, 0,
- &iter->oid, &flags)) {
- error("bad ref for %s", diter->path.buf);
- continue;
- }
-
iter->base.refname = diter->relative_path;
- iter->base.oid = &iter->oid;
- iter->base.flags = flags;
return ITER_OK;
}
@@ -2193,7 +2328,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
strbuf_addf(&sb, "%s/logs", gitdir);
- diter = dir_iterator_begin(sb.buf, 0);
+ diter = dir_iterator_begin(sb.buf, DIR_ITERATOR_SORTED);
if (!diter) {
strbuf_release(&sb);
return empty_ref_iterator_begin();
@@ -2202,7 +2337,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
+ base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
iter->dir_iterator = diter;
iter->ref_store = ref_store;
strbuf_release(&sb);
@@ -2210,32 +2345,6 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
return ref_iterator;
}
-static enum iterator_selection reflog_iterator_select(
- struct ref_iterator *iter_worktree,
- struct ref_iterator *iter_common,
- void *cb_data UNUSED)
-{
- if (iter_worktree) {
- /*
- * We're a bit loose here. We probably should ignore
- * common refs if they are accidentally added as
- * per-worktree refs.
- */
- return ITER_SELECT_0;
- } else if (iter_common) {
- if (parse_worktree_ref(iter_common->refname, NULL, NULL,
- NULL) == REF_WORKTREE_SHARED)
- return ITER_SELECT_1;
-
- /*
- * The main ref store may contain main worktree's
- * per-worktree refs, which should be ignored
- */
- return ITER_SKIP_1;
- } else
- return ITER_DONE;
-}
-
static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
{
struct files_ref_store *refs =
@@ -2246,9 +2355,9 @@ static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_st
return reflog_iterator_begin(ref_store, refs->gitcommondir);
} else {
return merge_ref_iterator_begin(
- 0, reflog_iterator_begin(ref_store, refs->base.gitdir),
+ reflog_iterator_begin(ref_store, refs->base.gitdir),
reflog_iterator_begin(ref_store, refs->gitcommondir),
- reflog_iterator_select, refs);
+ ref_iterator_select, refs);
}
}
@@ -2266,6 +2375,7 @@ static int split_head_update(struct ref_update *update,
struct ref_update *new_update;
if ((update->flags & REF_LOG_ONLY) ||
+ (update->flags & REF_SKIP_CREATE_REFLOG) ||
(update->flags & REF_IS_PRUNING) ||
(update->flags & REF_UPDATE_VIA_HEAD))
return 0;
@@ -2291,7 +2401,7 @@ static int split_head_update(struct ref_update *update,
transaction, "HEAD",
update->flags | REF_LOG_ONLY | REF_NO_DEREF,
&update->new_oid, &update->old_oid,
- update->msg);
+ NULL, NULL, update->msg);
/*
* Add "HEAD". This insertion is O(N) in the transaction
@@ -2353,8 +2463,9 @@ static int split_symref_update(struct ref_update *update,
new_update = ref_transaction_add_update(
transaction, referent, new_flags,
- &update->new_oid, &update->old_oid,
- update->msg);
+ update->new_target ? NULL : &update->new_oid,
+ update->old_target ? NULL : &update->old_oid,
+ update->new_target, update->old_target, update->msg);
new_update->parent_update = update;
@@ -2383,17 +2494,6 @@ static int split_symref_update(struct ref_update *update,
}
/*
- * Return the refname under which update was originally requested.
- */
-static const char *original_update_refname(struct ref_update *update)
-{
- while (update->parent_update)
- update = update->parent_update;
-
- return update->refname;
-}
-
-/*
* Check whether the REF_HAVE_OLD and old_oid values stored in update
* are consistent with oid, which is the reference's current value. If
* everything is OK, return 0; otherwise, write an error message to
@@ -2409,16 +2509,16 @@ static int check_old_oid(struct ref_update *update, struct object_id *oid,
if (is_null_oid(&update->old_oid))
strbuf_addf(err, "cannot lock ref '%s': "
"reference already exists",
- original_update_refname(update));
+ ref_update_original_update_refname(update));
else if (is_null_oid(oid))
strbuf_addf(err, "cannot lock ref '%s': "
"reference is missing but expected %s",
- original_update_refname(update),
+ ref_update_original_update_refname(update),
oid_to_hex(&update->old_oid));
else
strbuf_addf(err, "cannot lock ref '%s': "
"is at %s but expected %s",
- original_update_refname(update),
+ ref_update_original_update_refname(update),
oid_to_hex(oid),
oid_to_hex(&update->old_oid));
@@ -2446,14 +2546,13 @@ static int lock_ref_for_update(struct files_ref_store *refs,
struct strbuf *err)
{
struct strbuf referent = STRBUF_INIT;
- int mustexist = (update->flags & REF_HAVE_OLD) &&
- !is_null_oid(&update->old_oid);
+ int mustexist = ref_update_expects_existing_old_ref(update);
int ret = 0;
struct ref_lock *lock;
files_assert_main_repository(refs, "lock_ref_for_update");
- if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
+ if ((update->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(update))
update->flags |= REF_DELETING;
if (head_ref) {
@@ -2472,7 +2571,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
reason = strbuf_detach(err, NULL);
strbuf_addf(err, "cannot lock ref '%s': %s",
- original_update_refname(update), reason);
+ ref_update_original_update_refname(update), reason);
free(reason);
goto out;
}
@@ -2492,11 +2591,18 @@ static int lock_ref_for_update(struct files_ref_store *refs,
if (update->flags & REF_HAVE_OLD) {
strbuf_addf(err, "cannot lock ref '%s': "
"error reading reference",
- original_update_refname(update));
+ ref_update_original_update_refname(update));
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
+ }
+
+ if (update->old_target) {
+ if (ref_update_check_old_target(referent.buf, update, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto out;
}
- } else if (check_old_oid(update, &lock->old_oid, err)) {
+ } else if (check_old_oid(update, &lock->old_oid, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto out;
}
@@ -2517,7 +2623,19 @@ static int lock_ref_for_update(struct files_ref_store *refs,
} else {
struct ref_update *parent_update;
- if (check_old_oid(update, &lock->old_oid, err)) {
+ /*
+ * Even if the ref is a regular ref, if `old_target` is set, we
+ * fail with an error.
+ */
+ if (update->old_target) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "expected symref with target '%s': "
+ "but is a regular ref"),
+ ref_update_original_update_refname(update),
+ update->old_target);
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ } else if (check_old_oid(update, &lock->old_oid, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto out;
}
@@ -2535,9 +2653,27 @@ static int lock_ref_for_update(struct files_ref_store *refs,
}
}
- if ((update->flags & REF_HAVE_NEW) &&
- !(update->flags & REF_DELETING) &&
- !(update->flags & REF_LOG_ONLY)) {
+ if (update->new_target && !(update->flags & REF_LOG_ONLY)) {
+ if (create_symref_lock(lock, update->new_target, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
+
+ if (close_ref_gently(lock)) {
+ strbuf_addf(err, "couldn't close '%s.lock'",
+ update->refname);
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
+
+ /*
+ * Once we have created the symref lock, the commit
+ * phase of the transaction only needs to commit the lock.
+ */
+ update->flags |= REF_NEEDS_COMMIT;
+ } else if ((update->flags & REF_HAVE_NEW) &&
+ !(update->flags & REF_DELETING) &&
+ !(update->flags & REF_LOG_ONLY)) {
if (!(update->type & REF_ISSYMREF) &&
oideq(&lock->old_oid, &update->new_oid)) {
/*
@@ -2545,7 +2681,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
* value, so we don't need to write it.
*/
} else if (write_ref_to_lockfile(
- lock, &update->new_oid,
+ refs, lock, &update->new_oid,
update->flags & REF_SKIP_OID_VERIFICATION,
err)) {
char *write_err = strbuf_detach(err, NULL);
@@ -2745,7 +2881,7 @@ static int files_transaction_prepare(struct ref_store *ref_store,
packed_transaction, update->refname,
REF_HAVE_NEW | REF_NO_DEREF,
&update->new_oid, NULL,
- NULL);
+ NULL, NULL, NULL);
}
}
@@ -2800,6 +2936,43 @@ cleanup:
return ret;
}
+static int parse_and_write_reflog(struct files_ref_store *refs,
+ struct ref_update *update,
+ struct ref_lock *lock,
+ struct strbuf *err)
+{
+ if (update->new_target) {
+ /*
+ * We want to get the resolved OID for the target, to ensure
+ * that the correct value is added to the reflog.
+ */
+ if (!refs_resolve_ref_unsafe(&refs->base, update->new_target,
+ RESOLVE_REF_READING,
+ &update->new_oid, NULL)) {
+ /*
+ * TODO: currently we skip creating reflogs for dangling
+ * symref updates. It would be nice to capture this as
+ * zero oid updates however.
+ */
+ return 0;
+ }
+ }
+
+ if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid,
+ &update->new_oid, update->msg, update->flags, err)) {
+ char *old_msg = strbuf_detach(err, NULL);
+
+ strbuf_addf(err, "cannot update the ref '%s': %s",
+ lock->ref_name, old_msg);
+ free(old_msg);
+ unlock_ref(lock);
+ update->backend_data = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
static int files_transaction_finish(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
@@ -2830,23 +3003,20 @@ static int files_transaction_finish(struct ref_store *ref_store,
if (update->flags & REF_NEEDS_COMMIT ||
update->flags & REF_LOG_ONLY) {
- if (files_log_ref_write(refs,
- lock->ref_name,
- &lock->old_oid,
- &update->new_oid,
- update->msg, update->flags,
- err)) {
- char *old_msg = strbuf_detach(err, NULL);
-
- strbuf_addf(err, "cannot update the ref '%s': %s",
- lock->ref_name, old_msg);
- free(old_msg);
- unlock_ref(lock);
- update->backend_data = NULL;
+ if (parse_and_write_reflog(refs, update, lock, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
}
+
+ /*
+ * We try creating a symlink, if that succeeds we continue to the
+ * next update. If not, we try and create a regular symref.
+ */
+ if (update->new_target && refs->prefer_symlink_refs)
+ if (!create_ref_symlink(lock, update->new_target))
+ continue;
+
if (update->flags & REF_NEEDS_COMMIT) {
clear_loose_ref_cache(refs);
if (commit_ref(lock)) {
@@ -2951,7 +3121,7 @@ static int files_transaction_abort(struct ref_store *ref_store,
return 0;
}
-static int ref_present(const char *refname,
+static int ref_present(const char *refname, const char *referent UNUSED,
const struct object_id *oid UNUSED,
int flags UNUSED,
void *cb_data)
@@ -3030,7 +3200,7 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
ref_transaction_add_update(packed_transaction, update->refname,
update->flags & ~REF_HAVE_OLD,
&update->new_oid, &update->old_oid,
- NULL);
+ NULL, NULL, NULL);
}
if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
@@ -3194,7 +3364,7 @@ static int files_reflog_expire(struct ref_store *ref_store,
rollback_lock_file(&reflog_lock);
} else if (update &&
(write_in_full(get_lock_file_fd(&lock->lk),
- oid_to_hex(&cb.last_kept_oid), the_hash_algo->hexsz) < 0 ||
+ oid_to_hex(&cb.last_kept_oid), refs->base.repo->hash_algo->hexsz) < 0 ||
write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
@@ -3218,12 +3388,12 @@ static int files_reflog_expire(struct ref_store *ref_store,
return -1;
}
-static int files_init_db(struct ref_store *ref_store,
- int flags,
- struct strbuf *err UNUSED)
+static int files_ref_store_create_on_disk(struct ref_store *ref_store,
+ int flags,
+ struct strbuf *err UNUSED)
{
struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_WRITE, "init_db");
+ files_downcast(ref_store, REF_STORE_WRITE, "create");
struct strbuf sb = STRBUF_INIT;
/*
@@ -3246,7 +3416,7 @@ static int files_init_db(struct ref_store *ref_store,
* There is no need to create directories for common refs when creating
* a worktree ref store.
*/
- if (!(flags & REFS_INIT_DB_IS_WORKTREE)) {
+ if (!(flags & REF_STORE_CREATE_ON_DISK_IS_WORKTREE)) {
/*
* Create .git/refs/{heads,tags}
*/
@@ -3263,17 +3433,190 @@ static int files_init_db(struct ref_store *ref_store,
return 0;
}
+struct remove_one_root_ref_data {
+ const char *gitdir;
+ struct strbuf *err;
+};
+
+static int remove_one_root_ref(const char *refname,
+ void *cb_data)
+{
+ struct remove_one_root_ref_data *data = cb_data;
+ struct strbuf buf = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addf(&buf, "%s/%s", data->gitdir, refname);
+
+ ret = unlink(buf.buf);
+ if (ret < 0)
+ strbuf_addf(data->err, "could not delete %s: %s\n",
+ refname, strerror(errno));
+
+ strbuf_release(&buf);
+ return ret;
+}
+
+static int files_ref_store_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_WRITE, "remove");
+ struct remove_one_root_ref_data data = {
+ .gitdir = refs->base.gitdir,
+ .err = err,
+ };
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
+ if (remove_dir_recursively(&sb, 0) < 0) {
+ strbuf_addf(err, "could not delete refs: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/logs", refs->base.gitdir);
+ if (remove_dir_recursively(&sb, 0) < 0) {
+ strbuf_addf(err, "could not delete logs: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ if (for_each_root_ref(refs, remove_one_root_ref, &data) < 0)
+ ret = -1;
+
+ if (ref_store_remove_on_disk(refs->packed_ref_store, err) < 0)
+ ret = -1;
+
+ strbuf_release(&sb);
+ return ret;
+}
+
+/*
+ * For refs and reflogs, they share a unified interface when scanning
+ * the whole directory. This function is used as the callback for each
+ * regular file or symlink in the directory.
+ */
+typedef int (*files_fsck_refs_fn)(struct ref_store *ref_store,
+ struct fsck_options *o,
+ const char *refs_check_dir,
+ struct dir_iterator *iter);
+
+static int files_fsck_refs_name(struct ref_store *ref_store UNUSED,
+ struct fsck_options *o,
+ const char *refs_check_dir,
+ struct dir_iterator *iter)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ /*
+ * Ignore the files ending with ".lock" as they may be lock files
+ * However, do not allow bare ".lock" files.
+ */
+ if (iter->basename[0] != '.' && ends_with(iter->basename, ".lock"))
+ goto cleanup;
+
+ if (check_refname_format(iter->basename, REFNAME_ALLOW_ONELEVEL)) {
+ struct fsck_ref_report report = { .path = NULL };
+
+ strbuf_addf(&sb, "%s/%s", refs_check_dir, iter->relative_path);
+ report.path = sb.buf;
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_NAME,
+ "invalid refname format");
+ }
+
+cleanup:
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int files_fsck_refs_dir(struct ref_store *ref_store,
+ struct fsck_options *o,
+ const char *refs_check_dir,
+ files_fsck_refs_fn *fsck_refs_fn)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct dir_iterator *iter;
+ int iter_status;
+ int ret = 0;
+
+ strbuf_addf(&sb, "%s/%s", ref_store->gitdir, refs_check_dir);
+
+ iter = dir_iterator_begin(sb.buf, 0);
+ if (!iter) {
+ ret = error_errno(_("cannot open directory %s"), sb.buf);
+ goto out;
+ }
+
+ while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) {
+ if (S_ISDIR(iter->st.st_mode)) {
+ continue;
+ } else if (S_ISREG(iter->st.st_mode) ||
+ S_ISLNK(iter->st.st_mode)) {
+ if (o->verbose)
+ fprintf_ln(stderr, "Checking %s/%s",
+ refs_check_dir, iter->relative_path);
+ for (size_t i = 0; fsck_refs_fn[i]; i++) {
+ if (fsck_refs_fn[i](ref_store, o, refs_check_dir, iter))
+ ret = -1;
+ }
+ } else {
+ struct fsck_ref_report report = { .path = iter->basename };
+ if (fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_FILETYPE,
+ "unexpected file type"))
+ ret = -1;
+ }
+ }
+
+ if (iter_status != ITER_DONE)
+ ret = error(_("failed to iterate over '%s'"), sb.buf);
+
+out:
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int files_fsck_refs(struct ref_store *ref_store,
+ struct fsck_options *o)
+{
+	files_fsck_refs_fn fsck_refs_fn[] = {
+ files_fsck_refs_name,
+ NULL,
+ };
+
+ if (o->verbose)
+ fprintf_ln(stderr, _("Checking references consistency"));
+ return files_fsck_refs_dir(ref_store, o, "refs", fsck_refs_fn);
+}
+
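Since fsck_refs_fn is a NULL-terminated array, further checks can be chained without touching the iteration logic in files_fsck_refs_dir(). A hypothetical sketch of a follow-up check (files_fsck_refs_content is an invented name, not part of this patch):

    static int files_fsck_refs_content(struct ref_store *ref_store,
                                       struct fsck_options *o,
                                       const char *refs_check_dir,
                                       struct dir_iterator *iter)
    {
        /* e.g. read iter->path.buf and validate the stored value */
        return 0;
    }

    files_fsck_refs_fn fsck_refs_fn[] = {
        files_fsck_refs_name,
        files_fsck_refs_content,
        NULL,
    };
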
+static int files_fsck(struct ref_store *ref_store,
+ struct fsck_options *o)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_READ, "fsck");
+
+ return files_fsck_refs(ref_store, o) |
+ refs->packed_ref_store->be->fsck(refs->packed_ref_store, o);
+}
+
struct ref_storage_be refs_be_files = {
.name = "files",
- .init = files_ref_store_create,
- .init_db = files_init_db,
+ .init = files_ref_store_init,
+ .release = files_ref_store_release,
+ .create_on_disk = files_ref_store_create_on_disk,
+ .remove_on_disk = files_ref_store_remove_on_disk,
+
.transaction_prepare = files_transaction_prepare,
.transaction_finish = files_transaction_finish,
.transaction_abort = files_transaction_abort,
.initial_transaction_commit = files_initial_transaction_commit,
.pack_refs = files_pack_refs,
- .create_symref = files_create_symref,
.rename_ref = files_rename_ref,
.copy_ref = files_copy_ref,
@@ -3287,5 +3630,7 @@ struct ref_storage_be refs_be_files = {
.reflog_exists = files_reflog_exists,
.create_reflog = files_create_reflog,
.delete_reflog = files_delete_reflog,
- .reflog_expire = files_reflog_expire
+ .reflog_expire = files_reflog_expire,
+
+ .fsck = files_fsck,
};
diff --git a/refs/iterator.c b/refs/iterator.c
index 6b680f610e..8e999d81fc 100644
--- a/refs/iterator.c
+++ b/refs/iterator.c
@@ -25,12 +25,11 @@ int ref_iterator_abort(struct ref_iterator *ref_iterator)
}
void base_ref_iterator_init(struct ref_iterator *iter,
- struct ref_iterator_vtable *vtable,
- int ordered)
+ struct ref_iterator_vtable *vtable)
{
iter->vtable = vtable;
- iter->ordered = !!ordered;
iter->refname = NULL;
+ iter->referent = NULL;
iter->oid = NULL;
iter->flags = 0;
}
@@ -74,7 +73,7 @@ struct ref_iterator *empty_ref_iterator_begin(void)
struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
struct ref_iterator *ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
return ref_iterator;
}
@@ -98,6 +97,49 @@ struct merge_ref_iterator {
struct ref_iterator **current;
};
+enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data UNUSED)
+{
+ if (iter_worktree && !iter_common) {
+ /*
+ * Return the worktree ref if there are no more common refs.
+ */
+ return ITER_SELECT_0;
+ } else if (iter_common) {
+ /*
+ * In case we have pending worktree and common refs we need to
+ * yield them based on their lexicographical order. Worktree
+ * refs that have the same name as common refs shadow the
+ * latter.
+ */
+ if (iter_worktree) {
+ int cmp = strcmp(iter_worktree->refname,
+ iter_common->refname);
+ if (cmp < 0)
+ return ITER_SELECT_0;
+ else if (!cmp)
+ return ITER_SELECT_0_SKIP_1;
+ }
+
+ /*
+ * We now know that the lexicographically-next ref is a common
+ * ref. When the common ref is a shared one we return it.
+ */
+ if (parse_worktree_ref(iter_common->refname, NULL, NULL,
+ NULL) == REF_WORKTREE_SHARED)
+ return ITER_SELECT_1;
+
+ /*
+ * Otherwise, if the common ref is a per-worktree ref we skip
+ * it because it would belong to the main worktree, not ours.
+ */
+ return ITER_SKIP_1;
+ } else {
+ return ITER_DONE;
+ }
+}
+
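A short walk-through with hypothetical refnames: with the worktree iterator at refs/bisect/bad and the common iterator at refs/heads/main, the strcmp() is negative and the worktree ref is yielded (ITER_SELECT_0). If both iterators sit on the same name, the worktree copy shadows the common one (ITER_SELECT_0_SKIP_1). Once only the common iterator remains, a shared name such as refs/heads/main is yielded (ITER_SELECT_1), while a per-worktree name that leaked into the main ref store is dropped (ITER_SKIP_1).
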
static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
struct merge_ref_iterator *iter =
@@ -158,6 +200,7 @@ static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
}
if (selection & ITER_YIELD_CURRENT) {
+ iter->base.referent = (*iter->current)->referent;
iter->base.refname = (*iter->current)->refname;
iter->base.oid = (*iter->current)->oid;
iter->base.flags = (*iter->current)->flags;
@@ -207,7 +250,6 @@ static struct ref_iterator_vtable merge_ref_iterator_vtable = {
};
struct ref_iterator *merge_ref_iterator_begin(
- int ordered,
struct ref_iterator *iter0, struct ref_iterator *iter1,
ref_iterator_select_fn *select, void *cb_data)
{
@@ -222,7 +264,7 @@ struct ref_iterator *merge_ref_iterator_begin(
* references through only if they exist in both iterators.
*/
- base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable, ordered);
+ base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
iter->iter0 = iter0;
iter->iter1 = iter1;
iter->select = select;
@@ -271,12 +313,9 @@ struct ref_iterator *overlay_ref_iterator_begin(
} else if (is_empty_ref_iterator(back)) {
ref_iterator_abort(back);
return front;
- } else if (!front->ordered || !back->ordered) {
- BUG("overlay_ref_iterator requires ordered inputs");
}
- return merge_ref_iterator_begin(1, front, back,
- overlay_iterator_select, NULL);
+ return merge_ref_iterator_begin(front, back, overlay_iterator_select, NULL);
}
struct prefix_ref_iterator {
@@ -315,16 +354,12 @@ static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
if (cmp > 0) {
/*
- * If the source iterator is ordered, then we
+ * As the source iterator is ordered, we
* can stop the iteration as soon as we see a
* refname that comes after the prefix:
*/
- if (iter->iter0->ordered) {
- ok = ref_iterator_abort(iter->iter0);
- break;
- } else {
- continue;
- }
+ ok = ref_iterator_abort(iter->iter0);
+ break;
}
if (iter->trim) {
@@ -396,7 +431,7 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable, iter0->ordered);
+ base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);
iter->iter0 = iter0;
iter->prefix = xstrdup(prefix);
@@ -407,15 +442,15 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
struct ref_iterator *current_ref_iter = NULL;
-int do_for_each_repo_ref_iterator(struct repository *r, struct ref_iterator *iter,
- each_repo_ref_fn fn, void *cb_data)
+int do_for_each_ref_iterator(struct ref_iterator *iter,
+ each_ref_fn fn, void *cb_data)
{
int retval = 0, ok;
struct ref_iterator *old_ref_iter = current_ref_iter;
current_ref_iter = iter;
while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
- retval = fn(r, iter->refname, iter->oid, iter->flags, cb_data);
+ retval = fn(iter->refname, iter->referent, iter->oid, iter->flags, cb_data);
if (retval) {
/*
* If ref_iterator_abort() returns ITER_ERROR,
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index a499a91c7e..07c57fd541 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -1,5 +1,8 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
#include "../git-compat-util.h"
#include "../config.h"
+#include "../dir.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
@@ -200,9 +203,14 @@ static int release_snapshot(struct snapshot *snapshot)
}
}
-struct ref_store *packed_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int store_flags)
+static size_t snapshot_hexsz(const struct snapshot *snapshot)
+{
+ return snapshot->refs->base.repo->hash_algo->hexsz;
+}
+
+struct ref_store *packed_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags)
{
struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
struct ref_store *ref_store = (struct ref_store *)refs;
@@ -252,6 +260,15 @@ static void clear_snapshot(struct packed_ref_store *refs)
}
}
+static void packed_ref_store_release(struct ref_store *ref_store)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, 0, "release");
+ clear_snapshot(refs);
+ rollback_lock_file(&refs->lock);
+ delete_tempfile(&refs->tempfile);
+ free(refs->path);
+}
+
static NORETURN void die_unterminated_line(const char *path,
const char *p, size_t len)
{
@@ -280,11 +297,13 @@ struct snapshot_record {
size_t len;
};
-static int cmp_packed_ref_records(const void *v1, const void *v2)
+static int cmp_packed_ref_records(const void *v1, const void *v2,
+ void *cb_data)
{
+ const struct snapshot *snapshot = cb_data;
const struct snapshot_record *e1 = v1, *e2 = v2;
- const char *r1 = e1->start + the_hash_algo->hexsz + 1;
- const char *r2 = e2->start + the_hash_algo->hexsz + 1;
+ const char *r1 = e1->start + snapshot_hexsz(snapshot) + 1;
+ const char *r2 = e2->start + snapshot_hexsz(snapshot) + 1;
while (1) {
if (*r1 == '\n')
@@ -305,9 +324,9 @@ static int cmp_packed_ref_records(const void *v1, const void *v2)
* refname.
*/
static int cmp_record_to_refname(const char *rec, const char *refname,
- int start)
+ int start, const struct snapshot *snapshot)
{
- const char *r1 = rec + the_hash_algo->hexsz + 1;
+ const char *r1 = rec + snapshot_hexsz(snapshot) + 1;
const char *r2 = refname;
while (1) {
@@ -354,7 +373,7 @@ static void sort_snapshot(struct snapshot *snapshot)
if (!eol)
/* The safety check should prevent this. */
BUG("unterminated line found in packed-refs");
- if (eol - pos < the_hash_algo->hexsz + 2)
+ if (eol - pos < snapshot_hexsz(snapshot) + 2)
die_invalid_line(snapshot->refs->path,
pos, eof - pos);
eol++;
@@ -380,7 +399,7 @@ static void sort_snapshot(struct snapshot *snapshot)
if (sorted &&
nr > 1 &&
cmp_packed_ref_records(&records[nr - 2],
- &records[nr - 1]) >= 0)
+ &records[nr - 1], snapshot) >= 0)
sorted = 0;
pos = eol;
@@ -390,7 +409,7 @@ static void sort_snapshot(struct snapshot *snapshot)
goto cleanup;
/* We need to sort the memory. First we sort the records array: */
- QSORT(records, nr, cmp_packed_ref_records);
+ QSORT_S(records, nr, cmp_packed_ref_records, snapshot);
/*
* Allocate a new chunk of memory, and copy the old memory to
@@ -466,7 +485,8 @@ static void verify_buffer_safe(struct snapshot *snapshot)
return;
last_line = find_start_of_record(start, eof - 1);
- if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
+ if (*(eof - 1) != '\n' ||
+ eof - last_line < snapshot_hexsz(snapshot) + 2)
die_invalid_line(snapshot->refs->path,
last_line, eof - last_line);
}
@@ -561,7 +581,7 @@ static const char *find_reference_location_1(struct snapshot *snapshot,
mid = lo + (hi - lo) / 2;
rec = find_start_of_record(lo, mid);
- cmp = cmp_record_to_refname(rec, refname, start);
+ cmp = cmp_record_to_refname(rec, refname, start, snapshot);
if (cmp < 0) {
lo = find_end_of_record(mid, hi);
} else if (cmp > 0) {
@@ -774,7 +794,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
return -1;
}
- if (get_oid_hex(rec, oid))
+ if (get_oid_hex_algop(rec, oid, ref_store->repo->hash_algo))
die_invalid_line(refs->path, rec, snapshot->eof - rec);
*type = REF_ISPACKED;
@@ -858,8 +878,8 @@ static int next_record(struct packed_ref_iterator *iter)
iter->base.flags = REF_ISPACKED;
p = iter->pos;
- if (iter->eof - p < the_hash_algo->hexsz + 2 ||
- parse_oid_hex(p, &iter->oid, &p) ||
+ if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 2 ||
+ parse_oid_hex_algop(p, &iter->oid, &p, iter->repo->hash_algo) ||
!isspace(*p++))
die_invalid_line(iter->snapshot->refs->path,
iter->pos, iter->eof - iter->pos);
@@ -876,7 +896,7 @@ static int next_record(struct packed_ref_iterator *iter)
if (!refname_is_safe(iter->base.refname))
die("packed refname is dangerous: %s",
iter->base.refname);
- oidclr(&iter->oid);
+ oidclr(&iter->oid, iter->repo->hash_algo);
iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
}
if (iter->snapshot->peeled == PEELED_FULLY ||
@@ -888,8 +908,8 @@ static int next_record(struct packed_ref_iterator *iter)
if (iter->pos < iter->eof && *iter->pos == '^') {
p = iter->pos + 1;
- if (iter->eof - p < the_hash_algo->hexsz + 1 ||
- parse_oid_hex(p, &iter->peeled, &p) ||
+ if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 1 ||
+ parse_oid_hex_algop(p, &iter->peeled, &p, iter->repo->hash_algo) ||
*p++ != '\n')
die_invalid_line(iter->snapshot->refs->path,
iter->pos, iter->eof - iter->pos);
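
For reference, each packed-refs record being parsed is "<hex-oid> SP <refname> LF", optionally followed by a peeled line "^<hex-oid> LF" naming the object an annotated tag ultimately points at (OIDs illustrative):

    d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v2.6.11
    ^c39ae07f393806ccf406ef966e9a15afc43cc36a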
@@ -901,13 +921,13 @@ static int next_record(struct packed_ref_iterator *iter)
* we suppress it if the reference is broken:
*/
if ((iter->base.flags & REF_ISBROKEN)) {
- oidclr(&iter->peeled);
+ oidclr(&iter->peeled, iter->repo->hash_algo);
iter->base.flags &= ~REF_KNOWS_PEELED;
} else {
iter->base.flags |= REF_KNOWS_PEELED;
}
} else {
- oidclr(&iter->peeled);
+ oidclr(&iter->peeled, iter->repo->hash_algo);
}
return ITER_OK;
@@ -944,16 +964,13 @@ static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- if (iter->repo != the_repository)
- BUG("peeling for non-the_repository is not supported");
-
if ((iter->base.flags & REF_KNOWS_PEELED)) {
oidcpy(peeled, &iter->peeled);
return is_null_oid(&iter->peeled) ? -1 : 0;
} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
return -1;
} else {
- return peel_object(&iter->oid, peeled) ? -1 : 0;
+ return peel_object(iter->repo, &iter->oid, peeled) ? -1 : 0;
}
}
@@ -1111,7 +1128,7 @@ static struct ref_iterator *packed_ref_iterator_begin(
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);
if (exclude_patterns)
populate_excluded_jump_list(iter, snapshot, exclude_patterns);
@@ -1233,6 +1250,24 @@ int packed_refs_is_locked(struct ref_store *ref_store)
return is_lock_file_locked(&refs->lock);
}
+int packed_refs_size(struct ref_store *ref_store,
+ size_t *out)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, REF_STORE_READ,
+ "packed_refs_size");
+ struct stat st;
+
+ if (stat(refs->path, &st) < 0) {
+ if (errno != ENOENT)
+ return -1;
+ *out = 0;
+ return 0;
+ }
+
+ *out = st.st_size;
+ return 0;
+}
+
/*
* The packed-refs header line that we write out. Perhaps other traits
* will be added later.
@@ -1244,14 +1279,27 @@ int packed_refs_is_locked(struct ref_store *ref_store)
static const char PACKED_REFS_HEADER[] =
"# pack-refs with: peeled fully-peeled sorted \n";
-static int packed_init_db(struct ref_store *ref_store UNUSED,
- int flags UNUSED,
- struct strbuf *err UNUSED)
+static int packed_ref_store_create_on_disk(struct ref_store *ref_store UNUSED,
+ int flags UNUSED,
+ struct strbuf *err UNUSED)
{
/* Nothing to do. */
return 0;
}
+static int packed_ref_store_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, 0, "remove");
+
+ if (remove_path(refs->path) < 0) {
+ strbuf_addstr(err, "could not delete packed-refs");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Write the packed refs from the current snapshot to the packed-refs
* tempfile, incorporating any changes from `updates`. `updates` must
@@ -1412,7 +1460,8 @@ static int write_with_updates(struct packed_ref_store *refs,
i++;
} else {
struct object_id peeled;
- int peel_error = peel_object(&update->new_oid,
+ int peel_error = peel_object(refs->base.repo,
+ &update->new_oid,
&peeled);
if (write_packed_entry(out, update->refname,
@@ -1704,17 +1753,25 @@ static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_s
return empty_ref_iterator_begin();
}
+static int packed_fsck(struct ref_store *ref_store UNUSED,
+ struct fsck_options *o UNUSED)
+{
+ return 0;
+}
+
struct ref_storage_be refs_be_packed = {
.name = "packed",
- .init = packed_ref_store_create,
- .init_db = packed_init_db,
+ .init = packed_ref_store_init,
+ .release = packed_ref_store_release,
+ .create_on_disk = packed_ref_store_create_on_disk,
+ .remove_on_disk = packed_ref_store_remove_on_disk,
+
.transaction_prepare = packed_transaction_prepare,
.transaction_finish = packed_transaction_finish,
.transaction_abort = packed_transaction_abort,
.initial_transaction_commit = packed_initial_transaction_commit,
.pack_refs = packed_pack_refs,
- .create_symref = NULL,
.rename_ref = NULL,
.copy_ref = NULL,
@@ -1729,4 +1786,6 @@ struct ref_storage_be refs_be_packed = {
.create_reflog = NULL,
.delete_reflog = NULL,
.reflog_expire = NULL,
+
+ .fsck = packed_fsck,
};
diff --git a/refs/packed-backend.h b/refs/packed-backend.h
index 9dd8a344c3..9481d5e7c2 100644
--- a/refs/packed-backend.h
+++ b/refs/packed-backend.h
@@ -13,9 +13,9 @@ struct ref_transaction;
* even among packed refs.
*/
-struct ref_store *packed_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int store_flags);
+struct ref_store *packed_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags);
/*
* Lock the packed-refs file for writing. Flags is passed to
@@ -28,6 +28,13 @@ void packed_refs_unlock(struct ref_store *ref_store);
int packed_refs_is_locked(struct ref_store *ref_store);
/*
+ * Obtain the size of the `packed-refs` file. Reports a size of `0` if there
+ * is no packed-refs file. Returns 0 on success, a negative value otherwise.
+ */
+int packed_refs_size(struct ref_store *ref_store,
+ size_t *out);
+
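+/*
+ * A minimal usage sketch (illustrative, not part of this patch;
+ * `threshold` and `repack()` are hypothetical):
+ *
+ *	size_t packed_size;
+ *
+ *	if (packed_refs_size(ref_store, &packed_size) < 0)
+ *		die("could not determine packed-refs size");
+ *	if (packed_size >= threshold)
+ *		repack();
+ */
+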
+/*
* Return true if `transaction` really needs to be carried out against
* the specified packed_ref_store, or false if it can be skipped
* (i.e., because it is an obvious NOOP). `ref_store` must be locked
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index a372a00941..35bae7e05d 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -34,6 +34,7 @@ struct ref_dir *get_ref_dir(struct ref_entry *entry)
}
struct ref_entry *create_ref_entry(const char *refname,
+ const char *referent,
const struct object_id *oid, int flag)
{
struct ref_entry *ref;
@@ -41,6 +42,8 @@ struct ref_entry *create_ref_entry(const char *refname,
FLEX_ALLOC_STR(ref, name, refname);
oidcpy(&ref->u.value.oid, oid);
ref->flag = flag;
+ ref->u.value.referent = xstrdup_or_null(referent);
+
return ref;
}
@@ -66,11 +69,14 @@ static void free_ref_entry(struct ref_entry *entry)
*/
clear_ref_dir(&entry->u.subdir);
}
+ free(entry->u.value.referent);
free(entry);
}
void free_ref_cache(struct ref_cache *cache)
{
+ if (!cache)
+ return;
free_ref_entry(cache->root);
free(cache);
}
@@ -429,6 +435,7 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
level->index = -1;
} else {
iter->base.refname = entry->name;
+ iter->base.referent = entry->u.value.referent;
iter->base.oid = &entry->u.value.oid;
iter->base.flags = entry->flag;
return ITER_OK;
@@ -441,10 +448,7 @@ static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
{
struct cache_ref_iterator *iter =
(struct cache_ref_iterator *)ref_iterator;
-
- if (iter->repo != the_repository)
- BUG("peeling for non-the_repository is not supported");
- return peel_object(ref_iterator->oid, peeled) ? -1 : 0;
+ return peel_object(iter->repo, ref_iterator->oid, peeled) ? -1 : 0;
}
static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
@@ -486,7 +490,7 @@ struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
iter->levels_nr = 1;
diff --git a/refs/ref-cache.h b/refs/ref-cache.h
index 95c76e27c8..5f04e518c3 100644
--- a/refs/ref-cache.h
+++ b/refs/ref-cache.h
@@ -1,7 +1,7 @@
#ifndef REFS_REF_CACHE_H
#define REFS_REF_CACHE_H
-#include "hash-ll.h"
+#include "hash.h"
struct ref_dir;
struct ref_store;
@@ -42,6 +42,7 @@ struct ref_value {
* referred to by the last reference in the symlink chain.
*/
struct object_id oid;
+ char *referent;
};
/*
@@ -173,6 +174,7 @@ struct ref_entry *create_dir_entry(struct ref_cache *cache,
const char *dirname, size_t len);
struct ref_entry *create_ref_entry(const char *refname,
+ const char *referent,
const struct object_id *oid, int flag);
/*
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 82219829b0..2313c830d8 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -4,6 +4,7 @@
#include "refs.h"
#include "iterator.h"
+struct fsck_options;
struct ref_transaction;
/*
@@ -69,40 +70,6 @@ int ref_resolves_to_object(const char *refname,
const struct object_id *oid,
unsigned int flags);
-enum peel_status {
- /* object was peeled successfully: */
- PEEL_PEELED = 0,
-
- /*
- * object cannot be peeled because the named object (or an
- * object referred to by a tag in the peel chain), does not
- * exist.
- */
- PEEL_INVALID = -1,
-
- /* object cannot be peeled because it is not a tag: */
- PEEL_NON_TAG = -2,
-
- /* ref_entry contains no peeled value because it is a symref: */
- PEEL_IS_SYMREF = -3,
-
- /*
- * ref_entry cannot be peeled because it is broken (i.e., the
- * symbolic reference cannot even be resolved to an object
- * name):
- */
- PEEL_BROKEN = -4
-};
-
-/*
- * Peel the named object; i.e., if the object is a tag, resolve the
- * tag recursively until a non-tag is found. If successful, store the
- * result to oid and return PEEL_PEELED. If the object is not a tag
- * or is not valid, return PEEL_NON_TAG or PEEL_INVALID, respectively,
- * and leave oid unchanged.
- */
-enum peel_status peel_object(const struct object_id *name, struct object_id *oid);
-
/**
* Information needed for a single ref update. Set new_oid to the new
* value or to null_oid to delete the ref. To check the old value
@@ -125,6 +92,19 @@ struct ref_update {
struct object_id old_oid;
/*
+ * If set, point the reference to this value. This can also be
+ * used to convert regular references to become symbolic refs.
+ * Cannot be set together with `new_oid`.
+ */
+ const char *new_target;
+
+ /*
+ * If set, check that the reference previously pointed to this
+ * value. Cannot be set together with `old_oid`.
+ */
+ const char *old_target;
+
+ /*
* One or more of REF_NO_DEREF, REF_FORCE_CREATE_REFLOG,
* REF_HAVE_NEW, REF_HAVE_OLD, or backend-specific flags.
*/
@@ -173,6 +153,7 @@ struct ref_update *ref_transaction_add_update(
const char *refname, unsigned int flags,
const struct object_id *new_oid,
const struct object_id *old_oid,
+ const char *new_target, const char *old_target,
const char *msg);
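+
+/*
+ * Illustrative call (not part of this patch): queueing a symref update
+ * passes NULL for both object IDs and supplies the targets instead,
+ * with `flags` as computed by the generic layer:
+ *
+ *	ref_transaction_add_update(transaction, "HEAD",
+ *				   flags | REF_NO_DEREF,
+ *				   NULL, NULL, "refs/heads/main", NULL,
+ *				   "checkout: moving to main");
+ */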
/*
@@ -260,6 +241,12 @@ enum do_for_each_ref_flags {
* INCLUDE_BROKEN, since they are otherwise not included at all.
*/
DO_FOR_EACH_OMIT_DANGLING_SYMREFS = (1 << 2),
+
+ /*
+ * Include root refs, i.e. HEAD and pseudorefs, along with the regular
+ * refs.
+ */
+ DO_FOR_EACH_INCLUDE_ROOT_REFS = (1 << 3),
};
/*
@@ -312,14 +299,8 @@ enum do_for_each_ref_flags {
*/
struct ref_iterator {
struct ref_iterator_vtable *vtable;
-
- /*
- * Does this `ref_iterator` iterate over references in order
- * by refname?
- */
- unsigned int ordered : 1;
-
const char *refname;
+ const char *referent;
const struct object_id *oid;
unsigned int flags;
};
@@ -387,14 +368,21 @@ typedef enum iterator_selection ref_iterator_select_fn(
void *cb_data);
/*
+ * An implementation of ref_iterator_select_fn that merges worktree and common
+ * refs. Per-worktree refs from the common iterator are ignored; worktree refs
+ * override common refs. Refs are selected lexicographically.
+ */
+enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data);
+
+/*
* Iterate over the entries from iter0 and iter1, with the values
* interleaved as directed by the select function. The iterator takes
* ownership of iter0 and iter1 and frees them when the iteration is
- * over. A derived class should set `ordered` to 1 or 0 based on
- * whether it generates its output in order by reference name.
+ * over.
*/
struct ref_iterator *merge_ref_iterator_begin(
- int ordered,
struct ref_iterator *iter0, struct ref_iterator *iter1,
ref_iterator_select_fn *select, void *cb_data);
@@ -423,8 +411,6 @@ struct ref_iterator *overlay_ref_iterator_begin(
* As a convenience to callers, if prefix is the empty string and
* trim is zero, this function returns iter0 directly, without
* wrapping it.
- *
- * The resulting ref_iterator is ordered if iter0 is.
*/
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
const char *prefix,
@@ -435,14 +421,11 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
/*
* Base class constructor for ref_iterators. Initialize the
* ref_iterator part of iter, setting its vtable pointer as specified.
- * `ordered` should be set to 1 if the iterator will iterate over
- * references in order by refname; otherwise it should be set to 0.
* This is meant to be called only by the initializers of derived
* classes.
*/
void base_ref_iterator_init(struct ref_iterator *iter,
- struct ref_iterator_vtable *vtable,
- int ordered);
+ struct ref_iterator_vtable *vtable);
/*
* Base class destructor for ref_iterators. Destroy the ref_iterator
@@ -502,9 +485,8 @@ extern struct ref_iterator *current_ref_iter;
* adapter between the callback style of reference iteration and the
* iterator style.
*/
-int do_for_each_repo_ref_iterator(struct repository *r,
- struct ref_iterator *iter,
- each_repo_ref_fn fn, void *cb_data);
+int do_for_each_ref_iterator(struct ref_iterator *iter,
+ each_ref_fn fn, void *cb_data);
struct ref_store;
@@ -528,10 +510,20 @@ struct ref_store;
typedef struct ref_store *ref_store_init_fn(struct repository *repo,
const char *gitdir,
unsigned int flags);
+/*
+ * Release all memory and resources associated with the ref store.
+ */
+typedef void ref_store_release_fn(struct ref_store *refs);
+
+typedef int ref_store_create_on_disk_fn(struct ref_store *refs,
+ int flags,
+ struct strbuf *err);
-typedef int ref_init_db_fn(struct ref_store *refs,
- int flags,
- struct strbuf *err);
+/*
+ * Remove the reference store from disk.
+ */
+typedef int ref_store_remove_on_disk_fn(struct ref_store *refs,
+ struct strbuf *err);
typedef int ref_transaction_prepare_fn(struct ref_store *refs,
struct ref_transaction *transaction,
@@ -551,10 +543,6 @@ typedef int ref_transaction_commit_fn(struct ref_store *refs,
typedef int pack_refs_fn(struct ref_store *ref_store,
struct pack_refs_opts *opts);
-typedef int create_symref_fn(struct ref_store *ref_store,
- const char *ref_target,
- const char *refs_heads_master,
- const char *logmsg);
typedef int rename_ref_fn(struct ref_store *ref_store,
const char *oldref, const char *newref,
const char *logmsg);
@@ -664,10 +652,15 @@ typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname,
typedef int read_symbolic_ref_fn(struct ref_store *ref_store, const char *refname,
struct strbuf *referent);
+typedef int fsck_fn(struct ref_store *ref_store,
+ struct fsck_options *o);
+
struct ref_storage_be {
const char *name;
ref_store_init_fn *init;
- ref_init_db_fn *init_db;
+ ref_store_release_fn *release;
+ ref_store_create_on_disk_fn *create_on_disk;
+ ref_store_remove_on_disk_fn *remove_on_disk;
ref_transaction_prepare_fn *transaction_prepare;
ref_transaction_finish_fn *transaction_finish;
@@ -675,7 +668,6 @@ struct ref_storage_be {
ref_transaction_commit_fn *initial_transaction_commit;
pack_refs_fn *pack_refs;
- create_symref_fn *create_symref;
rename_ref_fn *rename_ref;
copy_ref_fn *copy_ref;
@@ -690,15 +682,18 @@ struct ref_storage_be {
create_reflog_fn *create_reflog;
delete_reflog_fn *delete_reflog;
reflog_expire_fn *reflog_expire;
+
+ fsck_fn *fsck;
};
extern struct ref_storage_be refs_be_files;
+extern struct ref_storage_be refs_be_reftable;
extern struct ref_storage_be refs_be_packed;
/*
* A representation of the reference store for the main repository or
* a submodule. The ref_store instances for submodules are kept in a
- * hash map; see get_submodule_ref_store() for more info.
+ * hash map; see repo_get_submodule_ref_store() for more info.
*/
struct ref_store {
/* The backend describing this ref_store's storage scheme: */
@@ -717,7 +712,8 @@ struct ref_store {
* Parse contents of a loose ref file. *failure_errno may be set to EINVAL for
* invalid contents.
*/
-int parse_loose_ref_contents(const char *buf, struct object_id *oid,
+int parse_loose_ref_contents(const struct git_hash_algo *algop,
+ const char *buf, struct object_id *oid,
struct strbuf *referent, unsigned int *type,
int *failure_errno);
@@ -733,4 +729,31 @@ void base_ref_store_init(struct ref_store *refs, struct repository *repo,
*/
struct ref_store *maybe_debug_wrap_ref_store(const char *gitdir, struct ref_store *store);
+/*
+ * Return the refname under which the update was originally requested.
+ */
+const char *ref_update_original_update_refname(struct ref_update *update);
+
+/*
+ * Helper function to check whether the new value is null; this takes
+ * into account that the update could be for either a regular ref or a
+ * symbolic ref.
+ */
+int ref_update_has_null_new_value(struct ref_update *update);
+
+/*
+ * Check whether the old_target values stored in update are consistent
+ * with the referent, which is the symbolic reference's current value.
+ * If everything is OK, return 0; otherwise, write an error message to
+ * err and return -1.
+ */
+int ref_update_check_old_target(const char *referent, struct ref_update *update,
+ struct strbuf *err);
+
+/*
+ * Check whether the ref must exist, i.e. whether old_oid or old_target
+ * is non-NULL.
+ */
+int ref_update_expects_existing_old_ref(struct ref_update *update);
+
#endif /* REFS_REFS_INTERNAL_H */
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
new file mode 100644
index 0000000000..043e19439f
--- /dev/null
+++ b/refs/reftable-backend.c
@@ -0,0 +1,2353 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
+#include "../git-compat-util.h"
+#include "../abspath.h"
+#include "../chdir-notify.h"
+#include "../config.h"
+#include "../dir.h"
+#include "../environment.h"
+#include "../gettext.h"
+#include "../hash.h"
+#include "../hex.h"
+#include "../iterator.h"
+#include "../ident.h"
+#include "../lockfile.h"
+#include "../object.h"
+#include "../path.h"
+#include "../refs.h"
+#include "../reftable/reftable-stack.h"
+#include "../reftable/reftable-record.h"
+#include "../reftable/reftable-error.h"
+#include "../reftable/reftable-iterator.h"
+#include "../repo-settings.h"
+#include "../setup.h"
+#include "../strmap.h"
+#include "parse.h"
+#include "refs-internal.h"
+
+/*
+ * Used as a flag in ref_update::flags when the ref_update was via an
+ * update to HEAD.
+ */
+#define REF_UPDATE_VIA_HEAD (1 << 8)
+
+struct reftable_ref_store {
+ struct ref_store base;
+
+ /*
+ * The main stack refers to the common dir and thus contains common
+ * refs as well as refs of the main repository.
+ */
+ struct reftable_stack *main_stack;
+ /*
+ * The worktree stack refers to the gitdir in case the refdb is opened
+ * via a worktree. It thus contains the per-worktree refs.
+ */
+ struct reftable_stack *worktree_stack;
+ /*
+ * Map of worktree stacks by their respective worktree names. The map
+ * is populated lazily when we try to resolve `worktrees/$worktree` refs.
+ */
+ struct strmap worktree_stacks;
+ struct reftable_write_options write_options;
+
+ unsigned int store_flags;
+ enum log_refs_config log_all_ref_updates;
+ int err;
+};
+
+/*
+ * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
+ * reftable_ref_store. required_flags is compared with ref_store's store_flags
+ * to ensure the ref_store has all required capabilities. "caller" is used in
+ * any necessary error messages.
+ */
+static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
+ unsigned int required_flags,
+ const char *caller)
+{
+ struct reftable_ref_store *refs;
+
+ if (ref_store->be != &refs_be_reftable)
+ BUG("ref_store is type \"%s\" not \"reftables\" in %s",
+ ref_store->be->name, caller);
+
+ refs = (struct reftable_ref_store *)ref_store;
+
+ if ((refs->store_flags & required_flags) != required_flags)
+ BUG("operation %s requires abilities 0x%x, but only have 0x%x",
+ caller, required_flags, refs->store_flags);
+
+ return refs;
+}
+
+/*
+ * Some refs are global to the repository (refs/heads/{*}), while others are
+ * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
+ * multiple separate databases (ie. multiple reftable/ directories), one for
+ * the shared refs, one for the current worktree refs, and one for each
+ * additional worktree. For reading, we merge the view of both the shared and
+ * the current worktree's refs, when necessary.
+ *
+ * This function also optionally assigns the rewritten reference name that is
+ * local to the stack. This translation is required when using worktree refs
+ * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
+ * those references in their normalized form.
+ */
+static struct reftable_stack *stack_for(struct reftable_ref_store *store,
+ const char *refname,
+ const char **rewritten_ref)
+{
+ const char *wtname;
+ int wtname_len;
+
+ if (!refname)
+ return store->main_stack;
+
+ switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
+ case REF_WORKTREE_OTHER: {
+ static struct strbuf wtname_buf = STRBUF_INIT;
+ struct strbuf wt_dir = STRBUF_INIT;
+ struct reftable_stack *stack;
+
+ /*
+ * We're using a static buffer here so that we don't need to
+ * allocate the worktree name whenever we look up a reference.
+ * This could be avoided if the strmap interface knew how to
+ * handle keys with a length.
+ */
+ strbuf_reset(&wtname_buf);
+ strbuf_add(&wtname_buf, wtname, wtname_len);
+
+ /*
+ * There is an edge case here: when the worktree references the
+ * current worktree, then we set up the stack once via
+ * `worktree_stacks` and once via `worktree_stack`. This is
+ * wasteful, but in the reading case it shouldn't matter. And
+ * in the writing case we would notice that the stack is locked
+ * already and error out when trying to write a reference via
+ * both stacks.
+ */
+ stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
+ if (!stack) {
+ strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
+ store->base.repo->commondir, wtname_buf.buf);
+
+ store->err = reftable_new_stack(&stack, wt_dir.buf,
+ &store->write_options);
+ assert(store->err != REFTABLE_API_ERROR);
+ strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
+ }
+
+ strbuf_release(&wt_dir);
+ return stack;
+ }
+ case REF_WORKTREE_CURRENT:
+ /*
+ * If there is no worktree stack then we're currently in the
+ * main worktree. We thus return the main stack in that case.
+ */
+ if (!store->worktree_stack)
+ return store->main_stack;
+ return store->worktree_stack;
+ case REF_WORKTREE_MAIN:
+ case REF_WORKTREE_SHARED:
+ return store->main_stack;
+ default:
+ BUG("unhandled worktree reference type");
+ }
+}
+
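+/*
+ * Illustrative mapping performed by stack_for() for a store opened via
+ * a linked worktree (worktree names are hypothetical):
+ *
+ *	"refs/heads/main"               -> main stack
+ *	"HEAD", "refs/bisect/bad"       -> worktree stack
+ *	"worktrees/wt2/refs/bisect/bad" -> stack for worktree "wt2",
+ *	                                   rewritten to "refs/bisect/bad"
+ *	"main-worktree/HEAD"            -> main stack, rewritten to "HEAD"
+ */
+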
+static int should_write_log(struct reftable_ref_store *refs, const char *refname)
+{
+ enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
+ if (log_refs_cfg == LOG_REFS_UNSET)
+ log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
+
+ switch (log_refs_cfg) {
+ case LOG_REFS_NONE:
+ return refs_reflog_exists(&refs->base, refname);
+ case LOG_REFS_ALWAYS:
+ return 1;
+ case LOG_REFS_NORMAL:
+ if (should_autocreate_reflog(log_refs_cfg, refname))
+ return 1;
+ return refs_reflog_exists(&refs->base, refname);
+ default:
+ BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg);
+ }
+}
+
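+/*
+ * For example, in a non-bare repository with core.logAllRefUpdates
+ * unset, updates to "refs/heads/*", "refs/remotes/*", "refs/notes/*"
+ * and "HEAD" are logged automatically, while other refs only receive
+ * entries if their reflog already exists.
+ */
+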
+static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
+{
+ const char *tz_begin;
+ int sign = 1;
+
+ reftable_log_record_release(log);
+ log->value_type = REFTABLE_LOG_UPDATE;
+ log->value.update.name =
+ xstrndup(split->name_begin, split->name_end - split->name_begin);
+ log->value.update.email =
+ xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
+ log->value.update.time = atol(split->date_begin);
+
+ tz_begin = split->tz_begin;
+ if (*tz_begin == '-') {
+ sign = -1;
+ tz_begin++;
+ }
+ if (*tz_begin == '+') {
+ sign = 1;
+ tz_begin++;
+ }
+
+ log->value.update.tz_offset = sign * atoi(tz_begin);
+}
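+
+/*
+ * For example (illustrative values): an ident tail of
+ * "1715000000 +0230" results in update.time == 1715000000 and
+ * update.tz_offset == 230, while a zone of "-0700" results in
+ * tz_offset == -700.
+ */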
+
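+/*
+ * Read a reference from the given stack without reloading it first.
+ * Returns 0 on success, a positive value in case the reference does
+ * not exist, and a negative reftable error code otherwise.
+ */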
+static int read_ref_without_reload(struct reftable_ref_store *refs,
+ struct reftable_stack *stack,
+ const char *refname,
+ struct object_id *oid,
+ struct strbuf *referent,
+ unsigned int *type)
+{
+ struct reftable_ref_record ref = {0};
+ int ret;
+
+ ret = reftable_stack_read_ref(stack, refname, &ref);
+ if (ret)
+ goto done;
+
+ if (ref.value_type == REFTABLE_REF_SYMREF) {
+ strbuf_reset(referent);
+ strbuf_addstr(referent, ref.value.symref);
+ *type |= REF_ISSYMREF;
+ } else if (reftable_ref_record_val1(&ref)) {
+ oidread(oid, reftable_ref_record_val1(&ref),
+ refs->base.repo->hash_algo);
+ } else {
+ /* We got a tombstone, which should not happen. */
+ BUG("unhandled reference value type %d", ref.value_type);
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_ref_record_release(&ref);
+ return ret;
+}
+
+static int reftable_be_config(const char *var, const char *value,
+ const struct config_context *ctx,
+ void *_opts)
+{
+ struct reftable_write_options *opts = _opts;
+
+ if (!strcmp(var, "reftable.blocksize")) {
+ unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
+ if (block_size > 16777215)
+ die("reftable block size cannot exceed 16MB");
+ opts->block_size = block_size;
+ } else if (!strcmp(var, "reftable.restartinterval")) {
+ unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
+ if (restart_interval > UINT16_MAX)
+ die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX);
+ opts->restart_interval = restart_interval;
+ } else if (!strcmp(var, "reftable.indexobjects")) {
+ opts->skip_index_objects = !git_config_bool(var, value);
+ } else if (!strcmp(var, "reftable.geometricfactor")) {
+ unsigned long factor = git_config_ulong(var, value, ctx->kvi);
+ if (factor > UINT8_MAX)
+ die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
+ opts->auto_compaction_factor = factor;
+ }
+
+ return 0;
+}
+
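+/*
+ * Example configuration handled above (values are illustrative):
+ *
+ *	[reftable]
+ *		blockSize = 8192
+ *		restartInterval = 32
+ *		indexObjects = false
+ *		geometricFactor = 4
+ */
+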
+static struct ref_store *reftable_be_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags)
+{
+ struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
+ struct strbuf path = STRBUF_INIT;
+ int is_worktree;
+ mode_t mask;
+
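+ /*
+ * Query the process umask by briefly resetting it; the mask is used
+ * below to derive default permissions for newly written tables.
+ */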
+ mask = umask(0);
+ umask(mask);
+
+ base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
+ strmap_init(&refs->worktree_stacks);
+ refs->store_flags = store_flags;
+ refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
+
+ refs->write_options.hash_id = repo->hash_algo->format_id;
+ refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
+ refs->write_options.disable_auto_compact =
+ !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
+
+ git_config(reftable_be_config, &refs->write_options);
+
+ /*
+ * It is somewhat unfortunate that we have to mirror the default block
+ * size of the reftable library here. But given that the write options
+ * wouldn't otherwise be updated by the library, and given that we
+ * require the proper block size to trim reflog messages so that they
+ * fit, we must set up a proper value here.
+ */
+ if (!refs->write_options.block_size)
+ refs->write_options.block_size = 4096;
+
+ /*
+ * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
+ * This stack contains both the shared and the main worktree refs.
+ *
+ * Note that we don't try to resolve the path in case we have a
+ * worktree because `get_common_dir_noenv()` already does it for us.
+ */
+ is_worktree = get_common_dir_noenv(&path, gitdir);
+ if (!is_worktree) {
+ strbuf_reset(&path);
+ strbuf_realpath(&path, gitdir, 0);
+ }
+ strbuf_addstr(&path, "/reftable");
+ refs->err = reftable_new_stack(&refs->main_stack, path.buf,
+ &refs->write_options);
+ if (refs->err)
+ goto done;
+
+ /*
+ * If we're in a worktree we also need to set up the worktree reftable
+ * stack that is contained in the per-worktree GIT_DIR.
+ *
+ * Ideally, we would also add the stack to our worktree stack map. But
+ * we have no way to figure out the worktree name here and thus can't
+ * do it efficiently.
+ */
+ if (is_worktree) {
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s/reftable", gitdir);
+
+ refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
+ &refs->write_options);
+ if (refs->err)
+ goto done;
+ }
+
+ chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
+
+done:
+ assert(refs->err != REFTABLE_API_ERROR);
+ strbuf_release(&path);
+ return &refs->base;
+}
+
+static void reftable_be_release(struct ref_store *ref_store)
+{
+ struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
+ struct strmap_entry *entry;
+ struct hashmap_iter iter;
+
+ if (refs->main_stack) {
+ reftable_stack_destroy(refs->main_stack);
+ refs->main_stack = NULL;
+ }
+
+ if (refs->worktree_stack) {
+ reftable_stack_destroy(refs->worktree_stack);
+ refs->worktree_stack = NULL;
+ }
+
+ strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
+ reftable_stack_destroy(entry->value);
+ strmap_clear(&refs->worktree_stacks, 0);
+}
+
+static int reftable_be_create_on_disk(struct ref_store *ref_store,
+ int flags UNUSED,
+ struct strbuf *err UNUSED)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
+ safe_create_dir(sb.buf, 1);
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
+ write_file(sb.buf, "ref: refs/heads/.invalid");
+ adjust_shared_perm(sb.buf);
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
+ safe_create_dir(sb.buf, 1);
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
+ write_file(sb.buf, "this repository uses the reftable format");
+ adjust_shared_perm(sb.buf);
+
+ strbuf_release(&sb);
+ return 0;
+}
+
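+/*
+ * After create_on_disk(), a fresh ref database looks roughly like this
+ * (illustrative):
+ *
+ *	$GIT_DIR/HEAD        "ref: refs/heads/.invalid"
+ *	$GIT_DIR/refs/heads  "this repository uses the reftable format"
+ *	$GIT_DIR/reftable/   empty directory holding the stack
+ *
+ * The stub files exist so that tools unaware of the reftable format
+ * fail gracefully instead of misreading the repository.
+ */
+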
+static int reftable_be_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ /*
+ * Release the ref store such that all stacks are closed. This is
+ * required so that the "tables.list" file is not open anymore, which
+ * would otherwise make it impossible to remove the file on Windows.
+ */
+ reftable_be_release(ref_store);
+
+ strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
+ if (remove_dir_recursively(&sb, 0) < 0) {
+ strbuf_addf(err, "could not delete reftables: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
+ if (unlink(sb.buf) < 0) {
+ strbuf_addf(err, "could not delete stub HEAD: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
+ if (unlink(sb.buf) < 0) {
+ strbuf_addf(err, "could not delete stub heads: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
+ if (rmdir(sb.buf) < 0) {
+ strbuf_addf(err, "could not delete refs directory: %s",
+ strerror(errno));
+ ret = -1;
+ }
+
+ strbuf_release(&sb);
+ return ret;
+}
+
+struct reftable_ref_iterator {
+ struct ref_iterator base;
+ struct reftable_ref_store *refs;
+ struct reftable_iterator iter;
+ struct reftable_ref_record ref;
+ struct object_id oid;
+
+ const char *prefix;
+ size_t prefix_len;
+ unsigned int flags;
+ int err;
+};
+
+static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+ struct reftable_ref_store *refs = iter->refs;
+ const char *referent = NULL;
+
+ while (!iter->err) {
+ int flags = 0;
+
+ iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
+ if (iter->err)
+ break;
+
+ /*
+ * The files backend only lists references contained in "refs/" unless
+ * the root refs are to be included. We emulate the same behaviour here.
+ */
+ if (!starts_with(iter->ref.refname, "refs/") &&
+ !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
+ is_root_ref(iter->ref.refname))) {
+ continue;
+ }
+
+ if (iter->prefix_len &&
+ strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
+ iter->err = 1;
+ break;
+ }
+
+ if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
+ parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
+ REF_WORKTREE_CURRENT)
+ continue;
+
+ switch (iter->ref.value_type) {
+ case REFTABLE_REF_VAL1:
+ oidread(&iter->oid, iter->ref.value.val1,
+ refs->base.repo->hash_algo);
+ break;
+ case REFTABLE_REF_VAL2:
+ oidread(&iter->oid, iter->ref.value.val2.value,
+ refs->base.repo->hash_algo);
+ break;
+ case REFTABLE_REF_SYMREF:
+ referent = refs_resolve_ref_unsafe(&iter->refs->base,
+ iter->ref.refname,
+ RESOLVE_REF_READING,
+ &iter->oid, &flags);
+ if (!referent)
+ oidclr(&iter->oid, refs->base.repo->hash_algo);
+ break;
+ default:
+ BUG("unhandled reference value type %d", iter->ref.value_type);
+ }
+
+ if (is_null_oid(&iter->oid))
+ flags |= REF_ISBROKEN;
+
+ if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->ref.refname))
+ die(_("refname is dangerous: %s"), iter->ref.refname);
+ oidclr(&iter->oid, refs->base.repo->hash_algo);
+ flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+
+ if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
+ flags & REF_ISSYMREF &&
+ flags & REF_ISBROKEN)
+ continue;
+
+ if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
+ !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
+ &iter->oid, flags))
+ continue;
+
+ iter->base.refname = iter->ref.refname;
+ iter->base.referent = referent;
+ iter->base.oid = &iter->oid;
+ iter->base.flags = flags;
+
+ break;
+ }
+
+ if (iter->err > 0) {
+ if (ref_iterator_abort(ref_iterator) != ITER_DONE)
+ return ITER_ERROR;
+ return ITER_DONE;
+ }
+
+ if (iter->err < 0) {
+ ref_iterator_abort(ref_iterator);
+ return ITER_ERROR;
+ }
+
+ return ITER_OK;
+}
+
+static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
+ struct object_id *peeled)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+
+ if (iter->ref.value_type == REFTABLE_REF_VAL2) {
+ oidread(peeled, iter->ref.value.val2.target_value,
+ iter->refs->base.repo->hash_algo);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+ reftable_ref_record_release(&iter->ref);
+ reftable_iterator_destroy(&iter->iter);
+ free(iter);
+ return ITER_DONE;
+}
+
+static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
+ .advance = reftable_ref_iterator_advance,
+ .peel = reftable_ref_iterator_peel,
+ .abort = reftable_ref_iterator_abort
+};
+
+static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
+ struct reftable_stack *stack,
+ const char *prefix,
+ int flags)
+{
+ struct reftable_ref_iterator *iter;
+ int ret;
+
+ iter = xcalloc(1, sizeof(*iter));
+ base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
+ iter->prefix = prefix;
+ iter->prefix_len = prefix ? strlen(prefix) : 0;
+ iter->base.oid = &iter->oid;
+ iter->flags = flags;
+ iter->refs = refs;
+
+ ret = refs->err;
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ goto done;
+
+ reftable_stack_init_ref_iterator(stack, &iter->iter);
+ ret = reftable_iterator_seek_ref(&iter->iter, prefix);
+ if (ret)
+ goto done;
+
+done:
+ iter->err = ret;
+ return iter;
+}
+
+static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
+ const char *prefix,
+ const char **exclude_patterns UNUSED,
+ unsigned int flags)
+{
+ struct reftable_ref_iterator *main_iter, *worktree_iter;
+ struct reftable_ref_store *refs;
+ unsigned int required_flags = REF_STORE_READ;
+
+ if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
+ required_flags |= REF_STORE_ODB;
+ refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
+
+ main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
+
+ /*
+ * The worktree stack is only set when we're in an actual worktree
+ * right now. If we aren't, then we only return the common reftable
+ * iterator.
+ */
+ if (!refs->worktree_stack)
+ return &main_iter->base;
+
+ /*
+ * Otherwise we merge both the common and the per-worktree refs into a
+ * single iterator.
+ */
+ worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
+ return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
+ ref_iterator_select, NULL);
+}
+
+static int reftable_be_read_raw_ref(struct ref_store *ref_store,
+ const char *refname,
+ struct object_id *oid,
+ struct strbuf *referent,
+ unsigned int *type,
+ int *failure_errno)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ return ret;
+
+ ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ *failure_errno = ENOENT;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
+ const char *refname,
+ struct strbuf *referent)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_ref_record ref = {0};
+ int ret;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ return ret;
+
+ ret = reftable_stack_read_ref(stack, refname, &ref);
+ if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
+ strbuf_addstr(referent, ref.value.symref);
+ else
+ ret = -1;
+
+ reftable_ref_record_release(&ref);
+ return ret;
+}
+
+struct reftable_transaction_update {
+ struct ref_update *update;
+ struct object_id current_oid;
+};
+
+struct write_transaction_table_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_stack *stack;
+ struct reftable_addition *addition;
+ struct reftable_transaction_update *updates;
+ size_t updates_nr;
+ size_t updates_alloc;
+ size_t updates_expected;
+};
+
+struct reftable_transaction_data {
+ struct write_transaction_table_arg *args;
+ size_t args_nr, args_alloc;
+};
+
+static void free_transaction_data(struct reftable_transaction_data *tx_data)
+{
+ if (!tx_data)
+ return;
+ for (size_t i = 0; i < tx_data->args_nr; i++) {
+ reftable_addition_destroy(tx_data->args[i].addition);
+ free(tx_data->args[i].updates);
+ }
+ free(tx_data->args);
+ free(tx_data);
+}
+
+/*
+ * Prepare a transaction update for the given reference update. This will
+ * cause us to lock the corresponding reftable stack against concurrent
+ * modification.
+ */
+static int prepare_transaction_update(struct write_transaction_table_arg **out,
+ struct reftable_ref_store *refs,
+ struct reftable_transaction_data *tx_data,
+ struct ref_update *update,
+ struct strbuf *err)
+{
+ struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
+ struct write_transaction_table_arg *arg = NULL;
+ size_t i;
+ int ret;
+
+ /*
+ * Search for a preexisting stack update. If there is one then we add
+ * the update to it, otherwise we set up a new stack update.
+ */
+ for (i = 0; !arg && i < tx_data->args_nr; i++)
+ if (tx_data->args[i].stack == stack)
+ arg = &tx_data->args[i];
+
+ if (!arg) {
+ struct reftable_addition *addition;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ return ret;
+
+ ret = reftable_stack_new_addition(&addition, stack);
+ if (ret) {
+ if (ret == REFTABLE_LOCK_ERROR)
+ strbuf_addstr(err, "cannot lock references");
+ return ret;
+ }
+
+ ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
+ tx_data->args_alloc);
+ arg = &tx_data->args[tx_data->args_nr++];
+ arg->refs = refs;
+ arg->stack = stack;
+ arg->addition = addition;
+ arg->updates = NULL;
+ arg->updates_nr = 0;
+ arg->updates_alloc = 0;
+ arg->updates_expected = 0;
+ }
+
+ arg->updates_expected++;
+
+ if (out)
+ *out = arg;
+
+ return 0;
+}
+
+/*
+ * Queue a reference update for the correct stack. We potentially need to
+ * handle multiple stack updates in a single transaction when it spans
+ * multiple worktrees.
+ */
+static int queue_transaction_update(struct reftable_ref_store *refs,
+ struct reftable_transaction_data *tx_data,
+ struct ref_update *update,
+ struct object_id *current_oid,
+ struct strbuf *err)
+{
+ struct write_transaction_table_arg *arg = NULL;
+ int ret;
+
+ if (update->backend_data)
+ BUG("reference update queued more than once");
+
+ ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
+ if (ret < 0)
+ return ret;
+
+ ALLOC_GROW(arg->updates, arg->updates_nr + 1,
+ arg->updates_alloc);
+ arg->updates[arg->updates_nr].update = update;
+ oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
+ update->backend_data = &arg->updates[arg->updates_nr++];
+
+ return 0;
+}
+
+static int reftable_be_transaction_prepare(struct ref_store *ref_store,
+ struct ref_transaction *transaction,
+ struct strbuf *err)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
+ struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
+ struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct reftable_transaction_data *tx_data = NULL;
+ struct object_id head_oid;
+ unsigned int head_type = 0;
+ size_t i;
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ tx_data = xcalloc(1, sizeof(*tx_data));
+
+ /*
+ * Preprocess all updates. First, we check that there are no duplicate
+ * reference updates in this transaction. Second, we lock all stacks
+ * that will be modified during the transaction.
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ ret = prepare_transaction_update(NULL, refs, tx_data,
+ transaction->updates[i], err);
+ if (ret)
+ goto done;
+
+ string_list_append(&affected_refnames,
+ transaction->updates[i]->refname);
+ }
+
+ /*
+ * Now that we have counted updates per stack we can preallocate their
+ * arrays. This avoids having to reallocate many times.
+ */
+ for (i = 0; i < tx_data->args_nr; i++) {
+ CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
+ tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
+ }
+
+ /*
+ * Fail if a refname appears more than once in the transaction.
+ * This code is taken from the files backend and is a good candidate to
+ * be moved into the generic layer.
+ */
+ string_list_sort(&affected_refnames);
+ if (ref_update_reject_duplicates(&affected_refnames, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto done;
+ }
+
+ ret = read_ref_without_reload(refs, stack_for(refs, "HEAD", NULL), "HEAD",
+ &head_oid, &head_referent, &head_type);
+ if (ret < 0)
+ goto done;
+ ret = 0;
+
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *u = transaction->updates[i];
+ struct object_id current_oid = {0};
+ struct reftable_stack *stack;
+ const char *rewritten_ref;
+
+ stack = stack_for(refs, u->refname, &rewritten_ref);
+
+ /* Verify that the new object ID is valid. */
+ if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
+ !(u->flags & REF_SKIP_OID_VERIFICATION) &&
+ !(u->flags & REF_LOG_ONLY)) {
+ struct object *o = parse_object(refs->base.repo, &u->new_oid);
+ if (!o) {
+ strbuf_addf(err,
+ _("trying to write ref '%s' with nonexistent object %s"),
+ u->refname, oid_to_hex(&u->new_oid));
+ ret = -1;
+ goto done;
+ }
+
+ if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
+ strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
+ oid_to_hex(&u->new_oid), u->refname);
+ ret = -1;
+ goto done;
+ }
+ }
+
+ /*
+ * When we update the reference that HEAD points to we enqueue
+ * a second log-only update for HEAD so that its reflog is
+ * updated accordingly.
+ */
+ if (head_type == REF_ISSYMREF &&
+ !(u->flags & REF_LOG_ONLY) &&
+ !(u->flags & REF_UPDATE_VIA_HEAD) &&
+ !strcmp(rewritten_ref, head_referent.buf)) {
+ struct ref_update *new_update;
+
+ /*
+ * First make sure that HEAD is not already in the
+ * transaction. This check is O(lg N) in the transaction
+ * size, but it happens at most once per transaction.
+ */
+ if (string_list_has_string(&affected_refnames, "HEAD")) {
+ /* An entry already existed */
+ strbuf_addf(err,
+ _("multiple updates for 'HEAD' (including one "
+ "via its referent '%s') are not allowed"),
+ u->refname);
+ ret = TRANSACTION_NAME_CONFLICT;
+ goto done;
+ }
+
+ new_update = ref_transaction_add_update(
+ transaction, "HEAD",
+ u->flags | REF_LOG_ONLY | REF_NO_DEREF,
+ &u->new_oid, &u->old_oid, NULL, NULL, u->msg);
+ string_list_insert(&affected_refnames, new_update->refname);
+ }
+
+ ret = read_ref_without_reload(refs, stack, rewritten_ref,
+ &current_oid, &referent, &u->type);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
+ /*
+ * The reference does not exist, and we either have no
+ * old object ID or expect the reference to not exist.
+ * We can thus skip below safety checks as well as the
+ * symref splitting. But we do want to verify that
+ * there is no conflicting reference here so that we
+ * can output a proper error message instead of failing
+ * at a later point.
+ */
+ ret = refs_verify_refname_available(ref_store, u->refname,
+ &affected_refnames, NULL, err);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * There is no need to write the reference deletion
+ * when the reference in question doesn't exist.
+ */
+ if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
+ ret = queue_transaction_update(refs, tx_data, u,
+ &current_oid, err);
+ if (ret)
+ goto done;
+ }
+
+ continue;
+ }
+ if (ret > 0) {
+ /* The reference does not exist, but we expected it to. */
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "unable to resolve reference '%s'"),
+ ref_update_original_update_refname(u), u->refname);
+ ret = -1;
+ goto done;
+ }
+
+ if (u->type & REF_ISSYMREF) {
+ /*
+ * The reftable stack is locked at this point already,
+ * so it is safe to call `refs_resolve_ref_unsafe()`
+ * here without causing races.
+ */
+ const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
+ &current_oid, NULL);
+
+ if (u->flags & REF_NO_DEREF) {
+ if (u->flags & REF_HAVE_OLD && !resolved) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "error reading reference"), u->refname);
+ ret = -1;
+ goto done;
+ }
+ } else {
+ struct ref_update *new_update;
+ int new_flags;
+
+ new_flags = u->flags;
+ if (!strcmp(rewritten_ref, "HEAD"))
+ new_flags |= REF_UPDATE_VIA_HEAD;
+
+ /*
+ * If we are updating a symref (eg. HEAD), we should also
+ * update the branch that the symref points to.
+ *
+ * This is generic functionality, and would be better
+ * done in refs.c, but the current implementation is
+ * intertwined with the locking in files-backend.c.
+ */
+ new_update = ref_transaction_add_update(
+ transaction, referent.buf, new_flags,
+ u->new_target ? NULL : &u->new_oid,
+ u->old_target ? NULL : &u->old_oid,
+ u->new_target, u->old_target, u->msg);
+
+ new_update->parent_update = u;
+
+ /*
+ * Change the symbolic ref update to log only. Also, it
+ * doesn't need to check its old OID value, as that will be
+ * done when new_update is processed.
+ */
+ u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
+ u->flags &= ~REF_HAVE_OLD;
+
+ if (string_list_has_string(&affected_refnames, new_update->refname)) {
+ strbuf_addf(err,
+ _("multiple updates for '%s' (including one "
+ "via symref '%s') are not allowed"),
+ referent.buf, u->refname);
+ ret = TRANSACTION_NAME_CONFLICT;
+ goto done;
+ }
+ string_list_insert(&affected_refnames, new_update->refname);
+ }
+ }
+
+ /*
+ * Verify that the old object matches our expectations. Note
+ * that the error messages here do not make a lot of sense in
+ * the context of the reftable backend as we never lock
+ * individual refs. But the error messages match what the files
+ * backend returns, which keeps our tests happy.
+ */
+ if (u->old_target) {
+ if (!(u->type & REF_ISSYMREF)) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "expected symref with target '%s': "
+ "but is a regular ref"),
+ ref_update_original_update_refname(u),
+ u->old_target);
+ ret = -1;
+ goto done;
+ }
+
+ if (ref_update_check_old_target(referent.buf, u, err)) {
+ ret = -1;
+ goto done;
+ }
+ } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
+ if (is_null_oid(&u->old_oid))
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "reference already exists"),
+ ref_update_original_update_refname(u));
+ else if (is_null_oid(&current_oid))
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "reference is missing but expected %s"),
+ ref_update_original_update_refname(u),
+ oid_to_hex(&u->old_oid));
+ else
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "is at %s but expected %s"),
+ ref_update_original_update_refname(u),
+ oid_to_hex(&current_oid),
+ oid_to_hex(&u->old_oid));
+ ret = -1;
+ goto done;
+ }
+
+ /*
+ * If all of the following conditions are true:
+ *
+ * - We're not about to write a symref.
+ * - We're not about to write a log-only entry.
+ * - Old and new object IDs are the same.
+ *
+ * Then we're essentially doing a no-op update that can be
+ * skipped. This is not only for the sake of efficiency, but
+ * also skips writing unneeded reflog entries.
+ */
+ if ((u->type & REF_ISSYMREF) ||
+ (u->flags & REF_LOG_ONLY) ||
+ (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
+ ret = queue_transaction_update(refs, tx_data, u,
+ &current_oid, err);
+ if (ret)
+ goto done;
+ }
+ }
+
+ transaction->backend_data = tx_data;
+ transaction->state = REF_TRANSACTION_PREPARED;
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ if (ret < 0) {
+ free_transaction_data(tx_data);
+ transaction->state = REF_TRANSACTION_CLOSED;
+ if (!err->len)
+ strbuf_addf(err, _("reftable: transaction prepare: %s"),
+ reftable_error_str(ret));
+ }
+ string_list_clear(&affected_refnames, 0);
+ strbuf_release(&referent);
+ strbuf_release(&head_referent);
+
+ return ret;
+}
+
+static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
+ struct ref_transaction *transaction,
+ struct strbuf *err UNUSED)
+{
+ struct reftable_transaction_data *tx_data = transaction->backend_data;
+ free_transaction_data(tx_data);
+ transaction->state = REF_TRANSACTION_CLOSED;
+ return 0;
+}
+
+static int transaction_update_cmp(const void *a, const void *b)
+{
+ return strcmp(((struct reftable_transaction_update *)a)->update->refname,
+ ((struct reftable_transaction_update *)b)->update->refname);
+}
+
+static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct write_transaction_table_arg *arg = cb_data;
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ struct reftable_log_record *logs = NULL;
+ struct ident_split committer_ident = {0};
+ size_t logs_nr = 0, logs_alloc = 0, i;
+ const char *committer_info;
+ int ret = 0;
+
+ committer_info = git_committer_info(0);
+ if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
+ BUG("failed splitting committer info");
+
+ QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
+
+ reftable_writer_set_limits(writer, ts, ts);
+
+ for (i = 0; i < arg->updates_nr; i++) {
+ struct reftable_transaction_update *tx_update = &arg->updates[i];
+ struct ref_update *u = tx_update->update;
+
+ /*
+ * Write a reflog entry when updating a ref to point to
+ * something new in either of the following cases:
+ *
+ * - The reference is about to be deleted. We always want to
+ * delete the reflog in that case.
+ * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
+ * the reflog entry.
+ * - `core.logAllRefUpdates` tells us to create the reflog for
+ * the given ref.
+ */
+ if ((u->flags & REF_HAVE_NEW) &&
+ !(u->type & REF_ISSYMREF) &&
+ ref_update_has_null_new_value(u)) {
+ struct reftable_log_record log = {0};
+ struct reftable_iterator it = {0};
+
+ reftable_stack_init_log_iterator(arg->stack, &it);
+
+ /*
+ * When deleting refs we also delete all reflog entries
+ * with them. While it is not strictly required to
+ * delete reflogs together with their refs, this
+ * matches the behaviour of the files backend.
+ *
+ * Unfortunately, we have no better way than to delete
+ * all reflog entries one by one.
+ */
+ ret = reftable_iterator_seek_log(&it, u->refname);
+ while (ret == 0) {
+ struct reftable_log_record *tombstone;
+
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ break;
+ if (ret > 0 || strcmp(log.refname, u->refname)) {
+ ret = 0;
+ break;
+ }
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ tombstone = &logs[logs_nr++];
+ tombstone->refname = xstrdup(u->refname);
+ tombstone->value_type = REFTABLE_LOG_DELETION;
+ tombstone->update_index = log.update_index;
+ }
+
+ reftable_log_record_release(&log);
+ reftable_iterator_destroy(&it);
+
+ if (ret)
+ goto done;
+ } else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
+ (u->flags & REF_HAVE_NEW) &&
+ (u->flags & REF_FORCE_CREATE_REFLOG ||
+ should_write_log(arg->refs, u->refname))) {
+ struct reftable_log_record *log;
+ int create_reflog = 1;
+
+ if (u->new_target) {
+ if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
+ RESOLVE_REF_READING, &u->new_oid, NULL)) {
+ /*
+ * TODO: currently we skip creating reflogs for dangling
+ * symref updates. It would be nice to capture these as
+ * zero-OID updates, however.
+ */
+ create_reflog = 0;
+ }
+ }
+
+ if (create_reflog) {
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ log = &logs[logs_nr++];
+ memset(log, 0, sizeof(*log));
+
+ fill_reftable_log_record(log, &committer_ident);
+ log->update_index = ts;
+ log->refname = xstrdup(u->refname);
+ memcpy(log->value.update.new_hash,
+ u->new_oid.hash, GIT_MAX_RAWSZ);
+ memcpy(log->value.update.old_hash,
+ tx_update->current_oid.hash, GIT_MAX_RAWSZ);
+ log->value.update.message =
+ xstrndup(u->msg, arg->refs->write_options.block_size / 2);
+ }
+ }
+
+ if (u->flags & REF_LOG_ONLY)
+ continue;
+
+ if (u->new_target) {
+ struct reftable_ref_record ref = {
+ .refname = (char *)u->refname,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = (char *)u->new_target,
+ .update_index = ts,
+ };
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ goto done;
+ } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
+ struct reftable_ref_record ref = {
+ .refname = (char *)u->refname,
+ .update_index = ts,
+ .value_type = REFTABLE_REF_DELETION,
+ };
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ goto done;
+ } else if (u->flags & REF_HAVE_NEW) {
+ struct reftable_ref_record ref = {0};
+ struct object_id peeled;
+ int peel_error;
+
+ ref.refname = (char *)u->refname;
+ ref.update_index = ts;
+
+ peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled);
+ if (!peel_error) {
+ ref.value_type = REFTABLE_REF_VAL2;
+ memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
+ memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
+ } else if (!is_null_oid(&u->new_oid)) {
+ ref.value_type = REFTABLE_REF_VAL1;
+ memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
+ }
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ goto done;
+ }
+ }
+
+ /*
+ * Logs are written at the end so that we do not have intermixed ref
+ * and log blocks.
+ */
+ if (logs) {
+ ret = reftable_writer_add_logs(writer, logs, logs_nr);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ return ret;
+}
+
+static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
+ struct ref_transaction *transaction,
+ struct strbuf *err)
+{
+ struct reftable_transaction_data *tx_data = transaction->backend_data;
+ int ret = 0;
+
+ for (size_t i = 0; i < tx_data->args_nr; i++) {
+ ret = reftable_addition_add(tx_data->args[i].addition,
+ write_transaction_table, &tx_data->args[i]);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_addition_commit(tx_data->args[i].addition);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ free_transaction_data(tx_data);
+ transaction->state = REF_TRANSACTION_CLOSED;
+
+ if (ret) {
+ strbuf_addf(err, _("reftable: transaction failure: %s"),
+ reftable_error_str(ret));
+ return -1;
+ }
+ return ret;
+}
+
+static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
+ struct ref_transaction *transaction,
+ struct strbuf *err)
+{
+ return ref_transaction_commit(transaction, err);
+}
+
+static int reftable_be_pack_refs(struct ref_store *ref_store,
+ struct pack_refs_opts *opts)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
+ struct reftable_stack *stack;
+ int ret;
+
+ if (refs->err)
+ return refs->err;
+
+ stack = refs->worktree_stack;
+ if (!stack)
+ stack = refs->main_stack;
+
+ if (opts->flags & PACK_REFS_AUTO)
+ ret = reftable_stack_auto_compact(stack);
+ else
+ ret = reftable_stack_compact_all(stack, NULL);
+ if (ret < 0) {
+ ret = error(_("unable to compact stack: %s"),
+ reftable_error_str(ret));
+ goto out;
+ }
+
+ ret = reftable_stack_clean(stack);
+
+out:
+ return ret;
+}
+
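+/*
+ * Illustrative: a plain "git pack-refs" compacts all tables of the
+ * stack into a single one, whereas "git pack-refs --auto" maps to
+ * PACK_REFS_AUTO and lets the reftable library decide, based on its
+ * geometric sequence, whether compaction is worthwhile at all.
+ */
+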
+struct write_create_symref_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_stack *stack;
+ struct strbuf *err;
+ const char *refname;
+ const char *target;
+ const char *logmsg;
+};
+
+struct write_copy_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_stack *stack;
+ const char *oldname;
+ const char *newname;
+ const char *logmsg;
+ int delete_old;
+};
+
+static int write_copy_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct write_copy_arg *arg = cb_data;
+ uint64_t deletion_ts, creation_ts;
+ struct reftable_ref_record old_ref = {0}, refs[2] = {0};
+ struct reftable_log_record old_log = {0}, *logs = NULL;
+ struct reftable_iterator it = {0};
+ struct string_list skip = STRING_LIST_INIT_NODUP;
+ struct ident_split committer_ident = {0};
+ struct strbuf errbuf = STRBUF_INIT;
+ size_t logs_nr = 0, logs_alloc = 0, i;
+ const char *committer_info;
+ int ret;
+
+ committer_info = git_committer_info(0);
+ if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
+ BUG("failed splitting committer info");
+
+ if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
+ ret = error(_("refname %s not found"), arg->oldname);
+ goto done;
+ }
+ if (old_ref.value_type == REFTABLE_REF_SYMREF) {
+ ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
+ arg->oldname);
+ goto done;
+ }
+
+ /*
+ * There is nothing to do when the old and new name are the same, so
+ * we exit early in that case.
+ */
+ if (!strcmp(arg->oldname, arg->newname)) {
+ ret = 0;
+ goto done;
+ }
+
+ /*
+ * Verify that the new refname is available.
+ */
+ if (arg->delete_old)
+ string_list_insert(&skip, arg->oldname);
+ ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
+ NULL, &skip, &errbuf);
+ if (ret < 0) {
+ error("%s", errbuf.buf);
+ goto done;
+ }
+
+ /*
+ * When deleting the old reference we have to use two update indices:
+ * one to delete the old ref and its reflog, and one to create the
+ * new ref and its reflog. They need to be staged with two separate
+ * indices because the new reflog needs to encode both the deletion of
+ * the old branch and the creation of the new branch, and we cannot do
+ * two changes to a reflog in a single update.
+ */
+ deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
+ if (arg->delete_old)
+ creation_ts++;
+ reftable_writer_set_limits(writer, deletion_ts, creation_ts);
+
+ /*
+ * Add the new reference. If this is a rename then we also delete the
+ * old reference.
+ */
+ refs[0] = old_ref;
+ refs[0].refname = xstrdup(arg->newname);
+ refs[0].update_index = creation_ts;
+ if (arg->delete_old) {
+ refs[1].refname = xstrdup(arg->oldname);
+ refs[1].value_type = REFTABLE_REF_DELETION;
+ refs[1].update_index = deletion_ts;
+ }
+ ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * When deleting the old branch we need to create a reflog entry on the
+ * new branch name that indicates that the old branch has been deleted
+ * and then recreated. This is a tad weird, but matches what the files
+ * backend does.
+ */
+ if (arg->delete_old) {
+ struct strbuf head_referent = STRBUF_INIT;
+ struct object_id head_oid;
+ int append_head_reflog;
+ unsigned head_type = 0;
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
+ fill_reftable_log_record(&logs[logs_nr], &committer_ident);
+ logs[logs_nr].refname = xstrdup(arg->newname);
+ logs[logs_nr].update_index = deletion_ts;
+ logs[logs_nr].value.update.message =
+ xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
+ memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
+ logs_nr++;
+
+ ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
+ &head_referent, &head_type);
+ if (ret < 0)
+ goto done;
+ append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
+ strbuf_release(&head_referent);
+
+ /*
+ * The files backend uses `refs_delete_ref()` to delete the old
+ * branch name, which will append a reflog entry for HEAD in
+ * case it points to the old branch.
+ */
+ if (append_head_reflog) {
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr] = logs[logs_nr - 1];
+ logs[logs_nr].refname = xstrdup("HEAD");
+ logs[logs_nr].value.update.name =
+ xstrdup(logs[logs_nr].value.update.name);
+ logs[logs_nr].value.update.email =
+ xstrdup(logs[logs_nr].value.update.email);
+ logs[logs_nr].value.update.message =
+ xstrdup(logs[logs_nr].value.update.message);
+ logs_nr++;
+ }
+ }
+
+ /*
+ * Create the reflog entry for the newly created branch.
+ */
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
+ fill_reftable_log_record(&logs[logs_nr], &committer_ident);
+ logs[logs_nr].refname = xstrdup(arg->newname);
+ logs[logs_nr].update_index = creation_ts;
+ logs[logs_nr].value.update.message =
+ xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
+ memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
+ logs_nr++;
+
+ /*
+ * In addition to writing the reflog entry for the new branch, we also
+ * copy over all log entries from the old reflog. Last but not least,
+ * when renaming we also have to delete all the old reflog entries.
+ */
+ reftable_stack_init_log_iterator(arg->stack, &it);
+ ret = reftable_iterator_seek_log(&it, arg->oldname);
+ if (ret < 0)
+ goto done;
+
+ while (1) {
+ ret = reftable_iterator_next_log(&it, &old_log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
+ ret = 0;
+ break;
+ }
+
+ free(old_log.refname);
+
+ /*
+ * Copy over the old reflog entry with the new refname.
+ */
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr] = old_log;
+ logs[logs_nr].refname = xstrdup(arg->newname);
+ logs_nr++;
+
+ /*
+ * Delete the old reflog entry in case we are renaming.
+ */
+ if (arg->delete_old) {
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
+ logs[logs_nr].refname = xstrdup(arg->oldname);
+ logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
+ logs[logs_nr].update_index = old_log.update_index;
+ logs_nr++;
+ }
+
+ /*
+ * Transfer ownership of the log record we're iterating over to
+ * the array of log records. Otherwise, the pointers would get
+ * free'd or reallocated by the iterator.
+ */
+ memset(&old_log, 0, sizeof(old_log));
+ }
+
+ ret = reftable_writer_add_logs(writer, logs, logs_nr);
+ if (ret < 0)
+ goto done;
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_iterator_destroy(&it);
+ string_list_clear(&skip, 0);
+ strbuf_release(&errbuf);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ for (i = 0; i < ARRAY_SIZE(refs); i++)
+ reftable_ref_record_release(&refs[i]);
+ reftable_ref_record_release(&old_ref);
+ reftable_log_record_release(&old_log);
+ return ret;
+}
+
+static int reftable_be_rename_ref(struct ref_store *ref_store,
+ const char *oldrefname,
+ const char *newrefname,
+ const char *logmsg)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
+ struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
+ struct write_copy_arg arg = {
+ .refs = refs,
+ .stack = stack,
+ .oldname = oldrefname,
+ .newname = newrefname,
+ .logmsg = logmsg,
+ .delete_old = 1,
+ };
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ goto done;
+ ret = reftable_stack_add(stack, &write_copy_table, &arg);
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ return ret;
+}
+
+static int reftable_be_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname,
+ const char *newrefname,
+ const char *logmsg)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
+ struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
+ struct write_copy_arg arg = {
+ .refs = refs,
+ .stack = stack,
+ .oldname = oldrefname,
+ .newname = newrefname,
+ .logmsg = logmsg,
+ };
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ goto done;
+ ret = reftable_stack_add(stack, &write_copy_table, &arg);
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ return ret;
+}
+
+struct reftable_reflog_iterator {
+ struct ref_iterator base;
+ struct reftable_ref_store *refs;
+ struct reftable_iterator iter;
+ struct reftable_log_record log;
+ struct strbuf last_name;
+ int err;
+};
+
+static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
+{
+ struct reftable_reflog_iterator *iter =
+ (struct reftable_reflog_iterator *)ref_iterator;
+
+ while (!iter->err) {
+ iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
+ if (iter->err)
+ break;
+
+ /*
+ * We want to yield each refname that has a reflog exactly once, so
+ * we skip records whose name we have already produced. This could
+ * be made faster by seeking directly to reflog@update_index==0.
+ */
+ if (!strcmp(iter->log.refname, iter->last_name.buf))
+ continue;
+
+ if (check_refname_format(iter->log.refname,
+ REFNAME_ALLOW_ONELEVEL))
+ continue;
+
+ strbuf_reset(&iter->last_name);
+ strbuf_addstr(&iter->last_name, iter->log.refname);
+ iter->base.refname = iter->log.refname;
+
+ break;
+ }
+
+ if (iter->err > 0) {
+ if (ref_iterator_abort(ref_iterator) != ITER_DONE)
+ return ITER_ERROR;
+ return ITER_DONE;
+ }
+
+ if (iter->err < 0) {
+ ref_iterator_abort(ref_iterator);
+ return ITER_ERROR;
+ }
+
+ return ITER_OK;
+}
+
+static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
+ struct object_id *peeled UNUSED)
+{
+ BUG("reftable reflog iterator cannot be peeled");
+ return -1;
+}
+
+static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
+{
+ struct reftable_reflog_iterator *iter =
+ (struct reftable_reflog_iterator *)ref_iterator;
+ reftable_log_record_release(&iter->log);
+ reftable_iterator_destroy(&iter->iter);
+ strbuf_release(&iter->last_name);
+ free(iter);
+ return ITER_DONE;
+}
+
+static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
+ .advance = reftable_reflog_iterator_advance,
+ .peel = reftable_reflog_iterator_peel,
+ .abort = reftable_reflog_iterator_abort
+};
+
+static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
+ struct reftable_stack *stack)
+{
+ struct reftable_reflog_iterator *iter;
+ int ret;
+
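+ /*
+ * Note that errors are not returned directly, but stored in
+ * `iter->err` so that the first call to the iterator's advance
+ * function can surface them.
+ */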
+ iter = xcalloc(1, sizeof(*iter));
+ base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
+ strbuf_init(&iter->last_name, 0);
+ iter->refs = refs;
+
+ ret = refs->err;
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret < 0)
+ goto done;
+
+ reftable_stack_init_log_iterator(stack, &iter->iter);
+ ret = reftable_iterator_seek_log(&iter->iter, "");
+ if (ret < 0)
+ goto done;
+
+done:
+ iter->err = ret;
+ return iter;
+}
+
+static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
+ struct reftable_reflog_iterator *main_iter, *worktree_iter;
+
+ main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
+ if (!refs->worktree_stack)
+ return &main_iter->base;
+
+ worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
+
+ return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
+ ref_iterator_select, NULL);
+}
+
+static int yield_log_record(struct reftable_ref_store *refs,
+ struct reftable_log_record *log,
+ each_reflog_ent_fn fn,
+ void *cb_data)
+{
+ struct object_id old_oid, new_oid;
+ const char *full_committer;
+
+ oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
+ oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);
+
+ /*
+ * When both the old object ID and the new object ID are null
+ * then this is the reflog existence marker, which must not be
+ * exposed to the caller.
+ */
+ if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
+ return 0;
+
+ full_committer = fmt_ident(log->value.update.name, log->value.update.email,
+ WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
+ return fn(&old_oid, &new_oid, full_committer,
+ log->value.update.time, log->value.update.tz_offset,
+ log->value.update.message, cb_data);
+}
+
+static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
+ const char *refname,
+ each_reflog_ent_fn fn,
+ void *cb_data)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_log_record log = {0};
+ struct reftable_iterator it = {0};
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
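+ /*
+ * Reftable stores reflog entries in reverse chronological order, so
+ * reverse iteration matches its natural ordering and we can simply
+ * stream the records.
+ */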
+ reftable_stack_init_log_iterator(stack, &it);
+ ret = reftable_iterator_seek_log(&it, refname);
+ while (!ret) {
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ break;
+ if (ret > 0 || strcmp(log.refname, refname)) {
+ ret = 0;
+ break;
+ }
+
+ ret = yield_log_record(refs, &log, fn, cb_data);
+ if (ret)
+ break;
+ }
+
+ reftable_log_record_release(&log);
+ reftable_iterator_destroy(&it);
+ return ret;
+}
+
+static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
+ const char *refname,
+ each_reflog_ent_fn fn,
+ void *cb_data)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_log_record *logs = NULL;
+ struct reftable_iterator it = {0};
+ size_t logs_alloc = 0, logs_nr = 0, i;
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
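+ /*
+ * Reftable yields reflog entries newest-first, but this function must
+ * present them oldest-first. We thus buffer all records and then
+ * replay the buffer from its end.
+ */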
+ reftable_stack_init_log_iterator(stack, &it);
+ ret = reftable_iterator_seek_log(&it, refname);
+ while (!ret) {
+ struct reftable_log_record log = {0};
+
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 || strcmp(log.refname, refname)) {
+ reftable_log_record_release(&log);
+ ret = 0;
+ break;
+ }
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr++] = log;
+ }
+
+ for (i = logs_nr; i--;) {
+ ret = yield_log_record(refs, &logs[i], fn, cb_data);
+ if (ret)
+ goto done;
+ }
+
+done:
+ reftable_iterator_destroy(&it);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ return ret;
+}
+
+static int reftable_be_reflog_exists(struct ref_store *ref_store,
+ const char *refname)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_log_record log = {0};
+ struct reftable_iterator it = {0};
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret < 0)
+ goto done;
+
+ reftable_stack_init_log_iterator(stack, &it);
+ ret = reftable_iterator_seek_log(&it, refname);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Check whether we get at least one log record for the given ref name.
+ * If so, the reflog exists, otherwise it doesn't.
+ */
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0) {
+ ret = 0;
+ goto done;
+ }
+
+ ret = strcmp(log.refname, refname) == 0;
+
+done:
+ reftable_iterator_destroy(&it);
+ reftable_log_record_release(&log);
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+struct write_reflog_existence_arg {
+ struct reftable_ref_store *refs;
+ const char *refname;
+ struct reftable_stack *stack;
+};
+
+static int write_reflog_existence_table(struct reftable_writer *writer,
+ void *cb_data)
+{
+ struct write_reflog_existence_arg *arg = cb_data;
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ struct reftable_log_record log = {0};
+ int ret;
+
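+ /*
+ * `reftable_stack_read_log()` returns 0 when a log record exists and
+ * a positive value when there is none. We only need to write the
+ * existence marker in the latter case.
+ */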
+ ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
+ if (ret <= 0)
+ goto done;
+
+ reftable_writer_set_limits(writer, ts, ts);
+
+ /*
+ * The existence entry has both its old and new object ID set to the
+ * null object ID. Our iterators are aware of this and will not
+ * present it to their callers.
+ */
+ log.refname = xstrdup(arg->refname);
+ log.update_index = ts;
+ log.value_type = REFTABLE_LOG_UPDATE;
+ ret = reftable_writer_add_log(writer, &log);
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_log_record_release(&log);
+ return ret;
+}
+
+static int reftable_be_create_reflog(struct ref_store *ref_store,
+ const char *refname,
+ struct strbuf *errmsg UNUSED)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct write_reflog_existence_arg arg = {
+ .refs = refs,
+ .stack = stack,
+ .refname = refname,
+ };
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
+
+done:
+ return ret;
+}
+
+struct write_reflog_delete_arg {
+ struct reftable_stack *stack;
+ const char *refname;
+};
+
+static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct write_reflog_delete_arg *arg = cb_data;
+ struct reftable_log_record log = {0}, tombstone = {0};
+ struct reftable_iterator it = {0};
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ int ret;
+
+ reftable_writer_set_limits(writer, ts, ts);
+
+ reftable_stack_init_log_iterator(arg->stack, &it);
+
+ /*
+ * In order to delete a reflog we need to delete all of its entries one
+ * by one. This is inefficient, but the reftable format does not have a
+ * better deletion marker right now.
+ */
+ ret = reftable_iterator_seek_log(&it, arg->refname);
+ while (ret == 0) {
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ break;
+ if (ret > 0 || strcmp(log.refname, arg->refname)) {
+ ret = 0;
+ break;
+ }
+
+ tombstone.refname = (char *)arg->refname;
+ tombstone.value_type = REFTABLE_LOG_DELETION;
+ tombstone.update_index = log.update_index;
+
+ ret = reftable_writer_add_log(writer, &tombstone);
+ }
+
+ reftable_log_record_release(&log);
+ reftable_iterator_destroy(&it);
+ return ret;
+}
+
+static int reftable_be_delete_reflog(struct ref_store *ref_store,
+ const char *refname)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct write_reflog_delete_arg arg = {
+ .stack = stack,
+ .refname = refname,
+ };
+ int ret;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ return ret;
+ ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
+
+ assert(ret != REFTABLE_API_ERROR);
+ return ret;
+}
+
+struct reflog_expiry_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_stack *stack;
+ struct reftable_log_record *records;
+ struct object_id update_oid;
+ const char *refname;
+ size_t len;
+};
+
+static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct reflog_expiry_arg *arg = cb_data;
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ uint64_t live_records = 0;
+ size_t i;
+ int ret;
+
+ for (i = 0; i < arg->len; i++)
+ if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
+ live_records++;
+
+ reftable_writer_set_limits(writer, ts, ts);
+
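+ /*
+ * When the expiry rewrote the ref's tip we write an updated ref
+ * record, again storing the peeled object ID for annotated tags.
+ */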
+ if (!is_null_oid(&arg->update_oid)) {
+ struct reftable_ref_record ref = {0};
+ struct object_id peeled;
+
+ ref.refname = (char *)arg->refname;
+ ref.update_index = ts;
+
+ if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
+ ref.value_type = REFTABLE_REF_VAL2;
+ memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
+ memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
+ } else {
+ ref.value_type = REFTABLE_REF_VAL1;
+ memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
+ }
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * When there are no more entries left in the reflog we empty it
+ * completely, but write a placeholder reflog entry that indicates that
+ * the reflog still exists.
+ */
+ if (!live_records) {
+ struct reftable_log_record log = {
+ .refname = (char *)arg->refname,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .update_index = ts,
+ };
+
+ ret = reftable_writer_add_log(writer, &log);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < arg->len; i++) {
+ ret = reftable_writer_add_log(writer, &arg->records[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int reftable_be_reflog_expire(struct ref_store *ref_store,
+ const char *refname,
+ unsigned int flags,
+ reflog_expiry_prepare_fn prepare_fn,
+ reflog_expiry_should_prune_fn should_prune_fn,
+ reflog_expiry_cleanup_fn cleanup_fn,
+ void *policy_cb_data)
+{
+ /*
+ * For log expiry, we write tombstones for every single reflog entry
+ * that is to be expired. This means that the entries are still
+ * retrievable by delving into the stack, and expiring entries
+ * paradoxically takes extra memory. This memory is only reclaimed when
+ * compacting the reftable stack.
+ *
+ * It would be better if the refs backend supported an API that sets a
+ * criterion for all refs, passing the criterion to pack_refs().
+ *
+ * On the plus side, because we do the expiration per ref, we can easily
+ * insert the reflog existence dummies.
+ */
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
+ struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_log_record *logs = NULL;
+ struct reftable_log_record *rewritten = NULL;
+ struct reftable_ref_record ref_record = {0};
+ struct reftable_iterator it = {0};
+ struct reftable_addition *add = NULL;
+ struct reflog_expiry_arg arg = {0};
+ struct object_id oid = {0};
+ uint8_t *last_hash = NULL;
+ size_t logs_nr = 0, logs_alloc = 0, i;
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
+ ret = reftable_stack_reload(stack);
+ if (ret < 0)
+ goto done;
+
+ reftable_stack_init_log_iterator(stack, &it);
+
+ ret = reftable_iterator_seek_log(&it, refname);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_new_addition(&add, stack);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_read_ref(stack, refname, &ref_record);
+ if (ret < 0)
+ goto done;
+ if (reftable_ref_record_val1(&ref_record))
+ oidread(&oid, reftable_ref_record_val1(&ref_record),
+ ref_store->repo->hash_algo);
+ prepare_fn(refname, &oid, policy_cb_data);
+
+ while (1) {
+ struct reftable_log_record log = {0};
+ struct object_id old_oid, new_oid;
+
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 || strcmp(log.refname, refname)) {
+ reftable_log_record_release(&log);
+ break;
+ }
+
+ oidread(&old_oid, log.value.update.old_hash,
+ ref_store->repo->hash_algo);
+ oidread(&new_oid, log.value.update.new_hash,
+ ref_store->repo->hash_algo);
+
+ /*
+ * Skip over the reflog existence marker. We will add it back
+ * in when there are no live reflog records.
+ */
+ if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
+ reftable_log_record_release(&log);
+ continue;
+ }
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr++] = log;
+ }
+
+ /*
+ * We need to rewrite all reflog entries according to the pruning
+ * callback function:
+ *
+ * - If a reflog entry shall be pruned we mark the record for
+ * deletion.
+ *
+ * - Otherwise we may have to rewrite the chain of reflog entries so
+ * that gaps created by just-deleted records get backfilled.
+ */
+ CALLOC_ARRAY(rewritten, logs_nr);
+ for (i = logs_nr; i--;) {
+ struct reftable_log_record *dest = &rewritten[i];
+ struct object_id old_oid, new_oid;
+
+ *dest = logs[i];
+ oidread(&old_oid, logs[i].value.update.old_hash,
+ ref_store->repo->hash_algo);
+ oidread(&new_oid, logs[i].value.update.new_hash,
+ ref_store->repo->hash_algo);
+
+ if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
+ (timestamp_t)logs[i].value.update.time,
+ logs[i].value.update.tz_offset,
+ logs[i].value.update.message,
+ policy_cb_data)) {
+ dest->value_type = REFTABLE_LOG_DELETION;
+ } else {
+ if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
+ memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
+ last_hash = logs[i].value.update.new_hash;
+ }
+ }
+
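+ /*
+ * `last_hash` now points at the new object ID of the most recent
+ * surviving entry, which becomes the ref's new value in case we were
+ * asked to update it.
+ */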
+ if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
+ reftable_ref_record_val1(&ref_record))
+ oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
+
+ arg.refs = refs;
+ arg.records = rewritten;
+ arg.len = logs_nr;
+ arg.stack = stack;
+ arg.refname = refname;
+
+ ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Future improvement: we could skip writing records that were
+ * not changed.
+ */
+ if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
+ ret = reftable_addition_commit(add);
+
+done:
+ if (add)
+ cleanup_fn(policy_cb_data);
+ assert(ret != REFTABLE_API_ERROR);
+
+ reftable_ref_record_release(&ref_record);
+ reftable_iterator_destroy(&it);
+ reftable_addition_destroy(add);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ free(rewritten);
+ return ret;
+}
+
+static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
+ struct fsck_options *o UNUSED)
+{
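+ /* Consistency checks for the reftable backend are not implemented yet. */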
+ return 0;
+}
+
+struct ref_storage_be refs_be_reftable = {
+ .name = "reftable",
+ .init = reftable_be_init,
+ .release = reftable_be_release,
+ .create_on_disk = reftable_be_create_on_disk,
+ .remove_on_disk = reftable_be_remove_on_disk,
+
+ .transaction_prepare = reftable_be_transaction_prepare,
+ .transaction_finish = reftable_be_transaction_finish,
+ .transaction_abort = reftable_be_transaction_abort,
+ .initial_transaction_commit = reftable_be_initial_transaction_commit,
+
+ .pack_refs = reftable_be_pack_refs,
+ .rename_ref = reftable_be_rename_ref,
+ .copy_ref = reftable_be_copy_ref,
+
+ .iterator_begin = reftable_be_iterator_begin,
+ .read_raw_ref = reftable_be_read_raw_ref,
+ .read_symbolic_ref = reftable_be_read_symbolic_ref,
+
+ .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
+ .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
+ .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
+ .reflog_exists = reftable_be_reflog_exists,
+ .create_reflog = reftable_be_create_reflog,
+ .delete_reflog = reftable_be_delete_reflog,
+ .reflog_expire = reftable_be_reflog_expire,
+
+ .fsck = reftable_be_fsck,
+};