Diffstat (limited to 'refs/reftable-backend.c')
 refs/reftable-backend.c (-rw-r--r--) | 1087 +++++++++++++++++++++++---------
 1 file changed, 652 insertions(+), 435 deletions(-)
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index 3c96fbf66f..4c3817f4ec 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -15,6 +15,7 @@
#include "../object.h"
#include "../path.h"
#include "../refs.h"
+#include "../reftable/reftable-basics.h"
#include "../reftable/reftable-stack.h"
#include "../reftable/reftable-record.h"
#include "../reftable/reftable-error.h"
@@ -23,6 +24,7 @@
#include "../setup.h"
#include "../strmap.h"
#include "../trace2.h"
+#include "../write-or-die.h"
#include "parse.h"
#include "refs-internal.h"
@@ -32,24 +34,111 @@
*/
#define REF_UPDATE_VIA_HEAD (1 << 8)
+struct reftable_backend {
+ struct reftable_stack *stack;
+ struct reftable_iterator it;
+};
+
+static void reftable_backend_on_reload(void *payload)
+{
+ struct reftable_backend *be = payload;
+ reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_init(struct reftable_backend *be,
+ const char *path,
+ const struct reftable_write_options *_opts)
+{
+ struct reftable_write_options opts = *_opts;
+ opts.on_reload = reftable_backend_on_reload;
+ opts.on_reload_payload = be;
+ return reftable_new_stack(&be->stack, path, &opts);
+}
+
+static void reftable_backend_release(struct reftable_backend *be)
+{
+ reftable_stack_destroy(be->stack);
+ be->stack = NULL;
+ reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_read_ref(struct reftable_backend *be,
+ const char *refname,
+ struct object_id *oid,
+ struct strbuf *referent,
+ unsigned int *type)
+{
+ struct reftable_ref_record ref = {0};
+ int ret;
+
+ if (!be->it.ops) {
+ ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
+ if (ret)
+ goto done;
+ }
+
+ ret = reftable_iterator_seek_ref(&be->it, refname);
+ if (ret)
+ goto done;
+
+ ret = reftable_iterator_next_ref(&be->it, &ref);
+ if (ret)
+ goto done;
+
+ if (strcmp(ref.refname, refname)) {
+ ret = 1;
+ goto done;
+ }
+
+ if (ref.value_type == REFTABLE_REF_SYMREF) {
+ strbuf_reset(referent);
+ strbuf_addstr(referent, ref.value.symref);
+ *type |= REF_ISSYMREF;
+ } else if (reftable_ref_record_val1(&ref)) {
+ unsigned int hash_id;
+
+ switch (reftable_stack_hash_id(be->stack)) {
+ case REFTABLE_HASH_SHA1:
+ hash_id = GIT_HASH_SHA1;
+ break;
+ case REFTABLE_HASH_SHA256:
+ hash_id = GIT_HASH_SHA256;
+ break;
+ default:
+ BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
+ }
+
+ oidread(oid, reftable_ref_record_val1(&ref),
+ &hash_algos[hash_id]);
+ } else {
+ /* We got a tombstone, which should not happen. */
+ BUG("unhandled reference value type %d", ref.value_type);
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_ref_record_release(&ref);
+ return ret;
+}
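
The new `struct reftable_backend` pairs a stack with a lazily-created ref iterator: the first read initializes the iterator, later reads merely re-seek it, and the `on_reload` callback tears it down whenever the stack reloads so that no read ever runs against a stale table. A minimal standalone sketch of that caching pattern, using simplified stand-in types rather than the real reftable API:

    #include <stdio.h>

    struct iterator { int valid; };

    struct backend {
    	struct iterator it; /* cached; zero means "not initialized yet" */
    };

    /* Plays the role of reftable_backend_on_reload(): invalidate the cache. */
    static void backend_on_reload(void *payload)
    {
    	struct backend *be = payload;
    	be->it.valid = 0;
    }

    /* Plays the role of the `!be->it.ops` check in reftable_backend_read_ref(). */
    static void backend_read(struct backend *be, const char *refname)
    {
    	if (!be->it.valid) {
    		printf("initializing iterator\n");
    		be->it.valid = 1;
    	}
    	printf("seek %s on cached iterator\n", refname);
    }

    int main(void)
    {
    	struct backend be = { { 0 } };

    	backend_read(&be, "refs/heads/main"); /* initializes */
    	backend_read(&be, "refs/heads/next"); /* reuses the iterator */
    	backend_on_reload(&be);               /* stack was reloaded */
    	backend_read(&be, "HEAD");            /* initializes again */
    	return 0;
    }
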
+
struct reftable_ref_store {
struct ref_store base;
/*
- * The main stack refers to the common dir and thus contains common
+ * The main backend refers to the common dir and thus contains common
* refs as well as refs of the main repository.
*/
- struct reftable_stack *main_stack;
+ struct reftable_backend main_backend;
/*
- * The worktree stack refers to the gitdir in case the refdb is opened
+ * The worktree backend refers to the gitdir in case the refdb is opened
* via a worktree. It thus contains the per-worktree refs.
*/
- struct reftable_stack *worktree_stack;
+ struct reftable_backend worktree_backend;
/*
- * Map of worktree stacks by their respective worktree names. The map
+ * Map of worktree backends by their respective worktree names. The map
* is populated lazily when we try to resolve `worktrees/$worktree` refs.
*/
- struct strmap worktree_stacks;
+ struct strmap worktree_backends;
struct reftable_write_options write_options;
unsigned int store_flags;
@@ -95,21 +184,25 @@ static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_sto
* like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
* those references in their normalized form.
*/
-static struct reftable_stack *stack_for(struct reftable_ref_store *store,
- const char *refname,
- const char **rewritten_ref)
+static int backend_for(struct reftable_backend **out,
+ struct reftable_ref_store *store,
+ const char *refname,
+ const char **rewritten_ref,
+ int reload)
{
+ struct reftable_backend *be;
const char *wtname;
int wtname_len;
- if (!refname)
- return store->main_stack;
+ if (!refname) {
+ be = &store->main_backend;
+ goto out;
+ }
switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
case REF_WORKTREE_OTHER: {
static struct strbuf wtname_buf = STRBUF_INIT;
struct strbuf wt_dir = STRBUF_INIT;
- struct reftable_stack *stack;
/*
* We're using a static buffer here so that we don't need to
@@ -123,40 +216,55 @@ static struct reftable_stack *stack_for(struct reftable_ref_store *store,
/*
* There is an edge case here: when the worktree references the
* current worktree, then we set up the stack once via
- * `worktree_stacks` and once via `worktree_stack`. This is
+ * `worktree_backends` and once via `worktree_backend`. This is
* wasteful, but in the reading case it shouldn't matter. And
* in the writing case we would notice that the stack is locked
* already and error out when trying to write a reference via
* both stacks.
*/
- stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
- if (!stack) {
+ be = strmap_get(&store->worktree_backends, wtname_buf.buf);
+ if (!be) {
strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
store->base.repo->commondir, wtname_buf.buf);
- store->err = reftable_new_stack(&stack, wt_dir.buf,
- &store->write_options);
+ CALLOC_ARRAY(be, 1);
+ store->err = reftable_backend_init(be, wt_dir.buf,
+ &store->write_options);
assert(store->err != REFTABLE_API_ERROR);
- strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
+
+ strmap_put(&store->worktree_backends, wtname_buf.buf, be);
}
strbuf_release(&wt_dir);
- return stack;
+ goto out;
}
case REF_WORKTREE_CURRENT:
/*
* If there is no worktree stack then we're currently in the
* main worktree. We thus return the main stack in that case.
*/
- if (!store->worktree_stack)
- return store->main_stack;
- return store->worktree_stack;
+ if (!store->worktree_backend.stack)
+ be = &store->main_backend;
+ else
+ be = &store->worktree_backend;
+ goto out;
case REF_WORKTREE_MAIN:
case REF_WORKTREE_SHARED:
- return store->main_stack;
+ be = &store->main_backend;
+ goto out;
default:
BUG("unhandled worktree reference type");
}
+
+out:
+ if (reload) {
+ int ret = reftable_stack_reload(be->stack);
+ if (ret)
+ return ret;
+ }
+ *out = be;
+
+ return 0;
}
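
`backend_for()` routes a refname to the backend that owns it and optionally reloads that backend's stack first: one-shot readers pass `reload=1` so they observe concurrent writes, while transaction code passes `reload=0` because its stacks are already locked and fresh. A rough standalone model of the routing alone, assuming simplified prefix checks in place of Git's `parse_worktree_ref()`:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the REF_WORKTREE_* dispatch in backend_for(). */
    static const char *route(const char *refname)
    {
    	if (!refname)
    		return "main backend";
    	if (!strncmp(refname, "worktrees/", strlen("worktrees/")))
    		return "per-worktree backend (lazily created, cached in a map)";
    	if (!strncmp(refname, "main-worktree/", strlen("main-worktree/")))
    		return "main backend";
    	return "current worktree's backend, or main if there is none";
    }

    int main(void)
    {
    	const char *names[] = {
    		"refs/heads/main",
    		"worktrees/wt1/refs/bisect/bad",
    		"main-worktree/HEAD",
    	};

    	for (size_t i = 0; i < sizeof(names) / sizeof(*names); i++)
    		printf("%-30s -> %s\n", names[i], route(names[i]));
    	return 0;
    }
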
static int should_write_log(struct reftable_ref_store *refs, const char *refname)
@@ -205,38 +313,6 @@ static void fill_reftable_log_record(struct reftable_log_record *log, const stru
log->value.update.tz_offset = sign * atoi(tz_begin);
}
-static int read_ref_without_reload(struct reftable_ref_store *refs,
- struct reftable_stack *stack,
- const char *refname,
- struct object_id *oid,
- struct strbuf *referent,
- unsigned int *type)
-{
- struct reftable_ref_record ref = {0};
- int ret;
-
- ret = reftable_stack_read_ref(stack, refname, &ref);
- if (ret)
- goto done;
-
- if (ref.value_type == REFTABLE_REF_SYMREF) {
- strbuf_reset(referent);
- strbuf_addstr(referent, ref.value.symref);
- *type |= REF_ISSYMREF;
- } else if (reftable_ref_record_val1(&ref)) {
- oidread(oid, reftable_ref_record_val1(&ref),
- refs->base.repo->hash_algo);
- } else {
- /* We got a tombstone, which should not happen. */
- BUG("unhandled reference value type %d", ref.value_type);
- }
-
-done:
- assert(ret != REFTABLE_API_ERROR);
- reftable_ref_record_release(&ref);
- return ret;
-}
-
static int reftable_be_config(const char *var, const char *value,
const struct config_context *ctx,
void *_opts)
@@ -272,6 +348,11 @@ static int reftable_be_config(const char *var, const char *value,
return 0;
}
+static int reftable_be_fsync(int fd)
+{
+ return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
+}
+
static struct ref_store *reftable_be_init(struct repository *repo,
const char *gitdir,
unsigned int store_flags)
@@ -285,15 +366,25 @@ static struct ref_store *reftable_be_init(struct repository *repo,
umask(mask);
base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
- strmap_init(&refs->worktree_stacks);
+ strmap_init(&refs->worktree_backends);
refs->store_flags = store_flags;
refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
- refs->write_options.hash_id = repo->hash_algo->format_id;
- refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
+ switch (repo->hash_algo->format_id) {
+ case GIT_SHA1_FORMAT_ID:
+ refs->write_options.hash_id = REFTABLE_HASH_SHA1;
+ break;
+ case GIT_SHA256_FORMAT_ID:
+ refs->write_options.hash_id = REFTABLE_HASH_SHA256;
+ break;
+ default:
+ BUG("unknown hash algorithm %d", repo->hash_algo->format_id);
+ }
+	refs->write_options.default_permissions = calc_shared_perm(the_repository, 0666 & ~mask);
refs->write_options.disable_auto_compact =
!git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
refs->write_options.lock_timeout_ms = 100;
+ refs->write_options.fsync = reftable_be_fsync;
git_config(reftable_be_config, &refs->write_options);
@@ -320,8 +411,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
strbuf_realpath(&path, gitdir, 0);
}
strbuf_addstr(&path, "/reftable");
- refs->err = reftable_new_stack(&refs->main_stack, path.buf,
- &refs->write_options);
+ refs->err = reftable_backend_init(&refs->main_backend, path.buf,
+ &refs->write_options);
if (refs->err)
goto done;
@@ -337,8 +428,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
strbuf_reset(&path);
strbuf_addf(&path, "%s/reftable", gitdir);
- refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
- &refs->write_options);
+ refs->err = reftable_backend_init(&refs->worktree_backend, path.buf,
+ &refs->write_options);
if (refs->err)
goto done;
}
@@ -357,19 +448,17 @@ static void reftable_be_release(struct ref_store *ref_store)
struct strmap_entry *entry;
struct hashmap_iter iter;
- if (refs->main_stack) {
- reftable_stack_destroy(refs->main_stack);
- refs->main_stack = NULL;
- }
+ if (refs->main_backend.stack)
+ reftable_backend_release(&refs->main_backend);
+ if (refs->worktree_backend.stack)
+ reftable_backend_release(&refs->worktree_backend);
- if (refs->worktree_stack) {
- reftable_stack_destroy(refs->worktree_stack);
- refs->worktree_stack = NULL;
+ strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
+ struct reftable_backend *be = entry->value;
+ reftable_backend_release(be);
+ free(be);
}
-
- strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
- reftable_stack_destroy(entry->value);
- strmap_clear(&refs->worktree_stacks, 0);
+ strmap_clear(&refs->worktree_backends, 0);
}
static int reftable_be_create_on_disk(struct ref_store *ref_store,
@@ -381,21 +470,21 @@ static int reftable_be_create_on_disk(struct ref_store *ref_store,
struct strbuf sb = STRBUF_INIT;
strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
- safe_create_dir(sb.buf, 1);
+ safe_create_dir(the_repository, sb.buf, 1);
strbuf_reset(&sb);
strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
write_file(sb.buf, "ref: refs/heads/.invalid");
- adjust_shared_perm(sb.buf);
+ adjust_shared_perm(the_repository, sb.buf);
strbuf_reset(&sb);
strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
- safe_create_dir(sb.buf, 1);
+ safe_create_dir(the_repository, sb.buf, 1);
strbuf_reset(&sb);
strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
write_file(sb.buf, "this repository uses the reftable format");
- adjust_shared_perm(sb.buf);
+ adjust_shared_perm(the_repository, sb.buf);
strbuf_release(&sb);
return 0;
@@ -458,7 +547,7 @@ struct reftable_ref_iterator {
struct reftable_ref_record ref;
struct object_id oid;
- const char *prefix;
+ char *prefix;
size_t prefix_len;
char **exclude_patterns;
size_t exclude_patterns_index;
@@ -622,20 +711,27 @@ static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
break;
}
- if (iter->err > 0) {
- if (ref_iterator_abort(ref_iterator) != ITER_DONE)
- return ITER_ERROR;
+ if (iter->err > 0)
return ITER_DONE;
- }
-
- if (iter->err < 0) {
- ref_iterator_abort(ref_iterator);
+ if (iter->err < 0)
return ITER_ERROR;
- }
-
return ITER_OK;
}
+static int reftable_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+
+ free(iter->prefix);
+ iter->prefix = xstrdup_or_null(prefix);
+ iter->prefix_len = prefix ? strlen(prefix) : 0;
+ iter->err = reftable_iterator_seek_ref(&iter->iter, prefix);
+
+ return iter->err;
+}
+
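The new `seek` operation re-positions an existing iterator on a fresh prefix instead of allocating a new iterator per prefix. A standalone model of such a re-seekable prefix iterator over a sorted ref list, with invented data:

    #include <stdio.h>
    #include <string.h>

    static const char *refs[] = {
    	"refs/heads/main",
    	"refs/heads/topic",
    	"refs/tags/v1.0",
    	"refs/tags/v2.0",
    };
    #define NUM_REFS (sizeof(refs) / sizeof(*refs))

    struct iter { size_t pos; const char *prefix; };

    /* Position at the first entry >= prefix, like a reftable seek. */
    static void iter_seek(struct iter *it, const char *prefix)
    {
    	it->prefix = prefix;
    	it->pos = 0;
    	while (it->pos < NUM_REFS && strcmp(refs[it->pos], prefix) < 0)
    		it->pos++;
    }

    /* Yield entries until one no longer matches the prefix. */
    static const char *iter_next(struct iter *it)
    {
    	if (it->pos >= NUM_REFS ||
    	    strncmp(refs[it->pos], it->prefix, strlen(it->prefix)))
    		return NULL;
    	return refs[it->pos++];
    }

    int main(void)
    {
    	struct iter it;
    	const char *name;

    	iter_seek(&it, "refs/tags/");  /* first use */
    	while ((name = iter_next(&it)))
    		printf("%s\n", name);

    	iter_seek(&it, "refs/heads/"); /* re-seek the same iterator */
    	while ((name = iter_next(&it)))
    		printf("%s\n", name);
    	return 0;
    }
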
static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
@@ -651,7 +747,7 @@ static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
return -1;
}
-static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void reftable_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct reftable_ref_iterator *iter =
(struct reftable_ref_iterator *)ref_iterator;
@@ -662,14 +758,14 @@ static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
free(iter->exclude_patterns[i]);
free(iter->exclude_patterns);
}
- free(iter);
- return ITER_DONE;
+ free(iter->prefix);
}
static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
.advance = reftable_ref_iterator_advance,
+ .seek = reftable_ref_iterator_seek,
.peel = reftable_ref_iterator_peel,
- .abort = reftable_ref_iterator_abort
+ .release = reftable_ref_iterator_release,
};
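
Note the lifecycle change in this vtable: the old `abort` callback freed the iterator itself and returned `ITER_DONE`, whereas `release` only drops the iterator's resources and leaves freeing the object to the generic layer. A tiny sketch of that split, with stand-in types:

    #include <stdlib.h>
    #include <string.h>

    struct iter {
    	char *prefix; /* resource owned by the iterator */
    };

    /* Like the new release callback: free resources, not the iterator. */
    static void iter_release(struct iter *it)
    {
    	free(it->prefix);
    	it->prefix = NULL;
    }

    int main(void)
    {
    	struct iter *it = calloc(1, sizeof(*it));

    	it->prefix = strdup("refs/heads/");
    	/* ... iterate ... */
    	iter_release(it); /* the old abort would also have done free(it) */
    	free(it);         /* the object's lifetime now rests with the caller */
    	return 0;
    }
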
static int qsort_strcmp(const void *va, const void *vb)
@@ -726,8 +822,6 @@ static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_
iter = xcalloc(1, sizeof(*iter));
base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
- iter->prefix = prefix;
- iter->prefix_len = prefix ? strlen(prefix) : 0;
iter->base.oid = &iter->oid;
iter->flags = flags;
iter->refs = refs;
@@ -741,8 +835,11 @@ static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_
if (ret)
goto done;
- reftable_stack_init_ref_iterator(stack, &iter->iter);
- ret = reftable_iterator_seek_ref(&iter->iter, prefix);
+ ret = reftable_stack_init_ref_iterator(stack, &iter->iter);
+ if (ret)
+ goto done;
+
+ ret = reftable_ref_iterator_seek(&iter->base, prefix);
if (ret)
goto done;
@@ -764,7 +861,7 @@ static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_sto
required_flags |= REF_STORE_ODB;
refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
- main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix,
+ main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
exclude_patterns, flags);
/*
@@ -772,14 +869,14 @@ static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_sto
* right now. If we aren't, then we return the common reftable
* iterator, only.
*/
- if (!refs->worktree_stack)
+ if (!refs->worktree_backend.stack)
return &main_iter->base;
/*
* Otherwise we merge both the common and the per-worktree refs into a
* single iterator.
*/
- worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix,
+ worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
exclude_patterns, flags);
return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
ref_iterator_select, NULL);
@@ -794,17 +891,17 @@ static int reftable_be_read_raw_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_backend *be;
int ret;
if (refs->err < 0)
return refs->err;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
return ret;
- ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
+ ret = reftable_backend_read_ref(be, refname, oid, referent, type);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -821,21 +918,22 @@ static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
- struct reftable_ref_record ref = {0};
+ struct reftable_backend *be;
+ struct object_id oid;
+ unsigned int type = 0;
int ret;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
return ret;
- ret = reftable_stack_read_ref(stack, refname, &ref);
- if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
- strbuf_addstr(referent, ref.value.symref);
- else
+ ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
+ if (ret)
ret = -1;
-
- reftable_ref_record_release(&ref);
+ else if (type == REF_ISSYMREF)
+ ; /* happy */
+ else
+ ret = NOT_A_SYMREF;
return ret;
}
@@ -846,12 +944,13 @@ struct reftable_transaction_update {
struct write_transaction_table_arg {
struct reftable_ref_store *refs;
- struct reftable_stack *stack;
+ struct reftable_backend *be;
struct reftable_addition *addition;
struct reftable_transaction_update *updates;
size_t updates_nr;
size_t updates_alloc;
size_t updates_expected;
+ uint64_t max_index;
};
struct reftable_transaction_data {
@@ -881,27 +980,37 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
struct ref_update *update,
struct strbuf *err)
{
- struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
struct write_transaction_table_arg *arg = NULL;
+ struct reftable_backend *be;
size_t i;
int ret;
/*
+ * This function gets called in a loop, and we don't want to repeatedly
+ * reload the stack for every single ref update. Instead, we manually
+ * reload further down in the case where we haven't yet prepared the
+ * specific `reftable_backend`.
+ */
+ ret = backend_for(&be, refs, update->refname, NULL, 0);
+ if (ret)
+ return ret;
+
+ /*
* Search for a preexisting stack update. If there is one then we add
* the update to it, otherwise we set up a new stack update.
*/
for (i = 0; !arg && i < tx_data->args_nr; i++)
- if (tx_data->args[i].stack == stack)
+ if (tx_data->args[i].be == be)
arg = &tx_data->args[i];
if (!arg) {
struct reftable_addition *addition;
- ret = reftable_stack_reload(stack);
+ ret = reftable_stack_reload(be->stack);
if (ret)
return ret;
- ret = reftable_stack_new_addition(&addition, stack,
+ ret = reftable_stack_new_addition(&addition, be->stack,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
if (ret) {
if (ret == REFTABLE_LOCK_ERROR)
@@ -913,12 +1022,13 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
tx_data->args_alloc);
arg = &tx_data->args[tx_data->args_nr++];
arg->refs = refs;
- arg->stack = stack;
+ arg->be = be;
arg->addition = addition;
arg->updates = NULL;
arg->updates_nr = 0;
arg->updates_alloc = 0;
arg->updates_expected = 0;
+ arg->max_index = 0;
}
arg->updates_expected++;
@@ -959,6 +1069,244 @@ static int queue_transaction_update(struct reftable_ref_store *refs,
return 0;
}
+static enum ref_transaction_error prepare_single_update(struct reftable_ref_store *refs,
+ struct reftable_transaction_data *tx_data,
+ struct ref_transaction *transaction,
+ struct reftable_backend *be,
+ struct ref_update *u,
+ size_t update_idx,
+ struct string_list *refnames_to_check,
+ unsigned int head_type,
+ struct strbuf *head_referent,
+ struct strbuf *referent,
+ struct strbuf *err)
+{
+ enum ref_transaction_error ret = 0;
+ struct object_id current_oid = {0};
+ const char *rewritten_ref;
+
+ /*
+ * There is no need to reload the respective backends here as
+ * we have already reloaded them when preparing the transaction
+ * update. And given that the stacks have been locked there
+ * shouldn't have been any concurrent modifications of the
+ * stack.
+ */
+ ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
+ if (ret)
+ return REF_TRANSACTION_ERROR_GENERIC;
+
+ /* Verify that the new object ID is valid. */
+ if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
+ !(u->flags & REF_SKIP_OID_VERIFICATION) &&
+ !(u->flags & REF_LOG_ONLY)) {
+ struct object *o = parse_object(refs->base.repo, &u->new_oid);
+ if (!o) {
+ strbuf_addf(err,
+ _("trying to write ref '%s' with nonexistent object %s"),
+ u->refname, oid_to_hex(&u->new_oid));
+ return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
+ }
+
+ if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
+ strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
+ oid_to_hex(&u->new_oid), u->refname);
+ return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
+ }
+ }
+
+ /*
+ * When we update the reference that HEAD points to we enqueue
+ * a second log-only update for HEAD so that its reflog is
+ * updated accordingly.
+ */
+ if (head_type == REF_ISSYMREF &&
+ !(u->flags & REF_LOG_ONLY) &&
+ !(u->flags & REF_UPDATE_VIA_HEAD) &&
+ !strcmp(rewritten_ref, head_referent->buf)) {
+ /*
+ * First make sure that HEAD is not already in the
+ * transaction. This check is O(lg N) in the transaction
+ * size, but it happens at most once per transaction.
+ */
+ if (string_list_has_string(&transaction->refnames, "HEAD")) {
+ /* An entry already existed */
+ strbuf_addf(err,
+ _("multiple updates for 'HEAD' (including one "
+ "via its referent '%s') are not allowed"),
+ u->refname);
+ return REF_TRANSACTION_ERROR_NAME_CONFLICT;
+ }
+
+ ref_transaction_add_update(
+ transaction, "HEAD",
+ u->flags | REF_LOG_ONLY | REF_NO_DEREF,
+ &u->new_oid, &u->old_oid, NULL, NULL, NULL,
+ u->msg);
+ }
+
+ ret = reftable_backend_read_ref(be, rewritten_ref,
+ &current_oid, referent, &u->type);
+ if (ret < 0)
+ return REF_TRANSACTION_ERROR_GENERIC;
+ if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
+ struct string_list_item *item;
+ /*
+ * The reference does not exist, and we either have no
+ * old object ID or expect the reference to not exist.
+ * We can thus skip below safety checks as well as the
+ * symref splitting. But we do want to verify that
+ * there is no conflicting reference here so that we
+ * can output a proper error message instead of failing
+ * at a later point.
+ */
+ item = string_list_append(refnames_to_check, u->refname);
+ item->util = xmalloc(sizeof(update_idx));
+ memcpy(item->util, &update_idx, sizeof(update_idx));
+
+ /*
+ * There is no need to write the reference deletion
+ * when the reference in question doesn't exist.
+ */
+ if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
+ ret = queue_transaction_update(refs, tx_data, u,
+ &current_oid, err);
+ if (ret)
+ return REF_TRANSACTION_ERROR_GENERIC;
+ }
+
+ return 0;
+ }
+ if (ret > 0) {
+ /* The reference does not exist, but we expected it to. */
+		strbuf_addf(err, _("cannot lock ref '%s': "
+				   "unable to resolve reference '%s'"),
+ ref_update_original_update_refname(u), u->refname);
+ return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
+ }
+
+ if (u->type & REF_ISSYMREF) {
+ /*
+ * The reftable stack is locked at this point already,
+ * so it is safe to call `refs_resolve_ref_unsafe()`
+ * here without causing races.
+ */
+ const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
+ &current_oid, NULL);
+
+ if (u->flags & REF_NO_DEREF) {
+ if (u->flags & REF_HAVE_OLD && !resolved) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "error reading reference"), u->refname);
+ return REF_TRANSACTION_ERROR_GENERIC;
+ }
+ } else {
+ struct ref_update *new_update;
+ int new_flags;
+
+ new_flags = u->flags;
+ if (!strcmp(rewritten_ref, "HEAD"))
+ new_flags |= REF_UPDATE_VIA_HEAD;
+
+ if (string_list_has_string(&transaction->refnames, referent->buf)) {
+ strbuf_addf(err,
+ _("multiple updates for '%s' (including one "
+ "via symref '%s') are not allowed"),
+ referent->buf, u->refname);
+ return REF_TRANSACTION_ERROR_NAME_CONFLICT;
+ }
+
+ /*
+ * If we are updating a symref (eg. HEAD), we should also
+ * update the branch that the symref points to.
+ *
+ * This is generic functionality, and would be better
+ * done in refs.c, but the current implementation is
+ * intertwined with the locking in files-backend.c.
+ */
+ new_update = ref_transaction_add_update(
+ transaction, referent->buf, new_flags,
+ u->new_target ? NULL : &u->new_oid,
+ u->old_target ? NULL : &u->old_oid,
+ u->new_target, u->old_target,
+ u->committer_info, u->msg);
+
+ new_update->parent_update = u;
+
+ /*
+ * Change the symbolic ref update to log only. Also, it
+ * doesn't need to check its old OID value, as that will be
+ * done when new_update is processed.
+ */
+ u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
+ u->flags &= ~REF_HAVE_OLD;
+ }
+ }
+
+ /*
+ * Verify that the old object matches our expectations. Note
+ * that the error messages here do not make a lot of sense in
+ * the context of the reftable backend as we never lock
+ * individual refs. But the error messages match what the files
+ * backend returns, which keeps our tests happy.
+ */
+ if (u->old_target) {
+ if (!(u->type & REF_ISSYMREF)) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "expected symref with target '%s': "
+ "but is a regular ref"),
+ ref_update_original_update_refname(u),
+ u->old_target);
+ return REF_TRANSACTION_ERROR_EXPECTED_SYMREF;
+ }
+
+ ret = ref_update_check_old_target(referent->buf, u, err);
+ if (ret)
+ return ret;
+ } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
+ if (is_null_oid(&u->old_oid)) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "reference already exists"),
+ ref_update_original_update_refname(u));
+ return REF_TRANSACTION_ERROR_CREATE_EXISTS;
+ } else if (is_null_oid(&current_oid)) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "reference is missing but expected %s"),
+ ref_update_original_update_refname(u),
+ oid_to_hex(&u->old_oid));
+ return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
+ } else {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "is at %s but expected %s"),
+ ref_update_original_update_refname(u),
+ oid_to_hex(&current_oid),
+ oid_to_hex(&u->old_oid));
+ return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE;
+ }
+ }
+
+ /*
+ * If all of the following conditions are true:
+ *
+ * - We're not about to write a symref.
+ * - We're not about to write a log-only entry.
+	 * - Old and new object ID are equal.
+ *
+ * Then we're essentially doing a no-op update that can be
+ * skipped. This is not only for the sake of efficiency, but
+ * also skips writing unneeded reflog entries.
+ */
+ if ((u->type & REF_ISSYMREF) ||
+ (u->flags & REF_LOG_ONLY) ||
+ (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid)))
+ if (queue_transaction_update(refs, tx_data, u, &current_oid, err))
+ return REF_TRANSACTION_ERROR_GENERIC;
+
+ return 0;
+}
+
static int reftable_be_transaction_prepare(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
@@ -966,8 +1314,9 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
- struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
struct reftable_transaction_data *tx_data = NULL;
+ struct reftable_backend *be;
struct object_id head_oid;
unsigned int head_type = 0;
size_t i;
@@ -989,9 +1338,6 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
transaction->updates[i], err);
if (ret)
goto done;
-
- string_list_append(&affected_refnames,
- transaction->updates[i]->refname);
}
/*
@@ -1004,253 +1350,54 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
}
/*
- * Fail if a refname appears more than once in the transaction.
- * This code is taken from the files backend and is a good candidate to
- * be moved into the generic layer.
+ * TODO: it's dubious whether we should reload the stack that "HEAD"
+ * belongs to or not. In theory, it may happen that we only modify
+ * stacks which are _not_ part of the "HEAD" stack. In that case we
+ * wouldn't have prepared any transaction for its stack and would not
+ * have reloaded it, which may mean that it is stale.
+ *
+ * On the other hand, reloading that stack without locking it feels
+ * wrong, too, as the value of "HEAD" could be modified concurrently at
+ * any point in time.
*/
- string_list_sort(&affected_refnames);
- if (ref_update_reject_duplicates(&affected_refnames, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
+ ret = backend_for(&be, refs, "HEAD", NULL, 0);
+ if (ret)
goto done;
- }
- ret = read_ref_without_reload(refs, stack_for(refs, "HEAD", NULL), "HEAD",
- &head_oid, &head_referent, &head_type);
+ ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
+ &head_referent, &head_type);
if (ret < 0)
goto done;
ret = 0;
for (i = 0; i < transaction->nr; i++) {
- struct ref_update *u = transaction->updates[i];
- struct object_id current_oid = {0};
- struct reftable_stack *stack;
- const char *rewritten_ref;
-
- stack = stack_for(refs, u->refname, &rewritten_ref);
-
- /* Verify that the new object ID is valid. */
- if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
- !(u->flags & REF_SKIP_OID_VERIFICATION) &&
- !(u->flags & REF_LOG_ONLY)) {
- struct object *o = parse_object(refs->base.repo, &u->new_oid);
- if (!o) {
- strbuf_addf(err,
- _("trying to write ref '%s' with nonexistent object %s"),
- u->refname, oid_to_hex(&u->new_oid));
- ret = -1;
- goto done;
- }
-
- if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
- strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
- oid_to_hex(&u->new_oid), u->refname);
- ret = -1;
- goto done;
- }
- }
-
- /*
- * When we update the reference that HEAD points to we enqueue
- * a second log-only update for HEAD so that its reflog is
- * updated accordingly.
- */
- if (head_type == REF_ISSYMREF &&
- !(u->flags & REF_LOG_ONLY) &&
- !(u->flags & REF_UPDATE_VIA_HEAD) &&
- !strcmp(rewritten_ref, head_referent.buf)) {
- struct ref_update *new_update;
-
- /*
- * First make sure that HEAD is not already in the
- * transaction. This check is O(lg N) in the transaction
- * size, but it happens at most once per transaction.
- */
- if (string_list_has_string(&affected_refnames, "HEAD")) {
- /* An entry already existed */
- strbuf_addf(err,
- _("multiple updates for 'HEAD' (including one "
- "via its referent '%s') are not allowed"),
- u->refname);
- ret = TRANSACTION_NAME_CONFLICT;
- goto done;
- }
-
- new_update = ref_transaction_add_update(
- transaction, "HEAD",
- u->flags | REF_LOG_ONLY | REF_NO_DEREF,
- &u->new_oid, &u->old_oid, NULL, NULL, u->msg);
- string_list_insert(&affected_refnames, new_update->refname);
- }
-
- ret = read_ref_without_reload(refs, stack, rewritten_ref,
- &current_oid, &referent, &u->type);
- if (ret < 0)
- goto done;
- if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
- /*
- * The reference does not exist, and we either have no
- * old object ID or expect the reference to not exist.
- * We can thus skip below safety checks as well as the
- * symref splitting. But we do want to verify that
- * there is no conflicting reference here so that we
- * can output a proper error message instead of failing
- * at a later point.
- */
- ret = refs_verify_refname_available(ref_store, u->refname,
- &affected_refnames, NULL, err);
- if (ret < 0)
- goto done;
-
- /*
- * There is no need to write the reference deletion
- * when the reference in question doesn't exist.
- */
- if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
- ret = queue_transaction_update(refs, tx_data, u,
- &current_oid, err);
- if (ret)
- goto done;
- }
-
- continue;
- }
- if (ret > 0) {
- /* The reference does not exist, but we expected it to. */
- strbuf_addf(err, _("cannot lock ref '%s': "
- "unable to resolve reference '%s'"),
- ref_update_original_update_refname(u), u->refname);
- ret = -1;
- goto done;
- }
-
- if (u->type & REF_ISSYMREF) {
- /*
- * The reftable stack is locked at this point already,
- * so it is safe to call `refs_resolve_ref_unsafe()`
- * here without causing races.
- */
- const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
- &current_oid, NULL);
-
- if (u->flags & REF_NO_DEREF) {
- if (u->flags & REF_HAVE_OLD && !resolved) {
- strbuf_addf(err, _("cannot lock ref '%s': "
- "error reading reference"), u->refname);
- ret = -1;
- goto done;
- }
- } else {
- struct ref_update *new_update;
- int new_flags;
-
- new_flags = u->flags;
- if (!strcmp(rewritten_ref, "HEAD"))
- new_flags |= REF_UPDATE_VIA_HEAD;
-
- /*
- * If we are updating a symref (eg. HEAD), we should also
- * update the branch that the symref points to.
- *
- * This is generic functionality, and would be better
- * done in refs.c, but the current implementation is
- * intertwined with the locking in files-backend.c.
- */
- new_update = ref_transaction_add_update(
- transaction, referent.buf, new_flags,
- u->new_target ? NULL : &u->new_oid,
- u->old_target ? NULL : &u->old_oid,
- u->new_target, u->old_target, u->msg);
-
- new_update->parent_update = u;
-
- /*
- * Change the symbolic ref update to log only. Also, it
- * doesn't need to check its old OID value, as that will be
- * done when new_update is processed.
- */
- u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
- u->flags &= ~REF_HAVE_OLD;
-
- if (string_list_has_string(&affected_refnames, new_update->refname)) {
- strbuf_addf(err,
- _("multiple updates for '%s' (including one "
- "via symref '%s') are not allowed"),
- referent.buf, u->refname);
- ret = TRANSACTION_NAME_CONFLICT;
- goto done;
- }
- string_list_insert(&affected_refnames, new_update->refname);
- }
- }
-
- /*
- * Verify that the old object matches our expectations. Note
- * that the error messages here do not make a lot of sense in
- * the context of the reftable backend as we never lock
- * individual refs. But the error messages match what the files
- * backend returns, which keeps our tests happy.
- */
- if (u->old_target) {
- if (!(u->type & REF_ISSYMREF)) {
- strbuf_addf(err, _("cannot lock ref '%s': "
- "expected symref with target '%s': "
- "but is a regular ref"),
- ref_update_original_update_refname(u),
- u->old_target);
- ret = -1;
- goto done;
- }
+ ret = prepare_single_update(refs, tx_data, transaction, be,
+ transaction->updates[i], i,
+ &refnames_to_check, head_type,
+ &head_referent, &referent, err);
+ if (ret) {
+ if (ref_transaction_maybe_set_rejected(transaction, i, ret)) {
+ strbuf_reset(err);
+ ret = 0;
- if (ref_update_check_old_target(referent.buf, u, err)) {
- ret = -1;
- goto done;
+ continue;
}
- } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
- if (is_null_oid(&u->old_oid))
- strbuf_addf(err, _("cannot lock ref '%s': "
- "reference already exists"),
- ref_update_original_update_refname(u));
- else if (is_null_oid(&current_oid))
- strbuf_addf(err, _("cannot lock ref '%s': "
- "reference is missing but expected %s"),
- ref_update_original_update_refname(u),
- oid_to_hex(&u->old_oid));
- else
- strbuf_addf(err, _("cannot lock ref '%s': "
- "is at %s but expected %s"),
- ref_update_original_update_refname(u),
- oid_to_hex(&current_oid),
- oid_to_hex(&u->old_oid));
- ret = -1;
goto done;
}
-
- /*
- * If all of the following conditions are true:
- *
- * - We're not about to write a symref.
- * - We're not about to write a log-only entry.
- * - Old and new object ID are different.
- *
- * Then we're essentially doing a no-op update that can be
- * skipped. This is not only for the sake of efficiency, but
- * also skips writing unneeded reflog entries.
- */
- if ((u->type & REF_ISSYMREF) ||
- (u->flags & REF_LOG_ONLY) ||
- (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
- ret = queue_transaction_update(refs, tx_data, u,
- &current_oid, err);
- if (ret)
- goto done;
- }
}
+ ret = refs_verify_refnames_available(ref_store, &refnames_to_check,
+ &transaction->refnames, NULL,
+ transaction,
+ transaction->flags & REF_TRANSACTION_FLAG_INITIAL,
+ err);
+ if (ret < 0)
+ goto done;
+
transaction->backend_data = tx_data;
transaction->state = REF_TRANSACTION_PREPARED;
done:
- assert(ret != REFTABLE_API_ERROR);
if (ret < 0) {
free_transaction_data(tx_data);
transaction->state = REF_TRANSACTION_CLOSED;
@@ -1258,9 +1405,9 @@ done:
strbuf_addf(err, _("reftable: transaction prepare: %s"),
reftable_error_str(ret));
}
- string_list_clear(&affected_refnames, 0);
strbuf_release(&referent);
strbuf_release(&head_referent);
+ string_list_clear(&refnames_to_check, 1);
return ret;
}
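
The prepare loop above no longer aborts the whole transaction on the first failed update: when `ref_transaction_maybe_set_rejected()` accepts the error, the update is recorded as rejected and skipped, and only unrecoverable errors still take the `goto done` path. A standalone model of that flow, assuming a transaction-level allow-failure mode and simplified error codes in place of the real `ref_transaction_error` values:

    #include <stdio.h>

    enum tx_error {
    	TX_OK = 0,
    	TX_INCORRECT_OLD_VALUE = -1, /* stand-in for the real enum values */
    	TX_GENERIC = -2,
    };

    struct update { const char *refname; enum tx_error rejection_err; };

    /*
     * Stand-in for ref_transaction_maybe_set_rejected(): in allow-failure
     * mode, record the error on the update and report that it was handled.
     */
    static int maybe_set_rejected(struct update *u, enum tx_error err,
    			      int allow_failure)
    {
    	if (!allow_failure || err == TX_GENERIC)
    		return 0; /* not recoverable, caller aborts */
    	u->rejection_err = err;
    	return 1;
    }

    int main(void)
    {
    	struct update updates[] = {
    		{ "refs/heads/ok",    TX_OK },
    		{ "refs/heads/stale", TX_OK },
    	};

    	/* Pretend the second update failed its old-value check. */
    	if (maybe_set_rejected(&updates[1], TX_INCORRECT_OLD_VALUE, 1))
    		printf("%s rejected, transaction continues\n",
    		       updates[1].refname);

    	/* Writers later skip rejected updates, cf. `u->rejection_err`. */
    	for (int i = 0; i < 2; i++)
    		if (!updates[i].rejection_err)
    			printf("writing %s\n", updates[i].refname);
    	return 0;
    }
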
@@ -1277,14 +1424,23 @@ static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
static int transaction_update_cmp(const void *a, const void *b)
{
- return strcmp(((struct reftable_transaction_update *)a)->update->refname,
- ((struct reftable_transaction_update *)b)->update->refname);
+ struct reftable_transaction_update *update_a = (struct reftable_transaction_update *)a;
+ struct reftable_transaction_update *update_b = (struct reftable_transaction_update *)b;
+
+ /*
+	 * If there is an index set, it should take precedence (default is 0).
+ * This ensures that updates with indexes are sorted amongst themselves.
+ */
+ if (update_a->update->index || update_b->update->index)
+ return update_a->update->index - update_b->update->index;
+
+ return strcmp(update_a->update->refname, update_b->update->refname);
}
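
`transaction_update_cmp()` thus orders indexed updates by their index and everything else by refname. A standalone demonstration of the resulting order; it uses an overflow-safe three-way comparison instead of the plain subtraction above, which relies on the difference staying within `int` range:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct update { const char *refname; unsigned long long index; };

    static int update_cmp(const void *a, const void *b)
    {
    	const struct update *ua = a, *ub = b;

    	if (ua->index || ub->index)
    		return (ua->index > ub->index) - (ua->index < ub->index);
    	return strcmp(ua->refname, ub->refname);
    }

    int main(void)
    {
    	struct update updates[] = {
    		{ "refs/heads/b",        0 },
    		{ "refs/heads/migrated", 2 },
    		{ "refs/heads/a",        0 },
    		{ "refs/heads/migrated", 1 },
    	};
    	size_t n = sizeof(updates) / sizeof(*updates);

    	qsort(updates, n, sizeof(*updates), update_cmp);
    	for (size_t i = 0; i < n; i++)
    		printf("%s (index %llu)\n",
    		       updates[i].refname, updates[i].index);
    	return 0;
    }
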
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
{
struct write_transaction_table_arg *arg = cb_data;
- uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
struct reftable_log_record *logs = NULL;
struct ident_split committer_ident = {0};
size_t logs_nr = 0, logs_alloc = 0, i;
@@ -1297,12 +1453,22 @@ static int write_transaction_table(struct reftable_writer *writer, void *cb_data
QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
- reftable_writer_set_limits(writer, ts, ts);
+ /*
+ * During reflog migration, we add indexes for a single reflog with
+ * multiple entries. Each entry will contain a different update_index,
+ * so set the limits accordingly.
+ */
+ ret = reftable_writer_set_limits(writer, ts, ts + arg->max_index);
+ if (ret < 0)
+ goto done;
for (i = 0; i < arg->updates_nr; i++) {
struct reftable_transaction_update *tx_update = &arg->updates[i];
struct ref_update *u = tx_update->update;
+ if (u->rejection_err)
+ continue;
+
/*
* Write a reflog entry when updating a ref to point to
* something new in either of the following cases:
@@ -1320,7 +1486,9 @@ static int write_transaction_table(struct reftable_writer *writer, void *cb_data
struct reftable_log_record log = {0};
struct reftable_iterator it = {0};
- reftable_stack_init_log_iterator(arg->stack, &it);
+ ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
+ if (ret < 0)
+ goto done;
/*
* When deleting refs we also delete all reflog entries
@@ -1375,12 +1543,28 @@ static int write_transaction_table(struct reftable_writer *writer, void *cb_data
}
if (create_reflog) {
+ struct ident_split c;
+
ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
log = &logs[logs_nr++];
memset(log, 0, sizeof(*log));
- fill_reftable_log_record(log, &committer_ident);
- log->update_index = ts;
+ if (u->committer_info) {
+ if (split_ident_line(&c, u->committer_info,
+ strlen(u->committer_info)))
+ BUG("failed splitting committer info");
+ } else {
+ c = committer_ident;
+ }
+
+ fill_reftable_log_record(log, &c);
+
+ /*
+ * Updates are sorted by the writer. So updates for the same
+ * refname need to contain different update indices.
+ */
+ log->update_index = ts + u->index;
+
log->refname = xstrdup(u->refname);
memcpy(log->value.update.new_hash,
u->new_oid.hash, GIT_MAX_RAWSZ);
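
The limits and per-entry indices interact as follows: with `ts` as the stack's next update index and `max_index` as the highest `u->index` queued by a reflog migration, the writer accepts update indices in `[ts, ts + max_index]` and each entry is written at `ts + u->index`. A worked example with invented numbers:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
    	uint64_t ts = 42;       /* reftable_stack_next_update_index() */
    	uint64_t max_index = 3; /* transaction->max_index after a migration */

    	printf("writer limits: [%" PRIu64 ", %" PRIu64 "]\n",
    	       ts, ts + max_index);
    	for (uint64_t index = 0; index <= max_index; index++)
    		printf("reflog entry with u->index %" PRIu64
    		       " -> update_index %" PRIu64 "\n",
    		       index, ts + index);
    	return 0;
    }
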
@@ -1465,6 +1649,8 @@ static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
int ret = 0;
for (size_t i = 0; i < tx_data->args_nr; i++) {
+ tx_data->args[i].max_index = transaction->max_index;
+
ret = reftable_addition_add(tx_data->args[i].addition,
write_transaction_table, &tx_data->args[i]);
if (ret < 0)
@@ -1488,13 +1674,6 @@ done:
return ret;
}
-static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
- struct ref_transaction *transaction,
- struct strbuf *err)
-{
- return ref_transaction_commit(transaction, err);
-}
-
static int reftable_be_pack_refs(struct ref_store *ref_store,
struct pack_refs_opts *opts)
{
@@ -1506,9 +1685,9 @@ static int reftable_be_pack_refs(struct ref_store *ref_store,
if (refs->err)
return refs->err;
- stack = refs->worktree_stack;
+ stack = refs->worktree_backend.stack;
if (!stack)
- stack = refs->main_stack;
+ stack = refs->main_backend.stack;
if (opts->flags & PACK_REFS_AUTO)
ret = reftable_stack_auto_compact(stack);
@@ -1539,7 +1718,7 @@ struct write_create_symref_arg {
struct write_copy_arg {
struct reftable_ref_store *refs;
- struct reftable_stack *stack;
+ struct reftable_backend *be;
const char *oldname;
const char *newname;
const char *logmsg;
@@ -1564,7 +1743,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
BUG("failed splitting committer info");
- if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
+ if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
ret = error(_("refname %s not found"), arg->oldname);
goto done;
}
@@ -1589,7 +1768,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
if (arg->delete_old)
string_list_insert(&skip, arg->oldname);
ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
- NULL, &skip, &errbuf);
+ NULL, &skip, 0, &errbuf);
if (ret < 0) {
error("%s", errbuf.buf);
goto done;
@@ -1603,10 +1782,12 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
* the old branch and the creation of the new branch, and we cannot do
* two changes to a reflog in a single update.
*/
- deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
+ deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
if (arg->delete_old)
creation_ts++;
- reftable_writer_set_limits(writer, deletion_ts, creation_ts);
+ ret = reftable_writer_set_limits(writer, deletion_ts, creation_ts);
+ if (ret < 0)
+ goto done;
/*
* Add the new reference. If this is a rename then we also delete the
@@ -1646,8 +1827,8 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
logs_nr++;
- ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
- &head_referent, &head_type);
+ ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
+ &head_referent, &head_type);
if (ret < 0)
goto done;
append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
@@ -1690,7 +1871,10 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
* copy over all log entries from the old reflog. Last but not least,
* when renaming we also have to delete all the old reflog entries.
*/
- reftable_stack_init_log_iterator(arg->stack, &it);
+ ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
+ if (ret < 0)
+ goto done;
+
ret = reftable_iterator_seek_log(&it, arg->oldname);
if (ret < 0)
goto done;
@@ -1760,10 +1944,8 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
- struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
struct write_copy_arg arg = {
.refs = refs,
- .stack = stack,
.oldname = oldrefname,
.newname = newrefname,
.logmsg = logmsg,
@@ -1775,10 +1957,10 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
if (ret)
goto done;
- ret = reftable_stack_add(stack, &write_copy_table, &arg);
+ ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
done:
assert(ret != REFTABLE_API_ERROR);
@@ -1792,10 +1974,8 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
- struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
struct write_copy_arg arg = {
.refs = refs,
- .stack = stack,
.oldname = oldrefname,
.newname = newrefname,
.logmsg = logmsg,
@@ -1806,10 +1986,10 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
if (ret)
goto done;
- ret = reftable_stack_add(stack, &write_copy_table, &arg);
+ ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
done:
assert(ret != REFTABLE_API_ERROR);
@@ -1854,20 +2034,20 @@ static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
break;
}
- if (iter->err > 0) {
- if (ref_iterator_abort(ref_iterator) != ITER_DONE)
- return ITER_ERROR;
+ if (iter->err > 0)
return ITER_DONE;
- }
-
- if (iter->err < 0) {
- ref_iterator_abort(ref_iterator);
+ if (iter->err < 0)
return ITER_ERROR;
- }
-
return ITER_OK;
}
+static int reftable_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
+ const char *prefix UNUSED)
+{
+ BUG("reftable reflog iterator cannot be seeked");
+ return -1;
+}
+
static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
struct object_id *peeled UNUSED)
{
@@ -1875,21 +2055,20 @@ static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSE
return -1;
}
-static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
+static void reftable_reflog_iterator_release(struct ref_iterator *ref_iterator)
{
struct reftable_reflog_iterator *iter =
(struct reftable_reflog_iterator *)ref_iterator;
reftable_log_record_release(&iter->log);
reftable_iterator_destroy(&iter->iter);
strbuf_release(&iter->last_name);
- free(iter);
- return ITER_DONE;
}
static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
.advance = reftable_reflog_iterator_advance,
+ .seek = reftable_reflog_iterator_seek,
.peel = reftable_reflog_iterator_peel,
- .abort = reftable_reflog_iterator_abort
+ .release = reftable_reflog_iterator_release,
};
static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
@@ -1911,7 +2090,10 @@ static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftabl
if (ret < 0)
goto done;
- reftable_stack_init_log_iterator(stack, &iter->iter);
+ ret = reftable_stack_init_log_iterator(stack, &iter->iter);
+ if (ret < 0)
+ goto done;
+
ret = reftable_iterator_seek_log(&iter->iter, "");
if (ret < 0)
goto done;
@@ -1927,11 +2109,11 @@ static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *
reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
struct reftable_reflog_iterator *main_iter, *worktree_iter;
- main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
- if (!refs->worktree_stack)
+ main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
+ if (!refs->worktree_backend.stack)
return &main_iter->base;
- worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
+ worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
ref_iterator_select, NULL);
@@ -1970,15 +2152,26 @@ static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record log = {0};
struct reftable_iterator it = {0};
+ struct reftable_backend *be;
int ret;
if (refs->err < 0)
return refs->err;
- reftable_stack_init_log_iterator(stack, &it);
+ /*
+ * TODO: we should adapt this callsite to reload the stack. There is no
+ * obvious reason why we shouldn't.
+ */
+ ret = backend_for(&be, refs, refname, &refname, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
+
ret = reftable_iterator_seek_log(&it, refname);
while (!ret) {
ret = reftable_iterator_next_log(&it, &log);
@@ -1994,6 +2187,7 @@ static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
break;
}
+done:
reftable_log_record_release(&log);
reftable_iterator_destroy(&it);
return ret;
@@ -2006,16 +2200,27 @@ static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record *logs = NULL;
struct reftable_iterator it = {0};
+ struct reftable_backend *be;
size_t logs_alloc = 0, logs_nr = 0, i;
int ret;
if (refs->err < 0)
return refs->err;
- reftable_stack_init_log_iterator(stack, &it);
+ /*
+ * TODO: we should adapt this callsite to reload the stack. There is no
+ * obvious reason why we shouldn't.
+ */
+ ret = backend_for(&be, refs, refname, &refname, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
+
ret = reftable_iterator_seek_log(&it, refname);
while (!ret) {
struct reftable_log_record log = {0};
@@ -2052,20 +2257,23 @@ static int reftable_be_reflog_exists(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record log = {0};
struct reftable_iterator it = {0};
+ struct reftable_backend *be;
int ret;
ret = refs->err;
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
if (ret < 0)
goto done;
- reftable_stack_init_log_iterator(stack, &it);
ret = reftable_iterator_seek_log(&it, refname);
if (ret < 0)
goto done;
@@ -2110,10 +2318,12 @@ static int write_reflog_existence_table(struct reftable_writer *writer,
if (ret <= 0)
goto done;
- reftable_writer_set_limits(writer, ts, ts);
+ ret = reftable_writer_set_limits(writer, ts, ts);
+ if (ret < 0)
+ goto done;
/*
- * The existence entry has both old and new object ID set to the the
+ * The existence entry has both old and new object ID set to the
* null object ID. Our iterators are aware of this and will not present
* them to their callers.
*/
@@ -2134,10 +2344,9 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_backend *be;
struct write_reflog_existence_arg arg = {
.refs = refs,
- .stack = stack,
.refname = refname,
};
int ret;
@@ -2146,11 +2355,12 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
goto done;
+ arg.stack = be->stack;
- ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
+ ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
done:
return ret;
@@ -2169,9 +2379,13 @@ static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_da
uint64_t ts = reftable_stack_next_update_index(arg->stack);
int ret;
- reftable_writer_set_limits(writer, ts, ts);
+ ret = reftable_writer_set_limits(writer, ts, ts);
+ if (ret < 0)
+ goto out;
- reftable_stack_init_log_iterator(arg->stack, &it);
+ ret = reftable_stack_init_log_iterator(arg->stack, &it);
+ if (ret < 0)
+ goto out;
/*
* In order to delete a table we need to delete all reflog entries one
@@ -2195,6 +2409,7 @@ static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_da
ret = reftable_writer_add_log(writer, &tombstone);
}
+out:
reftable_log_record_release(&log);
reftable_iterator_destroy(&it);
return ret;
@@ -2205,17 +2420,18 @@ static int reftable_be_delete_reflog(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_backend *be;
struct write_reflog_delete_arg arg = {
- .stack = stack,
.refname = refname,
};
int ret;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
return ret;
- ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
+ arg.stack = be->stack;
+
+ ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
assert(ret != REFTABLE_API_ERROR);
return ret;
@@ -2242,7 +2458,9 @@ static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_da
if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
live_records++;
- reftable_writer_set_limits(writer, ts, ts);
+ ret = reftable_writer_set_limits(writer, ts, ts);
+ if (ret < 0)
+ return ret;
if (!is_null_oid(&arg->update_oid)) {
struct reftable_ref_record ref = {0};
@@ -2314,41 +2532,41 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
*/
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record *logs = NULL;
struct reftable_log_record *rewritten = NULL;
- struct reftable_ref_record ref_record = {0};
struct reftable_iterator it = {0};
struct reftable_addition *add = NULL;
struct reflog_expiry_arg arg = {0};
+ struct reftable_backend *be;
struct object_id oid = {0};
+ struct strbuf referent = STRBUF_INIT;
uint8_t *last_hash = NULL;
size_t logs_nr = 0, logs_alloc = 0, i;
+ unsigned int type = 0;
int ret;
if (refs->err < 0)
return refs->err;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret < 0)
goto done;
- reftable_stack_init_log_iterator(stack, &it);
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
ret = reftable_iterator_seek_log(&it, refname);
if (ret < 0)
goto done;
- ret = reftable_stack_new_addition(&add, stack, 0);
+ ret = reftable_stack_new_addition(&add, be->stack, 0);
if (ret < 0)
goto done;
- ret = reftable_stack_read_ref(stack, refname, &ref_record);
+ ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
if (ret < 0)
goto done;
- if (reftable_ref_record_val1(&ref_record))
- oidread(&oid, reftable_ref_record_val1(&ref_record),
- ref_store->repo->hash_algo);
prepare_fn(refname, &oid, policy_cb_data);
while (1) {
@@ -2415,15 +2633,14 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
}
}
- if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
- reftable_ref_record_val1(&ref_record))
+ if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
arg.refs = refs;
arg.records = rewritten;
arg.len = logs_nr;
- arg.stack = stack,
- arg.refname = refname,
+ arg.stack = be->stack;
+ arg.refname = refname;
ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
if (ret < 0)
@@ -2441,18 +2658,19 @@ done:
cleanup_fn(policy_cb_data);
assert(ret != REFTABLE_API_ERROR);
- reftable_ref_record_release(&ref_record);
reftable_iterator_destroy(&it);
reftable_addition_destroy(add);
for (i = 0; i < logs_nr; i++)
reftable_log_record_release(&logs[i]);
+ strbuf_release(&referent);
free(logs);
free(rewritten);
return ret;
}
static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
- struct fsck_options *o UNUSED)
+ struct fsck_options *o UNUSED,
+ struct worktree *wt UNUSED)
{
return 0;
}
@@ -2467,7 +2685,6 @@ struct ref_storage_be refs_be_reftable = {
.transaction_prepare = reftable_be_transaction_prepare,
.transaction_finish = reftable_be_transaction_finish,
.transaction_abort = reftable_be_transaction_abort,
- .initial_transaction_commit = reftable_be_initial_transaction_commit,
.pack_refs = reftable_be_pack_refs,
.rename_ref = reftable_be_rename_ref,