Diffstat (limited to 'builtin/fast-import.c')
-rw-r--r--  builtin/fast-import.c  168
1 file changed, 106 insertions, 62 deletions
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 1e7ab67f6e..b1389c5921 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -1,4 +1,6 @@
#define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
#include "builtin.h"
#include "abspath.h"
#include "environment.h"
@@ -13,6 +15,7 @@
#include "delta.h"
#include "pack.h"
#include "path.h"
+#include "read-cache-ll.h"
#include "refs.h"
#include "csum-file.h"
#include "quote.h"
@@ -21,7 +24,7 @@
#include "packfile.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store-ll.h"
+#include "odb.h"
#include "mem-pool.h"
#include "commit-reach.h"
#include "khash.h"
@@ -179,6 +182,7 @@ static unsigned long branch_load_count;
static int failure;
static FILE *pack_edges;
static unsigned int show_stats = 1;
+static unsigned int quiet;
static int global_argc;
static const char **global_argv;
static const char *global_prefix;
@@ -324,7 +328,7 @@ static void write_branch_report(FILE *rpt, struct branch *b)
static void write_crash_report(const char *err)
{
- char *loc = git_pathdup("fast_import_crash_%"PRIuMAX, (uintmax_t) getpid());
+ char *loc = repo_git_path(the_repository, "fast_import_crash_%"PRIuMAX, (uintmax_t) getpid());
FILE *rpt = fopen(loc, "w");
struct branch *b;
unsigned long lu;
@@ -759,13 +763,15 @@ static void start_packfile(void)
struct packed_git *p;
int pack_fd;
- pack_fd = odb_mkstemp(&tmp_file, "pack/tmp_pack_XXXXXX");
+ pack_fd = odb_mkstemp(the_repository->objects, &tmp_file,
+ "pack/tmp_pack_XXXXXX");
FLEX_ALLOC_STR(p, pack_name, tmp_file.buf);
strbuf_release(&tmp_file);
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = hashfd(pack_fd, p->pack_name);
+ p->repo = the_repository;
+ pack_file = hashfd(the_repository->hash_algo, pack_fd, p->pack_name);
pack_data = p;
pack_size = write_pack_header(pack_file, 0);
@@ -793,8 +799,8 @@ static const char *create_index(void)
if (c != last)
die("internal consistency error creating the index");
- tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
- pack_data->hash);
+ tmpfile = write_idx_file(the_repository, NULL, idx, object_count,
+ &pack_idx_opts, pack_data->hash);
free(idx);
return tmpfile;
}
@@ -805,19 +811,20 @@ static char *keep_pack(const char *curr_index_name)
struct strbuf name = STRBUF_INIT;
int keep_fd;
- odb_pack_name(&name, pack_data->hash, "keep");
- keep_fd = odb_pack_keep(name.buf);
+ odb_pack_name(pack_data->repo, &name, pack_data->hash, "keep");
+ keep_fd = safe_create_file_with_leading_directories(pack_data->repo,
+ name.buf);
if (keep_fd < 0)
die_errno("cannot create keep file");
write_or_die(keep_fd, keep_msg, strlen(keep_msg));
if (close(keep_fd))
die_errno("failed to write keep file");
- odb_pack_name(&name, pack_data->hash, "pack");
+ odb_pack_name(pack_data->repo, &name, pack_data->hash, "pack");
if (finalize_object_file(pack_data->pack_name, name.buf))
die("cannot store pack file");
- odb_pack_name(&name, pack_data->hash, "idx");
+ odb_pack_name(pack_data->repo, &name, pack_data->hash, "idx");
if (finalize_object_file(curr_index_name, name.buf))
die("cannot store index file");
free((void *)curr_index_name);
@@ -831,7 +838,7 @@ static void unkeep_all_packs(void)
for (k = 0; k < pack_id; k++) {
struct packed_git *p = all_packs[k];
- odb_pack_name(&name, p->hash, "keep");
+ odb_pack_name(p->repo, &name, p->hash, "keep");
unlink_or_warn(name.buf);
}
strbuf_release(&name);
@@ -873,9 +880,10 @@ static void end_packfile(void)
close_pack_windows(pack_data);
finalize_hashfile(pack_file, cur_pack_oid.hash, FSYNC_COMPONENT_PACK, 0);
- fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
- pack_data->pack_name, object_count,
- cur_pack_oid.hash, pack_size);
+ fixup_pack_header_footer(the_hash_algo, pack_data->pack_fd,
+ pack_data->hash, pack_data->pack_name,
+ object_count, cur_pack_oid.hash,
+ pack_size);
if (object_count <= unpack_limit) {
if (!loosen_small_pack(pack_data)) {
@@ -888,7 +896,7 @@ static void end_packfile(void)
idx_name = keep_pack(create_index());
/* Register the packfile with core git's machinery. */
- new_p = add_packed_git(idx_name, strlen(idx_name), 1);
+ new_p = add_packed_git(pack_data->repo, idx_name, strlen(idx_name), 1);
if (!new_p)
die("core git rejected index %s", idx_name);
all_packs[pack_id] = new_p;
@@ -948,15 +956,15 @@ static int store_object(
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_hash_ctx c;
+ struct git_hash_ctx c;
git_zstream s;
hdrlen = format_object_header((char *)hdr, sizeof(hdr), type,
dat->len);
the_hash_algo->init_fn(&c);
- the_hash_algo->update_fn(&c, hdr, hdrlen);
- the_hash_algo->update_fn(&c, dat->buf, dat->len);
- the_hash_algo->final_oid_fn(&oid, &c);
+ git_hash_update(&c, hdr, hdrlen);
+ git_hash_update(&c, dat->buf, dat->len);
+ git_hash_final_oid(&oid, &c);
if (oidout)
oidcpy(oidout, &oid);
@@ -966,8 +974,7 @@ static int store_object(
if (e->idx.offset) {
duplicate_count_by_type[type]++;
return 1;
- } else if (find_sha1_pack(oid.hash,
- get_all_packs(the_repository))) {
+ } else if (find_oid_pack(&oid, get_all_packs(the_repository))) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1091,7 +1098,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_hash_ctx c;
+ struct git_hash_ctx c;
git_zstream s;
struct hashfile_checkpoint checkpoint;
int status = Z_OK;
@@ -1102,14 +1109,14 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
|| (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
cycle_packfile();
- the_hash_algo->init_fn(&checkpoint.ctx);
+ hashfile_checkpoint_init(pack_file, &checkpoint);
hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = format_object_header((char *)out_buf, out_sz, OBJ_BLOB, len);
the_hash_algo->init_fn(&c);
- the_hash_algo->update_fn(&c, out_buf, hdrlen);
+ git_hash_update(&c, out_buf, hdrlen);
crc32_begin(pack_file);
@@ -1127,7 +1134,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- the_hash_algo->update_fn(&c, in_buf, n);
+ git_hash_update(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
@@ -1153,7 +1160,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
}
}
git_deflate_end(&s);
- the_hash_algo->final_oid_fn(&oid, &c);
+ git_hash_final_oid(&oid, &c);
if (oidout)
oidcpy(oidout, &oid);
@@ -1167,8 +1174,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
duplicate_count_by_type[OBJ_BLOB]++;
truncate_pack(&checkpoint);
- } else if (find_sha1_pack(oid.hash,
- get_all_packs(the_repository))) {
+ } else if (find_oid_pack(&oid, get_all_packs(the_repository))) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1259,7 +1265,7 @@ static void load_tree(struct tree_entry *root)
die("Can't load tree %s", oid_to_hex(oid));
} else {
enum object_type type;
- buf = repo_read_object_file(the_repository, oid, &type, &size);
+ buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf || type != OBJ_TREE)
die("Can't load tree %s", oid_to_hex(oid));
}
@@ -1604,7 +1610,19 @@ static int update_branch(struct branch *b)
struct ref_transaction *transaction;
struct object_id old_oid;
struct strbuf err = STRBUF_INIT;
-
+ static const char *replace_prefix = "refs/replace/";
+
+ if (starts_with(b->name, replace_prefix) &&
+ !strcmp(b->name + strlen(replace_prefix),
+ oid_to_hex(&b->oid))) {
+ if (!quiet)
+ warning("Dropping %s since it would point to "
+ "itself (i.e. to %s)",
+ b->name, oid_to_hex(&b->oid));
+ refs_delete_ref(get_main_ref_store(the_repository),
+ NULL, b->name, NULL, 0);
+ return 0;
+ }
if (is_null_oid(&b->oid)) {
if (b->delete)
refs_delete_ref(get_main_ref_store(the_repository),
@@ -1636,7 +1654,7 @@ static int update_branch(struct branch *b)
}
}
transaction = ref_store_transaction_begin(get_main_ref_store(the_repository),
- &err);
+ 0, &err);
if (!transaction ||
ref_transaction_update(transaction, b->name, &b->oid, &old_oid,
NULL, NULL, 0, msg, &err) ||
@@ -1671,7 +1689,7 @@ static void dump_tags(void)
struct ref_transaction *transaction;
transaction = ref_store_transaction_begin(get_main_ref_store(the_repository),
- &err);
+ 0, &err);
if (!transaction) {
failure |= error("%s", err.buf);
goto cleanup;
@@ -1704,7 +1722,7 @@ static void dump_marks(void)
if (!export_marks_file || (import_marks_file && !import_marks_file_done))
return;
- if (safe_create_leading_directories_const(export_marks_file)) {
+ if (safe_create_leading_directories_const(the_repository, export_marks_file)) {
failure |= error_errno("unable to create leading directories of %s",
export_marks_file);
return;
@@ -1738,8 +1756,8 @@ static void insert_object_entry(struct mark_set **s, struct object_id *oid, uint
struct object_entry *e;
e = find_object(oid);
if (!e) {
- enum object_type type = oid_object_info(the_repository,
- oid, NULL);
+ enum object_type type = odb_read_object_info(the_repository->objects,
+ oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(oid));
e = insert_object(oid);
@@ -2005,7 +2023,7 @@ static void parse_and_store_blob(
static struct strbuf buf = STRBUF_INIT;
uintmax_t len;
- if (parse_data(&buf, big_file_threshold, &len))
+ if (parse_data(&buf, repo_settings_get_big_file_threshold(the_repository), &len))
store_object(OBJ_BLOB, &buf, last, oidout, mark);
else {
if (last) {
@@ -2398,8 +2416,8 @@ static void file_change_m(const char *p, struct branch *b)
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
enum object_type type = oe ? oe->type :
- oid_object_info(the_repository, &oid,
- NULL);
+ odb_read_object_info(the_repository->objects,
+ &oid, NULL);
if (type < 0)
die("%s not found: %s",
S_ISDIR(mode) ? "Tree" : "Blob",
@@ -2414,6 +2432,9 @@ static void file_change_m(const char *p, struct branch *b)
tree_content_replace(&b->branch_tree, &oid, mode, NULL);
return;
}
+
+ if (!verify_path(path.buf, mode))
+ die("invalid path '%s'", path.buf);
tree_content_set(&b->branch_tree, path.buf, &oid, mode, NULL);
}
@@ -2451,6 +2472,8 @@ static void file_change_cr(const char *p, struct branch *b, int rename)
leaf.tree);
return;
}
+ if (!verify_path(dest.buf, leaf.versions[1].mode))
+ die("invalid path '%s'", dest.buf);
tree_content_set(&b->branch_tree, dest.buf,
&leaf.versions[1].oid,
leaf.versions[1].mode,
@@ -2512,10 +2535,9 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
oidcpy(&commit_oid, &commit_oe->idx.oid);
} else if (!repo_get_oid(the_repository, p, &commit_oid)) {
unsigned long size;
- char *buf = read_object_with_reference(the_repository,
- &commit_oid,
- OBJ_COMMIT, &size,
- &commit_oid);
+ char *buf = odb_read_object_peeled(the_repository->objects,
+ &commit_oid, OBJ_COMMIT, &size,
+ &commit_oid);
if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", p);
free(buf);
@@ -2530,7 +2552,7 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
die("Not a blob (actually a %s): %s",
type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
- enum object_type type = oid_object_info(the_repository, &oid,
+ enum object_type type = odb_read_object_info(the_repository->objects, &oid,
NULL);
if (type < 0)
die("Blob not found: %s", command_buf.buf);
@@ -2581,9 +2603,8 @@ static void parse_from_existing(struct branch *b)
unsigned long size;
char *buf;
- buf = read_object_with_reference(the_repository,
- &b->oid, OBJ_COMMIT, &size,
- &b->oid);
+ buf = odb_read_object_peeled(the_repository->objects, &b->oid,
+ OBJ_COMMIT, &size, &b->oid);
parse_from_commit(b, buf, size);
free(buf);
}
@@ -2676,10 +2697,9 @@ static struct hash_list *parse_merge(unsigned int *count)
oidcpy(&n->oid, &oe->idx.oid);
} else if (!repo_get_oid(the_repository, from, &n->oid)) {
unsigned long size;
- char *buf = read_object_with_reference(the_repository,
- &n->oid,
- OBJ_COMMIT,
- &size, &n->oid);
+ char *buf = odb_read_object_peeled(the_repository->objects,
+ &n->oid, OBJ_COMMIT,
+ &size, &n->oid);
if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", from);
free(buf);
@@ -2698,10 +2718,13 @@ static struct hash_list *parse_merge(unsigned int *count)
static void parse_new_commit(const char *arg)
{
+ static struct strbuf sig = STRBUF_INIT;
static struct strbuf msg = STRBUF_INIT;
+ struct string_list siglines = STRING_LIST_INIT_NODUP;
struct branch *b;
char *author = NULL;
char *committer = NULL;
+ char *sig_alg = NULL;
char *encoding = NULL;
struct hash_list *merge_list = NULL;
unsigned int merge_count;
@@ -2725,6 +2748,13 @@ static void parse_new_commit(const char *arg)
}
if (!committer)
die("Expected committer but didn't get one");
+ if (skip_prefix(command_buf.buf, "gpgsig ", &v)) {
+ sig_alg = xstrdup(v);
+ read_next_command();
+ parse_data(&sig, 0, NULL);
+ read_next_command();
+ } else
+ strbuf_setlen(&sig, 0);
if (skip_prefix(command_buf.buf, "encoding ", &v)) {
encoding = xstrdup(v);
read_next_command();
@@ -2798,10 +2828,23 @@ static void parse_new_commit(const char *arg)
strbuf_addf(&new_data,
"encoding %s\n",
encoding);
+ if (sig_alg) {
+ if (!strcmp(sig_alg, "sha1"))
+ strbuf_addstr(&new_data, "gpgsig ");
+ else if (!strcmp(sig_alg, "sha256"))
+ strbuf_addstr(&new_data, "gpgsig-sha256 ");
+ else
+ die("Expected gpgsig algorithm sha1 or sha256, got %s", sig_alg);
+ string_list_split_in_place(&siglines, sig.buf, "\n", -1);
+ strbuf_add_separated_string_list(&new_data, "\n ", &siglines);
+ strbuf_addch(&new_data, '\n');
+ }
strbuf_addch(&new_data, '\n');
strbuf_addbuf(&new_data, &msg);
+ string_list_clear(&siglines, 1);
free(author);
free(committer);
+ free(sig_alg);
free(encoding);
if (!store_object(OBJ_COMMIT, &new_data, NULL, &b->oid, next_mark))
@@ -2849,7 +2892,8 @@ static void parse_new_tag(const char *arg)
} else if (!repo_get_oid(the_repository, from, &oid)) {
struct object_entry *oe = find_object(&oid);
if (!oe) {
- type = oid_object_info(the_repository, &oid, NULL);
+ type = odb_read_object_info(the_repository->objects,
+ &oid, NULL);
if (type < 0)
die("Not a valid object: %s", from);
} else
@@ -2955,7 +2999,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
char *buf;
if (!oe || oe->pack_id == MAX_PACK_ID) {
- buf = repo_read_object_file(the_repository, oid, &type, &size);
+ buf = odb_read_object(the_repository->objects, oid, &type, &size);
} else {
type = oe->type;
buf = gfi_unpack_entry(oe, &size);
@@ -3039,8 +3083,8 @@ static struct object_entry *dereference(struct object_entry *oe,
const unsigned hexsz = the_hash_algo->hexsz;
if (!oe) {
- enum object_type type = oid_object_info(the_repository, oid,
- NULL);
+ enum object_type type = odb_read_object_info(the_repository->objects,
+ oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(oid));
/* cache it! */
@@ -3063,8 +3107,8 @@ static struct object_entry *dereference(struct object_entry *oe,
buf = gfi_unpack_entry(oe, &size);
} else {
enum object_type unused;
- buf = repo_read_object_file(the_repository, oid, &unused,
- &size);
+ buf = odb_read_object(the_repository->objects, oid,
+ &unused, &size);
}
if (!buf)
die("Can't load object %s", oid_to_hex(oid));
@@ -3259,7 +3303,7 @@ static char* make_fast_import_path(const char *path)
{
if (!relative_marks_paths || is_absolute_path(path))
return prefix_filename(global_prefix, path);
- return git_pathdup("info/fast-import/%s", path);
+ return repo_git_path(the_repository, "info/fast-import/%s", path);
}
static void option_import_marks(const char *marks,
@@ -3381,7 +3425,7 @@ static int parse_one_option(const char *option)
unsigned long v;
if (!git_parse_ulong(option, &v))
return 0;
- big_file_threshold = v;
+ repo_settings_set_big_file_threshold(the_repository, v);
} else if (skip_prefix(option, "depth=", &option)) {
option_depth(option);
} else if (skip_prefix(option, "active-branches=", &option)) {
@@ -3390,6 +3434,7 @@ static int parse_one_option(const char *option)
option_export_pack_edges(option);
} else if (!strcmp(option, "quiet")) {
show_stats = 0;
+ quiet = 1;
} else if (!strcmp(option, "stats")) {
show_stats = 1;
} else if (!strcmp(option, "allow-unsafe-features")) {
@@ -3540,12 +3585,11 @@ static void parse_argv(void)
int cmd_fast_import(int argc,
const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
unsigned int i;
- if (argc == 2 && !strcmp(argv[1], "-h"))
- usage(fast_import_usage);
+ show_usage_if_asked(argc, argv, fast_import_usage);
reset_pack_idx_option(&pack_idx_opts);
git_pack_config();
@@ -3661,7 +3705,7 @@ int cmd_fast_import(int argc,
fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
fprintf(stderr, "---------------------------------------------------------------------\n");
- pack_report();
+ pack_report(repo);
fprintf(stderr, "---------------------------------------------------------------------\n");
fprintf(stderr, "\n");
}