Diffstat (limited to 'object-file.c')
 -rw-r--r--   object-file.c | 497
 1 file changed, 362 insertions, 135 deletions
diff --git a/object-file.c b/object-file.c
index 8be57f48de..5b270f046d 100644
--- a/object-file.c
+++ b/object-file.c
@@ -167,49 +167,49 @@ static void git_hash_unknown_final_oid(struct object_id *oid, git_hash_ctx *ctx)
 
 const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
 	{
-		NULL,
-		0x00000000,
-		0,
-		0,
-		0,
-		git_hash_unknown_init,
-		git_hash_unknown_clone,
-		git_hash_unknown_update,
-		git_hash_unknown_final,
-		git_hash_unknown_final_oid,
-		NULL,
-		NULL,
-		NULL,
+		.name = NULL,
+		.format_id = 0x00000000,
+		.rawsz = 0,
+		.hexsz = 0,
+		.blksz = 0,
+		.init_fn = git_hash_unknown_init,
+		.clone_fn = git_hash_unknown_clone,
+		.update_fn = git_hash_unknown_update,
+		.final_fn = git_hash_unknown_final,
+		.final_oid_fn = git_hash_unknown_final_oid,
+		.empty_tree = NULL,
+		.empty_blob = NULL,
+		.null_oid = NULL,
 	},
 	{
-		"sha1",
-		GIT_SHA1_FORMAT_ID,
-		GIT_SHA1_RAWSZ,
-		GIT_SHA1_HEXSZ,
-		GIT_SHA1_BLKSZ,
-		git_hash_sha1_init,
-		git_hash_sha1_clone,
-		git_hash_sha1_update,
-		git_hash_sha1_final,
-		git_hash_sha1_final_oid,
-		&empty_tree_oid,
-		&empty_blob_oid,
-		&null_oid_sha1,
+		.name = "sha1",
+		.format_id = GIT_SHA1_FORMAT_ID,
+		.rawsz = GIT_SHA1_RAWSZ,
+		.hexsz = GIT_SHA1_HEXSZ,
+		.blksz = GIT_SHA1_BLKSZ,
+		.init_fn = git_hash_sha1_init,
+		.clone_fn = git_hash_sha1_clone,
+		.update_fn = git_hash_sha1_update,
+		.final_fn = git_hash_sha1_final,
+		.final_oid_fn = git_hash_sha1_final_oid,
+		.empty_tree = &empty_tree_oid,
+		.empty_blob = &empty_blob_oid,
+		.null_oid = &null_oid_sha1,
 	},
 	{
-		"sha256",
-		GIT_SHA256_FORMAT_ID,
-		GIT_SHA256_RAWSZ,
-		GIT_SHA256_HEXSZ,
-		GIT_SHA256_BLKSZ,
-		git_hash_sha256_init,
-		git_hash_sha256_clone,
-		git_hash_sha256_update,
-		git_hash_sha256_final,
-		git_hash_sha256_final_oid,
-		&empty_tree_oid_sha256,
-		&empty_blob_oid_sha256,
-		&null_oid_sha256,
+		.name = "sha256",
+		.format_id = GIT_SHA256_FORMAT_ID,
+		.rawsz = GIT_SHA256_RAWSZ,
+		.hexsz = GIT_SHA256_HEXSZ,
+		.blksz = GIT_SHA256_BLKSZ,
+		.init_fn = git_hash_sha256_init,
+		.clone_fn = git_hash_sha256_clone,
+		.update_fn = git_hash_sha256_update,
+		.final_fn = git_hash_sha256_final,
+		.final_oid_fn = git_hash_sha256_final_oid,
+		.empty_tree = &empty_tree_oid_sha256,
+		.empty_blob = &empty_blob_oid_sha256,
+		.null_oid = &null_oid_sha256,
 	}
 };
 
@@ -274,10 +274,11 @@ static struct cached_object {
 static int cached_object_nr, cached_object_alloc;
 
 static struct cached_object empty_tree = {
-	{ EMPTY_TREE_SHA1_BIN_LITERAL },
-	OBJ_TREE,
-	"",
-	0
+	.oid = {
+		.hash = EMPTY_TREE_SHA1_BIN_LITERAL,
+	},
+	.type = OBJ_TREE,
+	.buf = "",
 };
 
 static struct cached_object *find_cached_object(const struct object_id *oid)
@@ -837,7 +838,7 @@ static void fill_alternate_refs_command(struct child_process *cmd,
 		}
 	}
 
-	strvec_pushv(&cmd->env_array, (const char **)local_repo_env);
+	strvec_pushv(&cmd->env, (const char **)local_repo_env);
 	cmd->out = -1;
 }
 
@@ -996,7 +997,7 @@ int has_loose_object_nonlocal(const struct object_id *oid)
 	return check_and_freshen_nonlocal(oid, 0);
 }
 
-static int has_loose_object(const struct object_id *oid)
+int has_loose_object(const struct object_id *oid)
 {
 	return check_and_freshen(oid, 0);
 }
@@ -1049,35 +1050,50 @@ void *xmmap(void *start, size_t length,
 	return ret;
 }
 
-/*
- * With an in-core object data in "map", rehash it to make sure the
- * object name actually matches "oid" to detect object corruption.
- * With "map" == NULL, try reading the object named with "oid" using
- * the streaming interface and rehash it to do the same.
- */
+static int format_object_header_literally(char *str, size_t size,
+					  const char *type, size_t objsize)
+{
+	return xsnprintf(str, size, "%s %"PRIuMAX, type, (uintmax_t)objsize) + 1;
+}
+
+int format_object_header(char *str, size_t size, enum object_type type,
+			 size_t objsize)
+{
+	const char *name = type_name(type);
+
+	if (!name)
+		BUG("could not get a type name for 'enum object_type' value %d", type);
+
+	return format_object_header_literally(str, size, name, objsize);
+}
+
 int check_object_signature(struct repository *r, const struct object_id *oid,
-			   void *map, unsigned long size, const char *type,
-			   struct object_id *real_oidp)
+			   void *buf, unsigned long size,
+			   enum object_type type)
+{
+	struct object_id real_oid;
+
+	hash_object_file(r->hash_algo, buf, size, type, &real_oid);
+
+	return !oideq(oid, &real_oid) ? -1 : 0;
+}
+
+int stream_object_signature(struct repository *r, const struct object_id *oid)
 {
-	struct object_id tmp;
-	struct object_id *real_oid = real_oidp ? real_oidp : &tmp;
+	struct object_id real_oid;
+	unsigned long size;
 	enum object_type obj_type;
 	struct git_istream *st;
 	git_hash_ctx c;
 	char hdr[MAX_HEADER_LEN];
 	int hdrlen;
 
-	if (map) {
-		hash_object_file(r->hash_algo, map, size, type, real_oid);
-		return !oideq(oid, real_oid) ? -1 : 0;
-	}
-
 	st = open_istream(r, oid, &obj_type, &size, NULL);
 	if (!st)
 		return -1;
 
 	/* Generate the header */
-	hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(obj_type), (uintmax_t)size) + 1;
+	hdrlen = format_object_header(hdr, sizeof(hdr), obj_type, size);
 
 	/* Sha1.. */
 	r->hash_algo->init_fn(&c);
@@ -1094,9 +1110,9 @@ int check_object_signature(struct repository *r, const struct object_id *oid,
 			break;
 		r->hash_algo->update_fn(&c, buf, readlen);
 	}
-	r->hash_algo->final_oid_fn(real_oid, &c);
+	r->hash_algo->final_oid_fn(&real_oid, &c);
 	close_istream(st);
-	return !oideq(oid, real_oid) ? -1 : 0;
+	return !oideq(oid, &real_oid) ? -1 : 0;
 }
 
 int git_open_cloexec(const char *name, int flags)
@@ -1662,7 +1678,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type,
 {
 	struct cached_object *co;
 
-	hash_object_file(the_hash_algo, buf, len, type_name(type), oid);
+	hash_object_file(the_hash_algo, buf, len, type, oid);
 	if (has_object_file_with_flags(oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) ||
 	    find_cached_object(oid))
 		return 0;
@@ -1712,7 +1728,7 @@ void *read_object_file_extended(struct repository *r,
 		die(_("loose object %s (stored in %s) is corrupt"),
 		    oid_to_hex(repl), path);
 
-	if ((p = has_packed_and_bad(r, repl)) != NULL)
+	if ((p = has_packed_and_bad(r, repl)))
 		die(_("packed object %s (stored in %s) is corrupt"),
 		    oid_to_hex(repl), p->pack_name);
 	obj_read_unlock();
@@ -1722,16 +1738,15 @@ void *read_object_with_reference(struct repository *r,
 				 const struct object_id *oid,
-				 const char *required_type_name,
+				 enum object_type required_type,
 				 unsigned long *size,
 				 struct object_id *actual_oid_return)
 {
-	enum object_type type, required_type;
+	enum object_type type;
 	void *buffer;
 	unsigned long isize;
 	struct object_id actual_oid;
 
-	required_type = type_from_string(required_type_name);
 	oidcpy(&actual_oid, oid);
 	while (1) {
 		int ref_length = -1;
@@ -1769,21 +1784,40 @@ void *read_object_with_reference(struct repository *r,
 	}
 }
 
+static void hash_object_body(const struct git_hash_algo *algo, git_hash_ctx *c,
+			     const void *buf, unsigned long len,
+			     struct object_id *oid,
+			     char *hdr, int *hdrlen)
+{
+	algo->init_fn(c);
+	algo->update_fn(c, hdr, *hdrlen);
+	algo->update_fn(c, buf, len);
+	algo->final_oid_fn(oid, c);
+}
+
 static void write_object_file_prepare(const struct git_hash_algo *algo,
 				      const void *buf, unsigned long len,
-				      const char *type, struct object_id *oid,
+				      enum object_type type, struct object_id *oid,
 				      char *hdr, int *hdrlen)
 {
 	git_hash_ctx c;
 
 	/* Generate the header */
-	*hdrlen = xsnprintf(hdr, *hdrlen, "%s %"PRIuMAX , type, (uintmax_t)len)+1;
+	*hdrlen = format_object_header(hdr, *hdrlen, type, len);
 
 	/* Sha1.. */
-	algo->init_fn(&c);
-	algo->update_fn(&c, hdr, *hdrlen);
-	algo->update_fn(&c, buf, len);
-	algo->final_oid_fn(oid, &c);
+	hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
+}
+
+static void write_object_file_prepare_literally(const struct git_hash_algo *algo,
+						const void *buf, unsigned long len,
+						const char *type, struct object_id *oid,
+						char *hdr, int *hdrlen)
+{
+	git_hash_ctx c;
+
+	*hdrlen = format_object_header_literally(hdr, *hdrlen, type, len);
+	hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
 }
 
 /*
@@ -1836,24 +1870,38 @@ static int write_buffer(int fd, const void *buf, size_t len)
 	return 0;
 }
 
-int hash_object_file(const struct git_hash_algo *algo, const void *buf,
-		     unsigned long len, const char *type,
-		     struct object_id *oid)
+static void hash_object_file_literally(const struct git_hash_algo *algo,
+				       const void *buf, unsigned long len,
+				       const char *type, struct object_id *oid)
 {
 	char hdr[MAX_HEADER_LEN];
 	int hdrlen = sizeof(hdr);
-	write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
-	return 0;
+
+	write_object_file_prepare_literally(algo, buf, len, type, oid, hdr, &hdrlen);
+}
+
+void hash_object_file(const struct git_hash_algo *algo, const void *buf,
+		      unsigned long len, enum object_type type,
+		      struct object_id *oid)
+{
+	hash_object_file_literally(algo, buf, len, type_name(type), oid);
 }
 
 /* Finalize a file on disk, and close it. */
-static void close_loose_object(int fd)
+static void close_loose_object(int fd, const char *filename)
 {
-	if (!the_repository->objects->odb->will_destroy) {
-		if (fsync_object_files)
-			fsync_or_die(fd, "loose object file");
-	}
+	if (the_repository->objects->odb->will_destroy)
+		goto out;
+
+	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
+		fsync_loose_object_bulk_checkin(fd, filename);
+	else if (fsync_object_files > 0)
+		fsync_or_die(fd, filename);
+	else
+		fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd,
+				       filename);
 
+out:
 	if (close(fd) != 0)
 		die_errno(_("error when closing loose object file"));
 }
@@ -1903,6 +1951,96 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename)
 	return fd;
 }
 
+/**
+ * Common steps for loose object writers to start writing loose
+ * objects:
+ *
+ * - Create tmpfile for the loose object.
+ * - Setup zlib stream for compression.
+ * - Start to feed header to zlib stream.
+ *
+ * Returns a "fd", which should later be provided to
+ * end_loose_object_common().
+ */
+static int start_loose_object_common(struct strbuf *tmp_file,
+				     const char *filename, unsigned flags,
+				     git_zstream *stream,
+				     unsigned char *buf, size_t buflen,
+				     git_hash_ctx *c,
+				     char *hdr, int hdrlen)
+{
+	int fd;
+
+	fd = create_tmpfile(tmp_file, filename);
+	if (fd < 0) {
+		if (flags & HASH_SILENT)
+			return -1;
+		else if (errno == EACCES)
+			return error(_("insufficient permission for adding "
+				       "an object to repository database %s"),
+				     get_object_directory());
+		else
+			return error_errno(
+				_("unable to create temporary file"));
+	}
+
+	/*  Setup zlib stream for compression */
+	git_deflate_init(stream, zlib_compression_level);
+	stream->next_out = buf;
+	stream->avail_out = buflen;
+	the_hash_algo->init_fn(c);
+
+	/*  Start to feed header to zlib stream */
+	stream->next_in = (unsigned char *)hdr;
+	stream->avail_in = hdrlen;
+	while (git_deflate(stream, 0) == Z_OK)
+		; /* nothing */
+	the_hash_algo->update_fn(c, hdr, hdrlen);
+
+	return fd;
+}
+
+/**
+ * Common steps for the inner git_deflate() loop for writing loose
+ * objects. Returns what git_deflate() returns.
+ */
+static int write_loose_object_common(git_hash_ctx *c,
+				     git_zstream *stream, const int flush,
+				     unsigned char *in0, const int fd,
+				     unsigned char *compressed,
+				     const size_t compressed_len)
+{
+	int ret;
+
+	ret = git_deflate(stream, flush ? Z_FINISH : 0);
+	the_hash_algo->update_fn(c, in0, stream->next_in - in0);
+	if (write_buffer(fd, compressed, stream->next_out - compressed) < 0)
+		die(_("unable to write loose object file"));
+	stream->next_out = compressed;
+	stream->avail_out = compressed_len;
+
+	return ret;
+}
+
+/**
+ * Common steps for loose object writers to end writing loose objects:
+ *
+ * - End the compression of zlib stream.
+ * - Get the calculated oid to "oid".
+ */
+static int end_loose_object_common(git_hash_ctx *c, git_zstream *stream,
+				   struct object_id *oid)
+{
+	int ret;
+
+	ret = git_deflate_end_gently(stream);
+	if (ret != Z_OK)
+		return ret;
+	the_hash_algo->final_oid_fn(oid, c);
+
+	return Z_OK;
+}
+
 static int write_loose_object(const struct object_id *oid, char *hdr,
 			      int hdrlen, const void *buf, unsigned long len,
 			      time_t mtime, unsigned flags)
@@ -1915,57 +2053,39 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 	static struct strbuf tmp_file = STRBUF_INIT;
 	static struct strbuf filename = STRBUF_INIT;
 
-	loose_object_path(the_repository, &filename, oid);
+	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
+		prepare_loose_object_bulk_checkin();
 
-	fd = create_tmpfile(&tmp_file, filename.buf);
-	if (fd < 0) {
-		if (flags & HASH_SILENT)
-			return -1;
-		else if (errno == EACCES)
-			return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
-		else
-			return error_errno(_("unable to create temporary file"));
-	}
-
-	/* Set it up */
-	git_deflate_init(&stream, zlib_compression_level);
-	stream.next_out = compressed;
-	stream.avail_out = sizeof(compressed);
-	the_hash_algo->init_fn(&c);
+	loose_object_path(the_repository, &filename, oid);
 
-	/* First header.. */
-	stream.next_in = (unsigned char *)hdr;
-	stream.avail_in = hdrlen;
-	while (git_deflate(&stream, 0) == Z_OK)
-		; /* nothing */
-	the_hash_algo->update_fn(&c, hdr, hdrlen);
+	fd = start_loose_object_common(&tmp_file, filename.buf, flags,
+				       &stream, compressed, sizeof(compressed),
+				       &c, hdr, hdrlen);
+	if (fd < 0)
+		return -1;
 
 	/* Then the data itself.. */
 	stream.next_in = (void *)buf;
 	stream.avail_in = len;
 	do {
 		unsigned char *in0 = stream.next_in;
-		ret = git_deflate(&stream, Z_FINISH);
-		the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
-		if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
-			die(_("unable to write loose object file"));
-		stream.next_out = compressed;
-		stream.avail_out = sizeof(compressed);
+
+		ret = write_loose_object_common(&c, &stream, 1, in0, fd,
+						compressed, sizeof(compressed));
 	} while (ret == Z_OK);
 
 	if (ret != Z_STREAM_END)
 		die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
 		    ret);
-	ret = git_deflate_end_gently(&stream);
+	ret = end_loose_object_common(&c, &stream, &parano_oid);
 	if (ret != Z_OK)
 		die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
 		    ret);
-	the_hash_algo->final_oid_fn(&parano_oid, &c);
 	if (!oideq(oid, &parano_oid))
 		die(_("confused by unstable object source data for %s"),
 		    oid_to_hex(oid));
 
-	close_loose_object(fd);
+	close_loose_object(fd, tmp_file.buf);
 
 	if (mtime) {
 		struct utimbuf utb;
@@ -1989,6 +2109,8 @@ static int freshen_packed_object(const struct object_id *oid)
 	struct pack_entry e;
 	if (!find_pack_entry(the_repository, oid, &e))
 		return 0;
+	if (e.p->is_cruft)
+		return 0;
 	if (e.p->freshened)
 		return 1;
 	if (!freshen_file(e.p->pack_name))
@@ -1997,8 +2119,112 @@ static int freshen_packed_object(const struct object_id *oid)
 	return 1;
 }
 
+int stream_loose_object(struct input_stream *in_stream, size_t len,
+			struct object_id *oid)
+{
+	int fd, ret, err = 0, flush = 0;
+	unsigned char compressed[4096];
+	git_zstream stream;
+	git_hash_ctx c;
+	struct strbuf tmp_file = STRBUF_INIT;
+	struct strbuf filename = STRBUF_INIT;
+	int dirlen;
+	char hdr[MAX_HEADER_LEN];
+	int hdrlen;
+
+	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
+		prepare_loose_object_bulk_checkin();
+
+	/* Since oid is not determined, save tmp file to odb path. */
+	strbuf_addf(&filename, "%s/", get_object_directory());
+	hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len);
+
+	/*
+	 * Common steps for write_loose_object and stream_loose_object to
+	 * start writing loose objects:
+	 *
+	 *  - Create tmpfile for the loose object.
+	 *  - Setup zlib stream for compression.
+	 *  - Start to feed header to zlib stream.
+	 */
+	fd = start_loose_object_common(&tmp_file, filename.buf, 0,
+				       &stream, compressed, sizeof(compressed),
+				       &c, hdr, hdrlen);
+	if (fd < 0) {
+		err = -1;
+		goto cleanup;
+	}
+
+	/* Then the data itself.. */
+	do {
+		unsigned char *in0 = stream.next_in;
+
+		if (!stream.avail_in && !in_stream->is_finished) {
+			const void *in = in_stream->read(in_stream, &stream.avail_in);
+			stream.next_in = (void *)in;
+			in0 = (unsigned char *)in;
+			/* All data has been read. */
+			if (in_stream->is_finished)
+				flush = 1;
+		}
+		ret = write_loose_object_common(&c, &stream, flush, in0, fd,
+						compressed, sizeof(compressed));
+		/*
+		 * Unlike write_loose_object(), we do not have the entire
+		 * buffer. If we get Z_BUF_ERROR due to too few input bytes,
+		 * then we'll replenish them in the next input_stream->read()
+		 * call when we loop.
+		 */
+	} while (ret == Z_OK || ret == Z_BUF_ERROR);
+
+	if (stream.total_in != len + hdrlen)
+		die(_("write stream object %ld != %"PRIuMAX), stream.total_in,
+		    (uintmax_t)len + hdrlen);
+
+	/*
+	 * Common steps for write_loose_object and stream_loose_object to
+	 * end writing loose oject:
+	 *
+	 *  - End the compression of zlib stream.
+	 *  - Get the calculated oid.
+	 */
+	if (ret != Z_STREAM_END)
+		die(_("unable to stream deflate new object (%d)"), ret);
+	ret = end_loose_object_common(&c, &stream, oid);
+	if (ret != Z_OK)
+		die(_("deflateEnd on stream object failed (%d)"), ret);
+	close_loose_object(fd, tmp_file.buf);
+
+	if (freshen_packed_object(oid) || freshen_loose_object(oid)) {
+		unlink_or_warn(tmp_file.buf);
+		goto cleanup;
+	}
+
+	loose_object_path(the_repository, &filename, oid);
+
+	/* We finally know the object path, and create the missing dir. */
+	dirlen = directory_size(filename.buf);
+	if (dirlen) {
+		struct strbuf dir = STRBUF_INIT;
+		strbuf_add(&dir, filename.buf, dirlen);
+
+		if (mkdir_in_gitdir(dir.buf) && errno != EEXIST) {
+			err = error_errno(_("unable to create directory %s"), dir.buf);
+			strbuf_release(&dir);
+			goto cleanup;
+		}
+		strbuf_release(&dir);
+	}
+
+	err = finalize_object_file(tmp_file.buf, filename.buf);
+cleanup:
+	strbuf_release(&tmp_file);
+	strbuf_release(&filename);
+	return err;
+}
+
 int write_object_file_flags(const void *buf, unsigned long len,
-			    const char *type, struct object_id *oid,
+			    enum object_type type, struct object_id *oid,
 			    unsigned flags)
 {
 	char hdr[MAX_HEADER_LEN];
@@ -2014,9 +2240,9 @@ int write_object_file_flags(const void *buf, unsigned long len,
 	return write_loose_object(oid, hdr, hdrlen, buf, len, 0, flags);
 }
 
-int hash_object_file_literally(const void *buf, unsigned long len,
-			       const char *type, struct object_id *oid,
-			       unsigned flags)
+int write_object_file_literally(const void *buf, unsigned long len,
+				const char *type, struct object_id *oid,
+				unsigned flags)
 {
 	char *header;
 	int hdrlen, status = 0;
@@ -2024,8 +2250,8 @@ int hash_object_file_literally(const void *buf, unsigned long len,
 	/* type string, SP, %lu of the length plus NUL must fit this */
 	hdrlen = strlen(type) + MAX_HEADER_LEN;
 	header = xmalloc(hdrlen);
-	write_object_file_prepare(the_hash_algo, buf, len, type, oid, header,
-				  &hdrlen);
+	write_object_file_prepare_literally(the_hash_algo, buf, len, type,
+					    oid, header, &hdrlen);
 
 	if (!(flags & HASH_WRITE_OBJECT))
 		goto cleanup;
@@ -2052,7 +2278,7 @@ int force_object_loose(const struct object_id *oid, time_t mtime)
 	buf = read_object(the_repository, oid, &type, &len);
 	if (!buf)
 		return error(_("cannot read object for %s"), oid_to_hex(oid));
-	hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1;
+	hdrlen = format_object_header(hdr, sizeof(hdr), type, len);
 	ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime, 0);
 	free(buf);
 
@@ -2118,7 +2344,8 @@ static int index_mem(struct index_state *istate,
 		     enum object_type type,
 		     const char *path, unsigned flags)
 {
-	int ret, re_allocated = 0;
+	int ret = 0;
+	int re_allocated = 0;
 	int write_object = flags & HASH_WRITE_OBJECT;
 
 	if (!type)
@@ -2145,10 +2372,9 @@ static int index_mem(struct index_state *istate,
 	}
 
 	if (write_object)
-		ret = write_object_file(buf, size, type_name(type), oid);
+		ret = write_object_file(buf, size, type, oid);
 	else
-		ret = hash_object_file(the_hash_algo, buf, size,
-				       type_name(type), oid);
+		hash_object_file(the_hash_algo, buf, size, type, oid);
 	if (re_allocated)
 		free(buf);
 	return ret;
@@ -2160,7 +2386,7 @@ static int index_stream_convert_blob(struct index_state *istate,
 				     const char *path,
 				     unsigned flags)
 {
-	int ret;
+	int ret = 0;
 	const int write_object = flags & HASH_WRITE_OBJECT;
 	struct strbuf sbuf = STRBUF_INIT;
 
@@ -2171,11 +2397,11 @@ static int index_stream_convert_blob(struct index_state *istate,
 				 get_conv_flags(flags));
 
 	if (write_object)
-		ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+		ret = write_object_file(sbuf.buf, sbuf.len, OBJ_BLOB,
					oid);
 	else
-		ret = hash_object_file(the_hash_algo, sbuf.buf, sbuf.len,
-				       type_name(OBJ_BLOB), oid);
+		hash_object_file(the_hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB,
+				 oid);
 	strbuf_release(&sbuf);
 	return ret;
 }
@@ -2294,8 +2520,8 @@ int index_path(struct index_state *istate, struct object_id *oid,
 			return error_errno("readlink(\"%s\")", path);
 		if (!(flags & HASH_WRITE_OBJECT))
 			hash_object_file(the_hash_algo, sb.buf, sb.len,
-					 blob_type, oid);
-		else if (write_object_file(sb.buf, sb.len, blob_type, oid))
+					 OBJ_BLOB, oid);
+		else if (write_object_file(sb.buf, sb.len, OBJ_BLOB, oid))
 			rc = error(_("%s: failed to insert into database"), path);
 		strbuf_release(&sb);
 		break;
@@ -2578,7 +2804,7 @@ int read_loose_object(const char *path,
 	}
 
 	if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr),
-				NULL) < 0) {
+				NULL) != ULHR_OK) {
 		error(_("unable to unpack header of %s"), path);
 		goto out;
 	}
@@ -2599,9 +2825,10 @@ int read_loose_object(const char *path,
 			git_inflate_end(&stream);
 			goto out;
 		}
-		if (check_object_signature(the_repository, expected_oid,
+		hash_object_file_literally(the_repository->hash_algo,
 					   *contents, *size,
-					   oi->type_name->buf, real_oid))
+					   oi->type_name->buf, real_oid);
+		if (!oideq(expected_oid, real_oid))
 			goto out;
 	}
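
The first hunk converts the hash_algos[] table from positional to designated initializers. As a quick illustration of why that form is less fragile, here is a minimal standalone C sketch (not git code; the struct and its values are simplified stand-ins, though 0x73686131 is the real GIT_SHA1_FORMAT_ID): with designated initializers, adding or reordering struct fields cannot silently shift which value lands in which field, and omitted fields are zero-initialized.

#include <stdio.h>

/* simplified stand-in for git's struct git_hash_algo */
struct algo_desc {
	const char *name;
	unsigned int format_id;
	size_t rawsz;
	size_t hexsz;
};

static const struct algo_desc sha1_desc = {
	.name = "sha1",
	.format_id = 0x73686131,	/* "sha1" in ASCII */
	.rawsz = 20,
	.hexsz = 40,
};

int main(void)
{
	printf("%s: raw=%zu hex=%zu\n", sha1_desc.name,
	       sha1_desc.rawsz, sha1_desc.hexsz);
	return 0;
}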
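format_object_header() and its _literally() variant, added in the hunk at old line 1049, centralize the "<type> <size>" loose-object header that was previously built with open-coded xsnprintf() calls, with the terminating NUL counted in the returned length. The following standalone sketch shows the same layout using plain snprintf(); it is not the git implementation itself.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* mirrors the header layout produced by format_object_header_literally() */
static int fmt_header(char *str, size_t size, const char *type, size_t objsize)
{
	return snprintf(str, size, "%s %" PRIuMAX, type, (uintmax_t)objsize) + 1;
}

int main(void)
{
	char hdr[32];
	int hdrlen = fmt_header(hdr, sizeof(hdr), "blob", 12);

	/* "blob 12" is 7 characters; the NUL byte makes the header 8 bytes */
	printf("header=\"%s\" hdrlen=%d strlen=%zu\n", hdr, hdrlen, strlen(hdr));
	return 0;
}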
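The same hunk splits the old check_object_signature() into an in-memory check and a streaming re-hash, stream_object_signature(), instead of overloading one function with a NULL "map". A hypothetical caller compiled inside git's tree (assuming only the declarations introduced by this diff) could dispatch between them as below; verify_object() is an invented name used purely for illustration.

/* sketch only: relies on git's internal headers being available */
static int verify_object(struct repository *r, const struct object_id *oid,
			 void *buf, unsigned long size, enum object_type type)
{
	if (buf)
		return check_object_signature(r, oid, buf, size, type);
	/* contents not loaded: re-hash via the streaming interface */
	return stream_object_signature(r, oid);
}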
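start_loose_object_common(), write_loose_object_common() and end_loose_object_common() factor out the zlib ritual shared by write_loose_object() and the new stream_loose_object(): feed the header, deflate the payload in chunks, flush the output buffer whenever it fills, and demand Z_STREAM_END at the end. The following standalone sketch (plain zlib, no git internals; build with -lz) shows the same loop shape on a toy buffer.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const char *hdr = "blob 11";		/* NUL added via strlen()+1 below */
	const char *payload = "hello world";
	unsigned char out[64];
	z_stream s;
	size_t total = 0;
	int ret;

	memset(&s, 0, sizeof(s));
	if (deflateInit(&s, Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;

	/* first the header, including its terminating NUL */
	s.next_in = (unsigned char *)hdr;
	s.avail_in = strlen(hdr) + 1;
	s.next_out = out;
	s.avail_out = sizeof(out);
	while (s.avail_in && deflate(&s, Z_NO_FLUSH) == Z_OK)
		; /* header is tiny; it is consumed almost immediately */

	/* then the data itself, finishing the stream on the last chunk */
	s.next_in = (unsigned char *)payload;
	s.avail_in = strlen(payload);
	do {
		ret = deflate(&s, Z_FINISH);
		total += (size_t)(s.next_out - out);	/* "write" this chunk */
		s.next_out = out;
		s.avail_out = sizeof(out);
	} while (ret == Z_OK);

	if (ret != Z_STREAM_END || deflateEnd(&s) != Z_OK)
		return 1;
	printf("compressed into %zu output bytes\n", total);
	return 0;
}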
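stream_loose_object() pulls its payload through a struct input_stream which, judging from the calls in this hunk, is a small callback interface: a read() hook that returns the next chunk and sets its length, plus an is_finished flag that triggers the final Z_FINISH flush. A hypothetical in-memory implementation might look like the sketch below (assumes git's internal headers; memory_stream and write_blob_streamed are invented names). A real caller would page data in from a file or the network instead of serving one buffer.

/* sketch only: relies on git's internal headers being available */
struct memory_stream {
	struct input_stream in;		/* first member, so we can downcast */
	const void *buf;
	unsigned long len;
};

static const void *memory_stream_read(struct input_stream *in, unsigned long *len)
{
	struct memory_stream *ms = (struct memory_stream *)in;

	/* hand over everything in one chunk and signal end of input */
	*len = ms->len;
	ms->in.is_finished = 1;
	return ms->buf;
}

static int write_blob_streamed(const void *buf, unsigned long len,
			       struct object_id *oid)
{
	struct memory_stream ms = {
		.in = { .read = memory_stream_read },
		.buf = buf,
		.len = len,
	};

	return stream_loose_object(&ms.in, len, oid);
}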
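Because the object id is only known after the data has been hashed, stream_loose_object() writes into a temporary file directly under the objects directory and only moves it to its fan-out path afterwards, creating the missing directory on demand. Here is a standalone sketch of that write-then-rename pattern with hard-coded stand-in paths (not git code).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	char tmp[] = "objects/tmp_obj_XXXXXX";
	int fd;

	if (mkdir("objects", 0777) && errno != EEXIST)
		return 1;

	/* write the payload to a temporary file first */
	fd = mkstemp(tmp);
	if (fd < 0)
		return 1;
	if (write(fd, "data", 4) != 4 || close(fd))
		return 1;

	/* now that the "name" is known, create the fan-out directory */
	if (mkdir("objects/ab", 0777) && errno != EEXIST)
		return 1;

	/* and move the finished file into its final resting place */
	if (rename(tmp, "objects/ab/cdef"))
		return 1;
	return 0;
}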
