89 files changed, 2857 insertions, 212 deletions
diff --git a/.gitignore b/.gitignore index 3dcdb6bb5a..96c794b1c7 100644 --- a/.gitignore +++ b/.gitignore @@ -33,6 +33,7 @@ /git-check-mailmap /git-check-ref-format /git-checkout +/git-checkout--worker /git-checkout-index /git-cherry /git-cherry-pick diff --git a/Documentation/Makefile b/Documentation/Makefile index 874a01d7a8..c2baad0bd8 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -91,6 +91,7 @@ TECH_DOCS += technical/multi-pack-index TECH_DOCS += technical/pack-format TECH_DOCS += technical/pack-heuristics TECH_DOCS += technical/pack-protocol +TECH_DOCS += technical/parallel-checkout TECH_DOCS += technical/partial-clone TECH_DOCS += technical/protocol-capabilities TECH_DOCS += technical/protocol-common diff --git a/Documentation/RelNotes/2.32.0.txt b/Documentation/RelNotes/2.32.0.txt index 3f73411286..f4e5191ae2 100644 --- a/Documentation/RelNotes/2.32.0.txt +++ b/Documentation/RelNotes/2.32.0.txt @@ -77,6 +77,9 @@ UI, Workflows & Features * Userdiff patterns for "Scheme" has been added. + * "git log" learned "--diff-merges=<style>" option, with an + associated configuration variable log.diffMerges. + Performance, Internal Implementation, Development Support etc. @@ -131,6 +134,21 @@ Performance, Internal Implementation, Development Support etc. * A bit of code clean-up and a lot of test clean-up around userdiff area. + * Handling of "promisor packs" that allows certain objects to be + missing and lazily retrievable has been optimized (a bit). + + * When packet_write() fails, we gave an extra error message + unnecessarily, which has been corrected. + + * The checkout machinery has been taught to perform the actual + write-out of the files in parallel when able. + + * Show errno in the trace output in the error codepath that calls + read_raw_ref method. + + * Effort to make the command line completion (in contrib/) safe with + "set -u" continues. + Fixes since v2.31 ----------------- @@ -230,6 +248,20 @@ Fixes since v2.31 corrected. (merge 56550ea718 sg/bugreport-fixes later to maint). + * "git push --quiet --set-upstream" was not quiet when setting the + upstream branch configuration, which has been corrected. + (merge f3cce896a8 ow/push-quiet-set-upstream later to maint). + + * The prefetch task in "git maintenance" assumed that "git fetch" + from any remote would fetch all its local branches, which would + fetch too much if the user is interested in only a subset of + branches there. + (merge 32f67888d8 ds/maintenance-prefetch-fix later to maint). + + * Clarify that pathnames recorded in Git trees are most often (but + not necessarily) encoded in UTF-8. + (merge 9364bf465d ab/pathname-encoding-doc later to maint). + * Other code cleanup, docfix, build fix, etc. (merge f451960708 dl/cat-file-doc-cleanup later to maint). (merge 12604a8d0c sv/t9801-test-path-is-file-cleanup later to maint). diff --git a/Documentation/config/checkout.txt b/Documentation/config/checkout.txt index 2cddf7b4b4..bfbca90f0e 100644 --- a/Documentation/config/checkout.txt +++ b/Documentation/config/checkout.txt @@ -21,3 +21,24 @@ checkout.guess:: Provides the default value for the `--guess` or `--no-guess` option in `git checkout` and `git switch`. See linkgit:git-switch[1] and linkgit:git-checkout[1]. + +checkout.workers:: + The number of parallel workers to use when updating the working tree. + The default is one, i.e. sequential execution. If set to a value less + than one, Git will use as many workers as the number of logical cores + available. 
This setting and `checkout.thresholdForParallelism` affect + all commands that perform checkout. E.g. checkout, clone, reset, + sparse-checkout, etc. ++ +Note: parallel checkout usually delivers better performance for repositories +located on SSDs or over NFS. For repositories on spinning disks and/or machines +with a small number of cores, the default sequential checkout often performs +better. The size and compression level of a repository might also influence how +well the parallel version performs. + +checkout.thresholdForParallelism:: + When running parallel checkout with a small number of files, the cost + of subprocess spawning and inter-process communication might outweigh + the parallelization gains. This setting defines the minimum + number of files for which parallel checkout should be attempted. The + default is 100. diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt index 7cb50b37e9..75f3a2d105 100644 --- a/Documentation/config/index.txt +++ b/Documentation/config/index.txt @@ -14,6 +14,11 @@ index.recordOffsetTable:: Defaults to 'true' if index.threads has been explicitly enabled, 'false' otherwise. +index.sparse:: + When enabled, write the index using sparse-directory entries. This + has no effect unless `core.sparseCheckout` and + `core.sparseCheckoutCone` are both enabled. Defaults to 'false'. + index.threads:: Specifies the number of threads to spawn when loading the index. This is meant to reduce index load time on multiprocessor machines. diff --git a/Documentation/config/log.txt b/Documentation/config/log.txt index 208d5fdcaa..456eb07800 100644 --- a/Documentation/config/log.txt +++ b/Documentation/config/log.txt @@ -24,6 +24,11 @@ log.excludeDecoration:: the config option can be overridden by the `--decorate-refs` option. +log.diffMerges:: + Set default diff format to be used for merge commits. See + `--diff-merges` in linkgit:git-log[1] for details. + Defaults to `separate`. + log.follow:: If `true`, `git log` will act as if the `--follow` option was used when a single <path> is given. This has the same limitations as `--follow`, diff --git a/Documentation/diff-options.txt b/Documentation/diff-options.txt index aa2b5c11f2..530d115914 100644 --- a/Documentation/diff-options.txt +++ b/Documentation/diff-options.txt @@ -34,7 +34,7 @@ endif::git-diff[] endif::git-format-patch[] ifdef::git-log[] ---diff-merges=(off|none|first-parent|1|separate|m|combined|c|dense-combined|cc):: +--diff-merges=(off|none|on|first-parent|1|separate|m|combined|c|dense-combined|cc):: --no-diff-merges:: Specify diff format to be used for merge commits. Default is {diff-merges-default} unless `--first-parent` is in use, in which case @@ -45,17 +45,24 @@ ifdef::git-log[] Disable output of diffs for merge commits. Useful to override implied value. + +--diff-merges=on::: +--diff-merges=m::: +-m::: + This option causes diff output for merge commits to be shown in + the default format. `-m` will produce the output only if `-p` + is given as well. The default format can be changed using + the `log.diffMerges` configuration parameter, whose default value + is `separate`. ++ --diff-merges=first-parent::: --diff-merges=1::: This option makes merge commits show the full diff with respect to the first parent only. + --diff-merges=separate::: ---diff-merges=m::: --m::: This makes merge commits show the full diff with respect to each of the parents. Separate log entry and diff is generated - for each parent. `-m` doesn't produce any output without `-p`. + for each parent.
+ --diff-merges=combined::: --diff-merges=c::: @@ -293,11 +300,14 @@ explained for the configuration variable `core.quotePath` (see linkgit:git-config[1]). --name-only:: - Show only names of changed files. + Show only names of changed files. The file names are often encoded in UTF-8. + For more information see the discussion about encoding in the linkgit:git-log[1] + manual page. --name-status:: Show only names and status of changed files. See the description of the `--diff-filter` option on what the status letters mean. + Just like `--name-only` the file names are often encoded in UTF-8. --submodule[=<format>]:: Specify how differences in submodules are shown. When specifying diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt index 07783deee3..9e7b4e189c 100644 --- a/Documentation/fetch-options.txt +++ b/Documentation/fetch-options.txt @@ -110,6 +110,11 @@ ifndef::git-pull[] setting `fetch.writeCommitGraph`. endif::git-pull[] +--prefetch:: + Modify the configured refspec to place all refs into the + `refs/prefetch/` namespace. See the `prefetch` task in + linkgit:git-maintenance[1]. + -p:: --prune:: Before fetching, remove any remote-tracking references that no diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt index 80ddd33ceb..1e738ad398 100644 --- a/Documentation/git-maintenance.txt +++ b/Documentation/git-maintenance.txt @@ -92,10 +92,8 @@ commit-graph:: prefetch:: The `prefetch` task updates the object directory with the latest objects from all registered remotes. For each remote, a `git fetch` - command is run. The refmap is custom to avoid updating local or remote - branches (those in `refs/heads` or `refs/remotes`). Instead, the - remote refs are stored in `refs/prefetch/<remote>/`. Also, tags are - not updated. + command is run. The configured refspec is modified to place all + requested refs within `refs/prefetch/`. Also, tags are not updated. + This is done to avoid disrupting the remote-tracking branches. The end users expect these refs to stay unmoved unless they initiate a fetch. With prefetch diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt index a0eeaeb02e..fdcf43f87c 100644 --- a/Documentation/git-sparse-checkout.txt +++ b/Documentation/git-sparse-checkout.txt @@ -45,6 +45,20 @@ To avoid interfering with other worktrees, it first enables the When `--cone` is provided, the `core.sparseCheckoutCone` setting is also set, allowing for better performance with a limited set of patterns (see 'CONE PATTERN SET' below). ++ +Use the `--[no-]sparse-index` option to toggle the use of the sparse +index format. This reduces the size of the index to be more closely +aligned with your sparse-checkout definition. This can have significant +performance advantages for commands such as `git status` or `git add`. +This feature is still experimental. Some commands might be slower with +a sparse index until they are properly integrated with the feature. ++ +**WARNING:** Using a sparse index requires modifying the index in a way +that is not completely understood by external tools. If you have trouble +with this compatibility, then run `git sparse-checkout init --no-sparse-index` +to rewrite your index to not be sparse. Older versions of Git will not +understand the sparse directory entries index extension and may fail to +interact with your repository until it is disabled. 
'set':: Write a set of patterns to the sparse-checkout file, as given as diff --git a/Documentation/git.txt b/Documentation/git.txt index 3a9c44987f..ba5c8e9d98 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -13,7 +13,7 @@ SYNOPSIS [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path] [-p|--paginate|-P|--no-pager] [--no-replace-objects] [--bare] [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>] - [--super-prefix=<path>] [--config-env <name>=<envvar>] + [--super-prefix=<path>] [--config-env=<name>=<envvar>] <command> [<args>] DESCRIPTION diff --git a/Documentation/technical/index-format.txt b/Documentation/technical/index-format.txt index d363a71c37..65da0daaa5 100644 --- a/Documentation/technical/index-format.txt +++ b/Documentation/technical/index-format.txt @@ -44,6 +44,13 @@ Git index format localization, no special casing of directory separator '/'). Entries with the same name are sorted by their stage field. + An index entry typically represents a file. However, if sparse-checkout + is enabled in cone mode (`core.sparseCheckoutCone` is enabled) and the + `extensions.sparseIndex` extension is enabled, then the index may + contain entries for directories outside of the sparse-checkout definition. + These entries have mode `040000`, include the `SKIP_WORKTREE` bit, and + the path ends in a directory separator. + 32-bit ctime seconds, the last time a file's metadata changed this is stat(2) data @@ -385,3 +392,15 @@ The remaining data of each directory block is grouped by type: in this block of entries. - 32-bit count of cache entries in this block + +== Sparse Directory Entries + + When using sparse-checkout in cone mode, some entire directories within + the index can be summarized by pointing to a tree object instead of the + entire expanded list of paths within that tree. An index containing such + entries is a "sparse index". Index format versions 4 and below were not + implemented with such entries in mind. Thus, for these versions, an + index containing sparse directory entries will include this extension + with signature { 's', 'd', 'i', 'r' }. Like the split-index extension, + tools should avoid interacting with a sparse index unless they understand + this extension. diff --git a/Documentation/technical/parallel-checkout.txt b/Documentation/technical/parallel-checkout.txt new file mode 100644 index 0000000000..e790258a1a --- /dev/null +++ b/Documentation/technical/parallel-checkout.txt @@ -0,0 +1,270 @@ +Parallel Checkout Design Notes +============================== + +The "Parallel Checkout" feature attempts to use multiple processes to +parallelize the work of uncompressing the blobs, applying in-core +filters, and writing the resulting contents to the working tree during a +checkout operation. It can be used by all checkout-related commands, +such as `clone`, `checkout`, `reset`, `sparse-checkout`, and others. + +These commands share the following basic structure: + +* Step 1: Read the current index file into memory. + +* Step 2: Modify the in-memory index based upon the command, and + temporarily mark all cache entries that need to be updated. + +* Step 3: Populate the working tree to match the new candidate index. + This includes iterating over all of the to-be-updated cache entries + and deleting, creating, or overwriting the associated files in the working + tree. + +* Step 4: Write the new index to disk. + +Step 3 is the focus of the "parallel checkout" effort described here.
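As a preview of the sections that follow, here is a minimal sketch of what the sequential Step 3 boils down to. It is illustrative only: the helper name `write_out_marked_entries()` and the use of the `CE_UPDATE` flag are simplifications made for this note, while `checkout_entry()` is the real per-entry function described below; the actual driver, `unpack-trees.c:check_updates()`, also handles removals, progress reporting, and submodules.

----------------------------------------------
#include "cache.h"
#include "entry.h"

/*
 * Illustrative sketch of Step 3: walk the cache entries that were
 * marked for update in Step 2 and write each one out to the working
 * tree.  checkout_entry() performs the Step 3b checks and eventually
 * the Step 3c write described in the next section.
 */
static int write_out_marked_entries(struct index_state *index,
				    const struct checkout *state)
{
	int i, errs = 0;

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (!(ce->ce_flags & CE_UPDATE))
			continue; /* not marked for update in Step 2 */

		errs |= checkout_entry(ce, state, NULL, NULL);
	}
	return errs;
}
----------------------------------------------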
+ +Sequential Implementation +------------------------- + +For the purposes of discussion here, the current sequential +implementation of Step 3 is divided into 3 parts, each one implemented in +its own function: + +* Step 3a: `unpack-trees.c:check_updates()` contains a series of + sequential loops iterating over the `cache_entry` array. The main + loop in this function calls the Step 3b function for each of the + to-be-updated entries. + +* Step 3b: `entry.c:checkout_entry()` examines the existing working tree + for file conflicts, collisions, and unsaved changes. It removes files + and creates leading directories as necessary. It calls the Step 3c + function for each entry to be written. + +* Step 3c: `entry.c:write_entry()` loads the blob into memory, smudges + it if necessary, creates the file in the working tree, writes the + smudged contents, calls `fstat()` or `lstat()`, and updates the + associated `cache_entry` struct with the stat information gathered. + +It wouldn't be safe to perform Step 3b in parallel, as there could be +race conditions between file creations and removals. Instead, the +parallel checkout framework lets the sequential code handle Step 3b, +and uses parallel workers to replace the sequential +`entry.c:write_entry()` calls from Step 3c. + +Rejected Multi-Threaded Solution +-------------------------------- + +The most "straightforward" implementation would be to spread the set of +to-be-updated cache entries across multiple threads. But due to the +thread-unsafe functions in the ODB code, we would have to use locks to +coordinate the parallel operation. An early prototype of this solution +showed that the multi-threaded checkout would bring performance +improvements over the sequential code, but there was still too much lock +contention. A `perf` profile indicated that around 20% of the runtime +during a local Linux clone (on an SSD) was spent in locking functions. +For this reason, this approach was rejected in favor of using multiple +child processes, which led to better performance. + +Multi-Process Solution +---------------------- + +Parallel checkout alters the aforementioned Step 3 to use multiple +`checkout--worker` background processes to distribute the work. The +long-running worker processes are controlled by the foreground Git +command using the existing run-command API. + +Overview +~~~~~~~~ + +Step 3b is only slightly altered; for each entry to be checked out, the +main process performs the following steps: + +* M1: Check whether there is any untracked or unclean file in the + working tree which would be overwritten by this entry, and decide + whether to proceed (removing the file(s)) or not. + +* M2: Create the leading directories. + +* M3: Load the conversion attributes for the entry's path. + +* M4: Check, based on the entry's type and conversion attributes, + whether the entry is eligible for parallel checkout (more on this + later). If it is eligible, enqueue the entry and the loaded + attributes to later write the entry in parallel. If not, write the + entry right away, using the default sequential code. + +Note: we save the conversion attributes associated with each entry +because the workers don't have access to the main process' index state, +so they can't load the attributes by themselves (and the attributes are +needed to properly smudge the entry).
Additionally, this has a positive +impact on performance as (1) we don't need to load the attributes twice +and (2) the attributes machinery is optimized to handle paths in +sequential order. + +After all entries have passed through the above steps, the main process +checks if the number of enqueued entries is sufficient to spread among +the workers. If not, it just writes them sequentially. Otherwise, it +spawns the workers and distributes the queued entries uniformly in +continuous chunks. This aims to minimize the chances of two workers +writing to the same directory simultaneously, which could increase lock +contention in the kernel. + +Then, for each assigned item, each worker: + +* W1: Checks if there is any non-directory file in the leading part of + the entry's path or if there already exists a file at the entry's path. + If so, marks the entry with `PC_ITEM_COLLIDED` and skips it (more on + this later). + +* W2: Creates the file (with O_CREAT and O_EXCL). + +* W3: Loads the blob into memory (inflating and delta reconstructing + it). + +* W4: Applies any required in-process filter, like end-of-line + conversion and re-encoding. + +* W5: Writes the result to the file descriptor opened at W2. + +* W6: Calls `fstat()` or `lstat()` on the just-written path, and sends + the result back to the main process, together with the end status of + the operation and the item's identification number. + +Note that, when possible, steps W3 to W5 are delegated to the streaming +machinery, removing the need to keep the entire blob in memory. + +If the worker fails to read the blob or to write it to the working tree, +it removes the created file to avoid leaving empty files behind. This is +the *only* time a worker is allowed to remove a file. + +As mentioned earlier, it is the responsibility of the main process to +remove any file that blocks the checkout operation (or abort if the +removal(s) would cause data loss and the user didn't ask to `--force`). +This is crucial to avoid race conditions and also to properly detect +path collisions at Step W1. + +After the workers finish writing the items and sending back the required +information, the main process handles the results in two steps: + +- First, it updates the in-memory index with the `lstat()` information + sent by the workers. (This must be done first as this information + might be required in the following step.) + +- Then it writes the items which collided on disk (i.e. items marked + with `PC_ITEM_COLLIDED`). More on this below. + +Path Collisions +--------------- + +Path collisions happen when two different paths correspond to the same +entry in the file system. E.g. the paths 'a' and 'A' would collide in a +case-insensitive file system. + +The sequential checkout deals with collisions in the same way that it +deals with files that were already present in the working tree before +checkout. Basically, it checks if the path that it wants to write +already exists on disk, makes sure the existing file doesn't have +unsaved data, and then overwrites it. (To be more pedantic: it deletes +the existing file and creates the new one.) So, if there are multiple +colliding files to be checked out, the sequential code will write each +one of them but only the last will actually survive on disk. + +Parallel checkout aims to reproduce the same behavior. However, we +cannot let the workers racily write to the same file on disk.
Instead, +the workers detect when the entry that they want to check out would +collide with an existing file, and mark it with `PC_ITEM_COLLIDED`. +Later, the main process can sequentially feed these entries back to +`checkout_entry()` without the risk of race conditions. On clone, this +also has the effect of marking the colliding entries to later emit a +warning for the user, like the classic sequential checkout does. + +The workers are able to detect both collisions among the entries being +concurrently written and collisions between a parallel-eligible entry +and an ineligible entry. The general idea for collision detection is +quite straightforward: for each parallel-eligible entry, the main +process must remove all files that prevent this entry from being written +(before enqueueing it). This includes any non-directory file in the +leading path of the entry. Later, when a worker gets assigned the entry, +it looks again for non-directory files and for an already existing +file at the entry's path. If any of these checks finds something, the +worker knows that there was a path collision. + +Because parallel checkout can distinguish path collisions from the case +where the file was already present in the working tree before checkout, +we could alternatively choose to skip the checkout of colliding entries. +However, each entry that doesn't get written would have NULL `lstat()` +fields in the index. This could cause performance penalties for +subsequent commands that need to refresh the index, as they would have +to go to the file system to see if the entry is dirty. Thus, if we have +N entries in a colliding group and we decide to write and `lstat()` only +one of them, every subsequent `git-status` will have to read, convert, +and hash the written file N - 1 times. By checking out all colliding +entries (like the sequential code does), we only pay the overhead once, +during checkout. + +Eligible Entries for Parallel Checkout +-------------------------------------- + +As previously mentioned, not all entries passed to `checkout_entry()` +will be considered eligible for parallel checkout. More specifically, we +exclude: + +- Symbolic links; to avoid race conditions that, in combination with + path collisions, could cause workers to write files at the wrong + place. For example, if we were to concurrently check out a symlink + 'a' -> 'b' and a regular file 'A/f' in a case-insensitive file system, + we could potentially end up writing the file 'A/f' at 'a/f', due to a + race condition. + +- Regular files that require external filters (either "one shot" filters + or long-running process filters). These filters are black boxes to Git + and may have their own internal locking or non-concurrent assumptions. + So it might not be safe to run multiple instances in parallel. ++ +Besides, long-running filters may use the delayed checkout feature to +postpone the return of some filtered blobs. The delayed checkout queue +and the parallel checkout queue are not compatible and should remain +separate. ++ +Note: regular files that only require internal filters, like end-of-line +conversion and re-encoding, are eligible for parallel checkout. + +Ineligible entries are checked out by the classic sequential codepath +*before* spawning workers. + +Note: submodules' files are also eligible for parallel checkout (as +long as they don't fall into any of the excluding categories mentioned +above).
But since each submodule is checked out in its own child +process, we don't mix the superproject's and the submodules' files in +the same parallel checkout process or queue. + +The API +------- + +The parallel checkout API was designed with the goal of minimizing +changes to the current users of the checkout machinery. This means that +they don't have to call a different function for sequential or parallel +checkout. As already mentioned, `checkout_entry()` will automatically +insert the given entry in the parallel checkout queue when this feature +is enabled and the entry is eligible; otherwise, it will just write the +entry right away, using the sequential code. In general, callers of the +parallel checkout API should look similar to this: + +---------------------------------------------- +int pc_workers, pc_threshold, err = 0; +struct checkout state; + +get_parallel_checkout_configs(&pc_workers, &pc_threshold); + +/* + * This check is not strictly required, but it + * should save some time in sequential mode. + */ +if (pc_workers > 1) + init_parallel_checkout(); + +for (each cache_entry ce to-be-updated) + err |= checkout_entry(ce, &state, NULL, NULL); + +err |= run_parallel_checkout(&state, pc_workers, pc_threshold, NULL, NULL); +---------------------------------------------- diff --git a/Documentation/technical/sparse-index.txt b/Documentation/technical/sparse-index.txt new file mode 100644 index 0000000000..3b24c1a219 --- /dev/null +++ b/Documentation/technical/sparse-index.txt @@ -0,0 +1,208 @@ +Git Sparse-Index Design Document +================================ + +The sparse-checkout feature allows users to focus a working directory on +a subset of the files at HEAD. The cone mode patterns, enabled by +`core.sparseCheckoutCone`, allow for very fast pattern matching to +discover which files at HEAD belong in the sparse-checkout cone. + +Three important scale dimensions for a Git working directory are: + +* `HEAD`: How many files are present at `HEAD`? + +* Populated: How many files are within the sparse-checkout cone. + +* Modified: How many files has the user modified in the working directory? + +We will use big-O notation -- O(X) -- to denote how expensive certain +operations are in terms of these dimensions. + +These dimensions are ordered by their magnitude: users (typically) modify +fewer files than are populated, and we can only populate files at `HEAD`. + +Problems occur if there is an extreme imbalance in these dimensions. For +example, if `HEAD` contains millions of paths but the populated set has +only tens of thousands, then commands like `git status` and `git add` can +be dominated by operations that require O(`HEAD`) operations instead of +O(Populated). Primarily, the cost is in parsing and rewriting the index, +which is filled primarily with files at `HEAD` that are marked with the +`SKIP_WORKTREE` bit. + +The sparse-index intends to take these commands that read and modify the +index from O(`HEAD`) to O(Populated). To do this, we need to modify the +index format in a significant way: add "sparse directory" entries. + +With cone mode patterns, it is possible to detect when an entire +directory will have its contents outside of the sparse-checkout definition. +Instead of listing all of the files it contains as individual entries, a +sparse-index contains an entry with the directory name, referencing the +object ID of the tree at `HEAD` and marked with the `SKIP_WORKTREE` bit. 
+If we need to discover the details for paths within that directory, we +can parse trees to find that list. + +At time of writing, sparse-directory entries violate expectations about the +index format and its in-memory data structure. There are many consumers in +the codebase that expect to iterate through all of the index entries and +see only files. In fact, these loops expect to see a reference to every +staged file. One way to handle this is to parse trees to replace a +sparse-directory entry with all of the files within that tree as the index +is loaded. However, parsing trees is slower than parsing the index format, +so that is a slower operation than if we left the index alone. The plan is +to make all of these integrations "sparse aware" so this expansion through +tree parsing is unnecessary and they use fewer resources than when using a +full index. + +The implementation plan below follows four phases to slowly integrate with +the sparse-index. The intention is to incrementally update Git commands to +interact safely with the sparse-index without significant slowdowns. This +may not always be possible, but the hope is that the primary commands that +users need in their daily work are dramatically improved. + +Phase I: Format and initial speedups +------------------------------------ + +During this phase, Git learns to enable the sparse-index and safely parse +one. Protections are put in place so that every consumer of the in-memory +data structure can operate with its current assumption of every file at +`HEAD`. + +At first, every index parse will call a helper method, +`ensure_full_index()`, which scans the index for sparse-directory entries +(pointing to trees) and replaces them with the full list of paths (with +blob contents) by parsing tree objects. This will be slower in all cases. +The only noticeable change in behavior will be that the serialized index +file contains sparse-directory entries. + +To start, we use a new required index extension, `sdir`, to allow +inserting sparse-directory entries into indexes with file format +versions 2, 3, and 4. This prevents Git versions that do not understand +the sparse-index from operating on one, while allowing tools that do not +understand the sparse-index to operate on repositories as long as they do +not interact with the index. A new format, index v5, will be introduced +that includes sparse-directory entries by default. It might also +introduce other features that have been considered for improving the +index, as well. + +Next, consumers of the index will be guarded against operating on a +sparse-index by inserting calls to `ensure_full_index()` or +`expand_index_to_path()`. If a specific path is requested, then those will +be protected from within the `index_file_exists()` and `index_name_pos()` +API calls: they will call `ensure_full_index()` if necessary. The +intention here is to preserve existing behavior when interacting with a +sparse-checkout. We don't want a change to happen by accident, without +tests. Many of these locations may not need any change before removing the +guards, but we should not do so without tests to ensure the expected +behavior happens. + +It may be desirable to _change_ the behavior of some commands in the +presence of a sparse index or more generally in any sparse-checkout +scenario. In such cases, these should be carefully communicated and +tested. No such behavior changes are intended during this phase. 
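The guard described above has a simple shape at a typical call site; a sketch follows (compare the `ensure_full_index()` hunks in the `builtin/*.c` changes later in this patch). The surrounding function name and the loop body are placeholders; only the guard itself is the point.

----------------------------------------------
#include "cache.h"

/* Hypothetical consumer that still assumes a full index. */
static void scan_all_cache_entries(void)
{
	int i;

	/* TODO: audit for interaction with sparse-index. */
	ensure_full_index(&the_index);
	for (i = 0; i < active_nr; i++) {
		/*
		 * Existing per-file logic on active_cache[i] goes here,
		 * unchanged: after ensure_full_index() the loop never sees
		 * a sparse-directory entry, so the "every entry is a file"
		 * assumption still holds.
		 */
	}
}
----------------------------------------------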
+ +During a scan of the codebase, not every iteration of the cache entries +needs an `ensure_full_index()` check. The basic reasons include: + +1. The loop is scanning for entries with non-zero stage. These entries + are not collapsed into a sparse-directory entry. + +2. The loop is scanning for submodules. These entries are not collapsed + into a sparse-directory entry. + +3. The loop is part of the index API, especially around reading or + writing the format. + +4. The loop is checking for correct order of cache entries and that is + correct if and only if the sparse-directory entries are in the correct + location. + +5. The loop ignores entries with the `SKIP_WORKTREE` bit set, or is + otherwise already aware of sparse directory entries. + +6. The sparse-index is disabled at this point when using the split-index + feature, so no effort is made to protect the split-index API. + +Even after inserting these guards, we will keep expanding sparse-indexes +for most Git commands using the `command_requires_full_index` repository +setting. This setting will be on by default and disabled one builtin at a +time until we have sufficient confidence that all of the index operations +are properly guarded. + +To complete this phase, the commands `git status` and `git add` will be +integrated with the sparse-index so that they operate with O(Populated) +performance. They will be carefully tested for operations within and +outside the sparse-checkout definition. + +Phase II: Careful integrations +------------------------------ + +This phase focuses on ensuring that all index extensions and APIs work +well with a sparse-index. This requires significant increases to our test +coverage, especially for operations that interact with the working +directory outside of the sparse-checkout definition. Some of these +behaviors may not be the desirable ones, such as some tests already +marked for failure in `t1092-sparse-checkout-compatibility.sh`. + +The index extensions that may require special integrations are: + +* FS Monitor +* Untracked cache + +While integrating with these features, we should look for patterns that +might lead to better APIs for interacting with the index. Coalescing +common usage patterns into an API call can reduce the number of places +where sparse-directories need to be handled carefully. + +Phase III: Important command speedups +------------------------------------- + +At this point, the patterns for testing and implementing sparse-directory +logic should be relatively stable. This phase focuses on updating some of +the most common builtins that use the index to operate as O(Populated). +Here is a potential list of commands that could be valuable to integrate +at this point: + +* `git commit` +* `git checkout` +* `git merge` +* `git rebase` + +Hopefully, commands such as `git merge` and `git rebase` can benefit +instead from merge algorithms that do not use the index as a data +structure, such as the merge-ORT strategy. As these topics mature, we +may enable the ORT strategy by default for repositories using the +sparse-index feature. + +Along with `git status` and `git add`, these commands cover the majority +of users' interactions with the working directory. In addition, we can +integrate with these commands: + +* `git grep` +* `git rm` + +These have been proposed as some whose behavior could change when in a +repo with a sparse-checkout definition. It would be good to include this +behavior automatically when using a sparse-index. 
Some clarity is needed +to make the behavior switch clear to the user. + +This phase is the first where parallel work might be possible without too +much conflicts between topics. + +Phase IV: The long tail +----------------------- + +This last phase is less a "phase" and more "the new normal" after all of +the previous work. + +To start, the `command_requires_full_index` option could be removed in +favor of expanding only when hitting an API guard. + +There are many Git commands that could use special attention to operate as +O(Populated), while some might be so rare that it is acceptable to leave +them with additional overhead when a sparse-index is present. + +Here are some commands that might be useful to update: + +* `git sparse-checkout set` +* `git am` +* `git clean` +* `git stash` @@ -948,6 +948,7 @@ LIB_OBJS += pack-revindex.o LIB_OBJS += pack-write.o LIB_OBJS += packfile.o LIB_OBJS += pager.o +LIB_OBJS += parallel-checkout.o LIB_OBJS += parse-options-cb.o LIB_OBJS += parse-options.o LIB_OBJS += patch-delta.o @@ -995,6 +996,7 @@ LIB_OBJS += setup.o LIB_OBJS += shallow.o LIB_OBJS += sideband.o LIB_OBJS += sigchain.o +LIB_OBJS += sparse-index.o LIB_OBJS += split-index.o LIB_OBJS += stable-qsort.o LIB_OBJS += strbuf.o @@ -1063,6 +1065,7 @@ BUILTIN_OBJS += builtin/check-attr.o BUILTIN_OBJS += builtin/check-ignore.o BUILTIN_OBJS += builtin/check-mailmap.o BUILTIN_OBJS += builtin/check-ref-format.o +BUILTIN_OBJS += builtin/checkout--worker.o BUILTIN_OBJS += builtin/checkout-index.o BUILTIN_OBJS += builtin/checkout.o BUILTIN_OBJS += builtin/clean.o @@ -733,7 +733,7 @@ static struct attr_stack *read_attr_from_file(const char *path, unsigned flags) return res; } -static struct attr_stack *read_attr_from_index(const struct index_state *istate, +static struct attr_stack *read_attr_from_index(struct index_state *istate, const char *path, unsigned flags) { @@ -763,7 +763,7 @@ static struct attr_stack *read_attr_from_index(const struct index_state *istate, return res; } -static struct attr_stack *read_attr(const struct index_state *istate, +static struct attr_stack *read_attr(struct index_state *istate, const char *path, unsigned flags) { struct attr_stack *res = NULL; @@ -855,7 +855,7 @@ static void push_stack(struct attr_stack **attr_stack_p, } } -static void bootstrap_attr_stack(const struct index_state *istate, +static void bootstrap_attr_stack(struct index_state *istate, struct attr_stack **stack) { struct attr_stack *e; @@ -894,7 +894,7 @@ static void bootstrap_attr_stack(const struct index_state *istate, push_stack(stack, e, NULL, 0); } -static void prepare_attr_stack(const struct index_state *istate, +static void prepare_attr_stack(struct index_state *istate, const char *path, int dirlen, struct attr_stack **stack) { @@ -1094,7 +1094,7 @@ static void determine_macros(struct all_attrs_item *all_attrs, * If check->check_nr is non-zero, only attributes in check[] are collected. * Otherwise all attributes are collected. 
*/ -static void collect_some_attrs(const struct index_state *istate, +static void collect_some_attrs(struct index_state *istate, const char *path, struct attr_check *check) { @@ -1123,7 +1123,7 @@ static void collect_some_attrs(const struct index_state *istate, fill(path, pathlen, basename_offset, check->stack, check->all_attrs, rem); } -void git_check_attr(const struct index_state *istate, +void git_check_attr(struct index_state *istate, const char *path, struct attr_check *check) { @@ -1140,7 +1140,7 @@ void git_check_attr(const struct index_state *istate, } } -void git_all_attrs(const struct index_state *istate, +void git_all_attrs(struct index_state *istate, const char *path, struct attr_check *check) { int i; @@ -190,14 +190,14 @@ void attr_check_free(struct attr_check *check); */ const char *git_attr_name(const struct git_attr *); -void git_check_attr(const struct index_state *istate, +void git_check_attr(struct index_state *istate, const char *path, struct attr_check *check); /* * Retrieve all attributes that apply to the specified path. * check holds the attributes and their values. */ -void git_all_attrs(const struct index_state *istate, +void git_all_attrs(struct index_state *istate, const char *path, struct attr_check *check); enum git_attr_direction { @@ -123,6 +123,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix); int cmd_bundle(int argc, const char **argv, const char *prefix); int cmd_cat_file(int argc, const char **argv, const char *prefix); int cmd_checkout(int argc, const char **argv, const char *prefix); +int cmd_checkout__worker(int argc, const char **argv, const char *prefix); int cmd_checkout_index(int argc, const char **argv, const char *prefix); int cmd_check_attr(int argc, const char **argv, const char *prefix); int cmd_check_ignore(int argc, const char **argv, const char *prefix); diff --git a/builtin/add.c b/builtin/add.c index ea762a41e3..afccf2fd55 100644 --- a/builtin/add.c +++ b/builtin/add.c @@ -141,6 +141,8 @@ static int renormalize_tracked_files(const struct pathspec *pathspec, int flags) { int i, retval = 0; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) { struct cache_entry *ce = active_cache[i]; diff --git a/builtin/checkout--worker.c b/builtin/checkout--worker.c new file mode 100644 index 0000000000..31e0de2f7e --- /dev/null +++ b/builtin/checkout--worker.c @@ -0,0 +1,145 @@ +#include "builtin.h" +#include "config.h" +#include "entry.h" +#include "parallel-checkout.h" +#include "parse-options.h" +#include "pkt-line.h" + +static void packet_to_pc_item(const char *buffer, int len, + struct parallel_checkout_item *pc_item) +{ + const struct pc_item_fixed_portion *fixed_portion; + const char *variant; + char *encoding; + + if (len < sizeof(struct pc_item_fixed_portion)) + BUG("checkout worker received too short item (got %dB, exp %dB)", + len, (int)sizeof(struct pc_item_fixed_portion)); + + fixed_portion = (struct pc_item_fixed_portion *)buffer; + + if (len - sizeof(struct pc_item_fixed_portion) != + fixed_portion->name_len + fixed_portion->working_tree_encoding_len) + BUG("checkout worker received corrupted item"); + + variant = buffer + sizeof(struct pc_item_fixed_portion); + + /* + * Note: the main process uses zero length to communicate that the + * encoding is NULL. There is no use case that requires sending an + * actual empty string, since convert_attrs() never sets + * ca.working_tree_enconding to "". 
+ */ + if (fixed_portion->working_tree_encoding_len) { + encoding = xmemdupz(variant, + fixed_portion->working_tree_encoding_len); + variant += fixed_portion->working_tree_encoding_len; + } else { + encoding = NULL; + } + + memset(pc_item, 0, sizeof(*pc_item)); + pc_item->ce = make_empty_transient_cache_entry(fixed_portion->name_len); + pc_item->ce->ce_namelen = fixed_portion->name_len; + pc_item->ce->ce_mode = fixed_portion->ce_mode; + memcpy(pc_item->ce->name, variant, pc_item->ce->ce_namelen); + oidcpy(&pc_item->ce->oid, &fixed_portion->oid); + + pc_item->id = fixed_portion->id; + pc_item->ca.crlf_action = fixed_portion->crlf_action; + pc_item->ca.ident = fixed_portion->ident; + pc_item->ca.working_tree_encoding = encoding; +} + +static void report_result(struct parallel_checkout_item *pc_item) +{ + struct pc_item_result res; + size_t size; + + res.id = pc_item->id; + res.status = pc_item->status; + + if (pc_item->status == PC_ITEM_WRITTEN) { + res.st = pc_item->st; + size = sizeof(res); + } else { + size = PC_ITEM_RESULT_BASE_SIZE; + } + + packet_write(1, (const char *)&res, size); +} + +/* Free the worker-side malloced data, but not pc_item itself. */ +static void release_pc_item_data(struct parallel_checkout_item *pc_item) +{ + free((char *)pc_item->ca.working_tree_encoding); + discard_cache_entry(pc_item->ce); +} + +static void worker_loop(struct checkout *state) +{ + struct parallel_checkout_item *items = NULL; + size_t i, nr = 0, alloc = 0; + + while (1) { + int len = packet_read(0, NULL, NULL, packet_buffer, + sizeof(packet_buffer), 0); + + if (len < 0) + BUG("packet_read() returned negative value"); + else if (!len) + break; + + ALLOC_GROW(items, nr + 1, alloc); + packet_to_pc_item(packet_buffer, len, &items[nr++]); + } + + for (i = 0; i < nr; i++) { + struct parallel_checkout_item *pc_item = &items[i]; + write_pc_item(pc_item, state); + report_result(pc_item); + release_pc_item_data(pc_item); + } + + packet_flush(1); + + free(items); +} + +static const char * const checkout_worker_usage[] = { + N_("git checkout--worker [<options>]"), + NULL +}; + +int cmd_checkout__worker(int argc, const char **argv, const char *prefix) +{ + struct checkout state = CHECKOUT_INIT; + struct option checkout_worker_options[] = { + OPT_STRING(0, "prefix", &state.base_dir, N_("string"), + N_("when creating files, prepend <string>")), + OPT_END() + }; + + if (argc == 2 && !strcmp(argv[1], "-h")) + usage_with_options(checkout_worker_usage, + checkout_worker_options); + + git_config(git_default_config, NULL); + argc = parse_options(argc, argv, prefix, checkout_worker_options, + checkout_worker_usage, 0); + if (argc > 0) + usage_with_options(checkout_worker_usage, checkout_worker_options); + + if (state.base_dir) + state.base_dir_len = strlen(state.base_dir); + + /* + * Setting this on a worker won't actually update the index. We just + * need to tell the checkout machinery to lstat() the written entries, + * so that we can send this data back to the main process. + */ + state.refresh_cache = 1; + + worker_loop(&state); + return 0; +} diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c index c0bf4ac1b2..c9a3c71914 100644 --- a/builtin/checkout-index.c +++ b/builtin/checkout-index.c @@ -120,6 +120,8 @@ static void checkout_all(const char *prefix, int prefix_length) int i, errs = 0; struct cache_entry *last_ce = NULL; + /* TODO: audit for interaction with sparse-index. 
*/ + ensure_full_index(&the_index); for (i = 0; i < active_nr ; i++) { struct cache_entry *ce = active_cache[i]; if (ce_stage(ce) != checkout_stage diff --git a/builtin/checkout.c b/builtin/checkout.c index 4c696ef480..5bd9128d1a 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -369,6 +369,9 @@ static int checkout_worktree(const struct checkout_opts *opts, NULL); enable_delayed_checkout(&state); + + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (pos = 0; pos < active_nr; pos++) { struct cache_entry *ce = active_cache[pos]; if (ce->ce_flags & CE_MATCHED) { @@ -513,6 +516,8 @@ static int checkout_paths(const struct checkout_opts *opts, * Make sure all pathspecs participated in locating the paths * to be checked out. */ + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (pos = 0; pos < active_nr; pos++) if (opts->overlay_mode) mark_ce_for_checkout_overlay(active_cache[pos], diff --git a/builtin/commit.c b/builtin/commit.c index 55d50a8891..190d215d43 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -261,6 +261,8 @@ static int list_paths(struct string_list *list, const char *with_tree, free(max_prefix); } + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; struct string_list_item *item; @@ -976,6 +978,8 @@ static int prepare_to_commit(const char *index_file, const char *prefix, if (get_oid(parent, &oid)) { int i, ita_nr = 0; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) if (ce_intent_to_add(active_cache[i])) ita_nr++; diff --git a/builtin/difftool.c b/builtin/difftool.c index ef25729d49..0202a43052 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -585,6 +585,9 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, setenv("GIT_DIFFTOOL_DIRDIFF", "true", 1); rc = run_command_v_opt(helper_argv, flags); + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&wtindex); + /* * If the diff includes working copy files and those * files were modified during the diff, then the changes diff --git a/builtin/fetch.c b/builtin/fetch.c index 0b90de87c7..97c4fe6e6d 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -48,6 +48,7 @@ enum { static int fetch_prune_config = -1; /* unspecified */ static int fetch_show_forced_updates = 1; static uint64_t forced_updates_ms = 0; +static int prefetch = 0; static int prune = -1; /* unspecified */ #define PRUNE_BY_DEFAULT 0 /* do we prune by default? 
*/ @@ -158,6 +159,8 @@ static struct option builtin_fetch_options[] = { N_("do not fetch all tags (--no-tags)"), TAGS_UNSET), OPT_INTEGER('j', "jobs", &max_jobs, N_("number of submodules fetched in parallel")), + OPT_BOOL(0, "prefetch", &prefetch, + N_("modify the refspec to place all refs within refs/prefetch/")), OPT_BOOL('p', "prune", &prune, N_("prune remote-tracking branches no longer on remote")), OPT_BOOL('P', "prune-tags", &prune_tags, @@ -436,6 +439,56 @@ static void find_non_local_tags(const struct ref *refs, oidset_clear(&fetch_oids); } +static void filter_prefetch_refspec(struct refspec *rs) +{ + int i; + + if (!prefetch) + return; + + for (i = 0; i < rs->nr; i++) { + struct strbuf new_dst = STRBUF_INIT; + char *old_dst; + const char *sub = NULL; + + if (rs->items[i].negative) + continue; + if (!rs->items[i].dst || + (rs->items[i].src && + !strncmp(rs->items[i].src, "refs/tags/", 10))) { + int j; + + free(rs->items[i].src); + free(rs->items[i].dst); + + for (j = i + 1; j < rs->nr; j++) { + rs->items[j - 1] = rs->items[j]; + rs->raw[j - 1] = rs->raw[j]; + } + rs->nr--; + i--; + continue; + } + + old_dst = rs->items[i].dst; + strbuf_addstr(&new_dst, "refs/prefetch/"); + + /* + * If old_dst starts with "refs/", then place + * sub after that prefix. Otherwise, start at + * the beginning of the string. + */ + if (!skip_prefix(old_dst, "refs/", &sub)) + sub = old_dst; + strbuf_addstr(&new_dst, sub); + + rs->items[i].dst = strbuf_detach(&new_dst, NULL); + rs->items[i].force = 1; + + free(old_dst); + } +} + static struct ref *get_ref_map(struct remote *remote, const struct ref *remote_refs, struct refspec *rs, @@ -452,6 +505,10 @@ static struct ref *get_ref_map(struct remote *remote, struct hashmap existing_refs; int existing_refs_populated = 0; + filter_prefetch_refspec(rs); + if (remote) + filter_prefetch_refspec(&remote->fetch); + if (rs->nr) { struct refspec *fetch_refspec; @@ -520,7 +577,7 @@ static struct ref *get_ref_map(struct remote *remote, if (has_merge && !strcmp(branch->remote_name, remote->name)) add_merge_config(&ref_map, remote_refs, branch, &tail); - } else { + } else if (!prefetch) { ref_map = get_remote_ref(remote_refs, "HEAD"); if (!ref_map) die(_("Couldn't find remote ref HEAD")); diff --git a/builtin/fsck.c b/builtin/fsck.c index 70ff95837a..87a99b0108 100644 --- a/builtin/fsck.c +++ b/builtin/fsck.c @@ -725,7 +725,7 @@ static int fsck_cache_tree(struct cache_tree *it) static void mark_object_for_connectivity(const struct object_id *oid) { - struct object *obj = lookup_unknown_object(oid); + struct object *obj = lookup_unknown_object(the_repository, oid); obj->flags |= HAS_OBJ; } @@ -881,6 +881,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) verify_index_checksum = 1; verify_ce_order = 1; read_cache(); + /* TODO: audit for interaction with sparse-index. 
*/ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) { unsigned int mode; struct blob *blob; diff --git a/builtin/gc.c b/builtin/gc.c index ef7226d7bc..98a803196b 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -873,55 +873,40 @@ static int maintenance_task_commit_graph(struct maintenance_run_opts *opts) return 0; } -static int fetch_remote(const char *remote, struct maintenance_run_opts *opts) +static int fetch_remote(struct remote *remote, void *cbdata) { + struct maintenance_run_opts *opts = cbdata; struct child_process child = CHILD_PROCESS_INIT; + if (remote->skip_default_update) + return 0; + child.git_cmd = 1; - strvec_pushl(&child.args, "fetch", remote, "--prune", "--no-tags", + strvec_pushl(&child.args, "fetch", remote->name, + "--prefetch", "--prune", "--no-tags", "--no-write-fetch-head", "--recurse-submodules=no", - "--refmap=", NULL); + NULL); if (opts->quiet) strvec_push(&child.args, "--quiet"); - strvec_pushf(&child.args, "+refs/heads/*:refs/prefetch/%s/*", remote); - return !!run_command(&child); } -static int append_remote(struct remote *remote, void *cbdata) -{ - struct string_list *remotes = (struct string_list *)cbdata; - - string_list_append(remotes, remote->name); - return 0; -} - static int maintenance_task_prefetch(struct maintenance_run_opts *opts) { - int result = 0; - struct string_list_item *item; - struct string_list remotes = STRING_LIST_INIT_DUP; - git_config_set_multivar_gently("log.excludedecoration", "refs/prefetch/", "refs/prefetch/", CONFIG_FLAGS_FIXED_VALUE | CONFIG_FLAGS_MULTI_REPLACE); - if (for_each_remote(append_remote, &remotes)) { - error(_("failed to fill remotes")); - result = 1; - goto cleanup; + if (for_each_remote(fetch_remote, opts)) { + error(_("failed to prefetch remotes")); + return 1; } - for_each_string_list_item(item, &remotes) - result |= fetch_remote(item->string, opts); - -cleanup: - string_list_clear(&remotes, 0); - return result; + return 0; } static int maintenance_task_gc(struct maintenance_run_opts *opts) diff --git a/builtin/grep.c b/builtin/grep.c index 5de725f904..b71b4a2de6 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -504,6 +504,8 @@ static int grep_cache(struct grep_opt *opt, if (repo_read_index(repo) < 0) die(_("index file corrupt")); + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(repo->index); for (nr = 0; nr < repo->index->cache_nr; nr++) { const struct cache_entry *ce = repo->index->cache[nr]; diff --git a/builtin/log.c b/builtin/log.c index 8acd285daf..6102893fcc 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -481,6 +481,8 @@ static int git_log_config(const char *var, const char *value, void *cb) decoration_style = 0; /* maybe warn? 
*/ return 0; } + if (!strcmp(var, "log.diffmerges")) + return diff_merges_config(value); if (!strcmp(var, "log.showroot")) { default_show_root = git_config_bool(var, value); return 0; diff --git a/builtin/ls-files.c b/builtin/ls-files.c index 60a2913a01..a0b4e54d11 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -57,7 +57,7 @@ static const char *tag_modified = ""; static const char *tag_skip_worktree = ""; static const char *tag_resolve_undo = ""; -static void write_eolinfo(const struct index_state *istate, +static void write_eolinfo(struct index_state *istate, const struct cache_entry *ce, const char *path) { if (show_eol) { @@ -122,7 +122,7 @@ static void print_debug(const struct cache_entry *ce) } } -static void show_dir_entry(const struct index_state *istate, +static void show_dir_entry(struct index_state *istate, const char *tag, struct dir_entry *ent) { int len = max_prefix_len; @@ -139,7 +139,7 @@ static void show_dir_entry(const struct index_state *istate, write_name(ent->name); } -static void show_other_files(const struct index_state *istate, +static void show_other_files(struct index_state *istate, const struct dir_struct *dir) { int i; @@ -152,7 +152,7 @@ static void show_other_files(const struct index_state *istate, } } -static void show_killed_files(const struct index_state *istate, +static void show_killed_files(struct index_state *istate, const struct dir_struct *dir) { int i; @@ -254,7 +254,7 @@ static void show_ce(struct repository *repo, struct dir_struct *dir, } } -static void show_ru_info(const struct index_state *istate) +static void show_ru_info(struct index_state *istate) { struct string_list_item *item; @@ -317,6 +317,8 @@ static void show_files(struct repository *repo, struct dir_struct *dir) if (!(show_cached || show_stage || show_deleted || show_modified)) return; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(repo->index); for (i = 0; i < repo->index->cache_nr; i++) { const struct cache_entry *ce = repo->index->cache[i]; struct stat st; @@ -494,6 +496,8 @@ void overlay_tree_on_index(struct index_state *istate, die("bad tree-ish %s", tree_name); /* Hoist the unmerged entries up to stage #3 to make room */ + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce = istate->cache[i]; if (!ce_stage(ce)) diff --git a/builtin/merge-index.c b/builtin/merge-index.c index 38ea6ad6ca..c0383fe9df 100644 --- a/builtin/merge-index.c +++ b/builtin/merge-index.c @@ -58,6 +58,8 @@ static void merge_one_path(const char *path) static void merge_all(void) { int i; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; if (!ce_stage(ce)) @@ -80,6 +82,9 @@ int cmd_merge_index(int argc, const char **argv, const char *prefix) read_cache(); + /* TODO: audit for interaction with sparse-index. 
*/ + ensure_full_index(&the_index); + i = 1; if (!strcmp(argv[i], "-o")) { one_shot = 1; diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 247a08d024..6d13cd3e1a 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -3386,7 +3386,7 @@ static void add_objects_in_unpacked_packs(void) for (i = 0; i < p->num_objects; i++) { nth_packed_object_id(&oid, p, i); - o = lookup_unknown_object(&oid); + o = lookup_unknown_object(the_repository, &oid); if (!(o->flags & OBJECT_ADDED)) mark_in_pack_object(o, p, &in_pack); o->flags |= OBJECT_ADDED; diff --git a/builtin/rm.c b/builtin/rm.c index 4858631e0f..5559a0b453 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -293,6 +293,8 @@ int cmd_rm(int argc, const char **argv, const char *prefix) seen = xcalloc(pathspec.nr, 1); + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; if (!ce_path_match(&the_index, ce, &pathspec, seen)) diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c index d7da50ada5..a4bdd7c494 100644 --- a/builtin/sparse-checkout.c +++ b/builtin/sparse-checkout.c @@ -14,6 +14,7 @@ #include "unpack-trees.h" #include "wt-status.h" #include "quote.h" +#include "sparse-index.h" static const char *empty_base = ""; @@ -110,6 +111,8 @@ static int update_working_directory(struct pattern_list *pl) if (is_index_unborn(r->index)) return UPDATE_SPARSITY_SUCCESS; + r->index->sparse_checkout_patterns = pl; + memset(&o, 0, sizeof(o)); o.verbose_update = isatty(2); o.update = 1; @@ -138,6 +141,7 @@ static int update_working_directory(struct pattern_list *pl) else rollback_lock_file(&lock_file); + r->index->sparse_checkout_patterns = NULL; return result; } @@ -276,16 +280,20 @@ static int set_config(enum sparse_checkout_mode mode) "core.sparseCheckoutCone", mode == MODE_CONE_PATTERNS ? "true" : NULL); + if (mode == MODE_NO_PATTERNS) + set_sparse_index_config(the_repository, 0); + return 0; } static char const * const builtin_sparse_checkout_init_usage[] = { - N_("git sparse-checkout init [--cone]"), + N_("git sparse-checkout init [--cone] [--[no-]sparse-index]"), NULL }; static struct sparse_checkout_init_opts { int cone_mode; + int sparse_index; } init_opts; static int sparse_checkout_init(int argc, const char **argv) @@ -300,11 +308,15 @@ static int sparse_checkout_init(int argc, const char **argv) static struct option builtin_sparse_checkout_init_options[] = { OPT_BOOL(0, "cone", &init_opts.cone_mode, N_("initialize the sparse-checkout in cone mode")), + OPT_BOOL(0, "sparse-index", &init_opts.sparse_index, + N_("toggle the use of a sparse index")), OPT_END(), }; repo_read_index(the_repository); + init_opts.sparse_index = -1; + argc = parse_options(argc, argv, NULL, builtin_sparse_checkout_init_options, builtin_sparse_checkout_init_usage, 0); @@ -323,10 +335,20 @@ static int sparse_checkout_init(int argc, const char **argv) sparse_filename = get_sparse_checkout_filename(); res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL, 0); + if (init_opts.sparse_index >= 0) { + if (set_sparse_index_config(the_repository, init_opts.sparse_index) < 0) + die(_("failed to modify sparse-index config")); + + /* force an index rewrite */ + repo_read_index(the_repository); + the_repository->index->updated_workdir = 1; + } + + core_apply_sparse_checkout = 1; + /* If we already have a sparse-checkout file, use it. 
*/ if (res >= 0) { free(sparse_filename); - core_apply_sparse_checkout = 1; return update_working_directory(NULL); } @@ -348,6 +370,7 @@ static int sparse_checkout_init(int argc, const char **argv) add_pattern(strbuf_detach(&pattern, NULL), empty_base, 0, &pl, 0); strbuf_addstr(&pattern, "!/*/"); add_pattern(strbuf_detach(&pattern, NULL), empty_base, 0, &pl, 0); + pl.use_cone_patterns = init_opts.cone_mode; return write_patterns_and_update(&pl); } @@ -517,19 +540,18 @@ static int modify_pattern_list(int argc, const char **argv, enum modify_type m) { int result; int changed_config = 0; - struct pattern_list pl; - memset(&pl, 0, sizeof(pl)); + struct pattern_list *pl = xcalloc(1, sizeof(*pl)); switch (m) { case ADD: if (core_sparse_checkout_cone) - add_patterns_cone_mode(argc, argv, &pl); + add_patterns_cone_mode(argc, argv, pl); else - add_patterns_literal(argc, argv, &pl); + add_patterns_literal(argc, argv, pl); break; case REPLACE: - add_patterns_from_input(&pl, argc, argv); + add_patterns_from_input(pl, argc, argv); break; } @@ -539,12 +561,13 @@ static int modify_pattern_list(int argc, const char **argv, enum modify_type m) changed_config = 1; } - result = write_patterns_and_update(&pl); + result = write_patterns_and_update(pl); if (result && changed_config) set_config(MODE_NO_PATTERNS); - clear_pattern_list(&pl); + clear_pattern_list(pl); + free(pl); return result; } @@ -614,6 +637,9 @@ static int sparse_checkout_disable(int argc, const char **argv) strbuf_addstr(&match_all, "/*"); add_pattern(strbuf_detach(&match_all, NULL), empty_base, 0, &pl, 0); + prepare_repo_settings(the_repository); + the_repository->settings.sparse_index = 0; + if (update_working_directory(&pl)) die(_("error while refreshing working directory")); diff --git a/builtin/stash.c b/builtin/stash.c index c56fed3354..d68ed784d2 100644 --- a/builtin/stash.c +++ b/builtin/stash.c @@ -1412,6 +1412,8 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q int i; char *ps_matched = xcalloc(ps->nr, 1); + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) ce_path_match(&the_index, active_cache[i], ps, ps_matched); diff --git a/builtin/update-index.c b/builtin/update-index.c index 79087bccea..f1f16f2de5 100644 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@ -745,6 +745,8 @@ static int do_reupdate(int ac, const char **av, */ has_head = 0; redo: + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); for (pos = 0; pos < active_nr; pos++) { const struct cache_entry *ce = active_cache[pos]; struct cache_entry *old = NULL; diff --git a/cache-tree.c b/cache-tree.c index add1f07713..45e58666af 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -6,6 +6,7 @@ #include "object-store.h" #include "replace-object.h" #include "promisor-remote.h" +#include "sparse-index.h" #ifndef DEBUG_CACHE_TREE #define DEBUG_CACHE_TREE 0 @@ -255,6 +256,24 @@ static int update_one(struct cache_tree *it, *skip_count = 0; + /* + * If the first entry of this region is a sparse directory + * entry corresponding exactly to 'base', then this cache_tree + * struct is a "leaf" in the data structure, pointing to the + * tree OID specified in the entry. 
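
The comment just above describes the new leaf case in update_one(): a region whose first entry is a sparse-directory entry named exactly like the base path is already summarized by a single tree OID, so no recursion is needed. A simplified sketch of the check it goes on to implement, using invented stand-in types rather than Git's cache_entry and cache_tree:

#include <stdio.h>
#include <string.h>

/* Invented stand-in for a cache entry; illustration only. */
struct entry {
    const char *name;
    size_t namelen;
    int is_sparse_dir;
    const char *tree_oid;    /* hex OID of the tree, for the sketch */
};

/*
 * If the first entry of the region is a sparse-directory entry whose
 * name is exactly 'base', the whole region is represented by that one
 * tree: report it as a leaf instead of recursing.
 */
static const char *leaf_tree(const struct entry *first,
                             const char *base, size_t baselen)
{
    if (first->is_sparse_dir &&
        first->namelen == baselen &&
        !strncmp(first->name, base, baselen))
        return first->tree_oid;
    return NULL;
}

int main(void)
{
    struct entry e = { "libs/", 5, 1,
                       "4b825dc642cb6eb9a060e54bf8d69288fbee4904" };
    const char *oid = leaf_tree(&e, "libs/", 5);

    printf("%s\n", oid ? oid : "not a leaf, recurse");
    return 0;
}
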
+ */ + if (entries > 0) { + const struct cache_entry *ce = cache[0]; + + if (S_ISSPARSEDIR(ce->ce_mode) && + ce->ce_namelen == baselen && + !strncmp(ce->name, base, baselen)) { + it->entry_count = 1; + oidcpy(&it->oid, &ce->oid); + return 1; + } + } + if (0 <= it->entry_count && has_object_file(&it->oid)) return it->entry_count; @@ -442,6 +461,8 @@ int cache_tree_update(struct index_state *istate, int flags) if (i) return i; + ensure_full_index(istate); + if (!istate->cache_tree) istate->cache_tree = cache_tree(); @@ -787,6 +808,19 @@ int cache_tree_matches_traversal(struct cache_tree *root, return 0; } +static void verify_one_sparse(struct repository *r, + struct index_state *istate, + struct cache_tree *it, + struct strbuf *path, + int pos) +{ + struct cache_entry *ce = istate->cache[pos]; + + if (!S_ISSPARSEDIR(ce->ce_mode)) + BUG("directory '%s' is present in index, but not sparse", + path->buf); +} + static void verify_one(struct repository *r, struct index_state *istate, struct cache_tree *it, @@ -809,6 +843,12 @@ static void verify_one(struct repository *r, if (path->len) { pos = index_name_pos(istate, path->buf, path->len); + + if (pos >= 0) { + verify_one_sparse(r, istate, it, path, pos); + return; + } + pos = -pos - 1; } else { pos = 0; @@ -204,6 +204,8 @@ struct cache_entry { #error "CE_EXTENDED_FLAGS out of range" #endif +#define S_ISSPARSEDIR(m) ((m) == S_IFDIR) + /* Forward structure decls */ struct pathspec; struct child_process; @@ -249,6 +251,8 @@ static inline unsigned int create_ce_mode(unsigned int mode) { if (S_ISLNK(mode)) return S_IFLNK; + if (S_ISSPARSEDIR(mode)) + return S_IFDIR; if (S_ISDIR(mode) || S_ISGITLINK(mode)) return S_IFGITLINK; return S_IFREG | ce_permissions(mode); @@ -305,6 +309,7 @@ static inline unsigned int canon_mode(unsigned int mode) struct split_index; struct untracked_cache; struct progress; +struct pattern_list; struct index_state { struct cache_entry **cache; @@ -319,7 +324,14 @@ struct index_state { drop_cache_tree : 1, updated_workdir : 1, updated_skipworktree : 1, - fsmonitor_has_run_once : 1; + fsmonitor_has_run_once : 1, + + /* + * sparse_index == 1 when sparse-directory + * entries exist. Requires sparse-checkout + * in cone mode. + */ + sparse_index : 1; struct hashmap name_hash; struct hashmap dir_hash; struct object_id oid; @@ -329,6 +341,7 @@ struct index_state { struct mem_pool *ce_mem_pool; struct progress *progress; struct repository *repo; + struct pattern_list *sparse_checkout_patterns; }; /* Name hashing */ @@ -337,6 +350,7 @@ void add_name_hash(struct index_state *istate, struct cache_entry *ce); void remove_name_hash(struct index_state *istate, struct cache_entry *ce); void free_name_hash(struct index_state *istate); +void ensure_full_index(struct index_state *istate); /* Cache entry creation and cleanup */ @@ -722,6 +736,8 @@ int read_index_from(struct index_state *, const char *path, const char *gitdir); int is_index_unborn(struct index_state *); +void ensure_full_index(struct index_state *istate); + /* For use with `write_locked_index()`. 
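
S_ISSPARSEDIR() above treats a mode of exactly S_IFDIR as a sparse-directory entry, and create_ce_mode() has to test for it before the gitlink case so such entries are not canonicalized like submodules. A rough standalone approximation of that ordering; canon() is a simplified stand-in, not the real create_ce_mode().

#include <stdio.h>
#include <sys/stat.h>

/* Mirrors the new cache.h macro: a sparse-directory entry records S_IFDIR. */
#define IS_SPARSEDIR(m) ((m) == S_IFDIR)

/*
 * Simplified mode canonicalization: the sparse-directory check must come
 * before the directory/gitlink check, otherwise a sparse directory would
 * be turned into a gitlink like a submodule.
 */
static unsigned int canon(unsigned int mode)
{
    if (S_ISLNK(mode))
        return S_IFLNK;
    if (IS_SPARSEDIR(mode))
        return S_IFDIR;
    if (S_ISDIR(mode))           /* submodule (gitlink) in the real code */
        return 0160000;          /* S_IFGITLINK */
    return S_IFREG | 0644;
}

int main(void)
{
    printf("sparse dir -> %07o\n", canon(S_IFDIR));
    printf("file       -> %07o\n", canon(S_IFREG | 0644));
    return 0;
}
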
*/ #define COMMIT_LOCK (1 << 0) #define SKIP_IF_UNCHANGED (1 << 1) @@ -785,7 +801,7 @@ struct cache_entry *index_file_exists(struct index_state *istate, const char *na * index_name_pos(&index, "f", 1) -> -3 * index_name_pos(&index, "g", 1) -> -5 */ -int index_name_pos(const struct index_state *, const char *name, int namelen); +int index_name_pos(struct index_state *, const char *name, int namelen); /* * Some functions return the negative complement of an insert position when a @@ -835,8 +851,8 @@ int add_file_to_index(struct index_state *, const char *path, int flags); int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip); int ce_same_name(const struct cache_entry *a, const struct cache_entry *b); void set_object_name_for_intent_to_add_entry(struct cache_entry *ce); -int index_name_is_other(const struct index_state *, const char *, int); -void *read_blob_data_from_index(const struct index_state *, const char *, unsigned long *); +int index_name_is_other(struct index_state *, const char *, int); +void *read_blob_data_from_index(struct index_state *, const char *, unsigned long *); /* do stat comparison even if CE_VALID is true */ #define CE_MATCH_IGNORE_VALID 01 @@ -1044,6 +1060,7 @@ struct repository_format { int worktree_config; int is_bare; int hash_algo; + int sparse_index; char *work_tree; struct string_list unknown_extensions; struct string_list v1_only_extensions; diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index dfa735ea62..49e76e9d08 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -1129,7 +1129,7 @@ __git_pretty_aliases () # __git_aliased_command requires 1 argument __git_aliased_command () { - local cur=$1 last list word cmdline + local cur=$1 last list= word cmdline while [[ -n "$cur" ]]; do if [[ "$list" == *" $cur "* ]]; then @@ -127,7 +127,7 @@ static const char *gather_convert_stats_ascii(const char *data, unsigned long si } } -const char *get_cached_convert_stats_ascii(const struct index_state *istate, +const char *get_cached_convert_stats_ascii(struct index_state *istate, const char *path) { const char *ret; @@ -211,7 +211,7 @@ static void check_global_conv_flags_eol(const char *path, } } -static int has_crlf_in_index(const struct index_state *istate, const char *path) +static int has_crlf_in_index(struct index_state *istate, const char *path) { unsigned long sz; void *data; @@ -485,7 +485,7 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len, return 1; } -static int crlf_to_git(const struct index_state *istate, +static int crlf_to_git(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *buf, enum convert_crlf_action crlf_action, int conv_flags) @@ -1293,7 +1293,7 @@ static int git_path_check_ident(struct attr_check_item *check) static struct attr_check *check; -void convert_attrs(const struct index_state *istate, +void convert_attrs(struct index_state *istate, struct conv_attrs *ca, const char *path) { struct attr_check_item *ccheck = NULL; @@ -1355,7 +1355,7 @@ void reset_parsed_attributes(void) user_convert_tail = NULL; } -int would_convert_to_git_filter_fd(const struct index_state *istate, const char *path) +int would_convert_to_git_filter_fd(struct index_state *istate, const char *path) { struct conv_attrs ca; @@ -1374,7 +1374,7 @@ int would_convert_to_git_filter_fd(const struct index_state *istate, const char return apply_filter(path, NULL, 0, -1, NULL, ca.drv, CAP_CLEAN, 
NULL, NULL); } -const char *get_convert_attr_ascii(const struct index_state *istate, const char *path) +const char *get_convert_attr_ascii(struct index_state *istate, const char *path) { struct conv_attrs ca; @@ -1400,7 +1400,7 @@ const char *get_convert_attr_ascii(const struct index_state *istate, const char return ""; } -int convert_to_git(const struct index_state *istate, +int convert_to_git(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst, int conv_flags) { @@ -1434,7 +1434,7 @@ int convert_to_git(const struct index_state *istate, return ret | ident_to_git(src, len, dst, ca.ident); } -void convert_to_git_filter_fd(const struct index_state *istate, +void convert_to_git_filter_fd(struct index_state *istate, const char *path, int fd, struct strbuf *dst, int conv_flags) { @@ -1511,7 +1511,7 @@ int convert_to_working_tree_ca(const struct conv_attrs *ca, meta, NULL); } -int renormalize_buffer(const struct index_state *istate, const char *path, +int renormalize_buffer(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst) { struct conv_attrs ca; @@ -1972,7 +1972,7 @@ struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca, return filter; } -struct stream_filter *get_stream_filter(const struct index_state *istate, +struct stream_filter *get_stream_filter(struct index_state *istate, const char *path, const struct object_id *oid) { @@ -84,19 +84,19 @@ struct conv_attrs { const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */ }; -void convert_attrs(const struct index_state *istate, +void convert_attrs(struct index_state *istate, struct conv_attrs *ca, const char *path); extern enum eol core_eol; extern char *check_roundtrip_encoding; -const char *get_cached_convert_stats_ascii(const struct index_state *istate, +const char *get_cached_convert_stats_ascii(struct index_state *istate, const char *path); const char *get_wt_convert_stats_ascii(const char *path); -const char *get_convert_attr_ascii(const struct index_state *istate, +const char *get_convert_attr_ascii(struct index_state *istate, const char *path); /* returns 1 if *dst was used */ -int convert_to_git(const struct index_state *istate, +int convert_to_git(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst, int conv_flags); int convert_to_working_tree_ca(const struct conv_attrs *ca, @@ -108,7 +108,7 @@ int async_convert_to_working_tree_ca(const struct conv_attrs *ca, size_t len, struct strbuf *dst, const struct checkout_metadata *meta, void *dco); -static inline int convert_to_working_tree(const struct index_state *istate, +static inline int convert_to_working_tree(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst, const struct checkout_metadata *meta) @@ -117,7 +117,7 @@ static inline int convert_to_working_tree(const struct index_state *istate, convert_attrs(istate, &ca, path); return convert_to_working_tree_ca(&ca, path, src, len, dst, meta); } -static inline int async_convert_to_working_tree(const struct index_state *istate, +static inline int async_convert_to_working_tree(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst, const struct checkout_metadata *meta, @@ -129,20 +129,20 @@ static inline int async_convert_to_working_tree(const struct index_state *istate } int async_query_available_blobs(const char *cmd, struct string_list *available_paths); -int renormalize_buffer(const 
struct index_state *istate, +int renormalize_buffer(struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst); -static inline int would_convert_to_git(const struct index_state *istate, +static inline int would_convert_to_git(struct index_state *istate, const char *path) { return convert_to_git(istate, path, NULL, 0, NULL, 0); } /* Precondition: would_convert_to_git_filter_fd(path) == true */ -void convert_to_git_filter_fd(const struct index_state *istate, +void convert_to_git_filter_fd(struct index_state *istate, const char *path, int fd, struct strbuf *dst, int conv_flags); -int would_convert_to_git_filter_fd(const struct index_state *istate, +int would_convert_to_git_filter_fd(struct index_state *istate, const char *path); /* @@ -176,7 +176,7 @@ void reset_parsed_attributes(void); struct stream_filter; /* opaque */ -struct stream_filter *get_stream_filter(const struct index_state *istate, +struct stream_filter *get_stream_filter(struct index_state *istate, const char *path, const struct object_id *); struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca, diff --git a/diff-merges.c b/diff-merges.c index 146bb50316..f3a9daed7e 100644 --- a/diff-merges.c +++ b/diff-merges.c @@ -2,6 +2,11 @@ #include "revision.h" +typedef void (*diff_merges_setup_func_t)(struct rev_info *); +static void set_separate(struct rev_info *revs); + +static diff_merges_setup_func_t set_to_default = set_separate; + static void suppress(struct rev_info *revs) { revs->separate_merges = 0; @@ -29,10 +34,10 @@ static void set_m(struct rev_info *revs) { /* * To "diff-index", "-m" means "match missing", and to the "log" - * family of commands, it means "show full diff for merges". Set + * family of commands, it means "show default diff for merges". Set * both fields appropriately. */ - set_separate(revs); + set_to_default(revs); revs->match_missing = 1; } @@ -50,33 +55,52 @@ static void set_dense_combined(struct rev_info *revs) revs->dense_combined_merges = 1; } -static void set_diff_merges(struct rev_info *revs, const char *optarg) +static diff_merges_setup_func_t func_by_opt(const char *optarg) { - if (!strcmp(optarg, "off") || !strcmp(optarg, "none")) { - suppress(revs); - /* Return early to leave revs->merges_need_diff unset */ - return; - } - + if (!strcmp(optarg, "off") || !strcmp(optarg, "none")) + return suppress; if (!strcmp(optarg, "1") || !strcmp(optarg, "first-parent")) - set_first_parent(revs); - else if (!strcmp(optarg, "m") || !strcmp(optarg, "separate")) - set_separate(revs); + return set_first_parent; + else if (!strcmp(optarg, "separate")) + return set_separate; else if (!strcmp(optarg, "c") || !strcmp(optarg, "combined")) - set_combined(revs); + return set_combined; else if (!strcmp(optarg, "cc") || !strcmp(optarg, "dense-combined")) - set_dense_combined(revs); - else + return set_dense_combined; + else if (!strcmp(optarg, "m") || !strcmp(optarg, "on")) + return set_to_default; + return NULL; +} + +static void set_diff_merges(struct rev_info *revs, const char *optarg) +{ + diff_merges_setup_func_t func = func_by_opt(optarg); + + if (!func) die(_("unknown value for --diff-merges: %s"), optarg); - /* The flag is cleared by set_xxx() functions, so don't move this up */ - revs->merges_need_diff = 1; + func(revs); + + /* NOTE: the merges_need_diff flag is cleared by func() call */ + if (func != suppress) + revs->merges_need_diff = 1; } /* * Public functions. They are in the order they are called. 
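
The diff-merges rewrite above replaces direct calls with a table of setup functions: func_by_opt() maps an option string to a handler, and set_to_default can be repointed so that "-m" and "--diff-merges=on" follow a configured style. A compilable sketch of that dispatch, with a trimmed-down option list and an invented struct revs:

#include <stdio.h>
#include <string.h>

/* Invented stand-in for rev_info; only the fields the sketch needs. */
struct revs { int separate, combined, first_parent; };

typedef void (*setup_fn)(struct revs *);

static void set_separate(struct revs *r)     { r->separate = 1; }
static void set_combined(struct revs *r)     { r->combined = 1; }
static void set_first_parent(struct revs *r) { r->first_parent = 1; r->separate = 1; }

/* "m" and "on" resolve to whatever the default currently points at. */
static setup_fn set_to_default = set_separate;

static setup_fn func_by_opt(const char *opt)
{
    if (!strcmp(opt, "separate"))
        return set_separate;
    if (!strcmp(opt, "c") || !strcmp(opt, "combined"))
        return set_combined;
    if (!strcmp(opt, "1") || !strcmp(opt, "first-parent"))
        return set_first_parent;
    if (!strcmp(opt, "m") || !strcmp(opt, "on"))
        return set_to_default;
    return NULL;
}

/* The configuration hook only repoints the default used by "on"/"m". */
static int config_default(const char *value)
{
    setup_fn fn = func_by_opt(value);

    if (!fn)
        return -1;
    set_to_default = fn;
    return 0;
}

int main(void)
{
    struct revs r = { 0 };

    config_default("first-parent");  /* e.g. log.diffMerges=first-parent */
    func_by_opt("on")(&r);           /* "on" now means first-parent */
    printf("first_parent=%d separate=%d combined=%d\n",
           r.first_parent, r.separate, r.combined);
    return 0;
}

Keeping the dispatch in one lookup function is what lets the configuration and the command line share the same validation and the same set of handlers.
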
*/ +int diff_merges_config(const char *value) +{ + diff_merges_setup_func_t func = func_by_opt(value); + + if (!func) + return -1; + + set_to_default = func; + return 0; +} + int diff_merges_parse_opts(struct rev_info *revs, const char **argv) { int argcount = 1; diff --git a/diff-merges.h b/diff-merges.h index 659467c99a..09d9a6c9a4 100644 --- a/diff-merges.h +++ b/diff-merges.h @@ -9,6 +9,8 @@ struct rev_info; +int diff_merges_config(const char *value); + int diff_merges_parse_opts(struct rev_info *revs, const char **argv); void diff_merges_suppress(struct rev_info *revs); @@ -306,7 +306,7 @@ static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat, * [1] Only if DO_MATCH_DIRECTORY is passed; otherwise, this is NOT a match. * [2] Only if DO_MATCH_LEADING_PATHSPEC is passed; otherwise, not a match. */ -static int match_pathspec_item(const struct index_state *istate, +static int match_pathspec_item(struct index_state *istate, const struct pathspec_item *item, int prefix, const char *name, int namelen, unsigned flags) { @@ -429,7 +429,7 @@ static int match_pathspec_item(const struct index_state *istate, * pathspec did not match any names, which could indicate that the * user mistyped the nth pathspec. */ -static int do_match_pathspec(const struct index_state *istate, +static int do_match_pathspec(struct index_state *istate, const struct pathspec *ps, const char *name, int namelen, int prefix, char *seen, @@ -500,7 +500,7 @@ static int do_match_pathspec(const struct index_state *istate, return retval; } -static int match_pathspec_with_flags(const struct index_state *istate, +static int match_pathspec_with_flags(struct index_state *istate, const struct pathspec *ps, const char *name, int namelen, int prefix, char *seen, unsigned flags) @@ -516,7 +516,7 @@ static int match_pathspec_with_flags(const struct index_state *istate, return negative ? 0 : positive; } -int match_pathspec(const struct index_state *istate, +int match_pathspec(struct index_state *istate, const struct pathspec *ps, const char *name, int namelen, int prefix, char *seen, int is_dir) @@ -529,7 +529,7 @@ int match_pathspec(const struct index_state *istate, /** * Check if a submodule is a superset of the pathspec */ -int submodule_path_match(const struct index_state *istate, +int submodule_path_match(struct index_state *istate, const struct pathspec *ps, const char *submodule_name, char *seen) @@ -892,7 +892,7 @@ void add_pattern(const char *string, const char *base, add_pattern_to_hashsets(pl, pattern); } -static int read_skip_worktree_file_from_index(const struct index_state *istate, +static int read_skip_worktree_file_from_index(struct index_state *istate, const char *path, size_t *size_out, char **data_out, struct oid_stat *oid_stat) @@ -3542,6 +3542,8 @@ static void connect_wt_gitdir_in_nested(const char *sub_worktree, if (repo_read_index(&subrepo) < 0) die(_("index file corrupt in repo %s"), subrepo.gitdir); + /* TODO: audit for interaction with sparse-index. 
*/ + ensure_full_index(subrepo.index); for (i = 0; i < subrepo.index->cache_nr; i++) { const struct cache_entry *ce = subrepo.index->cache[i]; @@ -354,7 +354,7 @@ int count_slashes(const char *s); int simple_length(const char *match); int no_wildcard(const char *string); char *common_prefix(const struct pathspec *pathspec); -int match_pathspec(const struct index_state *istate, +int match_pathspec(struct index_state *istate, const struct pathspec *pathspec, const char *name, int namelen, int prefix, char *seen, int is_dir); @@ -493,12 +493,12 @@ int git_fnmatch(const struct pathspec_item *item, const char *pattern, const char *string, int prefix); -int submodule_path_match(const struct index_state *istate, +int submodule_path_match(struct index_state *istate, const struct pathspec *ps, const char *submodule_name, char *seen); -static inline int ce_path_match(const struct index_state *istate, +static inline int ce_path_match(struct index_state *istate, const struct cache_entry *ce, const struct pathspec *pathspec, char *seen) @@ -507,7 +507,7 @@ static inline int ce_path_match(const struct index_state *istate, S_ISDIR(ce->ce_mode) || S_ISGITLINK(ce->ce_mode)); } -static inline int dir_path_match(const struct index_state *istate, +static inline int dir_path_match(struct index_state *istate, const struct dir_entry *ent, const struct pathspec *pathspec, int prefix, char *seen) @@ -7,6 +7,7 @@ #include "progress.h" #include "fsmonitor.h" #include "entry.h" +#include "parallel-checkout.h" static void create_directories(const char *path, int path_len, const struct checkout *state) @@ -423,11 +424,22 @@ static void mark_colliding_entries(const struct checkout *state, ce->ce_flags |= CE_MATCHED; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(state->istate); for (i = 0; i < state->istate->cache_nr; i++) { struct cache_entry *dup = state->istate->cache[i]; - if (dup == ce) - break; + if (dup == ce) { + /* + * Parallel checkout doesn't create the files in index + * order. So the other side of the collision may appear + * after the given cache_entry in the array. 
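
The comment above explains why the collision scan can no longer stop at the entry being checked out: with parallel workers, the colliding file may have been written by an entry that comes later in the array. A small standalone illustration of that continue-versus-break decision; find_collider and struct item are invented for the example.

#include <stdio.h>
#include <strings.h>

struct item { const char *path; int written; };

/*
 * Find another already-written item whose path collides with 'self'.
 * With sequential writes the collider is always earlier in the array,
 * so the scan can stop at 'self'; with parallel writes it may be later,
 * so we only skip 'self' and keep going.
 */
static const struct item *find_collider(const struct item *items, size_t nr,
                                        const struct item *self, int parallel)
{
    size_t i;

    for (i = 0; i < nr; i++) {
        const struct item *it = &items[i];

        if (it == self) {
            if (parallel)
                continue;     /* collider may still be ahead */
            break;            /* sequential: nothing ahead matters */
        }
        if (it->written && !strcasecmp(it->path, self->path))
            return it;
    }
    return NULL;
}

int main(void)
{
    struct item items[] = {
        { "Makefile", 0 },
        { "makefile", 1 },    /* written first by another worker */
    };
    const struct item *hit = find_collider(items, 2, &items[0], 1);

    printf("%s\n", hit ? hit->path : "no collision");
    return 0;
}
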
+ */ + if (parallel_checkout_status() == PC_RUNNING) + continue; + else + break; + } if (dup->ce_flags & (CE_MATCHED | CE_VALID | CE_SKIP_WORKTREE)) continue; @@ -536,6 +548,9 @@ int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca, ca = &ca_buf; } + if (!enqueue_checkout(ce, ca)) + return 0; + return write_entry(ce, path.buf, ca, state, 0); } @@ -255,6 +255,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) git_config_push_parameter((*argv)[1]); (*argv)++; (*argc)--; + } else if (!strcmp(cmd, "--config-env")) { + if (*argc < 2) { + fprintf(stderr, _("no config key given for --config-env\n" )); + usage(git_usage_string); + } + git_config_push_env((*argv)[1]); + (*argv)++; + (*argc)--; } else if (skip_prefix(cmd, "--config-env=", &cmd)) { git_config_push_env(cmd); } else if (!strcmp(cmd, "--literal-pathspecs")) { @@ -490,6 +498,8 @@ static struct cmd_struct commands[] = { { "check-mailmap", cmd_check_mailmap, RUN_SETUP }, { "check-ref-format", cmd_check_ref_format, NO_PARSEOPT }, { "checkout", cmd_checkout, RUN_SETUP | NEED_WORK_TREE }, + { "checkout--worker", cmd_checkout__worker, + RUN_SETUP | NEED_WORK_TREE | SUPPORT_SUPER_PREFIX }, { "checkout-index", cmd_checkout_index, RUN_SETUP | NEED_WORK_TREE}, { "cherry", cmd_cherry, RUN_SETUP }, diff --git a/http-push.c b/http-push.c index b60d5fcc85..813123242e 100644 --- a/http-push.c +++ b/http-push.c @@ -1436,7 +1436,7 @@ static void one_remote_ref(const char *refname) * may be required for updating server info later. */ if (repo->can_update_info_refs && !has_object_file(&ref->old_oid)) { - obj = lookup_unknown_object(&ref->old_oid); + obj = lookup_unknown_object(the_repository, &ref->old_oid); fprintf(stderr, " fetch %s for %s\n", oid_to_hex(&ref->old_oid), refname); add_fetch_request(obj); diff --git a/merge-ort.c b/merge-ort.c index b1795d838e..6c2792b10e 100644 --- a/merge-ort.c +++ b/merge-ort.c @@ -2564,7 +2564,7 @@ static int blob_unchanged(struct merge_options *opt, struct strbuf basebuf = STRBUF_INIT; struct strbuf sidebuf = STRBUF_INIT; int ret = 0; /* assume changed for safety */ - const struct index_state *idx = &opt->priv->attr_index; + struct index_state *idx = &opt->priv->attr_index; if (!idx->initialized) initialize_attr_index(opt); diff --git a/merge-recursive.c b/merge-recursive.c index 7618303f7b..27b222ae49 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -522,6 +522,8 @@ static struct string_list *get_unmerged(struct index_state *istate) unmerged->strdup_strings = 1; + /* TODO: audit for interaction with sparse-index. 
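
checkout_entry_ca() above now tries enqueue_checkout() first and only falls through to write_entry() when the queue is not accepting entries or the entry is ineligible. The same enqueue-or-act-now shape, reduced to a toy queue; try_enqueue, checkout_one and QUEUE_MAX are invented names.

#include <stdio.h>

#define QUEUE_MAX 2

static const char *queue[QUEUE_MAX];
static int queue_len = -1;   /* -1: queue not accepting entries */

/* Return 0 if the item was queued for later, -1 if the caller must act now. */
static int try_enqueue(const char *path)
{
    if (queue_len < 0 || queue_len >= QUEUE_MAX)
        return -1;
    queue[queue_len++] = path;
    return 0;
}

static void checkout_one(const char *path)
{
    if (!try_enqueue(path)) {
        printf("queued for parallel write: %s\n", path);
        return;
    }
    printf("written immediately: %s\n", path);
}

int main(void)
{
    checkout_one("a.txt");   /* queue closed: written immediately */
    queue_len = 0;           /* open the queue */
    checkout_one("b.txt");
    checkout_one("c.txt");
    return 0;
}
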
*/ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { struct string_list_item *item; struct stage_data *e; @@ -3014,7 +3016,7 @@ static int blob_unchanged(struct merge_options *opt, struct strbuf obuf = STRBUF_INIT; struct strbuf abuf = STRBUF_INIT; int ret = 0; /* assume changed for safety */ - const struct index_state *idx = opt->repo->index; + struct index_state *idx = opt->repo->index; if (a->mode != o->mode) return 0; diff --git a/name-hash.c b/name-hash.c index ce28f3f070..7487d33124 100644 --- a/name-hash.c +++ b/name-hash.c @@ -8,6 +8,7 @@ #include "cache.h" #include "thread-utils.h" #include "trace2.h" +#include "sparse-index.h" struct dir_entry { struct hashmap_entry ent; @@ -109,8 +110,11 @@ static void hash_index_entry(struct index_state *istate, struct cache_entry *ce) if (ce->ce_flags & CE_HASHED) return; ce->ce_flags |= CE_HASHED; - hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce))); - hashmap_add(&istate->name_hash, &ce->ent); + + if (!S_ISSPARSEDIR(ce->ce_mode)) { + hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce))); + hashmap_add(&istate->name_hash, &ce->ent); + } if (ignore_case) add_dir_entry(istate, ce); @@ -680,6 +684,7 @@ int index_dir_exists(struct index_state *istate, const char *name, int namelen) struct dir_entry *dir; lazy_init_name_hash(istate); + expand_to_path(istate, name, namelen, 0); dir = find_dir_entry(istate, name, namelen); return dir && dir->nr; } @@ -690,6 +695,7 @@ void adjust_dirname_case(struct index_state *istate, char *name) const char *ptr = startPtr; lazy_init_name_hash(istate); + expand_to_path(istate, name, strlen(name), 0); while (*ptr) { while (*ptr && *ptr != '/') ptr++; @@ -713,6 +719,7 @@ struct cache_entry *index_file_exists(struct index_state *istate, const char *na unsigned int hash = memihash(name, namelen); lazy_init_name_hash(istate); + expand_to_path(istate, name, namelen, icase); ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL, struct cache_entry, ent); @@ -177,12 +177,11 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet) } } -struct object *lookup_unknown_object(const struct object_id *oid) +struct object *lookup_unknown_object(struct repository *r, const struct object_id *oid) { - struct object *obj = lookup_object(the_repository, oid); + struct object *obj = lookup_object(r, oid); if (!obj) - obj = create_object(the_repository, oid, - alloc_object_node(the_repository)); + obj = create_object(r, oid, alloc_object_node(r)); return obj; } @@ -145,7 +145,7 @@ struct object *parse_object_or_die(const struct object_id *oid, const char *name struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p); /** Returns the object, with potentially excess memory allocated. 
**/ -struct object *lookup_unknown_object(const struct object_id *oid); +struct object *lookup_unknown_object(struct repository *r, const struct object_id *oid); struct object_list *object_list_insert(struct object *item, struct object_list **list_p); diff --git a/packfile.c b/packfile.c index 8668345d93..b79cbc8cd4 100644 --- a/packfile.c +++ b/packfile.c @@ -2247,6 +2247,7 @@ static int add_promisor_object(const struct object_id *oid, return 0; while (tree_entry_gently(&desc, &entry)) oidset_insert(set, &entry.oid); + free_tree_buffer(tree); } else if (obj->type == OBJ_COMMIT) { struct commit *commit = (struct commit *) obj; struct commit_list *parents = commit->parents; diff --git a/parallel-checkout.c b/parallel-checkout.c new file mode 100644 index 0000000000..09e8b10a35 --- /dev/null +++ b/parallel-checkout.c @@ -0,0 +1,655 @@ +#include "cache.h" +#include "config.h" +#include "entry.h" +#include "parallel-checkout.h" +#include "pkt-line.h" +#include "progress.h" +#include "run-command.h" +#include "sigchain.h" +#include "streaming.h" +#include "thread-utils.h" + +struct pc_worker { + struct child_process cp; + size_t next_item_to_complete, nr_items_to_complete; +}; + +struct parallel_checkout { + enum pc_status status; + struct parallel_checkout_item *items; /* The parallel checkout queue. */ + size_t nr, alloc; + struct progress *progress; + unsigned int *progress_cnt; +}; + +static struct parallel_checkout parallel_checkout; + +enum pc_status parallel_checkout_status(void) +{ + return parallel_checkout.status; +} + +static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100; +static const int DEFAULT_NUM_WORKERS = 1; + +void get_parallel_checkout_configs(int *num_workers, int *threshold) +{ + if (git_config_get_int("checkout.workers", num_workers)) + *num_workers = DEFAULT_NUM_WORKERS; + else if (*num_workers < 1) + *num_workers = online_cpus(); + + if (git_config_get_int("checkout.thresholdForParallelism", threshold)) + *threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM; +} + +void init_parallel_checkout(void) +{ + if (parallel_checkout.status != PC_UNINITIALIZED) + BUG("parallel checkout already initialized"); + + parallel_checkout.status = PC_ACCEPTING_ENTRIES; +} + +static void finish_parallel_checkout(void) +{ + if (parallel_checkout.status == PC_UNINITIALIZED) + BUG("cannot finish parallel checkout: not initialized yet"); + + free(parallel_checkout.items); + memset(¶llel_checkout, 0, sizeof(parallel_checkout)); +} + +static int is_eligible_for_parallel_checkout(const struct cache_entry *ce, + const struct conv_attrs *ca) +{ + enum conv_attrs_classification c; + size_t packed_item_size; + + /* + * Symlinks cannot be checked out in parallel as, in case of path + * collision, they could racily replace leading directories of other + * entries being checked out. Submodules are checked out in child + * processes, which have their own parallel checkout queues. + */ + if (!S_ISREG(ce->ce_mode)) + return 0; + + packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen + + (ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0); + + /* + * The amount of data we send to the workers per checkout item is + * typically small (75~300B). So unless we find an insanely huge path + * of 64KB, we should never reach the 65KB limit of one pkt-line. If + * that does happen, we let the sequential code handle the item. 
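
get_parallel_checkout_configs() above resolves checkout.workers to 1 when the setting is absent and to the number of logical cores when it is configured below 1, with checkout.thresholdForParallelism defaulting to 100. A runnable sketch of just the worker-count resolution; the environment variable is only a stand-in for git_config_get_int(), which this example does not use.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int resolve_workers(void)
{
    const char *v = getenv("CHECKOUT_WORKERS");  /* stand-in for the config */
    int workers;

    if (!v)
        return 1;                 /* default: sequential checkout */
    workers = atoi(v);
    if (workers < 1)              /* "less than one" means auto-detect */
        workers = (int)sysconf(_SC_NPROCESSORS_ONLN);
    return workers;
}

int main(void)
{
    printf("workers = %d\n", resolve_workers());
    return 0;
}
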
+ */ + if (packed_item_size > LARGE_PACKET_DATA_MAX) + return 0; + + c = classify_conv_attrs(ca); + switch (c) { + case CA_CLASS_INCORE: + return 1; + + case CA_CLASS_INCORE_FILTER: + /* + * It would be safe to allow concurrent instances of + * single-file smudge filters, like rot13, but we should not + * assume that all filters are parallel-process safe. So we + * don't allow this. + */ + return 0; + + case CA_CLASS_INCORE_PROCESS: + /* + * The parallel queue and the delayed queue are not compatible, + * so they must be kept completely separated. And we can't tell + * if a long-running process will delay its response without + * actually asking it to perform the filtering. Therefore, this + * type of filter is not allowed in parallel checkout. + * + * Furthermore, there should only be one instance of the + * long-running process filter as we don't know how it is + * managing its own concurrency. So, spreading the entries that + * requisite such a filter among the parallel workers would + * require a lot more inter-process communication. We would + * probably have to designate a single process to interact with + * the filter and send all the necessary data to it, for each + * entry. + */ + return 0; + + case CA_CLASS_STREAMABLE: + return 1; + + default: + BUG("unsupported conv_attrs classification '%d'", c); + } +} + +int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca) +{ + struct parallel_checkout_item *pc_item; + + if (parallel_checkout.status != PC_ACCEPTING_ENTRIES || + !is_eligible_for_parallel_checkout(ce, ca)) + return -1; + + ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1, + parallel_checkout.alloc); + + pc_item = ¶llel_checkout.items[parallel_checkout.nr]; + pc_item->ce = ce; + memcpy(&pc_item->ca, ca, sizeof(pc_item->ca)); + pc_item->status = PC_ITEM_PENDING; + pc_item->id = parallel_checkout.nr; + parallel_checkout.nr++; + + return 0; +} + +size_t pc_queue_size(void) +{ + return parallel_checkout.nr; +} + +static void advance_progress_meter(void) +{ + if (parallel_checkout.progress) { + (*parallel_checkout.progress_cnt)++; + display_progress(parallel_checkout.progress, + *parallel_checkout.progress_cnt); + } +} + +static int handle_results(struct checkout *state) +{ + int ret = 0; + size_t i; + int have_pending = 0; + + /* + * We first update the successfully written entries with the collected + * stat() data, so that they can be found by mark_colliding_entries(), + * in the next loop, when necessary. + */ + for (i = 0; i < parallel_checkout.nr; i++) { + struct parallel_checkout_item *pc_item = ¶llel_checkout.items[i]; + if (pc_item->status == PC_ITEM_WRITTEN) + update_ce_after_write(state, pc_item->ce, &pc_item->st); + } + + for (i = 0; i < parallel_checkout.nr; i++) { + struct parallel_checkout_item *pc_item = ¶llel_checkout.items[i]; + + switch(pc_item->status) { + case PC_ITEM_WRITTEN: + /* Already handled */ + break; + case PC_ITEM_COLLIDED: + /* + * The entry could not be checked out due to a path + * collision with another entry. Since there can only + * be one entry of each colliding group on the disk, we + * could skip trying to check out this one and move on. + * However, this would leave the unwritten entries with + * null stat() fields on the index, which could + * potentially slow down subsequent operations that + * require refreshing it: git would not be able to + * trust st_size and would have to go to the filesystem + * to see if the contents match (see ie_modified()). 
+ * + * Instead, let's pay the overhead only once, now, and + * call checkout_entry_ca() again for this file, to + * have its stat() data stored in the index. This also + * has the benefit of adding this entry and its + * colliding pair to the collision report message. + * Additionally, this overwriting behavior is consistent + * with what the sequential checkout does, so it doesn't + * add any extra overhead. + */ + ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca, + state, NULL, NULL); + advance_progress_meter(); + break; + case PC_ITEM_PENDING: + have_pending = 1; + /* fall through */ + case PC_ITEM_FAILED: + ret = -1; + break; + default: + BUG("unknown checkout item status in parallel checkout"); + } + } + + if (have_pending) + error("parallel checkout finished with pending entries"); + + return ret; +} + +static int reset_fd(int fd, const char *path) +{ + if (lseek(fd, 0, SEEK_SET) != 0) + return error_errno("failed to rewind descriptor of '%s'", path); + if (ftruncate(fd, 0)) + return error_errno("failed to truncate file '%s'", path); + return 0; +} + +static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd, + const char *path) +{ + int ret; + struct stream_filter *filter; + struct strbuf buf = STRBUF_INIT; + char *blob; + unsigned long size; + ssize_t wrote; + + /* Sanity check */ + assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca)); + + filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid); + if (filter) { + if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) { + /* On error, reset fd to try writing without streaming */ + if (reset_fd(fd, path)) + return -1; + } else { + return 0; + } + } + + blob = read_blob_entry(pc_item->ce, &size); + if (!blob) + return error("cannot read object %s '%s'", + oid_to_hex(&pc_item->ce->oid), pc_item->ce->name); + + /* + * checkout metadata is used to give context for external process + * filters. Files requiring such filters are not eligible for parallel + * checkout, so pass NULL. Note: if that changes, the metadata must also + * be passed from the main process to the workers. + */ + ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name, + blob, size, &buf, NULL); + + if (ret) { + size_t newsize; + free(blob); + blob = strbuf_detach(&buf, &newsize); + size = newsize; + } + + wrote = write_in_full(fd, blob, size); + free(blob); + if (wrote < 0) + return error("unable to write file '%s'", path); + + return 0; +} + +static int close_and_clear(int *fd) +{ + int ret = 0; + + if (*fd >= 0) { + ret = close(*fd); + *fd = -1; + } + + return ret; +} + +void write_pc_item(struct parallel_checkout_item *pc_item, + struct checkout *state) +{ + unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666; + int fd = -1, fstat_done = 0; + struct strbuf path = STRBUF_INIT; + const char *dir_sep; + + strbuf_add(&path, state->base_dir, state->base_dir_len); + strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen); + + dir_sep = find_last_dir_sep(path.buf); + + /* + * The leading dirs should have been already created by now. But, in + * case of path collisions, one of the dirs could have been replaced by + * a symlink (checked out after we enqueued this entry for parallel + * checkout). Thus, we must check the leading dirs again. 
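
write_pc_item_to_fd() above prefers the streaming filter and, if streaming fails midway, rewinds and truncates the descriptor before retrying with the in-core conversion. A self-contained sketch of that reset-and-fall-back shape; stream_content() fails on purpose, and the file name and helpers are invented.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

/* Rewind and empty the file so a second attempt starts from scratch. */
static int reset_fd(int fd)
{
    if (lseek(fd, 0, SEEK_SET) != 0)
        return -1;
    if (ftruncate(fd, 0))
        return -1;
    return 0;
}

/* Pretend streaming failed after writing part of the content. */
static int stream_content(int fd)
{
    (void)!write(fd, "parti", 5);
    return -1;
}

/* Prefer the streaming path; on failure, reset and write the buffer. */
static int write_content(int fd, const char *fallback)
{
    if (!stream_content(fd))
        return 0;
    if (reset_fd(fd))
        return -1;
    return write(fd, fallback, strlen(fallback)) < 0 ? -1 : 0;
}

int main(void)
{
    int fd = open("demo-output.txt", O_RDWR | O_CREAT | O_TRUNC, 0666);

    if (fd < 0 || write_content(fd, "full converted content\n"))
        return 1;
    printf("wrote demo-output.txt\n");
    close(fd);
    return 0;
}
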
+ */ + if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf, + state->base_dir_len)) { + pc_item->status = PC_ITEM_COLLIDED; + goto out; + } + + fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode); + + if (fd < 0) { + if (errno == EEXIST || errno == EISDIR) { + /* + * Errors which probably represent a path collision. + * Suppress the error message and mark the item to be + * retried later, sequentially. ENOTDIR and ENOENT are + * also interesting, but the above has_dirs_only_path() + * call should have already caught these cases. + */ + pc_item->status = PC_ITEM_COLLIDED; + } else { + error_errno("failed to open file '%s'", path.buf); + pc_item->status = PC_ITEM_FAILED; + } + goto out; + } + + if (write_pc_item_to_fd(pc_item, fd, path.buf)) { + /* Error was already reported. */ + pc_item->status = PC_ITEM_FAILED; + close_and_clear(&fd); + unlink(path.buf); + goto out; + } + + fstat_done = fstat_checkout_output(fd, state, &pc_item->st); + + if (close_and_clear(&fd)) { + error_errno("unable to close file '%s'", path.buf); + pc_item->status = PC_ITEM_FAILED; + goto out; + } + + if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) { + error_errno("unable to stat just-written file '%s'", path.buf); + pc_item->status = PC_ITEM_FAILED; + goto out; + } + + pc_item->status = PC_ITEM_WRITTEN; + +out: + strbuf_release(&path); +} + +static void send_one_item(int fd, struct parallel_checkout_item *pc_item) +{ + size_t len_data; + char *data, *variant; + struct pc_item_fixed_portion *fixed_portion; + const char *working_tree_encoding = pc_item->ca.working_tree_encoding; + size_t name_len = pc_item->ce->ce_namelen; + size_t working_tree_encoding_len = working_tree_encoding ? + strlen(working_tree_encoding) : 0; + + /* + * Any changes in the calculation of the message size must also be made + * in is_eligible_for_parallel_checkout(). + */ + len_data = sizeof(struct pc_item_fixed_portion) + name_len + + working_tree_encoding_len; + + data = xcalloc(1, len_data); + + fixed_portion = (struct pc_item_fixed_portion *)data; + fixed_portion->id = pc_item->id; + fixed_portion->ce_mode = pc_item->ce->ce_mode; + fixed_portion->crlf_action = pc_item->ca.crlf_action; + fixed_portion->ident = pc_item->ca.ident; + fixed_portion->name_len = name_len; + fixed_portion->working_tree_encoding_len = working_tree_encoding_len; + /* + * We use hashcpy() instead of oidcpy() because the hash[] positions + * after `the_hash_algo->rawsz` might not be initialized. And Valgrind + * would complain about passing uninitialized bytes to a syscall + * (write(2)). There is no real harm in this case, but the warning could + * hinder the detection of actual errors. 
+ */ + hashcpy(fixed_portion->oid.hash, pc_item->ce->oid.hash); + + variant = data + sizeof(*fixed_portion); + if (working_tree_encoding_len) { + memcpy(variant, working_tree_encoding, working_tree_encoding_len); + variant += working_tree_encoding_len; + } + memcpy(variant, pc_item->ce->name, name_len); + + packet_write(fd, data, len_data); + + free(data); +} + +static void send_batch(int fd, size_t start, size_t nr) +{ + size_t i; + sigchain_push(SIGPIPE, SIG_IGN); + for (i = 0; i < nr; i++) + send_one_item(fd, ¶llel_checkout.items[start + i]); + packet_flush(fd); + sigchain_pop(SIGPIPE); +} + +static struct pc_worker *setup_workers(struct checkout *state, int num_workers) +{ + struct pc_worker *workers; + int i, workers_with_one_extra_item; + size_t base_batch_size, batch_beginning = 0; + + ALLOC_ARRAY(workers, num_workers); + + for (i = 0; i < num_workers; i++) { + struct child_process *cp = &workers[i].cp; + + child_process_init(cp); + cp->git_cmd = 1; + cp->in = -1; + cp->out = -1; + cp->clean_on_exit = 1; + strvec_push(&cp->args, "checkout--worker"); + if (state->base_dir_len) + strvec_pushf(&cp->args, "--prefix=%s", state->base_dir); + if (start_command(cp)) + die("failed to spawn checkout worker"); + } + + base_batch_size = parallel_checkout.nr / num_workers; + workers_with_one_extra_item = parallel_checkout.nr % num_workers; + + for (i = 0; i < num_workers; i++) { + struct pc_worker *worker = &workers[i]; + size_t batch_size = base_batch_size; + + /* distribute the extra work evenly */ + if (i < workers_with_one_extra_item) + batch_size++; + + send_batch(worker->cp.in, batch_beginning, batch_size); + worker->next_item_to_complete = batch_beginning; + worker->nr_items_to_complete = batch_size; + + batch_beginning += batch_size; + } + + return workers; +} + +static void finish_workers(struct pc_worker *workers, int num_workers) +{ + int i; + + /* + * Close pipes before calling finish_command() to let the workers + * exit asynchronously and avoid spending extra time on wait(). + */ + for (i = 0; i < num_workers; i++) { + struct child_process *cp = &workers[i].cp; + if (cp->in >= 0) + close(cp->in); + if (cp->out >= 0) + close(cp->out); + } + + for (i = 0; i < num_workers; i++) { + int rc = finish_command(&workers[i].cp); + if (rc > 128) { + /* + * For a normal non-zero exit, the worker should have + * already printed something useful to stderr. But a + * death by signal should be mentioned to the user. + */ + error("checkout worker %d died of signal %d", i, rc - 128); + } + } + + free(workers); +} + +static inline void assert_pc_item_result_size(int got, int exp) +{ + if (got != exp) + BUG("wrong result size from checkout worker (got %dB, exp %dB)", + got, exp); +} + +static void parse_and_save_result(const char *buffer, int len, + struct pc_worker *worker) +{ + struct pc_item_result *res; + struct parallel_checkout_item *pc_item; + struct stat *st = NULL; + + if (len < PC_ITEM_RESULT_BASE_SIZE) + BUG("too short result from checkout worker (got %dB, exp >=%dB)", + len, (int)PC_ITEM_RESULT_BASE_SIZE); + + res = (struct pc_item_result *)buffer; + + /* + * Worker should send either the full result struct on success, or + * just the base (i.e. no stat data), otherwise. 
+ */ + if (res->status == PC_ITEM_WRITTEN) { + assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result)); + st = &res->st; + } else { + assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE); + } + + if (!worker->nr_items_to_complete) + BUG("received result from supposedly finished checkout worker"); + if (res->id != worker->next_item_to_complete) + BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")", + (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete); + + worker->next_item_to_complete++; + worker->nr_items_to_complete--; + + pc_item = ¶llel_checkout.items[res->id]; + pc_item->status = res->status; + if (st) + pc_item->st = *st; + + if (res->status != PC_ITEM_COLLIDED) + advance_progress_meter(); +} + +static void gather_results_from_workers(struct pc_worker *workers, + int num_workers) +{ + int i, active_workers = num_workers; + struct pollfd *pfds; + + CALLOC_ARRAY(pfds, num_workers); + for (i = 0; i < num_workers; i++) { + pfds[i].fd = workers[i].cp.out; + pfds[i].events = POLLIN; + } + + while (active_workers) { + int nr = poll(pfds, num_workers, -1); + + if (nr < 0) { + if (errno == EINTR) + continue; + die_errno("failed to poll checkout workers"); + } + + for (i = 0; i < num_workers && nr > 0; i++) { + struct pc_worker *worker = &workers[i]; + struct pollfd *pfd = &pfds[i]; + + if (!pfd->revents) + continue; + + if (pfd->revents & POLLIN) { + int len = packet_read(pfd->fd, NULL, NULL, + packet_buffer, + sizeof(packet_buffer), 0); + + if (len < 0) { + BUG("packet_read() returned negative value"); + } else if (!len) { + pfd->fd = -1; + active_workers--; + } else { + parse_and_save_result(packet_buffer, + len, worker); + } + } else if (pfd->revents & POLLHUP) { + pfd->fd = -1; + active_workers--; + } else if (pfd->revents & (POLLNVAL | POLLERR)) { + die("error polling from checkout worker"); + } + + nr--; + } + } + + free(pfds); +} + +static void write_items_sequentially(struct checkout *state) +{ + size_t i; + + for (i = 0; i < parallel_checkout.nr; i++) { + struct parallel_checkout_item *pc_item = ¶llel_checkout.items[i]; + write_pc_item(pc_item, state); + if (pc_item->status != PC_ITEM_COLLIDED) + advance_progress_meter(); + } +} + +int run_parallel_checkout(struct checkout *state, int num_workers, int threshold, + struct progress *progress, unsigned int *progress_cnt) +{ + int ret; + + if (parallel_checkout.status != PC_ACCEPTING_ENTRIES) + BUG("cannot run parallel checkout: uninitialized or already running"); + + parallel_checkout.status = PC_RUNNING; + parallel_checkout.progress = progress; + parallel_checkout.progress_cnt = progress_cnt; + + if (parallel_checkout.nr < num_workers) + num_workers = parallel_checkout.nr; + + if (num_workers <= 1 || parallel_checkout.nr < threshold) { + write_items_sequentially(state); + } else { + struct pc_worker *workers = setup_workers(state, num_workers); + gather_results_from_workers(workers, num_workers); + finish_workers(workers, num_workers); + } + + ret = handle_results(state); + + finish_parallel_checkout(); + return ret; +} diff --git a/parallel-checkout.h b/parallel-checkout.h new file mode 100644 index 0000000000..80f539bcb7 --- /dev/null +++ b/parallel-checkout.h @@ -0,0 +1,111 @@ +#ifndef PARALLEL_CHECKOUT_H +#define PARALLEL_CHECKOUT_H + +#include "convert.h" + +struct cache_entry; +struct checkout; +struct progress; + +/**************************************************************** + * Users of parallel checkout + 
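
gather_results_from_workers() above multiplexes the worker pipes with poll(), retiring a descriptor when it reports EOF and stopping once every worker is accounted for. A runnable miniature of that loop in which the "workers" are simply pipes written to and closed by the same process:

#include <stdio.h>
#include <unistd.h>
#include <poll.h>

#define NR 2

int main(void)
{
    int fds[NR][2];
    struct pollfd pfds[NR];
    int i, active = NR;
    char buf[64];

    for (i = 0; i < NR; i++) {
        char msg[32];
        int n;

        if (pipe(fds[i]))
            return 1;
        n = snprintf(msg, sizeof(msg), "result from worker %d", i);
        (void)!write(fds[i][1], msg, n);
        close(fds[i][1]);              /* worker finished */
        pfds[i].fd = fds[i][0];
        pfds[i].events = POLLIN;
    }

    while (active) {
        int nr = poll(pfds, NR, -1);

        if (nr < 0)
            return 1;
        for (i = 0; i < NR && nr > 0; i++) {
            ssize_t len;

            if (!pfds[i].revents)
                continue;
            len = read(pfds[i].fd, buf, sizeof(buf) - 1);
            if (len <= 0) {            /* EOF or error: worker is done */
                pfds[i].fd = -1;       /* poll() ignores negative fds */
                active--;
            } else {
                buf[len] = '\0';
                printf("got: %s\n", buf);
            }
            nr--;
        }
    }
    return 0;
}
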
****************************************************************/ + +enum pc_status { + PC_UNINITIALIZED = 0, + PC_ACCEPTING_ENTRIES, + PC_RUNNING, +}; + +enum pc_status parallel_checkout_status(void); +void get_parallel_checkout_configs(int *num_workers, int *threshold); + +/* + * Put parallel checkout into the PC_ACCEPTING_ENTRIES state. Should be used + * only when in the PC_UNINITIALIZED state. + */ +void init_parallel_checkout(void); + +/* + * Return -1 if parallel checkout is currently not accepting entries or if the + * entry is not eligible for parallel checkout. Otherwise, enqueue the entry + * for later write and return 0. + */ +int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca); +size_t pc_queue_size(void); + +/* + * Write all the queued entries, returning 0 on success. If the number of + * entries is smaller than the specified threshold, the operation is performed + * sequentially. + */ +int run_parallel_checkout(struct checkout *state, int num_workers, int threshold, + struct progress *progress, unsigned int *progress_cnt); + +/**************************************************************** + * Interface with checkout--worker + ****************************************************************/ + +enum pc_item_status { + PC_ITEM_PENDING = 0, + PC_ITEM_WRITTEN, + /* + * The entry could not be written because there was another file + * already present in its path or leading directories. Since + * checkout_entry_ca() removes such files from the working tree before + * enqueueing the entry for parallel checkout, it means that there was + * a path collision among the entries being written. + */ + PC_ITEM_COLLIDED, + PC_ITEM_FAILED, +}; + +struct parallel_checkout_item { + /* + * In main process ce points to a istate->cache[] entry. Thus, it's not + * owned by us. In workers they own the memory, which *must be* released. + */ + struct cache_entry *ce; + struct conv_attrs ca; + size_t id; /* position in parallel_checkout.items[] of main process */ + + /* Output fields, sent from workers. */ + enum pc_item_status status; + struct stat st; +}; + +/* + * The fixed-size portion of `struct parallel_checkout_item` that is sent to the + * workers. Following this will be 2 strings: ca.working_tree_encoding and + * ce.name; These are NOT null terminated, since we have the size in the fixed + * portion. + * + * Note that not all fields of conv_attrs and cache_entry are passed, only the + * ones that will be required by the workers to smudge and write the entry. + */ +struct pc_item_fixed_portion { + size_t id; + struct object_id oid; + unsigned int ce_mode; + enum convert_crlf_action crlf_action; + int ident; + size_t working_tree_encoding_len; + size_t name_len; +}; + +/* + * The fields of `struct parallel_checkout_item` that are returned by the + * workers. Note: `st` must be the last one, as it is omitted on error. + */ +struct pc_item_result { + size_t id; + enum pc_item_status status; + struct stat st; +}; + +#define PC_ITEM_RESULT_BASE_SIZE offsetof(struct pc_item_result, st) + +void write_pc_item(struct parallel_checkout_item *pc_item, + struct checkout *state); + +#endif /* PARALLEL_CHECKOUT_H */ diff --git a/pathspec.c b/pathspec.c index 18b3be362a..14c7e9fbe3 100644 --- a/pathspec.c +++ b/pathspec.c @@ -20,7 +20,7 @@ * to use find_pathspecs_matching_against_index() instead. 
*/ void add_pathspec_matches_against_index(const struct pathspec *pathspec, - const struct index_state *istate, + struct index_state *istate, char *seen) { int num_unmatched = 0, i; @@ -36,6 +36,8 @@ void add_pathspec_matches_against_index(const struct pathspec *pathspec, num_unmatched++; if (!num_unmatched) return; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { const struct cache_entry *ce = istate->cache[i]; ce_path_match(istate, ce, pathspec, seen); @@ -51,7 +53,7 @@ void add_pathspec_matches_against_index(const struct pathspec *pathspec, * given pathspecs achieves against all items in the index. */ char *find_pathspecs_matching_against_index(const struct pathspec *pathspec, - const struct index_state *istate) + struct index_state *istate) { char *seen = xcalloc(pathspec->nr, 1); add_pathspec_matches_against_index(pathspec, istate, seen); @@ -702,7 +704,7 @@ void clear_pathspec(struct pathspec *pathspec) pathspec->nr = 0; } -int match_pathspec_attrs(const struct index_state *istate, +int match_pathspec_attrs(struct index_state *istate, const char *name, int namelen, const struct pathspec_item *item) { diff --git a/pathspec.h b/pathspec.h index 454ce364fa..2ccc8080b6 100644 --- a/pathspec.h +++ b/pathspec.h @@ -150,11 +150,11 @@ static inline int ps_strcmp(const struct pathspec_item *item, } void add_pathspec_matches_against_index(const struct pathspec *pathspec, - const struct index_state *istate, + struct index_state *istate, char *seen); char *find_pathspecs_matching_against_index(const struct pathspec *pathspec, - const struct index_state *istate); -int match_pathspec_attrs(const struct index_state *istate, + struct index_state *istate); +int match_pathspec_attrs(struct index_state *istate, const char *name, int namelen, const struct pathspec_item *item); diff --git a/pkt-line.c b/pkt-line.c index 0194137528..98304ce374 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -194,13 +194,16 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...) return status; } -static int packet_write_gently(const int fd_out, const char *buf, size_t size) +static int do_packet_write(const int fd_out, const char *buf, size_t size, + struct strbuf *err) { char header[4]; size_t packet_size; - if (size > LARGE_PACKET_DATA_MAX) - return error(_("packet write failed - data exceeds max packet size")); + if (size > LARGE_PACKET_DATA_MAX) { + strbuf_addstr(err, _("packet write failed - data exceeds max packet size")); + return -1; + } packet_trace(buf, size, 1); packet_size = size + 4; @@ -215,15 +218,29 @@ static int packet_write_gently(const int fd_out, const char *buf, size_t size) */ if (write_in_full(fd_out, header, 4) < 0 || - write_in_full(fd_out, buf, size) < 0) - return error(_("packet write failed")); + write_in_full(fd_out, buf, size) < 0) { + strbuf_addf(err, _("packet write failed: %s"), strerror(errno)); + return -1; + } + return 0; +} + +static int packet_write_gently(const int fd_out, const char *buf, size_t size) +{ + struct strbuf err = STRBUF_INIT; + if (do_packet_write(fd_out, buf, size, &err)) { + error("%s", err.buf); + strbuf_release(&err); + return -1; + } return 0; } void packet_write(int fd_out, const char *buf, size_t size) { - if (packet_write_gently(fd_out, buf, size)) - die_errno(_("packet write failed")); + struct strbuf err = STRBUF_INIT; + if (do_packet_write(fd_out, buf, size, &err)) + die("%s", err.buf); } void packet_buf_write(struct strbuf *buf, const char *fmt, ...) 
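
The pkt-line change above moves the formatting of the failure message into do_packet_write(), which only fills a caller-supplied buffer; packet_write_gently() downgrades it to a recoverable error() while packet_write() dies with the same text. A standalone sketch of that split, where write_all() stands in for write_in_full() and 65516 is the pkt-line payload limit used above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int write_all(FILE *f, const char *buf, size_t len)
{
    return fwrite(buf, 1, len, f) == len ? 0 : -1;
}

/* Low-level writer: report what went wrong, but leave the policy to callers. */
static int do_write(FILE *f, const char *buf, size_t len, char *err, size_t errsz)
{
    if (len > 65516) {
        snprintf(err, errsz, "packet write failed - data exceeds max packet size");
        return -1;
    }
    if (write_all(f, buf, len)) {
        snprintf(err, errsz, "packet write failed: %s", strerror(errno));
        return -1;
    }
    return 0;
}

static int write_gently(FILE *f, const char *buf, size_t len)
{
    char err[256];

    if (do_write(f, buf, len, err, sizeof(err))) {
        fprintf(stderr, "error: %s\n", err);   /* report and carry on */
        return -1;
    }
    return 0;
}

static void write_or_die(FILE *f, const char *buf, size_t len)
{
    char err[256];

    if (do_write(f, buf, len, err, sizeof(err))) {
        fprintf(stderr, "fatal: %s\n", err);   /* same message, fatal policy */
        exit(128);
    }
}

int main(void)
{
    write_gently(stdout, "hello\n", 6);
    write_or_die(stdout, "world\n", 6);
    return 0;
}

Building the message once in the helper is what removes the duplicated error output the release notes mention: the strict caller no longer prints its own errno-based message on top of the one already emitted.
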
diff --git a/read-cache.c b/read-cache.c index 5a907af2fb..72a1d339c9 100644 --- a/read-cache.c +++ b/read-cache.c @@ -25,6 +25,7 @@ #include "fsmonitor.h" #include "thread-utils.h" #include "progress.h" +#include "sparse-index.h" /* Mask for the name length in ce_flags in the on-disk index */ @@ -47,6 +48,7 @@ #define CACHE_EXT_FSMONITOR 0x46534D4E /* "FSMN" */ #define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945 /* "EOIE" */ #define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */ +#define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */ /* changes that can be kept in $GIT_DIR/index (basically all extensions) */ #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \ @@ -101,6 +103,9 @@ static const char *alternate_index_output; static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) { + if (S_ISSPARSEDIR(ce->ce_mode)) + istate->sparse_index = 1; + istate->cache[nr] = ce; add_name_hash(istate, ce); } @@ -544,7 +549,7 @@ int cache_name_stage_compare(const char *name1, int len1, int stage1, const char return 0; } -static int index_name_stage_pos(const struct index_state *istate, const char *name, int namelen, int stage) +static int index_name_stage_pos(struct index_state *istate, const char *name, int namelen, int stage) { int first, last; @@ -562,10 +567,31 @@ static int index_name_stage_pos(const struct index_state *istate, const char *na } first = next+1; } + + if (istate->sparse_index && + first > 0) { + /* Note: first <= istate->cache_nr */ + struct cache_entry *ce = istate->cache[first - 1]; + + /* + * If we are in a sparse-index _and_ the entry before the + * insertion position is a sparse-directory entry that is + * an ancestor of 'name', then we need to expand the index + * and search again. This will only trigger once, because + * thereafter the index is fully expanded. + */ + if (S_ISSPARSEDIR(ce->ce_mode) && + ce_namelen(ce) < namelen && + !strncmp(name, ce->name, ce_namelen(ce))) { + ensure_full_index(istate); + return index_name_stage_pos(istate, name, namelen, stage); + } + } + return -first-1; } -int index_name_pos(const struct index_state *istate, const char *name, int namelen) +int index_name_pos(struct index_state *istate, const char *name, int namelen) { return index_name_stage_pos(istate, name, namelen, 0); } @@ -999,8 +1025,14 @@ inside: c = *path++; if ((c == '.' && !verify_dotfile(path, mode)) || - is_dir_sep(c) || c == '\0') + is_dir_sep(c)) return 0; + /* + * allow terminating directory separators for + * sparse directory entries. + */ + if (c == '\0') + return S_ISDIR(mode); } else if (c == '\\' && protect_ntfs) { if (is_ntfs_dotgit(path)) return 0; @@ -1545,6 +1577,8 @@ int refresh_index(struct index_state *istate, unsigned int flags, */ preload_index(istate, pathspec, 0); trace2_region_enter("index", "refresh", NULL); + /* TODO: audit for interaction with sparse-index. 
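
index_name_stage_pos() above handles a lookup miss by checking whether the entry just before the insertion point is a sparse-directory entry whose name prefixes the query; if so, the path may exist inside the unexpanded directory, so the index is expanded and the search retried. The prefix test on its own, with an invented struct entry:

#include <stdio.h>
#include <string.h>

struct entry { const char *name; int is_sparse_dir; };

/* Would the queried path live inside this (unexpanded) sparse directory? */
static int is_under_sparse_dir(const struct entry *prev, const char *name)
{
    size_t len = strlen(prev->name);

    return prev->is_sparse_dir &&
           strlen(name) > len &&
           !strncmp(name, prev->name, len);
}

int main(void)
{
    struct entry prev = { "libs/", 1 };

    printf("%d\n", is_under_sparse_dir(&prev, "libs/util.c"));  /* 1: expand, retry */
    printf("%d\n", is_under_sparse_dir(&prev, "main.c"));       /* 0: genuine miss */
    return 0;
}
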
*/ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce, *new_entry; int cache_errno = 0; @@ -1760,6 +1794,10 @@ static int read_index_extension(struct index_state *istate, case CACHE_EXT_INDEXENTRYOFFSETTABLE: /* already handled in do_read_index() */ break; + case CACHE_EXT_SPARSE_DIRECTORIES: + /* no content, only an indicator */ + istate->sparse_index = 1; + break; default: if (*ext < 'A' || 'Z' < *ext) return error(_("index uses %.4s extension, which we do not understand"), @@ -2273,6 +2311,12 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) trace2_data_intmax("index", the_repository, "read/cache_nr", istate->cache_nr); + if (!istate->repo) + istate->repo = the_repository; + prepare_repo_settings(istate->repo); + if (istate->repo->settings.command_requires_full_index) + ensure_full_index(istate); + return istate->cache_nr; unmap: @@ -2457,6 +2501,8 @@ int repo_index_has_changes(struct repository *repo, diff_flush(&opt); return opt.flags.has_changes != 0; } else { + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(istate); for (i = 0; sb && i < istate->cache_nr; i++) { if (i) strbuf_addch(sb, ' '); @@ -3012,6 +3058,10 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, if (err) return -1; } + if (istate->sparse_index) { + if (write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0) + return -1; + } /* * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1 @@ -3071,6 +3121,14 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l unsigned flags) { int ret; + int was_full = !istate->sparse_index; + + ret = convert_to_sparse(istate); + + if (ret) { + warning(_("failed to convert to a sparse-index")); + return ret; + } /* * TODO trace2: replace "the_repository" with the actual repo instance @@ -3082,6 +3140,9 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l trace2_region_leave_printf("index", "do_write_index", the_repository, "%s", get_lock_file_path(lock)); + if (was_full) + ensure_full_index(istate); + if (ret) return ret; if (flags & COMMIT_LOCK) @@ -3172,9 +3233,10 @@ static int write_shared_index(struct index_state *istate, struct tempfile **temp) { struct split_index *si = istate->split_index; - int ret; + int ret, was_full = !istate->sparse_index; move_cache_to_base_index(istate); + convert_to_sparse(istate); trace2_region_enter_printf("index", "shared/do_write_index", the_repository, "%s", get_tempfile_path(*temp)); @@ -3182,6 +3244,9 @@ static int write_shared_index(struct index_state *istate, trace2_region_leave_printf("index", "shared/do_write_index", the_repository, "%s", get_tempfile_path(*temp)); + if (was_full) + ensure_full_index(istate); + if (ret) return ret; ret = adjust_shared_perm(get_tempfile_path(*temp)); @@ -3350,8 +3415,8 @@ int repo_read_index_unmerged(struct repository *repo) * We helpfully remove a trailing "/" from directories so that * the output of read_directory can be used as-is. 
*/ -int index_name_is_other(const struct index_state *istate, const char *name, - int namelen) +int index_name_is_other(struct index_state *istate, const char *name, + int namelen) { int pos; if (namelen && name[namelen - 1] == '/') @@ -3369,7 +3434,7 @@ int index_name_is_other(const struct index_state *istate, const char *name, return 1; } -void *read_blob_data_from_index(const struct index_state *istate, +void *read_blob_data_from_index(struct index_state *istate, const char *path, unsigned long *size) { int pos, len; @@ -337,7 +337,7 @@ static int filter_refs(const char *refname, const struct object_id *oid, enum peel_status peel_object(const struct object_id *name, struct object_id *oid) { - struct object *o = lookup_unknown_object(name); + struct object *o = lookup_unknown_object(the_repository, name); if (o->type == OBJ_NONE) { int type = oid_object_info(the_repository, name, NULL); diff --git a/refs/debug.c b/refs/debug.c index 3b25e3aeb1..001e30651c 100644 --- a/refs/debug.c +++ b/refs/debug.c @@ -244,6 +244,7 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, int res = 0; oidcpy(oid, &null_oid); + errno = 0; res = drefs->refs->be->read_raw_ref(drefs->refs, refname, oid, referent, type); @@ -251,7 +252,9 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, trace_printf_key(&trace_refs, "read_raw_ref: %s: %s (=> %s) type %x: %d\n", refname, oid_to_hex(oid), referent->buf, *type, res); } else { - trace_printf_key(&trace_refs, "read_raw_ref: %s: %d\n", refname, res); + trace_printf_key(&trace_refs, + "read_raw_ref: %s: %d (errno %d)\n", refname, + res, errno); } return res; } diff --git a/repo-settings.c b/repo-settings.c index f7fff0f5ab..0cfe8b787d 100644 --- a/repo-settings.c +++ b/repo-settings.c @@ -77,4 +77,19 @@ void prepare_repo_settings(struct repository *r) UPDATE_DEFAULT_BOOL(r->settings.core_untracked_cache, UNTRACKED_CACHE_KEEP); UPDATE_DEFAULT_BOOL(r->settings.fetch_negotiation_algorithm, FETCH_NEGOTIATION_DEFAULT); + + /* + * This setting guards all index reads to require a full index + * over a sparse index. After suitable guards are placed in the + * codebase around uses of the index, this setting will be + * removed. + */ + r->settings.command_requires_full_index = 1; + + /* + * Initialize this as off. 
+ */ + r->settings.sparse_index = 0; + if (!repo_config_get_bool(r, "index.sparse", &value) && value) + r->settings.sparse_index = 1; } diff --git a/repository.c b/repository.c index 87b355e7a6..448cd557d4 100644 --- a/repository.c +++ b/repository.c @@ -10,6 +10,7 @@ #include "object.h" #include "lockfile.h" #include "submodule-config.h" +#include "sparse-index.h" /* The main repository */ static struct repository the_repo; @@ -261,6 +262,8 @@ void repo_clear(struct repository *repo) int repo_read_index(struct repository *repo) { + int res; + if (!repo->index) CALLOC_ARRAY(repo->index, 1); @@ -270,7 +273,13 @@ int repo_read_index(struct repository *repo) else if (repo->index->repo != repo) BUG("repo's index should point back at itself"); - return read_index_from(repo->index, repo->index_file, repo->gitdir); + res = read_index_from(repo->index, repo->index_file, repo->gitdir); + + prepare_repo_settings(repo); + if (repo->settings.command_requires_full_index) + ensure_full_index(repo->index); + + return res; } int repo_hold_locked_index(struct repository *repo, diff --git a/repository.h b/repository.h index b385ca3c94..a45f7520fd 100644 --- a/repository.h +++ b/repository.h @@ -41,6 +41,9 @@ struct repo_settings { enum fetch_negotiation_setting fetch_negotiation_algorithm; int core_multi_pack_index; + + unsigned command_requires_full_index:1, + sparse_index:1; }; struct repository { diff --git a/resolve-undo.c b/resolve-undo.c index bbd2e57fe4..e81096e2d4 100644 --- a/resolve-undo.c +++ b/resolve-undo.c @@ -172,6 +172,8 @@ void unmerge_marked_index(struct index_state *istate) if (!istate->resolve_undo) return; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { const struct cache_entry *ce = istate->cache[i]; if (ce->ce_flags & CE_MATCHED) @@ -186,6 +188,8 @@ void unmerge_index(struct index_state *istate, const struct pathspec *pathspec) if (!istate->resolve_undo) return; + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { const struct cache_entry *ce = istate->cache[i]; if (!ce_path_match(istate, ce, pathspec, NULL)) diff --git a/revision.c b/revision.c index 553c0faa9b..4853c85d0b 100644 --- a/revision.c +++ b/revision.c @@ -1680,6 +1680,8 @@ static void do_add_index_objects_to_pending(struct rev_info *revs, { int i; + /* TODO: audit for interaction with sparse-index. 
*/ + ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce = istate->cache[i]; struct blob *blob; @@ -3271,7 +3273,7 @@ static int mark_uninteresting(const struct object_id *oid, void *cb) { struct rev_info *revs = cb; - struct object *o = parse_object(revs->repo, oid); + struct object *o = lookup_unknown_object(revs->repo, oid); o->flags |= UNINTERESTING | SEEN; return 0; } diff --git a/sparse-index.c b/sparse-index.c new file mode 100644 index 0000000000..6f21397e2e --- /dev/null +++ b/sparse-index.c @@ -0,0 +1,358 @@ +#include "cache.h" +#include "repository.h" +#include "sparse-index.h" +#include "tree.h" +#include "pathspec.h" +#include "trace2.h" +#include "cache-tree.h" +#include "config.h" +#include "dir.h" +#include "fsmonitor.h" + +static struct cache_entry *construct_sparse_dir_entry( + struct index_state *istate, + const char *sparse_dir, + struct cache_tree *tree) +{ + struct cache_entry *de; + + de = make_cache_entry(istate, S_IFDIR, &tree->oid, sparse_dir, 0, 0); + + de->ce_flags |= CE_SKIP_WORKTREE; + return de; +} + +/* + * Returns the number of entries "inserted" into the index. + */ +static int convert_to_sparse_rec(struct index_state *istate, + int num_converted, + int start, int end, + const char *ct_path, size_t ct_pathlen, + struct cache_tree *ct) +{ + int i, can_convert = 1; + int start_converted = num_converted; + enum pattern_match_result match; + int dtype; + struct strbuf child_path = STRBUF_INIT; + struct pattern_list *pl = istate->sparse_checkout_patterns; + + /* + * Is the current path outside of the sparse cone? + * Then check if the region can be replaced by a sparse + * directory entry (everything is sparse and merged). + */ + match = path_matches_pattern_list(ct_path, ct_pathlen, + NULL, &dtype, pl, istate); + if (match != NOT_MATCHED) + can_convert = 0; + + for (i = start; can_convert && i < end; i++) { + struct cache_entry *ce = istate->cache[i]; + + if (ce_stage(ce) || + S_ISGITLINK(ce->ce_mode) || + !(ce->ce_flags & CE_SKIP_WORKTREE)) + can_convert = 0; + } + + if (can_convert) { + struct cache_entry *se; + se = construct_sparse_dir_entry(istate, ct_path, ct); + + istate->cache[num_converted++] = se; + return 1; + } + + for (i = start; i < end; ) { + int count, span, pos = -1; + const char *base, *slash; + struct cache_entry *ce = istate->cache[i]; + + /* + * Detect if this is a normal entry outside of any subtree + * entry. + */ + base = ce->name + ct_pathlen; + slash = strchr(base, '/'); + + if (slash) + pos = cache_tree_subtree_pos(ct, base, slash - base); + + if (pos < 0) { + istate->cache[num_converted++] = ce; + i++; + continue; + } + + strbuf_setlen(&child_path, 0); + strbuf_add(&child_path, ce->name, slash - ce->name + 1); + + span = ct->down[pos]->cache_tree->entry_count; + count = convert_to_sparse_rec(istate, + num_converted, i, i + span, + child_path.buf, child_path.len, + ct->down[pos]->cache_tree); + num_converted += count; + i += span; + } + + strbuf_release(&child_path); + return num_converted - start_converted; +} + +static int set_index_sparse_config(struct repository *repo, int enable) +{ + int res; + char *config_path = repo_git_path(repo, "config.worktree"); + res = git_config_set_in_file_gently(config_path, + "index.sparse", + enable ? 
"true" : NULL); + free(config_path); + + prepare_repo_settings(repo); + repo->settings.sparse_index = 1; + return res; +} + +int set_sparse_index_config(struct repository *repo, int enable) +{ + int res = set_index_sparse_config(repo, enable); + + prepare_repo_settings(repo); + repo->settings.sparse_index = enable; + return res; +} + +int convert_to_sparse(struct index_state *istate) +{ + int test_env; + if (istate->split_index || istate->sparse_index || + !core_apply_sparse_checkout || !core_sparse_checkout_cone) + return 0; + + if (!istate->repo) + istate->repo = the_repository; + + /* + * The GIT_TEST_SPARSE_INDEX environment variable triggers the + * index.sparse config variable to be on. + */ + test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1); + if (test_env >= 0) + set_sparse_index_config(istate->repo, test_env); + + /* + * Only convert to sparse if index.sparse is set. + */ + prepare_repo_settings(istate->repo); + if (!istate->repo->settings.sparse_index) + return 0; + + if (!istate->sparse_checkout_patterns) { + istate->sparse_checkout_patterns = xcalloc(1, sizeof(struct pattern_list)); + if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) + return 0; + } + + if (!istate->sparse_checkout_patterns->use_cone_patterns) { + warning(_("attempting to use sparse-index without cone mode")); + return -1; + } + + if (cache_tree_update(istate, 0)) { + warning(_("unable to update cache-tree, staying full")); + return -1; + } + + remove_fsmonitor(istate); + + trace2_region_enter("index", "convert_to_sparse", istate->repo); + istate->cache_nr = convert_to_sparse_rec(istate, + 0, 0, istate->cache_nr, + "", 0, istate->cache_tree); + + /* Clear and recompute the cache-tree */ + cache_tree_free(&istate->cache_tree); + cache_tree_update(istate, 0); + + istate->sparse_index = 1; + trace2_region_leave("index", "convert_to_sparse", istate->repo); + return 0; +} + +static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) +{ + ALLOC_GROW(istate->cache, nr + 1, istate->cache_alloc); + + istate->cache[nr] = ce; + add_name_hash(istate, ce); +} + +static int add_path_to_index(const struct object_id *oid, + struct strbuf *base, const char *path, + unsigned int mode, void *context) +{ + struct index_state *istate = (struct index_state *)context; + struct cache_entry *ce; + size_t len = base->len; + + if (S_ISDIR(mode)) + return READ_TREE_RECURSIVE; + + strbuf_addstr(base, path); + + ce = make_cache_entry(istate, mode, oid, base->buf, 0, 0); + ce->ce_flags |= CE_SKIP_WORKTREE; + set_index_entry(istate, istate->cache_nr++, ce); + + strbuf_setlen(base, len); + return 0; +} + +void ensure_full_index(struct index_state *istate) +{ + int i; + struct index_state *full; + struct strbuf base = STRBUF_INIT; + + if (!istate || !istate->sparse_index) + return; + + if (!istate->repo) + istate->repo = the_repository; + + trace2_region_enter("index", "ensure_full_index", istate->repo); + + /* initialize basics of new index */ + full = xcalloc(1, sizeof(struct index_state)); + memcpy(full, istate, sizeof(struct index_state)); + + /* then change the necessary things */ + full->sparse_index = 0; + full->cache_alloc = (3 * istate->cache_alloc) / 2; + full->cache_nr = 0; + ALLOC_ARRAY(full->cache, full->cache_alloc); + + for (i = 0; i < istate->cache_nr; i++) { + struct cache_entry *ce = istate->cache[i]; + struct tree *tree; + struct pathspec ps; + + if (!S_ISSPARSEDIR(ce->ce_mode)) { + set_index_entry(full, full->cache_nr++, ce); + continue; + } + if (!(ce->ce_flags & 
CE_SKIP_WORKTREE)) + warning(_("index entry is a directory, but not sparse (%08x)"), + ce->ce_flags); + + /* recursively walk into cd->name */ + tree = lookup_tree(istate->repo, &ce->oid); + + memset(&ps, 0, sizeof(ps)); + ps.recursive = 1; + ps.has_wildcard = 1; + ps.max_depth = -1; + + strbuf_setlen(&base, 0); + strbuf_add(&base, ce->name, strlen(ce->name)); + + read_tree_at(istate->repo, tree, &base, &ps, + add_path_to_index, full); + + /* free directory entries. full entries are re-used */ + discard_cache_entry(ce); + } + + /* Copy back into original index. */ + memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash)); + istate->sparse_index = 0; + free(istate->cache); + istate->cache = full->cache; + istate->cache_nr = full->cache_nr; + istate->cache_alloc = full->cache_alloc; + + strbuf_release(&base); + free(full); + + /* Clear and recompute the cache-tree */ + cache_tree_free(&istate->cache_tree); + cache_tree_update(istate, 0); + + trace2_region_leave("index", "ensure_full_index", istate->repo); +} + +/* + * This static global helps avoid infinite recursion between + * expand_to_path() and index_file_exists(). + */ +static int in_expand_to_path = 0; + +void expand_to_path(struct index_state *istate, + const char *path, size_t pathlen, int icase) +{ + struct strbuf path_mutable = STRBUF_INIT; + size_t substr_len; + + /* prevent extra recursion */ + if (in_expand_to_path) + return; + + if (!istate || !istate->sparse_index) + return; + + if (!istate->repo) + istate->repo = the_repository; + + in_expand_to_path = 1; + + /* + * We only need to actually expand a region if the + * following are both true: + * + * 1. 'path' is not already in the index. + * 2. Some parent directory of 'path' is a sparse directory. + */ + + if (index_file_exists(istate, path, pathlen, icase)) + goto cleanup; + + strbuf_add(&path_mutable, path, pathlen); + strbuf_addch(&path_mutable, '/'); + + /* Check the name hash for all parent directories */ + substr_len = 0; + while (substr_len < pathlen) { + char temp; + char *replace = strchr(path_mutable.buf + substr_len, '/'); + + if (!replace) + break; + + /* replace the character _after_ the slash */ + replace++; + temp = *replace; + *replace = '\0'; + if (index_file_exists(istate, path_mutable.buf, + path_mutable.len, icase)) { + /* + * We found a parent directory in the name-hash + * hashtable, because only sparse directory entries + * have a trailing '/' character. Since "path" wasn't + * in the index, perhaps it exists within this + * sparse-directory. Expand accordingly. + */ + ensure_full_index(istate); + break; + } + + *replace = temp; + substr_len = replace - path_mutable.buf; + } + +cleanup: + strbuf_release(&path_mutable); + in_expand_to_path = 0; +} diff --git a/sparse-index.h b/sparse-index.h new file mode 100644 index 0000000000..1115a0d7dd --- /dev/null +++ b/sparse-index.h @@ -0,0 +1,23 @@ +#ifndef SPARSE_INDEX_H__ +#define SPARSE_INDEX_H__ + +struct index_state; +int convert_to_sparse(struct index_state *istate); + +/* + * Some places in the codebase expect to search for a specific path. + * This path might be outside of the sparse-checkout definition, in + * which case a sparse-index may not contain a path for that index. + * + * Given an index and a path, check to see if a leading directory for + * 'path' exists in the index as a sparse directory. In that case, + * expand that sparse directory to a full range of cache entries and + * populate the index accordingly. 
+ */ +void expand_to_path(struct index_state *istate, + const char *path, size_t pathlen, int icase); + +struct repository; +int set_sparse_index_config(struct repository *repo, int enable); + +#endif diff --git a/submodule.c b/submodule.c index 9767ba9893..83809a4f7b 100644 --- a/submodule.c +++ b/submodule.c @@ -33,7 +33,7 @@ static struct oid_array ref_tips_after_fetch; * will be disabled because we can't guess what might be configured in * .gitmodules unless the user resolves the conflict. */ -int is_gitmodules_unmerged(const struct index_state *istate) +int is_gitmodules_unmerged(struct index_state *istate) { int pos = index_name_pos(istate, GITMODULES_FILE, strlen(GITMODULES_FILE)); if (pos < 0) { /* .gitmodules not found or isn't merged */ @@ -301,7 +301,7 @@ int is_submodule_populated_gently(const char *path, int *return_error_code) /* * Dies if the provided 'prefix' corresponds to an unpopulated submodule */ -void die_in_unpopulated_submodule(const struct index_state *istate, +void die_in_unpopulated_submodule(struct index_state *istate, const char *prefix) { int i, prefixlen; @@ -331,7 +331,7 @@ void die_in_unpopulated_submodule(const struct index_state *istate, /* * Dies if any paths in the provided pathspec descends into a submodule */ -void die_path_inside_submodule(const struct index_state *istate, +void die_path_inside_submodule(struct index_state *istate, const struct pathspec *ps) { int i, j; diff --git a/submodule.h b/submodule.h index 4ac6e31cf1..84640c49c1 100644 --- a/submodule.h +++ b/submodule.h @@ -39,7 +39,7 @@ struct submodule_update_strategy { }; #define SUBMODULE_UPDATE_STRATEGY_INIT {SM_UPDATE_UNSPECIFIED, NULL} -int is_gitmodules_unmerged(const struct index_state *istate); +int is_gitmodules_unmerged(struct index_state *istate); int is_writing_gitmodules_ok(void); int is_staging_gitmodules_ok(struct index_state *istate); int update_path_in_gitmodules(const char *oldpath, const char *newpath); @@ -60,9 +60,9 @@ int is_submodule_active(struct repository *repo, const char *path); * Otherwise the return error code is the same as of resolve_gitdir_gently. */ int is_submodule_populated_gently(const char *path, int *return_error_code); -void die_in_unpopulated_submodule(const struct index_state *istate, +void die_in_unpopulated_submodule(struct index_state *istate, const char *prefix); -void die_path_inside_submodule(const struct index_state *istate, +void die_path_inside_submodule(struct index_state *istate, const struct pathspec *ps); enum submodule_update_type parse_submodule_update_type(const char *value); int parse_submodule_update_strategy(const char *value, @@ -436,6 +436,9 @@ and "sha256". GIT_TEST_WRITE_REV_INDEX=<boolean>, when true enables the 'pack.writeReverseIndex' setting. +GIT_TEST_SPARSE_INDEX=<boolean>, when true enables index writes to use the +sparse-index format by default. + Naming Tests ------------ diff --git a/t/helper/test-example-decorate.c b/t/helper/test-example-decorate.c index c8a1cde7d2..b9d1200eb9 100644 --- a/t/helper/test-example-decorate.c +++ b/t/helper/test-example-decorate.c @@ -26,8 +26,8 @@ int cmd__example_decorate(int argc, const char **argv) * Add 2 objects, one with a non-NULL decoration and one with a NULL * decoration. 
*/ - one = lookup_unknown_object(&one_oid); - two = lookup_unknown_object(&two_oid); + one = lookup_unknown_object(the_repository, &one_oid); + two = lookup_unknown_object(the_repository, &two_oid); ret = add_decoration(&n, one, &decoration_a); if (ret) BUG("when adding a brand-new object, NULL should be returned"); @@ -56,7 +56,7 @@ int cmd__example_decorate(int argc, const char **argv) ret = lookup_decoration(&n, two); if (ret != &decoration_b) BUG("lookup should return added declaration"); - three = lookup_unknown_object(&three_oid); + three = lookup_unknown_object(the_repository, &three_oid); ret = lookup_decoration(&n, three); if (ret) BUG("lookup for unknown object should return NULL"); diff --git a/t/helper/test-read-cache.c b/t/helper/test-read-cache.c index 244977a29b..b52c174acc 100644 --- a/t/helper/test-read-cache.c +++ b/t/helper/test-read-cache.c @@ -1,36 +1,82 @@ #include "test-tool.h" #include "cache.h" #include "config.h" +#include "blob.h" +#include "commit.h" +#include "tree.h" +#include "sparse-index.h" + +static void print_cache_entry(struct cache_entry *ce) +{ + const char *type; + printf("%06o ", ce->ce_mode & 0177777); + + if (S_ISSPARSEDIR(ce->ce_mode)) + type = tree_type; + else if (S_ISGITLINK(ce->ce_mode)) + type = commit_type; + else + type = blob_type; + + printf("%s %s\t%s\n", + type, + oid_to_hex(&ce->oid), + ce->name); +} + +static void print_cache(struct index_state *istate) +{ + int i; + for (i = 0; i < istate->cache_nr; i++) + print_cache_entry(istate->cache[i]); +} int cmd__read_cache(int argc, const char **argv) { + struct repository *r = the_repository; int i, cnt = 1; const char *name = NULL; + int table = 0, expand = 0; + + initialize_the_repository(); + prepare_repo_settings(r); + r->settings.command_requires_full_index = 0; - if (argc > 1 && skip_prefix(argv[1], "--print-and-refresh=", &name)) { - argc--; - argv++; + for (++argv, --argc; *argv && starts_with(*argv, "--"); ++argv, --argc) { + if (skip_prefix(*argv, "--print-and-refresh=", &name)) + continue; + if (!strcmp(*argv, "--table")) + table = 1; + else if (!strcmp(*argv, "--expand")) + expand = 1; } - if (argc == 2) - cnt = strtol(argv[1], NULL, 0); + if (argc == 1) + cnt = strtol(argv[0], NULL, 0); setup_git_directory(); git_config(git_default_config, NULL); + for (i = 0; i < cnt; i++) { - read_cache(); + repo_read_index(r); + + if (expand) + ensure_full_index(r->index); + if (name) { int pos; - refresh_index(&the_index, REFRESH_QUIET, + refresh_index(r->index, REFRESH_QUIET, NULL, NULL, NULL); - pos = index_name_pos(&the_index, name, strlen(name)); + pos = index_name_pos(r->index, name, strlen(name)); if (pos < 0) die("%s not in index", name); printf("%s is%s up to date\n", name, - ce_uptodate(the_index.cache[pos]) ? "" : " not"); + ce_uptodate(r->index->cache[pos]) ? "" : " not"); write_file(name, "%d\n", i); } - discard_cache(); + if (table) + print_cache(r->index); + discard_index(r->index); } return 0; } diff --git a/t/perf/p2000-sparse-operations.sh b/t/perf/p2000-sparse-operations.sh new file mode 100755 index 0000000000..94513c9774 --- /dev/null +++ b/t/perf/p2000-sparse-operations.sh @@ -0,0 +1,101 @@ +#!/bin/sh + +test_description="test performance of Git operations using the index" + +. ./perf-lib.sh + +test_perf_default_repo + +SPARSE_CONE=f2/f4/f1 + +test_expect_success 'setup repo and indexes' ' + git reset --hard HEAD && + + # Remove submodules from the example repo, because our + # duplication of the entire repo creates an unlikely data shape. 
+ if git config --file .gitmodules --get-regexp "submodule.*.path" >modules + then + git rm $(awk "{print \$2}" modules) && + git commit -m "remove submodules" || return 1 + fi && + + echo bogus >a && + cp a b && + git add a b && + git commit -m "level 0" && + BLOB=$(git rev-parse HEAD:a) && + OLD_COMMIT=$(git rev-parse HEAD) && + OLD_TREE=$(git rev-parse HEAD^{tree}) && + + for i in $(test_seq 1 4) + do + cat >in <<-EOF && + 100755 blob $BLOB a + 040000 tree $OLD_TREE f1 + 040000 tree $OLD_TREE f2 + 040000 tree $OLD_TREE f3 + 040000 tree $OLD_TREE f4 + EOF + NEW_TREE=$(git mktree <in) && + NEW_COMMIT=$(git commit-tree $NEW_TREE -p $OLD_COMMIT -m "level $i") && + OLD_TREE=$NEW_TREE && + OLD_COMMIT=$NEW_COMMIT || return 1 + done && + + git sparse-checkout init --cone && + git branch -f wide $OLD_COMMIT && + git -c core.sparseCheckoutCone=true clone --branch=wide --sparse . full-index-v3 && + ( + cd full-index-v3 && + git sparse-checkout init --cone && + git sparse-checkout set $SPARSE_CONE && + git config index.version 3 && + git update-index --index-version=3 + ) && + git -c core.sparseCheckoutCone=true clone --branch=wide --sparse . full-index-v4 && + ( + cd full-index-v4 && + git sparse-checkout init --cone && + git sparse-checkout set $SPARSE_CONE && + git config index.version 4 && + git update-index --index-version=4 + ) && + git -c core.sparseCheckoutCone=true clone --branch=wide --sparse . sparse-index-v3 && + ( + cd sparse-index-v3 && + git sparse-checkout init --cone --sparse-index && + git sparse-checkout set $SPARSE_CONE && + git config index.version 3 && + git update-index --index-version=3 + ) && + git -c core.sparseCheckoutCone=true clone --branch=wide --sparse . sparse-index-v4 && + ( + cd sparse-index-v4 && + git sparse-checkout init --cone --sparse-index && + git sparse-checkout set $SPARSE_CONE && + git config index.version 4 && + git update-index --index-version=4 + ) +' + +test_perf_on_all () { + command="$@" + for repo in full-index-v3 full-index-v4 \ + sparse-index-v3 sparse-index-v4 + do + test_perf "$command ($repo)" " + ( + cd $repo && + echo >>$SPARSE_CONE/a && + $command + ) + " + done +} + +test_perf_on_all git status +test_perf_on_all git add -A +test_perf_on_all git add . +test_perf_on_all git commit -a -m A + +test_done diff --git a/t/perf/p5600-partial-clone.sh b/t/perf/p5600-partial-clone.sh index 3e04bd2ae1..ca785a3341 100755 --- a/t/perf/p5600-partial-clone.sh +++ b/t/perf/p5600-partial-clone.sh @@ -23,4 +23,16 @@ test_perf 'checkout of result' ' git -C worktree checkout -f ' +test_perf 'fsck' ' + git -C bare.git fsck +' + +test_perf 'count commits' ' + git -C bare.git rev-list --all --count +' + +test_perf 'count non-promisor commits' ' + git -C bare.git rev-list --all --count --exclude-promisor-objects +' + test_done diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh index fc64e9ed99..38fc8340f5 100755 --- a/t/t1091-sparse-checkout-builtin.sh +++ b/t/t1091-sparse-checkout-builtin.sh @@ -205,6 +205,19 @@ test_expect_success 'sparse-checkout disable' ' check_files repo a deep folder1 folder2 ' +test_expect_success 'sparse-index enabled and disabled' ' + git -C repo sparse-checkout init --cone --sparse-index && + test_cmp_config -C repo true index.sparse && + test-tool -C repo read-cache --table >cache && + grep " tree " cache && + + git -C repo sparse-checkout disable && + test-tool -C repo read-cache --table >cache && + ! grep " tree " cache && + git -C repo config --list >config && + ! 
grep index.sparse config +' + test_expect_success 'cone mode: init and set' ' git -C repo sparse-checkout init --cone && git -C repo config --list >config && diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh index 8cd3e5a8d2..12e6c45302 100755 --- a/t/t1092-sparse-checkout-compatibility.sh +++ b/t/t1092-sparse-checkout-compatibility.sh @@ -2,11 +2,15 @@ test_description='compare full workdir to sparse workdir' +GIT_TEST_SPLIT_INDEX=0 +GIT_TEST_SPARSE_INDEX= + . ./test-lib.sh test_expect_success 'setup' ' git init initial-repo && ( + GIT_TEST_SPARSE_INDEX=0 && cd initial-repo && echo a >a && echo "after deep" >e && @@ -87,39 +91,102 @@ init_repos () { cp -r initial-repo sparse-checkout && git -C sparse-checkout reset --hard && - git -C sparse-checkout sparse-checkout init --cone && + + cp -r initial-repo sparse-index && + git -C sparse-index reset --hard && # initialize sparse-checkout definitions - git -C sparse-checkout sparse-checkout set deep + git -C sparse-checkout sparse-checkout init --cone && + git -C sparse-checkout sparse-checkout set deep && + git -C sparse-index sparse-checkout init --cone --sparse-index && + test_cmp_config -C sparse-index true index.sparse && + git -C sparse-index sparse-checkout set deep } run_on_sparse () { ( cd sparse-checkout && - $* >../sparse-checkout-out 2>../sparse-checkout-err + "$@" >../sparse-checkout-out 2>../sparse-checkout-err + ) && + ( + cd sparse-index && + "$@" >../sparse-index-out 2>../sparse-index-err ) } run_on_all () { ( cd full-checkout && - $* >../full-checkout-out 2>../full-checkout-err + "$@" >../full-checkout-out 2>../full-checkout-err ) && - run_on_sparse $* + run_on_sparse "$@" } test_all_match () { - run_on_all $* && + run_on_all "$@" && test_cmp full-checkout-out sparse-checkout-out && - test_cmp full-checkout-err sparse-checkout-err + test_cmp full-checkout-out sparse-index-out && + test_cmp full-checkout-err sparse-checkout-err && + test_cmp full-checkout-err sparse-index-err +} + +test_sparse_match () { + run_on_sparse "$@" && + test_cmp sparse-checkout-out sparse-index-out && + test_cmp sparse-checkout-err sparse-index-err } +test_expect_success 'sparse-index contents' ' + init_repos && + + test-tool -C sparse-index read-cache --table >cache && + for dir in folder1 folder2 x + do + TREE=$(git -C sparse-index rev-parse HEAD:$dir) && + grep "040000 tree $TREE $dir/" cache \ + || return 1 + done && + + git -C sparse-index sparse-checkout set folder1 && + + test-tool -C sparse-index read-cache --table >cache && + for dir in deep folder2 x + do + TREE=$(git -C sparse-index rev-parse HEAD:$dir) && + grep "040000 tree $TREE $dir/" cache \ + || return 1 + done && + + git -C sparse-index sparse-checkout set deep/deeper1 && + + test-tool -C sparse-index read-cache --table >cache && + for dir in deep/deeper2 folder1 folder2 x + do + TREE=$(git -C sparse-index rev-parse HEAD:$dir) && + grep "040000 tree $TREE $dir/" cache \ + || return 1 + done && + + # Disabling the sparse-index removes tree entries with full ones + git -C sparse-index sparse-checkout init --no-sparse-index && + + test-tool -C sparse-index read-cache --table >cache && + ! 
grep "040000 tree" cache && + test_sparse_match test-tool read-cache --table +' + +test_expect_success 'expanded in-memory index matches full index' ' + init_repos && + test_sparse_match test-tool read-cache --expand --table +' + test_expect_success 'status with options' ' init_repos && + test_sparse_match ls && test_all_match git status --porcelain=v2 && test_all_match git status --porcelain=v2 -z -u && test_all_match git status --porcelain=v2 -uno && - run_on_all "touch README.md" && + run_on_all touch README.md && test_all_match git status --porcelain=v2 && test_all_match git status --porcelain=v2 -z -u && test_all_match git status --porcelain=v2 -uno && @@ -135,7 +202,7 @@ test_expect_success 'add, commit, checkout' ' write_script edit-contents <<-\EOF && echo text >>$1 EOF - run_on_all "../edit-contents README.md" && + run_on_all ../edit-contents README.md && test_all_match git add README.md && test_all_match git status --porcelain=v2 && @@ -144,7 +211,7 @@ test_expect_success 'add, commit, checkout' ' test_all_match git checkout HEAD~1 && test_all_match git checkout - && - run_on_all "../edit-contents README.md" && + run_on_all ../edit-contents README.md && test_all_match git add -A && test_all_match git status --porcelain=v2 && @@ -153,7 +220,7 @@ test_expect_success 'add, commit, checkout' ' test_all_match git checkout HEAD~1 && test_all_match git checkout - && - run_on_all "../edit-contents deep/newfile" && + run_on_all ../edit-contents deep/newfile && test_all_match git status --porcelain=v2 -uno && test_all_match git status --porcelain=v2 && @@ -186,7 +253,7 @@ test_expect_success 'diff --staged' ' write_script edit-contents <<-\EOF && echo text >>README.md EOF - run_on_all "../edit-contents" && + run_on_all ../edit-contents && test_all_match git diff && test_all_match git diff --staged && @@ -252,6 +319,17 @@ test_expect_failure 'checkout and reset (mixed)' ' test_all_match git reset update-folder2 ' +# Ensure that sparse-index behaves identically to +# sparse-checkout with a full index. +test_expect_success 'checkout and reset (mixed) [sparse]' ' + init_repos && + + test_sparse_match git checkout -b reset-test update-deep && + test_sparse_match git reset deepest && + test_sparse_match git reset update-folder1 && + test_sparse_match git reset update-folder2 +' + test_expect_success 'merge' ' init_repos && @@ -280,7 +358,7 @@ test_expect_success 'clean' ' echo bogus >>.gitignore && run_on_all cp ../.gitignore . 
&& test_all_match git add .gitignore && - test_all_match git commit -m ignore-bogus-files && + test_all_match git commit -m "ignore bogus files" && run_on_sparse mkdir folder1 && run_on_all touch folder1/bogus && @@ -288,14 +366,51 @@ test_expect_success 'clean' ' test_all_match git status --porcelain=v2 && test_all_match git clean -f && test_all_match git status --porcelain=v2 && + test_sparse_match ls && + test_sparse_match ls folder1 && test_all_match git clean -xf && test_all_match git status --porcelain=v2 && + test_sparse_match ls && + test_sparse_match ls folder1 && test_all_match git clean -xdf && test_all_match git status --porcelain=v2 && + test_sparse_match ls && + test_sparse_match ls folder1 && + + test_sparse_match test_path_is_dir folder1 +' + +test_expect_success 'submodule handling' ' + init_repos && + + test_all_match mkdir modules && + test_all_match touch modules/a && + test_all_match git add modules && + test_all_match git commit -m "add modules directory" && + + run_on_all git submodule add "$(pwd)/initial-repo" modules/sub && + test_all_match git commit -m "add submodule" && + + # having a submodule prevents "modules" from collapse + test-tool -C sparse-index read-cache --table >cache && + grep "100644 blob .* modules/a" cache && + grep "160000 commit $(git -C initial-repo rev-parse HEAD) modules/sub" cache +' + +test_expect_success 'sparse-index is expanded and converted back' ' + init_repos && + + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ + git -C sparse-index -c core.fsmonitor="" reset --hard && + test_region index convert_to_sparse trace2.txt && + test_region index ensure_full_index trace2.txt && - test_path_is_dir sparse-checkout/folder1 + rm trace2.txt && + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ + git -C sparse-index -c core.fsmonitor="" status -uno && + test_region index ensure_full_index trace2.txt ' test_done diff --git a/t/t1300-config.sh b/t/t1300-config.sh index e0dd5d65ce..ad4e6d0cfc 100755 --- a/t/t1300-config.sh +++ b/t/t1300-config.sh @@ -1374,16 +1374,29 @@ test_expect_success 'git --config-env=key=envvar support' ' cat >expect <<-\EOF && value value + value + value + false false EOF { ENVVAR=value git --config-env=core.name=ENVVAR config core.name && + ENVVAR=value git --config-env core.name=ENVVAR config core.name && ENVVAR=value git --config-env=foo.CamelCase=ENVVAR config foo.camelcase && - ENVVAR= git --config-env=foo.flag=ENVVAR config --bool foo.flag + ENVVAR=value git --config-env foo.CamelCase=ENVVAR config foo.camelcase && + ENVVAR= git --config-env=foo.flag=ENVVAR config --bool foo.flag && + ENVVAR= git --config-env foo.flag=ENVVAR config --bool foo.flag } >actual && test_cmp expect actual ' +test_expect_success 'git --config-env with missing value' ' + test_must_fail env ENVVAR=value git --config-env 2>error && + grep "no config key given for --config-env" error && + test_must_fail env ENVVAR=value git --config-env config core.name 2>error && + grep "invalid config format: config" error +' + test_expect_success 'git --config-env fails with invalid parameters' ' test_must_fail git --config-env=foo.flag config --bool foo.flag 2>error && test_i18ngrep "invalid config format: foo.flag" error && diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh index 6cca8b84a6..87def81699 100755 --- a/t/t4013-diff-various.sh +++ b/t/t4013-diff-various.sh @@ -452,6 +452,37 @@ diff-tree --stat --compact-summary initial mode diff-tree -R --stat --compact-summary initial mode EOF 
+test_expect_success 'log --diff-merges=on matches --diff-merges=separate' ' + git log -p --diff-merges=separate master >result && + process_diffs result >expected && + git log -p --diff-merges=on master >result && + process_diffs result >actual && + test_cmp expected actual +' + +test_expect_success 'deny wrong log.diffMerges config' ' + test_config log.diffMerges wrong-value && + test_expect_code 128 git log +' + +test_expect_success 'git config log.diffMerges first-parent' ' + git log -p --diff-merges=first-parent master >result && + process_diffs result >expected && + test_config log.diffMerges first-parent && + git log -p --diff-merges=on master >result && + process_diffs result >actual && + test_cmp expected actual +' + +test_expect_success 'git config log.diffMerges first-parent vs -m' ' + git log -p --diff-merges=first-parent master >result && + process_diffs result >expected && + test_config log.diffMerges first-parent && + git log -p -m master >result && + process_diffs result >actual && + test_cmp expected actual +' + test_expect_success 'log -S requires an argument' ' test_must_fail git log -S ' diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh index cabdf7d57a..bcb558ef4d 100755 --- a/t/t4205-log-pretty-formats.sh +++ b/t/t4205-log-pretty-formats.sh @@ -525,15 +525,14 @@ test_expect_success 'strbuf_utf8_replace() not producing NUL' ' ! grep Q actual ' -# ISO strict date format -test_expect_success 'ISO and ISO-strict date formats display the same values' ' - git log --format=%ai%n%ci | - sed -e "s/ /T/; s/ //; s/..\$/:&/" >expected && +# --date=[XXX] and corresponding %a[X] %c[X] format equivalency +test_expect_success '--date=iso-strict %ad%cd is the same as %aI%cI' ' + git log --format=%ad%n%cd --date=iso-strict >expected && git log --format=%aI%n%cI >actual && test_cmp expected actual ' -test_expect_success 'short date' ' +test_expect_success '--date=short %ad%cd is the same as %as%cs' ' git log --format=%ad%n%cd --date=short >expected && git log --format=%as%n%cs >actual && test_cmp expected actual diff --git a/t/t5523-push-upstream.sh b/t/t5523-push-upstream.sh index 9fbe7f784d..fdb4292056 100755 --- a/t/t5523-push-upstream.sh +++ b/t/t5523-push-upstream.sh @@ -119,4 +119,11 @@ test_expect_success TTY 'quiet push' ' test_must_be_empty output ' +test_expect_success TTY 'quiet push -u' ' + ensure_fresh_upstream && + + test_terminal git push --quiet -u --no-progress upstream main 2>&1 | tee output && + test_must_be_empty output +' + test_done diff --git a/t/t5582-fetch-negative-refspec.sh b/t/t5582-fetch-negative-refspec.sh index f345097277..e5d2e79ad3 100755 --- a/t/t5582-fetch-negative-refspec.sh +++ b/t/t5582-fetch-negative-refspec.sh @@ -240,4 +240,47 @@ test_expect_success "push with matching +: and negative refspec" ' git -C two push -v one ' +test_expect_success '--prefetch correctly modifies refspecs' ' + git -C one config --unset-all remote.origin.fetch && + git -C one config --add remote.origin.fetch ^refs/heads/bogus/ignore && + git -C one config --add remote.origin.fetch "refs/tags/*:refs/tags/*" && + git -C one config --add remote.origin.fetch "refs/heads/bogus/*:bogus/*" && + + git tag -a -m never never-fetch-tag HEAD && + + git branch bogus/fetched HEAD~1 && + git branch bogus/ignore HEAD && + + git -C one fetch --prefetch --no-tags && + test_must_fail git -C one rev-parse never-fetch-tag && + git -C one rev-parse refs/prefetch/bogus/fetched && + test_must_fail git -C one rev-parse refs/prefetch/bogus/ignore && + + # correctly handle 
when refspec set becomes empty + # after removing the refs/tags/* refspec. + git -C one config --unset-all remote.origin.fetch && + git -C one config --add remote.origin.fetch "refs/tags/*:refs/tags/*" && + + git -C one fetch --prefetch --no-tags && + test_must_fail git -C one rev-parse never-fetch-tag && + + # The refspec for refs that are not fully qualified + # are filtered multiple times. + git -C one rev-parse refs/prefetch/bogus/fetched && + test_must_fail git -C one rev-parse refs/prefetch/bogus/ignore +' + +test_expect_success '--prefetch succeeds when refspec becomes empty' ' + git checkout bogus/fetched && + test_commit extra && + + git -C one config --unset-all remote.origin.fetch && + git -C one config --unset branch.main.remote && + git -C one config remote.origin.fetch "+refs/tags/extra" && + git -C one config remote.origin.skipfetchall true && + git -C one config remote.origin.tagopt "--no-tags" && + + git -C one fetch --prefetch +' + test_done diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh index 2412d8c5c0..b93ae014ee 100755 --- a/t/t7900-maintenance.sh +++ b/t/t7900-maintenance.sh @@ -141,19 +141,25 @@ test_expect_success 'prefetch multiple remotes' ' test_commit -C clone1 one && test_commit -C clone2 two && GIT_TRACE2_EVENT="$(pwd)/run-prefetch.txt" git maintenance run --task=prefetch 2>/dev/null && - fetchargs="--prune --no-tags --no-write-fetch-head --recurse-submodules=no --refmap= --quiet" && - test_subcommand git fetch remote1 $fetchargs +refs/heads/\\*:refs/prefetch/remote1/\\* <run-prefetch.txt && - test_subcommand git fetch remote2 $fetchargs +refs/heads/\\*:refs/prefetch/remote2/\\* <run-prefetch.txt && + fetchargs="--prefetch --prune --no-tags --no-write-fetch-head --recurse-submodules=no --quiet" && + test_subcommand git fetch remote1 $fetchargs <run-prefetch.txt && + test_subcommand git fetch remote2 $fetchargs <run-prefetch.txt && test_path_is_missing .git/refs/remotes && - git log prefetch/remote1/one && - git log prefetch/remote2/two && + git log prefetch/remotes/remote1/one && + git log prefetch/remotes/remote2/two && git fetch --all && - test_cmp_rev refs/remotes/remote1/one refs/prefetch/remote1/one && - test_cmp_rev refs/remotes/remote2/two refs/prefetch/remote2/two && + test_cmp_rev refs/remotes/remote1/one refs/prefetch/remotes/remote1/one && + test_cmp_rev refs/remotes/remote2/two refs/prefetch/remotes/remote2/two && test_cmp_config refs/prefetch/ log.excludedecoration && git log --oneline --decorate --all >log && - ! grep "prefetch" log + ! grep "prefetch" log && + + test_when_finished git config --unset remote.remote1.skipFetchAll && + git config remote.remote1.skipFetchAll true && + GIT_TRACE2_EVENT="$(pwd)/skip-remote1.txt" git maintenance run --task=prefetch 2>/dev/null && + test_subcommand ! 
git fetch remote1 $fetchargs <skip-remote1.txt && + test_subcommand git fetch remote2 $fetchargs <skip-remote1.txt ' test_expect_success 'prefetch and existing log.excludeDecoration values' ' diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh index 04ce884ef5..4d732d6d4f 100755 --- a/t/t9902-completion.sh +++ b/t/t9902-completion.sh @@ -2306,6 +2306,7 @@ test_expect_success 'git config - variable name' ' test_completion "git config log.d" <<-\EOF log.date Z log.decorate Z + log.diffMerges Z EOF ' @@ -2327,6 +2328,7 @@ test_expect_success 'git -c - variable name' ' test_completion "git -c log.d" <<-\EOF log.date=Z log.decorate=Z + log.diffMerges=Z EOF ' @@ -2348,6 +2350,7 @@ test_expect_success 'git clone --config= - variable name' ' test_completion "git clone --config=log.d" <<-\EOF log.date=Z log.decorate=Z + log.diffMerges=Z EOF ' diff --git a/transport.c b/transport.c index ef66e73090..62b6eeed21 100644 --- a/transport.c +++ b/transport.c @@ -108,11 +108,11 @@ static void set_upstreams(struct transport *transport, struct ref *refs, if (!remotename || !starts_with(remotename, "refs/heads/")) continue; - if (!pretend) - install_branch_config(BRANCH_CONFIG_VERBOSE, - localname + 11, transport->remote->name, - remotename); - else + if (!pretend) { + int flag = transport->verbose < 0 ? 0 : BRANCH_CONFIG_VERBOSE; + install_branch_config(flag, localname + 11, + transport->remote->name, remotename); + } else if (transport->verbose >= 0) printf(_("Would set upstream of '%s' to '%s' of '%s'\n"), localname + 11, remotename + 11, transport->remote->name); diff --git a/unpack-trees.c b/unpack-trees.c index 8a1afbc1e4..7a1804c314 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -17,6 +17,7 @@ #include "object-store.h" #include "promisor-remote.h" #include "entry.h" +#include "parallel-checkout.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -398,7 +399,7 @@ static int check_updates(struct unpack_trees_options *o, int errs = 0; struct progress *progress; struct checkout state = CHECKOUT_INIT; - int i; + int i, pc_workers, pc_threshold; trace_performance_enter(); state.force = 1; @@ -441,7 +442,6 @@ static int check_updates(struct unpack_trees_options *o, if (should_update_submodules()) load_gitmodules_file(index, &state); - enable_delayed_checkout(&state); if (has_promisor_remote()) { /* * Prefetch the objects that are to be checked out in the loop @@ -464,18 +464,31 @@ static int check_updates(struct unpack_trees_options *o, to_fetch.oid, to_fetch.nr); oid_array_clear(&to_fetch); } + + get_parallel_checkout_configs(&pc_workers, &pc_threshold); + + enable_delayed_checkout(&state); + if (pc_workers > 1) + init_parallel_checkout(); for (i = 0; i < index->cache_nr; i++) { struct cache_entry *ce = index->cache[i]; if (ce->ce_flags & CE_UPDATE) { + size_t last_pc_queue_size = pc_queue_size(); + if (ce->ce_flags & CE_WT_REMOVE) BUG("both update and delete flags are set on %s", ce->name); - display_progress(progress, ++cnt); ce->ce_flags &= ~CE_UPDATE; errs |= checkout_entry(ce, &state, NULL, NULL); + + if (last_pc_queue_size == pc_queue_size()) + display_progress(progress, ++cnt); } } + if (pc_workers > 1) + errs |= run_parallel_checkout(&state, pc_workers, pc_threshold, + progress, &cnt); stop_progress(&progress); errs |= finish_delayed_checkout(&state, NULL); git_attr_set_direction(GIT_ATTR_CHECKIN); @@ -750,9 +763,13 @@ static int index_pos_by_traverse_info(struct name_entry *names, strbuf_make_traverse_path(&name, info, names->path, names->pathlen); 
strbuf_addch(&name, '/'); pos = index_name_pos(o->src_index, name.buf, name.len); - if (pos >= 0) - BUG("This is a directory and should not exist in index"); - pos = -pos - 1; + if (pos >= 0) { + if (!o->src_index->sparse_index || + !(o->src_index->cache[pos]->ce_flags & CE_SKIP_WORKTREE)) + BUG("This is a directory and should not exist in index"); + } else { + pos = -pos - 1; + } if (pos >= o->src_index->cache_nr || !starts_with(o->src_index->cache[pos]->name, name.buf) || (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name.buf))) @@ -1571,6 +1588,7 @@ static int verify_absent(const struct cache_entry *, */ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o) { + struct repository *repo = the_repository; int i, ret; static struct cache_entry *dfc; struct pattern_list pl; @@ -1582,6 +1600,12 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options trace_performance_enter(); trace2_region_enter("unpack_trees", "unpack_trees", the_repository); + prepare_repo_settings(repo); + if (repo->settings.command_requires_full_index) { + ensure_full_index(o->src_index); + ensure_full_index(o->dst_index); + } + if (!core_apply_sparse_checkout || !o->update) o->skip_sparse_checkout = 1; if (!o->skip_sparse_checkout && !o->pl) { diff --git a/upload-pack.c b/upload-pack.c index e19583ae0f..5c1cd19612 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -1153,7 +1153,7 @@ static void receive_needs(struct upload_pack_data *data, static int mark_our_ref(const char *refname, const char *refname_full, const struct object_id *oid) { - struct object *o = lookup_unknown_object(oid); + struct object *o = lookup_unknown_object(the_repository, oid); if (ref_is_hidden(refname, refname_full)) { o->flags |= HIDDEN_REF; @@ -298,7 +298,7 @@ int walker_fetch(struct walker *walker, int targets, char **target, error("Could not interpret response from server '%s' as something to pull", target[i]); goto done; } - if (process(walker, lookup_unknown_object(&oids[i]))) + if (process(walker, lookup_unknown_object(the_repository, &oids[i]))) goto done; } |
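
Note (editorial, not part of the patches above): the series collapses directories outside the sparse-checkout cone into single `S_IFDIR` cache entries and guards unaudited index consumers with `ensure_full_index()`. The following is a minimal sketch of that guard pattern as it appears in the TODO-marked hunks; `count_skip_worktree()` is a hypothetical helper, not a function from this series, and it assumes Git's in-tree headers (`cache.h`, `sparse-index.h`) and the `S_ISSPARSEDIR()` macro introduced earlier in the topic.

```c
#include "cache.h"
#include "sparse-index.h"

/*
 * Hypothetical example: an index consumer that has not been audited
 * for sparse-directory entries first expands the index, then iterates
 * over plain file entries, exactly like the TODO-marked call sites in
 * read-cache.c, resolve-undo.c and revision.c above.
 */
static void count_skip_worktree(struct index_state *istate)
{
	int i, skipped = 0;

	/* Fall back to a full index until this caller is made sparse-aware. */
	ensure_full_index(istate);

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (S_ISSPARSEDIR(ce->ce_mode))
			continue; /* cannot happen after ensure_full_index() */
		if (ce->ce_flags & CE_SKIP_WORKTREE)
			skipped++;
	}
	printf("skip-worktree entries: %d\n", skipped);
}
```

A caller that is later made sparse-aware would drop the `ensure_full_index()` call and instead treat each `S_ISSPARSEDIR()` entry as a collapsed tree, using `ce->oid` the same way `ensure_full_index()` itself does when it walks the tree with `read_tree_at()`.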
