-rw-r--r-- .gitignore | 1
-rw-r--r-- .gitlab-ci.yml | 3
-rw-r--r-- .mailmap | 2
-rw-r--r-- Documentation/BreakingChanges.adoc | 5
-rw-r--r-- Documentation/CodingGuidelines | 6
-rw-r--r-- Documentation/RelNotes/2.52.0.adoc | 99
-rw-r--r-- Documentation/config.adoc | 9
-rw-r--r-- Documentation/config/alias.adoc | 2
-rw-r--r-- Documentation/config/core.adoc | 20
-rw-r--r-- Documentation/config/promisor.adoc | 61
-rw-r--r-- Documentation/config/sendemail.adoc | 2
-rw-r--r-- Documentation/config/worktree.adoc | 2
-rw-r--r-- Documentation/fetch-options.adoc | 2
-rw-r--r-- Documentation/git-add.adoc | 34
-rw-r--r-- Documentation/git-fast-import.adoc | 10
-rw-r--r-- Documentation/git-last-modified.adoc | 54
-rw-r--r-- Documentation/git-multi-pack-index.adoc | 2
-rw-r--r-- Documentation/git-refs.adoc | 7
-rw-r--r-- Documentation/git-repo.adoc | 9
-rw-r--r-- Documentation/git-send-email.adoc | 34
-rw-r--r-- Documentation/gitprotocol-v2.adoc | 64
-rw-r--r-- Documentation/meson.build | 1
-rw-r--r-- Makefile | 12
-rw-r--r-- add-interactive.c | 88
-rw-r--r-- add-interactive.h | 7
-rw-r--r-- add-patch.c | 12
-rw-r--r-- alloc.c | 10
-rw-r--r-- alloc.h | 4
-rw-r--r-- builtin.h | 1
-rw-r--r-- builtin/add.c | 5
-rw-r--r-- builtin/commit-graph.c | 4
-rw-r--r-- builtin/commit.c | 7
-rw-r--r-- builtin/describe.c | 34
-rw-r--r-- builtin/last-modified.c | 326
-rw-r--r-- builtin/log.c | 1
-rw-r--r-- builtin/ls-files.c | 13
-rw-r--r-- builtin/ls-tree.c | 7
-rw-r--r-- builtin/merge.c | 3
-rw-r--r-- builtin/multi-pack-index.c | 31
-rw-r--r-- builtin/pack-objects.c | 2
-rw-r--r-- builtin/range-diff.c | 21
-rw-r--r-- builtin/rebase.c | 3
-rw-r--r-- builtin/refs.c | 48
-rw-r--r-- builtin/repack.c | 7
-rw-r--r-- builtin/repo.c | 45
-rw-r--r-- builtin/rev-parse.c | 7
-rw-r--r-- builtin/revert.c | 7
-rw-r--r-- builtin/stash.c | 19
-rw-r--r-- builtin/unpack-objects.c | 5
-rw-r--r-- builtin/update-index.c | 7
-rw-r--r-- bulk-checkin.c | 152
-rw-r--r-- bulk-checkin.h | 25
-rw-r--r-- cache-tree.c | 5
-rw-r--r-- command-list.txt | 1
-rw-r--r-- commit-graph.c | 7
-rw-r--r-- config.c | 297
-rw-r--r-- contrib/diff-highlight/README | 8
-rwxr-xr-x contrib/subtree/git-subtree.sh | 36
-rwxr-xr-x contrib/subtree/t/t7900-subtree.sh | 71
-rw-r--r-- environment.c | 15
-rw-r--r-- environment.h | 3
-rw-r--r-- git-curl-compat.h | 7
-rw-r--r-- git-gui/Makefile | 2
-rwxr-xr-x git-gui/git-gui--askyesno | 63
-rwxr-xr-x git-gui/git-gui.sh | 42
-rw-r--r-- git-gui/lib/index.tcl | 7
-rwxr-xr-x git-send-email.perl | 43
-rw-r--r-- git.c | 1
-rw-r--r-- gitk-git/README.md | 93
-rwxr-xr-x gitk-git/gitk | 26
-rw-r--r-- http.c | 8
-rw-r--r-- imap-send.c | 26
-rw-r--r-- line-log.c | 15
-rw-r--r-- list-objects-filter.c | 9
-rw-r--r-- log-tree.c | 1
-rw-r--r-- meson.build | 1
-rw-r--r-- midx-write.c | 245
-rw-r--r-- midx.c | 135
-rw-r--r-- midx.h | 42
-rw-r--r-- object-file.c | 30
-rw-r--r-- object-name.c | 37
-rw-r--r-- object.c | 26
-rw-r--r-- odb.c | 64
-rw-r--r-- odb.h | 27
-rw-r--r-- pack-bitmap.c | 15
-rw-r--r-- pack-revindex.c | 14
-rw-r--r-- packfile.c | 13
-rw-r--r-- path-walk.c | 55
-rw-r--r-- promisor-remote.c | 424
-rw-r--r-- range-diff.c | 20
-rw-r--r-- range-diff.h | 5
-rw-r--r-- read-cache.c | 5
-rw-r--r-- repository.c | 2
-rw-r--r-- repository.h | 3
-rw-r--r-- t/Makefile | 14
-rw-r--r-- t/helper/test-read-midx.c | 31
-rw-r--r-- t/meson.build | 6
-rwxr-xr-x t/perf/p8020-last-modified.sh | 22
-rw-r--r-- t/show-ref-exists-tests.sh | 77
-rwxr-xr-x t/t0450-txt-doc-vs-help.sh | 15
-rw-r--r-- t/t0450/adoc-missing | 9
-rwxr-xr-x t/t1092-sparse-checkout-compatibility.sh | 13
-rwxr-xr-x t/t1403-show-ref.sh | 65
-rwxr-xr-x t/t1422-show-ref-exists.sh | 9
-rwxr-xr-x t/t1462-refs-exists.sh | 10
-rwxr-xr-x t/t1900-repo.sh | 18
-rwxr-xr-x t/t3404-rebase-interactive.sh | 19
-rwxr-xr-x t/t3418-rebase-continue.sh | 2
-rwxr-xr-x t/t3701-add-interactive.sh | 53
-rwxr-xr-x t/t3904-stash-patch.sh | 19
-rwxr-xr-x t/t4211-line-log.sh | 2
-rw-r--r-- t/t4211/sha1/expect.multiple | 6
-rw-r--r-- t/t4211/sha1/expect.no-assertion-error | 90
-rw-r--r-- t/t4211/sha1/expect.two-ranges | 6
-rw-r--r-- t/t4211/sha256/expect.multiple | 6
-rw-r--r-- t/t4211/sha256/expect.no-assertion-error | 90
-rw-r--r-- t/t4211/sha256/expect.two-ranges | 6
-rwxr-xr-x t/t5319-multi-pack-index.sh | 30
-rwxr-xr-x t/t5530-upload-pack-error.sh | 68
-rwxr-xr-x t/t5564-http-proxy.sh | 4
-rwxr-xr-x t/t5710-promisor-remote-capability.sh | 65
-rwxr-xr-x t/t7502-commit-porcelain.sh | 52
-rwxr-xr-x t/t7700-repack.sh | 63
-rwxr-xr-x t/t8020-last-modified.sh | 210
-rw-r--r-- t/unit-tests/clar/.github/workflows/ci.yml | 37
-rw-r--r-- t/unit-tests/clar/CMakeLists.txt | 13
-rw-r--r-- t/unit-tests/clar/README.md | 37
-rw-r--r-- t/unit-tests/clar/clar.c | 154
-rw-r--r-- t/unit-tests/clar/clar.h | 83
-rw-r--r-- t/unit-tests/clar/clar/fixtures.h | 6
-rw-r--r-- t/unit-tests/clar/clar/fs.h | 29
-rw-r--r-- t/unit-tests/clar/clar/print.h | 71
-rw-r--r-- t/unit-tests/clar/clar/sandbox.h | 226
-rw-r--r-- t/unit-tests/clar/clar/summary.h | 5
-rw-r--r-- t/unit-tests/clar/example/CMakeLists.txt | 28
-rw-r--r-- t/unit-tests/clar/example/example.c | 6
-rw-r--r-- t/unit-tests/clar/example/main.c (renamed from t/unit-tests/clar/test/main.c.sample) | 2
-rwxr-xr-x t/unit-tests/clar/generate.py | 28
-rw-r--r-- t/unit-tests/clar/test/CMakeLists.txt | 34
-rw-r--r-- t/unit-tests/clar/test/clar_test.h | 16
-rw-r--r-- t/unit-tests/clar/test/expected/help | 12
-rw-r--r-- t/unit-tests/clar/test/expected/quiet | 44
-rw-r--r-- t/unit-tests/clar/test/expected/specific_test | 9
-rw-r--r-- t/unit-tests/clar/test/expected/stop_on_failure | 8
-rw-r--r-- t/unit-tests/clar/test/expected/suite_names | 2
-rw-r--r-- t/unit-tests/clar/test/expected/summary.xml | 41
-rw-r--r-- t/unit-tests/clar/test/expected/summary_with_filename | 49
-rw-r--r-- t/unit-tests/clar/test/expected/summary_without_filename | 49
-rw-r--r-- t/unit-tests/clar/test/expected/tap | 92
-rw-r--r-- t/unit-tests/clar/test/expected/without_arguments | 48
-rw-r--r-- t/unit-tests/clar/test/main.c | 41
-rw-r--r-- t/unit-tests/clar/test/selftest.c | 370
-rw-r--r-- t/unit-tests/clar/test/selftest.h | 3
-rw-r--r-- t/unit-tests/clar/test/suites/CMakeLists.txt | 53
-rw-r--r-- t/unit-tests/clar/test/suites/combined.c (renamed from t/unit-tests/clar/test/sample.c) | 37
-rw-r--r-- t/unit-tests/clar/test/suites/main.c | 27
-rw-r--r-- t/unit-tests/clar/test/suites/pointer.c | 13
-rw-r--r-- t/unit-tests/clar/test/suites/resources/test/file (renamed from t/unit-tests/clar/test/resources/test/file) | 0
-rw-r--r-- upload-pack.c | 19
-rw-r--r-- xdiff/xutils.c | 66
-rw-r--r-- xdiff/xutils.h | 10
161 files changed, 4949 insertions, 1190 deletions
diff --git a/.gitignore b/.gitignore
index 1803023427..802ce70e48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -87,6 +87,7 @@
/git-init-db
/git-interpret-trailers
/git-instaweb
+/git-last-modified
/git-log
/git-ls-files
/git-ls-remote
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index af10ebb59a..cf122e706f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -119,6 +119,7 @@ build:mingw64:
variables:
NO_PERL: 1
before_script:
+ - Set-MpPreference -DisableRealtimeMonitoring $true
- ./ci/install-sdk.ps1 -directory "git-sdk"
script:
- git-sdk/usr/bin/bash.exe -l -c 'ci/make-test-artifacts.sh artifacts'
@@ -135,6 +136,7 @@ test:mingw64:
- job: "build:mingw64"
artifacts: true
before_script:
+ - Set-MpPreference -DisableRealtimeMonitoring $true
- git-sdk/usr/bin/bash.exe -l -c 'tar xf artifacts/artifacts.tar.gz'
- New-Item -Path .git/info -ItemType Directory
- New-Item .git/info/exclude -ItemType File -Value "/git-sdk"
@@ -148,6 +150,7 @@ test:mingw64:
tags:
- saas-windows-medium-amd64
before_script:
+ - Set-MpPreference -DisableRealtimeMonitoring $true
- choco install -y git meson ninja openssl
- Import-Module $env:ChocolateyInstall\helpers\chocolateyProfile.psm1
- refreshenv
diff --git a/.mailmap b/.mailmap
index 96c2740fbb..afa21abbaa 100644
--- a/.mailmap
+++ b/.mailmap
@@ -81,6 +81,8 @@ Fredrik Kuivinen <frekui@gmail.com> <freku045@student.liu.se>
Frédéric Heitzmann <frederic.heitzmann@gmail.com>
Garry Dolley <gdolley@ucla.edu> <gdolley@arpnetworks.com>
Glen Choo <glencbz@gmail.com> <chooglen@google.com>
+Greg Hurrell <greg@hurrell.net> <greg.hurrell@datadoghq.com>
+Greg Hurrell <greg@hurrell.net> <win@wincent.com>
Greg Price <price@mit.edu> <price@MIT.EDU>
Greg Price <price@mit.edu> <price@ksplice.com>
Heiko Voigt <hvoigt@hvoigt.net> <git-list@hvoigt.net>
diff --git a/Documentation/BreakingChanges.adoc b/Documentation/BreakingChanges.adoc
index f4e11c8865..0cba20fadb 100644
--- a/Documentation/BreakingChanges.adoc
+++ b/Documentation/BreakingChanges.adoc
@@ -245,6 +245,11 @@ These features will be removed.
+
The command will be removed.
+* Support for `core.commentString=auto` has been deprecated and will
+ be removed in Git 3.0.
++
+cf. <xmqqa59i45wc.fsf@gitster.g>
+
== Superseded features that will not be deprecated
Some features have gained newer replacements that aim to improve the design in
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index 224f0978a8..df72fe0177 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -650,6 +650,12 @@ For C programs:
cases. However, it is recommended to find a more descriptive name wherever
possible to improve the readability and maintainability of the code.
+ - Bit fields should be defined without a space around the colon. E.g.
+
+ unsigned my_field:1;
+ unsigned other_field:1;
+ unsigned field_with_longer_name:1;
+
For Perl programs:
- Most of the C guidelines above apply.
diff --git a/Documentation/RelNotes/2.52.0.adoc b/Documentation/RelNotes/2.52.0.adoc
index fa72515358..c4fc561631 100644
--- a/Documentation/RelNotes/2.52.0.adoc
+++ b/Documentation/RelNotes/2.52.0.adoc
@@ -14,6 +14,27 @@ UI, Workflows & Features
* A new subcommand "git repo" gives users a way to grab various
repository characteristics.
+ * A new command "git last-modified" has been added to show the closest
+ ancestor commit that touched each path.
+
+ * "git refs exists" that works like "git show-ref --exists" has been
+ added.
+
+ * "repo info" learns a short-hand option "-z" that is the same as
+ "--format=nul", and learns to report the objects format used in the
+ repository.
+
+ * "core.commentChar=auto" that attempts to dynamically pick a
+ suitable comment character is non-workable, as it is too much
+ trouble to support for little benefit, and is marked as deprecated.
+
+ * "git send-email" learned to drive "git imap-send" to store already
+ sent e-mails in an IMAP folder.
+
+ * The "promisor-remote" capability mechanism has been updated to
+ allow the "partialCloneFilter" settings and the "token" value to be
+ communicated from the server side.
+
Performance, Internal Implementation, Development Support etc.
--------------------------------------------------------------
@@ -40,6 +61,32 @@ Performance, Internal Implementation, Development Support etc.
* Discord has been added to the first contribution documentation as
another way to ask for help.
+ * Inspired by Ezekiel's recent effort to showcase Rust interface, the
+ hash function implementation used to hash lines has been updated
+ to the one used for ELF symbol lookup by Glibc.
+
+ * Instead of scanning for the remaining items to see if there are
+ still commits to be explored in the queue, use khash to remember
+ which items are still on the queue (an unacceptable alternative is
+ to reserve one object flag bit).
+
+ * The bulk-checkin code used to depend on a file-scope static
+ singleton variable, which has been updated to pass an instance
+ throughout the callchain.
+
+ * CodingGuidelines now spells out how bitfields are to be written.
+
+ * Adjust to the way newer versions of cURL selectively enable tracing
+ options, so that our tests can continue to work.
+ (merge 1b5a6bfff3 jk/curl-global-trace-components later to maint).
+
+ * The clear_alloc_state() API function was not fully clearing the
+ structure for reuse, but since nobody reuses it, replace it with a
+ variant that frees the structure as well, making the callers simpler.
+
+ * "git range-diff" learned a way to limit the memory consumed by
+ O(N*N) cost matrix.
+
Fixes since v2.51
-----------------
@@ -130,6 +177,46 @@ including security updates, are included in this release.
instead of `gitgitgadget/git`.
(merge 37001cdbc4 ds/doc-ggg-pr-fork-clarify later to maint).
+ * Makefile tried to run multiple "cargo build" which would not work
+ very well; serialize their execution to work it around.
+ (merge 0eeacde50e da/cargo-serialize later to maint).
+
+ * "git repack --path-walk" lost objects in some corner cases, which
+ has been corrected.
+ (merge 93afe9b060 ds/path-walk-repack-fix later to maint).
+
+ * "git ls-files <pathspec>..." should not necessarily have to expand
+ the index fully if a sparsified directory is excluded by the
+ pathspec; the code is taught to expand the index on demand to avoid
+ this.
+ (merge 681f26bccc ds/ls-files-lazy-unsparse later to maint).
+
+ * Windows "real-time monitoring" interferes with the execution of
+ tests and negatively affects both correctness and performance,
+ which has been disabled in Gitlab CI.
+ (merge 608cf5b793 ps/gitlab-ci-disable-windows-monitoring later to maint).
+
+ * A broken or malicious "git fetch" can say that it has the same
+ object many, many times, and the upload-pack serving it can
+ exhaust memory storing them redundantly, which has been corrected.
+ (merge 88a2dc68c8 ps/upload-pack-oom-protection later to maint).
+
+ * A corner case bug in "git log -L..." has been corrected.
+ (merge e3106998ff sg/line-log-boundary-fixes later to maint).
+
+ * "git rev-parse --short" and friends failed to disambiguate two
+ objects with object names that share common prefix longer than 32
+ characters, which has been fixed.
+ (merge 8655908b9e jc/longer-disambiguation-fix later to maint).
+
+ * Some among "git add -p" and friends ignored color.diff and/or
+ color.ui configuration variables, which is an old regression, which
+ has been corrected.
+ (merge 1092cd6435 jk/add-i-color later to maint).
+
+ * "git subtree" (in contrib/) did not work correctly when splitting
+ squashed subtrees, which has been improved.
+
* Other code cleanup, docfix, build fix, etc.
(merge 823d537fa7 kh/doc-git-log-markup-fix later to maint).
(merge cf7efa4f33 rj/t6137-cygwin-fix later to maint).
@@ -143,3 +230,15 @@ including security updates, are included in this release.
(merge 374579c6d4 kh/doc-interpret-trailers-markup-fix later to maint).
(merge 44dce6541c kh/doc-config-typofix later to maint).
(merge 785628b173 js/doc-sending-patch-via-thunderbird later to maint).
+ (merge e5c27bd3d8 je/doc-add later to maint).
+ (merge 13296ac909 ps/object-store-midx-dedup-info later to maint).
+ (merge 2f4bf83ffc km/alias-doc-markup-fix later to maint).
+ (merge b0d97aac19 kh/doc-markup-fixes later to maint).
+ (merge f9a6705d9a tc/t0450-harden later to maint).
+ (merge c25651aefd ds/midx-write-fixes later to maint).
+ (merge 069c15d256 rs/object-name-extend-abbrev-len-update later to maint).
+ (merge bf5c224537 mm/worktree-doc-typofix later to maint).
+ (merge 31397bc4f7 kh/doc-fast-import-markup-fix later to maint).
+ (merge ac7096723b jc/doc-includeif-hasconfig-remote-url-fix later to maint).
+ (merge fafc9b08b8 ag/doc-sendmail-gmail-example-update later to maint).
+ (merge a66fc22bf9 rs/get-oid-with-flags-cleanup later to maint).
diff --git a/Documentation/config.adoc b/Documentation/config.adoc
index cc769251be..05f1ca7293 100644
--- a/Documentation/config.adoc
+++ b/Documentation/config.adoc
@@ -114,8 +114,7 @@ whose format and meaning depends on the keyword. Supported keywords
are:
`gitdir`::
-
- The data that follows the keyword `gitdir:` is used as a glob
+ The data that follows the keyword `gitdir` and a colon is used as a glob
pattern. If the location of the .git directory matches the
pattern, the include condition is met.
+
@@ -148,7 +147,7 @@ refer to linkgit:gitignore[5] for details. For convenience:
case-insensitively (e.g. on case-insensitive file systems)
`onbranch`::
- The data that follows the keyword `onbranch:` is taken to be a
+ The data that follows the keyword `onbranch` and a colon is taken to be a
pattern with standard globbing wildcards and two additional
ones, `**/` and `/**`, that can match multiple path components.
If we are in a worktree where the name of the branch that is
@@ -161,8 +160,8 @@ all branches that begin with `foo/`. This is useful if your branches are
organized hierarchically and you would like to apply a configuration to
all the branches in that hierarchy.
-`hasconfig:remote.*.url:`::
- The data that follows this keyword is taken to
+`hasconfig:remote.*.url`::
+ The data that follows this keyword and a colon is taken to
be a pattern with standard globbing wildcards and two
additional ones, `**/` and `/**`, that can match multiple
components. The first time this keyword is seen, the rest of
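
To make the three conditional-include keywords above concrete, here is a minimal sketch; the paths, branch pattern, and URL are hypothetical, and the quoting keeps the embedded colons intact when setting the values from the command line:

    git config --global 'includeIf.gitdir:~/work/.path' '~/work/.gitconfig'
    git config --global 'includeIf.onbranch:foo/**.path' 'foo-branches.inc'
    git config --global 'includeIf.hasconfig:remote.*.url:https://example.com/**.path' 'example.inc'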
diff --git a/Documentation/config/alias.adoc b/Documentation/config/alias.adoc
index 2c5db0ad84..95825354bf 100644
--- a/Documentation/config/alias.adoc
+++ b/Documentation/config/alias.adoc
@@ -38,6 +38,6 @@ it will be treated as a shell command. For example, defining
** A convenient way to deal with this is to write your script
operations in an inline function that is then called with any
arguments from the command-line. For example `alias.cmd = "!c() {
- echo $1 | grep $2 ; }; c" will correctly execute the prior example.
+ echo $1 | grep $2 ; }; c"` will correctly execute the prior example.
** Setting `GIT_TRACE=1` can help you debug the command being run for
your alias.
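
As a small illustration of the inline-function alias described above (the input strings are arbitrary), note that the single quotes protect the `!` and the `$` parameters from the invoking shell:

    git config --global alias.cmd '!c() { echo "$1" | grep "$2"; }; c'
    git cmd "hello world" world    # pipes "hello world" through grep and prints it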
diff --git a/Documentation/config/core.adoc b/Documentation/config/core.adoc
index 3fbe83eef1..08739bb9d4 100644
--- a/Documentation/config/core.adoc
+++ b/Documentation/config/core.adoc
@@ -531,9 +531,25 @@ core.commentString::
commented, and removes them after the editor returns
(default '#').
+
-If set to "auto", `git-commit` would select a character that is not
+ifndef::with-breaking-changes[]
+If set to "auto", `git-commit` will select a character that is not
the beginning character of any line in existing commit messages.
-+
+Support for this value is deprecated and will be removed in Git 3.0
+due to the following limitations:
++
+--
+* It is incompatible with adding comments in a commit message
+ template. This includes the conflicts comments added to
+ the commit message by `cherry-pick`, `merge`, `rebase` and
+ `revert`.
+* It is incompatible with adding comments to the commit message
+ in the `prepare-commit-msg` hook.
+* It is incompatible with the `fixup` and `squash` commands when
+ rebasing.
+* It is not respected by `git notes`.
+--
++
+endif::with-breaking-changes[]
Note that these two variables are aliases of each other, and in modern
versions of Git you are free to use a string (e.g., `//` or `⁑⁕⁑`) with
`commentChar`. Versions of Git prior to v2.45.0 will ignore
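
For users currently relying on the deprecated `auto` value, a fixed comment character or string remains fully supported; a quick sketch (the chosen characters are arbitrary):

    git config core.commentChar ';'
    # or, via the alias that also accepts longer strings:
    git config core.commentString '//'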
diff --git a/Documentation/config/promisor.adoc b/Documentation/config/promisor.adoc
index 2638b01f83..93e5e0d9b5 100644
--- a/Documentation/config/promisor.adoc
+++ b/Documentation/config/promisor.adoc
@@ -9,6 +9,28 @@ promisor.advertise::
"false", which means the "promisor-remote" capability is not
advertised.
+promisor.sendFields::
+ A comma or space separated list of additional remote related
+ field names. A server sends these field names and the
+ associated field values from its configuration when
+ advertising its promisor remotes using the "promisor-remote"
+ capability, see linkgit:gitprotocol-v2[5]. Currently, only the
+ "partialCloneFilter" and "token" field names are supported.
++
+`partialCloneFilter`:: contains the partial clone filter
+used for the remote.
++
+`token`:: contains an authentication token for the remote.
++
+When a field name is part of this list and a corresponding
+"remote.foo.<field-name>" config variable is set on the server to a
+non-empty value, then the field name and value are sent when
+advertising the promisor remote "foo".
++
+This list has no effect unless the "promisor.advertise" config
+variable is set to "true", and the "name" and "url" fields are always
+advertised regardless of this setting.
+
promisor.acceptFromServer::
If set to "all", a client will accept all the promisor remotes
a server might advertise using the "promisor-remote"
@@ -28,3 +50,42 @@ promisor.acceptFromServer::
lazily fetchable from this promisor remote from its responses
to "fetch" and "clone" requests from the client. Name and URL
comparisons are case sensitive. See linkgit:gitprotocol-v2[5].
+
+promisor.checkFields::
+ A comma or space separated list of additional remote related
+ field names. A client checks if the values of these fields
+ transmitted by a server correspond to the values of these
+ fields in its own configuration before accepting a promisor
+ remote. Currently, "partialCloneFilter" and "token" are the
+ only supported field names.
++
+If one of these field names (e.g., "token") is being checked for an
+advertised promisor remote (e.g., "foo"), three conditions must be met
+for the check of this specific field to pass:
++
+1. The corresponding local configuration (e.g., `remote.foo.token`)
+ must be set.
+2. The server must advertise the "token" field for remote "foo".
+3. The value of the locally configured `remote.foo.token` must exactly
+ match the value advertised by the server for the "token" field.
++
+If any of these conditions is not met for any field name listed in
+`promisor.checkFields`, the advertised remote "foo" is rejected.
++
+For the "partialCloneFilter" field, this allows the client to ensure
+that the server's filter matches what it expects locally, preventing
+inconsistencies in filtering behavior. For the "token" field, this can
+be used to verify that authentication credentials match expected
+values.
++
+Field values are compared case-sensitively.
++
+The "name" and "url" fields are always checked according to the
+`promisor.acceptFromServer` policy, independently of this setting.
++
+The field names and values should be passed by the server through the
+"promisor-remote" capability by using the `promisor.sendFields` config
+variable. The fields are checked only if the
+`promisor.acceptFromServer` config variable is not set to "None". If
+set to "None", this config variable has no effect. See
+linkgit:gitprotocol-v2[5].
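
Putting `promisor.sendFields` and `promisor.checkFields` together, a sketch of one possible server/client pairing; the remote name `lop` and the `blob:none` filter are assumptions for illustration:

    # server side: advertise remote "lop" along with its filter
    git config promisor.advertise true
    git config promisor.sendFields partialCloneFilter
    git config remote.lop.partialCloneFilter blob:none

    # client side: accept only known names whose advertised filter matches
    git config promisor.acceptFromServer KnownName
    git config promisor.checkFields partialCloneFilter
    git config remote.lop.partialCloneFilter blob:none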
diff --git a/Documentation/config/sendemail.adoc b/Documentation/config/sendemail.adoc
index 4722334657..90164c734d 100644
--- a/Documentation/config/sendemail.adoc
+++ b/Documentation/config/sendemail.adoc
@@ -88,6 +88,8 @@ sendemail.smtpServer::
sendemail.smtpServerPort::
sendemail.smtpServerOption::
sendemail.smtpUser::
+sendemail.imapSentFolder::
+sendemail.useImapOnly::
sendemail.thread::
sendemail.transferEncoding::
sendemail.validate::
diff --git a/Documentation/config/worktree.adoc b/Documentation/config/worktree.adoc
index 5e35c7d018..9e3f84f748 100644
--- a/Documentation/config/worktree.adoc
+++ b/Documentation/config/worktree.adoc
@@ -15,5 +15,5 @@ worktree.useRelativePaths::
different locations or environments. Defaults to "false".
+
Note that setting `worktree.useRelativePaths` to "true" implies enabling the
-`extension.relativeWorktrees` config (see linkgit:git-config[1]),
+`extensions.relativeWorktrees` config (see linkgit:git-config[1]),
thus making it incompatible with older versions of Git.
diff --git a/Documentation/fetch-options.adoc b/Documentation/fetch-options.adoc
index d3ac31f4e2..ad1e1f49be 100644
--- a/Documentation/fetch-options.adoc
+++ b/Documentation/fetch-options.adoc
@@ -2,7 +2,7 @@
--no-all::
Fetch all remotes, except for the ones that has the
`remote.<name>.skipFetchAll` configuration variable set.
- This overrides the configuration variable fetch.all`.
+ This overrides the configuration variable `fetch.all`.
-a::
--append::
diff --git a/Documentation/git-add.adoc b/Documentation/git-add.adoc
index b7a735824d..ad629c46c5 100644
--- a/Documentation/git-add.adoc
+++ b/Documentation/git-add.adoc
@@ -16,18 +16,18 @@ git add [--verbose | -v] [--dry-run | -n] [--force | -f] [--interactive | -i] [-
DESCRIPTION
-----------
-This command updates the index using the current content found in
-the working tree, to prepare the content staged for the next commit.
-It typically adds the current content of existing paths as a whole,
-but with some options it can also be used to add content with
-only part of the changes made to the working tree files applied, or
-remove paths that do not exist in the working tree anymore.
-
-The "index" holds a snapshot of the content of the working tree, and it
-is this snapshot that is taken as the contents of the next commit. Thus
-after making any changes to the working tree, and before running
-the commit command, you must use the `add` command to add any new or
-modified files to the index.
+Add contents of new or changed files to the index. The "index" (also
+known as the "staging area") is what you use to prepare the contents of
+the next commit.
+
+When you run `git commit` without any other arguments, it will only
+commit staged changes. For example, if you've edited `file.c` and want
+to commit your changes to that file, you can run:
+
+ git add file.c
+ git commit
+
+You can also add only part of your changes to a file with `git add -p`.
This command can be performed multiple times before a commit. It only
adds the content of the specified file(s) at the time the add command is
@@ -37,12 +37,10 @@ you must run `git add` again to add the new content to the index.
The `git status` command can be used to obtain a summary of which
files have changes that are staged for the next commit.
-The `git add` command will not add ignored files by default. If any
-ignored files were explicitly specified on the command line, `git add`
-will fail with a list of ignored files. Ignored files reached by
-directory recursion or filename globbing performed by Git (quote your
-globs before the shell) will be silently ignored. The `git add` command can
-be used to add ignored files with the `-f` (force) option.
+The `git add` command will not add ignored files by default. You can
+use the `--force` option to add ignored files. If you specify the exact
+filename of an ignored file, `git add` will fail with a list of ignored
+files. Otherwise it will silently ignore the file.
Please see linkgit:git-commit[1] for alternative ways to add content to a
commit.
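
A quick illustration of the ignore behaviour described above; the path `build/output.o` is hypothetical and assumed to match a `.gitignore` pattern:

    git add .                    # ignored files reached by recursion are skipped silently
    git add build/output.o       # naming an ignored file explicitly fails and lists it
    git add -f build/output.o    # --force stages it anyway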
diff --git a/Documentation/git-fast-import.adoc b/Documentation/git-fast-import.adoc
index 3144ffcdb6..6e095b02a1 100644
--- a/Documentation/git-fast-import.adoc
+++ b/Documentation/git-fast-import.adoc
@@ -61,10 +61,10 @@ OPTIONS
currently impacts only the `export-marks`, `import-marks`, and
`import-marks-if-exists` feature commands.
+
- Only enable this option if you trust the program generating the
- fast-import stream! This option is enabled automatically for
- remote-helpers that use the `import` capability, as they are
- already trusted to run their own code.
+Only enable this option if you trust the program generating the
+fast-import stream! This option is enabled automatically for
+remote-helpers that use the `import` capability, as they are
+already trusted to run their own code.
Options for Frontends
~~~~~~~~~~~~~~~~~~~~~
@@ -647,7 +647,7 @@ External data format::
+
Here usually `<dataref>` must be either a mark reference (`:<idnum>`)
set by a prior `blob` command, or a full 40-byte SHA-1 of an
-existing Git blob object. If `<mode>` is `040000`` then
+existing Git blob object. If `<mode>` is `040000` then
`<dataref>` must be the full 40-byte SHA-1 of an existing
Git tree object or a mark reference set with `--import-marks`.
diff --git a/Documentation/git-last-modified.adoc b/Documentation/git-last-modified.adoc
new file mode 100644
index 0000000000..602843e095
--- /dev/null
+++ b/Documentation/git-last-modified.adoc
@@ -0,0 +1,54 @@
+git-last-modified(1)
+====================
+
+NAME
+----
+git-last-modified - EXPERIMENTAL: Show when files were last modified
+
+
+SYNOPSIS
+--------
+[synopsis]
+git last-modified [--recursive] [--show-trees] [<revision-range>] [[--] <path>...]
+
+DESCRIPTION
+-----------
+
+Shows which commit last modified each of the relevant files and subdirectories.
+A commit renaming a path or changing its mode is also taken into account.
+
+THIS COMMAND IS EXPERIMENTAL. THE BEHAVIOR MAY CHANGE.
+
+OPTIONS
+-------
+
+`-r`::
+`--recursive`::
+ Instead of showing tree entries, step into subtrees and show all entries
+ inside them recursively.
+
+`-t`::
+`--show-trees`::
+ Show tree entries even when recursing into them. It has no effect
+ without `--recursive`.
+
+`<revision-range>`::
+ Only traverse commits in the specified revision range. When no
+ `<revision-range>` is specified, it defaults to `HEAD` (i.e. the whole
+ history leading to the current commit). For a complete list of ways to
+ spell `<revision-range>`, see the 'Specifying Ranges' section of
+ linkgit:gitrevisions[7].
+
+`[--] <path>...`::
+ For each _<path>_ given, the commit which last modified it is returned.
+ Without an optional path parameter, all files and subdirectories
+ in the path traversal are included in the output.
+
+SEE ALSO
+--------
+linkgit:git-blame[1],
+linkgit:git-log[1].
+
+GIT
+---
+Part of the linkgit:git[1] suite
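
A few hedged invocations of the new (still experimental) command, based on the synopsis above; per the implementation further down, each output line is the commit ID, a tab, and the path:

    git last-modified                          # entries at the top level of HEAD
    git last-modified -r -- Documentation/     # recurse into subtrees under a pathspec
    git last-modified v2.51.0..HEAD            # limit the commits that are traversed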
diff --git a/Documentation/git-multi-pack-index.adoc b/Documentation/git-multi-pack-index.adoc
index e8073bc272..2f642697e9 100644
--- a/Documentation/git-multi-pack-index.adoc
+++ b/Documentation/git-multi-pack-index.adoc
@@ -29,7 +29,7 @@ OPTIONS
--no-progress::
Turn progress on/off explicitly. If neither is specified, progress is
shown if standard error is connected to a terminal. Supported by
- sub-commands `write`, `verify`, `expire`, and `repack.
+ sub-commands `write`, `verify`, `expire`, and `repack`.
The following subcommands are available:
diff --git a/Documentation/git-refs.adoc b/Documentation/git-refs.adoc
index d462953fb5..bfa9b3ea2d 100644
--- a/Documentation/git-refs.adoc
+++ b/Documentation/git-refs.adoc
@@ -18,6 +18,7 @@ git refs list [--count=<count>] [--shell|--perl|--python|--tcl]
[--contains[=<object>]] [--no-contains[=<object>]]
[(--exclude=<pattern>)...] [--start-after=<marker>]
[ --stdin | (<pattern>...)]
+git refs exists <ref>
DESCRIPTION
-----------
@@ -38,6 +39,12 @@ list::
formatting, and sorting. This subcommand is an alias for
linkgit:git-for-each-ref[1] and offers identical functionality.
+exists::
+ Check whether the given reference exists. Returns an exit code of 0 if
+ it does, 2 if it is missing, and 1 in case looking up the reference
+ failed with an error other than the reference being missing. This does
+ not verify whether the reference resolves to an actual object.
+
OPTIONS
-------
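
A small sketch of scripting against the documented exit codes of `git refs exists`; the ref name is hypothetical:

    git refs exists refs/heads/topic
    case $? in
    0) echo "ref exists" ;;
    2) echo "ref is missing" ;;
    *) echo "lookup failed" ;;
    esac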
diff --git a/Documentation/git-repo.adoc b/Documentation/git-repo.adoc
index 2870828d93..209afd1b61 100644
--- a/Documentation/git-repo.adoc
+++ b/Documentation/git-repo.adoc
@@ -8,7 +8,7 @@ git-repo - Retrieve information about the repository
SYNOPSIS
--------
[synopsis]
-git repo info [--format=(keyvalue|nul)] [<key>...]
+git repo info [--format=(keyvalue|nul)] [-z] [<key>...]
DESCRIPTION
-----------
@@ -18,7 +18,7 @@ THIS COMMAND IS EXPERIMENTAL. THE BEHAVIOR MAY CHANGE.
COMMANDS
--------
-`info [--format=(keyvalue|nul)] [<key>...]`::
+`info [--format=(keyvalue|nul)] [-z] [<key>...]`::
Retrieve metadata-related information about the current repository. Only
the requested data will be returned based on their keys (see "INFO KEYS"
section below).
@@ -40,6 +40,8 @@ supported:
between the key and the value and using a NUL character after each value.
This format is better suited for being parsed by another applications than
`keyvalue`. Unlike in the `keyvalue` format, the values are never quoted.
++
+`-z` is an alias for `--format=nul`.
INFO KEYS
---------
@@ -53,6 +55,9 @@ values that they return:
`layout.shallow`::
`true` if this is a shallow repository, otherwise `false`.
+`object.format`::
+ The object format (hash algorithm) used in the repository.
+
`references.format`::
The reference storage format. The valid values are:
+
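
For illustration, a couple of hedged `git repo info` calls using the keys listed above; `tr` is only there to make the NUL-terminated `-z` output readable:

    git repo info layout.shallow object.format references.format
    git repo info -z object.format | tr '\0' '\n'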
diff --git a/Documentation/git-send-email.adoc b/Documentation/git-send-email.adoc
index 11b1ab1a07..263b977353 100644
--- a/Documentation/git-send-email.adoc
+++ b/Documentation/git-send-email.adoc
@@ -300,6 +300,32 @@ must be used for each option.
commands and replies will be printed. Useful to debug TLS
connection and authentication problems.
+--imap-sent-folder=<folder>::
+ Some email providers (e.g. iCloud) do not send a copy of the emails sent
+ using SMTP to the `Sent` folder or similar in your mailbox. Use this option
+ to use `git imap-send` to send a copy of the emails to the folder specified
+ using this option. You can run `git imap-send --list` to get a list of
+ valid folder names, including the correct name of the `Sent` folder in
+ your mailbox. You can also use this option to send emails to a dedicated
+ IMAP folder of your choice.
++
+This feature requires setting up `git imap-send`. See linkgit:git-imap-send[1]
+for instructions.
+
+--use-imap-only::
+--no-use-imap-only::
+ If this is set, all emails will only be copied to the IMAP folder specified
+ with `--imap-sent-folder` or `sendemail.imapSentFolder` and will not be sent
+ to the recipients. Useful if you just want to create a draft of the emails
+ and use another email client to send them.
+ If disabled with `--no-use-imap-only`, the emails will be sent as usual.
+ Disabled by default, but the `sendemail.useImapOnly` configuration
+ variable can be used to enable it.
+
++
+This feature requires setting up `git imap-send`. See linkgit:git-imap-send[1]
+for instructions.
+
--batch-size=<num>::
Some email servers (e.g. 'smtp.163.com') limit the number of emails to be
sent per session (connection) and this will lead to a failure when
@@ -531,10 +557,10 @@ edit `~/.gitconfig` to specify your account settings:
----
[sendemail]
- smtpEncryption = tls
+ smtpEncryption = ssl
smtpServer = smtp.gmail.com
smtpUser = yourname@gmail.com
- smtpServerPort = 587
+ smtpServerPort = 465
----
Gmail does not allow using your regular password for `git send-email`.
@@ -552,10 +578,10 @@ if you want to use `OAUTHBEARER`, edit your `~/.gitconfig` file and add
----
[sendemail]
- smtpEncryption = tls
+ smtpEncryption = ssl
smtpServer = smtp.gmail.com
smtpUser = yourname@gmail.com
- smtpServerPort = 587
+ smtpServerPort = 465
smtpAuth = OAUTHBEARER
----
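
A sketch of the new IMAP-related options in use, assuming `git imap-send` is already configured; the patch file names and folder names are placeholders:

    git imap-send --list                                   # find the provider's Sent folder name
    git send-email --imap-sent-folder='Sent Messages' 0001-example.patch
    git send-email --use-imap-only --imap-sent-folder=Drafts outgoing/*.patch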
diff --git a/Documentation/gitprotocol-v2.adoc b/Documentation/gitprotocol-v2.adoc
index 9a57005d77..c7db103299 100644
--- a/Documentation/gitprotocol-v2.adoc
+++ b/Documentation/gitprotocol-v2.adoc
@@ -785,33 +785,64 @@ retrieving the header from a bundle at the indicated URI, and thus
save themselves and the server(s) the request(s) needed to inspect the
headers of that bundle or bundles.
-promisor-remote=<pr-infos>
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+promisor-remote=<pr-info>
+~~~~~~~~~~~~~~~~~~~~~~~~~
The server may advertise some promisor remotes it is using or knows
about to a client which may want to use them as its promisor remotes,
-instead of this repository. In this case <pr-infos> should be of the
+instead of this repository. In this case <pr-info> should be of the
form:
- pr-infos = pr-info | pr-infos ";" pr-info
+ pr-info = pr-fields | pr-info ";" pr-fields
- pr-info = "name=" pr-name | "name=" pr-name "," "url=" pr-url
+ pr-fields = pr-field | pr-fields "," pr-field
-where `pr-name` is the urlencoded name of a promisor remote, and
-`pr-url` the urlencoded URL of that promisor remote.
+ pr-field = field-name "=" field-value
-In this case, if the client decides to use one or more promisor
-remotes the server advertised, it can reply with
-"promisor-remote=<pr-names>" where <pr-names> should be of the form:
+where all the `field-name` and `field-value` in a given `pr-fields`
+are field names and values related to a single promisor remote. A
+given `field-name` MUST NOT appear more than once in given
+`pr-fields`.
+
+The server MUST advertise at least the "name" and "url" field names
+along with the associated field values, which are the name of a valid
+remote and its URL, in each `pr-fields`. The "name" and "url" fields
+MUST appear first in each pr-fields, in that order.
+
+After these mandatory fields, the server MAY advertise the following
+optional fields in any order:
+
+`partialCloneFilter`:: The filter specification used by the remote.
+Clients can use this to determine if the remote's filtering strategy
+is compatible with their needs (e.g., checking if both use "blob:none").
+It corresponds to the "remote.<name>.partialCloneFilter" config setting.
+
+`token`:: An authentication token that clients can use when
+connecting to the remote. It corresponds to the "remote.<name>.token"
+config setting.
+
+No other fields are defined by the protocol at this time. Field names
+are case-sensitive and MUST be transmitted exactly as specified
+above. Clients MUST ignore fields they don't recognize to allow for
+future protocol extensions.
+
+For now, the client can only use information transmitted through these
+fields to decide if it accepts the advertised promisor remote. In the
+future that information might be used for other purposes though.
+
+Field values MUST be urlencoded.
+
+If the client decides to use one or more promisor remotes the server
+advertised, it can reply with "promisor-remote=<pr-names>" where
+<pr-names> should be of the form:
pr-names = pr-name | pr-names ";" pr-name
where `pr-name` is the urlencoded name of a promisor remote the server
advertised and the client accepts.
-Note that, everywhere in this document, `pr-name` MUST be a valid
-remote name, and the ';' and ',' characters MUST be encoded if they
-appear in `pr-name` or `pr-url`.
+Note that, everywhere in this document, the ';' and ',' characters
+MUST be encoded if they appear in `pr-name` or `field-value`.
If the server doesn't know any promisor remote that could be good for
a client to use, or prefers a client not to use any promisor remote it
@@ -822,9 +853,10 @@ In this case, or if the client doesn't want to use any promisor remote
the server advertised, the client shouldn't advertise the
"promisor-remote" capability at all in its reply.
-The "promisor.advertise" and "promisor.acceptFromServer" configuration
-options can be used on the server and client side to control what they
-advertise or accept respectively. See the documentation of these
+On the server side, the "promisor.advertise" and "promisor.sendFields"
+configuration options can be used to control what it advertises. On
+the client side, the "promisor.acceptFromServer" configuration option
+can be used to control what it accepts. See the documentation of these
configuration options for more information.
Note that in the future it would be nice if the "promisor-remote"
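
For illustration only (hypothetical remote name, URL, and filter; values are shown unencoded for readability, but on the wire they MUST be urlencoded), a server with `promisor.sendFields` set to `partialCloneFilter` might advertise:

    promisor-remote=name=lop,url=https://example.com/lop.git,partialCloneFilter=blob:none

and a client that accepts this remote would reply with `promisor-remote=lop`.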
diff --git a/Documentation/meson.build b/Documentation/meson.build
index 41f43e0336..e34965c5b0 100644
--- a/Documentation/meson.build
+++ b/Documentation/meson.build
@@ -74,6 +74,7 @@ manpages = {
'git-init.adoc' : 1,
'git-instaweb.adoc' : 1,
'git-interpret-trailers.adoc' : 1,
+ 'git-last-modified.adoc' : 1,
'git-log.adoc' : 1,
'git-ls-files.adoc' : 1,
'git-ls-remote.adoc' : 1,
diff --git a/Makefile b/Makefile
index 555b7f4dc3..4c95affadb 100644
--- a/Makefile
+++ b/Makefile
@@ -1265,6 +1265,7 @@ BUILTIN_OBJS += builtin/hook.o
BUILTIN_OBJS += builtin/index-pack.o
BUILTIN_OBJS += builtin/init-db.o
BUILTIN_OBJS += builtin/interpret-trailers.o
+BUILTIN_OBJS += builtin/last-modified.o
BUILTIN_OBJS += builtin/log.o
BUILTIN_OBJS += builtin/ls-files.o
BUILTIN_OBJS += builtin/ls-remote.o
@@ -3945,13 +3946,12 @@ unit-tests: $(UNIT_TEST_PROGS) $(CLAR_TEST_PROG) t/helper/test-tool$X
$(MAKE) -C t/ unit-tests
.PHONY: libgit-sys libgit-rs
-libgit-sys libgit-rs:
- $(QUIET)(\
- cd contrib/$@ && \
- cargo build \
- )
+libgit-sys:
+ $(QUIET)cargo build --manifest-path contrib/libgit-sys/Cargo.toml
+libgit-rs: libgit-sys
+ $(QUIET)cargo build --manifest-path contrib/libgit-rs/Cargo.toml
ifdef INCLUDE_LIBGIT_RS
-all:: libgit-sys libgit-rs
+all:: libgit-rs
endif
LIBGIT_PUB_OBJS += contrib/libgit-sys/public_symbol_export.o
diff --git a/add-interactive.c b/add-interactive.c
index 3e692b47ec..4604c69140 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -20,14 +20,14 @@
#include "prompt.h"
#include "tree.h"
-static void init_color(struct repository *r, struct add_i_state *s,
+static void init_color(struct repository *r, int use_color,
const char *section_and_slot, char *dst,
const char *default_color)
{
char *key = xstrfmt("color.%s", section_and_slot);
const char *value;
- if (!s->use_color)
+ if (!use_color)
dst[0] = '\0';
else if (repo_config_get_value(r, key, &value) ||
color_parse(value, dst))
@@ -36,42 +36,63 @@ static void init_color(struct repository *r, struct add_i_state *s,
free(key);
}
-void init_add_i_state(struct add_i_state *s, struct repository *r,
- struct add_p_opt *add_p_opt)
+static int check_color_config(struct repository *r, const char *var)
{
const char *value;
+ int ret;
+
+ if (repo_config_get_value(r, var, &value))
+ ret = -1;
+ else
+ ret = git_config_colorbool(var, value);
+
+ /*
+ * Do not rely on want_color() to fall back to color.ui for us. It uses
+ * the value parsed by git_color_config(), which may not have been
+ * called by the main command.
+ */
+ if (ret < 0 && !repo_config_get_value(r, "color.ui", &value))
+ ret = git_config_colorbool("color.ui", value);
+ return want_color(ret);
+}
+
+void init_add_i_state(struct add_i_state *s, struct repository *r,
+ struct add_p_opt *add_p_opt)
+{
s->r = r;
s->context = -1;
s->interhunkcontext = -1;
- if (repo_config_get_value(r, "color.interactive", &value))
- s->use_color = -1;
- else
- s->use_color =
- git_config_colorbool("color.interactive", value);
- s->use_color = want_color(s->use_color);
-
- init_color(r, s, "interactive.header", s->header_color, GIT_COLOR_BOLD);
- init_color(r, s, "interactive.help", s->help_color, GIT_COLOR_BOLD_RED);
- init_color(r, s, "interactive.prompt", s->prompt_color,
- GIT_COLOR_BOLD_BLUE);
- init_color(r, s, "interactive.error", s->error_color,
- GIT_COLOR_BOLD_RED);
-
- init_color(r, s, "diff.frag", s->fraginfo_color,
- diff_get_color(s->use_color, DIFF_FRAGINFO));
- init_color(r, s, "diff.context", s->context_color, "fall back");
+ s->use_color_interactive = check_color_config(r, "color.interactive");
+
+ init_color(r, s->use_color_interactive, "interactive.header",
+ s->header_color, GIT_COLOR_BOLD);
+ init_color(r, s->use_color_interactive, "interactive.help",
+ s->help_color, GIT_COLOR_BOLD_RED);
+ init_color(r, s->use_color_interactive, "interactive.prompt",
+ s->prompt_color, GIT_COLOR_BOLD_BLUE);
+ init_color(r, s->use_color_interactive, "interactive.error",
+ s->error_color, GIT_COLOR_BOLD_RED);
+ strlcpy(s->reset_color_interactive,
+ s->use_color_interactive ? GIT_COLOR_RESET : "", COLOR_MAXLEN);
+
+ s->use_color_diff = check_color_config(r, "color.diff");
+
+ init_color(r, s->use_color_diff, "diff.frag", s->fraginfo_color,
+ diff_get_color(s->use_color_diff, DIFF_FRAGINFO));
+ init_color(r, s->use_color_diff, "diff.context", s->context_color,
+ "fall back");
if (!strcmp(s->context_color, "fall back"))
- init_color(r, s, "diff.plain", s->context_color,
- diff_get_color(s->use_color, DIFF_CONTEXT));
- init_color(r, s, "diff.old", s->file_old_color,
- diff_get_color(s->use_color, DIFF_FILE_OLD));
- init_color(r, s, "diff.new", s->file_new_color,
- diff_get_color(s->use_color, DIFF_FILE_NEW));
-
- strlcpy(s->reset_color,
- s->use_color ? GIT_COLOR_RESET : "", COLOR_MAXLEN);
+ init_color(r, s->use_color_diff, "diff.plain",
+ s->context_color,
+ diff_get_color(s->use_color_diff, DIFF_CONTEXT));
+ init_color(r, s->use_color_diff, "diff.old", s->file_old_color,
+ diff_get_color(s->use_color_diff, DIFF_FILE_OLD));
+ init_color(r, s->use_color_diff, "diff.new", s->file_new_color,
+ diff_get_color(s->use_color_diff, DIFF_FILE_NEW));
+ strlcpy(s->reset_color_diff,
+ s->use_color_diff ? GIT_COLOR_RESET : "", COLOR_MAXLEN);
FREE_AND_NULL(s->interactive_diff_filter);
repo_config_get_string(r, "interactive.difffilter",
@@ -109,7 +130,8 @@ void clear_add_i_state(struct add_i_state *s)
FREE_AND_NULL(s->interactive_diff_filter);
FREE_AND_NULL(s->interactive_diff_algorithm);
memset(s, 0, sizeof(*s));
- s->use_color = -1;
+ s->use_color_interactive = -1;
+ s->use_color_diff = -1;
}
/*
@@ -1188,9 +1210,9 @@ int run_add_i(struct repository *r, const struct pathspec *ps,
* When color was asked for, use the prompt color for
* highlighting, otherwise use square brackets.
*/
- if (s.use_color) {
+ if (s.use_color_interactive) {
data.color = s.prompt_color;
- data.reset = s.reset_color;
+ data.reset = s.reset_color_interactive;
}
print_file_item_data.color = data.color;
print_file_item_data.reset = data.reset;
diff --git a/add-interactive.h b/add-interactive.h
index 4213dcd67b..ceadfa6bb6 100644
--- a/add-interactive.h
+++ b/add-interactive.h
@@ -12,16 +12,19 @@ struct add_p_opt {
struct add_i_state {
struct repository *r;
- int use_color;
+ int use_color_interactive;
+ int use_color_diff;
char header_color[COLOR_MAXLEN];
char help_color[COLOR_MAXLEN];
char prompt_color[COLOR_MAXLEN];
char error_color[COLOR_MAXLEN];
- char reset_color[COLOR_MAXLEN];
+ char reset_color_interactive[COLOR_MAXLEN];
+
char fraginfo_color[COLOR_MAXLEN];
char context_color[COLOR_MAXLEN];
char file_old_color[COLOR_MAXLEN];
char file_new_color[COLOR_MAXLEN];
+ char reset_color_diff[COLOR_MAXLEN];
int use_single_key;
char *interactive_diff_filter, *interactive_diff_algorithm;
diff --git a/add-patch.c b/add-patch.c
index 302e6ba7d9..b0389c5d5b 100644
--- a/add-patch.c
+++ b/add-patch.c
@@ -300,7 +300,7 @@ static void err(struct add_p_state *s, const char *fmt, ...)
va_start(args, fmt);
fputs(s->s.error_color, stdout);
vprintf(fmt, args);
- puts(s->s.reset_color);
+ puts(s->s.reset_color_interactive);
va_end(args);
}
@@ -457,7 +457,7 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
}
strbuf_complete_line(plain);
- if (want_color_fd(1, -1)) {
+ if (want_color_fd(1, s->s.use_color_diff)) {
struct child_process colored_cp = CHILD_PROCESS_INIT;
const char *diff_filter = s->s.interactive_diff_filter;
@@ -714,7 +714,7 @@ static void render_hunk(struct add_p_state *s, struct hunk *hunk,
if (len)
strbuf_add(out, p, len);
else if (colored)
- strbuf_addf(out, "%s\n", s->s.reset_color);
+ strbuf_addf(out, "%s\n", s->s.reset_color_diff);
else
strbuf_addch(out, '\n');
}
@@ -1107,7 +1107,7 @@ static void recolor_hunk(struct add_p_state *s, struct hunk *hunk)
s->s.file_new_color :
s->s.context_color);
strbuf_add(&s->colored, plain + current, eol - current);
- strbuf_addstr(&s->colored, s->s.reset_color);
+ strbuf_addstr(&s->colored, s->s.reset_color_diff);
if (next > eol)
strbuf_add(&s->colored, plain + eol, next - eol);
current = next;
@@ -1528,8 +1528,8 @@ static int patch_update_file(struct add_p_state *s,
: 1));
printf(_(s->mode->prompt_mode[prompt_mode_type]),
s->buf.buf);
- if (*s->s.reset_color)
- fputs(s->s.reset_color, stdout);
+ if (*s->s.reset_color_interactive)
+ fputs(s->s.reset_color_interactive, stdout);
fflush(stdout);
if (read_single_character(s) == EOF)
break;
diff --git a/alloc.c b/alloc.c
index 377e80f5dd..533a045c2a 100644
--- a/alloc.c
+++ b/alloc.c
@@ -36,19 +36,25 @@ struct alloc_state {
int slab_nr, slab_alloc;
};
-struct alloc_state *allocate_alloc_state(void)
+struct alloc_state *alloc_state_alloc(void)
{
return xcalloc(1, sizeof(struct alloc_state));
}
-void clear_alloc_state(struct alloc_state *s)
+void alloc_state_free_and_null(struct alloc_state **s_)
{
+ struct alloc_state *s = *s_;
+
+ if (!s)
+ return;
+
while (s->slab_nr > 0) {
s->slab_nr--;
free(s->slabs[s->slab_nr]);
}
FREE_AND_NULL(s->slabs);
+ FREE_AND_NULL(*s_);
}
static inline void *alloc_node(struct alloc_state *s, size_t node_size)
diff --git a/alloc.h b/alloc.h
index 3f4a0ad310..87a47a9709 100644
--- a/alloc.h
+++ b/alloc.h
@@ -14,7 +14,7 @@ void *alloc_commit_node(struct repository *r);
void *alloc_tag_node(struct repository *r);
void *alloc_object_node(struct repository *r);
-struct alloc_state *allocate_alloc_state(void);
-void clear_alloc_state(struct alloc_state *s);
+struct alloc_state *alloc_state_alloc(void);
+void alloc_state_free_and_null(struct alloc_state **s_);
#endif
diff --git a/builtin.h b/builtin.h
index e6458e6fb9..1b35565fbd 100644
--- a/builtin.h
+++ b/builtin.h
@@ -176,6 +176,7 @@ int cmd_hook(int argc, const char **argv, const char *prefix, struct repository
int cmd_index_pack(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_init_db(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_interpret_trailers(int argc, const char **argv, const char *prefix, struct repository *repo);
+int cmd_last_modified(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_log_reflog(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_log(int argc, const char **argv, const char *prefix, struct repository *repo);
int cmd_ls_files(int argc, const char **argv, const char *prefix, struct repository *repo);
diff --git a/builtin/add.c b/builtin/add.c
index 0235854f80..740c7c4581 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -389,6 +389,7 @@ int cmd_add(int argc,
char *seen = NULL;
char *ps_matched = NULL;
struct lock_file lock_file = LOCK_INIT;
+ struct odb_transaction *transaction;
repo_config(repo, add_config, NULL);
@@ -574,7 +575,7 @@ int cmd_add(int argc,
string_list_clear(&only_match_skip_worktree, 0);
}
- begin_odb_transaction();
+ transaction = begin_odb_transaction(repo->objects);
ps_matched = xcalloc(pathspec.nr, 1);
if (add_renormalize)
@@ -593,7 +594,7 @@ int cmd_add(int argc,
if (chmod_arg && pathspec.nr)
exit_status |= chmod_pathspec(repo, &pathspec, chmod_arg[0], show_only);
- end_odb_transaction();
+ end_odb_transaction(transaction);
finish:
if (write_locked_index(repo->index, &lock_file,
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 6656187f90..fe3ebaadad 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -102,7 +102,7 @@ static int graph_verify(int argc, const char **argv, const char *prefix,
if (opts.progress)
flags |= COMMIT_GRAPH_WRITE_PROGRESS;
- source = odb_find_source(the_repository->objects, opts.obj_dir);
+ source = odb_find_source_or_die(the_repository->objects, opts.obj_dir);
graph_name = get_commit_graph_filename(source);
chain_name = get_commit_graph_chain_filename(source);
if (open_commit_graph(graph_name, &fd, &st))
@@ -291,7 +291,7 @@ static int graph_write(int argc, const char **argv, const char *prefix,
git_env_bool(GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS, 0))
flags |= COMMIT_GRAPH_WRITE_BLOOM_FILTERS;
- source = odb_find_source(the_repository->objects, opts.obj_dir);
+ source = odb_find_source_or_die(the_repository->objects, opts.obj_dir);
if (opts.reachable) {
if (write_commit_graph_reachable(source, flags, &write_opts))
diff --git a/builtin/commit.c b/builtin/commit.c
index 8a5dee384d..384d0b4e93 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -695,6 +695,7 @@ static int author_date_is_interesting(void)
return author_message || force_date;
}
+#ifndef WITH_BREAKING_CHANGES
static void adjust_comment_line_char(const struct strbuf *sb)
{
char candidates[] = "#;@!$%^&|:";
@@ -732,6 +733,7 @@ static void adjust_comment_line_char(const struct strbuf *sb)
free(comment_line_str_to_free);
comment_line_str = comment_line_str_to_free = xstrfmt("%c", *p);
}
+#endif /* !WITH_BREAKING_CHANGES */
static void prepare_amend_commit(struct commit *commit, struct strbuf *sb,
struct pretty_print_context *ctx)
@@ -928,8 +930,10 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
if (fwrite(sb.buf, 1, sb.len, s->fp) < sb.len)
die_errno(_("could not write commit template"));
+#ifndef WITH_BREAKING_CHANGES
if (auto_comment_line_char)
adjust_comment_line_char(&sb);
+#endif /* !WITH_BREAKING_CHANGES */
strbuf_release(&sb);
/* This checks if committer ident is explicitly given */
@@ -1793,6 +1797,9 @@ int cmd_commit(int argc,
show_usage_with_options_if_asked(argc, argv,
builtin_commit_usage, builtin_commit_options);
+#ifndef WITH_BREAKING_CHANGES
+ warn_on_auto_comment_char = true;
+#endif /* !WITH_BREAKING_CHANGES */
prepare_repo_settings(the_repository);
the_repository->settings.command_requires_full_index = 0;
diff --git a/builtin/describe.c b/builtin/describe.c
index fbe78ace66..9f4e26d7ff 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -24,6 +24,7 @@
#include "commit-slab.h"
#include "wildmatch.h"
#include "prio-queue.h"
+#include "oidset.h"
#define MAX_TAGS (FLAG_BITS - 1)
#define DEFAULT_CANDIDATES 10
@@ -286,38 +287,47 @@ static void lazy_queue_clear(struct lazy_queue *queue)
queue->get_pending = false;
}
-static bool all_have_flag(const struct lazy_queue *queue, unsigned flag)
+static unsigned long finish_depth_computation(struct lazy_queue *queue,
+ struct possible_tag *best)
{
+ unsigned long seen_commits = 0;
+ struct oidset unflagged = OIDSET_INIT;
+
for (size_t i = queue->get_pending ? 1 : 0; i < queue->queue.nr; i++) {
struct commit *commit = queue->queue.array[i].data;
- if (!(commit->object.flags & flag))
- return false;
+ if (!(commit->object.flags & best->flag_within))
+ oidset_insert(&unflagged, &commit->object.oid);
}
- return true;
-}
-static unsigned long finish_depth_computation(struct lazy_queue *queue,
- struct possible_tag *best)
-{
- unsigned long seen_commits = 0;
while (!lazy_queue_empty(queue)) {
struct commit *c = lazy_queue_get(queue);
struct commit_list *parents = c->parents;
seen_commits++;
if (c->object.flags & best->flag_within) {
- if (all_have_flag(queue, best->flag_within))
+ if (!oidset_size(&unflagged))
break;
- } else
+ } else {
+ oidset_remove(&unflagged, &c->object.oid);
best->depth++;
+ }
while (parents) {
+ unsigned seen, flag_before, flag_after;
struct commit *p = parents->item;
repo_parse_commit(the_repository, p);
- if (!(p->object.flags & SEEN))
+ seen = p->object.flags & SEEN;
+ if (!seen)
lazy_queue_put(queue, p);
+ flag_before = p->object.flags & best->flag_within;
p->object.flags |= c->object.flags;
+ flag_after = p->object.flags & best->flag_within;
+ if (!seen && !flag_after)
+ oidset_insert(&unflagged, &p->object.oid);
+ if (seen && !flag_before && flag_after)
+ oidset_remove(&unflagged, &p->object.oid);
parents = parents->next;
}
}
+ oidset_clear(&unflagged);
return seen_commits;
}
diff --git a/builtin/last-modified.c b/builtin/last-modified.c
new file mode 100644
index 0000000000..886ba12cb5
--- /dev/null
+++ b/builtin/last-modified.c
@@ -0,0 +1,326 @@
+#include "git-compat-util.h"
+#include "bloom.h"
+#include "builtin.h"
+#include "commit-graph.h"
+#include "commit.h"
+#include "config.h"
+#include "environment.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "environment.h"
+#include "hashmap.h"
+#include "hex.h"
+#include "log-tree.h"
+#include "object-name.h"
+#include "object.h"
+#include "parse-options.h"
+#include "quote.h"
+#include "repository.h"
+#include "revision.h"
+
+struct last_modified_entry {
+ struct hashmap_entry hashent;
+ struct object_id oid;
+ struct bloom_key key;
+ const char path[FLEX_ARRAY];
+};
+
+static int last_modified_entry_hashcmp(const void *unused UNUSED,
+ const struct hashmap_entry *hent1,
+ const struct hashmap_entry *hent2,
+ const void *path)
+{
+ const struct last_modified_entry *ent1 =
+ container_of(hent1, const struct last_modified_entry, hashent);
+ const struct last_modified_entry *ent2 =
+ container_of(hent2, const struct last_modified_entry, hashent);
+ return strcmp(ent1->path, path ? path : ent2->path);
+}
+
+struct last_modified {
+ struct hashmap paths;
+ struct rev_info rev;
+ bool recursive;
+ bool show_trees;
+};
+
+static void last_modified_release(struct last_modified *lm)
+{
+ struct hashmap_iter iter;
+ struct last_modified_entry *ent;
+
+ hashmap_for_each_entry(&lm->paths, &iter, ent, hashent)
+ bloom_key_clear(&ent->key);
+
+ hashmap_clear_and_free(&lm->paths, struct last_modified_entry, hashent);
+ release_revisions(&lm->rev);
+}
+
+struct last_modified_callback_data {
+ struct last_modified *lm;
+ struct commit *commit;
+};
+
+static void add_path_from_diff(struct diff_queue_struct *q,
+ struct diff_options *opt UNUSED, void *data)
+{
+ struct last_modified *lm = data;
+
+ for (int i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ struct last_modified_entry *ent;
+ const char *path = p->two->path;
+
+ FLEX_ALLOC_STR(ent, path, path);
+ oidcpy(&ent->oid, &p->two->oid);
+ if (lm->rev.bloom_filter_settings)
+ bloom_key_fill(&ent->key, path, strlen(path),
+ lm->rev.bloom_filter_settings);
+ hashmap_entry_init(&ent->hashent, strhash(ent->path));
+ hashmap_add(&lm->paths, &ent->hashent);
+ }
+}
+
+static int populate_paths_from_revs(struct last_modified *lm)
+{
+ int num_interesting = 0;
+ struct diff_options diffopt;
+
+ /*
+ * Create a copy of `struct diff_options`. In this copy a callback is
+ * set that when called adds entries to `paths` in `struct last_modified`.
+ * This copy is used to diff the tree of the target revision against an
+ * empty tree. This results in all paths in the target revision being
+ * listed. After `paths` is populated, we don't need this copy anymore.
+ */
+ memcpy(&diffopt, &lm->rev.diffopt, sizeof(diffopt));
+ copy_pathspec(&diffopt.pathspec, &lm->rev.diffopt.pathspec);
+ diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ diffopt.format_callback = add_path_from_diff;
+ diffopt.format_callback_data = lm;
+
+ for (size_t i = 0; i < lm->rev.pending.nr; i++) {
+ struct object_array_entry *obj = lm->rev.pending.objects + i;
+
+ if (obj->item->flags & UNINTERESTING)
+ continue;
+
+ if (num_interesting++)
+ return error(_("last-modified can only operate on one tree at a time"));
+
+ diff_tree_oid(lm->rev.repo->hash_algo->empty_tree,
+ &obj->item->oid, "", &diffopt);
+ diff_flush(&diffopt);
+ }
+ clear_pathspec(&diffopt.pathspec);
+
+ return 0;
+}
+
+static void last_modified_emit(struct last_modified *lm,
+ const char *path, const struct commit *commit)
+{
+ if (commit->object.flags & BOUNDARY)
+ putchar('^');
+ printf("%s\t", oid_to_hex(&commit->object.oid));
+
+ if (lm->rev.diffopt.line_termination)
+ write_name_quoted(path, stdout, '\n');
+ else
+ printf("%s%c", path, '\0');
+}
+
+static void mark_path(const char *path, const struct object_id *oid,
+ struct last_modified_callback_data *data)
+{
+ struct last_modified_entry *ent;
+
+ /* Is it even a path that we are interested in? */
+ ent = hashmap_get_entry_from_hash(&data->lm->paths, strhash(path), path,
+ struct last_modified_entry, hashent);
+ if (!ent)
+ return;
+
+ /*
+ * Is it arriving at a version of interest, or is it from a side branch
+ * which did not contribute to the final state?
+ */
+ if (!oideq(oid, &ent->oid))
+ return;
+
+ last_modified_emit(data->lm, path, data->commit);
+
+ hashmap_remove(&data->lm->paths, &ent->hashent, path);
+ bloom_key_clear(&ent->key);
+ free(ent);
+}
+
+static void last_modified_diff(struct diff_queue_struct *q,
+ struct diff_options *opt UNUSED, void *cbdata)
+{
+ struct last_modified_callback_data *data = cbdata;
+
+ for (int i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ switch (p->status) {
+ case DIFF_STATUS_DELETED:
+ /*
+ * There's no point in feeding a deletion, as it could
+ * not have resulted in our current state, which
+ * actually has the file.
+ */
+ break;
+
+ default:
+ /*
+ * Otherwise, we care only that we somehow arrived at
+ * a final oid state. Note that this covers some
+ * potentially controversial areas, including:
+ *
+ * 1. A rename or copy will be found, as it is the
+ * first time the content has arrived at the given
+ * path.
+ *
+ * 2. Even a non-content modification like a mode or
+ * type change will trigger it.
+ *
+ * We take the inclusive approach for now, and find
+ * anything which impacts the path. Options to tweak
+ * the behavior (e.g., to "--follow" the content across
+ * renames) can come later.
+ */
+ mark_path(p->two->path, &p->two->oid, data);
+ break;
+ }
+ }
+}
+
+static bool maybe_changed_path(struct last_modified *lm, struct commit *origin)
+{
+ struct bloom_filter *filter;
+ struct last_modified_entry *ent;
+ struct hashmap_iter iter;
+
+ if (!lm->rev.bloom_filter_settings)
+ return true;
+
+ if (commit_graph_generation(origin) == GENERATION_NUMBER_INFINITY)
+ return true;
+
+ filter = get_bloom_filter(lm->rev.repo, origin);
+ if (!filter)
+ return true;
+
+ hashmap_for_each_entry(&lm->paths, &iter, ent, hashent) {
+ if (bloom_filter_contains(filter, &ent->key,
+ lm->rev.bloom_filter_settings))
+ return true;
+ }
+ return false;
+}
+
+static int last_modified_run(struct last_modified *lm)
+{
+ struct last_modified_callback_data data = { .lm = lm };
+
+ lm->rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ lm->rev.diffopt.format_callback = last_modified_diff;
+ lm->rev.diffopt.format_callback_data = &data;
+
+ prepare_revision_walk(&lm->rev);
+
+ while (hashmap_get_size(&lm->paths)) {
+ data.commit = get_revision(&lm->rev);
+ if (!data.commit)
+ BUG("paths remaining beyond boundary in last-modified");
+
+ if (data.commit->object.flags & BOUNDARY) {
+ diff_tree_oid(lm->rev.repo->hash_algo->empty_tree,
+ &data.commit->object.oid, "",
+ &lm->rev.diffopt);
+ diff_flush(&lm->rev.diffopt);
+
+ break;
+ }
+
+ if (!maybe_changed_path(lm, data.commit))
+ continue;
+
+ log_tree_commit(&lm->rev, data.commit);
+ }
+
+ return 0;
+}
+
+static int last_modified_init(struct last_modified *lm, struct repository *r,
+ const char *prefix, int argc, const char **argv)
+{
+ hashmap_init(&lm->paths, last_modified_entry_hashcmp, NULL, 0);
+
+ repo_init_revisions(r, &lm->rev, prefix);
+ lm->rev.def = "HEAD";
+ lm->rev.combine_merges = 1;
+ lm->rev.show_root_diff = 1;
+ lm->rev.boundary = 1;
+ lm->rev.no_commit_id = 1;
+ lm->rev.diff = 1;
+ lm->rev.diffopt.flags.recursive = lm->recursive;
+ lm->rev.diffopt.flags.tree_in_recursive = lm->show_trees;
+
+ argc = setup_revisions(argc, argv, &lm->rev, NULL);
+ if (argc > 1) {
+ error(_("unknown last-modified argument: %s"), argv[1]);
+ return argc;
+ }
+
+ lm->rev.bloom_filter_settings = get_bloom_filter_settings(lm->rev.repo);
+
+ if (populate_paths_from_revs(lm) < 0)
+ return error(_("unable to setup last-modified"));
+
+ return 0;
+}
+
+int cmd_last_modified(int argc, const char **argv, const char *prefix,
+ struct repository *repo)
+{
+ int ret;
+ struct last_modified lm = { 0 };
+
+ const char * const last_modified_usage[] = {
+ N_("git last-modified [--recursive] [--show-trees] "
+ "[<revision-range>] [[--] <path>...]"),
+ NULL
+ };
+
+ struct option last_modified_options[] = {
+ OPT_BOOL('r', "recursive", &lm.recursive,
+ N_("recurse into subtrees")),
+ OPT_BOOL('t', "show-trees", &lm.show_trees,
+ N_("show tree entries when recursing into subtrees")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, last_modified_options,
+ last_modified_usage,
+ PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT);
+
+ repo_config(repo, git_default_config, NULL);
+
+ ret = last_modified_init(&lm, repo, prefix, argc, argv);
+ if (ret > 0)
+ usage_with_options(last_modified_usage,
+ last_modified_options);
+ if (ret)
+ goto out;
+
+ ret = last_modified_run(&lm);
+ if (ret)
+ goto out;
+
+out:
+ last_modified_release(&lm);
+
+ return ret;
+}
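
As a rough illustration of the new builtin (revisions and paths below are hypothetical), last_modified_emit() prints one "<commit-id> TAB <path>" line per remaining path and prefixes boundary commits with '^':

    # Most recent commit touching each file under Documentation/,
    # recursing into subtrees
    git last-modified -r HEAD -- Documentation/

    # Restrict the walk to a range; paths untouched inside the range
    # are attributed to the boundary commit and prefixed with '^'
    git last-modified -r v2.51.0..HEAD -- Makefile
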
diff --git a/builtin/log.c b/builtin/log.c
index c2f8bbf863..5f552d14c0 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -1404,6 +1404,7 @@ static void make_cover_letter(struct rev_info *rev, int use_separate_file,
struct range_diff_options range_diff_opts = {
.creation_factor = rev->creation_factor,
.dual_color = 1,
+ .max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
.diffopt = &opts,
.other_arg = &other_arg
};
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index c06a6f33e4..b148607f7a 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -414,14 +414,21 @@ static void show_files(struct repository *repo, struct dir_struct *dir)
if (!(show_cached || show_stage || show_deleted || show_modified))
return;
- if (!show_sparse_dirs)
- ensure_full_index(repo->index);
-
for (i = 0; i < repo->index->cache_nr; i++) {
const struct cache_entry *ce = repo->index->cache[i];
struct stat st;
int stat_err;
+ if (S_ISSPARSEDIR(ce->ce_mode) && !show_sparse_dirs) {
+ /*
+ * This is the first time we've hit a sparse dir,
+ * so expansion will leave the first 'i' entries
+ * alone.
+ */
+ ensure_full_index(repo->index);
+ ce = repo->index->cache[i];
+ }
+
construct_fullname(&fullname, repo, ce);
if ((dir->flags & DIR_SHOW_IGNORED) &&
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
index 5d55731ca3..ec6940fc7c 100644
--- a/builtin/ls-tree.c
+++ b/builtin/ls-tree.c
@@ -373,7 +373,6 @@ int cmd_ls_tree(int argc,
OPT_END()
};
struct ls_tree_cmdmode_to_fmt *m2f = ls_tree_cmdmode_format;
- struct object_context obj_context = {0};
int ret;
repo_config(the_repository, git_default_config, NULL);
@@ -405,9 +404,8 @@ int cmd_ls_tree(int argc,
ls_tree_usage, ls_tree_options);
if (argc < 1)
usage_with_options(ls_tree_usage, ls_tree_options);
- if (get_oid_with_context(the_repository, argv[0],
- GET_OID_HASH_ANY, &oid,
- &obj_context))
+ if (repo_get_oid_with_flags(the_repository, argv[0], &oid,
+ GET_OID_HASH_ANY))
die("Not a valid object name %s", argv[0]);
/*
@@ -447,6 +445,5 @@ int cmd_ls_tree(int argc,
ret = !!read_tree(the_repository, tree, &options.pathspec, fn, &options);
clear_pathspec(&options.pathspec);
- object_context_release(&obj_context);
return ret;
}
diff --git a/builtin/merge.c b/builtin/merge.c
index b235af730a..c421a11b0b 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -1379,6 +1379,9 @@ int cmd_merge(int argc,
show_usage_with_options_if_asked(argc, argv,
builtin_merge_usage, builtin_merge_options);
+#ifndef WITH_BREAKING_CHANGES
+ warn_on_auto_comment_char = true;
+#endif /* !WITH_BREAKING_CHANGES */
prepare_repo_settings(the_repository);
the_repository->settings.command_requires_full_index = 0;
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index d3b9e98be3..5f364aa816 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -65,12 +65,20 @@ static int parse_object_dir(const struct option *opt, const char *arg,
char **value = opt->value;
free(*value);
if (unset)
- *value = xstrdup(repo_get_object_directory(the_repository));
+ *value = xstrdup(the_repository->objects->sources->path);
else
*value = real_pathdup(arg, 1);
return 0;
}
+static struct odb_source *handle_object_dir_option(struct repository *repo)
+{
+ struct odb_source *source = odb_find_source(repo->objects, opts.object_dir);
+ if (!source)
+ source = odb_add_to_alternates_memory(repo->objects, opts.object_dir);
+ return source;
+}
+
static struct option common_opts[] = {
OPT_CALLBACK(0, "object-dir", &opts.object_dir,
N_("directory"),
@@ -140,6 +148,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv,
N_("refs snapshot for selecting bitmap commits")),
OPT_END(),
};
+ struct odb_source *source;
int ret;
opts.flags |= MIDX_WRITE_BITMAP_HASH_CACHE;
@@ -158,6 +167,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv,
if (argc)
usage_with_options(builtin_multi_pack_index_write_usage,
options);
+ source = handle_object_dir_option(repo);
FREE_AND_NULL(options);
@@ -166,7 +176,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv,
read_packs_from_stdin(&packs);
- ret = write_midx_file_only(repo, opts.object_dir, &packs,
+ ret = write_midx_file_only(source, &packs,
opts.preferred_pack,
opts.refs_snapshot, opts.flags);
@@ -177,7 +187,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv,
}
- ret = write_midx_file(repo, opts.object_dir, opts.preferred_pack,
+ ret = write_midx_file(source, opts.preferred_pack,
opts.refs_snapshot, opts.flags);
free(opts.refs_snapshot);
@@ -194,6 +204,8 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv,
N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
+ struct odb_source *source;
+
options = add_common_options(builtin_multi_pack_index_verify_options);
trace2_cmd_mode(argv[0]);
@@ -206,10 +218,11 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv,
if (argc)
usage_with_options(builtin_multi_pack_index_verify_usage,
options);
+ source = handle_object_dir_option(the_repository);
FREE_AND_NULL(options);
- return verify_midx_file(the_repository, opts.object_dir, opts.flags);
+ return verify_midx_file(source, opts.flags);
}
static int cmd_multi_pack_index_expire(int argc, const char **argv,
@@ -222,6 +235,8 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv,
N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
+ struct odb_source *source;
+
options = add_common_options(builtin_multi_pack_index_expire_options);
trace2_cmd_mode(argv[0]);
@@ -234,10 +249,11 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv,
if (argc)
usage_with_options(builtin_multi_pack_index_expire_usage,
options);
+ source = handle_object_dir_option(the_repository);
FREE_AND_NULL(options);
- return expire_midx_packs(the_repository, opts.object_dir, opts.flags);
+ return expire_midx_packs(source, opts.flags);
}
static int cmd_multi_pack_index_repack(int argc, const char **argv,
@@ -252,6 +268,7 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv,
N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
+ struct odb_source *source;
options = add_common_options(builtin_multi_pack_index_repack_options);
@@ -266,11 +283,11 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv,
if (argc)
usage_with_options(builtin_multi_pack_index_repack_usage,
options);
+ source = handle_object_dir_option(the_repository);
FREE_AND_NULL(options);
- return midx_repack(the_repository, opts.object_dir,
- (size_t)opts.batch_size, opts.flags);
+ return midx_repack(source, (size_t)opts.batch_size, opts.flags);
}
int cmd_multi_pack_index(int argc,
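
The --object-dir handling above now resolves the given path to a known odb_source (or registers it as an in-memory alternate) before any MIDX operation runs. A hypothetical invocation against an alternate object directory:

    # Write a multi-pack-index for packs living in another object store
    git multi-pack-index --object-dir=/srv/alt-repo.git/objects write
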
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 53a2256250..1494afcf3d 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1741,7 +1741,7 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
struct multi_pack_index *m = get_multi_pack_index(source);
struct pack_entry e;
- if (m && fill_midx_entry(the_repository, oid, &e, m)) {
+ if (m && fill_midx_entry(m, oid, &e)) {
want = want_object_in_pack_one(e.p, oid, exclude, found_pack, found_offset, found_mtime);
if (want != -1)
return want;
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index a563abff5f..aafcc99b96 100644
--- a/builtin/range-diff.c
+++ b/builtin/range-diff.c
@@ -6,6 +6,7 @@
#include "parse-options.h"
#include "range-diff.h"
#include "config.h"
+#include "parse.h"
static const char * const builtin_range_diff_usage[] = {
@@ -15,6 +16,21 @@ N_("git range-diff [<options>] <base> <old-tip> <new-tip>"),
NULL
};
+static int parse_max_memory(const struct option *opt, const char *arg, int unset)
+{
+ size_t *max_memory = opt->value;
+ uintmax_t val;
+
+ if (unset)
+ return 0;
+
+ if (!git_parse_unsigned(arg, &val, SIZE_MAX))
+ return error(_("invalid max-memory value: %s"), arg);
+
+ *max_memory = (size_t)val;
+ return 0;
+}
+
int cmd_range_diff(int argc,
const char **argv,
const char *prefix,
@@ -25,6 +41,7 @@ int cmd_range_diff(int argc,
struct strvec diff_merges_arg = STRVEC_INIT;
struct range_diff_options range_diff_opts = {
.creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT,
+ .max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
.diffopt = &diffopt,
.other_arg = &other_arg
};
@@ -40,6 +57,10 @@ int cmd_range_diff(int argc,
PARSE_OPT_OPTARG),
OPT_PASSTHRU_ARGV(0, "diff-merges", &diff_merges_arg,
N_("style"), N_("passed to 'git log'"), 0),
+ OPT_CALLBACK(0, "max-memory", &range_diff_opts.max_memory,
+ N_("size"),
+ N_("maximum memory for cost matrix (default 4G)"),
+ parse_max_memory),
OPT_PASSTHRU_ARGV(0, "remerge-diff", &diff_merges_arg, NULL,
N_("passed to 'git log'"), PARSE_OPT_NOARG),
OPT_BOOL(0, "left-only", &left_only,
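
A sketch of the new knob (branch names are hypothetical; git_parse_unsigned() accepts the usual k/m/g size suffixes, with 4G as the documented default):

    # Cap the memory used for the commit-matching cost matrix
    git range-diff --max-memory=1G maint..topic-v1 maint..topic-v2
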
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 3c85768d29..67c0352bf8 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -1242,6 +1242,9 @@ int cmd_rebase(int argc,
builtin_rebase_usage,
builtin_rebase_options);
+#ifndef WITH_BREAKING_CHANGES
+ warn_on_auto_comment_char = true;
+#endif /* !WITH_BREAKING_CHANGES */
prepare_repo_settings(the_repository);
the_repository->settings.command_requires_full_index = 0;
diff --git a/builtin/refs.c b/builtin/refs.c
index 76224feba4..91548783b7 100644
--- a/builtin/refs.c
+++ b/builtin/refs.c
@@ -7,6 +7,7 @@
#include "strbuf.h"
#include "worktree.h"
#include "for-each-ref.h"
+#include "refs/refs-internal.h"
#define REFS_MIGRATE_USAGE \
N_("git refs migrate --ref-format=<format> [--no-reflog] [--dry-run]")
@@ -14,6 +15,9 @@
#define REFS_VERIFY_USAGE \
N_("git refs verify [--strict] [--verbose]")
+#define REFS_EXISTS_USAGE \
+ N_("git refs exists <ref>")
+
static int cmd_refs_migrate(int argc, const char **argv, const char *prefix,
struct repository *repo UNUSED)
{
@@ -113,6 +117,48 @@ static int cmd_refs_list(int argc, const char **argv, const char *prefix,
return for_each_ref_core(argc, argv, prefix, repo, refs_list_usage);
}
+static int cmd_refs_exists(int argc, const char **argv, const char *prefix,
+ struct repository *repo UNUSED)
+{
+ struct strbuf unused_referent = STRBUF_INIT;
+ struct object_id unused_oid;
+ unsigned int unused_type;
+ int failure_errno = 0;
+ const char *ref;
+ int ret = 0;
+ const char * const exists_usage[] = {
+ REFS_EXISTS_USAGE,
+ NULL,
+ };
+ struct option options[] = {
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, prefix, options, exists_usage, 0);
+ if (argc != 1)
+ die(_("'git refs exists' requires a reference"));
+
+ ref = *argv++;
+ if (refs_read_raw_ref(get_main_ref_store(the_repository), ref,
+ &unused_oid, &unused_referent, &unused_type,
+ &failure_errno)) {
+ if (failure_errno == ENOENT || failure_errno == EISDIR) {
+ error(_("reference does not exist"));
+ ret = 2;
+ } else {
+ errno = failure_errno;
+ error_errno(_("failed to look up reference"));
+ ret = 1;
+ }
+
+ goto out;
+ }
+
+out:
+ strbuf_release(&unused_referent);
+ return ret;
+}
+
int cmd_refs(int argc,
const char **argv,
const char *prefix,
@@ -122,6 +168,7 @@ int cmd_refs(int argc,
REFS_MIGRATE_USAGE,
REFS_VERIFY_USAGE,
"git refs list " COMMON_USAGE_FOR_EACH_REF,
+ REFS_EXISTS_USAGE,
NULL,
};
parse_opt_subcommand_fn *fn = NULL;
@@ -129,6 +176,7 @@ int cmd_refs(int argc,
OPT_SUBCOMMAND("migrate", &fn, cmd_refs_migrate),
OPT_SUBCOMMAND("verify", &fn, cmd_refs_verify),
OPT_SUBCOMMAND("list", &fn, cmd_refs_list),
+ OPT_SUBCOMMAND("exists", &fn, cmd_refs_exists),
OPT_END(),
};
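
Going by the code above, "git refs exists" exits 0 when the reference can be read, 2 when it does not exist (ENOENT/EISDIR), and 1 on any other lookup error; a small hypothetical usage:

    # Silence the diagnostic and branch on the exit code
    if git refs exists refs/heads/main 2>/dev/null
    then
        echo main exists
    else
        echo main missing or unreadable
    fi
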
diff --git a/builtin/repack.c b/builtin/repack.c
index a4def39197..c490a51e91 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -223,9 +223,10 @@ static void mark_packs_for_deletion(struct existing_packs *existing,
static void remove_redundant_pack(const char *dir_name, const char *base_name)
{
struct strbuf buf = STRBUF_INIT;
- struct multi_pack_index *m = get_multi_pack_index(the_repository->objects->sources);
+ struct odb_source *source = the_repository->objects->sources;
+ struct multi_pack_index *m = get_multi_pack_index(source);
strbuf_addf(&buf, "%s.pack", base_name);
- if (m && m->local && midx_contains_pack(m, buf.buf))
+ if (m && source->local && midx_contains_pack(m, buf.buf))
clear_midx_file(the_repository);
strbuf_insertf(&buf, 0, "%s/", dir_name);
unlink_pack_path(buf.buf, 1);
@@ -1711,7 +1712,7 @@ int cmd_repack(int argc,
unsigned flags = 0;
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_INCREMENTAL, 0))
flags |= MIDX_WRITE_INCREMENTAL;
- write_midx_file(the_repository, repo_get_object_directory(the_repository),
+ write_midx_file(the_repository->objects->sources,
NULL, NULL, flags);
}
diff --git a/builtin/repo.c b/builtin/repo.c
index 8c6e7f42ab..bbb0966f2d 100644
--- a/builtin/repo.c
+++ b/builtin/repo.c
@@ -9,7 +9,7 @@
#include "shallow.h"
static const char *const repo_usage[] = {
- "git repo info [--format=(keyvalue|nul)] [<key>...]",
+ "git repo info [--format=(keyvalue|nul)] [-z] [<key>...]",
NULL
};
@@ -38,6 +38,12 @@ static int get_layout_shallow(struct repository *repo, struct strbuf *buf)
return 0;
}
+static int get_object_format(struct repository *repo, struct strbuf *buf)
+{
+ strbuf_addstr(buf, repo->hash_algo->name);
+ return 0;
+}
+
static int get_references_format(struct repository *repo, struct strbuf *buf)
{
strbuf_addstr(buf,
@@ -49,6 +55,7 @@ static int get_references_format(struct repository *repo, struct strbuf *buf)
static const struct field repo_info_fields[] = {
{ "layout.bare", get_layout_bare },
{ "layout.shallow", get_layout_shallow },
+ { "object.format", get_object_format },
{ "references.format", get_references_format },
};
@@ -112,26 +119,40 @@ static int print_fields(int argc, const char **argv,
return ret;
}
+static int parse_format_cb(const struct option *opt,
+ const char *arg, int unset UNUSED)
+{
+ enum output_format *format = opt->value;
+
+ if (opt->short_name == 'z')
+ *format = FORMAT_NUL_TERMINATED;
+ else if (!strcmp(arg, "nul"))
+ *format = FORMAT_NUL_TERMINATED;
+ else if (!strcmp(arg, "keyvalue"))
+ *format = FORMAT_KEYVALUE;
+ else
+ die(_("invalid format '%s'"), arg);
+
+ return 0;
+}
+
static int repo_info(int argc, const char **argv, const char *prefix,
struct repository *repo)
{
- const char *format_str = "keyvalue";
- enum output_format format;
+ enum output_format format = FORMAT_KEYVALUE;
struct option options[] = {
- OPT_STRING(0, "format", &format_str, N_("format"),
- N_("output format")),
+ OPT_CALLBACK_F(0, "format", &format, N_("format"),
+ N_("output format"),
+ PARSE_OPT_NONEG, parse_format_cb),
+ OPT_CALLBACK_F('z', NULL, &format, NULL,
+ N_("synonym for --format=nul"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ parse_format_cb),
OPT_END()
};
argc = parse_options(argc, argv, prefix, options, repo_usage, 0);
- if (!strcmp(format_str, "keyvalue"))
- format = FORMAT_KEYVALUE;
- else if (!strcmp(format_str, "nul"))
- format = FORMAT_NUL_TERMINATED;
- else
- die(_("invalid format '%s'"), format_str);
-
return print_fields(argc, argv, repo, format);
}
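
With the new field and parser callback above, both spellings below should be equivalent for script consumption (the keys shown are the ones registered in repo_info_fields):

    # Key/value output (the default)
    git repo info object.format references.format

    # NUL-terminated records; '-z' is a synonym for '--format=nul'
    git repo info -z object.format references.format
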
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 44ff1b8342..9da92b990d 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -708,7 +708,6 @@ int cmd_rev_parse(int argc,
struct object_id oid;
unsigned int flags = 0;
const char *name = NULL;
- struct object_context unused;
struct strbuf buf = STRBUF_INIT;
int seen_end_of_options = 0;
enum format_type format = FORMAT_DEFAULT;
@@ -1141,9 +1140,8 @@ int cmd_rev_parse(int argc,
name++;
type = REVERSED;
}
- if (!get_oid_with_context(the_repository, name,
- flags, &oid, &unused)) {
- object_context_release(&unused);
+ if (!repo_get_oid_with_flags(the_repository, name, &oid,
+ flags)) {
if (output_algo)
repo_oid_to_algop(the_repository, &oid,
output_algo, &oid);
@@ -1153,7 +1151,6 @@ int cmd_rev_parse(int argc,
show_rev(type, &oid, name);
continue;
}
- object_context_release(&unused);
if (verify)
die_no_single_rev(quiet);
if (has_dashdash)
diff --git a/builtin/revert.c b/builtin/revert.c
index c3f92b585d..bedc40f368 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -4,6 +4,7 @@
#include "builtin.h"
#include "parse-options.h"
#include "diff.h"
+#include "environment.h"
#include "gettext.h"
#include "revision.h"
#include "rerere.h"
@@ -285,6 +286,9 @@ int cmd_revert(int argc,
struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
+#ifndef WITH_BREAKING_CHANGES
+ warn_on_auto_comment_char = true;
+#endif /* !WITH_BREAKING_CHANGES */
opts.action = REPLAY_REVERT;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, prefix, &opts);
@@ -302,6 +306,9 @@ struct repository *repo UNUSED)
struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
+#ifndef WITH_BREAKING_CHANGES
+ warn_on_auto_comment_char = true;
+#endif /* !WITH_BREAKING_CHANGES */
opts.action = REPLAY_PICK;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, prefix, &opts);
diff --git a/builtin/stash.c b/builtin/stash.c
index f5ddee5c7f..b7db7c8364 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -377,7 +377,7 @@ static int diff_tree_binary(struct strbuf *out, struct object_id *w_commit)
* however it should be done together with apply_cached.
*/
cp.git_cmd = 1;
- strvec_pushl(&cp.args, "diff-tree", "--binary", NULL);
+ strvec_pushl(&cp.args, "diff-tree", "--binary", "--no-color", NULL);
strvec_pushf(&cp.args, "%s^2^..%s^2", w_commit_hex, w_commit_hex);
return pipe_command(&cp, NULL, 0, out, 0, NULL, 0);
@@ -1089,7 +1089,6 @@ static int store_stash(int argc, const char **argv, const char *prefix,
int quiet = 0;
const char *stash_msg = NULL;
struct object_id obj;
- struct object_context dummy = {0};
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet")),
OPT_STRING('m', "message", &stash_msg, "message",
@@ -1109,9 +1108,8 @@ static int store_stash(int argc, const char **argv, const char *prefix,
return -1;
}
- if (get_oid_with_context(the_repository,
- argv[0], quiet ? GET_OID_QUIETLY : 0, &obj,
- &dummy)) {
+ if (repo_get_oid_with_flags(the_repository, argv[0], &obj,
+ quiet ? GET_OID_QUIETLY : 0)) {
if (!quiet)
fprintf_ln(stderr, _("Cannot update %s with %s"),
ref_stash, argv[0]);
@@ -1122,7 +1120,6 @@ static int store_stash(int argc, const char **argv, const char *prefix,
ret = do_store_stash(&obj, stash_msg, quiet);
out:
- object_context_release(&dummy);
return ret;
}
@@ -1284,6 +1281,7 @@ static int stash_staged(struct stash_info *info, struct strbuf *out_patch,
cp_diff_tree.git_cmd = 1;
strvec_pushl(&cp_diff_tree.args, "diff-tree", "-p", "--binary",
+ "--no-color",
"-U1", "HEAD", oid_to_hex(&info->w_tree), "--", NULL);
if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
ret = -1;
@@ -1346,6 +1344,7 @@ static int stash_patch(struct stash_info *info, const struct pathspec *ps,
cp_diff_tree.git_cmd = 1;
strvec_pushl(&cp_diff_tree.args, "diff-tree", "-p", "-U1", "HEAD",
+ "--no-color",
oid_to_hex(&info->w_tree), "--", NULL);
if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
ret = -1;
@@ -1720,6 +1719,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
cp_diff.git_cmd = 1;
strvec_pushl(&cp_diff.args, "diff-index", "-p",
+ "--no-color",
"--cached", "--binary", "HEAD", "--",
NULL);
add_pathspecs(&cp_diff.args, ps);
@@ -2235,7 +2235,6 @@ static int do_export_stash(struct repository *r,
const char **argv)
{
struct object_id base;
- struct object_context unused;
struct commit *prev;
struct commit_list *items = NULL, **iter = &items, *cur;
int res = 0;
@@ -2269,9 +2268,9 @@ static int do_export_stash(struct repository *r,
struct commit *stash;
if (parse_stash_revision(&revision, argv[i], 1) ||
- get_oid_with_context(r, revision.buf,
- GET_OID_QUIETLY | GET_OID_GENTLY,
- &oid, &unused)) {
+ repo_get_oid_with_flags(r, revision.buf, &oid,
+ GET_OID_QUIETLY |
+ GET_OID_GENTLY)) {
res = error(_("unable to find stash entry %s"), argv[i]);
goto out;
}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 7ae7c82b6c..28124b324d 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -584,6 +584,7 @@ static void unpack_all(void)
{
int i;
unsigned char *hdr = fill(sizeof(struct pack_header));
+ struct odb_transaction *transaction;
if (get_be32(hdr) != PACK_SIGNATURE)
die("bad pack file");
@@ -599,12 +600,12 @@ static void unpack_all(void)
progress = start_progress(the_repository,
_("Unpacking objects"), nr_objects);
CALLOC_ARRAY(obj_list, nr_objects);
- begin_odb_transaction();
+ transaction = begin_odb_transaction(the_repository->objects);
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
}
- end_odb_transaction();
+ end_odb_transaction(transaction);
stop_progress(&progress);
if (delta_list)
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 2380f3ccd6..2ba2d29c95 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -77,7 +77,7 @@ static void report(const char *fmt, ...)
* objects invisible while a transaction is active, so flush the
* transaction here before reporting a change made by update-index.
*/
- flush_odb_transaction();
+ flush_odb_transaction(the_repository->objects->transaction);
va_start(vp, fmt);
vprintf(fmt, vp);
putchar('\n');
@@ -940,6 +940,7 @@ int cmd_update_index(int argc,
strbuf_getline_fn getline_fn;
int parseopt_state = PARSE_OPT_UNKNOWN;
struct repository *r = the_repository;
+ struct odb_transaction *transaction;
struct option options[] = {
OPT_BIT('q', NULL, &refresh_args.flags,
N_("continue refresh even when index needs update"),
@@ -1130,7 +1131,7 @@ int cmd_update_index(int argc,
* Allow the object layer to optimize adding multiple objects in
* a batch.
*/
- begin_odb_transaction();
+ transaction = begin_odb_transaction(the_repository->objects);
while (ctx.argc) {
if (parseopt_state != PARSE_OPT_DONE)
parseopt_state = parse_options_step(&ctx, options,
@@ -1213,7 +1214,7 @@ int cmd_update_index(int argc,
/*
* By now we have added all of the new objects
*/
- end_odb_transaction();
+ end_odb_transaction(transaction);
if (split_index > 0) {
if (repo_config_get_split_index(the_repository) == 0)
diff --git a/bulk-checkin.c b/bulk-checkin.c
index b2809ab039..124c493067 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -19,11 +19,7 @@
#include "object-file.h"
#include "odb.h"
-static int odb_transaction_nesting;
-
-static struct tmp_objdir *bulk_fsync_objdir;
-
-static struct bulk_checkin_packfile {
+struct bulk_checkin_packfile {
char *pack_tmp_name;
struct hashfile *f;
off_t offset;
@@ -32,27 +28,36 @@ static struct bulk_checkin_packfile {
struct pack_idx_entry **written;
uint32_t alloc_written;
uint32_t nr_written;
-} bulk_checkin_packfile;
+};
+
+struct odb_transaction {
+ struct object_database *odb;
+
+ int nesting;
+ struct tmp_objdir *objdir;
+ struct bulk_checkin_packfile packfile;
+};
-static void finish_tmp_packfile(struct strbuf *basename,
- const char *pack_tmp_name,
- struct pack_idx_entry **written_list,
- uint32_t nr_written,
- struct pack_idx_option *pack_idx_opts,
+static void finish_tmp_packfile(struct odb_transaction *transaction,
+ struct strbuf *basename,
unsigned char hash[])
{
+ struct bulk_checkin_packfile *state = &transaction->packfile;
+ struct repository *repo = transaction->odb->repo;
char *idx_tmp_name = NULL;
- stage_tmp_packfiles(the_repository, basename, pack_tmp_name,
- written_list, nr_written, NULL, pack_idx_opts, hash,
- &idx_tmp_name);
- rename_tmp_packfile_idx(the_repository, basename, &idx_tmp_name);
+ stage_tmp_packfiles(repo, basename, state->pack_tmp_name,
+ state->written, state->nr_written, NULL,
+ &state->pack_idx_opts, hash, &idx_tmp_name);
+ rename_tmp_packfile_idx(repo, basename, &idx_tmp_name);
free(idx_tmp_name);
}
-static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state)
+static void flush_bulk_checkin_packfile(struct odb_transaction *transaction)
{
+ struct bulk_checkin_packfile *state = &transaction->packfile;
+ struct repository *repo = transaction->odb->repo;
unsigned char hash[GIT_MAX_RAWSZ];
struct strbuf packname = STRBUF_INIT;
@@ -69,17 +74,17 @@ static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state)
CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
- fixup_pack_header_footer(the_hash_algo, fd, hash, state->pack_tmp_name,
+ fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
state->nr_written, hash,
state->offset);
close(fd);
}
- strbuf_addf(&packname, "%s/pack/pack-%s.", repo_get_object_directory(the_repository),
- hash_to_hex(hash));
- finish_tmp_packfile(&packname, state->pack_tmp_name,
- state->written, state->nr_written,
- &state->pack_idx_opts, hash);
+ strbuf_addf(&packname, "%s/pack/pack-%s.",
+ repo_get_object_directory(transaction->odb->repo),
+ hash_to_hex_algop(hash, repo->hash_algo));
+
+ finish_tmp_packfile(transaction, &packname, hash);
for (uint32_t i = 0; i < state->nr_written; i++)
free(state->written[i]);
@@ -90,18 +95,18 @@ clear_exit:
strbuf_release(&packname);
/* Make objects we just wrote available to ourselves */
- reprepare_packed_git(the_repository);
+ reprepare_packed_git(repo);
}
/*
* Cleanup after batch-mode fsync_object_files.
*/
-static void flush_batch_fsync(void)
+static void flush_batch_fsync(struct odb_transaction *transaction)
{
struct strbuf temp_path = STRBUF_INIT;
struct tempfile *temp;
- if (!bulk_fsync_objdir)
+ if (!transaction->objdir)
return;
/*
@@ -113,7 +118,8 @@ static void flush_batch_fsync(void)
* to ensure that the data in each new object file is durable before
* the final name is visible.
*/
- strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", repo_get_object_directory(the_repository));
+ strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
+ repo_get_object_directory(transaction->odb->repo));
temp = xmks_tempfile(temp_path.buf);
fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
delete_tempfile(&temp);
@@ -123,20 +129,21 @@ static void flush_batch_fsync(void)
* Make the object files visible in the primary ODB after their data is
* fully durable.
*/
- tmp_objdir_migrate(bulk_fsync_objdir);
- bulk_fsync_objdir = NULL;
+ tmp_objdir_migrate(transaction->objdir);
+ transaction->objdir = NULL;
}
-static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid)
+static int already_written(struct odb_transaction *transaction,
+ struct object_id *oid)
{
/* The object may already exist in the repository */
- if (odb_has_object(the_repository->objects, oid,
+ if (odb_has_object(transaction->odb, oid,
HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return 1;
/* Might want to keep the list sorted */
- for (uint32_t i = 0; i < state->nr_written; i++)
- if (oideq(&state->written[i]->oid, oid))
+ for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
+ if (oideq(&transaction->packfile.written[i]->oid, oid))
return 1;
/* This is a new object we need to keep */
@@ -235,13 +242,15 @@ static int stream_blob_to_pack(struct bulk_checkin_packfile *state,
}
/* Lazily create backing packfile for the state */
-static void prepare_to_stream(struct bulk_checkin_packfile *state,
+static void prepare_to_stream(struct odb_transaction *transaction,
unsigned flags)
{
+ struct bulk_checkin_packfile *state = &transaction->packfile;
if (!(flags & INDEX_WRITE_OBJECT) || state->f)
return;
- state->f = create_tmp_packfile(the_repository, &state->pack_tmp_name);
+ state->f = create_tmp_packfile(transaction->odb->repo,
+ &state->pack_tmp_name);
reset_pack_idx_option(&state->pack_idx_opts);
/* Pretend we are going to write only one object */
@@ -250,11 +259,11 @@ static void prepare_to_stream(struct bulk_checkin_packfile *state,
die_errno("unable to write pack header");
}
-static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
- struct object_id *result_oid,
- int fd, size_t size,
- const char *path, unsigned flags)
+int index_blob_bulk_checkin(struct odb_transaction *transaction,
+ struct object_id *result_oid, int fd, size_t size,
+ const char *path, unsigned flags)
{
+ struct bulk_checkin_packfile *state = &transaction->packfile;
off_t seekback, already_hashed_to;
struct git_hash_ctx ctx;
unsigned char obuf[16384];
@@ -268,21 +277,21 @@ static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
header_len = format_object_header((char *)obuf, sizeof(obuf),
OBJ_BLOB, size);
- the_hash_algo->init_fn(&ctx);
+ transaction->odb->repo->hash_algo->init_fn(&ctx);
git_hash_update(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & INDEX_WRITE_OBJECT) != 0) {
CALLOC_ARRAY(idx, 1);
- prepare_to_stream(state, flags);
+ prepare_to_stream(transaction, flags);
hashfile_checkpoint_init(state->f, &checkpoint);
}
already_hashed_to = 0;
while (1) {
- prepare_to_stream(state, flags);
+ prepare_to_stream(transaction, flags);
if (idx) {
hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
@@ -300,7 +309,7 @@ static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
BUG("should not happen");
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
- flush_bulk_checkin_packfile(state);
+ flush_bulk_checkin_packfile(transaction);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
@@ -309,7 +318,7 @@ static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
return 0;
idx->crc32 = crc32_end(state->f);
- if (already_written(state, result_oid)) {
+ if (already_written(transaction, result_oid)) {
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
@@ -323,7 +332,7 @@ static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
return 0;
}
-void prepare_loose_object_bulk_checkin(void)
+void prepare_loose_object_bulk_checkin(struct odb_transaction *transaction)
{
/*
* We lazily create the temporary object directory
@@ -331,15 +340,16 @@ void prepare_loose_object_bulk_checkin(void)
* callers may not know whether any objects will be
* added at the time they call begin_odb_transaction.
*/
- if (!odb_transaction_nesting || bulk_fsync_objdir)
+ if (!transaction || transaction->objdir)
return;
- bulk_fsync_objdir = tmp_objdir_create(the_repository, "bulk-fsync");
- if (bulk_fsync_objdir)
- tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0);
+ transaction->objdir = tmp_objdir_create(transaction->odb->repo, "bulk-fsync");
+ if (transaction->objdir)
+ tmp_objdir_replace_primary_odb(transaction->objdir, 0);
}
-void fsync_loose_object_bulk_checkin(int fd, const char *filename)
+void fsync_loose_object_bulk_checkin(struct odb_transaction *transaction,
+ int fd, const char *filename)
{
/*
* If we have an active ODB transaction, we issue a call that
@@ -348,7 +358,7 @@ void fsync_loose_object_bulk_checkin(int fd, const char *filename)
* before renaming the objects to their final names as part of
* flush_batch_fsync.
*/
- if (!bulk_fsync_objdir ||
+ if (!transaction || !transaction->objdir ||
git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
if (errno == ENOSYS)
warning(_("core.fsyncMethod = batch is unsupported on this platform"));
@@ -356,36 +366,38 @@ void fsync_loose_object_bulk_checkin(int fd, const char *filename)
}
}
-int index_blob_bulk_checkin(struct object_id *oid,
- int fd, size_t size,
- const char *path, unsigned flags)
+struct odb_transaction *begin_odb_transaction(struct object_database *odb)
{
- int status = deflate_blob_to_pack(&bulk_checkin_packfile, oid, fd, size,
- path, flags);
- if (!odb_transaction_nesting)
- flush_bulk_checkin_packfile(&bulk_checkin_packfile);
- return status;
-}
+ if (!odb->transaction) {
+ CALLOC_ARRAY(odb->transaction, 1);
+ odb->transaction->odb = odb;
+ }
-void begin_odb_transaction(void)
-{
- odb_transaction_nesting += 1;
+ odb->transaction->nesting += 1;
+
+ return odb->transaction;
}
-void flush_odb_transaction(void)
+void flush_odb_transaction(struct odb_transaction *transaction)
{
- flush_batch_fsync();
- flush_bulk_checkin_packfile(&bulk_checkin_packfile);
+ if (!transaction)
+ return;
+
+ flush_batch_fsync(transaction);
+ flush_bulk_checkin_packfile(transaction);
}
-void end_odb_transaction(void)
+void end_odb_transaction(struct odb_transaction *transaction)
{
- odb_transaction_nesting -= 1;
- if (odb_transaction_nesting < 0)
+ if (!transaction || transaction->nesting == 0)
BUG("Unbalanced ODB transaction nesting");
- if (odb_transaction_nesting)
+ transaction->nesting -= 1;
+
+ if (transaction->nesting)
return;
- flush_odb_transaction();
+ flush_odb_transaction(transaction);
+ transaction->odb->transaction = NULL;
+ free(transaction);
}
diff --git a/bulk-checkin.h b/bulk-checkin.h
index 7246ea58dc..ac8887f476 100644
--- a/bulk-checkin.h
+++ b/bulk-checkin.h
@@ -5,13 +5,20 @@
#define BULK_CHECKIN_H
#include "object.h"
+#include "odb.h"
-void prepare_loose_object_bulk_checkin(void);
-void fsync_loose_object_bulk_checkin(int fd, const char *filename);
+struct odb_transaction;
+
+void prepare_loose_object_bulk_checkin(struct odb_transaction *transaction);
+void fsync_loose_object_bulk_checkin(struct odb_transaction *transaction,
+ int fd, const char *filename);
/*
- * This creates one packfile per large blob unless bulk-checkin
- * machinery is "plugged".
+ * This writes the specified object to a packfile. Objects written
+ * during the same transaction are written to the same packfile. The
+ * packfile is not flushed until the transaction itself is flushed. The
+ * caller is expected to ensure that a valid transaction has been set
+ * up before recording objects.
*
* This also bypasses the usual "convert-to-git" dance, and that is on
* purpose. We could write a streaming version of the converting
@@ -24,8 +31,8 @@ void fsync_loose_object_bulk_checkin(int fd, const char *filename);
* binary blobs, they generally do not want to get any conversion, and
* callers should avoid this code path when filters are requested.
*/
-int index_blob_bulk_checkin(struct object_id *oid,
- int fd, size_t size,
+int index_blob_bulk_checkin(struct odb_transaction *transaction,
+ struct object_id *oid, int fd, size_t size,
const char *path, unsigned flags);
/*
@@ -35,20 +42,20 @@ int index_blob_bulk_checkin(struct object_id *oid,
* and objects are only visible after the outermost transaction
* is complete or the transaction is flushed.
*/
-void begin_odb_transaction(void);
+struct odb_transaction *begin_odb_transaction(struct object_database *odb);
/*
* Make any objects that are currently part of a pending object
* database transaction visible. It is valid to call this function
* even if no transaction is active.
*/
-void flush_odb_transaction(void);
+void flush_odb_transaction(struct odb_transaction *transaction);
/*
* Tell the object database to make any objects from the
* current transaction visible if this is the final nested
* transaction.
*/
-void end_odb_transaction(void);
+void end_odb_transaction(struct odb_transaction *transaction);
#endif
diff --git a/cache-tree.c b/cache-tree.c
index 66ef2becbe..d225554eed 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -474,6 +474,7 @@ static int update_one(struct cache_tree *it,
int cache_tree_update(struct index_state *istate, int flags)
{
+ struct odb_transaction *transaction;
int skip, i;
i = verify_cache(istate, flags);
@@ -489,10 +490,10 @@ int cache_tree_update(struct index_state *istate, int flags)
trace_performance_enter();
trace2_region_enter("cache_tree", "update", the_repository);
- begin_odb_transaction();
+ transaction = begin_odb_transaction(the_repository->objects);
i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
"", 0, &skip, flags);
- end_odb_transaction();
+ end_odb_transaction(transaction);
trace2_region_leave("cache_tree", "update", the_repository);
trace_performance_leave("cache_tree_update");
if (i < 0)
diff --git a/command-list.txt b/command-list.txt
index 1b0bdee00d..accd3d0c4b 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -124,6 +124,7 @@ git-index-pack plumbingmanipulators
git-init mainporcelain init
git-instaweb ancillaryinterrogators complete
git-interpret-trailers purehelpers
+git-last-modified plumbinginterrogators
git-log mainporcelain info
git-ls-files plumbinginterrogators
git-ls-remote plumbinginterrogators
diff --git a/commit-graph.c b/commit-graph.c
index 3cd9e73e2a..2f20f66cfd 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -812,7 +812,12 @@ int corrected_commit_dates_enabled(struct repository *r)
struct bloom_filter_settings *get_bloom_filter_settings(struct repository *r)
{
- struct commit_graph *g = r->objects->commit_graph;
+ struct commit_graph *g;
+
+ if (!prepare_commit_graph(r))
+ return NULL;
+
+ g = r->objects->commit_graph;
while (g) {
if (g->bloom_filter_settings)
return g->bloom_filter_settings;
diff --git a/config.c b/config.c
index e0ff35d426..74bf76a97e 100644
--- a/config.c
+++ b/config.c
@@ -8,9 +8,11 @@
#include "git-compat-util.h"
#include "abspath.h"
+#include "advice.h"
#include "date.h"
#include "branch.h"
#include "config.h"
+#include "dir.h"
#include "parse.h"
#include "convert.h"
#include "environment.h"
@@ -1948,10 +1950,290 @@ int git_configset_get_pathname(struct config_set *set, const char *key, char **d
return 1;
}
+struct comment_char_config {
+ unsigned last_key_id;
+ bool auto_set;
+ bool auto_set_in_file;
+ struct strintmap key_flags;
+ size_t alloc, nr;
+ struct comment_char_config_item {
+ unsigned key_id;
+ char *path;
+ enum config_scope scope;
+ } *item;
+};
+
+#define COMMENT_CHAR_CFG_INIT { \
+ .key_flags = STRINTMAP_INIT, \
+ }
+
+static void comment_char_config_release(struct comment_char_config *config)
+{
+ strintmap_clear(&config->key_flags);
+ for (size_t i = 0; i < config->nr; i++)
+ free(config->item[i].path);
+ free(config->item);
+}
+
+/* Used to track whether the key occurs more than once in a given file */
+#define KEY_SEEN_ONCE 1u
+#define KEY_SEEN_TWICE 2u
+#define COMMENT_KEY_SHIFT(id) (2 * (id))
+#define COMMENT_KEY_MASK(id) (3u << COMMENT_KEY_SHIFT(id))
+
+static void set_comment_key_flags(struct comment_char_config *config,
+ const char *path, unsigned id, unsigned value)
+{
+ unsigned old = strintmap_get(&config->key_flags, path);
+ unsigned new = (old & ~COMMENT_KEY_MASK(id)) |
+ value << COMMENT_KEY_SHIFT(id);
+
+ strintmap_set(&config->key_flags, path, new);
+}
+
+static unsigned get_comment_key_flags(struct comment_char_config *config,
+ const char *path, unsigned id)
+{
+ unsigned value = strintmap_get(&config->key_flags, path);
+
+ return (value & COMMENT_KEY_MASK(id)) >> COMMENT_KEY_SHIFT(id);
+}
+
+static const char *comment_key_name(unsigned id)
+{
+ static const char *name[] = {
+ "core.commentChar",
+ "core.commentString",
+ };
+
+ if (id >= ARRAY_SIZE(name))
+ BUG("invalid comment key id");
+
+ return name[id];
+}
+
+static void comment_char_callback(const char *key, const char *value,
+ const struct config_context *ctx, void *data)
+{
+ struct comment_char_config *config = data;
+ const struct key_value_info *kvi = ctx->kvi;
+ unsigned key_id;
+
+ if (!strcmp(key, "core.commentchar"))
+ key_id = 0;
+ else if (!strcmp(key, "core.commentstring"))
+ key_id = 1;
+ else
+ return;
+
+ config->last_key_id = key_id;
+ config->auto_set = value && !strcmp(value, "auto");
+ if (kvi->origin_type != CONFIG_ORIGIN_FILE) {
+ return;
+ } else if (get_comment_key_flags(config, kvi->filename, key_id)) {
+ set_comment_key_flags(config, kvi->filename, key_id,
+ KEY_SEEN_TWICE);
+ } else {
+ struct comment_char_config_item *item;
+
+ ALLOC_GROW_BY(config->item, config->nr, 1, config->alloc);
+ item = &config->item[config->nr - 1];
+ item->key_id = key_id;
+ item->scope = kvi->scope;
+ item->path = xstrdup(kvi->filename);
+ set_comment_key_flags(config, kvi->filename, key_id,
+ KEY_SEEN_ONCE);
+ }
+ config->auto_set_in_file = config->auto_set;
+}
+
+static void add_config_scope_arg(struct repository *repo, struct strbuf *buf,
+ struct comment_char_config_item *item)
+{
+ char *global_config = git_global_config();
+ char *system_config = git_system_config();
+
+ if (item->scope == CONFIG_SCOPE_SYSTEM && access(item->path, W_OK)) {
+ /*
+ * If the user cannot write to the system config, recommend
+ * setting the global config instead.
+ */
+ strbuf_addstr(buf, "--global ");
+ } else if (fspatheq(item->path, system_config)) {
+ strbuf_addstr(buf, "--system ");
+ } else if (fspatheq(item->path, global_config)) {
+ strbuf_addstr(buf, "--global ");
+ } else if (fspatheq(item->path,
+ mkpath("%s/config",
+ repo_get_git_dir(repo)))) {
+ ; /* --local is the default */
+ } else if (fspatheq(item->path,
+ mkpath("%s/config.worktree",
+ repo_get_common_dir(repo)))) {
+ strbuf_addstr(buf, "--worktree ");
+ } else {
+ const char *path = item->path;
+ const char *home = getenv("HOME");
+
+ strbuf_addstr(buf, "--file ");
+ if (home && !fspathncmp(path, home, strlen(home))) {
+ path += strlen(home);
+ if (!fspathncmp(path, "/", 1))
+ path++;
+ strbuf_addstr(buf, "~/");
+ }
+ sq_quote_buf_pretty(buf, path);
+ strbuf_addch(buf, ' ');
+ }
+
+ free(global_config);
+ free(system_config);
+}
+
+static bool can_unset_comment_char_config(struct comment_char_config *config)
+{
+ for (size_t i = 0; i < config->nr; i++) {
+ struct comment_char_config_item *item = &config->item[i];
+
+ if (item->scope == CONFIG_SCOPE_SYSTEM &&
+ access(item->path, W_OK))
+ return false;
+ }
+
+ return true;
+}
+
+static void add_unset_auto_comment_char_advice(struct repository *repo,
+ struct comment_char_config *config)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!can_unset_comment_char_config(config))
+ return;
+
+ for (size_t i = 0; i < config->nr; i++) {
+ struct comment_char_config_item *item = &config->item[i];
+
+ strbuf_addstr(&buf, " git config unset ");
+ add_config_scope_arg(repo, &buf, item);
+ if (get_comment_key_flags(config, item->path, item->key_id) == KEY_SEEN_TWICE)
+ strbuf_addstr(&buf, "--all ");
+ strbuf_addf(&buf, "%s\n", comment_key_name(item->key_id));
+ }
+ advise(_("\nTo use the default comment string (#) please run\n\n%s"),
+ buf.buf);
+ strbuf_release(&buf);
+}
+
+static void add_comment_char_advice(struct repository *repo,
+ struct comment_char_config *config)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct comment_char_config_item *item;
+ /* TRANSLATORS: this is a placeholder for the value of core.commentString */
+ const char *placeholder = _("<comment string>");
+
+ /*
+ * If auto is set in the last file that we saw, advise the user how
+ * to update their config.
+ */
+ if (!config->auto_set_in_file)
+ return;
+
+ add_unset_auto_comment_char_advice(repo, config);
+ item = &config->item[config->nr - 1];
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, " git config set ");
+ add_config_scope_arg(repo, &buf, item);
+ strbuf_addf(&buf, "%s %s\n", comment_key_name(item->key_id),
+ placeholder);
+ advise(_("\nTo set a custom comment string please run\n\n"
+ "%s\nwhere '%s' is the string you wish to use.\n"),
+ buf.buf, placeholder);
+ strbuf_release(&buf);
+}
+
+#undef KEY_SEEN_ONCE
+#undef KEY_SEEN_TWICE
+#undef COMMENT_KEY_SHIFT
+#undef COMMENT_KEY_MASK
+
+struct repo_config {
+ struct repository *repo;
+ struct comment_char_config comment_char_config;
+};
+
+#define REPO_CONFIG_INIT(repo_) { \
+ .comment_char_config = COMMENT_CHAR_CFG_INIT, \
+ .repo = repo_, \
+ };
+
+static void repo_config_release(struct repo_config *config)
+{
+ comment_char_config_release(&config->comment_char_config);
+}
+
+#ifdef WITH_BREAKING_CHANGES
+static void check_auto_comment_char_config(struct repository *repo,
+ struct comment_char_config *config)
+{
+ if (!config->auto_set)
+ return;
+
+ die_message(_("Support for '%s=auto' has been removed in Git 3.0"),
+ comment_key_name(config->last_key_id));
+ add_comment_char_advice(repo, config);
+ die(NULL);
+}
+#else
+static void check_auto_comment_char_config(struct repository *repo,
+ struct comment_char_config *config)
+{
+ extern bool warn_on_auto_comment_char;
+ const char *DEPRECATED_CONFIG_ENV =
+ "GIT_AUTO_COMMENT_CHAR_CONFIG_WARNING_GIVEN";
+
+ if (!config->auto_set || !warn_on_auto_comment_char)
+ return;
+
+ /*
+ * Use an environment variable to ensure that subprocesses do not repeat
+ * the warning.
+ */
+ if (git_env_bool(DEPRECATED_CONFIG_ENV, false))
+ return;
+
+ setenv(DEPRECATED_CONFIG_ENV, "true", true);
+
+ warning(_("Support for '%s=auto' is deprecated and will be removed in "
+ "Git 3.0"), comment_key_name(config->last_key_id));
+ add_comment_char_advice(repo, config);
+}
+#endif /* WITH_BREAKING_CHANGES */
+
+static void check_deprecated_config(struct repo_config *config)
+{
+ if (!config->repo->check_deprecated_config)
+ return;
+
+ check_auto_comment_char_config(config->repo,
+ &config->comment_char_config);
+}
+
+static int repo_config_callback(const char *key, const char *value,
+ const struct config_context *ctx, void *data)
+{
+ struct repo_config *config = data;
+
+ comment_char_callback(key, value, ctx, &config->comment_char_config);
+ return config_set_callback(key, value, ctx, config->repo->config);
+}
+
/* Functions use to read configuration from a repository */
static void repo_read_config(struct repository *repo)
{
struct config_options opts = { 0 };
+ struct repo_config config = REPO_CONFIG_INIT(repo);
opts.respect_includes = 1;
opts.commondir = repo->commondir;
@@ -1963,8 +2245,8 @@ static void repo_read_config(struct repository *repo)
git_configset_clear(repo->config);
git_configset_init(repo->config);
- if (config_with_options(config_set_callback, repo->config, NULL,
- repo, &opts) < 0)
+ if (config_with_options(repo_config_callback, &config, NULL, repo,
+ &opts) < 0)
/*
* config_with_options() normally returns only
* zero, as most errors are fatal, and
@@ -1977,6 +2259,8 @@ static void repo_read_config(struct repository *repo)
* immediately.
*/
die(_("unknown error occurred while reading the configuration files"));
+ check_deprecated_config(&config);
+ repo_config_release(&config);
}
static void git_config_check_init(struct repository *repo)
@@ -2664,6 +2948,14 @@ int repo_config_set_multivar_in_file_gently(struct repository *r,
char *contents = NULL;
size_t contents_sz;
struct config_store_data store = CONFIG_STORE_INIT;
+ bool saved_check_deprecated_config = r->check_deprecated_config;
+
+ /*
+ * Do not warn or die if there are deprecated config settings as
+ * we want the user to be able to change those settings by running
+ * "git config".
+ */
+ r->check_deprecated_config = false;
validate_comment_string(comment);
@@ -2895,6 +3187,7 @@ out_free:
if (in_fd >= 0)
close(in_fd);
config_store_data_clear(&store);
+ r->check_deprecated_config = saved_check_deprecated_config;
return ret;
write_err_out:
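
The advice built by add_unset_auto_comment_char_advice() and add_comment_char_advice() amounts to commands along these lines (the scope and the replacement string are illustrative; the real message substitutes the file-appropriate --system/--global/--worktree/--file argument):

    # Drop the deprecated "auto" setting...
    git config unset --global core.commentChar

    # ...or pick an explicit comment string instead
    git config set --global core.commentString ';'
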
diff --git a/contrib/diff-highlight/README b/contrib/diff-highlight/README
index d4c2343175..1db4440e68 100644
--- a/contrib/diff-highlight/README
+++ b/contrib/diff-highlight/README
@@ -58,6 +58,14 @@ following in your git configuration:
diff = diff-highlight | less
---------------------------------------------
+If you use the interactive patch mode of `git add -p`, `git checkout
+-p`, etc., you may also want to configure it to be used there:
+
+---------------------------------------------
+[interactive]
+ diffFilter = diff-highlight
+---------------------------------------------
+
Color Config
------------
diff --git a/contrib/subtree/git-subtree.sh b/contrib/subtree/git-subtree.sh
index 3fddba797c..17106d1a72 100755
--- a/contrib/subtree/git-subtree.sh
+++ b/contrib/subtree/git-subtree.sh
@@ -785,20 +785,40 @@ ensure_valid_ref_format () {
die "fatal: '$1' does not look like a ref"
}
-# Usage: check if a commit from another subtree should be
+# Usage: should_ignore_subtree_split_commit REV
+#
+# Check if REV is a commit from another subtree and should be
# ignored from processing for splits
should_ignore_subtree_split_commit () {
assert test $# = 1
- local rev="$1"
- if test -n "$(git log -1 --grep="git-subtree-dir:" $rev)"
+
+ git show \
+ --no-patch \
+ --no-show-signature \
+ --format='%(trailers:key=git-subtree-dir,key=git-subtree-mainline)' \
+ "$1" |
+ (
+ have_mainline=
+ subtree_dir=
+
+ while read -r trailer val
+ do
+ case "$trailer" in
+ git-subtree-dir:)
+ subtree_dir="${val%/}" ;;
+ git-subtree-mainline:)
+ have_mainline=y ;;
+ esac
+ done
+
+ if test -n "${subtree_dir}" &&
+ test -z "${have_mainline}" &&
+ test "${subtree_dir}" != "$arg_prefix"
then
- if test -z "$(git log -1 --grep="git-subtree-mainline:" $rev)" &&
- test -z "$(git log -1 --grep="git-subtree-dir: $arg_prefix$" $rev)"
- then
- return 0
- fi
+ return 0
fi
return 1
+ )
}
# Usage: process_split_commit REV PARENTS
diff --git a/contrib/subtree/t/t7900-subtree.sh b/contrib/subtree/t/t7900-subtree.sh
index 3edbb33af4..316dc5269e 100755
--- a/contrib/subtree/t/t7900-subtree.sh
+++ b/contrib/subtree/t/t7900-subtree.sh
@@ -9,6 +9,9 @@ This test verifies the basic operation of the add, merge, split, pull,
and push subcommands of git subtree.
'
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+
TEST_DIRECTORY=$(pwd)/../../../t
. "$TEST_DIRECTORY"/test-lib.sh
. "$TEST_DIRECTORY"/lib-gpg.sh
@@ -68,6 +71,33 @@ test_create_pre2_32_repo () {
git -C "$1-clone" replace HEAD^2 $new_commit
}
+# test_create_subtree_add REPO ORPHAN PREFIX FILENAME ...
+#
+# Create a simple subtree on a new branch named ORPHAN in REPO.
+# The subtree is then merged into the current branch of REPO,
+# under PREFIX. The generated subtree has one commit with
+# subject and tag FILENAME and a single file "FILENAME.t".
+#
+# When this method returns:
+# - the current branch of REPO will have file PREFIX/FILENAME.t
+# - REPO will have a branch named ORPHAN with subtree history
+#
+# Additional arguments are forwarded to "subtree add".
+test_create_subtree_add () {
+ (
+ cd "$1" &&
+ orphan="$2" &&
+ prefix="$3" &&
+ filename="$4" &&
+ shift 4 &&
+ last="$(git branch --show-current)" &&
+ git switch --orphan "$orphan" &&
+ test_commit "$filename" &&
+ git checkout "$last" &&
+ git subtree add --prefix="$prefix" "$@" "$orphan"
+ )
+}
+
test_expect_success 'shows short help text for -h' '
test_expect_code 129 git subtree -h >out 2>err &&
test_must_be_empty err &&
@@ -426,6 +456,47 @@ test_expect_success 'split with multiple subtrees' '
--squash --rejoin -d -m "Sub B Split 1" 2>&1 | grep -w "\[1\]")" = ""
'
+# When running `subtree split` on a directory that has other subtree
+# *merges* underneath it, the split must include those subtrees.
+# This test creates a nested subtree, `subA/subB`, and tests
+# that the tree is correct after a subtree split of `subA/`.
+# The test covers:
+# - An initial `subtree add`; and
+# - A follow-up `subtree merge`
+# both with and without `--squash`.
+for is_squashed in '' 'y'
+do
+ test_expect_success "split keeps nested ${is_squashed:+--squash }subtrees that are part of the split" '
+ subtree_test_create_repo "$test_count" &&
+ (
+ cd "$test_count" &&
+ mkdir subA &&
+ test_commit subA/file1 &&
+ test_create_subtree_add \
+ . mksubtree subA/subB file2 ${is_squashed:+--squash} &&
+ test_path_is_file subA/file1.t &&
+ test_path_is_file subA/subB/file2.t &&
+ git subtree split --prefix=subA --branch=bsplit &&
+ git checkout bsplit &&
+ test_path_is_file file1.t &&
+ test_path_is_file subB/file2.t &&
+ git checkout mksubtree &&
+ git branch -D bsplit &&
+ test_commit file3 &&
+ git checkout main &&
+ git subtree merge \
+ ${is_squashed:+--squash} \
+ --prefix=subA/subB mksubtree &&
+ test_path_is_file subA/subB/file3.t &&
+ git subtree split --prefix=subA --branch=bsplit &&
+ git checkout bsplit &&
+ test_path_is_file file1.t &&
+ test_path_is_file subB/file2.t &&
+ test_path_is_file subB/file3.t
+ )
+ '
+done
+
test_expect_success 'split sub dir/ with --rejoin from scratch' '
subtree_test_create_repo "$test_count" &&
test_create_commit "$test_count" main1 &&
diff --git a/environment.c b/environment.c
index 0e72fdac85..a770b5921d 100644
--- a/environment.c
+++ b/environment.c
@@ -121,7 +121,10 @@ int protect_ntfs = PROTECT_NTFS_DEFAULT;
*/
const char *comment_line_str = "#";
char *comment_line_str_to_free;
+#ifndef WITH_BREAKING_CHANGES
int auto_comment_line_char;
+bool warn_on_auto_comment_char;
+#endif /* !WITH_BREAKING_CHANGES */
/* This is set by setup_git_directory_gently() and/or git_default_config() */
char *git_work_tree_cfg;
@@ -463,16 +466,22 @@ static int git_default_core_config(const char *var, const char *value,
if (!strcmp(var, "core.commentchar") ||
!strcmp(var, "core.commentstring")) {
- if (!value)
+ if (!value) {
return config_error_nonbool(var);
- else if (!strcasecmp(value, "auto"))
+#ifndef WITH_BREAKING_CHANGES
+ } else if (!strcasecmp(value, "auto")) {
auto_comment_line_char = 1;
- else if (value[0]) {
+ FREE_AND_NULL(comment_line_str_to_free);
+ comment_line_str = "#";
+#endif /* !WITH_BREAKING_CHANGES */
+ } else if (value[0]) {
if (strchr(value, '\n'))
return error(_("%s cannot contain newline"), var);
comment_line_str = value;
FREE_AND_NULL(comment_line_str_to_free);
+#ifndef WITH_BREAKING_CHANGES
auto_comment_line_char = 0;
+#endif /* !WITH_BREAKING_CHANGES */
} else
return error(_("%s must have at least one character"), var);
return 0;
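The branch above parses the user-facing `core.commentChar`/`core.commentString` settings; a short sketch of how they are set (the values are illustrative). Note that the `auto` handling is compiled out when Git is built with `WITH_BREAKING_CHANGES`:

```bash
git config core.commentChar ";"      # use ';' instead of the default '#'
git config core.commentString "//"   # multi-character form, same code path
git config core.commentChar auto     # historical auto-detection of a free character
```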
diff --git a/environment.h b/environment.h
index 8cfce41015..51898c99cd 100644
--- a/environment.h
+++ b/environment.h
@@ -208,7 +208,10 @@ extern char *excludes_file;
*/
extern const char *comment_line_str;
extern char *comment_line_str_to_free;
+#ifndef WITH_BREAKING_CHANGES
extern int auto_comment_line_char;
+extern bool warn_on_auto_comment_char;
+#endif /* !WITH_BREAKING_CHANGES */
# endif /* USE_THE_REPOSITORY_VARIABLE */
#endif /* ENVIRONMENT_H */
diff --git a/git-curl-compat.h b/git-curl-compat.h
index aa8eed7ed2..659e5a3875 100644
--- a/git-curl-compat.h
+++ b/git-curl-compat.h
@@ -46,6 +46,13 @@
#endif
/**
+ * curl_global_trace() was added in 8.3.0, released September 2023.
+ */
+#if LIBCURL_VERSION_NUM >= 0x080300
+#define GIT_CURL_HAVE_GLOBAL_TRACE 1
+#endif
+
+/**
* CURLOPT_TCP_KEEPCNT was added in 8.9.0, released in July, 2024.
*/
#if LIBCURL_VERSION_NUM >= 0x080900
diff --git a/git-gui/Makefile b/git-gui/Makefile
index 27bbe051de..69b0b84435 100644
--- a/git-gui/Makefile
+++ b/git-gui/Makefile
@@ -186,6 +186,7 @@ install: all
$(QUIET)$(INSTALL_D0)'$(DESTDIR_SQ)$(gitexecdir_SQ)' $(INSTALL_D1)
$(QUIET)$(INSTALL_X0)git-gui $(INSTALL_X1) '$(DESTDIR_SQ)$(gitexecdir_SQ)'
$(QUIET)$(INSTALL_X0)git-gui--askpass $(INSTALL_X1) '$(DESTDIR_SQ)$(gitexecdir_SQ)'
+ $(QUIET)$(INSTALL_X0)git-gui--askyesno $(INSTALL_X1) '$(DESTDIR_SQ)$(gitexecdir_SQ)'
$(QUIET)$(foreach p,$(GITGUI_BUILT_INS), $(INSTALL_L0)'$(DESTDIR_SQ)$(gitexecdir_SQ)/$p' $(INSTALL_L1)'$(DESTDIR_SQ)$(gitexecdir_SQ)/git-gui' $(INSTALL_L2)'$(DESTDIR_SQ)$(gitexecdir_SQ)/$p' $(INSTALL_L3) &&) true
ifdef GITGUI_WINDOWS_WRAPPER
$(QUIET)$(INSTALL_R0)git-gui.tcl $(INSTALL_R1) '$(DESTDIR_SQ)$(gitexecdir_SQ)'
@@ -200,6 +201,7 @@ uninstall:
$(QUIET)$(CLEAN_DST) '$(DESTDIR_SQ)$(gitexecdir_SQ)'
$(QUIET)$(REMOVE_F0)'$(DESTDIR_SQ)$(gitexecdir_SQ)'/git-gui $(REMOVE_F1)
$(QUIET)$(REMOVE_F0)'$(DESTDIR_SQ)$(gitexecdir_SQ)'/git-gui--askpass $(REMOVE_F1)
+ $(QUIET)$(REMOVE_F0)'$(DESTDIR_SQ)$(gitexecdir_SQ)'/git-gui--askyesno $(REMOVE_F1)
$(QUIET)$(foreach p,$(GITGUI_BUILT_INS), $(REMOVE_F0)'$(DESTDIR_SQ)$(gitexecdir_SQ)'/$p $(REMOVE_F1) &&) true
ifdef GITGUI_WINDOWS_WRAPPER
$(QUIET)$(REMOVE_F0)'$(DESTDIR_SQ)$(gitexecdir_SQ)'/git-gui.tcl $(REMOVE_F1)
diff --git a/git-gui/git-gui--askyesno b/git-gui/git-gui--askyesno
new file mode 100755
index 0000000000..142d1bc3de
--- /dev/null
+++ b/git-gui/git-gui--askyesno
@@ -0,0 +1,63 @@
+#!/bin/sh
+# Tcl ignores the next line -*- tcl -*- \
+exec wish "$0" -- "$@"
+
+# This is an implementation of a simple yes/no dialog. git gui
+# makes it available to git via the GIT_ASK_YESNO environment
+# variable so that yes/no questions can be answered interactively.
+#
+# The window title, which defaults to "Question?", can be
+# overridden via the optional `--title` command-line
+# option.
+
+set NS {}
+set use_ttk [package vsatisfies [package provide Tk] 8.5]
+if {$use_ttk} {
+ set NS ttk
+}
+
+set title "Question?"
+if {$argc < 1} {
+ puts stderr "Usage: $argv0 <question>"
+ exit 1
+} else {
+ if {$argc > 2 && [lindex $argv 0] == "--title"} {
+ set title [lindex $argv 1]
+ set argv [lreplace $argv 0 1]
+ }
+ set prompt [join $argv " "]
+}
+
+${NS}::frame .t
+${NS}::label .t.m -text $prompt -justify center -width 40
+.t.m configure -wraplength 400
+pack .t.m -side top -fill x -padx 20 -pady 20 -expand 1
+pack .t -side top -fill x -ipadx 20 -ipady 20 -expand 1
+
+${NS}::frame .b
+${NS}::frame .b.left -width 200
+${NS}::button .b.yes -text Yes -command {exit 0}
+${NS}::button .b.no -text No -command {exit 1}
+
+pack .b.left -side left -expand 1 -fill x
+pack .b.yes -side left -expand 1
+pack .b.no -side right -expand 1 -ipadx 5
+pack .b -side bottom -fill x -ipadx 20 -ipady 15
+
+bind . <Key-Return> {exit 0}
+bind . <Key-Escape> {exit 1}
+
+if {$::tcl_platform(platform) eq {windows}} {
+ set icopath [file dirname [file normalize $argv0]]
+ if {[file tail $icopath] eq {git-core}} {
+ set icopath [file dirname $icopath]
+ }
+ set icopath [file dirname $icopath]
+ set icopath [file join $icopath share git git-for-windows.ico]
+ if {[file exists $icopath]} {
+ wm iconbitmap . -default $icopath
+ }
+}
+
+wm title . $title
+tk::PlaceWindow .
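A small usage sketch of the dialog's command-line interface described in the header comment (the script is invoked by its plain name here for brevity; the question text is made up):

```bash
# Exit status 0 means "Yes" (or Return), 1 means "No" (or Escape).
if git-gui--askyesno --title "Push changes" "Push the current branch to origin?"
then
	echo "confirmed"
else
	echo "declined"
fi
```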
diff --git a/git-gui/git-gui.sh b/git-gui/git-gui.sh
index a931d7f7c9..d3d3aa14a9 100755
--- a/git-gui/git-gui.sh
+++ b/git-gui/git-gui.sh
@@ -103,7 +103,6 @@ if {[is_Windows]} {
set _path_sep {:}
}
-set _search_path {}
set _path_seen [dict create]
foreach p [split $env(PATH) $_path_sep] {
# Keep only absolute paths, getting rid of ., empty, etc.
@@ -112,12 +111,9 @@ foreach p [split $env(PATH) $_path_sep] {
}
# Keep only the first occurrence of any duplicates.
set norm_p [file normalize $p]
- if {[dict exists $_path_seen $norm_p]} {
- continue
- }
dict set _path_seen $norm_p 1
- lappend _search_path $norm_p
}
+set _search_path [dict keys $_path_seen]
unset _path_seen
set env(PATH) [join $_search_path $_path_sep]
@@ -583,21 +579,6 @@ proc open_cmd_pipe {cmd path} {
return [open |$run r]
}
-proc _lappend_nice {cmd_var} {
- global _nice
- upvar $cmd_var cmd
-
- if {![info exists _nice]} {
- set _nice [_which nice]
- if {[catch {safe_exec [list $_nice git version]}]} {
- set _nice {}
- }
- }
- if {$_nice ne {}} {
- lappend cmd $_nice
- }
-}
-
proc git {args} {
git_redir $args {}
}
@@ -631,15 +612,14 @@ proc git_read {cmd {redir {}}} {
return [safe_open_command $cmdp $redir]
}
-proc git_read_nice {cmd} {
- global _git
- set opt [list]
-
- _lappend_nice opt
-
- set cmdp [concat [list $_git] $cmd]
+set _nice [list [_which nice]]
+if {[catch {safe_exec [list {*}$_nice git version]}]} {
+ set _nice {}
+}
- return [safe_open_command [concat $opt $cmdp]]
+proc git_read_nice {cmd} {
+ set cmdp [list {*}$::_nice $::_git {*}$cmd]
+ return [safe_open_command $cmdp]
}
proc git_write {cmd} {
@@ -1130,6 +1110,12 @@ set argv0dir [file dirname [file normalize $::argv0]]
if {![info exists env(SSH_ASKPASS)]} {
set env(SSH_ASKPASS) [file join $argv0dir git-gui--askpass]
}
+if {![info exists env(GIT_ASKPASS)]} {
+ set env(GIT_ASKPASS) [file join $argv0dir git-gui--askpass]
+}
+if {![info exists env(GIT_ASK_YESNO)]} {
+ set env(GIT_ASK_YESNO) [file join $argv0dir git-gui--askyesno]
+}
unset argv0dir
######################################################################
diff --git a/git-gui/lib/index.tcl b/git-gui/lib/index.tcl
index 7aa09c7728..e1d38e54be 100644
--- a/git-gui/lib/index.tcl
+++ b/git-gui/lib/index.tcl
@@ -425,6 +425,11 @@ proc revert_helper {txt paths} {
if {![lock_index begin-update]} return
+ # Workaround for Tcl < 9.0: chord namespaces are not obeyed, so
+ # the chord body runs in the global namespace. This clears an error
+ # that could have been left over from a previous operation.
+ set ::err {}
+
# Common "after" functionality that waits until multiple asynchronous
# operations are complete (by waiting for them to activate their notes
# on the chord).
@@ -432,7 +437,7 @@ proc revert_helper {txt paths} {
# The asynchronous operations are each indicated below by a comment
# before the code block that starts the async operation.
set after_chord [SimpleChord::new {
- if {[string trim $err] != ""} {
+ if {[info exists err] && [string trim $err] ne ""} {
rescan_on_error $err
} else {
unlock_index
diff --git a/git-send-email.perl b/git-send-email.perl
index 437f8ac46a..cd4b316ddc 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -62,7 +62,7 @@ git send-email --translate-aliases
--smtp-user <str> * Username for SMTP-AUTH.
--smtp-pass <str> * Password for SMTP-AUTH; not necessary.
--smtp-encryption <str> * tls or ssl; anything else disables.
- --smtp-ssl * Deprecated. Use '--smtp-encryption ssl'.
+ --smtp-ssl * Deprecated. Use `--smtp-encryption ssl`.
--smtp-ssl-cert-path <str> * Path to ca-certificates (either directory or file).
Pass an empty string to disable certificate
verification.
@@ -73,6 +73,10 @@ git send-email --translate-aliases
--no-smtp-auth * Disable SMTP authentication. Shorthand for
`--smtp-auth=none`
--smtp-debug <0|1> * Disable, enable Net::SMTP debug.
+ --imap-sent-folder <str> * IMAP folder where a copy of the emails should be sent.
+ Make sure `git imap-send` is set up to use this feature.
+ --[no-]use-imap-only * Only copy emails to the IMAP folder specified by
+ `--imap-sent-folder` instead of actually sending them.
--batch-size <int> * send max <int> messages per connection.
--relogin-delay <int> * delay <int> seconds between two successive logins.
@@ -200,7 +204,7 @@ my $re_encoded_word = qr/=\?($re_token)\?($re_token)\?($re_encoded_text)\?=/;
# Variables we fill in automatically, or via prompting:
my (@to,@cc,@xh,$envelope_sender,
- $initial_in_reply_to,$reply_to,$initial_subject,@files,
+ $initial_in_reply_to,$reply_to,$initial_subject,@files,@imap_copy,
$author,$sender,$smtp_authpass,$annotate,$compose,$time);
# Things we either get from config, *or* are overridden on the
# command-line.
@@ -277,6 +281,7 @@ my ($smtp_server, $smtp_server_port, @smtp_server_options);
my ($smtp_authuser, $smtp_encryption, $smtp_ssl_cert_path);
my ($batch_size, $relogin_delay);
my ($identity, $aliasfiletype, @alias_files, $smtp_domain, $smtp_auth);
+my ($imap_sent_folder);
my ($confirm);
my (@suppress_cc);
my ($auto_8bit_encoding);
@@ -293,6 +298,7 @@ my $mailmap = 0;
my $target_xfer_encoding = 'auto';
my $forbid_sendmail_variables = 1;
my $outlook_id_fix = 'auto';
+my $use_imap_only = 0;
my %config_bool_settings = (
"thread" => \$thread,
@@ -309,6 +315,7 @@ my %config_bool_settings = (
"forbidsendmailvariables" => \$forbid_sendmail_variables,
"mailmap" => \$mailmap,
"outlookidfix" => \$outlook_id_fix,
+ "useimaponly" => \$use_imap_only,
);
my %config_settings = (
@@ -322,6 +329,7 @@ my %config_settings = (
"smtpauth" => \$smtp_auth,
"smtpbatchsize" => \$batch_size,
"smtprelogindelay" => \$relogin_delay,
+ "imapsentfolder" => \$imap_sent_folder,
"to" => \@config_to,
"tocmd" => \$to_cmd,
"cc" => \@config_cc,
@@ -527,6 +535,8 @@ my %options = (
"smtp-domain:s" => \$smtp_domain,
"smtp-auth=s" => \$smtp_auth,
"no-smtp-auth" => sub {$smtp_auth = 'none'},
+ "imap-sent-folder=s" => \$imap_sent_folder,
+ "use-imap-only!" => \$use_imap_only,
"annotate!" => \$annotate,
"compose" => \$compose,
"quiet" => \$quiet,
@@ -1678,6 +1688,8 @@ EOF
if ($dry_run) {
# We don't want to send the email.
+ } elsif ($use_imap_only) {
+ die __("The destination IMAP folder is not properly defined.") if !defined $imap_sent_folder;
} elsif (defined $sendmail_cmd || file_name_is_absolute($smtp_server)) {
my $pid = open my $sm, '|-';
defined $pid or die $!;
@@ -1829,6 +1841,17 @@ EOF
print "\n";
}
+ if ($imap_sent_folder && !$dry_run) {
+ my $imap_header = $header;
+ if (@initial_bcc) {
+ # Bcc is not a part of $header, so we add it here.
+ # This is only for the IMAP copy, not for the actual email
+ # sent to the recipients.
+ $imap_header .= "Bcc: " . join(", ", @initial_bcc) . "\n";
+ }
+ push @imap_copy, "From git-send-email\n$imap_header\n$message";
+ }
+
return 1;
}
@@ -1931,6 +1954,9 @@ sub pre_process_file {
$in_reply_to = $1;
}
}
+ elsif (/^Reply-To: (.*)/i) {
+ $reply_to = $1;
+ }
elsif (/^References: (.*)/i) {
if (!$initial_in_reply_to || $thread) {
$references = $1;
@@ -2223,6 +2249,19 @@ sub cleanup_compose_files {
$smtp->quit if $smtp;
+if ($imap_sent_folder && @imap_copy && !$dry_run) {
+ my $imap_input = join("\n", @imap_copy);
+ eval {
+ print "\nStarting git imap-send...\n";
+ my ($fh, $ctx) = Git::command_input_pipe(['imap-send', '-f', $imap_sent_folder]);
+ print $fh $imap_input;
+ Git::command_close_pipe($fh, $ctx);
+ 1;
+ } or do {
+ warn "Warning: failed to send messages to IMAP folder $imap_sent_folder: $@";
+ };
+}
+
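Putting the new pieces together, a hedged usage sketch (the folder name and patch files are illustrative, and `git imap-send` is assumed to be configured already, e.g. `imap.host`/`imap.user`); the configuration keys mirror the `imapsentfolder` and `useimaponly` entries registered above:

```bash
# Keep a copy of every sent patch in the IMAP "Sent" folder.
git send-email --imap-sent-folder=Sent --to=git@vger.kernel.org 0001-*.patch

# Only upload the patches to IMAP instead of sending them via SMTP.
git send-email --use-imap-only --imap-sent-folder=Sent 0001-*.patch

# Equivalent configuration:
git config sendemail.imapSentFolder Sent
git config sendemail.useImapOnly true
```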
sub apply_transfer_encoding {
my $message = shift;
my $from = shift;
diff --git a/git.c b/git.c
index 5dc210b7b4..d020eef021 100644
--- a/git.c
+++ b/git.c
@@ -565,6 +565,7 @@ static struct cmd_struct commands[] = {
{ "init", cmd_init_db },
{ "init-db", cmd_init_db },
{ "interpret-trailers", cmd_interpret_trailers, RUN_SETUP_GENTLY },
+ { "last-modified", cmd_last_modified, RUN_SETUP },
{ "log", cmd_log, RUN_SETUP },
{ "ls-files", cmd_ls_files, RUN_SETUP },
{ "ls-remote", cmd_ls_remote, RUN_SETUP_GENTLY },
diff --git a/gitk-git/README.md b/gitk-git/README.md
new file mode 100644
index 0000000000..2e307463c6
--- /dev/null
+++ b/gitk-git/README.md
@@ -0,0 +1,93 @@
+Gitk - The Git Repository Browser
+=================================
+
+Gitk is a graphical Git repository browser. It displays the commit
+history of a Git repository as a graph, showing the relationships
+between commits, branches, and tags.
+
+Usage
+=====
+
+To view the history of the current repository:
+```bash
+gitk
+```
+
+To view the history of specific files or directories:
+```bash
+gitk path/to/file
+gitk path/to/directory
+```
+
+To view a specific branch or range of commits:
+```bash
+gitk branch-name
+gitk v1.0..v2.0
+```
+
+For more usage examples and options, see the [gitk manual](https://git-scm.com/docs/gitk).
+
+Building
+========
+
+Gitk is a Tcl/Tk application. It requires Tcl/Tk to be installed on
+your system.
+
+Running directly
+----------------
+
+Gitk can be run from the source directory without installation:
+
+```bash
+./gitk
+```
+
+This allows for quick testing of changes.
+
+Installation
+------------
+
+To install system-wide, you can use either `make` or `meson`:
+
+```bash
+# Install to default location ($HOME/bin)
+make install
+
+# Install to system-wide location
+sudo make install prefix=/usr/local
+
+# Install to custom location
+make install prefix=/opt/gitk
+
+# Using Meson
+meson setup builddir
+meson compile -C builddir
+meson install -C builddir
+```
+
+Both build systems will handle setting the correct Tcl/Tk interpreter
+path and installing translation files.
+
+Contributing
+============
+
+Contributions are welcome! The preferred method for submitting patches
+is via email to the Git mailing list, as this allows for more thorough
+review and broader community feedback. However, GitHub pull requests
+are also accepted.
+
+All commits must be signed off (use `git commit --signoff`) and should
+have commit messages prefixed with `gitk:`.
+
+Email Patches
+-------------
+
+Send patches to git@vger.kernel.org and CC j6t@kdbg.org. See the Git
+project's [patch submission guidelines](https://git-scm.com/docs/SubmittingPatches)
+for detailed instructions on creating and sending patches.
+
+License
+=======
+
+Gitk is distributed under the GNU General Public License, either
+version 2, or (at your option) any later version.
diff --git a/gitk-git/gitk b/gitk-git/gitk
index 3b6acfc592..6e4d71d585 100755
--- a/gitk-git/gitk
+++ b/gitk-git/gitk
@@ -2301,6 +2301,11 @@ proc scrollval {D {koff 0}} {
return [expr int(-($D / $scroll_D0) * max(1, $kscroll-$koff))]
}
+proc precisescrollval {D {koff 0}} {
+ global kscroll
+ return [expr (-($D / 10.0) * max(1, $kscroll-$koff))]
+}
+
proc bind_mousewheel {} {
global canv cflist ctext
bindall <MouseWheel> {allcanvs yview scroll [scrollval %D] units}
@@ -2319,6 +2324,25 @@ proc bind_mousewheel {} {
bind $cflist <Alt-MouseWheel> {$cflist yview scroll [scrollval 5*%D 2] units}
bind $cflist <Alt-Shift-MouseWheel> break
bind $canv <Alt-Shift-MouseWheel> {$canv xview scroll [scrollval 5*%D] units}
+
+ bindall <TouchpadScroll> {
+ lassign [tk::PreciseScrollDeltas %D] deltaX deltaY
+ allcanvs yview scroll [precisescrollval $deltaY] units
+ }
+ bind $ctext <TouchpadScroll> {
+ lassign [tk::PreciseScrollDeltas %D] deltaX deltaY
+ $ctext yview scroll [precisescrollval $deltaY 2] units
+ $ctext xview scroll [precisescrollval $deltaX 2] units
+ }
+ bind $cflist <TouchpadScroll> {
+ lassign [tk::PreciseScrollDeltas %D] deltaX deltaY
+ $cflist yview scroll [precisescrollval $deltaY 2] units
+ }
+ bind $canv <TouchpadScroll> {
+ lassign [tk::PreciseScrollDeltas %D] deltaX deltaY
+ $canv xview scroll [precisescrollval $deltaX] units
+ allcanvs yview scroll [precisescrollval $deltaY] units
+ }
}
}
@@ -12596,7 +12620,7 @@ set foundbgcolor yellow
set currentsearchhitbgcolor orange
# button for popping up context menus
-if {[tk windowingsystem] eq "aqua"} {
+if {[tk windowingsystem] eq "aqua" && [package vcompare $::tcl_version 8.7] < 0} {
set ctxbut <Button-2>
} else {
set ctxbut <Button-3>
diff --git a/http.c b/http.c
index 98853d6483..a7d55dcbba 100644
--- a/http.c
+++ b/http.c
@@ -1348,6 +1348,14 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK)
die("curl_global_init failed");
+#ifdef GIT_CURL_HAVE_GLOBAL_TRACE
+ {
+ const char *comp = getenv("GIT_TRACE_CURL_COMPONENTS");
+ if (comp)
+ curl_global_trace(comp);
+ }
+#endif
+
if (proactive_auth && http_proactive_auth == PROACTIVE_AUTH_NONE)
http_proactive_auth = PROACTIVE_AUTH_IF_CREDENTIALS;
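With libcurl 8.3.0 or newer this lets the existing curl tracing be narrowed to specific components; a sketch, assuming component names that libcurl documents for curl_global_trace() such as `all` or `ssl` (the URL is illustrative):

```bash
# Trace only TLS-related libcurl activity while talking to a remote.
GIT_TRACE_CURL=1 GIT_TRACE_CURL_COMPONENTS=ssl \
	git ls-remote https://example.com/repo.git
```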
diff --git a/imap-send.c b/imap-send.c
index 254ec83ab7..4bd5b8aa0d 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -1442,14 +1442,24 @@ static int count_messages(struct strbuf *all_msgs)
while (1) {
if (starts_with(p, "From ")) {
- p = strstr(p+5, "\nFrom: ");
- if (!p) break;
- p = strstr(p+7, "\nDate: ");
- if (!p) break;
- p = strstr(p+7, "\nSubject: ");
- if (!p) break;
- p += 10;
- count++;
+ if (starts_with(p, "From git-send-email")) {
+ p = strstr(p+5, "\nFrom: ");
+ if (!p) break;
+ p += 7;
+ p = strstr(p, "\nTo: ");
+ if (!p) break;
+ p += 5;
+ count++;
+ } else {
+ p = strstr(p+5, "\nFrom: ");
+ if (!p) break;
+ p = strstr(p+7, "\nDate: ");
+ if (!p) break;
+ p = strstr(p+7, "\nSubject: ");
+ if (!p) break;
+ p += 10;
+ count++;
+ }
}
p = strstr(p+5, "\nFrom ");
if (!p)
diff --git a/line-log.c b/line-log.c
index 188d387d40..8bd422148d 100644
--- a/line-log.c
+++ b/line-log.c
@@ -201,7 +201,7 @@ static void range_set_difference(struct range_set *out,
* b: ------|
*/
j++;
- if (j >= b->nr || end < b->ranges[j].start) {
+ if (j >= b->nr || end <= b->ranges[j].start) {
/*
* b exhausted, or
* a: ----|
@@ -408,7 +408,7 @@ static void diff_ranges_filter_touched(struct diff_ranges *out,
assert(out->target.nr == 0);
for (i = 0; i < diff->target.nr; i++) {
- while (diff->target.ranges[i].start > rs->ranges[j].end) {
+ while (diff->target.ranges[i].start >= rs->ranges[j].end) {
j++;
if (j == rs->nr)
return;
@@ -939,9 +939,18 @@ static void dump_diff_hacky_one(struct rev_info *rev, struct line_log_data *rang
long t_cur = t_start;
unsigned int j_last;
+ /*
+ * If a diff range touches multiple line ranges, then all
+ * those line ranges should be shown, so take a step back if
+ * the current line range is still in the previous diff range
+ * (even if only partially).
+ */
+ if (j > 0 && diff->target.ranges[j-1].end > t_start)
+ j--;
+
while (j < diff->target.nr && diff->target.ranges[j].end < t_start)
j++;
- if (j == diff->target.nr || diff->target.ranges[j].start > t_end)
+ if (j == diff->target.nr || diff->target.ranges[j].start >= t_end)
continue;
/* Scan ahead to determine the last diff that falls in this range */
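The tightened comparisons above matter when one diff hunk overlaps several requested line ranges, which is easy to reproduce with two overlapping `-L` ranges (file name and line numbers are illustrative):

```bash
# A hunk touching lines 15-20 falls into both ranges and should be
# reported for both of them.
git log -L 10,20:line-log.c -L 15,25:line-log.c
```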
diff --git a/list-objects-filter.c b/list-objects-filter.c
index 7ecd4d9ef5..acd65ebb73 100644
--- a/list-objects-filter.c
+++ b/list-objects-filter.c
@@ -524,12 +524,11 @@ static void filter_sparse_oid__init(
struct filter *filter)
{
struct filter_sparse_data *d = xcalloc(1, sizeof(*d));
- struct object_context oc;
struct object_id sparse_oid;
- if (get_oid_with_context(the_repository,
- filter_options->sparse_oid_name,
- GET_OID_BLOB, &sparse_oid, &oc))
+ if (repo_get_oid_with_flags(the_repository,
+ filter_options->sparse_oid_name,
+ &sparse_oid, GET_OID_BLOB))
die(_("unable to access sparse blob in '%s'"),
filter_options->sparse_oid_name);
if (add_patterns_from_blob_to_list(&sparse_oid, "", 0, &d->pl) < 0)
@@ -544,8 +543,6 @@ static void filter_sparse_oid__init(
filter->filter_data = d;
filter->filter_object_fn = filter_sparse;
filter->free_fn = filter_sparse_free;
-
- object_context_release(&oc);
}
/*
diff --git a/log-tree.c b/log-tree.c
index 233bf9f227..73d21f7176 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -717,6 +717,7 @@ static void show_diff_of_diff(struct rev_info *opt)
struct range_diff_options range_diff_opts = {
.creation_factor = opt->creation_factor,
.dual_color = 1,
+ .max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
.diffopt = &opts
};
diff --git a/meson.build b/meson.build
index e8ec0eca16..b3dfcc0497 100644
--- a/meson.build
+++ b/meson.build
@@ -607,6 +607,7 @@ builtin_sources = [
'builtin/index-pack.c',
'builtin/init-db.c',
'builtin/interpret-trailers.c',
+ 'builtin/last-modified.c',
'builtin/log.c',
'builtin/ls-files.c',
'builtin/ls-remote.c',
diff --git a/midx-write.c b/midx-write.c
index a0aceab5e0..c73010df6d 100644
--- a/midx-write.c
+++ b/midx-write.c
@@ -1,5 +1,3 @@
-#define DISABLE_SIGN_COMPARE_WARNINGS
-
#include "git-compat-util.h"
#include "abspath.h"
#include "config.h"
@@ -24,11 +22,12 @@
#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
+#define NO_PREFERRED_PACK (~((uint32_t)0))
extern int midx_checksum_valid(struct multi_pack_index *m);
-extern void clear_midx_files_ext(const char *object_dir, const char *ext,
+extern void clear_midx_files_ext(struct odb_source *source, const char *ext,
const char *keep_hash);
-extern void clear_incremental_midx_files_ext(const char *object_dir,
+extern void clear_incremental_midx_files_ext(struct odb_source *source,
const char *ext,
const char **keep_hashes,
uint32_t hashes_nr);
@@ -104,7 +103,7 @@ struct write_midx_context {
unsigned large_offsets_needed:1;
uint32_t num_large_offsets;
- int preferred_pack_idx;
+ uint32_t preferred_pack_idx;
int incremental;
uint32_t num_multi_pack_indexes_before;
@@ -112,6 +111,7 @@ struct write_midx_context {
struct string_list *to_include;
struct repository *repo;
+ struct odb_source *source;
};
static int should_include_pack(const struct write_midx_context *ctx,
@@ -260,7 +260,7 @@ static void midx_fanout_sort(struct midx_fanout *fanout)
static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
struct multi_pack_index *m,
uint32_t cur_fanout,
- int preferred_pack)
+ uint32_t preferred_pack)
{
uint32_t start = m->num_objects_in_base, end;
uint32_t cur_object;
@@ -274,7 +274,7 @@ static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
end = m->num_objects_in_base + ntohl(m->chunk_oid_fanout[cur_fanout]);
for (cur_object = start; cur_object < end; cur_object++) {
- if ((preferred_pack > -1) &&
+ if ((preferred_pack != NO_PREFERRED_PACK) &&
(preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
/*
* Objects from preferred packs are added
@@ -364,7 +364,8 @@ static void compute_sorted_entries(struct write_midx_context *ctx,
preferred, cur_fanout);
}
- if (-1 < ctx->preferred_pack_idx && ctx->preferred_pack_idx < start_pack)
+ if (ctx->preferred_pack_idx != NO_PREFERRED_PACK &&
+ ctx->preferred_pack_idx < start_pack)
midx_fanout_add_pack_fanout(&fanout, ctx->info,
ctx->preferred_pack_idx, 1,
cur_fanout);
@@ -648,7 +649,6 @@ static uint32_t *midx_pack_order(struct write_midx_context *ctx)
}
static void write_midx_reverse_index(struct write_midx_context *ctx,
- const char *object_dir,
unsigned char *midx_hash)
{
struct strbuf buf = STRBUF_INIT;
@@ -657,11 +657,10 @@ static void write_midx_reverse_index(struct write_midx_context *ctx,
trace2_region_enter("midx", "write_midx_reverse_index", ctx->repo);
if (ctx->incremental)
- get_split_midx_filename_ext(ctx->repo->hash_algo, &buf,
- object_dir, midx_hash,
- MIDX_EXT_REV);
+ get_split_midx_filename_ext(ctx->source, &buf,
+ midx_hash, MIDX_EXT_REV);
else
- get_midx_filename_ext(ctx->repo->hash_algo, &buf, object_dir,
+ get_midx_filename_ext(ctx->source, &buf,
midx_hash, MIDX_EXT_REV);
tmp_file = write_rev_file_order(ctx->repo, NULL, ctx->pack_order,
@@ -836,14 +835,13 @@ static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr
}
static int write_midx_bitmap(struct write_midx_context *ctx,
- const char *object_dir,
const unsigned char *midx_hash,
struct packing_data *pdata,
struct commit **commits,
uint32_t commits_nr,
unsigned flags)
{
- int ret, i;
+ int ret;
uint16_t options = 0;
struct bitmap_writer writer;
struct pack_idx_entry **index;
@@ -852,12 +850,11 @@ static int write_midx_bitmap(struct write_midx_context *ctx,
trace2_region_enter("midx", "write_midx_bitmap", ctx->repo);
if (ctx->incremental)
- get_split_midx_filename_ext(ctx->repo->hash_algo, &bitmap_name,
- object_dir, midx_hash,
- MIDX_EXT_BITMAP);
+ get_split_midx_filename_ext(ctx->source, &bitmap_name,
+ midx_hash, MIDX_EXT_BITMAP);
else
- get_midx_filename_ext(ctx->repo->hash_algo, &bitmap_name,
- object_dir, midx_hash, MIDX_EXT_BITMAP);
+ get_midx_filename_ext(ctx->source, &bitmap_name,
+ midx_hash, MIDX_EXT_BITMAP);
if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
options |= BITMAP_OPT_HASH_CACHE;
@@ -871,7 +868,7 @@ static int write_midx_bitmap(struct write_midx_context *ctx,
* this order).
*/
ALLOC_ARRAY(index, pdata->nr_objects);
- for (i = 0; i < pdata->nr_objects; i++)
+ for (uint32_t i = 0; i < pdata->nr_objects; i++)
index[i] = &pdata->objects[i].idx;
bitmap_writer_init(&writer, ctx->repo, pdata,
@@ -892,7 +889,7 @@ static int write_midx_bitmap(struct write_midx_context *ctx,
* happens between bitmap_writer_build_type_index() and
* bitmap_writer_finish().
*/
- for (i = 0; i < pdata->nr_objects; i++)
+ for (uint32_t i = 0; i < pdata->nr_objects; i++)
index[ctx->pack_order[i]] = &pdata->objects[i].idx;
bitmap_writer_select_commits(&writer, commits, commits_nr);
@@ -913,15 +910,7 @@ cleanup:
return ret;
}
-static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
- const char *object_dir)
-{
- struct odb_source *source = odb_find_source(r->objects, object_dir);
- return get_multi_pack_index(source);
-}
-
-static int fill_packs_from_midx(struct write_midx_context *ctx,
- const char *preferred_pack_name, uint32_t flags)
+static int fill_packs_from_midx(struct write_midx_context *ctx)
{
struct multi_pack_index *m;
@@ -929,30 +918,10 @@ static int fill_packs_from_midx(struct write_midx_context *ctx,
uint32_t i;
for (i = 0; i < m->num_packs; i++) {
- ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
-
- /*
- * If generating a reverse index, need to have
- * packed_git's loaded to compare their
- * mtimes and object count.
- *
- * If a preferred pack is specified, need to
- * have packed_git's loaded to ensure the chosen
- * preferred pack has a non-zero object count.
- */
- if (flags & MIDX_WRITE_REV_INDEX ||
- preferred_pack_name) {
- if (prepare_midx_pack(ctx->repo, m,
- m->num_packs_in_base + i)) {
- error(_("could not load pack"));
- return 1;
- }
-
- if (open_pack_index(m->packs[i]))
- die(_("could not open index for %s"),
- m->packs[i]->pack_name);
- }
+ if (prepare_midx_pack(m, m->num_packs_in_base + i))
+ return error(_("could not load pack"));
+ ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
fill_pack_info(&ctx->info[ctx->nr++], m->packs[i],
m->pack_names[i],
m->num_packs_in_base + i);
@@ -989,10 +958,9 @@ static int link_midx_to_chain(struct multi_pack_index *m)
for (i = 0; i < ARRAY_SIZE(midx_exts); i++) {
const unsigned char *hash = get_midx_checksum(m);
- get_midx_filename_ext(m->repo->hash_algo, &from, m->object_dir,
+ get_midx_filename_ext(m->source, &from,
hash, midx_exts[i].non_split);
- get_split_midx_filename_ext(m->repo->hash_algo, &to,
- m->object_dir, hash,
+ get_split_midx_filename_ext(m->source, &to, hash,
midx_exts[i].split);
if (link(from.buf, to.buf) < 0 && errno != ENOENT) {
@@ -1011,7 +979,7 @@ done:
return ret;
}
-static void clear_midx_files(struct repository *r, const char *object_dir,
+static void clear_midx_files(struct odb_source *source,
const char **hashes, uint32_t hashes_nr,
unsigned incremental)
{
@@ -1030,16 +998,16 @@ static void clear_midx_files(struct repository *r, const char *object_dir,
uint32_t i, j;
for (i = 0; i < ARRAY_SIZE(exts); i++) {
- clear_incremental_midx_files_ext(object_dir, exts[i],
+ clear_incremental_midx_files_ext(source, exts[i],
hashes, hashes_nr);
for (j = 0; j < hashes_nr; j++)
- clear_midx_files_ext(object_dir, exts[i], hashes[j]);
+ clear_midx_files_ext(source, exts[i], hashes[j]);
}
if (incremental)
- get_midx_filename(r->hash_algo, &buf, object_dir);
+ get_midx_filename(source, &buf);
else
- get_midx_chain_filename(&buf, object_dir);
+ get_midx_chain_filename(source, &buf);
if (unlink(buf.buf) && errno != ENOENT)
die_errno(_("failed to clear multi-pack-index at %s"), buf.buf);
@@ -1047,45 +1015,49 @@ static void clear_midx_files(struct repository *r, const char *object_dir,
strbuf_release(&buf);
}
-static int write_midx_internal(struct repository *r, const char *object_dir,
+static int write_midx_internal(struct odb_source *source,
struct string_list *packs_to_include,
struct string_list *packs_to_drop,
const char *preferred_pack_name,
const char *refs_snapshot,
unsigned flags)
{
+ struct repository *r = source->odb->repo;
struct strbuf midx_name = STRBUF_INIT;
unsigned char midx_hash[GIT_MAX_RAWSZ];
- uint32_t i, start_pack;
+ uint32_t start_pack;
struct hashfile *f = NULL;
struct lock_file lk;
struct tempfile *incr;
- struct write_midx_context ctx = { 0 };
+ struct write_midx_context ctx = {
+ .preferred_pack_idx = NO_PREFERRED_PACK,
+ };
int bitmapped_packs_concat_len = 0;
int pack_name_concat_len = 0;
int dropped_packs = 0;
- int result = 0;
+ int result = -1;
const char **keep_hashes = NULL;
struct chunkfile *cf;
trace2_region_enter("midx", "write_midx_internal", r);
ctx.repo = r;
+ ctx.source = source;
ctx.incremental = !!(flags & MIDX_WRITE_INCREMENTAL);
if (ctx.incremental)
strbuf_addf(&midx_name,
"%s/pack/multi-pack-index.d/tmp_midx_XXXXXX",
- object_dir);
+ source->path);
else
- get_midx_filename(r->hash_algo, &midx_name, object_dir);
+ get_midx_filename(source, &midx_name);
if (safe_create_leading_directories(r, midx_name.buf))
die_errno(_("unable to create leading directories of %s"),
midx_name.buf);
if (!packs_to_include || ctx.incremental) {
- struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+ struct multi_pack_index *m = get_multi_pack_index(source);
if (m && !midx_checksum_valid(m)) {
warning(_("ignoring existing multi-pack-index; checksum mismatch"));
m = NULL;
@@ -1116,15 +1088,13 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
if (flags & MIDX_WRITE_BITMAP && load_midx_revindex(m)) {
error(_("could not load reverse index for MIDX %s"),
hash_to_hex_algop(get_midx_checksum(m),
- m->repo->hash_algo));
- result = 1;
+ m->source->odb->repo->hash_algo));
goto cleanup;
}
ctx.num_multi_pack_indexes_before++;
m = m->base_midx;
}
- } else if (ctx.m && fill_packs_from_midx(&ctx, preferred_pack_name,
- flags) < 0) {
+ } else if (ctx.m && fill_packs_from_midx(&ctx)) {
goto cleanup;
}
@@ -1139,7 +1109,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
ctx.to_include = packs_to_include;
- for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
+ for_each_file_in_pack_dir(source->path, add_pack_to_midx, &ctx);
stop_progress(&ctx.progress);
if ((ctx.m && ctx.nr == ctx.m->num_packs + ctx.m->num_packs_in_base) &&
@@ -1159,18 +1129,21 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
* corresponding bitmap (or one wasn't requested).
*/
if (!want_bitmap)
- clear_midx_files_ext(object_dir, "bitmap", NULL);
+ clear_midx_files_ext(source, "bitmap", NULL);
+ result = 0;
goto cleanup;
}
}
- if (ctx.incremental && !ctx.nr)
+ if (ctx.incremental && !ctx.nr) {
+ result = 0;
goto cleanup; /* nothing to do */
+ }
if (preferred_pack_name) {
- ctx.preferred_pack_idx = -1;
+ ctx.preferred_pack_idx = NO_PREFERRED_PACK;
- for (i = 0; i < ctx.nr; i++) {
+ for (size_t i = 0; i < ctx.nr; i++) {
if (!cmp_idx_or_pack_name(preferred_pack_name,
ctx.info[i].pack_name)) {
ctx.preferred_pack_idx = i;
@@ -1178,14 +1151,21 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
}
}
- if (ctx.preferred_pack_idx == -1)
+ if (ctx.preferred_pack_idx == NO_PREFERRED_PACK)
warning(_("unknown preferred pack: '%s'"),
preferred_pack_name);
} else if (ctx.nr &&
(flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
- struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
+ struct packed_git *oldest = ctx.info[0].p;
ctx.preferred_pack_idx = 0;
+ /*
+ * Attempt opening the pack index to populate num_objects.
+ * Ignore failures, as they can be expected and are not
+ * fatal while selecting the preferred pack.
+ */
+ open_pack_index(oldest);
+
if (packs_to_drop && packs_to_drop->nr)
BUG("cannot write a MIDX bitmap during expiration");
@@ -1195,11 +1175,12 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
* pack-order has all of its objects selected from that pack
* (and not another pack containing a duplicate)
*/
- for (i = 1; i < ctx.nr; i++) {
+ for (size_t i = 1; i < ctx.nr; i++) {
struct packed_git *p = ctx.info[i].p;
if (!oldest->num_objects || p->mtime < oldest->mtime) {
oldest = p;
+ open_pack_index(oldest);
ctx.preferred_pack_idx = i;
}
}
@@ -1211,22 +1192,26 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
* objects to resolve, so the preferred value doesn't
* matter.
*/
- ctx.preferred_pack_idx = -1;
+ ctx.preferred_pack_idx = NO_PREFERRED_PACK;
}
} else {
/*
* otherwise don't mark any pack as preferred to avoid
* interfering with expiration logic below
*/
- ctx.preferred_pack_idx = -1;
+ ctx.preferred_pack_idx = NO_PREFERRED_PACK;
}
- if (ctx.preferred_pack_idx > -1) {
+ if (ctx.preferred_pack_idx != NO_PREFERRED_PACK) {
struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
+
+ if (open_pack_index(preferred))
+ die(_("failed to open preferred pack %s"),
+ ctx.info[ctx.preferred_pack_idx].pack_name);
+
if (!preferred->num_objects) {
error(_("cannot select preferred pack %s with no objects"),
preferred->pack_name);
- result = 1;
goto cleanup;
}
}
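The preferred-pack index maintained here backs the `--preferred-pack` option of `git multi-pack-index write`, which decides which pack wins when an object is duplicated across packs; a usage sketch (the pack name is illustrative):

```bash
# Write a MIDX plus bitmap, resolving duplicates in favor of one pack.
git multi-pack-index write --bitmap \
	--preferred-pack=pack-1234abcd.pack
```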
@@ -1234,7 +1219,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
compute_sorted_entries(&ctx, start_pack);
ctx.large_offsets_needed = 0;
- for (i = 0; i < ctx.entries_nr; i++) {
+ for (size_t i = 0; i < ctx.entries_nr; i++) {
if (ctx.entries[i].offset > 0x7fffffff)
ctx.num_large_offsets++;
if (ctx.entries[i].offset > 0xffffffff)
@@ -1244,10 +1229,10 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
QSORT(ctx.info, ctx.nr, pack_info_compare);
if (packs_to_drop && packs_to_drop->nr) {
- int drop_index = 0;
+ size_t drop_index = 0;
int missing_drops = 0;
- for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
+ for (size_t i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
int cmp = strcmp(ctx.info[i].pack_name,
packs_to_drop->items[drop_index].string);
@@ -1265,10 +1250,8 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
}
}
- if (missing_drops) {
- result = 1;
+ if (missing_drops)
goto cleanup;
- }
}
/*
@@ -1278,7 +1261,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
* pack_perm[old_id] = new_id
*/
ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
- for (i = 0; i < ctx.nr; i++) {
+ for (size_t i = 0; i < ctx.nr; i++) {
if (ctx.info[i].expired) {
dropped_packs++;
ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
@@ -1287,7 +1270,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
}
}
- for (i = 0; i < ctx.nr; i++) {
+ for (size_t i = 0; i < ctx.nr; i++) {
if (ctx.info[i].expired)
continue;
pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
@@ -1314,7 +1297,6 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
if (ctx.nr - dropped_packs == 0) {
error(_("no pack files to index."));
- result = 1;
goto cleanup;
}
@@ -1327,20 +1309,20 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
if (ctx.incremental) {
struct strbuf lock_name = STRBUF_INIT;
- get_midx_chain_filename(&lock_name, object_dir);
+ get_midx_chain_filename(source, &lock_name);
hold_lock_file_for_update(&lk, lock_name.buf, LOCK_DIE_ON_ERROR);
strbuf_release(&lock_name);
incr = mks_tempfile_m(midx_name.buf, 0444);
if (!incr) {
error(_("unable to create temporary MIDX layer"));
- return -1;
+ goto cleanup;
}
if (adjust_shared_perm(r, get_tempfile_path(incr))) {
error(_("unable to adjust shared permissions for '%s'"),
get_tempfile_path(incr));
- return -1;
+ goto cleanup;
}
f = hashfd(r->hash_algo, get_tempfile_fd(incr),
@@ -1390,7 +1372,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
if (flags & MIDX_WRITE_REV_INDEX &&
git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
- write_midx_reverse_index(&ctx, object_dir, midx_hash);
+ write_midx_reverse_index(&ctx, midx_hash);
if (flags & MIDX_WRITE_BITMAP) {
struct packing_data pdata;
@@ -1413,11 +1395,10 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
FREE_AND_NULL(ctx.entries);
ctx.entries_nr = 0;
- if (write_midx_bitmap(&ctx, object_dir,
+ if (write_midx_bitmap(&ctx,
midx_hash, &pdata, commits, commits_nr,
flags) < 0) {
error(_("could not write multi-pack bitmap"));
- result = 1;
clear_packing_data(&pdata);
free(commits);
goto cleanup;
@@ -1431,6 +1412,9 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
* have been freed in the previous if block.
*/
+ if (ctx.num_multi_pack_indexes_before == UINT32_MAX)
+ die(_("too many multi-pack-indexes"));
+
CALLOC_ARRAY(keep_hashes, ctx.num_multi_pack_indexes_before + 1);
if (ctx.incremental) {
@@ -1440,18 +1424,18 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
if (!chainf) {
error_errno(_("unable to open multi-pack-index chain file"));
- return -1;
+ goto cleanup;
}
if (link_midx_to_chain(ctx.base_midx) < 0)
- return -1;
+ goto cleanup;
- get_split_midx_filename_ext(r->hash_algo, &final_midx_name,
- object_dir, midx_hash, MIDX_EXT_MIDX);
+ get_split_midx_filename_ext(source, &final_midx_name,
+ midx_hash, MIDX_EXT_MIDX);
if (rename_tempfile(&incr, final_midx_name.buf) < 0) {
error_errno(_("unable to rename new multi-pack-index layer"));
- return -1;
+ goto cleanup;
}
strbuf_release(&final_midx_name);
@@ -1459,7 +1443,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
keep_hashes[ctx.num_multi_pack_indexes_before] =
xstrdup(hash_to_hex_algop(midx_hash, r->hash_algo));
- for (i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
+ for (uint32_t i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
uint32_t j = ctx.num_multi_pack_indexes_before - i - 1;
keep_hashes[j] = xstrdup(hash_to_hex_algop(get_midx_checksum(m),
@@ -1467,7 +1451,7 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
m = m->base_midx;
}
- for (i = 0; i < ctx.num_multi_pack_indexes_before + 1; i++)
+ for (uint32_t i = 0; i <= ctx.num_multi_pack_indexes_before; i++)
fprintf(get_lock_file_fp(&lk), "%s\n", keep_hashes[i]);
} else {
keep_hashes[ctx.num_multi_pack_indexes_before] =
@@ -1480,12 +1464,13 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
if (commit_lock_file(&lk) < 0)
die_errno(_("could not write multi-pack-index"));
- clear_midx_files(r, object_dir, keep_hashes,
+ clear_midx_files(source, keep_hashes,
ctx.num_multi_pack_indexes_before + 1,
ctx.incremental);
+ result = 0;
cleanup:
- for (i = 0; i < ctx.nr; i++) {
+ for (size_t i = 0; i < ctx.nr; i++) {
if (ctx.info[i].p) {
close_pack(ctx.info[i].p);
free(ctx.info[i].p);
@@ -1498,7 +1483,7 @@ cleanup:
free(ctx.pack_perm);
free(ctx.pack_order);
if (keep_hashes) {
- for (i = 0; i < ctx.num_multi_pack_indexes_before + 1; i++)
+ for (uint32_t i = 0; i <= ctx.num_multi_pack_indexes_before; i++)
free((char *)keep_hashes[i]);
free(keep_hashes);
}
@@ -1509,29 +1494,29 @@ cleanup:
return result;
}
-int write_midx_file(struct repository *r, const char *object_dir,
+int write_midx_file(struct odb_source *source,
const char *preferred_pack_name,
const char *refs_snapshot, unsigned flags)
{
- return write_midx_internal(r, object_dir, NULL, NULL,
+ return write_midx_internal(source, NULL, NULL,
preferred_pack_name, refs_snapshot,
flags);
}
-int write_midx_file_only(struct repository *r, const char *object_dir,
+int write_midx_file_only(struct odb_source *source,
struct string_list *packs_to_include,
const char *preferred_pack_name,
const char *refs_snapshot, unsigned flags)
{
- return write_midx_internal(r, object_dir, packs_to_include, NULL,
+ return write_midx_internal(source, packs_to_include, NULL,
preferred_pack_name, refs_snapshot, flags);
}
-int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
+int expire_midx_packs(struct odb_source *source, unsigned flags)
{
uint32_t i, *count, result = 0;
struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
- struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+ struct multi_pack_index *m = get_multi_pack_index(source);
struct progress *progress = NULL;
if (!m)
@@ -1544,7 +1529,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
if (flags & MIDX_PROGRESS)
progress = start_delayed_progress(
- r,
+ source->odb->repo,
_("Counting referenced objects"),
m->num_objects);
for (i = 0; i < m->num_objects; i++) {
@@ -1556,7 +1541,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
if (flags & MIDX_PROGRESS)
progress = start_delayed_progress(
- r,
+ source->odb->repo,
_("Finding and deleting unreferenced packfiles"),
m->num_packs);
for (i = 0; i < m->num_packs; i++) {
@@ -1566,7 +1551,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
if (count[i])
continue;
- if (prepare_midx_pack(r, m, i))
+ if (prepare_midx_pack(m, i))
continue;
if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
@@ -1584,7 +1569,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
free(count);
if (packs_to_drop.nr)
- result = write_midx_internal(r, object_dir, NULL,
+ result = write_midx_internal(source, NULL,
&packs_to_drop, NULL, NULL, flags);
string_list_clear(&packs_to_drop, 0);
@@ -1612,13 +1597,12 @@ static int compare_by_mtime(const void *a_, const void *b_)
return 0;
}
-static int want_included_pack(struct repository *r,
- struct multi_pack_index *m,
+static int want_included_pack(struct multi_pack_index *m,
int pack_kept_objects,
uint32_t pack_int_id)
{
struct packed_git *p;
- if (prepare_midx_pack(r, m, pack_int_id))
+ if (prepare_midx_pack(m, pack_int_id))
return 0;
p = m->packs[pack_int_id];
if (!pack_kept_objects && p->pack_keep)
@@ -1640,7 +1624,7 @@ static void fill_included_packs_all(struct repository *r,
repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
for (i = 0; i < m->num_packs; i++) {
- if (!want_included_pack(r, m, pack_kept_objects, i))
+ if (!want_included_pack(m, pack_kept_objects, i))
continue;
include_pack[i] = 1;
@@ -1664,7 +1648,7 @@ static void fill_included_packs_batch(struct repository *r,
for (i = 0; i < m->num_packs; i++) {
pack_info[i].pack_int_id = i;
- if (prepare_midx_pack(r, m, i))
+ if (prepare_midx_pack(m, i))
continue;
pack_info[i].mtime = m->packs[i]->mtime;
@@ -1683,7 +1667,7 @@ static void fill_included_packs_batch(struct repository *r,
struct packed_git *p = m->packs[pack_int_id];
uint64_t expected_size;
- if (!want_included_pack(r, m, pack_kept_objects, pack_int_id))
+ if (!want_included_pack(m, pack_kept_objects, pack_int_id))
continue;
/*
@@ -1710,14 +1694,15 @@ static void fill_included_packs_batch(struct repository *r,
free(pack_info);
}
-int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
+int midx_repack(struct odb_source *source, size_t batch_size, unsigned flags)
{
+ struct repository *r = source->odb->repo;
int result = 0;
uint32_t i, packs_to_repack = 0;
unsigned char *include_pack;
struct child_process cmd = CHILD_PROCESS_INIT;
FILE *cmd_in;
- struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+ struct multi_pack_index *m = get_multi_pack_index(source);
/*
* When updating the default for these configuration
@@ -1751,7 +1736,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
strvec_push(&cmd.args, "pack-objects");
- strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);
+ strvec_pushf(&cmd.args, "%s/pack/pack", source->path);
if (delta_base_offset)
strvec_push(&cmd.args, "--delta-base-offset");
@@ -1792,7 +1777,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
goto cleanup;
}
- result = write_midx_internal(r, object_dir, NULL, NULL, NULL, NULL,
+ result = write_midx_internal(source, NULL, NULL, NULL, NULL,
flags);
cleanup:
diff --git a/midx.c b/midx.c
index 7d407682e6..7726c13d7e 100644
--- a/midx.c
+++ b/midx.c
@@ -16,9 +16,9 @@
#define MIDX_PACK_ERROR ((void *)(intptr_t)-1)
int midx_checksum_valid(struct multi_pack_index *m);
-void clear_midx_files_ext(const char *object_dir, const char *ext,
+void clear_midx_files_ext(struct odb_source *source, const char *ext,
const char *keep_hash);
-void clear_incremental_midx_files_ext(const char *object_dir, const char *ext,
+void clear_incremental_midx_files_ext(struct odb_source *source, const char *ext,
char **keep_hashes,
uint32_t hashes_nr);
int cmp_idx_or_pack_name(const char *idx_or_pack_name,
@@ -26,22 +26,20 @@ int cmp_idx_or_pack_name(const char *idx_or_pack_name,
const unsigned char *get_midx_checksum(struct multi_pack_index *m)
{
- return m->data + m->data_len - m->repo->hash_algo->rawsz;
+ return m->data + m->data_len - m->source->odb->repo->hash_algo->rawsz;
}
-void get_midx_filename(const struct git_hash_algo *hash_algo,
- struct strbuf *out, const char *object_dir)
+void get_midx_filename(struct odb_source *source, struct strbuf *out)
{
- get_midx_filename_ext(hash_algo, out, object_dir, NULL, NULL);
+ get_midx_filename_ext(source, out, NULL, NULL);
}
-void get_midx_filename_ext(const struct git_hash_algo *hash_algo,
- struct strbuf *out, const char *object_dir,
+void get_midx_filename_ext(struct odb_source *source, struct strbuf *out,
const unsigned char *hash, const char *ext)
{
- strbuf_addf(out, "%s/pack/multi-pack-index", object_dir);
+ strbuf_addf(out, "%s/pack/multi-pack-index", source->path);
if (ext)
- strbuf_addf(out, "-%s.%s", hash_to_hex_algop(hash, hash_algo), ext);
+ strbuf_addf(out, "-%s.%s", hash_to_hex_algop(hash, source->odb->repo->hash_algo), ext);
}
static int midx_read_oid_fanout(const unsigned char *chunk_start,
@@ -95,11 +93,10 @@ static int midx_read_object_offsets(const unsigned char *chunk_start,
return 0;
}
-static struct multi_pack_index *load_multi_pack_index_one(struct repository *r,
- const char *object_dir,
- const char *midx_name,
- int local)
+static struct multi_pack_index *load_multi_pack_index_one(struct odb_source *source,
+ const char *midx_name)
{
+ struct repository *r = source->odb->repo;
struct multi_pack_index *m = NULL;
int fd;
struct stat st;
@@ -129,11 +126,10 @@ static struct multi_pack_index *load_multi_pack_index_one(struct repository *r,
midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
close(fd);
- FLEX_ALLOC_STR(m, object_dir, object_dir);
+ CALLOC_ARRAY(m, 1);
m->data = midx_map;
m->data_len = midx_size;
- m->local = local;
- m->repo = r;
+ m->source = source;
m->signature = get_be32(m->data);
if (m->signature != MIDX_SIGNATURE)
@@ -224,24 +220,23 @@ cleanup_fail:
return NULL;
}
-void get_midx_chain_dirname(struct strbuf *buf, const char *object_dir)
+void get_midx_chain_dirname(struct odb_source *source, struct strbuf *buf)
{
- strbuf_addf(buf, "%s/pack/multi-pack-index.d", object_dir);
+ strbuf_addf(buf, "%s/pack/multi-pack-index.d", source->path);
}
-void get_midx_chain_filename(struct strbuf *buf, const char *object_dir)
+void get_midx_chain_filename(struct odb_source *source, struct strbuf *buf)
{
- get_midx_chain_dirname(buf, object_dir);
+ get_midx_chain_dirname(source, buf);
strbuf_addstr(buf, "/multi-pack-index-chain");
}
-void get_split_midx_filename_ext(const struct git_hash_algo *hash_algo,
- struct strbuf *buf, const char *object_dir,
+void get_split_midx_filename_ext(struct odb_source *source, struct strbuf *buf,
const unsigned char *hash, const char *ext)
{
- get_midx_chain_dirname(buf, object_dir);
+ get_midx_chain_dirname(source, buf);
strbuf_addf(buf, "/multi-pack-index-%s.%s",
- hash_to_hex_algop(hash, hash_algo), ext);
+ hash_to_hex_algop(hash, source->odb->repo->hash_algo), ext);
}
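For orientation, the paths these helpers construct below an object source, shown for the primary object directory (the hashes in the real file names are checksums of the respective layers):

```bash
# Non-incremental MIDX and its extensions:
ls .git/objects/pack/multi-pack-index
ls .git/objects/pack/multi-pack-index-*.bitmap
# Incremental layers and their chain file:
ls .git/objects/pack/multi-pack-index.d/multi-pack-index-chain
ls .git/objects/pack/multi-pack-index.d/multi-pack-index-*.midx
```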
static int open_multi_pack_index_chain(const struct git_hash_algo *hash_algo,
@@ -297,19 +292,18 @@ static int add_midx_to_chain(struct multi_pack_index *midx,
return 1;
}
-static struct multi_pack_index *load_midx_chain_fd_st(struct repository *r,
- const char *object_dir,
- int local,
+static struct multi_pack_index *load_midx_chain_fd_st(struct odb_source *source,
int fd, struct stat *st,
int *incomplete_chain)
{
+ const struct git_hash_algo *hash_algo = source->odb->repo->hash_algo;
struct multi_pack_index *midx_chain = NULL;
struct strbuf buf = STRBUF_INIT;
int valid = 1;
uint32_t i, count;
FILE *fp = xfdopen(fd, "r");
- count = st->st_size / (r->hash_algo->hexsz + 1);
+ count = st->st_size / (hash_algo->hexsz + 1);
for (i = 0; i < count; i++) {
struct multi_pack_index *m;
@@ -318,7 +312,7 @@ static struct multi_pack_index *load_midx_chain_fd_st(struct repository *r,
if (strbuf_getline_lf(&buf, fp) == EOF)
break;
- if (get_oid_hex_algop(buf.buf, &layer, r->hash_algo)) {
+ if (get_oid_hex_algop(buf.buf, &layer, hash_algo)) {
warning(_("invalid multi-pack-index chain: line '%s' "
"not a hash"),
buf.buf);
@@ -329,9 +323,9 @@ static struct multi_pack_index *load_midx_chain_fd_st(struct repository *r,
valid = 0;
strbuf_reset(&buf);
- get_split_midx_filename_ext(r->hash_algo, &buf, object_dir,
+ get_split_midx_filename_ext(source, &buf,
layer.hash, MIDX_EXT_MIDX);
- m = load_multi_pack_index_one(r, object_dir, buf.buf, local);
+ m = load_multi_pack_index_one(source, buf.buf);
if (m) {
if (add_midx_to_chain(m, midx_chain)) {
@@ -354,40 +348,34 @@ static struct multi_pack_index *load_midx_chain_fd_st(struct repository *r,
return midx_chain;
}
-static struct multi_pack_index *load_multi_pack_index_chain(struct repository *r,
- const char *object_dir,
- int local)
+static struct multi_pack_index *load_multi_pack_index_chain(struct odb_source *source)
{
struct strbuf chain_file = STRBUF_INIT;
struct stat st;
int fd;
struct multi_pack_index *m = NULL;
- get_midx_chain_filename(&chain_file, object_dir);
- if (open_multi_pack_index_chain(r->hash_algo, chain_file.buf, &fd, &st)) {
+ get_midx_chain_filename(source, &chain_file);
+ if (open_multi_pack_index_chain(source->odb->repo->hash_algo, chain_file.buf, &fd, &st)) {
int incomplete;
/* ownership of fd is taken over by load function */
- m = load_midx_chain_fd_st(r, object_dir, local, fd, &st,
- &incomplete);
+ m = load_midx_chain_fd_st(source, fd, &st, &incomplete);
}
strbuf_release(&chain_file);
return m;
}
-struct multi_pack_index *load_multi_pack_index(struct repository *r,
- const char *object_dir,
- int local)
+struct multi_pack_index *load_multi_pack_index(struct odb_source *source)
{
struct strbuf midx_name = STRBUF_INIT;
struct multi_pack_index *m;
- get_midx_filename(r->hash_algo, &midx_name, object_dir);
+ get_midx_filename(source, &midx_name);
- m = load_multi_pack_index_one(r, object_dir,
- midx_name.buf, local);
+ m = load_multi_pack_index_one(source, midx_name.buf);
if (!m)
- m = load_multi_pack_index_chain(r, object_dir, local);
+ m = load_multi_pack_index_chain(source);
strbuf_release(&midx_name);
@@ -450,9 +438,10 @@ static uint32_t midx_for_pack(struct multi_pack_index **_m,
return pack_int_id - m->num_packs_in_base;
}
-int prepare_midx_pack(struct repository *r, struct multi_pack_index *m,
+int prepare_midx_pack(struct multi_pack_index *m,
uint32_t pack_int_id)
{
+ struct repository *r = m->source->odb->repo;
struct strbuf pack_name = STRBUF_INIT;
struct strbuf key = STRBUF_INIT;
struct packed_git *p;
@@ -464,7 +453,7 @@ int prepare_midx_pack(struct repository *r, struct multi_pack_index *m,
if (m->packs[pack_int_id])
return 0;
- strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir,
+ strbuf_addf(&pack_name, "%s/pack/%s", m->source->path,
m->pack_names[pack_int_id]);
/* pack_map holds the ".pack" name, but we have the .idx */
@@ -475,7 +464,8 @@ int prepare_midx_pack(struct repository *r, struct multi_pack_index *m,
strhash(key.buf), key.buf,
struct packed_git, packmap_ent);
if (!p) {
- p = add_packed_git(r, pack_name.buf, pack_name.len, m->local);
+ p = add_packed_git(r, pack_name.buf, pack_name.len,
+ m->source->local);
if (p) {
install_packed_git(r, p);
list_add_tail(&p->mru, &r->objects->packed_git_mru);
@@ -507,7 +497,7 @@ struct packed_git *nth_midxed_pack(struct multi_pack_index *m,
#define MIDX_CHUNK_BITMAPPED_PACKS_WIDTH (2 * sizeof(uint32_t))
-int nth_bitmapped_pack(struct repository *r, struct multi_pack_index *m,
+int nth_bitmapped_pack(struct multi_pack_index *m,
struct bitmapped_pack *bp, uint32_t pack_int_id)
{
uint32_t local_pack_int_id = midx_for_pack(&m, pack_int_id);
@@ -515,7 +505,7 @@ int nth_bitmapped_pack(struct repository *r, struct multi_pack_index *m,
if (!m->chunk_bitmapped_packs)
return error(_("MIDX does not contain the BTMP chunk"));
- if (prepare_midx_pack(r, m, pack_int_id))
+ if (prepare_midx_pack(m, pack_int_id))
return error(_("could not load bitmapped pack %"PRIu32), pack_int_id);
bp->p = m->packs[local_pack_int_id];
@@ -534,7 +524,8 @@ int bsearch_one_midx(const struct object_id *oid, struct multi_pack_index *m,
uint32_t *result)
{
int ret = bsearch_hash(oid->hash, m->chunk_oid_fanout,
- m->chunk_oid_lookup, m->repo->hash_algo->rawsz,
+ m->chunk_oid_lookup,
+ m->source->odb->repo->hash_algo->rawsz,
result);
if (result)
*result += m->num_objects_in_base;
@@ -565,7 +556,7 @@ struct object_id *nth_midxed_object_oid(struct object_id *oid,
n = midx_for_object(&m, n);
oidread(oid, m->chunk_oid_lookup + st_mult(m->hash_len, n),
- m->repo->hash_algo);
+ m->source->odb->repo->hash_algo);
return oid;
}
@@ -600,10 +591,9 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
}
-int fill_midx_entry(struct repository *r,
+int fill_midx_entry(struct multi_pack_index *m,
const struct object_id *oid,
- struct pack_entry *e,
- struct multi_pack_index *m)
+ struct pack_entry *e)
{
uint32_t pos;
uint32_t pack_int_id;
@@ -615,7 +605,7 @@ int fill_midx_entry(struct repository *r,
midx_for_object(&m, pos);
pack_int_id = nth_midxed_pack_int_id(m, pos);
- if (prepare_midx_pack(r, m, pack_int_id))
+ if (prepare_midx_pack(m, pack_int_id))
return 0;
p = m->packs[pack_int_id - m->num_packs_in_base];
@@ -723,7 +713,7 @@ int midx_preferred_pack(struct multi_pack_index *m, uint32_t *pack_int_id)
return 0;
}
-int prepare_multi_pack_index_one(struct odb_source *source, int local)
+int prepare_multi_pack_index_one(struct odb_source *source)
{
struct repository *r = source->odb->repo;
@@ -734,14 +724,14 @@ int prepare_multi_pack_index_one(struct odb_source *source, int local)
if (source->midx)
return 1;
- source->midx = load_multi_pack_index(r, source->path, local);
+ source->midx = load_multi_pack_index(source);
return !!source->midx;
}
int midx_checksum_valid(struct multi_pack_index *m)
{
- return hashfile_checksum_valid(m->repo->hash_algo,
+ return hashfile_checksum_valid(m->source->odb->repo->hash_algo,
m->data, m->data_len);
}
@@ -768,7 +758,7 @@ static void clear_midx_file_ext(const char *full_path, size_t full_path_len UNUS
die_errno(_("failed to remove %s"), full_path);
}
-void clear_midx_files_ext(const char *object_dir, const char *ext,
+void clear_midx_files_ext(struct odb_source *source, const char *ext,
const char *keep_hash)
{
struct clear_midx_data data;
@@ -782,7 +772,7 @@ void clear_midx_files_ext(const char *object_dir, const char *ext,
}
data.ext = ext;
- for_each_file_in_pack_dir(object_dir,
+ for_each_file_in_pack_dir(source->path,
clear_midx_file_ext,
&data);
@@ -791,7 +781,7 @@ void clear_midx_files_ext(const char *object_dir, const char *ext,
free(data.keep);
}
-void clear_incremental_midx_files_ext(const char *object_dir, const char *ext,
+void clear_incremental_midx_files_ext(struct odb_source *source, const char *ext,
char **keep_hashes,
uint32_t hashes_nr)
{
@@ -807,7 +797,7 @@ void clear_incremental_midx_files_ext(const char *object_dir, const char *ext,
data.keep_nr = hashes_nr;
data.ext = ext;
- for_each_file_in_pack_subdir(object_dir, "multi-pack-index.d",
+ for_each_file_in_pack_subdir(source->path, "multi-pack-index.d",
clear_midx_file_ext, &data);
for (i = 0; i < hashes_nr; i++)
@@ -819,7 +809,7 @@ void clear_midx_file(struct repository *r)
{
struct strbuf midx = STRBUF_INIT;
- get_midx_filename(r->hash_algo, &midx, r->objects->sources->path);
+ get_midx_filename(r->objects->sources, &midx);
if (r->objects) {
struct odb_source *source;
@@ -834,8 +824,8 @@ void clear_midx_file(struct repository *r)
if (remove_path(midx.buf))
die(_("failed to clear multi-pack-index at %s"), midx.buf);
- clear_midx_files_ext(r->objects->sources->path, MIDX_EXT_BITMAP, NULL);
- clear_midx_files_ext(r->objects->sources->path, MIDX_EXT_REV, NULL);
+ clear_midx_files_ext(r->objects->sources, MIDX_EXT_BITMAP, NULL);
+ clear_midx_files_ext(r->objects->sources, MIDX_EXT_REV, NULL);
strbuf_release(&midx);
}
@@ -879,12 +869,13 @@ static int compare_pair_pos_vs_id(const void *_a, const void *_b)
display_progress(progress, _n); \
} while (0)
-int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags)
+int verify_midx_file(struct odb_source *source, unsigned flags)
{
+ struct repository *r = source->odb->repo;
struct pair_pos_vs_id *pairs = NULL;
uint32_t i;
struct progress *progress = NULL;
- struct multi_pack_index *m = load_multi_pack_index(r, object_dir, 1);
+ struct multi_pack_index *m = load_multi_pack_index(source);
struct multi_pack_index *curr;
verify_midx_error = 0;
@@ -893,7 +884,7 @@ int verify_midx_file(struct repository *r, const char *object_dir, unsigned flag
struct stat sb;
struct strbuf filename = STRBUF_INIT;
- get_midx_filename(r->hash_algo, &filename, object_dir);
+ get_midx_filename(source, &filename);
if (!stat(filename.buf, &sb)) {
error(_("multi-pack-index file exists, but failed to parse"));
@@ -911,7 +902,7 @@ int verify_midx_file(struct repository *r, const char *object_dir, unsigned flag
_("Looking for referenced packfiles"),
m->num_packs + m->num_packs_in_base);
for (i = 0; i < m->num_packs + m->num_packs_in_base; i++) {
- if (prepare_midx_pack(r, m, i))
+ if (prepare_midx_pack(m, i))
midx_report("failed to load pack in position %d", i);
display_progress(progress, i + 1);
@@ -988,7 +979,7 @@ int verify_midx_file(struct repository *r, const char *object_dir, unsigned flag
nth_midxed_object_oid(&oid, m, pairs[i].pos);
- if (!fill_midx_entry(r, &oid, &e, m)) {
+ if (!fill_midx_entry(m, &oid, &e)) {
midx_report(_("failed to load pack entry for oid[%d] = %s"),
pairs[i].pos, oid_to_hex(&oid));
continue;
diff --git a/midx.h b/midx.h
index 076382de8a..e241d2d690 100644
--- a/midx.h
+++ b/midx.h
@@ -35,6 +35,8 @@ struct odb_source;
"GIT_TEST_MULTI_PACK_INDEX_WRITE_INCREMENTAL"
struct multi_pack_index {
+ struct odb_source *source;
+
const unsigned char *data;
size_t data_len;
@@ -50,7 +52,6 @@ struct multi_pack_index {
uint32_t num_objects;
int preferred_pack_idx;
- int local;
int has_chain;
const unsigned char *chunk_pack_names;
@@ -71,10 +72,6 @@ struct multi_pack_index {
const char **pack_names;
struct packed_git **packs;
-
- struct repository *repo;
-
- char object_dir[FLEX_ARRAY];
};
#define MIDX_PROGRESS (1 << 0)
@@ -89,24 +86,19 @@ struct multi_pack_index {
#define MIDX_EXT_MIDX "midx"
const unsigned char *get_midx_checksum(struct multi_pack_index *m);
-void get_midx_filename(const struct git_hash_algo *hash_algo,
- struct strbuf *out, const char *object_dir);
-void get_midx_filename_ext(const struct git_hash_algo *hash_algo,
- struct strbuf *out, const char *object_dir,
+void get_midx_filename(struct odb_source *source, struct strbuf *out);
+void get_midx_filename_ext(struct odb_source *source, struct strbuf *out,
const unsigned char *hash, const char *ext);
-void get_midx_chain_dirname(struct strbuf *buf, const char *object_dir);
-void get_midx_chain_filename(struct strbuf *buf, const char *object_dir);
-void get_split_midx_filename_ext(const struct git_hash_algo *hash_algo,
- struct strbuf *buf, const char *object_dir,
+void get_midx_chain_dirname(struct odb_source *source, struct strbuf *out);
+void get_midx_chain_filename(struct odb_source *source, struct strbuf *out);
+void get_split_midx_filename_ext(struct odb_source *source, struct strbuf *buf,
const unsigned char *hash, const char *ext);
-struct multi_pack_index *load_multi_pack_index(struct repository *r,
- const char *object_dir,
- int local);
-int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id);
+struct multi_pack_index *load_multi_pack_index(struct odb_source *source);
+int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id);
struct packed_git *nth_midxed_pack(struct multi_pack_index *m,
uint32_t pack_int_id);
-int nth_bitmapped_pack(struct repository *r, struct multi_pack_index *m,
+int nth_bitmapped_pack(struct multi_pack_index *m,
struct bitmapped_pack *bp, uint32_t pack_int_id);
int bsearch_one_midx(const struct object_id *oid, struct multi_pack_index *m,
uint32_t *result);
@@ -118,27 +110,27 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos);
struct object_id *nth_midxed_object_oid(struct object_id *oid,
struct multi_pack_index *m,
uint32_t n);
-int fill_midx_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m);
+int fill_midx_entry(struct multi_pack_index *m, const struct object_id *oid, struct pack_entry *e);
int midx_contains_pack(struct multi_pack_index *m,
const char *idx_or_pack_name);
int midx_preferred_pack(struct multi_pack_index *m, uint32_t *pack_int_id);
-int prepare_multi_pack_index_one(struct odb_source *source, int local);
+int prepare_multi_pack_index_one(struct odb_source *source);
/*
* Variant of write_midx_file which writes a MIDX containing only the packs
* specified in packs_to_include.
*/
-int write_midx_file(struct repository *r, const char *object_dir,
+int write_midx_file(struct odb_source *source,
const char *preferred_pack_name, const char *refs_snapshot,
unsigned flags);
-int write_midx_file_only(struct repository *r, const char *object_dir,
+int write_midx_file_only(struct odb_source *source,
struct string_list *packs_to_include,
const char *preferred_pack_name,
const char *refs_snapshot, unsigned flags);
void clear_midx_file(struct repository *r);
-int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags);
-int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags);
-int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags);
+int verify_midx_file(struct odb_source *source, unsigned flags);
+int expire_midx_packs(struct odb_source *source, unsigned flags);
+int midx_repack(struct odb_source *source, size_t batch_size, unsigned flags);
void close_midx(struct multi_pack_index *m);
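
As a rough sketch of the new calling convention (not part of the patch itself): code that used to hand the MIDX layer a repository pointer, an object directory string, and a "local" flag now resolves a struct odb_source first and passes that single handle down. The helper name check_midx_for_dir() below is made up for illustration; odb_find_source_or_die(), verify_midx_file(), and MIDX_PROGRESS are taken from the headers in this diff.

#include "repository.h"
#include "odb.h"
#include "midx.h"

/* Hypothetical caller: verify the MIDX living in a given object directory. */
static int check_midx_for_dir(struct repository *r, const char *object_dir)
{
	/* Map the object directory onto its odb_source (dies if unknown). */
	struct odb_source *source = odb_find_source_or_die(r->objects, object_dir);

	/* The MIDX code now derives repository, path and "local" from the source. */
	return verify_midx_file(source, MIDX_PROGRESS);
}
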
diff --git a/object-file.c b/object-file.c
index 2bc36ab3ee..bc15af4245 100644
--- a/object-file.c
+++ b/object-file.c
@@ -674,7 +674,7 @@ static void close_loose_object(struct odb_source *source,
goto out;
if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
- fsync_loose_object_bulk_checkin(fd, filename);
+ fsync_loose_object_bulk_checkin(source->odb->transaction, fd, filename);
else if (fsync_object_files > 0)
fsync_or_die(fd, filename);
else
@@ -852,7 +852,7 @@ static int write_loose_object(struct odb_source *source,
static struct strbuf filename = STRBUF_INIT;
if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
- prepare_loose_object_bulk_checkin();
+ prepare_loose_object_bulk_checkin(source->odb->transaction);
odb_loose_path(source, &filename, oid);
@@ -941,7 +941,7 @@ int stream_loose_object(struct odb_source *source,
int hdrlen;
if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
- prepare_loose_object_bulk_checkin();
+ prepare_loose_object_bulk_checkin(source->odb->transaction);
/* Since oid is not determined, save tmp file to odb path. */
strbuf_addf(&filename, "%s/", source->path);
@@ -1253,18 +1253,26 @@ int index_fd(struct index_state *istate, struct object_id *oid,
* Call xsize_t() only when needed to avoid potentially unnecessary
* die() for large files.
*/
- if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(istate, path))
+ if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(istate, path)) {
ret = index_stream_convert_blob(istate, oid, fd, path, flags);
- else if (!S_ISREG(st->st_mode))
+ } else if (!S_ISREG(st->st_mode)) {
ret = index_pipe(istate, oid, fd, type, path, flags);
- else if ((st->st_size >= 0 && (size_t) st->st_size <= repo_settings_get_big_file_threshold(istate->repo)) ||
- type != OBJ_BLOB ||
- (path && would_convert_to_git(istate, path)))
+ } else if ((st->st_size >= 0 &&
+ (size_t)st->st_size <= repo_settings_get_big_file_threshold(istate->repo)) ||
+ type != OBJ_BLOB ||
+ (path && would_convert_to_git(istate, path))) {
ret = index_core(istate, oid, fd, xsize_t(st->st_size),
type, path, flags);
- else
- ret = index_blob_bulk_checkin(oid, fd, xsize_t(st->st_size), path,
- flags);
+ } else {
+ struct odb_transaction *transaction;
+
+ transaction = begin_odb_transaction(the_repository->objects);
+ ret = index_blob_bulk_checkin(transaction,
+ oid, fd, xsize_t(st->st_size),
+ path, flags);
+ end_odb_transaction(transaction);
+ }
+
close(fd);
return ret;
}
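
A minimal sketch of the transaction-handle pattern that index_fd() switches to above, assuming begin_odb_transaction(), end_odb_transaction(), and index_blob_bulk_checkin() have the signatures shown in this diff (declared in bulk-checkin.h in this series); stage_one_blob() is a hypothetical wrapper, not code from the patch.

#include "repository.h"
#include "bulk-checkin.h"

/* Hypothetical helper: stream one large blob into the ODB under a transaction. */
static int stage_one_blob(struct repository *r, struct object_id *oid,
			  int fd, size_t size, const char *path)
{
	/* Open a transaction on the repository's object database... */
	struct odb_transaction *transaction = begin_odb_transaction(r->objects);
	int ret;

	/* ...write the blob through the bulk-checkin machinery with that handle... */
	ret = index_blob_bulk_checkin(transaction, oid, fd, size, path, 0);

	/* ...and flush any deferred work (e.g. batched fsyncs) when it ends. */
	end_odb_transaction(transaction);
	return ret;
}
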
diff --git a/object-name.c b/object-name.c
index 732056ff5e..7774991d28 100644
--- a/object-name.c
+++ b/object-name.c
@@ -696,15 +696,14 @@ static inline char get_hex_char_from_oid(const struct object_id *oid,
return hex[oid->hash[pos >> 1] & 0xf];
}
-static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
+static int extend_abbrev_len(const struct object_id *oid,
+ struct min_abbrev_data *mad)
{
- struct min_abbrev_data *mad = cb_data;
-
unsigned int i = mad->init_len;
while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
i++;
- if (i < GIT_MAX_RAWSZ && i >= mad->cur_len)
+ if (mad->hex[i] && i >= mad->cur_len)
mad->cur_len = i + 1;
return 0;
@@ -1858,55 +1857,35 @@ int repo_get_oid_committish(struct repository *r,
const char *name,
struct object_id *oid)
{
- struct object_context unused;
- int ret = get_oid_with_context(r, name, GET_OID_COMMITTISH,
- oid, &unused);
- object_context_release(&unused);
- return ret;
+ return repo_get_oid_with_flags(r, name, oid, GET_OID_COMMITTISH);
}
int repo_get_oid_treeish(struct repository *r,
const char *name,
struct object_id *oid)
{
- struct object_context unused;
- int ret = get_oid_with_context(r, name, GET_OID_TREEISH,
- oid, &unused);
- object_context_release(&unused);
- return ret;
+ return repo_get_oid_with_flags(r, name, oid, GET_OID_TREEISH);
}
int repo_get_oid_commit(struct repository *r,
const char *name,
struct object_id *oid)
{
- struct object_context unused;
- int ret = get_oid_with_context(r, name, GET_OID_COMMIT,
- oid, &unused);
- object_context_release(&unused);
- return ret;
+ return repo_get_oid_with_flags(r, name, oid, GET_OID_COMMIT);
}
int repo_get_oid_tree(struct repository *r,
const char *name,
struct object_id *oid)
{
- struct object_context unused;
- int ret = get_oid_with_context(r, name, GET_OID_TREE,
- oid, &unused);
- object_context_release(&unused);
- return ret;
+ return repo_get_oid_with_flags(r, name, oid, GET_OID_TREE);
}
int repo_get_oid_blob(struct repository *r,
const char *name,
struct object_id *oid)
{
- struct object_context unused;
- int ret = get_oid_with_context(r, name, GET_OID_BLOB,
- oid, &unused);
- object_context_release(&unused);
- return ret;
+ return repo_get_oid_with_flags(r, name, oid, GET_OID_BLOB);
}
/* Must be called only when object_name:filename doesn't exist. */
diff --git a/object.c b/object.c
index c1553ee433..986114a6db 100644
--- a/object.c
+++ b/object.c
@@ -517,12 +517,11 @@ struct parsed_object_pool *parsed_object_pool_new(struct repository *repo)
memset(o, 0, sizeof(*o));
o->repo = repo;
- o->blob_state = allocate_alloc_state();
- o->tree_state = allocate_alloc_state();
- o->commit_state = allocate_alloc_state();
- o->tag_state = allocate_alloc_state();
- o->object_state = allocate_alloc_state();
-
+ o->blob_state = alloc_state_alloc();
+ o->tree_state = alloc_state_alloc();
+ o->commit_state = alloc_state_alloc();
+ o->tag_state = alloc_state_alloc();
+ o->object_state = alloc_state_alloc();
o->is_shallow = -1;
CALLOC_ARRAY(o->shallow_stat, 1);
@@ -573,16 +572,11 @@ void parsed_object_pool_clear(struct parsed_object_pool *o)
o->buffer_slab = NULL;
parsed_object_pool_reset_commit_grafts(o);
- clear_alloc_state(o->blob_state);
- clear_alloc_state(o->tree_state);
- clear_alloc_state(o->commit_state);
- clear_alloc_state(o->tag_state);
- clear_alloc_state(o->object_state);
+ alloc_state_free_and_null(&o->blob_state);
+ alloc_state_free_and_null(&o->tree_state);
+ alloc_state_free_and_null(&o->commit_state);
+ alloc_state_free_and_null(&o->tag_state);
+ alloc_state_free_and_null(&o->object_state);
stat_validity_clear(o->shallow_stat);
- FREE_AND_NULL(o->blob_state);
- FREE_AND_NULL(o->tree_state);
- FREE_AND_NULL(o->commit_state);
- FREE_AND_NULL(o->tag_state);
- FREE_AND_NULL(o->object_state);
FREE_AND_NULL(o->shallow_stat);
}
diff --git a/odb.c b/odb.c
index 2a92a018c4..75c443fe66 100644
--- a/odb.c
+++ b/odb.c
@@ -139,23 +139,21 @@ static void read_info_alternates(struct object_database *odb,
const char *relative_base,
int depth);
-static int link_alt_odb_entry(struct object_database *odb,
- const struct strbuf *entry,
- const char *relative_base,
- int depth,
- const char *normalized_objdir)
+static struct odb_source *link_alt_odb_entry(struct object_database *odb,
+ const char *dir,
+ const char *relative_base,
+ int depth)
{
- struct odb_source *alternate;
+ struct odb_source *alternate = NULL;
struct strbuf pathbuf = STRBUF_INIT;
struct strbuf tmp = STRBUF_INIT;
khiter_t pos;
- int ret = -1;
- if (!is_absolute_path(entry->buf) && relative_base) {
+ if (!is_absolute_path(dir) && relative_base) {
strbuf_realpath(&pathbuf, relative_base, 1);
strbuf_addch(&pathbuf, '/');
}
- strbuf_addbuf(&pathbuf, entry);
+ strbuf_addstr(&pathbuf, dir);
if (!strbuf_realpath(&tmp, pathbuf.buf, 0)) {
error(_("unable to normalize alternate object path: %s"),
@@ -171,11 +169,15 @@ static int link_alt_odb_entry(struct object_database *odb,
while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/')
strbuf_setlen(&pathbuf, pathbuf.len - 1);
- if (!alt_odb_usable(odb, &pathbuf, normalized_objdir, &pos))
+ strbuf_reset(&tmp);
+ strbuf_realpath(&tmp, odb->sources->path, 1);
+
+ if (!alt_odb_usable(odb, &pathbuf, tmp.buf, &pos))
goto error;
CALLOC_ARRAY(alternate, 1);
alternate->odb = odb;
+ alternate->local = false;
/* pathbuf.buf is already in r->objects->source_by_path */
alternate->path = strbuf_detach(&pathbuf, NULL);
@@ -188,11 +190,11 @@ static int link_alt_odb_entry(struct object_database *odb,
/* recursively add alternates */
read_info_alternates(odb, alternate->path, depth + 1);
- ret = 0;
+
error:
strbuf_release(&tmp);
strbuf_release(&pathbuf);
- return ret;
+ return alternate;
}
static const char *parse_alt_odb_entry(const char *string,
@@ -227,8 +229,7 @@ static const char *parse_alt_odb_entry(const char *string,
static void link_alt_odb_entries(struct object_database *odb, const char *alt,
int sep, const char *relative_base, int depth)
{
- struct strbuf objdirbuf = STRBUF_INIT;
- struct strbuf entry = STRBUF_INIT;
+ struct strbuf dir = STRBUF_INIT;
if (!alt || !*alt)
return;
@@ -239,17 +240,13 @@ static void link_alt_odb_entries(struct object_database *odb, const char *alt,
return;
}
- strbuf_realpath(&objdirbuf, odb->sources->path, 1);
-
while (*alt) {
- alt = parse_alt_odb_entry(alt, sep, &entry);
- if (!entry.len)
+ alt = parse_alt_odb_entry(alt, sep, &dir);
+ if (!dir.len)
continue;
- link_alt_odb_entry(odb, &entry,
- relative_base, depth, objdirbuf.buf);
+ link_alt_odb_entry(odb, dir.buf, relative_base, depth);
}
- strbuf_release(&entry);
- strbuf_release(&objdirbuf);
+ strbuf_release(&dir);
}
static void read_info_alternates(struct object_database *odb,
@@ -272,7 +269,7 @@ static void read_info_alternates(struct object_database *odb,
}
void odb_add_to_alternates_file(struct object_database *odb,
- const char *reference)
+ const char *dir)
{
struct lock_file lock = LOCK_INIT;
char *alts = repo_git_path(odb->repo, "objects/info/alternates");
@@ -289,7 +286,7 @@ void odb_add_to_alternates_file(struct object_database *odb,
struct strbuf line = STRBUF_INIT;
while (strbuf_getline(&line, in) != EOF) {
- if (!strcmp(reference, line.buf)) {
+ if (!strcmp(dir, line.buf)) {
found = 1;
break;
}
@@ -305,27 +302,24 @@ void odb_add_to_alternates_file(struct object_database *odb,
if (found) {
rollback_lock_file(&lock);
} else {
- fprintf_or_die(out, "%s\n", reference);
+ fprintf_or_die(out, "%s\n", dir);
if (commit_lock_file(&lock))
die_errno(_("unable to move new alternates file into place"));
if (odb->loaded_alternates)
- link_alt_odb_entries(odb, reference,
- '\n', NULL, 0);
+ link_alt_odb_entries(odb, dir, '\n', NULL, 0);
}
free(alts);
}
-void odb_add_to_alternates_memory(struct object_database *odb,
- const char *reference)
+struct odb_source *odb_add_to_alternates_memory(struct object_database *odb,
+ const char *dir)
{
/*
* Make sure alternates are initialized, or else our entry may be
* overwritten when they are.
*/
odb_prepare_alternates(odb);
-
- link_alt_odb_entries(odb, reference,
- '\n', NULL, 0);
+ return link_alt_odb_entry(odb, dir, NULL, 0);
}
struct odb_source *odb_set_temporary_primary_source(struct object_database *odb,
@@ -463,6 +457,12 @@ struct odb_source *odb_find_source(struct object_database *odb, const char *obj_
free(obj_dir_real);
strbuf_release(&odb_path_real);
+ return source;
+}
+
+struct odb_source *odb_find_source_or_die(struct object_database *odb, const char *obj_dir)
+{
+ struct odb_source *source = odb_find_source(odb, obj_dir);
if (!source)
die(_("could not find object directory matching %s"), obj_dir);
return source;
diff --git a/odb.h b/odb.h
index 3dfc66d75a..bd7374f92f 100644
--- a/odb.h
+++ b/odb.h
@@ -64,6 +64,14 @@ struct odb_source {
struct multi_pack_index *midx;
/*
+ * Indicates whether this is the local source of the owning
+ * repository, which is typically its ".git/objects" directory.
+ * This local object directory is usually where new objects are
+ * written.
+ */
+ bool local;
+
+ /*
* This is a temporary object store created by the tmp_objdir
* facility. Disable ref updates since the objects in the store
* might be discarded on rollback.
@@ -84,6 +92,7 @@ struct odb_source {
struct packed_git;
struct cached_object_entry;
+struct odb_transaction;
/*
* The object database encapsulates access to objects in a repository. It
@@ -95,6 +104,13 @@ struct object_database {
struct repository *repo;
/*
+ * State of the current object database transaction. Only one
+ * transaction may be pending at a time. This is NULL when no
+ * transaction is in progress.
+ */
+ struct odb_transaction *transaction;
+
+ /*
* Set of all object directories; the main directory is first (and
* cannot be NULL after initialization). Subsequent directories are
* alternates.
@@ -178,11 +194,14 @@ struct object_database *odb_new(struct repository *repo);
void odb_clear(struct object_database *o);
/*
- * Find source by its object directory path. Dies in case the source couldn't
- * be found.
+ * Find source by its object directory path. Returns a `NULL` pointer in case
+ * the source could not be found.
*/
struct odb_source *odb_find_source(struct object_database *odb, const char *obj_dir);
+/* Same as `odb_find_source()`, but dies in case the source doesn't exist. */
+struct odb_source *odb_find_source_or_die(struct object_database *odb, const char *obj_dir);
+
/*
* Replace the current writable object directory with the specified temporary
* object directory; returns the former primary source.
@@ -257,8 +276,8 @@ void odb_add_to_alternates_file(struct object_database *odb,
* recursive alternates it points to), but do not modify the on-disk alternates
* file.
*/
-void odb_add_to_alternates_memory(struct object_database *odb,
- const char *dir);
+struct odb_source *odb_add_to_alternates_memory(struct object_database *odb,
+ const char *dir);
/*
* Read an object from the database. Returns the object data and assigns object
diff --git a/pack-bitmap.c b/pack-bitmap.c
index d14421ee20..058bdb5d7d 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -216,7 +216,7 @@ static uint32_t bitmap_num_objects(struct bitmap_index *index)
static struct repository *bitmap_repo(struct bitmap_index *bitmap_git)
{
if (bitmap_is_midx(bitmap_git))
- return bitmap_git->midx->repo;
+ return bitmap_git->midx->source->odb->repo;
return bitmap_git->pack->repo;
}
@@ -418,13 +418,12 @@ char *midx_bitmap_filename(struct multi_pack_index *midx)
{
struct strbuf buf = STRBUF_INIT;
if (midx->has_chain)
- get_split_midx_filename_ext(midx->repo->hash_algo, &buf,
- midx->object_dir,
+ get_split_midx_filename_ext(midx->source, &buf,
get_midx_checksum(midx),
MIDX_EXT_BITMAP);
else
- get_midx_filename_ext(midx->repo->hash_algo, &buf,
- midx->object_dir, get_midx_checksum(midx),
+ get_midx_filename_ext(midx->source, &buf,
+ get_midx_checksum(midx),
MIDX_EXT_BITMAP);
return strbuf_detach(&buf, NULL);
@@ -463,7 +462,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
if (bitmap_git->pack || bitmap_git->midx) {
struct strbuf buf = STRBUF_INIT;
- get_midx_filename(midx->repo->hash_algo, &buf, midx->object_dir);
+ get_midx_filename(midx->source, &buf);
trace2_data_string("bitmap", bitmap_repo(bitmap_git),
"ignoring extra midx bitmap file", buf.buf);
close(fd);
@@ -493,7 +492,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
}
for (i = 0; i < bitmap_git->midx->num_packs + bitmap_git->midx->num_packs_in_base; i++) {
- if (prepare_midx_pack(bitmap_repo(bitmap_git), bitmap_git->midx, i)) {
+ if (prepare_midx_pack(bitmap_git->midx, i)) {
warning(_("could not open pack %s"),
bitmap_git->midx->pack_names[i]);
goto cleanup;
@@ -2466,7 +2465,7 @@ void reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,
struct multi_pack_index *m = bitmap_git->midx;
for (i = 0; i < m->num_packs + m->num_packs_in_base; i++) {
struct bitmapped_pack pack;
- if (nth_bitmapped_pack(r, bitmap_git->midx, &pack, i) < 0) {
+ if (nth_bitmapped_pack(bitmap_git->midx, &pack, i) < 0) {
warning(_("unable to load pack: '%s', disabling pack-reuse"),
bitmap_git->midx->pack_names[i]);
free(packs);
diff --git a/pack-revindex.c b/pack-revindex.c
index 0cc422a1e6..d0791cc493 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -379,25 +379,25 @@ int load_midx_revindex(struct multi_pack_index *m)
* not want to accidentally call munmap() in the middle of the
* MIDX.
*/
- trace2_data_string("load_midx_revindex", m->repo,
+ trace2_data_string("load_midx_revindex", m->source->odb->repo,
"source", "midx");
m->revindex_data = (const uint32_t *)m->chunk_revindex;
return 0;
}
- trace2_data_string("load_midx_revindex", m->repo,
+ trace2_data_string("load_midx_revindex", m->source->odb->repo,
"source", "rev");
if (m->has_chain)
- get_split_midx_filename_ext(m->repo->hash_algo, &revindex_name,
- m->object_dir, get_midx_checksum(m),
+ get_split_midx_filename_ext(m->source, &revindex_name,
+ get_midx_checksum(m),
MIDX_EXT_REV);
else
- get_midx_filename_ext(m->repo->hash_algo, &revindex_name,
- m->object_dir, get_midx_checksum(m),
+ get_midx_filename_ext(m->source, &revindex_name,
+ get_midx_checksum(m),
MIDX_EXT_REV);
- ret = load_revindex_from_disk(m->repo->hash_algo,
+ ret = load_revindex_from_disk(m->source->odb->repo->hash_algo,
revindex_name.buf,
m->num_objects,
&m->revindex_map,
diff --git a/packfile.c b/packfile.c
index 5d73932f50..acb680966d 100644
--- a/packfile.c
+++ b/packfile.c
@@ -935,14 +935,14 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
report_garbage(PACKDIR_FILE_GARBAGE, full_name);
}
-static void prepare_packed_git_one(struct odb_source *source, int local)
+static void prepare_packed_git_one(struct odb_source *source)
{
struct string_list garbage = STRING_LIST_INIT_DUP;
struct prepare_pack_data data = {
.m = source->midx,
.r = source->odb->repo,
.garbage = &garbage,
- .local = local,
+ .local = source->local,
};
for_each_file_in_pack_dir(source->path, prepare_pack, &data);
@@ -1037,9 +1037,8 @@ static void prepare_packed_git(struct repository *r)
odb_prepare_alternates(r->objects);
for (source = r->objects->sources; source; source = source->next) {
- int local = (source == r->objects->sources);
- prepare_multi_pack_index_one(source, local);
- prepare_packed_git_one(source, local);
+ prepare_multi_pack_index_one(source);
+ prepare_packed_git_one(source);
}
rearrange_packed_git(r);
@@ -1092,7 +1091,7 @@ struct packed_git *get_all_packs(struct repository *r)
if (!m)
continue;
for (uint32_t i = 0; i < m->num_packs + m->num_packs_in_base; i++)
- prepare_midx_pack(r, m, i);
+ prepare_midx_pack(m, i);
}
return r->objects->packed_git;
@@ -2078,7 +2077,7 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
prepare_packed_git(r);
for (struct odb_source *source = r->objects->sources; source; source = source->next)
- if (source->midx && fill_midx_entry(r, oid, e, source->midx))
+ if (source->midx && fill_midx_entry(source->midx, oid, e))
return 1;
if (!r->objects->packed_git)
diff --git a/path-walk.c b/path-walk.c
index 2d4ddbadd5..f1ceed99e9 100644
--- a/path-walk.c
+++ b/path-walk.c
@@ -105,6 +105,24 @@ static void push_to_stack(struct path_walk_context *ctx,
prio_queue_put(&ctx->path_stack, xstrdup(path));
}
+static void add_path_to_list(struct path_walk_context *ctx,
+ const char *path,
+ enum object_type type,
+ struct object_id *oid,
+ int interesting)
+{
+ struct type_and_oid_list *list = strmap_get(&ctx->paths_to_lists, path);
+
+ if (!list) {
+ CALLOC_ARRAY(list, 1);
+ list->type = type;
+ strmap_put(&ctx->paths_to_lists, path, list);
+ }
+
+ list->maybe_interesting |= interesting;
+ oid_array_append(&list->oids, oid);
+}
+
static int add_tree_entries(struct path_walk_context *ctx,
const char *base_path,
struct object_id *oid)
@@ -129,7 +147,6 @@ static int add_tree_entries(struct path_walk_context *ctx,
init_tree_desc(&desc, &tree->object.oid, tree->buffer, tree->size);
while (tree_entry(&desc, &entry)) {
- struct type_and_oid_list *list;
struct object *o;
/* Not actually true, but we will ignore submodules later. */
enum object_type type = S_ISDIR(entry.mode) ? OBJ_TREE : OBJ_BLOB;
@@ -190,17 +207,10 @@ static int add_tree_entries(struct path_walk_context *ctx,
continue;
}
- if (!(list = strmap_get(&ctx->paths_to_lists, path.buf))) {
- CALLOC_ARRAY(list, 1);
- list->type = type;
- strmap_put(&ctx->paths_to_lists, path.buf, list);
- }
- push_to_stack(ctx, path.buf);
-
- if (!(o->flags & UNINTERESTING))
- list->maybe_interesting = 1;
+ add_path_to_list(ctx, path.buf, type, &entry.oid,
+ !(o->flags & UNINTERESTING));
- oid_array_append(&list->oids, &entry.oid);
+ push_to_stack(ctx, path.buf);
}
free_tree_buffer(tree);
@@ -377,15 +387,9 @@ static int setup_pending_objects(struct path_walk_info *info,
if (!info->trees)
continue;
if (pending->path) {
- struct type_and_oid_list *list;
char *path = *pending->path ? xstrfmt("%s/", pending->path)
: xstrdup("");
- if (!(list = strmap_get(&ctx->paths_to_lists, path))) {
- CALLOC_ARRAY(list, 1);
- list->type = OBJ_TREE;
- strmap_put(&ctx->paths_to_lists, path, list);
- }
- oid_array_append(&list->oids, &obj->oid);
+ add_path_to_list(ctx, path, OBJ_TREE, &obj->oid, 1);
free(path);
} else {
/* assume a root tree, such as a lightweight tag. */
@@ -396,19 +400,10 @@ static int setup_pending_objects(struct path_walk_info *info,
case OBJ_BLOB:
if (!info->blobs)
continue;
- if (pending->path) {
- struct type_and_oid_list *list;
- char *path = pending->path;
- if (!(list = strmap_get(&ctx->paths_to_lists, path))) {
- CALLOC_ARRAY(list, 1);
- list->type = OBJ_BLOB;
- strmap_put(&ctx->paths_to_lists, path, list);
- }
- oid_array_append(&list->oids, &obj->oid);
- } else {
- /* assume a root tree, such as a lightweight tag. */
+ if (pending->path)
+ add_path_to_list(ctx, pending->path, OBJ_BLOB, &obj->oid, 1);
+ else
oid_array_append(&tagged_blobs->oids, &obj->oid);
- }
break;
case OBJ_COMMIT:
diff --git a/promisor-remote.c b/promisor-remote.c
index 08b0da8962..77ebf537e2 100644
--- a/promisor-remote.c
+++ b/promisor-remote.c
@@ -314,9 +314,162 @@ static int allow_unsanitized(char ch)
return ch > 32 && ch < 127;
}
-static void promisor_info_vecs(struct repository *repo,
- struct strvec *names,
- struct strvec *urls)
+/*
+ * All the fields used in the "promisor-remote" protocol capability,
+ * including the mandatory "name" and "url" ones.
+ */
+static const char promisor_field_name[] = "name";
+static const char promisor_field_url[] = "url";
+static const char promisor_field_filter[] = "partialCloneFilter";
+static const char promisor_field_token[] = "token";
+
+/*
+ * List of optional field names that can be used in the
+ * "promisor-remote" protocol capability (others must be
+ * ignored). Each field should correspond to a configurable property
+ * of a remote that can be relevant for the client.
+ */
+static const char *known_fields[] = {
+ promisor_field_filter, /* Filter used for partial clone */
+ promisor_field_token, /* Authentication token for the remote */
+ NULL
+};
+
+/*
+ * Check if 'field' is in the list of known field names for the
+ * "promisor-remote" protocol capability.
+ */
+static int is_known_field(const char *field)
+{
+ const char **p;
+
+ for (p = known_fields; *p; p++)
+ if (!strcasecmp(*p, field))
+ return 1;
+ return 0;
+}
+
+static int is_valid_field(struct string_list_item *item, void *cb_data)
+{
+ const char *field = item->string;
+ const char *config_key = (const char *)cb_data;
+
+ if (!is_known_field(field)) {
+ warning(_("unsupported field '%s' in '%s' config"), field, config_key);
+ return 0;
+ }
+ return 1;
+}
+
+static char *fields_from_config(struct string_list *fields_list, const char *config_key)
+{
+ char *fields = NULL;
+
+ if (!repo_config_get_string(the_repository, config_key, &fields) && *fields) {
+ string_list_split_in_place_f(fields_list, fields, ",", -1,
+ STRING_LIST_SPLIT_TRIM |
+ STRING_LIST_SPLIT_NONEMPTY);
+ filter_string_list(fields_list, 0, is_valid_field, (void *)config_key);
+ }
+
+ return fields;
+}
+
+static struct string_list *fields_sent(void)
+{
+ static struct string_list fields_list = STRING_LIST_INIT_NODUP;
+ static int initialized;
+
+ if (!initialized) {
+ fields_list.cmp = strcasecmp;
+ fields_from_config(&fields_list, "promisor.sendFields");
+ initialized = 1;
+ }
+
+ return &fields_list;
+}
+
+static struct string_list *fields_checked(void)
+{
+ static struct string_list fields_list = STRING_LIST_INIT_NODUP;
+ static int initialized;
+
+ if (!initialized) {
+ fields_list.cmp = strcasecmp;
+ fields_from_config(&fields_list, "promisor.checkFields");
+ initialized = 1;
+ }
+
+ return &fields_list;
+}
+
+/*
+ * Struct for promisor remotes involved in the "promisor-remote"
+ * protocol capability.
+ *
+ * Except for "name", each <member> in this struct and its <value>
+ * should correspond (either on the client side or on the server side)
+ * to a "remote.<name>.<member>" config variable set to <value> where
+ * "<name>" is a promisor remote name.
+ */
+struct promisor_info {
+ const char *name;
+ const char *url;
+ const char *filter;
+ const char *token;
+};
+
+static void promisor_info_free(struct promisor_info *p)
+{
+ free((char *)p->name);
+ free((char *)p->url);
+ free((char *)p->filter);
+ free((char *)p->token);
+ free(p);
+}
+
+static void promisor_info_list_clear(struct string_list *list)
+{
+ for (size_t i = 0; i < list->nr; i++)
+ promisor_info_free(list->items[i].util);
+ string_list_clear(list, 0);
+}
+
+static void set_one_field(struct promisor_info *p,
+ const char *field, const char *value)
+{
+ if (!strcasecmp(field, promisor_field_filter))
+ p->filter = xstrdup(value);
+ else if (!strcasecmp(field, promisor_field_token))
+ p->token = xstrdup(value);
+ else
+ BUG("invalid field '%s'", field);
+}
+
+static void set_fields(struct promisor_info *p,
+ struct string_list *field_names)
+{
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, field_names) {
+ char *key = xstrfmt("remote.%s.%s", p->name, item->string);
+ const char *val;
+ if (!repo_config_get_string_tmp(the_repository, key, &val) && *val)
+ set_one_field(p, item->string, val);
+ free(key);
+ }
+}
+
+/*
+ * Populate 'list' with promisor remote information from the config.
+ * The 'util' pointer of each list item will hold a 'struct
+ * promisor_info'. Apart from "name" and "url", only the struct
+ * members named in the 'field_names' list are set (using values
+ * from the configuration).
+ */
+static void promisor_config_info_list(struct repository *repo,
+ struct string_list *list,
+ struct string_list *field_names)
{
struct promisor_remote *r;
@@ -328,8 +481,17 @@ static void promisor_info_vecs(struct repository *repo,
/* Only add remotes with a non empty URL */
if (!repo_config_get_string_tmp(the_repository, url_key, &url) && *url) {
- strvec_push(names, r->name);
- strvec_push(urls, url);
+ struct promisor_info *new_info = xcalloc(1, sizeof(*new_info));
+ struct string_list_item *item;
+
+ new_info->name = xstrdup(r->name);
+ new_info->url = xstrdup(url);
+
+ if (field_names)
+ set_fields(new_info, field_names);
+
+ item = string_list_append(list, new_info->name);
+ item->util = new_info;
}
free(url_key);
@@ -340,47 +502,45 @@ char *promisor_remote_info(struct repository *repo)
{
struct strbuf sb = STRBUF_INIT;
int advertise_promisors = 0;
- struct strvec names = STRVEC_INIT;
- struct strvec urls = STRVEC_INIT;
+ struct string_list config_info = STRING_LIST_INIT_NODUP;
+ struct string_list_item *item;
repo_config_get_bool(the_repository, "promisor.advertise", &advertise_promisors);
if (!advertise_promisors)
return NULL;
- promisor_info_vecs(repo, &names, &urls);
+ promisor_config_info_list(repo, &config_info, fields_sent());
- if (!names.nr)
+ if (!config_info.nr)
return NULL;
- for (size_t i = 0; i < names.nr; i++) {
- if (i)
+ for_each_string_list_item(item, &config_info) {
+ struct promisor_info *p = item->util;
+
+ if (item != config_info.items)
strbuf_addch(&sb, ';');
- strbuf_addstr(&sb, "name=");
- strbuf_addstr_urlencode(&sb, names.v[i], allow_unsanitized);
- strbuf_addstr(&sb, ",url=");
- strbuf_addstr_urlencode(&sb, urls.v[i], allow_unsanitized);
+
+ strbuf_addf(&sb, "%s=", promisor_field_name);
+ strbuf_addstr_urlencode(&sb, p->name, allow_unsanitized);
+ strbuf_addf(&sb, ",%s=", promisor_field_url);
+ strbuf_addstr_urlencode(&sb, p->url, allow_unsanitized);
+
+ if (p->filter) {
+ strbuf_addf(&sb, ",%s=", promisor_field_filter);
+ strbuf_addstr_urlencode(&sb, p->filter, allow_unsanitized);
+ }
+ if (p->token) {
+ strbuf_addf(&sb, ",%s=", promisor_field_token);
+ strbuf_addstr_urlencode(&sb, p->token, allow_unsanitized);
+ }
}
- strvec_clear(&names);
- strvec_clear(&urls);
+ promisor_info_list_clear(&config_info);
return strbuf_detach(&sb, NULL);
}
-/*
- * Find first index of 'nicks' where there is 'nick'. 'nick' is
- * compared case sensitively to the strings in 'nicks'. If not found
- * 'nicks->nr' is returned.
- */
-static size_t remote_nick_find(struct strvec *nicks, const char *nick)
-{
- for (size_t i = 0; i < nicks->nr; i++)
- if (!strcmp(nicks->v[i], nick))
- return i;
- return nicks->nr;
-}
-
enum accept_promisor {
ACCEPT_NONE = 0,
ACCEPT_KNOWN_URL,
@@ -388,23 +548,84 @@ enum accept_promisor {
ACCEPT_ALL
};
+static int match_field_against_config(const char *field, const char *value,
+ struct promisor_info *config_info)
+{
+ if (config_info->filter && !strcasecmp(field, promisor_field_filter))
+ return !strcmp(config_info->filter, value);
+ else if (config_info->token && !strcasecmp(field, promisor_field_token))
+ return !strcmp(config_info->token, value);
+
+ return 0;
+}
+
+static int all_fields_match(struct promisor_info *advertised,
+ struct string_list *config_info,
+ int in_list)
+{
+ struct string_list *fields = fields_checked();
+ struct string_list_item *item_checked;
+
+ for_each_string_list_item(item_checked, fields) {
+ int match = 0;
+ const char *field = item_checked->string;
+ const char *value = NULL;
+ struct string_list_item *item;
+
+ if (!strcasecmp(field, promisor_field_filter))
+ value = advertised->filter;
+ else if (!strcasecmp(field, promisor_field_token))
+ value = advertised->token;
+
+ if (!value)
+ return 0;
+
+ if (in_list) {
+ for_each_string_list_item(item, config_info) {
+ struct promisor_info *p = item->util;
+ if (match_field_against_config(field, value, p)) {
+ match = 1;
+ break;
+ }
+ }
+ } else {
+ item = string_list_lookup(config_info, advertised->name);
+ if (item) {
+ struct promisor_info *p = item->util;
+ match = match_field_against_config(field, value, p);
+ }
+ }
+
+ if (!match)
+ return 0;
+ }
+
+ return 1;
+}
+
static int should_accept_remote(enum accept_promisor accept,
- const char *remote_name, const char *remote_url,
- struct strvec *names, struct strvec *urls)
+ struct promisor_info *advertised,
+ struct string_list *config_info)
{
- size_t i;
+ struct promisor_info *p;
+ struct string_list_item *item;
+ const char *remote_name = advertised->name;
+ const char *remote_url = advertised->url;
if (accept == ACCEPT_ALL)
- return 1;
+ return all_fields_match(advertised, config_info, 1);
- i = remote_nick_find(names, remote_name);
+ /* Get config info for that promisor remote */
+ item = string_list_lookup(config_info, remote_name);
- if (i >= names->nr)
+ if (!item)
/* We don't know about that remote */
return 0;
+ p = item->util;
+
if (accept == ACCEPT_KNOWN_NAME)
- return 1;
+ return all_fields_match(advertised, config_info, 0);
if (accept != ACCEPT_KNOWN_URL)
BUG("Unhandled 'enum accept_promisor' value '%d'", accept);
@@ -414,24 +635,72 @@ static int should_accept_remote(enum accept_promisor accept,
return 0;
}
- if (!strcmp(urls->v[i], remote_url))
- return 1;
+ if (!strcmp(p->url, remote_url))
+ return all_fields_match(advertised, config_info, 0);
warning(_("known remote named '%s' but with URL '%s' instead of '%s'"),
- remote_name, urls->v[i], remote_url);
+ remote_name, p->url, remote_url);
return 0;
}
+static int skip_field_name_prefix(const char *elem, const char *field_name, const char **value)
+{
+ const char *p;
+ if (!skip_prefix(elem, field_name, &p) || *p != '=')
+ return 0;
+ *value = p + 1;
+ return 1;
+}
+
+static struct promisor_info *parse_one_advertised_remote(const char *remote_info)
+{
+ struct promisor_info *info = xcalloc(1, sizeof(*info));
+ struct string_list elem_list = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+
+ string_list_split(&elem_list, remote_info, ",", -1);
+
+ for_each_string_list_item(item, &elem_list) {
+ const char *elem = item->string;
+ const char *p = strchr(elem, '=');
+
+ if (!p) {
+ warning(_("invalid element '%s' from remote info"), elem);
+ continue;
+ }
+
+ if (skip_field_name_prefix(elem, promisor_field_name, &p))
+ info->name = url_percent_decode(p);
+ else if (skip_field_name_prefix(elem, promisor_field_url, &p))
+ info->url = url_percent_decode(p);
+ else if (skip_field_name_prefix(elem, promisor_field_filter, &p))
+ info->filter = url_percent_decode(p);
+ else if (skip_field_name_prefix(elem, promisor_field_token, &p))
+ info->token = url_percent_decode(p);
+ }
+
+ string_list_clear(&elem_list, 0);
+
+ if (!info->name || !info->url) {
+ warning(_("server advertised a promisor remote without a name or URL: %s"),
+ remote_info);
+ promisor_info_free(info);
+ return NULL;
+ }
+
+ return info;
+}
+
static void filter_promisor_remote(struct repository *repo,
struct strvec *accepted,
const char *info)
{
- struct strbuf **remotes;
const char *accept_str;
enum accept_promisor accept = ACCEPT_NONE;
- struct strvec names = STRVEC_INIT;
- struct strvec urls = STRVEC_INIT;
+ struct string_list config_info = STRING_LIST_INIT_NODUP;
+ struct string_list remote_info = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
if (!repo_config_get_string_tmp(the_repository, "promisor.acceptfromserver", &accept_str)) {
if (!*accept_str || !strcasecmp("None", accept_str))
@@ -450,49 +719,31 @@ static void filter_promisor_remote(struct repository *repo,
if (accept == ACCEPT_NONE)
return;
- if (accept != ACCEPT_ALL)
- promisor_info_vecs(repo, &names, &urls);
-
/* Parse remote info received */
- remotes = strbuf_split_str(info, ';', 0);
-
- for (size_t i = 0; remotes[i]; i++) {
- struct strbuf **elems;
- const char *remote_name = NULL;
- const char *remote_url = NULL;
- char *decoded_name = NULL;
- char *decoded_url = NULL;
-
- strbuf_strip_suffix(remotes[i], ";");
- elems = strbuf_split(remotes[i], ',');
-
- for (size_t j = 0; elems[j]; j++) {
- int res;
- strbuf_strip_suffix(elems[j], ",");
- res = skip_prefix(elems[j]->buf, "name=", &remote_name) ||
- skip_prefix(elems[j]->buf, "url=", &remote_url);
- if (!res)
- warning(_("unknown element '%s' from remote info"),
- elems[j]->buf);
- }
+ string_list_split(&remote_info, info, ";", -1);
+
+ for_each_string_list_item(item, &remote_info) {
+ struct promisor_info *advertised;
- if (remote_name)
- decoded_name = url_percent_decode(remote_name);
- if (remote_url)
- decoded_url = url_percent_decode(remote_url);
+ advertised = parse_one_advertised_remote(item->string);
+
+ if (!advertised)
+ continue;
+
+ if (!config_info.nr) {
+ promisor_config_info_list(repo, &config_info, fields_checked());
+ string_list_sort(&config_info);
+ }
- if (decoded_name && should_accept_remote(accept, decoded_name, decoded_url, &names, &urls))
- strvec_push(accepted, decoded_name);
+ if (should_accept_remote(accept, advertised, &config_info))
+ strvec_push(accepted, advertised->name);
- strbuf_list_free(elems);
- free(decoded_name);
- free(decoded_url);
+ promisor_info_free(advertised);
}
- strvec_clear(&names);
- strvec_clear(&urls);
- strbuf_list_free(remotes);
+ promisor_info_list_clear(&config_info);
+ string_list_clear(&remote_info, 0);
}
char *promisor_remote_reply(const char *info)
@@ -518,16 +769,15 @@ char *promisor_remote_reply(const char *info)
void mark_promisor_remotes_as_accepted(struct repository *r, const char *remotes)
{
- struct strbuf **accepted_remotes = strbuf_split_str(remotes, ';', 0);
+ struct string_list accepted_remotes = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
- for (size_t i = 0; accepted_remotes[i]; i++) {
- struct promisor_remote *p;
- char *decoded_remote;
+ string_list_split(&accepted_remotes, remotes, ";", -1);
- strbuf_strip_suffix(accepted_remotes[i], ";");
- decoded_remote = url_percent_decode(accepted_remotes[i]->buf);
+ for_each_string_list_item(item, &accepted_remotes) {
+ char *decoded_remote = url_percent_decode(item->string);
+ struct promisor_remote *p = repo_promisor_remote_find(r, decoded_remote);
- p = repo_promisor_remote_find(r, decoded_remote);
if (p)
p->accepted = 1;
else
@@ -537,5 +787,5 @@ void mark_promisor_remotes_as_accepted(struct repository *r, const char *remotes
free(decoded_remote);
}
- strbuf_list_free(accepted_remotes);
+ string_list_clear(&accepted_remotes, 0);
}
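
For orientation (the remote names, URL, and filter below are invented): with promisor.advertise=true and promisor.sendFields=partialCloneFilter set on the server, the capability string assembled by promisor_remote_info() takes roughly the shape of the C literal below. Remotes are separated by ';', fields within a remote by ',', and only "name" and "url" are mandatory. A client that sets promisor.checkFields=partialCloneFilter would additionally require the advertised filter to match the one it has configured for that remote before accepting it.

/* Invented example of an advertised "promisor-remote" capability value. */
static const char example_promisor_remote_capability[] =
	"name=origin,url=https://example.com/repo.git,partialCloneFilter=blob:none;"
	"name=big-files,url=https://example.com/big-files.git";
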
diff --git a/range-diff.c b/range-diff.c
index 8a2dcbee32..ca449a0769 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -325,13 +325,24 @@ static int diffsize(const char *a, const char *b)
}
static void get_correspondences(struct string_list *a, struct string_list *b,
- int creation_factor)
+ int creation_factor, size_t max_memory)
{
int n = a->nr + b->nr;
int *cost, c, *a2b, *b2a;
int i, j;
-
- ALLOC_ARRAY(cost, st_mult(n, n));
+ size_t cost_size = st_mult(n, n);
+ size_t cost_bytes = st_mult(sizeof(int), cost_size);
+ if (cost_bytes >= max_memory) {
+ struct strbuf cost_str = STRBUF_INIT;
+ struct strbuf max_str = STRBUF_INIT;
+ strbuf_humanise_bytes(&cost_str, cost_bytes);
+ strbuf_humanise_bytes(&max_str, max_memory);
+ die(_("range-diff: unable to compute the range-diff, since it "
+ "exceeds the maximum memory for the cost matrix: %s "
+ "(%"PRIuMAX" bytes) needed, limited to %s (%"PRIuMAX" bytes)"),
+ cost_str.buf, (uintmax_t)cost_bytes, max_str.buf, (uintmax_t)max_memory);
+ }
+ ALLOC_ARRAY(cost, cost_size);
ALLOC_ARRAY(a2b, n);
ALLOC_ARRAY(b2a, n);
@@ -591,7 +602,8 @@ int show_range_diff(const char *range1, const char *range2,
if (!res) {
find_exact_matches(&branch1, &branch2);
get_correspondences(&branch1, &branch2,
- range_diff_opts->creation_factor);
+ range_diff_opts->creation_factor,
+ range_diff_opts->max_memory);
output(&branch1, &branch2, range_diff_opts);
}
diff --git a/range-diff.h b/range-diff.h
index cd85000b5a..9d39818e34 100644
--- a/range-diff.h
+++ b/range-diff.h
@@ -5,6 +5,10 @@
#include "strvec.h"
#define RANGE_DIFF_CREATION_FACTOR_DEFAULT 60
+#define RANGE_DIFF_MAX_MEMORY_DEFAULT \
+ (sizeof(void*) >= 8 ? \
+ ((size_t)(1024L * 1024L) * (size_t)(4L * 1024L)) : /* 4GB on 64-bit */ \
+ ((size_t)(1024L * 1024L) * (size_t)(2L * 1024L))) /* 2GB on 32-bit */
/*
* A much higher value than the default, when we KNOW we are comparing
@@ -17,6 +21,7 @@ struct range_diff_options {
unsigned dual_color:1;
unsigned left_only:1, right_only:1;
unsigned include_merges:1;
+ size_t max_memory;
const struct diff_options *diffopt; /* may be NULL */
const struct strvec *other_arg; /* may be NULL */
};
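
To put the new limit in perspective, a back-of-the-envelope calculation (standalone sketch, not Git code): get_correspondences() allocates sizeof(int) * n * n bytes for n = a->nr + b->nr, so with 4-byte ints the 4 GiB default on 64-bit builds is reached once the two ranges together contain about 32768 commits.

#include <stdio.h>

int main(void)
{
	/* Total number of commits across both ranges being compared. */
	size_t n = 32768;
	/* Cost matrix size, mirroring get_correspondences(). */
	size_t cost_bytes = sizeof(int) * n * n;
	/* RANGE_DIFF_MAX_MEMORY_DEFAULT on a 64-bit build: 4 GiB. */
	size_t max_memory = (size_t)4 * 1024 * 1024 * 1024;

	printf("cost matrix: %zu bytes, limit: %zu bytes -> %s\n",
	       cost_bytes, max_memory,
	       cost_bytes >= max_memory ? "range-diff would die" : "ok");
	return 0;
}
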
diff --git a/read-cache.c b/read-cache.c
index 06ad74db22..229b8ef11c 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -3947,6 +3947,7 @@ int add_files_to_cache(struct repository *repo, const char *prefix,
const struct pathspec *pathspec, char *ps_matched,
int include_sparse, int flags)
{
+ struct odb_transaction *transaction;
struct update_callback_data data;
struct rev_info rev;
@@ -3972,9 +3973,9 @@ int add_files_to_cache(struct repository *repo, const char *prefix,
* This function is invoked from commands other than 'add', which
* may not have their own transaction active.
*/
- begin_odb_transaction();
+ transaction = begin_odb_transaction(repo->objects);
run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
- end_odb_transaction();
+ end_odb_transaction(transaction);
release_revisions(&rev);
return !!data.add_errors;
diff --git a/repository.c b/repository.c
index ecd691181f..6faf5c7398 100644
--- a/repository.c
+++ b/repository.c
@@ -57,6 +57,7 @@ void initialize_repository(struct repository *repo)
repo->parsed_objects = parsed_object_pool_new(repo);
ALLOC_ARRAY(repo->index, 1);
index_state_init(repo->index, repo);
+ repo->check_deprecated_config = true;
/*
* When a command runs inside a repository, it learns what
@@ -168,6 +169,7 @@ void repo_set_gitdir(struct repository *repo,
if (!repo->objects->sources) {
CALLOC_ARRAY(repo->objects->sources, 1);
repo->objects->sources->odb = repo->objects;
+ repo->objects->sources->local = true;
repo->objects->sources_tail = &repo->objects->sources->next;
}
expand_base_dir(&repo->objects->sources->path, o->object_dir,
diff --git a/repository.h b/repository.h
index 042dc93f0f..5808a5d610 100644
--- a/repository.h
+++ b/repository.h
@@ -161,6 +161,9 @@ struct repository {
/* Indicate if a repository has a different 'commondir' from 'gitdir' */
unsigned different_commondir:1;
+
+ /* Should repo_config() check for deprecated settings */
+ bool check_deprecated_config;
};
#ifdef USE_THE_REPOSITORY_VARIABLE
diff --git a/t/Makefile b/t/Makefile
index 757674e727..ab8a5b54aa 100644
--- a/t/Makefile
+++ b/t/Makefile
@@ -189,15 +189,9 @@ perf:
.PHONY: libgit-sys-test libgit-rs-test
libgit-sys-test:
- $(QUIET)(\
- cd ../contrib/libgit-sys && \
- cargo test \
- )
-libgit-rs-test:
- $(QUIET)(\
- cd ../contrib/libgit-rs && \
- cargo test \
- )
+ $(QUIET)cargo test --manifest-path ../contrib/libgit-sys/Cargo.toml
+libgit-rs-test: libgit-sys-test
+ $(QUIET)cargo test --manifest-path ../contrib/libgit-rs/Cargo.toml
ifdef INCLUDE_LIBGIT_RS
-all:: libgit-sys-test libgit-rs-test
+all:: libgit-rs-test
endif
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
index da2aa036b5..6de5d1665a 100644
--- a/t/helper/test-read-midx.c
+++ b/t/helper/test-read-midx.c
@@ -11,14 +11,24 @@
#include "gettext.h"
#include "pack-revindex.h"
+static struct multi_pack_index *setup_midx(const char *object_dir)
+{
+ struct odb_source *source;
+ setup_git_directory();
+ source = odb_find_source(the_repository->objects, object_dir);
+ if (!source)
+ source = odb_add_to_alternates_memory(the_repository->objects,
+ object_dir);
+ return load_multi_pack_index(source);
+}
+
static int read_midx_file(const char *object_dir, const char *checksum,
int show_objects)
{
uint32_t i;
struct multi_pack_index *m;
- setup_git_directory();
- m = load_multi_pack_index(the_repository, object_dir, 1);
+ m = setup_midx(object_dir);
if (!m)
return 1;
@@ -56,7 +66,7 @@ static int read_midx_file(const char *object_dir, const char *checksum,
for (i = 0; i < m->num_packs; i++)
printf("%s\n", m->pack_names[i]);
- printf("object-dir: %s\n", m->object_dir);
+ printf("object-dir: %s\n", m->source->path);
if (show_objects) {
struct object_id oid;
@@ -65,7 +75,7 @@ static int read_midx_file(const char *object_dir, const char *checksum,
for (i = 0; i < m->num_objects; i++) {
nth_midxed_object_oid(&oid, m,
i + m->num_objects_in_base);
- fill_midx_entry(the_repository, &oid, &e, m);
+ fill_midx_entry(m, &oid, &e);
printf("%s %"PRIu64"\t%s\n",
oid_to_hex(&oid), e.offset, e.p->pack_name);
@@ -81,8 +91,7 @@ static int read_midx_checksum(const char *object_dir)
{
struct multi_pack_index *m;
- setup_git_directory();
- m = load_multi_pack_index(the_repository, object_dir, 1);
+ m = setup_midx(object_dir);
if (!m)
return 1;
printf("%s\n", hash_to_hex(get_midx_checksum(m)));
@@ -96,9 +105,7 @@ static int read_midx_preferred_pack(const char *object_dir)
struct multi_pack_index *midx = NULL;
uint32_t preferred_pack;
- setup_git_directory();
-
- midx = load_multi_pack_index(the_repository, object_dir, 1);
+ midx = setup_midx(object_dir);
if (!midx)
return 1;
@@ -119,14 +126,12 @@ static int read_midx_bitmapped_packs(const char *object_dir)
struct bitmapped_pack pack;
uint32_t i;
- setup_git_directory();
-
- midx = load_multi_pack_index(the_repository, object_dir, 1);
+ midx = setup_midx(object_dir);
if (!midx)
return 1;
for (i = 0; i < midx->num_packs + midx->num_packs_in_base; i++) {
- if (nth_bitmapped_pack(the_repository, midx, &pack, i) < 0) {
+ if (nth_bitmapped_pack(midx, &pack, i) < 0) {
close_midx(midx);
return 1;
}
diff --git a/t/meson.build b/t/meson.build
index baeeba2ce6..7974795fe4 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -206,11 +206,13 @@ integration_tests = [
't1419-exclude-refs.sh',
't1420-lost-found.sh',
't1421-reflog-write.sh',
+ 't1422-show-ref-exists.sh',
't1430-bad-ref-name.sh',
't1450-fsck.sh',
't1451-fsck-buffer.sh',
't1460-refs-migrate.sh',
't1461-refs-list.sh',
+ 't1462-refs-exists.sh',
't1500-rev-parse.sh',
't1501-work-tree.sh',
't1502-rev-parse-parseopt.sh',
@@ -951,6 +953,7 @@ integration_tests = [
't8012-blame-colors.sh',
't8013-blame-ignore-revs.sh',
't8014-blame-ignore-fuzzy.sh',
+ 't8020-last-modified.sh',
't9001-send-email.sh',
't9002-column.sh',
't9003-help-autocorrect.sh',
@@ -1144,6 +1147,7 @@ benchmarks = [
'perf/p7820-grep-engines.sh',
'perf/p7821-grep-engines-fixed.sh',
'perf/p7822-grep-perl-character.sh',
+ 'perf/p8020-last-modified.sh',
'perf/p9210-scalar.sh',
'perf/p9300-fast-import-export.sh',
]
@@ -1219,4 +1223,4 @@ if perl.found() and time.found()
timeout: 0,
)
endforeach
-endif \ No newline at end of file
+endif
diff --git a/t/perf/p8020-last-modified.sh b/t/perf/p8020-last-modified.sh
new file mode 100755
index 0000000000..cb1f98d3db
--- /dev/null
+++ b/t/perf/p8020-last-modified.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+test_description='last-modified perf tests'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_perf 'top-level last-modified' '
+ git last-modified HEAD
+'
+
+test_perf 'top-level recursive last-modified' '
+ git last-modified -r HEAD
+'
+
+test_perf 'subdir last-modified' '
+ git ls-tree -d HEAD >subtrees &&
+ path="$(head -n 1 subtrees | cut -f2)" &&
+ git last-modified -r HEAD -- "$path"
+'
+
+test_done
diff --git a/t/show-ref-exists-tests.sh b/t/show-ref-exists-tests.sh
new file mode 100644
index 0000000000..36e8e9df33
--- /dev/null
+++ b/t/show-ref-exists-tests.sh
@@ -0,0 +1,77 @@
+git_show_ref_exists=${git_show_ref_exists:-git show-ref --exists}
+
+test_expect_success setup '
+ test_commit --annotate A &&
+ git checkout -b side &&
+ test_commit --annotate B &&
+ git checkout main &&
+ test_commit C &&
+ git branch B A^0
+'
+
+test_expect_success '--exists with existing reference' '
+ ${git_show_ref_exists} refs/heads/$GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+'
+
+test_expect_success '--exists with missing reference' '
+ test_expect_code 2 ${git_show_ref_exists} refs/heads/does-not-exist
+'
+
+test_expect_success '--exists does not use DWIM' '
+ test_expect_code 2 ${git_show_ref_exists} $GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME 2>err &&
+ grep "reference does not exist" err
+'
+
+test_expect_success '--exists with HEAD' '
+ ${git_show_ref_exists} HEAD
+'
+
+test_expect_success '--exists with bad reference name' '
+ test_when_finished "git update-ref -d refs/heads/bad...name" &&
+ new_oid=$(git rev-parse HEAD) &&
+ test-tool ref-store main update-ref msg refs/heads/bad...name $new_oid $ZERO_OID REF_SKIP_REFNAME_VERIFICATION &&
+ ${git_show_ref_exists} refs/heads/bad...name
+'
+
+test_expect_success '--exists with arbitrary symref' '
+ test_when_finished "git symbolic-ref -d refs/symref" &&
+ git symbolic-ref refs/symref refs/heads/$GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME &&
+ ${git_show_ref_exists} refs/symref
+'
+
+test_expect_success '--exists with dangling symref' '
+ test_when_finished "git symbolic-ref -d refs/heads/dangling" &&
+ git symbolic-ref refs/heads/dangling refs/heads/does-not-exist &&
+ ${git_show_ref_exists} refs/heads/dangling
+'
+
+test_expect_success '--exists with nonexistent object ID' '
+ test-tool ref-store main update-ref msg refs/heads/missing-oid $(test_oid 001) $ZERO_OID REF_SKIP_OID_VERIFICATION &&
+ ${git_show_ref_exists} refs/heads/missing-oid
+'
+
+test_expect_success '--exists with non-commit object' '
+ tree_oid=$(git rev-parse HEAD^{tree}) &&
+ test-tool ref-store main update-ref msg refs/heads/tree ${tree_oid} $ZERO_OID REF_SKIP_OID_VERIFICATION &&
+ ${git_show_ref_exists} refs/heads/tree
+'
+
+test_expect_success '--exists with directory fails with generic error' '
+ cat >expect <<-EOF &&
+ error: reference does not exist
+ EOF
+ test_expect_code 2 ${git_show_ref_exists} refs/heads 2>err &&
+ test_cmp expect err
+'
+
+test_expect_success '--exists with non-existent special ref' '
+ test_expect_code 2 ${git_show_ref_exists} FETCH_HEAD
+'
+
+test_expect_success '--exists with existing special ref' '
+ test_when_finished "rm .git/FETCH_HEAD" &&
+ git rev-parse HEAD >.git/FETCH_HEAD &&
+ ${git_show_ref_exists} FETCH_HEAD
+'
+
+test_done
diff --git a/t/t0450-txt-doc-vs-help.sh b/t/t0450-txt-doc-vs-help.sh
index 2f7504ae7e..e12e18f97f 100755
--- a/t/t0450-txt-doc-vs-help.sh
+++ b/t/t0450-txt-doc-vs-help.sh
@@ -41,7 +41,7 @@ help_to_synopsis () {
}
builtin_to_adoc () {
- echo "$GIT_BUILD_DIR/Documentation/git-$1.adoc"
+ echo "$GIT_SOURCE_DIR/Documentation/git-$1.adoc"
}
adoc_to_synopsis () {
@@ -112,10 +112,19 @@ do
adoc="$(builtin_to_adoc "$builtin")" &&
preq="$(echo BUILTIN_ADOC_$builtin | tr '[:lower:]-' '[:upper:]_')" &&
- if test -f "$adoc"
+ # If and only if the *.adoc file is missing, the builtin shall be listed in t0450/adoc-missing.
+ if grep -q "^$builtin$" "$TEST_DIRECTORY"/t0450/adoc-missing
then
+ test_expect_success "$builtin appropriately marked as not having .adoc" '
+ ! test -f "$adoc"
+ '
+ else
test_set_prereq "$preq"
- fi &&
+
+ test_expect_success "$builtin appropriately marked as having .adoc" '
+ test -f "$adoc"
+ '
+ fi
# *.adoc output assertions
test_expect_success "$preq" "$builtin *.adoc SYNOPSIS has dashed labels" '
diff --git a/t/t0450/adoc-missing b/t/t0450/adoc-missing
new file mode 100644
index 0000000000..1ec9f8dcf3
--- /dev/null
+++ b/t/t0450/adoc-missing
@@ -0,0 +1,9 @@
+checkout--worker
+merge-ours
+merge-recursive
+merge-recursive-ours
+merge-recursive-theirs
+merge-subtree
+pickaxe
+submodule--helper
+upload-archive--writer
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index d8101139b4..b0f691c151 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -1506,6 +1506,8 @@ test_expect_success 'sparse-index is not expanded' '
ensure_not_expanded reset --hard &&
ensure_not_expanded restore -s rename-out-to-out -- deep/deeper1 &&
+ ensure_not_expanded ls-files deep/deeper1 &&
+
echo >>sparse-index/README.md &&
ensure_not_expanded add -A &&
echo >>sparse-index/extra.txt &&
@@ -1607,6 +1609,17 @@ test_expect_success 'describe tested on all' '
test_all_match git describe --dirty
'
+test_expect_success 'ls-files filtering and expansion' '
+ init_repos &&
+
+ # This filtering will hit a sparse directory midway
+ # through the iteration.
+ test_all_match git ls-files deep &&
+
+ # This pathspec will filter the index to only a sparse
+ # directory.
+ test_all_match git ls-files folder1
+'
test_expect_success 'sparse-index is not expanded: describe' '
init_repos &&
diff --git a/t/t1403-show-ref.sh b/t/t1403-show-ref.sh
index 9da3650e91..36c903ca19 100755
--- a/t/t1403-show-ref.sh
+++ b/t/t1403-show-ref.sh
@@ -228,69 +228,4 @@ test_expect_success 'show-ref sub-modes are mutually exclusive' '
grep "cannot be used together" err
'
-test_expect_success '--exists with existing reference' '
- git show-ref --exists refs/heads/$GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
-'
-
-test_expect_success '--exists with missing reference' '
- test_expect_code 2 git show-ref --exists refs/heads/does-not-exist
-'
-
-test_expect_success '--exists does not use DWIM' '
- test_expect_code 2 git show-ref --exists $GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME 2>err &&
- grep "reference does not exist" err
-'
-
-test_expect_success '--exists with HEAD' '
- git show-ref --exists HEAD
-'
-
-test_expect_success '--exists with bad reference name' '
- test_when_finished "git update-ref -d refs/heads/bad...name" &&
- new_oid=$(git rev-parse HEAD) &&
- test-tool ref-store main update-ref msg refs/heads/bad...name $new_oid $ZERO_OID REF_SKIP_REFNAME_VERIFICATION &&
- git show-ref --exists refs/heads/bad...name
-'
-
-test_expect_success '--exists with arbitrary symref' '
- test_when_finished "git symbolic-ref -d refs/symref" &&
- git symbolic-ref refs/symref refs/heads/$GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME &&
- git show-ref --exists refs/symref
-'
-
-test_expect_success '--exists with dangling symref' '
- test_when_finished "git symbolic-ref -d refs/heads/dangling" &&
- git symbolic-ref refs/heads/dangling refs/heads/does-not-exist &&
- git show-ref --exists refs/heads/dangling
-'
-
-test_expect_success '--exists with nonexistent object ID' '
- test-tool ref-store main update-ref msg refs/heads/missing-oid $(test_oid 001) $ZERO_OID REF_SKIP_OID_VERIFICATION &&
- git show-ref --exists refs/heads/missing-oid
-'
-
-test_expect_success '--exists with non-commit object' '
- tree_oid=$(git rev-parse HEAD^{tree}) &&
- test-tool ref-store main update-ref msg refs/heads/tree ${tree_oid} $ZERO_OID REF_SKIP_OID_VERIFICATION &&
- git show-ref --exists refs/heads/tree
-'
-
-test_expect_success '--exists with directory fails with generic error' '
- cat >expect <<-EOF &&
- error: reference does not exist
- EOF
- test_expect_code 2 git show-ref --exists refs/heads 2>err &&
- test_cmp expect err
-'
-
-test_expect_success '--exists with non-existent special ref' '
- test_expect_code 2 git show-ref --exists FETCH_HEAD
-'
-
-test_expect_success '--exists with existing special ref' '
- test_when_finished "rm .git/FETCH_HEAD" &&
- git rev-parse HEAD >.git/FETCH_HEAD &&
- git show-ref --exists FETCH_HEAD
-'
-
test_done
diff --git a/t/t1422-show-ref-exists.sh b/t/t1422-show-ref-exists.sh
new file mode 100755
index 0000000000..fdca3f16c8
--- /dev/null
+++ b/t/t1422-show-ref-exists.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+test_description='show-ref --exists'
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+
+. ./test-lib.sh
+
+. "$TEST_DIRECTORY"/show-ref-exists-tests.sh
diff --git a/t/t1462-refs-exists.sh b/t/t1462-refs-exists.sh
new file mode 100755
index 0000000000..349453c4ca
--- /dev/null
+++ b/t/t1462-refs-exists.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+test_description='refs exists'
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+
+. ./test-lib.sh
+
+git_show_ref_exists='git refs exists'
+. "$TEST_DIRECTORY"/show-ref-exists-tests.sh
diff --git a/t/t1900-repo.sh b/t/t1900-repo.sh
index a69c715357..2beba67889 100755
--- a/t/t1900-repo.sh
+++ b/t/t1900-repo.sh
@@ -63,6 +63,12 @@ test_expect_success 'setup remote' '
test_repo_info 'shallow repository = true is retrieved correctly' \
'git clone --depth 1 "file://$PWD/remote"' 'shallow' 'layout.shallow' 'true'
+test_repo_info 'object.format = sha1 is retrieved correctly' \
+ 'git init --object-format=sha1' 'sha1' 'object.format' 'sha1'
+
+test_repo_info 'object.format = sha256 is retrieved correctly' \
+ 'git init --object-format=sha256' 'sha256' 'object.format' 'sha256'
+
test_expect_success 'values returned in order requested' '
cat >expect <<-\EOF &&
layout.bare=false
@@ -92,4 +98,16 @@ test_expect_success 'git-repo-info aborts when requesting an invalid format' '
test_cmp expect actual
'
+test_expect_success '-z uses nul-terminated format' '
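+ # In the NUL-terminated format, each key is followed by a newline
+ # and its value, and each entry is terminated by a NUL byte.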
+ printf "layout.bare\nfalse\0layout.shallow\nfalse\0" >expected &&
+ git repo info -z layout.bare layout.shallow >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'git repo info uses the last requested format' '
+ echo "layout.bare=false" >expected &&
+ git repo info --format=nul -z --format=keyvalue layout.bare >actual &&
+ test_cmp expected actual
+'
+
test_done
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 34d6ad0770..e778dd8ae4 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -1176,7 +1176,7 @@ test_expect_success 'rebase -i respects core.commentchar' '
test B = $(git cat-file commit HEAD^ | sed -ne \$p)
'
-test_expect_success 'rebase -i respects core.commentchar=auto' '
+test_expect_success !WITH_BREAKING_CHANGES 'rebase -i respects core.commentchar=auto' '
test_config core.commentchar auto &&
write_script copy-edit-script.sh <<-\EOF &&
cp "$1" edit-script
@@ -1184,8 +1184,23 @@ test_expect_success 'rebase -i respects core.commentchar=auto' '
test_when_finished "git rebase --abort || :" &&
(
test_set_editor "$(pwd)/copy-edit-script.sh" &&
- git rebase -i HEAD^
+ git rebase -i HEAD^ 2>err
) &&
+ sed -n "s/^hint: *\$//p; s/^hint: //p; s/^warning: //p" err >actual &&
+ cat >expect <<-EOF &&
+ Support for ${SQ}core.commentChar=auto${SQ} is deprecated and will be removed in Git 3.0
+
+ To use the default comment string (#) please run
+
+ git config unset core.commentChar
+
+ To set a custom comment string please run
+
+ git config set core.commentChar <comment string>
+
+ where ${SQ}<comment string>${SQ} is the string you wish to use.
+ EOF
+ test_cmp expect actual &&
test -z "$(grep -ve "^#" -e "^\$" -e "^pick" edit-script)"
'
diff --git a/t/t3418-rebase-continue.sh b/t/t3418-rebase-continue.sh
index b8a8dd77e7..f9b8999db5 100755
--- a/t/t3418-rebase-continue.sh
+++ b/t/t3418-rebase-continue.sh
@@ -328,7 +328,7 @@ test_expect_success 'there is no --no-reschedule-failed-exec in an ongoing rebas
test_expect_code 129 git rebase --edit-todo --no-reschedule-failed-exec
'
-test_expect_success 'no change in comment character due to conflicts markers with core.commentChar=auto' '
+test_expect_success !WITH_BREAKING_CHANGES 'no change in comment character due to conflicts markers with core.commentChar=auto' '
git checkout -b branch-a &&
test_commit A F1 &&
git checkout -b branch-b HEAD^ &&
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 04d2a19835..d9fe289a7a 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -866,6 +866,44 @@ test_expect_success 'colorized diffs respect diff.wsErrorHighlight' '
test_grep "old<" output
'
+test_expect_success 'diff color respects color.diff' '
+ git reset --hard &&
+
+ echo old >test &&
+ git add test &&
+ echo new >test &&
+
+ printf n >n &&
+ force_color git \
+ -c color.interactive=auto \
+ -c color.interactive.prompt=blue \
+ -c color.diff=false \
+ -c color.diff.old=red \
+ add -p >output.raw 2>&1 <n &&
+ test_decode_color <output.raw >output &&
+ test_grep "BLUE.*Stage this hunk" output &&
+ test_grep ! "RED" output
+'
+
+test_expect_success 're-coloring diff without color.interactive' '
+ git reset --hard &&
+
+ test_write_lines 1 2 3 >test &&
+ git add test &&
+ test_write_lines one 2 three >test &&
+
+ test_write_lines s n n |
+ force_color git \
+ -c color.interactive=false \
+ -c color.interactive.prompt=blue \
+ -c color.diff=true \
+ -c color.diff.frag="bold magenta" \
+ add -p >output.raw 2>&1 &&
+ test_decode_color <output.raw >output &&
+ test_grep "<BOLD;MAGENTA>@@" output &&
+ test_grep ! "BLUE" output
+'
+
test_expect_success 'diffFilter filters diff' '
git reset --hard &&
@@ -1283,6 +1321,12 @@ test_expect_success 'stash accepts -U and --inter-hunk-context' '
test_grep "@@ -2,20 +2,20 @@" actual
'
+test_expect_success 'set up base for -p color tests' '
+ echo commit >file &&
+ git commit -am "commit state" &&
+ git tag patch-base
+'
+
for cmd in add checkout commit reset restore "stash save" "stash push"
do
test_expect_success "$cmd rejects invalid context options" '
@@ -1299,6 +1343,15 @@ do
test_must_fail git $cmd --inter-hunk-context 2 2>actual &&
test_grep -E ".--inter-hunk-context. requires .(--interactive/)?--patch." actual
'
+
+ test_expect_success "$cmd falls back to color.ui" '
+ git reset --hard patch-base &&
+ echo working-tree >file &&
+ test_write_lines y |
+ force_color git -c color.ui=false $cmd -p >output.raw 2>&1 &&
+ test_decode_color <output.raw >output &&
+ test_cmp output.raw output
+ '
done
test_done
diff --git a/t/t3904-stash-patch.sh b/t/t3904-stash-patch.sh
index ae313e3c70..90a4ff2c10 100755
--- a/t/t3904-stash-patch.sh
+++ b/t/t3904-stash-patch.sh
@@ -107,4 +107,23 @@ test_expect_success 'stash -p with split hunk' '
! grep "added line 2" test
'
+test_expect_success 'stash -p not confused by GIT_PAGER_IN_USE' '
+ echo to-stash >test &&
+ # Set both GIT_PAGER_IN_USE and TERM. Our goal is to entice any
+ # diff subprocesses into thinking that they could output
+ # color, even though their stdout is not going into a tty.
+ echo y |
+ GIT_PAGER_IN_USE=1 TERM=vt100 git stash -p &&
+ git diff --exit-code
+'
+
+test_expect_success 'index push not confused by GIT_PAGER_IN_USE' '
+ echo index >test &&
+ git add test &&
+ echo working-tree >test &&
+ # As above, we try to entice the child diff into using color.
+ GIT_PAGER_IN_USE=1 TERM=vt100 git stash push test &&
+ git diff --exit-code
+'
+
test_done
diff --git a/t/t4211-line-log.sh b/t/t4211-line-log.sh
index 950451cf6a..0a7c3ca42f 100755
--- a/t/t4211-line-log.sh
+++ b/t/t4211-line-log.sh
@@ -78,6 +78,8 @@ canned_test "-L :main:a.c -L 4,18:a.c simple" multiple-overlapping
canned_test "-L 4:a.c -L 8,12:a.c simple" multiple-superset
canned_test "-L 8,12:a.c -L 4:a.c simple" multiple-superset
+canned_test "-L 10,16:b.c -L 18,26:b.c main" no-assertion-error
+
test_bad_opts "-L" "switch.*requires a value"
test_bad_opts "-L b.c" "argument not .start,end:file"
test_bad_opts "-L 1:" "argument not .start,end:file"
diff --git a/t/t4211/sha1/expect.multiple b/t/t4211/sha1/expect.multiple
index 76ad5b598c..1eee8a7801 100644
--- a/t/t4211/sha1/expect.multiple
+++ b/t/t4211/sha1/expect.multiple
@@ -102,3 +102,9 @@ diff --git a/a.c b/a.c
+ s++;
+ }
+}
+@@ -0,0 +16,5 @@
++int main ()
++{
++ printf("%d\n", f(15));
++ return 0;
++}
diff --git a/t/t4211/sha1/expect.no-assertion-error b/t/t4211/sha1/expect.no-assertion-error
new file mode 100644
index 0000000000..994c37db1e
--- /dev/null
+++ b/t/t4211/sha1/expect.no-assertion-error
@@ -0,0 +1,90 @@
+commit 0d8dcfc6b968e06a27d5215bad1fdde3de9d6235
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:50:24 2013 +0100
+
+ move within the file
+
+diff --git a/b.c b/b.c
+--- a/b.c
++++ b/b.c
+@@ -25,0 +18,9 @@
++long f(long x)
++{
++ int s = 0;
++ while (x) {
++ x /= 2;
++ s++;
++ }
++ return s;
++}
+
+commit 4659538844daa2849b1a9e7d6fadb96fcd26fc83
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:48:43 2013 +0100
+
+ change back to complete line
+
+diff --git a/a.c b/a.c
+--- a/a.c
++++ b/a.c
+@@ -18,5 +18,7 @@
+ int main ()
+ {
+ printf("%ld\n", f(15));
+ return 0;
+-}
+\ No newline at end of file
++}
++
++/* incomplete lines are bad! */
+
+commit 100b61a6f2f720f812620a9d10afb3a960ccb73c
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:48:10 2013 +0100
+
+ change to an incomplete line at end
+
+diff --git a/a.c b/a.c
+--- a/a.c
++++ b/a.c
+@@ -18,5 +18,5 @@
+ int main ()
+ {
+ printf("%ld\n", f(15));
+ return 0;
+-}
++}
+\ No newline at end of file
+
+commit a6eb82647d5d67f893da442f8f9375fd89a3b1e2
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:45:16 2013 +0100
+
+ touch both functions
+
+diff --git a/a.c b/a.c
+--- a/a.c
++++ b/a.c
+@@ -17,5 +17,5 @@
+ int main ()
+ {
+- printf("%d\n", f(15));
++ printf("%ld\n", f(15));
+ return 0;
+ }
+
+commit de4c48ae814792c02a49c4c3c0c757ae69c55f6a
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:44:48 2013 +0100
+
+ initial
+
+diff --git a/a.c b/a.c
+--- /dev/null
++++ b/a.c
+@@ -0,0 +16,5 @@
++int main ()
++{
++ printf("%d\n", f(15));
++ return 0;
++}
diff --git a/t/t4211/sha1/expect.two-ranges b/t/t4211/sha1/expect.two-ranges
index 6109aa0dce..c5164f3be3 100644
--- a/t/t4211/sha1/expect.two-ranges
+++ b/t/t4211/sha1/expect.two-ranges
@@ -100,3 +100,9 @@ diff --git a/a.c b/a.c
+ s++;
+ }
+}
+@@ -0,0 +16,5 @@
++int main ()
++{
++ printf("%d\n", f(15));
++ return 0;
++}
diff --git a/t/t4211/sha256/expect.multiple b/t/t4211/sha256/expect.multiple
index ca00409b9a..dbd987b74a 100644
--- a/t/t4211/sha256/expect.multiple
+++ b/t/t4211/sha256/expect.multiple
@@ -102,3 +102,9 @@ diff --git a/a.c b/a.c
+ s++;
+ }
+}
+@@ -0,0 +16,5 @@
++int main ()
++{
++ printf("%d\n", f(15));
++ return 0;
++}
diff --git a/t/t4211/sha256/expect.no-assertion-error b/t/t4211/sha256/expect.no-assertion-error
new file mode 100644
index 0000000000..36ed12aa9c
--- /dev/null
+++ b/t/t4211/sha256/expect.no-assertion-error
@@ -0,0 +1,90 @@
+commit eb871b8aa9aff323e484723039c9a92ab0266e060bc0ef2afb08fadda25c5ace
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:50:24 2013 +0100
+
+ move within the file
+
+diff --git a/b.c b/b.c
+--- a/b.c
++++ b/b.c
+@@ -25,0 +18,9 @@
++long f(long x)
++{
++ int s = 0;
++ while (x) {
++ x /= 2;
++ s++;
++ }
++ return s;
++}
+
+commit 5526ed05c2476b56af8b7be499e8f78bd50f490740733a9ca7e1f55878fa90a9
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:48:43 2013 +0100
+
+ change back to complete line
+
+diff --git a/a.c b/a.c
+--- a/a.c
++++ b/a.c
+@@ -18,5 +18,7 @@
+ int main ()
+ {
+ printf("%ld\n", f(15));
+ return 0;
+-}
+\ No newline at end of file
++}
++
++/* incomplete lines are bad! */
+
+commit 29f32ac3141c48b22803e5c4127b719917b67d0f8ca8c5248bebfa2a19f7da10
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:48:10 2013 +0100
+
+ change to an incomplete line at end
+
+diff --git a/a.c b/a.c
+--- a/a.c
++++ b/a.c
+@@ -18,5 +18,5 @@
+ int main ()
+ {
+ printf("%ld\n", f(15));
+ return 0;
+-}
++}
+\ No newline at end of file
+
+commit ccf97b9878189c40a981da50b15713bb80a35755326320ec80900caf22ced46f
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:45:16 2013 +0100
+
+ touch both functions
+
+diff --git a/a.c b/a.c
+--- a/a.c
++++ b/a.c
+@@ -17,5 +17,5 @@
+ int main ()
+ {
+- printf("%d\n", f(15));
++ printf("%ld\n", f(15));
+ return 0;
+ }
+
+commit 1dd7e9b2b1699324b53b341e728653b913bc192a14dfea168c5b51f2b3d03592
+Author: Thomas Rast <trast@student.ethz.ch>
+Date: Thu Feb 28 10:44:48 2013 +0100
+
+ initial
+
+diff --git a/a.c b/a.c
+--- /dev/null
++++ b/a.c
+@@ -0,0 +16,5 @@
++int main ()
++{
++ printf("%d\n", f(15));
++ return 0;
++}
diff --git a/t/t4211/sha256/expect.two-ranges b/t/t4211/sha256/expect.two-ranges
index af57c8b997..6a94d3b9cb 100644
--- a/t/t4211/sha256/expect.two-ranges
+++ b/t/t4211/sha256/expect.two-ranges
@@ -100,3 +100,9 @@ diff --git a/a.c b/a.c
+ s++;
+ }
+}
+@@ -0,0 +16,5 @@
++int main ()
++{
++ printf("%d\n", f(15));
++ return 0;
++}
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index bd75dea950..93f319a4b2 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -28,11 +28,11 @@ midx_read_expect () {
EOF
if test $NUM_PACKS -ge 1
then
- ls $OBJECT_DIR/pack/ | grep idx | sort
+ ls "$OBJECT_DIR"/pack/ | grep idx | sort
fi &&
printf "object-dir: $OBJECT_DIR\n"
} >expect &&
- test-tool read-midx $OBJECT_DIR >actual &&
+ test-tool read-midx "$OBJECT_DIR" >actual &&
test_cmp expect actual
}
@@ -305,7 +305,7 @@ test_expect_success 'midx picks objects from preferred pack' '
ofs=$(git show-index <objects/pack/test-BC-$bc.idx | grep $b |
cut -d" " -f1) &&
- printf "%s %s\tobjects/pack/test-BC-%s.pack\n" \
+ printf "%s %s\t./objects/pack/test-BC-%s.pack\n" \
"$b" "$ofs" "$bc" >expect &&
grep ^$b out >actual &&
@@ -639,7 +639,7 @@ test_expect_success 'force some 64-bit offsets with pack-objects' '
( cd ../objects64 && pwd ) >.git/objects/info/alternates &&
midx64=$(git multi-pack-index --object-dir=../objects64 write)
) &&
- midx_read_expect 1 63 5 objects64 " large-offsets"
+ midx_read_expect 1 63 5 "$(pwd)/objects64" " large-offsets"
'
test_expect_success 'verify multi-pack-index with 64-bit offsets' '
@@ -989,6 +989,23 @@ test_expect_success 'repack --batch-size=0 repacks everything' '
)
'
+test_expect_success EXPENSIVE 'repack/expire with many packs' '
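+ # Repeatedly commit and pack loose objects to build up a large
+ # number of packfiles, then make sure the multi-pack-index can be
+ # written, repacked and expired on top of them.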
+ cp -r dup many &&
+ (
+ cd many &&
+
+ for i in $(test_seq 1 100)
+ do
+ test_commit extra$i &&
+ git maintenance run --task=loose-objects || return 1
+ done &&
+
+ git multi-pack-index write &&
+ git multi-pack-index repack &&
+ git multi-pack-index expire
+ )
+'
+
test_expect_success 'repack --batch-size=<large> repacks everything' '
(
cd dup2 &&
@@ -1083,7 +1100,10 @@ test_expect_success 'load reverse index when missing .idx, .pack' '
mv $idx.bak $idx &&
mv $pack $pack.bak &&
- git cat-file --batch-check="%(objectsize:disk)" <tip
+ git cat-file --batch-check="%(objectsize:disk)" <tip &&
+
+ test_must_fail git multi-pack-index write 2>err &&
+ test_grep "could not load pack" err
)
'
diff --git a/t/t5530-upload-pack-error.sh b/t/t5530-upload-pack-error.sh
index 558eedf25a..d40292cfb7 100755
--- a/t/t5530-upload-pack-error.sh
+++ b/t/t5530-upload-pack-error.sh
@@ -4,8 +4,6 @@ test_description='errors in upload-pack'
. ./test-lib.sh
-D=$(pwd)
-
corrupt_repo () {
object_sha1=$(git rev-parse "$1") &&
ob=$(expr "$object_sha1" : "\(..\)") &&
@@ -21,11 +19,7 @@ test_expect_success 'setup and corrupt repository' '
test_tick &&
echo changed >file &&
git commit -a -m changed &&
- corrupt_repo HEAD:file
-
-'
-
-test_expect_success 'fsck fails' '
+ corrupt_repo HEAD:file &&
test_must_fail git fsck
'
@@ -40,17 +34,12 @@ test_expect_success 'upload-pack fails due to error in pack-objects packing' '
'
test_expect_success 'corrupt repo differently' '
-
git hash-object -w file &&
- corrupt_repo HEAD^^{tree}
-
-'
-
-test_expect_success 'fsck fails' '
+ corrupt_repo HEAD^^{tree} &&
test_must_fail git fsck
'
-test_expect_success 'upload-pack fails due to error in rev-list' '
+test_expect_success 'upload-pack fails due to error in rev-list' '
printf "%04xwant %s\n%04xshallow %s00000009done\n0000" \
$(($hexsz + 10)) $(git rev-parse HEAD) \
$(($hexsz + 12)) $(git rev-parse HEAD^) >input &&
@@ -59,7 +48,6 @@ test_expect_success 'upload-pack fails due to error in rev-list' '
'
test_expect_success 'upload-pack fails due to bad want (no object)' '
-
printf "%04xwant %s multi_ack_detailed\n00000009done\n0000" \
$(($hexsz + 29)) $(test_oid deadbeef) >input &&
test_must_fail git upload-pack . <input >output 2>output.err &&
@@ -69,7 +57,6 @@ test_expect_success 'upload-pack fails due to bad want (no object)' '
'
test_expect_success 'upload-pack fails due to bad want (not tip)' '
-
oid=$(echo an object we have | git hash-object -w --stdin) &&
printf "%04xwant %s multi_ack_detailed\n00000009done\n0000" \
$(($hexsz + 29)) "$oid" >input &&
@@ -80,7 +67,6 @@ test_expect_success 'upload-pack fails due to bad want (not tip)' '
'
test_expect_success 'upload-pack fails due to error in pack-objects enumeration' '
-
printf "%04xwant %s\n00000009done\n0000" \
$((hexsz + 10)) $(git rev-parse HEAD) >input &&
test_must_fail git upload-pack . <input >/dev/null 2>output.err &&
@@ -105,18 +91,48 @@ test_expect_success 'upload-pack tolerates EOF just after stateless client wants
test_cmp expect actual
'
-test_expect_success 'create empty repository' '
-
- mkdir foo &&
- cd foo &&
- git init
-
-'
-
test_expect_success 'fetch fails' '
+ git init foo &&
+ test_must_fail git -C foo fetch .. main
+'
- test_must_fail git fetch .. main
+test_expect_success 'upload-pack ACKs repeated non-commit objects repeatedly (protocol v0)' '
+ commit_id=$(git rev-parse HEAD) &&
+ tree_id=$(git rev-parse HEAD^{tree}) &&
+ test-tool pkt-line pack >request <<-EOF &&
+ want $commit_id
+ 0000
+ have $tree_id
+ have $tree_id
+ 0000
+ EOF
+ git upload-pack --stateless-rpc . <request >actual &&
+ depacketize <actual >actual.raw &&
+ grep ^ACK actual.raw >actual.acks &&
+ cat >expect <<-EOF &&
+ ACK $tree_id
+ ACK $tree_id
+ EOF
+ test_cmp expect actual.acks
+'
+test_expect_success 'upload-pack ACKs repeated non-commit objects once only (protocol v2)' '
+ commit_id=$(git rev-parse HEAD) &&
+ tree_id=$(git rev-parse HEAD^{tree}) &&
+ test-tool pkt-line pack >request <<-EOF &&
+ command=fetch
+ object-format=$(test_oid algo)
+ 0001
+ want $commit_id
+ have $tree_id
+ have $tree_id
+ 0000
+ EOF
+ GIT_PROTOCOL=version=2 git upload-pack . <request >actual &&
+ depacketize <actual >actual.raw &&
+ grep ^ACK actual.raw >actual.acks &&
+ echo "ACK $tree_id" >expect &&
+ test_cmp expect actual.acks
'
test_done
diff --git a/t/t5564-http-proxy.sh b/t/t5564-http-proxy.sh
index b27e481f95..c3903faf2d 100755
--- a/t/t5564-http-proxy.sh
+++ b/t/t5564-http-proxy.sh
@@ -72,7 +72,9 @@ test_expect_success SOCKS_PROXY 'clone via Unix socket' '
test_when_finished "rm -rf clone" &&
test_config_global http.proxy "socks4://localhost$PWD/%2530.sock" && {
{
- GIT_TRACE_CURL=$PWD/trace git clone "$HTTPD_URL/smart/repo.git" clone 2>err &&
+ GIT_TRACE_CURL=$PWD/trace \
+ GIT_TRACE_CURL_COMPONENTS=socks \
+ git clone "$HTTPD_URL/smart/repo.git" clone 2>err &&
grep -i "SOCKS4 request granted" trace
} ||
old_libcurl_error err
diff --git a/t/t5710-promisor-remote-capability.sh b/t/t5710-promisor-remote-capability.sh
index cb061b1f35..023735d6a8 100755
--- a/t/t5710-promisor-remote-capability.sh
+++ b/t/t5710-promisor-remote-capability.sh
@@ -295,6 +295,71 @@ test_expect_success "clone with 'KnownUrl' and empty url, so not advertised" '
check_missing_objects server 1 "$oid"
'
+test_expect_success "clone with promisor.sendFields" '
+ git -C server config promisor.advertise true &&
+ test_when_finished "rm -rf client" &&
+
+ git -C server remote add otherLop "https://invalid.invalid" &&
+ git -C server config remote.otherLop.token "fooBar" &&
+ git -C server config remote.otherLop.stuff "baz" &&
+ git -C server config remote.otherLop.partialCloneFilter "blob:limit=10k" &&
+ test_when_finished "git -C server remote remove otherLop" &&
+ test_config -C server promisor.sendFields "partialCloneFilter, token" &&
+ test_when_finished "rm trace" &&
+
+ # Clone from server to create a client
+ GIT_TRACE_PACKET="$(pwd)/trace" GIT_NO_LAZY_FETCH=0 git clone \
+ -c remote.lop.promisor=true \
+ -c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
+ -c remote.lop.url="file://$(pwd)/lop" \
+ -c promisor.acceptfromserver=All \
+ --no-local --filter="blob:limit=5k" server client &&
+
+ # Check that fields are properly transmitted
+ ENCODED_URL=$(echo "file://$(pwd)/lop" | sed -e "s/ /%20/g") &&
+ PR1="name=lop,url=$ENCODED_URL,partialCloneFilter=blob:none" &&
+ PR2="name=otherLop,url=https://invalid.invalid,partialCloneFilter=blob:limit=10k,token=fooBar" &&
+ test_grep "clone< promisor-remote=$PR1;$PR2" trace &&
+ test_grep "clone> promisor-remote=lop;otherLop" trace &&
+
+ # Check that the largest object is still missing on the server
+ check_missing_objects server 1 "$oid"
+'
+
+test_expect_success "clone with promisor.checkFields" '
+ git -C server config promisor.advertise true &&
+ test_when_finished "rm -rf client" &&
+
+ git -C server remote add otherLop "https://invalid.invalid" &&
+ git -C server config remote.otherLop.token "fooBar" &&
+ git -C server config remote.otherLop.stuff "baz" &&
+ git -C server config remote.otherLop.partialCloneFilter "blob:limit=10k" &&
+ test_when_finished "git -C server remote remove otherLop" &&
+ test_config -C server promisor.sendFields "partialCloneFilter, token" &&
+ test_when_finished "rm trace" &&
+
+ # Clone from server to create a client
+ GIT_TRACE_PACKET="$(pwd)/trace" GIT_NO_LAZY_FETCH=0 git clone \
+ -c remote.lop.promisor=true \
+ -c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
+ -c remote.lop.url="file://$(pwd)/lop" \
+ -c remote.lop.partialCloneFilter="blob:none" \
+ -c promisor.acceptfromserver=All \
+ -c promisor.checkFields=partialcloneFilter \
+ --no-local --filter="blob:limit=5k" server client &&
+
+ # Check that fields are properly transmitted
+ ENCODED_URL=$(echo "file://$(pwd)/lop" | sed -e "s/ /%20/g") &&
+ PR1="name=lop,url=$ENCODED_URL,partialCloneFilter=blob:none" &&
+ PR2="name=otherLop,url=https://invalid.invalid,partialCloneFilter=blob:limit=10k,token=fooBar" &&
+ test_grep "clone< promisor-remote=$PR1;$PR2" trace &&
+ test_grep "clone> promisor-remote=lop" trace &&
+ test_grep ! "clone> promisor-remote=lop;otherLop" trace &&
+
+ # Check that the largest object is still missing on the server
+ check_missing_objects server 1 "$oid"
+'
+
test_expect_success "clone with promisor.advertise set to 'true' but don't delete the client" '
git -C server config promisor.advertise true &&
diff --git a/t/t7502-commit-porcelain.sh b/t/t7502-commit-porcelain.sh
index b37e2018a7..05f6da4ad9 100755
--- a/t/t7502-commit-porcelain.sh
+++ b/t/t7502-commit-porcelain.sh
@@ -956,13 +956,39 @@ test_expect_success 'commit --status with custom comment character' '
test_grep "^; Changes to be committed:" .git/COMMIT_EDITMSG
'
-test_expect_success 'switch core.commentchar' '
+test_expect_success !WITH_BREAKING_CHANGES 'switch core.commentchar' '
test_commit "#foo" foo &&
- GIT_EDITOR=.git/FAKE_EDITOR git -c core.commentChar=auto commit --amend &&
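+ # Set core.commentChar/commentString both in an included file and
+ # in the main config so that the deprecation hint has to list every
+ # location that needs to be unset.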
+ cat >config-include <<-\EOF &&
+ [core]
+ commentString=:
+ commentString=%
+ commentChar=auto
+ EOF
+ test_when_finished "rm config-include" &&
+ test_config include.path "$(pwd)/config-include" &&
+ test_config core.commentChar ! &&
+ GIT_EDITOR=.git/FAKE_EDITOR git commit --amend 2>err &&
+ sed -n "s/^hint: *\$//p; s/^hint: //p; s/^warning: //p" err >actual &&
+ cat >expect <<-EOF &&
+ Support for ${SQ}core.commentChar=auto${SQ} is deprecated and will be removed in Git 3.0
+
+ To use the default comment string (#) please run
+
+ git config unset core.commentChar
+ git config unset --file ~/config-include --all core.commentString
+ git config unset --file ~/config-include core.commentChar
+
+ To set a custom comment string please run
+
+ git config set --file ~/config-include core.commentChar <comment string>
+
+ where ${SQ}<comment string>${SQ} is the string you wish to use.
+ EOF
+ test_cmp expect actual &&
test_grep "^; Changes to be committed:" .git/COMMIT_EDITMSG
'
-test_expect_success 'switch core.commentchar but out of options' '
+test_expect_success !WITH_BREAKING_CHANGES 'switch core.commentchar but out of options' '
cat >text <<\EOF &&
# 1
; 2
@@ -982,4 +1008,24 @@ EOF
)
'
+test_expect_success WITH_BREAKING_CHANGES 'core.commentChar=auto is rejected' '
+ test_config core.commentChar auto &&
+ test_must_fail git rev-parse --git-dir 2>err &&
+ sed -n "s/^hint: *\$//p; s/^hint: //p; s/^fatal: //p" err >actual &&
+ cat >expect <<-EOF &&
+ Support for ${SQ}core.commentChar=auto${SQ} has been removed in Git 3.0
+
+ To use the default comment string (#) please run
+
+ git config unset core.commentChar
+
+ To set a custom comment string please run
+
+ git config set core.commentChar <comment string>
+
+ where ${SQ}<comment string>${SQ} is the string you wish to use.
+ EOF
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh
index 611755cc13..73b78bdd88 100755
--- a/t/t7700-repack.sh
+++ b/t/t7700-repack.sh
@@ -838,4 +838,67 @@ test_expect_success '-n overrides repack.updateServerInfo=true' '
test_server_info_missing
'
+test_expect_success 'pending objects are repacked appropriately' '
+ test_when_finished rm -rf pending &&
+ git init pending &&
+
+ (
+ cd pending &&
+
+ # Commit file, a/b/c and never change them.
+ mkdir -p a/b &&
+ echo singleton >file &&
+ echo stuff >a/b/c &&
+ echo more >a/d &&
+ git add file a &&
+ git commit -m "single blobs" &&
+
+ # Files a/d and a/e will not be singletons.
+ echo d >a/d &&
+ echo e >a/e &&
+ git add a &&
+ git commit -m "more blobs" &&
+
+ # This use of a sparse index helps to ensure
+ # that the cache-tree is walked, too.
+ git sparse-checkout set --sparse-index a x &&
+
+ # Create staged changes:
+ # * a/e now has multiple versions.
+ # * a/i now has only one version.
+ echo f >a/d &&
+ echo h >a/e &&
+ echo i >a/i &&
+ git add a &&
+
+ # Stage and unstage a change to exercise the resolve-undo
+ # cache and check how that impacts fsck.
+ mkdir x &&
+ echo y >x/y &&
+ git add x &&
+ xy=$(git rev-parse :x/y) &&
+ git rm --cached x/y &&
+
+ # The blob for x/y must persist through repacks,
+ # but fsck currently ignores the REUC extension
+ # for finding links to the blob.
+ cat >expect <<-EOF &&
+ dangling blob $xy
+ EOF
+
+ # Bring the loose objects into a packfile to avoid
+ # leftovers in the next test. Without this, the loose
+ # objects persist and the test succeeds for other
+ # reasons.
+ git repack -adf &&
+ git fsck >out &&
+ test_cmp expect out &&
+
+ # Test path walk version with pack.useSparse.
+ git -c pack.useSparse=true repack -adf --path-walk &&
+ git fsck >out &&
+ test_cmp expect out
+ )
+'
+
test_done
diff --git a/t/t8020-last-modified.sh b/t/t8020-last-modified.sh
new file mode 100755
index 0000000000..5eb4cef035
--- /dev/null
+++ b/t/t8020-last-modified.sh
@@ -0,0 +1,210 @@
+#!/bin/sh
+
+test_description='last-modified tests'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit 1 file &&
+ mkdir a &&
+ test_commit 2 a/file &&
+ mkdir a/b &&
+ test_commit 3 a/b/file
+'
+
+test_expect_success 'cannot run last-modified on two trees' '
+ test_must_fail git last-modified HEAD HEAD~1
+'
+
+check_last_modified() {
+ local indir= &&
+ while test $# != 0
+ do
+ case "$1" in
+ -C)
+ indir="$2"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac &&
+ shift
+ done &&
+
+ cat >expect &&
+ test_when_finished "rm -f tmp.*" &&
+ git ${indir:+-C "$indir"} last-modified "$@" >tmp.1 &&
+ git name-rev --annotate-stdin --name-only --tags \
+ <tmp.1 >tmp.2 &&
+ tr '\t' ' ' <tmp.2 >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'last-modified non-recursive' '
+ check_last_modified <<-\EOF
+ 3 a
+ 1 file
+ EOF
+'
+
+test_expect_success 'last-modified recursive' '
+ check_last_modified -r <<-\EOF
+ 3 a/b/file
+ 2 a/file
+ 1 file
+ EOF
+'
+
+test_expect_success 'last-modified recursive with show-trees' '
+ check_last_modified -r -t <<-\EOF
+ 3 a
+ 3 a/b
+ 3 a/b/file
+ 2 a/file
+ 1 file
+ EOF
+'
+
+test_expect_success 'last-modified non-recursive with show-trees' '
+ check_last_modified -t <<-\EOF
+ 3 a
+ 1 file
+ EOF
+'
+
+test_expect_success 'last-modified subdir' '
+ check_last_modified a <<-\EOF
+ 3 a
+ EOF
+'
+
+test_expect_success 'last-modified subdir recursive' '
+ check_last_modified -r a <<-\EOF
+ 3 a/b/file
+ 2 a/file
+ EOF
+'
+
+test_expect_success 'last-modified from non-HEAD commit' '
+ check_last_modified HEAD^ <<-\EOF
+ 2 a
+ 1 file
+ EOF
+'
+
+test_expect_success 'last-modified from subdir defaults to root' '
+ check_last_modified -C a <<-\EOF
+ 3 a
+ 1 file
+ EOF
+'
+
+test_expect_success 'last-modified from subdir uses relative pathspecs' '
+ check_last_modified -C a -r b <<-\EOF
+ 3 a/b/file
+ EOF
+'
+
+test_expect_success 'limit last-modified traversal by count' '
+ check_last_modified -1 <<-\EOF
+ 3 a
+ ^2 file
+ EOF
+'
+
+test_expect_success 'limit last-modified traversal by commit' '
+ check_last_modified HEAD~2..HEAD <<-\EOF
+ 3 a
+ ^1 file
+ EOF
+'
+
+test_expect_success 'only last-modified files in the current tree' '
+ git rm -rf a &&
+ git commit -m "remove a" &&
+ check_last_modified <<-\EOF
+ 1 file
+ EOF
+'
+
+test_expect_success 'cross merge boundaries in blaming' '
+ git checkout HEAD^0 &&
+ git rm -rf . &&
+ test_commit m1 &&
+ git checkout HEAD^ &&
+ git rm -rf . &&
+ test_commit m2 &&
+ git merge m1 &&
+ check_last_modified <<-\EOF
+ m2 m2.t
+ m1 m1.t
+ EOF
+'
+
+test_expect_success 'last-modified merge for resolved conflicts' '
+ git checkout HEAD^0 &&
+ git rm -rf . &&
+ test_commit c1 conflict &&
+ git checkout HEAD^ &&
+ git rm -rf . &&
+ test_commit c2 conflict &&
+ test_must_fail git merge c1 &&
+ test_commit resolved conflict &&
+ check_last_modified conflict <<-\EOF
+ resolved conflict
+ EOF
+'
+
+
+# Consider `file` with this content through history:
+#
+# A---B---B-------B---B
+#          \     /
+#           C---D
+test_expect_success 'last-modified merge ignores content from branch' '
+ git checkout HEAD^0 &&
+ git rm -rf . &&
+ test_commit a1 file A &&
+ test_commit a2 file B &&
+ test_commit a3 file C &&
+ test_commit a4 file D &&
+ git checkout a2 &&
+ git merge --no-commit --no-ff a4 &&
+ git checkout a2 -- file &&
+ git merge --continue &&
+ check_last_modified <<-\EOF
+ a2 file
+ EOF
+'
+
+# Consider `file` with this content through history:
+#
+# A---B---B---C---D---B---B
+#          \         /
+#           B-------B
+test_expect_success 'last-modified merge undoes changes' '
+ git checkout HEAD^0 &&
+ git rm -rf . &&
+ test_commit b1 file A &&
+ test_commit b2 file B &&
+ test_commit b3 file C &&
+ test_commit b4 file D &&
+ git checkout b2 &&
+ test_commit b5 file2 2 &&
+ git checkout b4 &&
+ git merge --no-commit --no-ff b5 &&
+ git checkout b2 -- file &&
+ git merge --continue &&
+ check_last_modified <<-\EOF
+ b5 file2
+ b2 file
+ EOF
+'
+
+test_expect_success 'last-modified complains about unknown arguments' '
+ test_must_fail git last-modified --foo 2>err &&
+ grep "unknown last-modified argument: --foo" err
+'
+
+test_done
diff --git a/t/unit-tests/clar/.github/workflows/ci.yml b/t/unit-tests/clar/.github/workflows/ci.yml
index 0065843d17..4d4724222c 100644
--- a/t/unit-tests/clar/.github/workflows/ci.yml
+++ b/t/unit-tests/clar/.github/workflows/ci.yml
@@ -13,23 +13,56 @@ jobs:
platform:
- os: ubuntu-latest
generator: Unix Makefiles
+ env:
+ CFLAGS: "-Werror -Wall -Wextra"
+ - os: ubuntu-latest
+ generator: Unix Makefiles
+ env:
+ CC: "clang"
+ CFLAGS: "-Werror -Wall -Wextra -fsanitize=leak"
+ - os: ubuntu-latest
+ generator: Unix Makefiles
+ image: i386/debian:latest
+ env:
+ CFLAGS: "-Werror -Wall -Wextra"
- os: macos-latest
generator: Unix Makefiles
+ env:
+ CFLAGS: "-Werror -Wall -Wextra"
- os: windows-latest
generator: Visual Studio 17 2022
- os: windows-latest
generator: MSYS Makefiles
+ env:
+ CFLAGS: "-Werror -Wall -Wextra"
- os: windows-latest
generator: MinGW Makefiles
+ env:
+ CFLAGS: "-Werror -Wall -Wextra"
+ fail-fast: false
runs-on: ${{ matrix.platform.os }}
+ container: ${{matrix.platform.image}}
+
+ env:
+ CC: ${{matrix.platform.env.CC}}
+ CFLAGS: ${{matrix.platform.env.CFLAGS}}
steps:
+ - name: Prepare 32 bit container image
+ if: matrix.platform.image == 'i386/debian:latest'
+ run: apt -q update && apt -q -y install cmake gcc libc6-amd64 lib64stdc++6 make python3
- name: Check out
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Build
+ shell: bash
run: |
mkdir build
cd build
cmake .. -G "${{matrix.platform.generator}}"
- cmake --build .
+ cmake --build . --verbose
+ - name: Test
+ shell: bash
+ run: |
+ cd build
+ CTEST_OUTPUT_ON_FAILURE=1 ctest --build-config Debug
diff --git a/t/unit-tests/clar/CMakeLists.txt b/t/unit-tests/clar/CMakeLists.txt
index 12d4af114f..125db05bc1 100644
--- a/t/unit-tests/clar/CMakeLists.txt
+++ b/t/unit-tests/clar/CMakeLists.txt
@@ -1,8 +1,15 @@
+include(CheckFunctionExists)
+
cmake_minimum_required(VERSION 3.16..3.29)
project(clar LANGUAGES C)
-option(BUILD_TESTS "Build test executable" ON)
+option(BUILD_EXAMPLE "Build the example." ON)
+
+check_function_exists(realpath CLAR_HAS_REALPATH)
+if(CLAR_HAS_REALPATH)
+ add_compile_definitions(-DCLAR_HAS_REALPATH)
+endif()
add_library(clar INTERFACE)
target_sources(clar INTERFACE
@@ -25,4 +32,8 @@ if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME)
if(BUILD_TESTING)
add_subdirectory(test)
endif()
+
+ if(BUILD_EXAMPLE)
+ add_subdirectory(example)
+ endif()
endif()
diff --git a/t/unit-tests/clar/README.md b/t/unit-tests/clar/README.md
index a8961c5f10..41595989ca 100644
--- a/t/unit-tests/clar/README.md
+++ b/t/unit-tests/clar/README.md
@@ -26,8 +26,7 @@ Can you count to funk?
~~~~ sh
$ mkdir tests
$ cp -r $CLAR_ROOT/clar* tests
- $ cp $CLAR_ROOT/test/clar_test.h tests
- $ cp $CLAR_ROOT/test/main.c.sample tests/main.c
+ $ cp $CLAR_ROOT/example/*.c tests
~~~~
- **One: Write some tests**
@@ -147,7 +146,7 @@ To use Clar:
1. copy the Clar boilerplate to your test directory
2. copy (and probably modify) the sample `main.c` (from
- `$CLAR_PATH/test/main.c.sample`)
+ `$CLAR_PATH/example/main.c`)
3. run the Clar mixer (a.k.a. `generate.py`) to scan your test directory and
write out the test suite metadata.
4. compile your test files and the Clar boilerplate into a single test
@@ -159,7 +158,7 @@ The Clar boilerplate gives you a set of useful test assertions and features
the `clar.c` and `clar.h` files, plus the code in the `clar/` subdirectory.
You should not need to edit these files.
-The sample `main.c` (i.e. `$CLAR_PATH/test/main.c.sample`) file invokes
+The sample `main.c` (i.e. `$CLAR_PATH/example/main.c`) file invokes
`clar_test(argc, argv)` to run the tests. Usually, you will edit this file
to perform any framework specific initialization and teardown that you need.
@@ -251,11 +250,16 @@ suite.
- `cl_fixture(const char *)`: Gets the full path to a fixture file.
-Please do note that these methods are *always* available whilst running a
-test, even when calling auxiliary/static functions inside the same file.
+### Auxiliary / helper functions
-It's strongly encouraged to perform test assertions in auxiliary methods,
-instead of returning error values. This is considered good Clar style.
+The clar API is always available while running a test, even when calling
+"auxiliary" (helper) functions.
+
+You're encouraged to perform test assertions in those auxiliary
+methods, instead of returning error values. This is considered good
+Clar style. _However_, when you do this, you need to call `cl_invoke`
+to preserve the current state; this ensures that failures are reported
+as coming from the actual test, instead of the auxiliary method.
Style Example:
@@ -310,20 +314,19 @@ static void check_string(const char *str)
void test_example__a_test_with_auxiliary_methods(void)
{
- check_string("foo");
- check_string("bar");
+ cl_invoke(check_string("foo"));
+ cl_invoke(check_string("bar"));
}
~~~~
About Clar
==========
-Clar has been written from scratch by [Vicent Martí](https://github.com/vmg),
-to replace the old testing framework in [libgit2][libgit2].
-
-Do you know what languages are *in* on the SF startup scene? Node.js *and*
-Latin. Follow [@vmg](https://www.twitter.com/vmg) on Twitter to
-receive more lessons on word etymology. You can be hip too.
-
+Clar was originally written by [Vicent Martí](https://github.com/vmg),
+to replace the old testing framework in [libgit2][libgit2]. It is
+currently maintained by [Edward Thomson](https://github.com/ethomson),
+and used by the [libgit2][libgit2] and [git][git] projects, amongst
+others.
[libgit2]: https://github.com/libgit2/libgit2
+[git]: https://github.com/git/git
diff --git a/t/unit-tests/clar/clar.c b/t/unit-tests/clar/clar.c
index 03a3aa8e87..d6176e50b2 100644
--- a/t/unit-tests/clar/clar.c
+++ b/t/unit-tests/clar/clar.c
@@ -79,6 +79,8 @@
# else
# define p_snprintf snprintf
# endif
+
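+/* Windows lacks localtime_r(); emulate it with localtime_s(), which
+ * swaps the argument order and returns 0 on success. */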
+# define localtime_r(timer, buf) (localtime_s(buf, timer) == 0 ? buf : NULL)
#else
# include <sys/wait.h> /* waitpid(2) */
# include <unistd.h>
@@ -150,7 +152,6 @@ static struct {
enum cl_output_format output_format;
- int report_errors_only;
int exit_on_error;
int verbosity;
@@ -164,6 +165,10 @@ static struct {
struct clar_report *reports;
struct clar_report *last_report;
+ const char *invoke_file;
+ const char *invoke_func;
+ size_t invoke_line;
+
void (*local_cleanup)(void *);
void *local_cleanup_payload;
@@ -190,7 +195,7 @@ struct clar_suite {
};
/* From clar_print_*.c */
-static void clar_print_init(int test_count, int suite_count, const char *suite_names);
+static void clar_print_init(int test_count, int suite_count);
static void clar_print_shutdown(int test_count, int suite_count, int error_count);
static void clar_print_error(int num, const struct clar_report *report, const struct clar_error *error);
static void clar_print_ontest(const char *suite_name, const char *test_name, int test_number, enum cl_test_status failed);
@@ -199,8 +204,10 @@ static void clar_print_onabortv(const char *msg, va_list argp);
static void clar_print_onabort(const char *msg, ...);
/* From clar_sandbox.c */
-static void clar_unsandbox(void);
-static void clar_sandbox(void);
+static void clar_tempdir_init(void);
+static void clar_tempdir_shutdown(void);
+static int clar_sandbox_create(const char *suite_name, const char *test_name);
+static int clar_sandbox_cleanup(void);
/* From summary.h */
static struct clar_summary *clar_summary_init(const char *filename);
@@ -304,6 +311,8 @@ clar_run_test(
CL_TRACE(CL_TRACE__TEST__BEGIN);
+ clar_sandbox_create(suite->name, test->name);
+
_clar.last_report->start = time(NULL);
clar_time_now(&start);
@@ -328,9 +337,13 @@ clar_run_test(
if (_clar.local_cleanup != NULL)
_clar.local_cleanup(_clar.local_cleanup_payload);
+ clar__clear_invokepoint();
+
if (cleanup->ptr != NULL)
cleanup->ptr();
+ clar_sandbox_cleanup();
+
CL_TRACE(CL_TRACE__TEST__END);
_clar.tests_ran++;
@@ -339,11 +352,7 @@ clar_run_test(
_clar.local_cleanup = NULL;
_clar.local_cleanup_payload = NULL;
- if (_clar.report_errors_only) {
- clar_report_errors(_clar.last_report);
- } else {
- clar_print_ontest(suite->name, test->name, _clar.tests_ran, _clar.last_report->status);
- }
+ clar_print_ontest(suite->name, test->name, _clar.tests_ran, _clar.last_report->status);
}
static void
@@ -360,8 +369,7 @@ clar_run_suite(const struct clar_suite *suite, const char *filter)
if (_clar.exit_on_error && _clar.total_errors)
return;
- if (!_clar.report_errors_only)
- clar_print_onsuite(suite->name, ++_clar.suites_ran);
+ clar_print_onsuite(suite->name, ++_clar.suites_ran);
_clar.active_suite = suite->name;
_clar.active_test = NULL;
@@ -428,12 +436,12 @@ clar_usage(const char *arg)
printf(" -iname Include the suite with `name`\n");
printf(" -xname Exclude the suite with `name`\n");
printf(" -v Increase verbosity (show suite names)\n");
- printf(" -q Only report tests that had an error\n");
+ printf(" -q Decrease verbosity, inverse to -v\n");
printf(" -Q Quit as soon as a test fails\n");
printf(" -t Display results in tap format\n");
printf(" -l Print suite names\n");
printf(" -r[filename] Write summary file (to the optional filename)\n");
- exit(-1);
+ exit(1);
}
static void
@@ -441,18 +449,11 @@ clar_parse_args(int argc, char **argv)
{
int i;
- /* Verify options before execute */
for (i = 1; i < argc; ++i) {
char *argument = argv[i];
- if (argument[0] != '-' || argument[1] == '\0'
- || strchr("sixvqQtlr", argument[1]) == NULL) {
+ if (argument[0] != '-' || argument[1] == '\0')
clar_usage(argv[0]);
- }
- }
-
- for (i = 1; i < argc; ++i) {
- char *argument = argv[i];
switch (argument[1]) {
case 's':
@@ -465,8 +466,13 @@ clar_parse_args(int argc, char **argv)
argument += offset;
arglen = strlen(argument);
- if (arglen == 0)
- clar_usage(argv[0]);
+ if (arglen == 0) {
+ if (i + 1 == argc)
+ clar_usage(argv[0]);
+
+ argument = argv[++i];
+ arglen = strlen(argument);
+ }
for (j = 0; j < _clar_suite_count; ++j) {
suitelen = strlen(_clar_suites[j].name);
@@ -483,9 +489,6 @@ clar_parse_args(int argc, char **argv)
++found;
- if (!exact)
- _clar.verbosity = MAX(_clar.verbosity, 1);
-
switch (action) {
case 's': {
struct clar_explicit *explicit;
@@ -517,23 +520,37 @@ clar_parse_args(int argc, char **argv)
if (!found)
clar_abort("No suite matching '%s' found.\n", argument);
+
break;
}
case 'q':
- _clar.report_errors_only = 1;
+ if (argument[2] != '\0')
+ clar_usage(argv[0]);
+
+ _clar.verbosity--;
break;
case 'Q':
+ if (argument[2] != '\0')
+ clar_usage(argv[0]);
+
_clar.exit_on_error = 1;
break;
case 't':
+ if (argument[2] != '\0')
+ clar_usage(argv[0]);
+
_clar.output_format = CL_OUTPUT_TAP;
break;
case 'l': {
size_t j;
+
+ if (argument[2] != '\0')
+ clar_usage(argv[0]);
+
printf("Test suites (use -s<name> to run just one):\n");
for (j = 0; j < _clar_suite_count; ++j)
printf(" %3d: %s\n", (int)j, _clar_suites[j].name);
@@ -542,23 +559,27 @@ clar_parse_args(int argc, char **argv)
}
case 'v':
+ if (argument[2] != '\0')
+ clar_usage(argv[0]);
+
_clar.verbosity++;
break;
case 'r':
_clar.write_summary = 1;
free(_clar.summary_filename);
+
if (*(argument + 2)) {
if ((_clar.summary_filename = strdup(argument + 2)) == NULL)
clar_abort("Failed to allocate summary filename.\n");
} else {
_clar.summary_filename = NULL;
}
+
break;
default:
- clar_abort("Unexpected commandline argument '%s'.\n",
- argument[1]);
+ clar_usage(argv[0]);
}
}
}
@@ -571,11 +592,7 @@ clar_test_init(int argc, char **argv)
if (argc > 1)
clar_parse_args(argc, argv);
- clar_print_init(
- (int)_clar_callback_count,
- (int)_clar_suite_count,
- ""
- );
+ clar_print_init((int)_clar_callback_count, (int)_clar_suite_count);
if (!_clar.summary_filename &&
(summary_env = getenv("CLAR_SUMMARY")) != NULL) {
@@ -591,7 +608,7 @@ clar_test_init(int argc, char **argv)
if (_clar.write_summary)
_clar.summary = clar_summary_init(_clar.summary_filename);
- clar_sandbox();
+ clar_tempdir_init();
}
int
@@ -623,7 +640,7 @@ clar_test_shutdown(void)
_clar.total_errors
);
- clar_unsandbox();
+ clar_tempdir_shutdown();
if (_clar.write_summary && clar_summary_shutdown(_clar.summary) < 0)
clar_abort("Failed to write the summary file '%s: %s.\n",
@@ -635,6 +652,14 @@ clar_test_shutdown(void)
}
for (report = _clar.reports; report; report = report_next) {
+ struct clar_error *error, *error_next;
+
+ for (error = report->errors; error; error = error_next) {
+ free(error->description);
+ error_next = error->next;
+ free(error);
+ }
+
report_next = report->next;
free(report);
}
@@ -660,7 +685,7 @@ static void abort_test(void)
clar_print_onabort(
"Fatal error: a cleanup method raised an exception.\n");
clar_report_errors(_clar.last_report);
- exit(-1);
+ exit(1);
}
CL_TRACE(CL_TRACE__TEST__LONGJMP);
@@ -695,9 +720,9 @@ void clar__fail(
_clar.last_report->last_error = error;
- error->file = file;
- error->function = function;
- error->line_number = line;
+ error->file = _clar.invoke_file ? _clar.invoke_file : file;
+ error->function = _clar.invoke_func ? _clar.invoke_func : function;
+ error->line_number = _clar.invoke_line ? _clar.invoke_line : line;
error->error_msg = error_msg;
if (description != NULL &&
@@ -754,7 +779,12 @@ void clar__assert_equal(
p_snprintf(buf, sizeof(buf), "'%s' != '%s' (at byte %d)",
s1, s2, pos);
} else {
- p_snprintf(buf, sizeof(buf), "'%s' != '%s'", s1, s2);
+ const char *q1 = s1 ? "'" : "";
+ const char *q2 = s2 ? "'" : "";
+ s1 = s1 ? s1 : "NULL";
+ s2 = s2 ? s2 : "NULL";
+ p_snprintf(buf, sizeof(buf), "%s%s%s != %s%s%s",
+ q1, s1, q1, q2, s2, q2);
}
}
}
@@ -767,12 +797,17 @@ void clar__assert_equal(
if (!is_equal) {
if (s1 && s2) {
int pos;
- for (pos = 0; s1[pos] == s2[pos] && pos < len; ++pos)
+ for (pos = 0; pos < len && s1[pos] == s2[pos]; ++pos)
/* find differing byte offset */;
p_snprintf(buf, sizeof(buf), "'%.*s' != '%.*s' (at byte %d)",
len, s1, len, s2, pos);
} else {
- p_snprintf(buf, sizeof(buf), "'%.*s' != '%.*s'", len, s1, len, s2);
+ const char *q1 = s1 ? "'" : "";
+ const char *q2 = s2 ? "'" : "";
+ s1 = s1 ? s1 : "NULL";
+ s2 = s2 ? s2 : "NULL";
+ p_snprintf(buf, sizeof(buf), "%s%.*s%s != %s%.*s%s",
+ q1, len, s1, q1, q2, len, s2, q2);
}
}
}
@@ -790,7 +825,12 @@ void clar__assert_equal(
p_snprintf(buf, sizeof(buf), "'%ls' != '%ls' (at byte %d)",
wcs1, wcs2, pos);
} else {
- p_snprintf(buf, sizeof(buf), "'%ls' != '%ls'", wcs1, wcs2);
+ const char *q1 = wcs1 ? "'" : "";
+ const char *q2 = wcs2 ? "'" : "";
+ wcs1 = wcs1 ? wcs1 : L"NULL";
+ wcs2 = wcs2 ? wcs2 : L"NULL";
+ p_snprintf(buf, sizeof(buf), "%s%ls%s != %s%ls%s",
+ q1, wcs1, q1, q2, wcs2, q2);
}
}
}
@@ -803,12 +843,17 @@ void clar__assert_equal(
if (!is_equal) {
if (wcs1 && wcs2) {
int pos;
- for (pos = 0; wcs1[pos] == wcs2[pos] && pos < len; ++pos)
+ for (pos = 0; pos < len && wcs1[pos] == wcs2[pos]; ++pos)
/* find differing byte offset */;
p_snprintf(buf, sizeof(buf), "'%.*ls' != '%.*ls' (at byte %d)",
len, wcs1, len, wcs2, pos);
} else {
- p_snprintf(buf, sizeof(buf), "'%.*ls' != '%.*ls'", len, wcs1, len, wcs2);
+ const char *q1 = wcs1 ? "'" : "";
+ const char *q2 = wcs2 ? "'" : "";
+ wcs1 = wcs1 ? wcs1 : L"NULL";
+ wcs2 = wcs2 ? wcs2 : L"NULL";
+ p_snprintf(buf, sizeof(buf), "%s%.*ls%s != %s%.*ls%s",
+ q1, len, wcs1, q1, q2, len, wcs2, q2);
}
}
}
@@ -850,6 +895,23 @@ void cl_set_cleanup(void (*cleanup)(void *), void *opaque)
_clar.local_cleanup_payload = opaque;
}
+void clar__set_invokepoint(
+ const char *file,
+ const char *func,
+ size_t line)
+{
+ _clar.invoke_file = file;
+ _clar.invoke_func = func;
+ _clar.invoke_line = line;
+}
+
+void clar__clear_invokepoint(void)
+{
+ _clar.invoke_file = NULL;
+ _clar.invoke_func = NULL;
+ _clar.invoke_line = 0;
+}
+
#include "clar/sandbox.h"
#include "clar/fixtures.h"
#include "clar/fs.h"
diff --git a/t/unit-tests/clar/clar.h b/t/unit-tests/clar/clar.h
index 8c22382bd5..ca72292ae9 100644
--- a/t/unit-tests/clar/clar.h
+++ b/t/unit-tests/clar/clar.h
@@ -8,6 +8,25 @@
#define __CLAR_TEST_H__
#include <stdlib.h>
+#include <limits.h>
+
+#if defined(_WIN32) && defined(CLAR_WIN32_LONGPATHS)
+# define CLAR_MAX_PATH 4096
+#elif defined(_WIN32)
+# define CLAR_MAX_PATH MAX_PATH
+#else
+# define CLAR_MAX_PATH PATH_MAX
+#endif
+
+#ifndef CLAR_SELFTEST
+# define CLAR_CURRENT_FILE __FILE__
+# define CLAR_CURRENT_LINE __LINE__
+# define CLAR_CURRENT_FUNC __func__
+#else
+# define CLAR_CURRENT_FILE "file"
+# define CLAR_CURRENT_LINE 42
+# define CLAR_CURRENT_FUNC "func"
+#endif
enum cl_test_status {
CL_TEST_OK,
@@ -30,6 +49,7 @@ void clar_test_shutdown(void);
int clar_test(int argc, char *argv[]);
const char *clar_sandbox_path(void);
+const char *clar_tempdir_path(void);
void cl_set_cleanup(void (*cleanup)(void *), void *opaque);
void cl_fs_cleanup(void);
@@ -84,18 +104,32 @@ const char *cl_fixture_basename(const char *fixture_name);
#endif
/**
+ * Invoke a helper function, which itself will use `cl_assert`
+ * constructs. This will preserve the stack information of the
+ * current call point, so that function name and line number
+ * information is shown from the line of the test, instead of
+ * the helper function.
+ */
+#define cl_invoke(expr) \
+ do { \
+ clar__set_invokepoint(CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE); \
+ expr; \
+ clar__clear_invokepoint(); \
+ } while(0)
+
+/**
* Assertion macros with explicit error message
*/
-#define cl_must_pass_(expr, desc) clar__assert((expr) >= 0, __FILE__, __func__, __LINE__, "Function call failed: " #expr, desc, 1)
-#define cl_must_fail_(expr, desc) clar__assert((expr) < 0, __FILE__, __func__, __LINE__, "Expected function call to fail: " #expr, desc, 1)
-#define cl_assert_(expr, desc) clar__assert((expr) != 0, __FILE__, __func__, __LINE__, "Expression is not true: " #expr, desc, 1)
+#define cl_must_pass_(expr, desc) clar__assert((expr) >= 0, CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Function call failed: " #expr, desc, 1)
+#define cl_must_fail_(expr, desc) clar__assert((expr) < 0, CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Expected function call to fail: " #expr, desc, 1)
+#define cl_assert_(expr, desc) clar__assert((expr) != 0, CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Expression is not true: " #expr, desc, 1)
/**
* Check macros with explicit error message
*/
-#define cl_check_pass_(expr, desc) clar__assert((expr) >= 0, __FILE__, __func__, __LINE__, "Function call failed: " #expr, desc, 0)
-#define cl_check_fail_(expr, desc) clar__assert((expr) < 0, __FILE__, __func__, __LINE__, "Expected function call to fail: " #expr, desc, 0)
-#define cl_check_(expr, desc) clar__assert((expr) != 0, __FILE__, __func__, __LINE__, "Expression is not true: " #expr, desc, 0)
+#define cl_check_pass_(expr, desc) clar__assert((expr) >= 0, CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Function call failed: " #expr, desc, 0)
+#define cl_check_fail_(expr, desc) clar__assert((expr) < 0, CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Expected function call to fail: " #expr, desc, 0)
+#define cl_check_(expr, desc) clar__assert((expr) != 0, CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Expression is not true: " #expr, desc, 0)
/**
* Assertion macros with no error message
@@ -114,33 +148,33 @@ const char *cl_fixture_basename(const char *fixture_name);
/**
* Forced failure/warning
*/
-#define cl_fail(desc) clar__fail(__FILE__, __func__, __LINE__, "Test failed.", desc, 1)
-#define cl_warning(desc) clar__fail(__FILE__, __func__, __LINE__, "Warning during test execution:", desc, 0)
+#define cl_fail(desc) clar__fail(CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Test failed.", desc, 1)
+#define cl_warning(desc) clar__fail(CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC, CLAR_CURRENT_LINE, "Warning during test execution:", desc, 0)
#define cl_skip() clar__skip()
/**
* Typed assertion macros
*/
-#define cl_assert_equal_s(s1,s2) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #s1 " != " #s2, 1, "%s", (s1), (s2))
-#define cl_assert_equal_s_(s1,s2,note) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #s1 " != " #s2 " (" #note ")", 1, "%s", (s1), (s2))
+#define cl_assert_equal_s(s1,s2) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #s1 " != " #s2, 1, "%s", (s1), (s2))
+#define cl_assert_equal_s_(s1,s2,note) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #s1 " != " #s2 " (" #note ")", 1, "%s", (s1), (s2))
-#define cl_assert_equal_wcs(wcs1,wcs2) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #wcs1 " != " #wcs2, 1, "%ls", (wcs1), (wcs2))
-#define cl_assert_equal_wcs_(wcs1,wcs2,note) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #wcs1 " != " #wcs2 " (" #note ")", 1, "%ls", (wcs1), (wcs2))
+#define cl_assert_equal_wcs(wcs1,wcs2) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #wcs1 " != " #wcs2, 1, "%ls", (wcs1), (wcs2))
+#define cl_assert_equal_wcs_(wcs1,wcs2,note) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #wcs1 " != " #wcs2 " (" #note ")", 1, "%ls", (wcs1), (wcs2))
-#define cl_assert_equal_strn(s1,s2,len) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #s1 " != " #s2, 1, "%.*s", (s1), (s2), (int)(len))
-#define cl_assert_equal_strn_(s1,s2,len,note) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #s1 " != " #s2 " (" #note ")", 1, "%.*s", (s1), (s2), (int)(len))
+#define cl_assert_equal_strn(s1,s2,len) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #s1 " != " #s2, 1, "%.*s", (s1), (s2), (int)(len))
+#define cl_assert_equal_strn_(s1,s2,len,note) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #s1 " != " #s2 " (" #note ")", 1, "%.*s", (s1), (s2), (int)(len))
-#define cl_assert_equal_wcsn(wcs1,wcs2,len) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #wcs1 " != " #wcs2, 1, "%.*ls", (wcs1), (wcs2), (int)(len))
-#define cl_assert_equal_wcsn_(wcs1,wcs2,len,note) clar__assert_equal(__FILE__,__func__,__LINE__,"String mismatch: " #wcs1 " != " #wcs2 " (" #note ")", 1, "%.*ls", (wcs1), (wcs2), (int)(len))
+#define cl_assert_equal_wcsn(wcs1,wcs2,len) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #wcs1 " != " #wcs2, 1, "%.*ls", (wcs1), (wcs2), (int)(len))
+#define cl_assert_equal_wcsn_(wcs1,wcs2,len,note) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"String mismatch: " #wcs1 " != " #wcs2 " (" #note ")", 1, "%.*ls", (wcs1), (wcs2), (int)(len))
-#define cl_assert_equal_i(i1,i2) clar__assert_equal(__FILE__,__func__,__LINE__,#i1 " != " #i2, 1, "%d", (int)(i1), (int)(i2))
-#define cl_assert_equal_i_(i1,i2,note) clar__assert_equal(__FILE__,__func__,__LINE__,#i1 " != " #i2 " (" #note ")", 1, "%d", (i1), (i2))
-#define cl_assert_equal_i_fmt(i1,i2,fmt) clar__assert_equal(__FILE__,__func__,__LINE__,#i1 " != " #i2, 1, (fmt), (int)(i1), (int)(i2))
+#define cl_assert_equal_i(i1,i2) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,#i1 " != " #i2, 1, "%d", (int)(i1), (int)(i2))
+#define cl_assert_equal_i_(i1,i2,note) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,#i1 " != " #i2 " (" #note ")", 1, "%d", (i1), (i2))
+#define cl_assert_equal_i_fmt(i1,i2,fmt) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,#i1 " != " #i2, 1, (fmt), (int)(i1), (int)(i2))
-#define cl_assert_equal_b(b1,b2) clar__assert_equal(__FILE__,__func__,__LINE__,#b1 " != " #b2, 1, "%d", (int)((b1) != 0),(int)((b2) != 0))
+#define cl_assert_equal_b(b1,b2) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,#b1 " != " #b2, 1, "%d", (int)((b1) != 0),(int)((b2) != 0))
-#define cl_assert_equal_p(p1,p2) clar__assert_equal(__FILE__,__func__,__LINE__,"Pointer mismatch: " #p1 " != " #p2, 1, "%p", (p1), (p2))
+#define cl_assert_equal_p(p1,p2) clar__assert_equal(CLAR_CURRENT_FILE,CLAR_CURRENT_FUNC,CLAR_CURRENT_LINE,"Pointer mismatch: " #p1 " != " #p2, 1, "%p", (p1), (p2))
void clar__skip(void);
@@ -170,4 +204,11 @@ void clar__assert_equal(
const char *fmt,
...);
+void clar__set_invokepoint(
+ const char *file,
+ const char *func,
+ size_t line);
+
+void clar__clear_invokepoint(void);
+
#endif
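The assertion macros above now report their location through CLAR_CURRENT_FILE, CLAR_CURRENT_FUNC and CLAR_CURRENT_LINE instead of the raw __FILE__/__func__/__LINE__, and the header grows clar__set_invokepoint()/clar__clear_invokepoint(). A minimal sketch of how a cl_invoke()-style wrapper is presumably layered on top of these (the macro body below is an assumption; the actual definition is not part of this hunk):

/* Assumed shape of cl_invoke(); the real definition may differ. */
#define cl_invoke(expr) \
	do { \
		clar__set_invokepoint(__FILE__, __func__, __LINE__); \
		expr; \
		clar__clear_invokepoint(); \
	} while (0)

/*
 * While the invokepoint is set, CLAR_CURRENT_FILE/FUNC/LINE presumably
 * resolve to the recorded caller location, so an assertion that fails
 * inside a shared helper such as assert_output() in the selftest below
 * is reported at the test that invoked it.
 */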
diff --git a/t/unit-tests/clar/clar/fixtures.h b/t/unit-tests/clar/clar/fixtures.h
index 6ec6423484..9f1023df59 100644
--- a/t/unit-tests/clar/clar/fixtures.h
+++ b/t/unit-tests/clar/clar/fixtures.h
@@ -2,7 +2,7 @@
static const char *
fixture_path(const char *base, const char *fixture_name)
{
- static char _path[4096];
+ static char _path[CLAR_MAX_PATH];
size_t root_len;
root_len = strlen(base);
@@ -28,7 +28,7 @@ const char *cl_fixture(const char *fixture_name)
void cl_fixture_sandbox(const char *fixture_name)
{
- fs_copy(cl_fixture(fixture_name), _clar_path);
+ fs_copy(cl_fixture(fixture_name), clar_sandbox_path());
}
const char *cl_fixture_basename(const char *fixture_name)
@@ -45,6 +45,6 @@ const char *cl_fixture_basename(const char *fixture_name)
void cl_fixture_cleanup(const char *fixture_name)
{
- fs_rm(fixture_path(_clar_path, cl_fixture_basename(fixture_name)));
+ fs_rm(fixture_path(clar_sandbox_path(), cl_fixture_basename(fixture_name)));
}
#endif
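With these changes the fixture helpers copy into and delete from the per-test sandbox rather than a process-wide path. A hedged usage sketch (not part of the patch), using the "test" fixture that ships with the selftest suite later in this series:

#include <sys/stat.h>
#include "clar.h"

void test_example__fixture_roundtrip(void)
{
	struct stat st;

	cl_fixture_sandbox("test");            /* copied into the per-test sandbox */
	cl_must_pass(stat("test/file", &st));  /* fixture file is now present */

	cl_fixture_cleanup("test");            /* removed from the sandbox again */
	cl_must_fail(stat("test/file", &st));  /* and gone */
}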
diff --git a/t/unit-tests/clar/clar/fs.h b/t/unit-tests/clar/clar/fs.h
index 2203743fb4..f1311d91e8 100644
--- a/t/unit-tests/clar/clar/fs.h
+++ b/t/unit-tests/clar/clar/fs.h
@@ -8,12 +8,6 @@
#ifdef _WIN32
-#ifdef CLAR_WIN32_LONGPATHS
-# define CLAR_MAX_PATH 4096
-#else
-# define CLAR_MAX_PATH MAX_PATH
-#endif
-
#define RM_RETRY_COUNT 5
#define RM_RETRY_DELAY 10
@@ -296,7 +290,7 @@ void
cl_fs_cleanup(void)
{
#ifdef CLAR_FIXTURE_PATH
- fs_rm(fixture_path(_clar_path, "*"));
+ fs_rm(fixture_path(clar_tempdir_path(), "*"));
#else
((void)fs_copy); /* unused */
#endif
@@ -371,17 +365,19 @@ static void
fs_copydir_helper(const char *source, const char *dest, int dest_mode)
{
DIR *source_dir;
- struct dirent *d;
mkdir(dest, dest_mode);
cl_assert_(source_dir = opendir(source), "Could not open source dir");
- for (;;) {
+ while (1) {
+ struct dirent *d;
char *child;
errno = 0;
- if ((d = readdir(source_dir)) == NULL)
+ d = readdir(source_dir);
+ if (!d)
break;
+
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
@@ -479,15 +475,18 @@ static void
fs_rmdir_helper(const char *path)
{
DIR *dir;
- struct dirent *d;
cl_assert_(dir = opendir(path), "Could not open dir");
- for (;;) {
+
+ while (1) {
+ struct dirent *d;
char *child;
errno = 0;
- if ((d = readdir(dir)) == NULL)
+ d = readdir(dir);
+ if (!d)
break;
+
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
@@ -524,7 +523,7 @@ fs_rm(const char *path)
void
cl_fs_cleanup(void)
{
- clar_unsandbox();
- clar_sandbox();
+ clar_tempdir_shutdown();
+ clar_tempdir_init();
}
#endif
diff --git a/t/unit-tests/clar/clar/print.h b/t/unit-tests/clar/clar/print.h
index 69d0ee967e..89b66591d7 100644
--- a/t/unit-tests/clar/clar/print.h
+++ b/t/unit-tests/clar/clar/print.h
@@ -1,9 +1,13 @@
/* clap: clar protocol, the traditional clar output format */
-static void clar_print_clap_init(int test_count, int suite_count, const char *suite_names)
+static void clar_print_clap_init(int test_count, int suite_count)
{
(void)test_count;
- printf("Loaded %d suites: %s\n", (int)suite_count, suite_names);
+
+ if (_clar.verbosity < 0)
+ return;
+
+ printf("Loaded %d suites:\n", (int)suite_count);
printf("Started (test status codes: OK='.' FAILURE='F' SKIPPED='S')\n");
}
@@ -13,10 +17,27 @@ static void clar_print_clap_shutdown(int test_count, int suite_count, int error_
(void)suite_count;
(void)error_count;
- printf("\n\n");
+ if (_clar.verbosity >= 0)
+ printf("\n\n");
clar_report_all();
}
+
+static void clar_print_indented(const char *str, int indent)
+{
+ const char *bol, *eol;
+
+ for (bol = str; *bol; bol = eol) {
+ eol = strchr(bol, '\n');
+ if (eol)
+ eol++;
+ else
+ eol = bol + strlen(bol);
+ printf("%*s%.*s", indent, "", (int)(eol - bol), bol);
+ }
+ putc('\n', stdout);
+}
+
static void clar_print_clap_error(int num, const struct clar_report *report, const struct clar_error *error)
{
printf(" %d) Failure:\n", num);
@@ -27,10 +48,10 @@ static void clar_print_clap_error(int num, const struct clar_report *report, con
error->file,
error->line_number);
- printf(" %s\n", error->error_msg);
+ clar_print_indented(error->error_msg, 2);
if (error->description != NULL)
- printf(" %s\n", error->description);
+ clar_print_indented(error->description, 2);
printf("\n");
fflush(stdout);
@@ -41,14 +62,17 @@ static void clar_print_clap_ontest(const char *suite_name, const char *test_name
(void)test_name;
(void)test_number;
+ if (_clar.verbosity < 0)
+ return;
+
if (_clar.verbosity > 1) {
printf("%s::%s: ", suite_name, test_name);
switch (status) {
case CL_TEST_OK: printf("ok\n"); break;
case CL_TEST_FAILURE: printf("fail\n"); break;
- case CL_TEST_SKIP: printf("skipped"); break;
- case CL_TEST_NOTRUN: printf("notrun"); break;
+ case CL_TEST_SKIP: printf("skipped\n"); break;
+ case CL_TEST_NOTRUN: printf("notrun\n"); break;
}
} else {
switch (status) {
@@ -64,6 +88,8 @@ static void clar_print_clap_ontest(const char *suite_name, const char *test_name
static void clar_print_clap_onsuite(const char *suite_name, int suite_index)
{
+ if (_clar.verbosity < 0)
+ return;
if (_clar.verbosity == 1)
printf("\n%s", suite_name);
@@ -77,11 +103,10 @@ static void clar_print_clap_onabort(const char *fmt, va_list arg)
/* tap: test anywhere protocol format */
-static void clar_print_tap_init(int test_count, int suite_count, const char *suite_names)
+static void clar_print_tap_init(int test_count, int suite_count)
{
(void)test_count;
(void)suite_count;
- (void)suite_names;
printf("TAP version 13\n");
}
@@ -127,18 +152,20 @@ static void clar_print_tap_ontest(const char *suite_name, const char *test_name,
case CL_TEST_FAILURE:
printf("not ok %d - %s::%s\n", test_number, suite_name, test_name);
- printf(" ---\n");
- printf(" reason: |\n");
- printf(" %s\n", error->error_msg);
+ if (_clar.verbosity >= 0) {
+ printf(" ---\n");
+ printf(" reason: |\n");
+ clar_print_indented(error->error_msg, 6);
- if (error->description)
- printf(" %s\n", error->description);
+ if (error->description)
+ clar_print_indented(error->description, 6);
- printf(" at:\n");
- printf(" file: '"); print_escaped(error->file); printf("'\n");
- printf(" line: %" PRIuMAX "\n", error->line_number);
- printf(" function: '%s'\n", error->function);
- printf(" ---\n");
+ printf(" at:\n");
+ printf(" file: '"); print_escaped(error->file); printf("'\n");
+ printf(" line: %" PRIuMAX "\n", error->line_number);
+ printf(" function: '%s'\n", error->function);
+ printf(" ---\n");
+ }
break;
case CL_TEST_SKIP:
@@ -152,6 +179,8 @@ static void clar_print_tap_ontest(const char *suite_name, const char *test_name,
static void clar_print_tap_onsuite(const char *suite_name, int suite_index)
{
+ if (_clar.verbosity < 0)
+ return;
printf("# start of suite %d: %s\n", suite_index, suite_name);
}
@@ -177,9 +206,9 @@ static void clar_print_tap_onabort(const char *fmt, va_list arg)
} \
} while (0)
-static void clar_print_init(int test_count, int suite_count, const char *suite_names)
+static void clar_print_init(int test_count, int suite_count)
{
- PRINT(init, test_count, suite_count, suite_names);
+ PRINT(init, test_count, suite_count);
}
static void clar_print_shutdown(int test_count, int suite_count, int error_count)
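print.h gains a quiet mode (negative verbosity) and a helper that indents multi-line assertion messages. The helper is restated below as a standalone program (illustrative only) to show the resulting formatting:

#include <stdio.h>
#include <string.h>

/* Same logic as the new clar_print_indented() above. */
static void print_indented(const char *str, int indent)
{
	const char *bol, *eol;

	for (bol = str; *bol; bol = eol) {
		eol = strchr(bol, '\n');
		if (eol)
			eol++;
		else
			eol = bol + strlen(bol);
		printf("%*s%.*s", indent, "", (int)(eol - bol), bol);
	}
	putc('\n', stdout);
}

int main(void)
{
	/* Prints both lines indented by six spaces, matching the
	 * "reason: |" blocks in the expected TAP output further down. */
	print_indented("description line 1\ndescription line 2", 6);
	return 0;
}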
diff --git a/t/unit-tests/clar/clar/sandbox.h b/t/unit-tests/clar/clar/sandbox.h
index bc960f50e0..52add8aceb 100644
--- a/t/unit-tests/clar/clar/sandbox.h
+++ b/t/unit-tests/clar/clar/sandbox.h
@@ -2,7 +2,17 @@
#include <sys/syslimits.h>
#endif
-static char _clar_path[4096 + 1];
+/*
+ * The tempdir is the temporary directory for the entirety of the clar
+ * process execution. The sandbox is an individual temporary directory
+ * for the execution of an individual test. Sandboxes are deleted
+ * entirely after test execution to avoid pollution across tests.
+ */
+
+static char _clar_tempdir[CLAR_MAX_PATH];
+static size_t _clar_tempdir_len;
+
+static char _clar_sandbox[CLAR_MAX_PATH];
static int
is_valid_tmp_path(const char *path)
@@ -15,7 +25,10 @@ is_valid_tmp_path(const char *path)
if (!S_ISDIR(st.st_mode))
return 0;
- return (access(path, W_OK) == 0);
+ if (access(path, W_OK) != 0)
+ return 0;
+
+ return (strlen(path) < CLAR_MAX_PATH);
}
static int
@@ -31,14 +44,11 @@ find_tmp_path(char *buffer, size_t length)
for (i = 0; i < var_count; ++i) {
const char *env = getenv(env_vars[i]);
+
if (!env)
continue;
if (is_valid_tmp_path(env)) {
-#ifdef __APPLE__
- if (length >= PATH_MAX && realpath(env, buffer) != NULL)
- return 0;
-#endif
strncpy(buffer, env, length - 1);
buffer[length - 1] = '\0';
return 0;
@@ -47,21 +57,18 @@ find_tmp_path(char *buffer, size_t length)
/* If the environment doesn't say anything, try to use /tmp */
if (is_valid_tmp_path("/tmp")) {
-#ifdef __APPLE__
- if (length >= PATH_MAX && realpath("/tmp", buffer) != NULL)
- return 0;
-#endif
strncpy(buffer, "/tmp", length - 1);
buffer[length - 1] = '\0';
return 0;
}
#else
- DWORD env_len = GetEnvironmentVariable("CLAR_TMP", buffer, (DWORD)length);
- if (env_len > 0 && env_len < (DWORD)length)
+ DWORD len = GetEnvironmentVariable("CLAR_TMP", buffer, (DWORD)length);
+ if (len > 0 && len < (DWORD)length)
return 0;
- if (GetTempPath((DWORD)length, buffer))
+ len = GetTempPath((DWORD)length, buffer);
+ if (len > 0 && len < (DWORD)length)
return 0;
#endif
@@ -75,17 +82,53 @@ find_tmp_path(char *buffer, size_t length)
return -1;
}
-static void clar_unsandbox(void)
+static int canonicalize_tmp_path(char *buffer)
+{
+#ifdef _WIN32
+ char tmp[CLAR_MAX_PATH], *p;
+ DWORD ret;
+
+ ret = GetFullPathName(buffer, CLAR_MAX_PATH, tmp, NULL);
+
+ if (ret == 0 || ret > CLAR_MAX_PATH)
+ return -1;
+
+ ret = GetLongPathName(tmp, buffer, CLAR_MAX_PATH);
+
+ if (ret == 0 || ret > CLAR_MAX_PATH)
+ return -1;
+
+ /* normalize path to POSIX forward slashes */
+ for (p = buffer; *p; p++)
+ if (*p == '\\')
+ *p = '/';
+
+ return 0;
+#elif defined(CLAR_HAS_REALPATH)
+ char tmp[CLAR_MAX_PATH];
+
+ if (realpath(buffer, tmp) == NULL)
+ return -1;
+
+ strcpy(buffer, tmp);
+ return 0;
+#else
+ (void)buffer;
+ return 0;
+#endif
+}
+
+static void clar_tempdir_shutdown(void)
{
- if (_clar_path[0] == '\0')
+ if (_clar_tempdir[0] == '\0')
return;
cl_must_pass(chdir(".."));
- fs_rm(_clar_path);
+ fs_rm(_clar_tempdir);
}
-static int build_sandbox_path(void)
+static int build_tempdir_path(void)
{
#ifdef CLAR_TMPDIR
const char path_tail[] = CLAR_TMPDIR "_XXXXXX";
@@ -95,64 +138,153 @@ static int build_sandbox_path(void)
size_t len;
- if (find_tmp_path(_clar_path, sizeof(_clar_path)) < 0)
+ if (find_tmp_path(_clar_tempdir, sizeof(_clar_tempdir)) < 0 ||
+ canonicalize_tmp_path(_clar_tempdir) < 0)
return -1;
- len = strlen(_clar_path);
+ len = strlen(_clar_tempdir);
-#ifdef _WIN32
- { /* normalize path to POSIX forward slashes */
- size_t i;
- for (i = 0; i < len; ++i) {
- if (_clar_path[i] == '\\')
- _clar_path[i] = '/';
- }
- }
-#endif
+ if (len + strlen(path_tail) + 2 > CLAR_MAX_PATH)
+ return -1;
- if (_clar_path[len - 1] != '/') {
- _clar_path[len++] = '/';
- }
+ if (_clar_tempdir[len - 1] != '/')
+ _clar_tempdir[len++] = '/';
- strncpy(_clar_path + len, path_tail, sizeof(_clar_path) - len);
+ strncpy(_clar_tempdir + len, path_tail, sizeof(_clar_tempdir) - len);
#if defined(__MINGW32__)
- if (_mktemp(_clar_path) == NULL)
+ if (_mktemp(_clar_tempdir) == NULL)
return -1;
- if (mkdir(_clar_path, 0700) != 0)
+ if (mkdir(_clar_tempdir, 0700) != 0)
return -1;
#elif defined(_WIN32)
- if (_mktemp_s(_clar_path, sizeof(_clar_path)) != 0)
+ if (_mktemp_s(_clar_tempdir, sizeof(_clar_tempdir)) != 0)
return -1;
- if (mkdir(_clar_path, 0700) != 0)
+ if (mkdir(_clar_tempdir, 0700) != 0)
return -1;
-#elif defined(__sun) || defined(__TANDEM)
- if (mktemp(_clar_path) == NULL)
+#elif defined(__sun) || defined(__TANDEM) || defined(__hpux)
+ if (mktemp(_clar_tempdir) == NULL)
return -1;
- if (mkdir(_clar_path, 0700) != 0)
+ if (mkdir(_clar_tempdir, 0700) != 0)
return -1;
#else
- if (mkdtemp(_clar_path) == NULL)
+ if (mkdtemp(_clar_tempdir) == NULL)
return -1;
#endif
+ _clar_tempdir_len = strlen(_clar_tempdir);
return 0;
}
-static void clar_sandbox(void)
+static void clar_tempdir_init(void)
{
- if (_clar_path[0] == '\0' && build_sandbox_path() < 0)
- clar_abort("Failed to build sandbox path.\n");
+ if (_clar_tempdir[0] == '\0' && build_tempdir_path() < 0)
+ clar_abort("Failed to build tempdir path.\n");
- if (chdir(_clar_path) != 0)
- clar_abort("Failed to change into sandbox directory '%s': %s.\n",
- _clar_path, strerror(errno));
+ if (chdir(_clar_tempdir) != 0)
+ clar_abort("Failed to change into tempdir '%s': %s.\n",
+ _clar_tempdir, strerror(errno));
+
+#if !defined(CLAR_SANDBOX_TEST_NAMES) && defined(_WIN32)
+ srand(clock() ^ (unsigned int)time(NULL) ^ GetCurrentProcessId() ^ GetCurrentThreadId());
+#elif !defined(CLAR_SANDBOX_TEST_NAMES)
+ srand(clock() ^ time(NULL) ^ ((unsigned)getpid() << 16));
+#endif
+}
+
+static void append(char *dst, const char *src)
+{
+ char *d;
+ const char *s;
+
+ for (d = dst; *d; d++)
+ ;
+
+ for (s = src; *s; d++, s++)
+ if (*s == ':')
+ *d = '_';
+ else
+ *d = *s;
+
+ *d = '\0';
+}
+
+static int clar_sandbox_create(const char *suite_name, const char *test_name)
+{
+#ifndef CLAR_SANDBOX_TEST_NAMES
+ char alpha[] = "0123456789abcdef";
+ int num = rand();
+#endif
+
+ cl_assert(_clar_sandbox[0] == '\0');
+
+ /*
+ * We may want to use test names as sandbox directory names for
+	 * readability; however, on platforms that restrict file and path
+	 * name lengths (e.g., Windows), this may be too long.
+ */
+#ifdef CLAR_SANDBOX_TEST_NAMES
+ cl_assert(strlen(_clar_tempdir) + strlen(suite_name) + strlen(test_name) + 3 < CLAR_MAX_PATH);
+
+ strcpy(_clar_sandbox, _clar_tempdir);
+ _clar_sandbox[_clar_tempdir_len] = '/';
+ _clar_sandbox[_clar_tempdir_len + 1] = '\0';
+
+ append(_clar_sandbox, suite_name);
+ append(_clar_sandbox, "__");
+ append(_clar_sandbox, test_name);
+#else
+ ((void)suite_name);
+ ((void)test_name);
+ ((void)append);
+
+ cl_assert(strlen(_clar_tempdir) + 9 < CLAR_MAX_PATH);
+
+ strcpy(_clar_sandbox, _clar_tempdir);
+ _clar_sandbox[_clar_tempdir_len] = '/';
+
+ _clar_sandbox[_clar_tempdir_len + 1] = alpha[(num & 0xf0000000) >> 28];
+ _clar_sandbox[_clar_tempdir_len + 2] = alpha[(num & 0x0f000000) >> 24];
+ _clar_sandbox[_clar_tempdir_len + 3] = alpha[(num & 0x00f00000) >> 20];
+ _clar_sandbox[_clar_tempdir_len + 4] = alpha[(num & 0x000f0000) >> 16];
+ _clar_sandbox[_clar_tempdir_len + 5] = alpha[(num & 0x0000f000) >> 12];
+ _clar_sandbox[_clar_tempdir_len + 6] = alpha[(num & 0x00000f00) >> 8];
+ _clar_sandbox[_clar_tempdir_len + 7] = alpha[(num & 0x000000f0) >> 4];
+ _clar_sandbox[_clar_tempdir_len + 8] = alpha[(num & 0x0000000f) >> 0];
+ _clar_sandbox[_clar_tempdir_len + 9] = '\0';
+#endif
+
+ if (mkdir(_clar_sandbox, 0700) != 0)
+ return -1;
+
+ if (chdir(_clar_sandbox) != 0)
+ return -1;
+
+ return 0;
+}
+
+static int clar_sandbox_cleanup(void)
+{
+ cl_assert(_clar_sandbox[0] != '\0');
+
+ if (chdir(_clar_tempdir) != 0)
+ return -1;
+
+ fs_rm(_clar_sandbox);
+ _clar_sandbox[0] = '\0';
+
+ return 0;
+}
+
+const char *clar_tempdir_path(void)
+{
+ return _clar_tempdir;
}
const char *clar_sandbox_path(void)
{
- return _clar_path;
+ return _clar_sandbox;
}
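Putting the tempdir/sandbox split together, the on-disk layout during a run looks roughly like this (directory names are illustrative only; the tempdir suffix comes from the mktemp template, and the sandbox name is either a random hex string or, with CLAR_SANDBOX_TEST_NAMES, "<suite>__<test>"):

/*
 *   /tmp/clar_tmp_Ab12Cd/                 tempdir, lives for the whole run
 *   /tmp/clar_tmp_Ab12Cd/1a2b3c4d/        sandbox for one test (random name)
 *   /tmp/clar_tmp_Ab12Cd/suite__my_test/  sandbox with CLAR_SANDBOX_TEST_NAMES
 *
 * clar_sandbox_create() chdir()s into the sandbox before a test runs and
 * clar_sandbox_cleanup() removes it afterwards, so no state leaks between
 * tests.
 */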
diff --git a/t/unit-tests/clar/clar/summary.h b/t/unit-tests/clar/clar/summary.h
index 0d0b646fe7..7b85f162d8 100644
--- a/t/unit-tests/clar/clar/summary.h
+++ b/t/unit-tests/clar/clar/summary.h
@@ -23,10 +23,11 @@ static int clar_summary_testsuite(struct clar_summary *summary,
int idn, const char *name, time_t timestamp,
int test_count, int fail_count, int error_count)
{
- struct tm *tm = localtime(&timestamp);
+ struct tm tm;
char iso_dt[20];
- if (strftime(iso_dt, sizeof(iso_dt), "%Y-%m-%dT%H:%M:%S", tm) == 0)
+ localtime_r(&timestamp, &tm);
+ if (strftime(iso_dt, sizeof(iso_dt), "%Y-%m-%dT%H:%M:%S", &tm) == 0)
return -1;
return fprintf(summary->fp, "\t<testsuite"
diff --git a/t/unit-tests/clar/example/CMakeLists.txt b/t/unit-tests/clar/example/CMakeLists.txt
new file mode 100644
index 0000000000..b72f187523
--- /dev/null
+++ b/t/unit-tests/clar/example/CMakeLists.txt
@@ -0,0 +1,28 @@
+find_package(Python COMPONENTS Interpreter REQUIRED)
+
+add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/clar.suite"
+ COMMAND "${Python_EXECUTABLE}" "${CMAKE_SOURCE_DIR}/generate.py" --output "${CMAKE_CURRENT_BINARY_DIR}"
+ DEPENDS main.c example.c
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+)
+
+add_executable(example)
+set_target_properties(example PROPERTIES
+ C_STANDARD 90
+ C_STANDARD_REQUIRED ON
+ C_EXTENSIONS OFF
+)
+target_sources(example PRIVATE
+ main.c
+ example.c
+ "${CMAKE_CURRENT_BINARY_DIR}/clar.suite"
+)
+target_compile_definitions(example PRIVATE)
+target_compile_options(example PRIVATE
+ $<IF:$<CXX_COMPILER_ID:MSVC>,/W4,-Wall>
+)
+target_include_directories(example PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${CMAKE_CURRENT_BINARY_DIR}"
+)
+target_link_libraries(example clar)
diff --git a/t/unit-tests/clar/example/example.c b/t/unit-tests/clar/example/example.c
new file mode 100644
index 0000000000..c07d6bf68e
--- /dev/null
+++ b/t/unit-tests/clar/example/example.c
@@ -0,0 +1,6 @@
+#include "clar.h"
+
+void test_example__simple_assert(void)
+{
+ cl_assert_equal_i(1, 1);
+}
diff --git a/t/unit-tests/clar/test/main.c.sample b/t/unit-tests/clar/example/main.c
index a4d91b72fa..f8def7fa6e 100644
--- a/t/unit-tests/clar/test/main.c.sample
+++ b/t/unit-tests/clar/example/main.c
@@ -5,7 +5,7 @@
* For full terms see the included COPYING file.
*/
-#include "clar_test.h"
+#include "clar.h"
/*
* Minimal main() for clar tests.
diff --git a/t/unit-tests/clar/generate.py b/t/unit-tests/clar/generate.py
index 80996ac3e7..fd2f0ee83b 100755
--- a/t/unit-tests/clar/generate.py
+++ b/t/unit-tests/clar/generate.py
@@ -158,17 +158,24 @@ class TestSuite(object):
def find_modules(self):
modules = []
- for root, _, files in os.walk(self.path):
- module_root = root[len(self.path):]
- module_root = [c for c in module_root.split(os.sep) if c]
- tests_in_module = fnmatch.filter(files, "*.c")
+ if os.path.isfile(self.path):
+ full_path = os.path.abspath(self.path)
+ module_name = os.path.basename(self.path)
+ module_name = os.path.splitext(module_name)[0]
+ modules.append((full_path, module_name))
+ else:
+ for root, _, files in os.walk(self.path):
+ module_root = root[len(self.path):]
+ module_root = [c for c in module_root.split(os.sep) if c]
- for test_file in tests_in_module:
- full_path = os.path.join(root, test_file)
- module_name = "_".join(module_root + [test_file[:-2]]).replace("-", "_")
+ tests_in_module = fnmatch.filter(files, "*.c")
- modules.append((full_path, module_name))
+ for test_file in tests_in_module:
+ full_path = os.path.join(root, test_file)
+ module_name = "_".join(module_root + [test_file[:-2]]).replace("-", "_")
+
+ modules.append((full_path, module_name))
return modules
@@ -217,6 +224,7 @@ class TestSuite(object):
def write(self):
output = os.path.join(self.output, 'clar.suite')
+ os.makedirs(self.output, exist_ok=True)
if not self.should_generate(output):
return False
@@ -258,7 +266,11 @@ if __name__ == '__main__':
sys.exit(1)
path = args.pop() if args else '.'
+ if os.path.isfile(path) and not options.output:
+ print("Must provide --output when specifying a file")
+ sys.exit(1)
output = options.output or path
+
suite = TestSuite(path, output)
suite.load(options.force)
suite.disable(options.excluded)
diff --git a/t/unit-tests/clar/test/CMakeLists.txt b/t/unit-tests/clar/test/CMakeLists.txt
index 7f2c1dc17a..f240166439 100644
--- a/t/unit-tests/clar/test/CMakeLists.txt
+++ b/t/unit-tests/clar/test/CMakeLists.txt
@@ -2,12 +2,12 @@ find_package(Python COMPONENTS Interpreter REQUIRED)
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/clar.suite"
COMMAND "${Python_EXECUTABLE}" "${CMAKE_SOURCE_DIR}/generate.py" --output "${CMAKE_CURRENT_BINARY_DIR}"
- DEPENDS main.c sample.c clar_test.h
+ DEPENDS main.c selftest.c
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
)
-add_executable(clar_test)
-set_target_properties(clar_test PROPERTIES
+add_executable(selftest)
+set_target_properties(selftest PROPERTIES
C_STANDARD 90
C_STANDARD_REQUIRED ON
C_EXTENSIONS OFF
@@ -15,25 +15,35 @@ set_target_properties(clar_test PROPERTIES
# MSVC generates all kinds of warnings. We may want to fix these in the future
# and then unconditionally treat warnings as errors.
-if(NOT MSVC)
- set_target_properties(clar_test PROPERTIES
+if (NOT MSVC)
+ set_target_properties(selftest PROPERTIES
COMPILE_WARNING_AS_ERROR ON
)
endif()
-target_sources(clar_test PRIVATE
+target_sources(selftest PRIVATE
main.c
- sample.c
+ selftest.c
"${CMAKE_CURRENT_BINARY_DIR}/clar.suite"
)
-target_compile_definitions(clar_test PRIVATE
- CLAR_FIXTURE_PATH="${CMAKE_CURRENT_SOURCE_DIR}/resources/"
+target_compile_definitions(selftest PRIVATE
+ CLAR_FIXTURE_PATH="${CMAKE_CURRENT_SOURCE_DIR}/expected/"
)
-target_compile_options(clar_test PRIVATE
+target_compile_options(selftest PRIVATE
$<IF:$<CXX_COMPILER_ID:MSVC>,/W4,-Wall>
)
-target_include_directories(clar_test PRIVATE
+target_include_directories(selftest PRIVATE
"${CMAKE_SOURCE_DIR}"
"${CMAKE_CURRENT_BINARY_DIR}"
)
-target_link_libraries(clar_test clar)
+target_link_libraries(selftest clar)
+
+add_test(NAME build_selftest
+ COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" --config "$<CONFIG>" --target selftest
+)
+set_tests_properties(build_selftest PROPERTIES FIXTURES_SETUP clar_test_fixture)
+
+add_subdirectory(suites)
+
+add_test(NAME selftest COMMAND "${CMAKE_CURRENT_BINARY_DIR}/selftest" $<TARGET_FILE_DIR:combined_suite>)
+set_tests_properties(selftest PROPERTIES FIXTURES_REQUIRED clar_test_fixture)
diff --git a/t/unit-tests/clar/test/clar_test.h b/t/unit-tests/clar/test/clar_test.h
deleted file mode 100644
index 0fcaa639aa..0000000000
--- a/t/unit-tests/clar/test/clar_test.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) Vicent Marti. All rights reserved.
- *
- * This file is part of clar, distributed under the ISC license.
- * For full terms see the included COPYING file.
- */
-#ifndef __CLAR_TEST__
-#define __CLAR_TEST__
-
-/* Import the standard clar helper functions */
-#include "clar.h"
-
-/* Your custom shared includes / defines here */
-extern int global_test_counter;
-
-#endif
diff --git a/t/unit-tests/clar/test/expected/help b/t/unit-tests/clar/test/expected/help
new file mode 100644
index 0000000000..9428def2d7
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/help
@@ -0,0 +1,12 @@
+Usage: combined [options]
+
+Options:
+ -sname Run only the suite with `name` (can go to individual test name)
+ -iname Include the suite with `name`
+ -xname Exclude the suite with `name`
+ -v Increase verbosity (show suite names)
+ -q Decrease verbosity, inverse to -v
+ -Q Quit as soon as a test fails
+ -t Display results in tap format
+ -l Print suite names
+ -r[filename] Write summary file (to the optional filename)
diff --git a/t/unit-tests/clar/test/expected/quiet b/t/unit-tests/clar/test/expected/quiet
new file mode 100644
index 0000000000..280c99d8ad
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/quiet
@@ -0,0 +1,44 @@
+ 1) Failure:
+combined::1 [file:42]
+ Function call failed: -1
+
+ 2) Failure:
+combined::2 [file:42]
+ Expression is not true: 100 == 101
+
+ 3) Failure:
+combined::strings [file:42]
+ String mismatch: "mismatched" != actual ("this one fails")
+ 'mismatched' != 'expected' (at byte 0)
+
+ 4) Failure:
+combined::strings_with_length [file:42]
+ String mismatch: "exactly" != actual ("this one fails")
+ 'exa' != 'exp' (at byte 2)
+
+ 5) Failure:
+combined::int [file:42]
+ 101 != value ("extra note on failing test")
+ 101 != 100
+
+ 6) Failure:
+combined::int_fmt [file:42]
+ 022 != value
+ 0022 != 0144
+
+ 7) Failure:
+combined::bool [file:42]
+ 0 != value
+ 0 != 1
+
+ 8) Failure:
+combined::multiline_description [file:42]
+ Function call failed: -1
+ description line 1
+ description line 2
+
+ 9) Failure:
+combined::null_string [file:42]
+ String mismatch: "expected" != actual ("this one fails")
+ 'expected' != NULL
+
diff --git a/t/unit-tests/clar/test/expected/specific_test b/t/unit-tests/clar/test/expected/specific_test
new file mode 100644
index 0000000000..6c22e9f507
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/specific_test
@@ -0,0 +1,9 @@
+Loaded 1 suites:
+Started (test status codes: OK='.' FAILURE='F' SKIPPED='S')
+F
+
+ 1) Failure:
+combined::bool [file:42]
+ 0 != value
+ 0 != 1
+
diff --git a/t/unit-tests/clar/test/expected/stop_on_failure b/t/unit-tests/clar/test/expected/stop_on_failure
new file mode 100644
index 0000000000..c23610754f
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/stop_on_failure
@@ -0,0 +1,8 @@
+Loaded 1 suites:
+Started (test status codes: OK='.' FAILURE='F' SKIPPED='S')
+F
+
+ 1) Failure:
+combined::1 [file:42]
+ Function call failed: -1
+
diff --git a/t/unit-tests/clar/test/expected/suite_names b/t/unit-tests/clar/test/expected/suite_names
new file mode 100644
index 0000000000..10d1538427
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/suite_names
@@ -0,0 +1,2 @@
+Test suites (use -s<name> to run just one):
+ 0: combined
diff --git a/t/unit-tests/clar/test/expected/summary.xml b/t/unit-tests/clar/test/expected/summary.xml
new file mode 100644
index 0000000000..9a89d43a59
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/summary.xml
@@ -0,0 +1,41 @@
+<testsuites>
+ <testsuite id="0" name="selftest" hostname="localhost" timestamp="2024-09-06T10:04:08" tests="8" failures="8" errors="0">
+ <testcase name="1" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[Function call failed: -1
+(null)]]></failure>
+ </testcase>
+ <testcase name="2" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[Expression is not true: 100 == 101
+(null)]]></failure>
+ </testcase>
+ <testcase name="strings" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[String mismatch: "mismatched" != actual ("this one fails")
+'mismatched' != 'expected' (at byte 0)]]></failure>
+ </testcase>
+ <testcase name="strings_with_length" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[String mismatch: "exactly" != actual ("this one fails")
+'exa' != 'exp' (at byte 2)]]></failure>
+ </testcase>
+ <testcase name="int" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[101 != value ("extra note on failing test")
+101 != 100]]></failure>
+ </testcase>
+ <testcase name="int_fmt" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[022 != value
+0022 != 0144]]></failure>
+ </testcase>
+ <testcase name="bool" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[0 != value
+0 != 1]]></failure>
+ </testcase>
+ <testcase name="multiline_description" classname="selftest" time="0.00">
+			<failure type="assert"><![CDATA[Function call failed: -1
+description line 1
+description line 2]]></failure>
+ </testcase>
+ <testcase name="null_string" classname="selftest" time="0.00">
+ <failure type="assert"><![CDATA[String mismatch: "expected" != actual ("this one fails")
+'expected' != NULL]]></failure>
+ </testcase>
+ </testsuite>
+</testsuites>
diff --git a/t/unit-tests/clar/test/expected/summary_with_filename b/t/unit-tests/clar/test/expected/summary_with_filename
new file mode 100644
index 0000000000..460160791d
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/summary_with_filename
@@ -0,0 +1,49 @@
+Loaded 1 suites:
+Started (test status codes: OK='.' FAILURE='F' SKIPPED='S')
+FFFFFFFFF
+
+ 1) Failure:
+combined::1 [file:42]
+ Function call failed: -1
+
+ 2) Failure:
+combined::2 [file:42]
+ Expression is not true: 100 == 101
+
+ 3) Failure:
+combined::strings [file:42]
+ String mismatch: "mismatched" != actual ("this one fails")
+ 'mismatched' != 'expected' (at byte 0)
+
+ 4) Failure:
+combined::strings_with_length [file:42]
+ String mismatch: "exactly" != actual ("this one fails")
+ 'exa' != 'exp' (at byte 2)
+
+ 5) Failure:
+combined::int [file:42]
+ 101 != value ("extra note on failing test")
+ 101 != 100
+
+ 6) Failure:
+combined::int_fmt [file:42]
+ 022 != value
+ 0022 != 0144
+
+ 7) Failure:
+combined::bool [file:42]
+ 0 != value
+ 0 != 1
+
+ 8) Failure:
+combined::multiline_description [file:42]
+ Function call failed: -1
+ description line 1
+ description line 2
+
+ 9) Failure:
+combined::null_string [file:42]
+ String mismatch: "expected" != actual ("this one fails")
+ 'expected' != NULL
+
+written summary file to different.xml
diff --git a/t/unit-tests/clar/test/expected/summary_without_filename b/t/unit-tests/clar/test/expected/summary_without_filename
new file mode 100644
index 0000000000..7874c1d98b
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/summary_without_filename
@@ -0,0 +1,49 @@
+Loaded 1 suites:
+Started (test status codes: OK='.' FAILURE='F' SKIPPED='S')
+FFFFFFFFF
+
+ 1) Failure:
+combined::1 [file:42]
+ Function call failed: -1
+
+ 2) Failure:
+combined::2 [file:42]
+ Expression is not true: 100 == 101
+
+ 3) Failure:
+combined::strings [file:42]
+ String mismatch: "mismatched" != actual ("this one fails")
+ 'mismatched' != 'expected' (at byte 0)
+
+ 4) Failure:
+combined::strings_with_length [file:42]
+ String mismatch: "exactly" != actual ("this one fails")
+ 'exa' != 'exp' (at byte 2)
+
+ 5) Failure:
+combined::int [file:42]
+ 101 != value ("extra note on failing test")
+ 101 != 100
+
+ 6) Failure:
+combined::int_fmt [file:42]
+ 022 != value
+ 0022 != 0144
+
+ 7) Failure:
+combined::bool [file:42]
+ 0 != value
+ 0 != 1
+
+ 8) Failure:
+combined::multiline_description [file:42]
+ Function call failed: -1
+ description line 1
+ description line 2
+
+ 9) Failure:
+combined::null_string [file:42]
+ String mismatch: "expected" != actual ("this one fails")
+ 'expected' != NULL
+
+written summary file to summary.xml
diff --git a/t/unit-tests/clar/test/expected/tap b/t/unit-tests/clar/test/expected/tap
new file mode 100644
index 0000000000..bddbd5dfe9
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/tap
@@ -0,0 +1,92 @@
+TAP version 13
+# start of suite 1: combined
+not ok 1 - combined::1
+ ---
+ reason: |
+ Function call failed: -1
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 2 - combined::2
+ ---
+ reason: |
+ Expression is not true: 100 == 101
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 3 - combined::strings
+ ---
+ reason: |
+ String mismatch: "mismatched" != actual ("this one fails")
+ 'mismatched' != 'expected' (at byte 0)
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 4 - combined::strings_with_length
+ ---
+ reason: |
+ String mismatch: "exactly" != actual ("this one fails")
+ 'exa' != 'exp' (at byte 2)
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 5 - combined::int
+ ---
+ reason: |
+ 101 != value ("extra note on failing test")
+ 101 != 100
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 6 - combined::int_fmt
+ ---
+ reason: |
+ 022 != value
+ 0022 != 0144
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 7 - combined::bool
+ ---
+ reason: |
+ 0 != value
+ 0 != 1
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 8 - combined::multiline_description
+ ---
+ reason: |
+ Function call failed: -1
+ description line 1
+ description line 2
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+not ok 9 - combined::null_string
+ ---
+ reason: |
+ String mismatch: "expected" != actual ("this one fails")
+ 'expected' != NULL
+ at:
+ file: 'file'
+ line: 42
+ function: 'func'
+ ---
+1..9
diff --git a/t/unit-tests/clar/test/expected/without_arguments b/t/unit-tests/clar/test/expected/without_arguments
new file mode 100644
index 0000000000..1111d418a0
--- /dev/null
+++ b/t/unit-tests/clar/test/expected/without_arguments
@@ -0,0 +1,48 @@
+Loaded 1 suites:
+Started (test status codes: OK='.' FAILURE='F' SKIPPED='S')
+FFFFFFFFF
+
+ 1) Failure:
+combined::1 [file:42]
+ Function call failed: -1
+
+ 2) Failure:
+combined::2 [file:42]
+ Expression is not true: 100 == 101
+
+ 3) Failure:
+combined::strings [file:42]
+ String mismatch: "mismatched" != actual ("this one fails")
+ 'mismatched' != 'expected' (at byte 0)
+
+ 4) Failure:
+combined::strings_with_length [file:42]
+ String mismatch: "exactly" != actual ("this one fails")
+ 'exa' != 'exp' (at byte 2)
+
+ 5) Failure:
+combined::int [file:42]
+ 101 != value ("extra note on failing test")
+ 101 != 100
+
+ 6) Failure:
+combined::int_fmt [file:42]
+ 022 != value
+ 0022 != 0144
+
+ 7) Failure:
+combined::bool [file:42]
+ 0 != value
+ 0 != 1
+
+ 8) Failure:
+combined::multiline_description [file:42]
+ Function call failed: -1
+ description line 1
+ description line 2
+
+ 9) Failure:
+combined::null_string [file:42]
+ String mismatch: "expected" != actual ("this one fails")
+ 'expected' != NULL
+
diff --git a/t/unit-tests/clar/test/main.c b/t/unit-tests/clar/test/main.c
index 59e56ad255..94af440643 100644
--- a/t/unit-tests/clar/test/main.c
+++ b/t/unit-tests/clar/test/main.c
@@ -1,23 +1,9 @@
-/*
- * Copyright (c) Vicent Marti. All rights reserved.
- *
- * This file is part of clar, distributed under the ISC license.
- * For full terms see the included COPYING file.
- */
+#include <stdio.h>
+#include <string.h>
-#include "clar_test.h"
+#include "selftest.h"
-/*
- * Sample main() for clar tests.
- *
- * You should write your own main routine for clar tests that does specific
- * setup and teardown as necessary for your application. The only required
- * line is the call to `clar_test(argc, argv)`, which will execute the test
- * suite. If you want to check the return value of the test application,
- * your main() should return the same value returned by clar_test().
- */
-
-int global_test_counter = 0;
+const char *selftest_suite_directory;
#ifdef _WIN32
int __cdecl main(int argc, char *argv[])
@@ -25,16 +11,15 @@ int __cdecl main(int argc, char *argv[])
int main(int argc, char *argv[])
#endif
{
- int ret;
-
- /* Your custom initialization here */
- global_test_counter = 0;
-
- /* Run the test suite */
- ret = clar_test(argc, argv);
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <selftest-suite-directory> <options>\n",
+ argv[0]);
+ exit(1);
+ }
- /* Your custom cleanup here */
- cl_assert_equal_i(8, global_test_counter);
+ selftest_suite_directory = argv[1];
+	memmove(argv + 1, argv + 2, (argc - 1) * sizeof(*argv));
+ argc -= 1;
- return ret;
+ return clar_test(argc, argv);
}
diff --git a/t/unit-tests/clar/test/selftest.c b/t/unit-tests/clar/test/selftest.c
new file mode 100644
index 0000000000..eed83e4512
--- /dev/null
+++ b/t/unit-tests/clar/test/selftest.c
@@ -0,0 +1,370 @@
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#include "selftest.h"
+
+#ifdef _WIN32
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+
+static char *read_full(HANDLE h, int is_pipe)
+{
+ char *data = NULL;
+ size_t data_size = 0;
+
+ while (1) {
+ CHAR buf[4096];
+ DWORD bytes_read;
+
+ if (!ReadFile(h, buf, sizeof(buf), &bytes_read, NULL)) {
+ if (!is_pipe)
+ cl_fail("Failed reading file handle.");
+ cl_assert_equal_i(GetLastError(), ERROR_BROKEN_PIPE);
+ break;
+ }
+ if (!bytes_read)
+ break;
+
+ data = realloc(data, data_size + bytes_read);
+ cl_assert(data);
+ memcpy(data + data_size, buf, bytes_read);
+ data_size += bytes_read;
+ }
+
+ data = realloc(data, data_size + 1);
+ cl_assert(data);
+ data[data_size] = '\0';
+
+ while (strstr(data, "\r\n")) {
+ char *ptr = strstr(data, "\r\n");
+ memmove(ptr, ptr + 1, strlen(ptr));
+ }
+
+ return data;
+}
+
+static char *read_file(const char *path)
+{
+ char *content;
+ HANDLE file;
+
+ file = CreateFile(path, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ cl_assert(file != INVALID_HANDLE_VALUE);
+ content = read_full(file, 0);
+ cl_assert_equal_b(1, CloseHandle(file));
+
+ return content;
+}
+
+static char *execute(const char *suite, int expected_error_code, const char **args, size_t nargs)
+{
+ SECURITY_ATTRIBUTES security_attributes = { 0 };
+ PROCESS_INFORMATION process_info = { 0 };
+ STARTUPINFO startup_info = { 0 };
+ char binary_path[4096] = { 0 };
+ char cmdline[4096] = { 0 };
+ char *output = NULL;
+ HANDLE stdout_write;
+ HANDLE stdout_read;
+ DWORD exit_code;
+ size_t i;
+
+ snprintf(binary_path, sizeof(binary_path), "%s/%s_suite.exe",
+ selftest_suite_directory, suite);
+
+ /*
+ * Assemble command line arguments. In theory we'd have to properly
+ * quote them. In practice none of our tests actually care.
+ */
+ snprintf(cmdline, sizeof(cmdline), suite);
+ for (i = 0; i < nargs; i++) {
+ size_t cmdline_len = strlen(cmdline);
+ const char *arg = args[i];
+ cl_assert(cmdline_len + strlen(arg) < sizeof(cmdline));
+ snprintf(cmdline + cmdline_len, sizeof(cmdline) - cmdline_len,
+ " %s", arg);
+ }
+
+ /*
+ * Create a pipe that we will use to read data from the child process.
+ * The writing side needs to be inheritable such that the child can use
+ * it as stdout and stderr. The reading side should only be used by the
+ * parent.
+ */
+ security_attributes.nLength = sizeof(security_attributes);
+ security_attributes.bInheritHandle = TRUE;
+ cl_assert_equal_b(1, CreatePipe(&stdout_read, &stdout_write, &security_attributes, 0));
+ cl_assert_equal_b(1, SetHandleInformation(stdout_read, HANDLE_FLAG_INHERIT, 0));
+
+ /*
+ * Create the child process with our pipe.
+ */
+ startup_info.cb = sizeof(startup_info);
+ startup_info.hStdError = stdout_write;
+ startup_info.hStdOutput = stdout_write;
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+ cl_assert_equal_b(1, CreateProcess(binary_path, cmdline, NULL, NULL, TRUE,
+ 0, NULL, NULL, &startup_info, &process_info));
+ cl_assert_equal_b(1, CloseHandle(stdout_write));
+
+ output = read_full(stdout_read, 1);
+ cl_assert_equal_b(1, CloseHandle(stdout_read));
+ cl_assert_equal_b(1, GetExitCodeProcess(process_info.hProcess, &exit_code));
+ cl_assert_equal_i(exit_code, expected_error_code);
+
+ return output;
+}
+
+static void assert_output(const char *suite, const char *expected_output_file, int expected_error_code, ...)
+{
+ char *expected_output = NULL;
+ char *output = NULL;
+ const char *args[16];
+ va_list ap;
+ size_t i;
+
+ va_start(ap, expected_error_code);
+ for (i = 0; ; i++) {
+ const char *arg = va_arg(ap, const char *);
+ if (!arg)
+ break;
+ cl_assert(i < sizeof(args) / sizeof(*args));
+ args[i] = arg;
+ }
+ va_end(ap);
+
+ output = execute(suite, expected_error_code, args, i);
+ expected_output = read_file(cl_fixture(expected_output_file));
+ cl_assert_equal_s(output, expected_output);
+
+ free(expected_output);
+ free(output);
+}
+
+#else
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+# include <unistd.h>
+# include <sys/wait.h>
+
+static char *read_full(int fd)
+{
+ size_t data_bytes = 0;
+ char *data = NULL;
+
+ while (1) {
+ char buf[4096];
+ ssize_t n;
+
+ n = read(fd, buf, sizeof(buf));
+ if (n < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ cl_fail("Failed reading from child process.");
+ }
+ if (!n)
+ break;
+
+ data = realloc(data, data_bytes + n);
+ cl_assert(data);
+
+ memcpy(data + data_bytes, buf, n);
+ data_bytes += n;
+ }
+
+ data = realloc(data, data_bytes + 1);
+ cl_assert(data);
+ data[data_bytes] = '\0';
+
+ return data;
+}
+
+static char *read_file(const char *path)
+{
+ char *data;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ cl_fail("Failed reading expected file.");
+
+ data = read_full(fd);
+ cl_must_pass(close(fd));
+
+ return data;
+}
+
+static char *execute(const char *suite, int expected_error_code, const char **args, size_t nargs)
+{
+ int pipe_fds[2];
+ pid_t pid;
+
+ cl_must_pass(pipe(pipe_fds));
+
+ pid = fork();
+ if (!pid) {
+ const char *final_args[17] = { NULL };
+ char binary_path[4096];
+ size_t len = 0;
+ size_t i;
+
+ cl_assert(nargs < sizeof(final_args) / sizeof(*final_args));
+ final_args[0] = suite;
+ for (i = 0; i < nargs; i++)
+ final_args[i + 1] = args[i];
+
+ if (dup2(pipe_fds[1], STDOUT_FILENO) < 0 ||
+ dup2(pipe_fds[1], STDERR_FILENO) < 0 ||
+ close(0) < 0 ||
+ close(pipe_fds[0]) < 0 ||
+ close(pipe_fds[1]) < 0)
+ exit(1);
+
+ cl_assert(len + strlen(selftest_suite_directory) < sizeof(binary_path));
+ strcpy(binary_path, selftest_suite_directory);
+ len += strlen(selftest_suite_directory);
+
+ cl_assert(len + 1 < sizeof(binary_path));
+ binary_path[len] = '/';
+ len += 1;
+
+ cl_assert(len + strlen(suite) < sizeof(binary_path));
+ strcpy(binary_path + len, suite);
+ len += strlen(suite);
+
+ cl_assert(len + strlen("_suite") < sizeof(binary_path));
+ strcpy(binary_path + len, "_suite");
+ len += strlen("_suite");
+
+ binary_path[len] = '\0';
+
+ execv(binary_path, (char **) final_args);
+ exit(1);
+ } else if (pid > 0) {
+ pid_t waited_pid;
+ char *output;
+ int stat;
+
+ cl_must_pass(close(pipe_fds[1]));
+
+ output = read_full(pipe_fds[0]);
+
+ waited_pid = waitpid(pid, &stat, 0);
+ cl_assert_equal_i(pid, waited_pid);
+ cl_assert(WIFEXITED(stat));
+ cl_assert_equal_i(WEXITSTATUS(stat), expected_error_code);
+
+ return output;
+ } else {
+ cl_fail("Fork failed.");
+ }
+
+ return NULL;
+}
+
+static void assert_output(const char *suite, const char *expected_output_file, int expected_error_code, ...)
+{
+ char *expected_output, *output;
+ const char *args[16];
+ va_list ap;
+ size_t i;
+
+ va_start(ap, expected_error_code);
+ for (i = 0; ; i++) {
+ cl_assert(i < sizeof(args) / sizeof(*args));
+ args[i] = va_arg(ap, const char *);
+ if (!args[i])
+ break;
+ }
+ va_end(ap);
+
+ output = execute(suite, expected_error_code, args, i);
+ expected_output = read_file(cl_fixture(expected_output_file));
+ cl_assert_equal_s(output, expected_output);
+
+ free(expected_output);
+ free(output);
+}
+#endif
+
+void test_selftest__help(void)
+{
+ cl_invoke(assert_output("combined", "help", 1, "-h", NULL));
+}
+
+void test_selftest__without_arguments(void)
+{
+ cl_invoke(assert_output("combined", "without_arguments", 9, NULL));
+}
+
+void test_selftest__specific_test(void)
+{
+ cl_invoke(assert_output("combined", "specific_test", 1, "-scombined::bool", NULL));
+}
+
+void test_selftest__stop_on_failure(void)
+{
+ cl_invoke(assert_output("combined", "stop_on_failure", 1, "-Q", NULL));
+}
+
+void test_selftest__quiet(void)
+{
+ cl_invoke(assert_output("combined", "quiet", 9, "-q", NULL));
+}
+
+void test_selftest__tap(void)
+{
+ cl_invoke(assert_output("combined", "tap", 9, "-t", NULL));
+}
+
+void test_selftest__suite_names(void)
+{
+ cl_invoke(assert_output("combined", "suite_names", 0, "-l", NULL));
+}
+
+void test_selftest__summary_without_filename(void)
+{
+ struct stat st;
+ cl_invoke(assert_output("combined", "summary_without_filename", 9, "-r", NULL));
+ /* The summary contains timestamps, so we cannot verify its contents. */
+ cl_must_pass(stat("summary.xml", &st));
+}
+
+void test_selftest__summary_with_filename(void)
+{
+ struct stat st;
+ cl_invoke(assert_output("combined", "summary_with_filename", 9, "-rdifferent.xml", NULL));
+ /* The summary contains timestamps, so we cannot verify its contents. */
+ cl_must_pass(stat("different.xml", &st));
+}
+
+void test_selftest__pointer_equal(void)
+{
+ const char *args[] = {
+ "-spointer::equal",
+ "-t"
+ };
+ char *output = execute("pointer", 0, args, 2);
+ cl_assert_equal_s(output,
+ "TAP version 13\n"
+ "# start of suite 1: pointer\n"
+ "ok 1 - pointer::equal\n"
+ "1..1\n"
+ );
+ free(output);
+}
+
+void test_selftest__pointer_unequal(void)
+{
+ const char *args[] = {
+ "-spointer::unequal",
+ };
+ char *output = execute("pointer", 1, args, 1);
+ cl_assert(output);
+ cl_assert(strstr(output, "Pointer mismatch: "));
+ free(output);
+}
diff --git a/t/unit-tests/clar/test/selftest.h b/t/unit-tests/clar/test/selftest.h
new file mode 100644
index 0000000000..c24e0c5af4
--- /dev/null
+++ b/t/unit-tests/clar/test/selftest.h
@@ -0,0 +1,3 @@
+#include "clar.h"
+
+extern const char *selftest_suite_directory;
diff --git a/t/unit-tests/clar/test/suites/CMakeLists.txt b/t/unit-tests/clar/test/suites/CMakeLists.txt
new file mode 100644
index 0000000000..fa8ab9416a
--- /dev/null
+++ b/t/unit-tests/clar/test/suites/CMakeLists.txt
@@ -0,0 +1,53 @@
+list(APPEND suites
+ "combined"
+ "pointer"
+)
+
+foreach(suite IN LISTS suites)
+ add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${suite}/clar.suite"
+ COMMAND "${Python_EXECUTABLE}"
+ "${CMAKE_SOURCE_DIR}/generate.py"
+ "${CMAKE_CURRENT_SOURCE_DIR}/${suite}.c"
+ --output "${CMAKE_CURRENT_BINARY_DIR}/${suite}"
+ DEPENDS ${suite}.c
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ )
+
+ add_executable(${suite}_suite)
+ set_target_properties(${suite}_suite PROPERTIES
+ C_STANDARD 90
+ C_STANDARD_REQUIRED ON
+ C_EXTENSIONS OFF
+ )
+
+ # MSVC generates all kinds of warnings. We may want to fix these in the future
+ # and then unconditionally treat warnings as errors.
+ if(NOT MSVC)
+ set_target_properties(${suite}_suite PROPERTIES
+ COMPILE_WARNING_AS_ERROR ON
+ )
+ endif()
+
+ target_sources(${suite}_suite PRIVATE
+ main.c
+ ${suite}.c
+ "${CMAKE_CURRENT_BINARY_DIR}/${suite}/clar.suite"
+ )
+ target_compile_definitions(${suite}_suite PRIVATE
+ CLAR_FIXTURE_PATH="${CMAKE_CURRENT_SOURCE_DIR}/resources/"
+ CLAR_SELFTEST
+ )
+ target_compile_options(${suite}_suite PRIVATE
+ $<IF:$<CXX_COMPILER_ID:MSVC>,/W4,-Wall>
+ )
+ target_include_directories(${suite}_suite PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${CMAKE_CURRENT_BINARY_DIR}/${suite}"
+ )
+ target_link_libraries(${suite}_suite clar)
+
+ add_test(NAME build_${suite}_suite
+		COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" --config "$<CONFIG>" --target ${suite}_suite
+ )
+ set_tests_properties(build_${suite}_suite PROPERTIES FIXTURES_SETUP clar_test_fixture)
+endforeach()
diff --git a/t/unit-tests/clar/test/sample.c b/t/unit-tests/clar/test/suites/combined.c
index faa1209262..e8b41c98c3 100644
--- a/t/unit-tests/clar/test/sample.c
+++ b/t/unit-tests/clar/test/suites/combined.c
@@ -1,6 +1,7 @@
-#include "clar_test.h"
#include <sys/stat.h>
+#include "clar.h"
+
static int file_size(const char *filename)
{
struct stat st;
@@ -10,19 +11,14 @@ static int file_size(const char *filename)
return -1;
}
-void test_sample__initialize(void)
-{
- global_test_counter++;
-}
-
-void test_sample__cleanup(void)
+void test_combined__cleanup(void)
{
cl_fixture_cleanup("test");
cl_assert(file_size("test/file") == -1);
}
-void test_sample__1(void)
+void test_combined__1(void)
{
cl_assert(1);
cl_must_pass(0); /* 0 == success */
@@ -30,7 +26,7 @@ void test_sample__1(void)
cl_must_pass(-1); /* demonstrate a failing call */
}
-void test_sample__2(void)
+void test_combined__2(void)
{
cl_fixture_sandbox("test");
@@ -39,7 +35,7 @@ void test_sample__2(void)
cl_assert(100 == 101);
}
-void test_sample__strings(void)
+void test_combined__strings(void)
{
const char *actual = "expected";
cl_assert_equal_s("expected", actual);
@@ -47,7 +43,7 @@ void test_sample__strings(void)
cl_assert_equal_s_("mismatched", actual, "this one fails");
}
-void test_sample__strings_with_length(void)
+void test_combined__strings_with_length(void)
{
const char *actual = "expected";
cl_assert_equal_strn("expected_", actual, 8);
@@ -56,29 +52,34 @@ void test_sample__strings_with_length(void)
cl_assert_equal_strn_("exactly", actual, 3, "this one fails");
}
-void test_sample__int(void)
+void test_combined__int(void)
{
int value = 100;
cl_assert_equal_i(100, value);
cl_assert_equal_i_(101, value, "extra note on failing test");
}
-void test_sample__int_fmt(void)
+void test_combined__int_fmt(void)
{
int value = 100;
cl_assert_equal_i_fmt(022, value, "%04o");
}
-void test_sample__bool(void)
+void test_combined__bool(void)
{
int value = 100;
cl_assert_equal_b(1, value); /* test equality as booleans */
cl_assert_equal_b(0, value);
}
-void test_sample__ptr(void)
+void test_combined__multiline_description(void)
{
- const char *actual = "expected";
- cl_assert_equal_p(actual, actual); /* pointers to same object */
- cl_assert_equal_p(&actual, actual);
+ cl_must_pass_(-1, "description line 1\ndescription line 2");
+}
+
+void test_combined__null_string(void)
+{
+ const char *actual = NULL;
+ cl_assert_equal_s(actual, actual);
+ cl_assert_equal_s_("expected", actual, "this one fails");
}
diff --git a/t/unit-tests/clar/test/suites/main.c b/t/unit-tests/clar/test/suites/main.c
new file mode 100644
index 0000000000..3ab581d390
--- /dev/null
+++ b/t/unit-tests/clar/test/suites/main.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) Vicent Marti. All rights reserved.
+ *
+ * This file is part of clar, distributed under the ISC license.
+ * For full terms see the included COPYING file.
+ */
+
+#include "clar.h"
+
+/*
+ * Selftest main() for clar tests.
+ *
+ * You should write your own main routine for clar tests that does specific
+ * setup and teardown as necessary for your application. The only required
+ * line is the call to `clar_test(argc, argv)`, which will execute the test
+ * suite. If you want to check the return value of the test application,
+ * your main() should return the same value returned by clar_test().
+ */
+
+#ifdef _WIN32
+int __cdecl main(int argc, char *argv[])
+#else
+int main(int argc, char *argv[])
+#endif
+{
+ return clar_test(argc, argv);
+}
diff --git a/t/unit-tests/clar/test/suites/pointer.c b/t/unit-tests/clar/test/suites/pointer.c
new file mode 100644
index 0000000000..20535b159e
--- /dev/null
+++ b/t/unit-tests/clar/test/suites/pointer.c
@@ -0,0 +1,13 @@
+#include "clar.h"
+
+void test_pointer__equal(void)
+{
+ void *p1 = (void *)0x1;
+ cl_assert_equal_p(p1, p1);
+}
+
+void test_pointer__unequal(void)
+{
+ void *p1 = (void *)0x1, *p2 = (void *)0x2;
+ cl_assert_equal_p(p1, p2);
+}
diff --git a/t/unit-tests/clar/test/resources/test/file b/t/unit-tests/clar/test/suites/resources/test/file
index 220f4aa98a..220f4aa98a 100644
--- a/t/unit-tests/clar/test/resources/test/file
+++ b/t/unit-tests/clar/test/suites/resources/test/file
diff --git a/upload-pack.c b/upload-pack.c
index 91fcdcad9b..f78fabc1e1 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -476,20 +476,17 @@ static void create_pack_file(struct upload_pack_data *pack_data,
static int do_got_oid(struct upload_pack_data *data, const struct object_id *oid)
{
- int we_knew_they_have = 0;
struct object *o = parse_object_with_flags(the_repository, oid,
PARSE_OBJECT_SKIP_HASH_CHECK |
PARSE_OBJECT_DISCARD_TREE);
if (!o)
die("oops (%s)", oid_to_hex(oid));
+
if (o->type == OBJ_COMMIT) {
struct commit_list *parents;
struct commit *commit = (struct commit *)o;
- if (o->flags & THEY_HAVE)
- we_knew_they_have = 1;
- else
- o->flags |= THEY_HAVE;
+
if (!data->oldest_have || (commit->date < data->oldest_have))
data->oldest_have = commit->date;
for (parents = commit->parents;
@@ -497,11 +494,13 @@ static int do_got_oid(struct upload_pack_data *data, const struct object_id *oid
parents = parents->next)
parents->item->object.flags |= THEY_HAVE;
}
- if (!we_knew_they_have) {
- add_object_array(o, NULL, &data->have_obj);
- return 1;
- }
- return 0;
+
+ if (o->flags & THEY_HAVE)
+ return 0;
+ o->flags |= THEY_HAVE;
+
+ add_object_array(o, NULL, &data->have_obj);
+ return 1;
}
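The refactor drops the we_knew_they_have flag in favour of an early return; the contract is unchanged: the function returns 1 only the first time an object gets THEY_HAVE (and is added to have_obj), and 0 on repeated "have" lines for the same object. A hypothetical helper (not in the patch, shown only to illustrate the return value) that would live alongside it in upload-pack.c:

static int count_new_haves(struct upload_pack_data *data,
			   const struct object_id *oids, size_t nr)
{
	size_t i;
	int new_haves = 0;

	for (i = 0; i < nr; i++)
		new_haves += do_got_oid(data, &oids[i]); /* 1 only on first sight */

	return new_haves;
}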
static int got_oid(struct upload_pack_data *data,
diff --git a/xdiff/xutils.c b/xdiff/xutils.c
index 444a108f87..78d1cf74b1 100644
--- a/xdiff/xutils.c
+++ b/xdiff/xutils.c
@@ -249,7 +249,7 @@ int xdl_recmatch(const char *l1, long s1, const char *l2, long s2, long flags)
return 1;
}
-static unsigned long xdl_hash_record_with_whitespace(char const **data,
+unsigned long xdl_hash_record_with_whitespace(char const **data,
char const *top, long flags) {
unsigned long ha = 5381;
char const *ptr = *data;
@@ -294,19 +294,67 @@ static unsigned long xdl_hash_record_with_whitespace(char const **data,
return ha;
}
-unsigned long xdl_hash_record(char const **data, char const *top, long flags) {
- unsigned long ha = 5381;
+/*
+ * Compiler reassociation barrier: pretend to modify X and Y to disallow
+ * changing evaluation order with respect to following uses of X and Y.
+ */
+#ifdef __GNUC__
+#define REASSOC_FENCE(x, y) __asm__("" : "+r"(x), "+r"(y))
+#else
+#define REASSOC_FENCE(x, y)
+#endif
+
+unsigned long xdl_hash_record_verbatim(char const **data, char const *top) {
+ unsigned long ha = 5381, c0, c1;
char const *ptr = *data;
-
- if (flags & XDF_WHITESPACE_FLAGS)
- return xdl_hash_record_with_whitespace(data, top, flags);
-
+#if 0
+ /*
+ * The baseline form of the optimized loop below. This is the djb2
+ * hash (the above function uses a variant with XOR instead of ADD).
+ */
for (; ptr < top && *ptr != '\n'; ptr++) {
ha += (ha << 5);
- ha ^= (unsigned long) *ptr;
+ ha += (unsigned long) *ptr;
}
*data = ptr < top ? ptr + 1: ptr;
-
+#else
+ /* Process two characters per iteration. */
+ if (top - ptr >= 2) do {
+ if ((c0 = ptr[0]) == '\n') {
+ *data = ptr + 1;
+ return ha;
+ }
+ if ((c1 = ptr[1]) == '\n') {
+ *data = ptr + 2;
+ c0 += ha;
+ REASSOC_FENCE(c0, ha);
+ ha = ha * 32 + c0;
+ return ha;
+ }
+ /*
+ * Combine characters C0 and C1 into the hash HA. We have
+ * HA = (HA * 33 + C0) * 33 + C1, and we want to ensure
+		 * that the dependency chain on HA is just one multiplication
+ * and one addition, i.e. we want to evaluate this as
+ * HA = HA * 33 * 33 + (C0 * 33 + C1), and likewise prefer
+		 * (C0 * 32 + (C0 + C1)) for the expression in parentheses.
+ */
+ ha *= 33 * 33;
+ c1 += c0;
+ REASSOC_FENCE(c1, c0);
+ c1 += c0 * 32;
+ REASSOC_FENCE(c1, ha);
+ ha += c1;
+
+ ptr += 2;
+ } while (ptr < top - 1);
+ *data = top;
+ if (ptr < top && (c0 = ptr[0]) != '\n') {
+ c0 += ha;
+ REASSOC_FENCE(c0, ha);
+ ha = ha * 32 + c0;
+ }
+#endif
return ha;
}
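The comment in the new fast path is doing the algebra (HA * 33 + C0) * 33 + C1 = HA * 1089 + (C0 * 32 + (C0 + C1)), which keeps the dependency chain on HA down to one multiply and one add per character pair. A small standalone check (illustrative only, not part of the patch) that the reassociated update matches the plain djb2-with-ADD update:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const char *line = "an example input line";
	unsigned long ha_ref = 5381, ha_opt = 5381;
	const char *p;

	/* Baseline: ha = ha * 33 + c for every character. */
	for (p = line; *p; p++)
		ha_ref = ha_ref * 33 + (unsigned long)*p;

	/* Reassociated form: consume two characters per step. */
	for (p = line; p[0] && p[1]; p += 2) {
		unsigned long c0 = (unsigned long)p[0];
		unsigned long c1 = (unsigned long)p[1];
		ha_opt = ha_opt * (33 * 33) + (c0 * 32 + (c0 + c1));
	}
	if (*p)	/* odd trailing character: ha = ha * 33 + c */
		ha_opt = ha_opt * 32 + (ha_opt + (unsigned long)*p);

	assert(ha_ref == ha_opt);
	printf("hash = %lu\n", ha_ref);
	return 0;
}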
diff --git a/xdiff/xutils.h b/xdiff/xutils.h
index fd0bba94e8..13f6831047 100644
--- a/xdiff/xutils.h
+++ b/xdiff/xutils.h
@@ -34,7 +34,15 @@ void *xdl_cha_alloc(chastore_t *cha);
long xdl_guess_lines(mmfile_t *mf, long sample);
int xdl_blankline(const char *line, long size, long flags);
int xdl_recmatch(const char *l1, long s1, const char *l2, long s2, long flags);
-unsigned long xdl_hash_record(char const **data, char const *top, long flags);
+unsigned long xdl_hash_record_verbatim(char const **data, char const *top);
+unsigned long xdl_hash_record_with_whitespace(char const **data, char const *top, long flags);
+static inline unsigned long xdl_hash_record(char const **data, char const *top, long flags)
+{
+ if (flags & XDF_WHITESPACE_FLAGS)
+ return xdl_hash_record_with_whitespace(data, top, flags);
+ else
+ return xdl_hash_record_verbatim(data, top);
+}
unsigned int xdl_hashbits(unsigned int size);
int xdl_num_out(char *out, long val);
int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2,