diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d3c6b0d26cd..9faeb3a9d6d 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -28,13 +28,15 @@ Checklist You do not need to check all the boxes below all at once. Feel free to take your time and add more commits. If you're done and ready for review, please check the last box. Enable a checkbox by replacing [ ] with [x]. + +Please always follow these steps: +- Read the [contribution guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches). +- Enable [maintainer edits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork). +- Run `gofmt` on the code in all commits. +- Format all commit messages in the same style as [the other commits in the repository](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits). --> -- [ ] I have read the [contribution guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches). -- [ ] I have [enabled maintainer edits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork). - [ ] I have added tests for all code changes. - [ ] I have added documentation for relevant changes (in the manual). - [ ] There's a new file in `changelog/unreleased/` that describes the changes for our users (see [template](https://github.com/restic/restic/blob/master/changelog/TEMPLATE)). -- [ ] I have run `gofmt` on the code in all commits. -- [ ] All commit messages are formatted in the same style as [the other commits in the repo](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits). - [ ] I'm done! This pull request is ready for review. diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a1976784930..1a75604e628 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -20,12 +20,16 @@ jobs: contents: read packages: write + outputs: + image: ${{ steps.image.outputs.image }} + digest: ${{ steps.build-and-push.outputs.digest }} + steps: - name: Checkout repository uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -33,10 +37,11 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | + type=sha type=ref,event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} @@ -45,7 +50,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 + uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 - name: Ensure consistent binaries run: | @@ -55,7 +60,8 @@ jobs: if: github.ref != 'refs/heads/master' - name: Build and push Docker image - uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + id: build-and-push + uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 with: push: true context: . 
@@ -64,3 +70,26 @@ jobs: pull: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + + - name: Output image + id: image + run: | + # NOTE: Set the image as an output because the `env` context is not + # available to the inputs of a reusable workflow call. + image_name="${REGISTRY}/${IMAGE_NAME}" + echo "image=$image_name" >> "$GITHUB_OUTPUT" + + provenance: + needs: [build-and-push-image] + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. + if: github.repository == 'restic/restic' + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0 + with: + image: ${{ needs.build-and-push-image.outputs.image }} + digest: ${{ needs.build-and-push-image.outputs.digest }} + registry-username: ${{ github.actor }} + secrets: + registry-password: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 45681c6c5c1..5a41723bbd3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - latest_go: "1.21.x" + latest_go: "1.23.x" GO111MODULE: on jobs: @@ -23,34 +23,34 @@ jobs: # list of jobs to run: include: - job_name: Windows - go: 1.21.x + go: 1.23.x os: windows-latest - job_name: macOS - go: 1.21.x + go: 1.23.x os: macOS-latest test_fuse: false - job_name: Linux - go: 1.21.x + go: 1.23.x os: ubuntu-latest test_cloud_backends: true test_fuse: true check_changelog: true - job_name: Linux (race) - go: 1.21.x + go: 1.23.x os: ubuntu-latest test_fuse: true test_opts: "-race" - job_name: Linux - go: 1.20.x + go: 1.22.x os: ubuntu-latest test_fuse: true - job_name: Linux - go: 1.19.x + go: 1.21.x os: ubuntu-latest test_fuse: true @@ -61,6 +61,9 @@ jobs: GOPROXY: https://proxy.golang.org steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ matrix.go }} uses: actions/setup-go@v5 with: @@ -69,7 +72,7 @@ jobs: - name: Get programs (Linux/macOS) run: | echo "build Go tools" - go install github.com/restic/rest-server/cmd/rest-server@latest + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $HOME/bin @@ -101,7 +104,7 @@ jobs: $ProgressPreference = 'SilentlyContinue' echo "build Go tools" - go install github.com/restic/rest-server/... 
+ go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $Env:USERPROFILE/bin @@ -134,9 +137,6 @@ jobs: echo $Env:USERPROFILE\tar\bin >> $Env:GITHUB_PATH if: matrix.os == 'windows-latest' - - name: Check out code - uses: actions/checkout@v4 - - name: Build with build.go run: | go run build.go @@ -225,14 +225,14 @@ jobs: name: Cross Compile for subset ${{ matrix.subset }} steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 with: go-version: ${{ env.latest_go }} - - name: Check out code - uses: actions/checkout@v4 - - name: Cross-compile for subset ${{ matrix.subset }} run: | mkdir build-output build-output-debug @@ -242,20 +242,24 @@ jobs: lint: name: lint runs-on: ubuntu-latest + permissions: + contents: read + # allow annotating code in the PR + checks: write steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 with: go-version: ${{ env.latest_go }} - - name: Check out code - uses: actions/checkout@v4 - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.55.2 + version: v1.61.0 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get @@ -293,7 +297,7 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: # list of Docker images to use as base name for tags images: | @@ -316,7 +320,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: push: false context: . diff --git a/.golangci.yml b/.golangci.yml index c08331401cb..e632965bb2d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -38,6 +38,8 @@ linters: # ensure that http response bodies are closed - bodyclose + - importas + issues: # don't use the default exclude rules, this hides (among others) ignored # errors from Close() calls @@ -54,3 +56,14 @@ issues: # staticcheck: there's no easy way to replace these packages - "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated" - "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated" + + exclude-rules: + # revive: ignore unused parameters in tests + - path: (_test\.go|testing\.go|backend/.*/tests\.go) + text: "unused-parameter:" + +linters-settings: + importas: + alias: + - pkg: github.com/restic/restic/internal/test + alias: rtest diff --git a/CHANGELOG.md b/CHANGELOG.md index b8969a4438c..7ab47f11dc5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Table of Contents +* [Changelog for 0.17.3](#changelog-for-restic-0173-2024-11-08) +* [Changelog for 0.17.2](#changelog-for-restic-0172-2024-10-27) +* [Changelog for 0.17.1](#changelog-for-restic-0171-2024-09-05) +* [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26) +* [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01) * [Changelog for 0.16.4](#changelog-for-restic-0164-2024-02-04) * [Changelog for 0.16.3](#changelog-for-restic-0163-2024-01-14) * [Changelog for 0.16.2](#changelog-for-restic-0162-2023-10-29) @@ -33,6 +38,1116 @@ * [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.17.3 (2024-11-08) +The following sections list the changes in restic 0.17.3 relevant to +restic users. 
The changes are ordered by importance.
+
+## Summary
+
+ * Fix #4971: Fix unusable `mount` on macOS Sonoma
+ * Fix #5003: Fix metadata errors during backup of removable disks on Windows
+ * Fix #5101: Do not retry load/list operation if SFTP connection is broken
+ * Fix #5107: Fix metadata error on Windows for backups using VSS
+ * Enh #5096: Allow `prune --dry-run` without lock
+
+## Details
+
+ * Bugfix #4971: Fix unusable `mount` on macOS Sonoma
+
+ On macOS Sonoma when using FUSE-T, it was not possible to access files in a
+ mounted repository. This issue is now resolved.
+
+ https://github.com/restic/restic/issues/4971
+ https://github.com/restic/restic/pull/5048
+
+ * Bugfix #5003: Fix metadata errors during backup of removable disks on Windows
+
+ Since restic 0.17.0, backing up removable disks on Windows could report errors
+ when retrieving metadata, as shown below.
+
+ ```
+ error: incomplete metadata for d:\filename: get named security info failed with: Access is denied.
+ ```
+
+ This has now been fixed.
+
+ https://github.com/restic/restic/issues/5003
+ https://github.com/restic/restic/pull/5123
+ https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444
+
+ * Bugfix #5101: Do not retry load/list operation if SFTP connection is broken
+
+ When using restic with the SFTP backend, backend operations that load a file or
+ list files were retried even if the SFTP connection was broken. This has now
+ been fixed.
+
+ https://github.com/restic/restic/pull/5101
+ https://forum.restic.net/t/restic-hanging-on-backup/8559
+
+ * Bugfix #5107: Fix metadata error on Windows for backups using VSS
+
+ Since restic 0.17.2, when creating a backup on Windows using
+ `--use-fs-snapshot`, restic would report an error like the following:
+
+ ```
+ error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process.
+ ```
+
+ This has now been fixed by correctly handling paths that refer to volume shadow
+ copy snapshots.
+
+ https://github.com/restic/restic/issues/5107
+ https://github.com/restic/restic/pull/5110
+ https://github.com/restic/restic/pull/5112
+
+ * Enhancement #5096: Allow `prune --dry-run` without lock
+
+ The `prune --dry-run --no-lock` command now allows performing a dry-run without
+ locking the repository. Note that if the repository is modified concurrently,
+ `prune` may return inaccurate statistics or errors.
+
+ https://github.com/restic/restic/pull/5096
+
+
+# Changelog for restic 0.17.2 (2024-10-27)
+The following sections list the changes in restic 0.17.2 relevant to
+restic users. The changes are ordered by importance.
+
+## Summary
+
+ * Fix #4004: Support container-level SAS/SAT tokens for Azure backend
+ * Fix #5047: Resolve potential error during concurrent cache cleanup
+ * Fix #5050: Return error if `tag` fails to lock repository
+ * Fix #5057: Exclude irregular files from backups
+ * Fix #5063: Correctly `backup` extended metadata when using VSS on Windows
+
+## Details
+
+ * Bugfix #4004: Support container-level SAS/SAT tokens for Azure backend
+
+ Restic previously expected SAS/SAT tokens to be generated at the account level,
+ which prevented tokens created at the container level from being used to
+ initialize a repository. This caused an error when attempting to initialize a
+ repository with container-level tokens.
+ + Restic now supports both account-level and container-level SAS/SAT tokens for + initializing a repository. + + https://github.com/restic/restic/issues/4004 + https://github.com/restic/restic/pull/5093 + + * Bugfix #5047: Resolve potential error during concurrent cache cleanup + + When multiple restic processes ran concurrently, they could compete to remove + obsolete snapshots from the local backend cache, sometimes leading to a "no such + file or directory" error. Restic now suppresses this error to prevent issues + during cache cleanup. + + https://github.com/restic/restic/pull/5047 + + * Bugfix #5050: Return error if `tag` fails to lock repository + + Since restic 0.17.0, the `tag` command did not return an error when it failed to + open or lock the repository. This issue has now been fixed. + + https://github.com/restic/restic/issues/5050 + https://github.com/restic/restic/pull/5056 + + * Bugfix #5057: Exclude irregular files from backups + + Since restic 0.17.1, files with the type `irregular` could mistakenly be + included in snapshots, especially when backing up special file types on Windows + that restic cannot process. This issue has now been fixed. + + Previously, this bug caused the `check` command to report errors like the + following one: + + ``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" + ``` + + To repair affected snapshots, upgrade to restic 0.17.2 and run: + + ``` + restic repair snapshots --forget + ``` + + This will remove the `irregular` files from the snapshots (creating a new + snapshot ID for each of the affected snapshots). + + https://github.com/restic/restic/pull/5057 + https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 + + * Bugfix #5063: Correctly `backup` extended metadata when using VSS on Windows + + On Windows, when creating a backup with the `--use-fs-snapshot` option, restic + read extended metadata from the original filesystem path instead of from the + snapshot. This could result in errors if files were removed during the backup + process. + + This issue has now been resolved. + + https://github.com/restic/restic/issues/5063 + https://github.com/restic/restic/pull/5097 + https://github.com/restic/restic/pull/5099 + + +# Changelog for restic 0.17.1 (2024-09-05) +The following sections list the changes in restic 0.17.1 relevant to +restic users. The changes are ordered by importance. 
+
+## Summary
+
+ * Fix #2004: Correctly handle volume names in `backup` command on Windows
+ * Fix #4945: Include missing backup error text with `--json`
+ * Fix #4953: Correctly handle long paths on older Windows versions
+ * Fix #4957: Fix delayed cancellation of certain commands
+ * Fix #4958: Don't ignore metadata-setting errors during restore
+ * Fix #4969: Correctly restore timestamp for files with resource forks on macOS
+ * Fix #4975: Prevent `backup --stdin-from-command` from panicking
+ * Fix #4980: Skip extended attribute processing on unsupported Windows volumes
+ * Fix #5004: Fix spurious "A Required Privilege Is Not Held by the Client" error
+ * Fix #5005: Fix rare failures to retry locking a repository
+ * Fix #5018: Improve HTTP/2 support for REST backend
+ * Chg #4953: Also back up files with incomplete metadata
+ * Enh #4795: Display progress bar for `restore --verify`
+ * Enh #4934: Automatically clear removed snapshots from cache
+ * Enh #4944: Print JSON-formatted errors during `restore --json`
+ * Enh #4959: Return exit code 12 for "bad password" errors
+ * Enh #4970: Make timeout for stuck requests customizable
+
+## Details
+
+ * Bugfix #2004: Correctly handle volume names in `backup` command on Windows
+
+ On Windows, when the specified backup target only included the volume name
+ without a trailing slash, for example, `C:`, then restoring the resulting
+ snapshot would result in an error. Note that using `C:\` as backup target worked
+ correctly.
+
+ Specifying volume names is now handled correctly. To restore snapshots created
+ before this bugfix, use the `<snapshot>:<subpath>` syntax. For example, to restore
+ a snapshot with ID `12345678` that backed up `C:`, use the following command:
+
+ ```
+ restic restore 12345678:/C/C:./ --target output/folder
+ ```
+
+ https://github.com/restic/restic/issues/2004
+ https://github.com/restic/restic/pull/5028
+
+ * Bugfix #4945: Include missing backup error text with `--json`
+
+ Previously, when running a backup with the `--json` option, restic failed to
+ include the actual error message in the output, resulting in `"error": {}` being
+ displayed.
+
+ This has now been fixed, and restic now includes the error text in JSON output.
+
+ https://github.com/restic/restic/issues/4945
+ https://github.com/restic/restic/pull/4946
+
+ * Bugfix #4953: Correctly handle long paths on older Windows versions
+
+ On older Windows versions, like Windows Server 2012, restic 0.17.0 failed to
+ back up files with long paths. This problem has now been resolved.
+
+ https://github.com/restic/restic/issues/4953
+ https://github.com/restic/restic/pull/4954
+
+ * Bugfix #4957: Fix delayed cancellation of certain commands
+
+ Since restic 0.17.0, some commands did not immediately respond to cancellation
+ via Ctrl-C (SIGINT) and continued running for a short period. The most affected
+ commands were `diff`, `find`, `ls`, `stats` and `rewrite`. This is now resolved.
+
+ https://github.com/restic/restic/issues/4957
+ https://github.com/restic/restic/pull/4960
+
+ * Bugfix #4958: Don't ignore metadata-setting errors during restore
+
+ Previously, restic ignored errors when setting timestamps, attributes, or
+ file modes during a restore. It now reports those errors, except for
+ permission-related errors when running without root privileges.
+
+ https://github.com/restic/restic/pull/4958
+
+ * Bugfix #4969: Correctly restore timestamp for files with resource forks on macOS
+
+ On macOS, timestamps were not restored for files with resource forks.
This has + now been fixed. + + https://github.com/restic/restic/issues/4969 + https://github.com/restic/restic/pull/5006 + + * Bugfix #4975: Prevent `backup --stdin-from-command` from panicking + + Restic would previously crash if `--stdin-from-command` was specified without + providing a command. This issue has now been fixed. + + https://github.com/restic/restic/issues/4975 + https://github.com/restic/restic/pull/4976 + + * Bugfix #4980: Skip extended attribute processing on unsupported Windows volumes + + With restic 0.17.0, backups of certain Windows paths, such as network drives, + failed due to errors while fetching extended attributes. + + Restic now skips extended attribute processing for volumes where they are not + supported. + + https://github.com/restic/restic/issues/4955 + https://github.com/restic/restic/issues/4950 + https://github.com/restic/restic/pull/4980 + https://github.com/restic/restic/pull/4998 + + * Bugfix #5004: Fix spurious "A Required Privilege Is Not Held by the Client" error + + On Windows, creating a backup could sometimes trigger the following error: + + ``` + error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. + ``` + + This has now been fixed. + + https://github.com/restic/restic/issues/5004 + https://github.com/restic/restic/pull/5019 + + * Bugfix #5005: Fix rare failures to retry locking a repository + + Restic 0.17.0 could in rare cases fail to retry locking a repository if one of + the lock files failed to load, resulting in the error: + + ``` + unable to create lock in backend: circuit breaker open for file + ``` + + This issue has now been addressed. The error handling now properly retries the + locking operation. In addition, restic waits a few seconds between locking + retries to increase chances of successful locking. + + https://github.com/restic/restic/issues/5005 + https://github.com/restic/restic/pull/5011 + https://github.com/restic/restic/pull/5012 + + * Bugfix #5018: Improve HTTP/2 support for REST backend + + If `rest-server` tried to gracefully shut down an HTTP/2 connection still in use + by the client, it could result in the following error: + + ``` + http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error + ``` + + This issue has now been resolved. + + https://github.com/restic/restic/pull/5018 + https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 + + * Change #4953: Also back up files with incomplete metadata + + If restic failed to read extended metadata for a file or folder during a backup, + then the file or folder was not included in the resulting snapshot. Instead, a + warning message was printed along with returning exit code 3 once the backup was + finished. + + Now, restic also includes items for which the extended metadata could not be + read in a snapshot. The warning message has been updated to: + + ``` + incomplete metadata for /path/to/file:
+ <details>
+ ``` + + https://github.com/restic/restic/issues/4953 + https://github.com/restic/restic/pull/4977 + + * Enhancement #4795: Display progress bar for `restore --verify` + + When the `restore` command is run with `--verify`, it now displays a progress + bar while the verification step is running. The progress bar is not shown when + the `--json` flag is specified. + + https://github.com/restic/restic/issues/4795 + https://github.com/restic/restic/pull/4989 + + * Enhancement #4934: Automatically clear removed snapshots from cache + + Previously, restic only removed snapshots from the cache on the host where the + `forget` command was executed. On other hosts that use the same repository, the + old snapshots remained in the cache. + + Restic now automatically clears old snapshots from the local cache of the + current host. + + https://github.com/restic/restic/issues/4934 + https://github.com/restic/restic/pull/4981 + + * Enhancement #4944: Print JSON-formatted errors during `restore --json` + + Restic used to print any `restore` errors directly to the console as freeform + text messages, even when using the `--json` option. + + Now, when `--json` is specified, restic prints them as JSON formatted messages. + + https://github.com/restic/restic/issues/4944 + https://github.com/restic/restic/pull/4946 + + * Enhancement #4959: Return exit code 12 for "bad password" errors + + Restic now returns exit code 12 when it cannot open the repository due to an + incorrect password. + + https://github.com/restic/restic/pull/4959 + + * Enhancement #4970: Make timeout for stuck requests customizable + + Restic monitors connections to the backend to detect stuck requests. If a + request does not return any data within five minutes, restic assumes the request + is stuck and retries it. However, for large repositories this timeout might be + insufficient to collect a list of all files, causing the following error: + + `List(data) returned error, retrying after 1s: [...]: request timeout` + + It is now possible to increase the timeout using the `--stuck-request-timeout` + option. + + https://github.com/restic/restic/issues/4970 + https://github.com/restic/restic/pull/5014 + + +# Changelog for restic 0.17.0 (2024-07-26) +The following sections list the changes in restic 0.17.0 relevant to +restic users. The changes are ordered by importance. 
+
+## Summary
+
+ * Fix #3600: Handle unreadable xattrs in folders above `backup` source
+ * Fix #4209: Fix slow SFTP upload performance
+ * Fix #4503: Correct hardlink handling in `stats` command
+ * Fix #4568: Prevent `forget --keep-tags <invalid>` from deleting all snapshots
+ * Fix #4615: Make `find` not sometimes ignore directories
+ * Fix #4656: Properly report ID of newly added keys
+ * Fix #4703: Shutdown cleanly when receiving SIGTERM
+ * Fix #4709: Correct `--no-lock` handling of `ls` and `tag` commands
+ * Fix #4760: Fix possible error on concurrent cache cleanup
+ * Fix #4850: Handle UTF-16 password files in `key` command correctly
+ * Fix #4902: Update snapshot summary on `rewrite`
+ * Chg #956: Return exit code 10 and 11 for non-existing and locked repository
+ * Chg #4540: Require at least ARMv6 for ARM binaries
+ * Chg #4602: Deprecate legacy index format and `s3legacy` repository layout
+ * Chg #4627: Redesign backend error handling to improve reliability
+ * Chg #4707: Disable S3 anonymous authentication by default
+ * Chg #4744: Include full key ID in JSON output of `key list`
+ * Enh #662: Optionally skip snapshot creation if nothing changed
+ * Enh #693: Include snapshot size in `snapshots` output
+ * Enh #805: Add bitrot detection to `diff` command
+ * Enh #828: Improve features of the `repair packs` command
+ * Enh #1786: Support repositories with empty password
+ * Enh #2348: Add `--delete` option to `restore` command
+ * Enh #3067: Add extended options to configure Windows Shadow Copy Service
+ * Enh #3406: Improve `dump` performance for large files
+ * Enh #3806: Optimize and make `prune` command resumable
+ * Enh #4006: (alpha) Store deviceID only for hardlinks
+ * Enh #4048: Add support for FUSE-T with `mount` on macOS
+ * Enh #4251: Support reading backup from a command's standard output
+ * Enh #4287: Support connection to rest-server using unix socket
+ * Enh #4354: Significantly reduce `prune` memory usage
+ * Enh #4437: Make `check` command create non-existent cache directory
+ * Enh #4472: Support AWS Assume Role for S3 backend
+ * Enh #4547: Add `--json` option to `version` command
+ * Enh #4549: Add `--ncdu` option to `ls` command
+ * Enh #4573: Support rewriting host and time metadata in snapshots
+ * Enh #4583: Ignore `s3.storage-class` archive tiers for metadata
+ * Enh #4590: Speed up `mount` command's error detection
+ * Enh #4601: Add support for feature flags
+ * Enh #4611: Back up more file metadata on Windows
+ * Enh #4664: Make `ls` use `message_type` field in JSON output
+ * Enh #4676: Make `key` command's actions separate sub-commands
+ * Enh #4678: Add `--target` option to the `dump` command
+ * Enh #4708: Back up and restore SecurityDescriptors on Windows
+ * Enh #4733: Allow specifying `--host` via environment variable
+ * Enh #4737: Include snapshot ID in `reason` field of `forget` JSON output
+ * Enh #4764: Support forgetting all snapshots
+ * Enh #4768: Allow specifying custom User-Agent for outgoing requests
+ * Enh #4781: Add `restore` options to read include/exclude patterns from files
+ * Enh #4807: Support Extended Attributes on Windows NTFS
+ * Enh #4817: Make overwrite behavior of `restore` customizable
+ * Enh #4839: Add dry-run support to `restore` command
+
+## Details
+
+ * Bugfix #3600: Handle unreadable xattrs in folders above `backup` source
+
+ When backup sources are specified using absolute paths, `backup` also includes
+ information about the parent folders of the backup sources in the snapshot.
+
+ If the extended attributes for some of these folders could not be read due to
+ missing permissions, this caused the backup to fail. This has now been fixed.
+
+ https://github.com/restic/restic/issues/3600
+ https://github.com/restic/restic/pull/4668
+ https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216
+
+ * Bugfix #4209: Fix slow SFTP upload performance
+
+ Since restic 0.12.1, the upload speed of the sftp backend to a remote server has
+ regressed significantly. This has now been fixed.
+
+ https://github.com/restic/restic/issues/4209
+ https://github.com/restic/restic/pull/4782
+
+ * Bugfix #4503: Correct hardlink handling in `stats` command
+
+ If files on different devices had the same inode ID, the `stats` command did not
+ correctly calculate the snapshot size. This has now been fixed.
+
+ https://github.com/restic/restic/pull/4503
+ https://github.com/restic/restic/pull/4006
+ https://forum.restic.net/t/possible-bug-in-stats/6461/8
+
+ * Bugfix #4568: Prevent `forget --keep-tags <invalid>` from deleting all snapshots
+
+ Running `forget --keep-tags <invalid>`, where `<invalid>` is a tag that does not
+ exist in the repository, would remove all snapshots. This is especially
+ problematic if the tag name contains a typo.
+
+ The `forget` command now fails with an error if all snapshots in a snapshot
+ group would be deleted. This prevents the above example from deleting all
+ snapshots.
+
+ It is possible to temporarily disable the new check by setting the environment
+ variable `RESTIC_FEATURES=safe-forget-keep-tags=false`. Note that this feature
+ flag will be removed in the next minor restic version.
+
+ https://github.com/restic/restic/pull/4568
+ https://github.com/restic/restic/pull/4764
+
+ * Bugfix #4615: Make `find` not sometimes ignore directories
+
+ In some cases, the `find` command ignored empty or moved directories. This has
+ now been fixed.
+
+ https://github.com/restic/restic/pull/4615
+
+ * Bugfix #4656: Properly report ID of newly added keys
+
+ `restic key add` now reports the ID of the newly added key. This simplifies
+ selecting a specific key using the `--key-hint key` option.
+
+ https://github.com/restic/restic/issues/4656
+ https://github.com/restic/restic/pull/4657
+
+ * Bugfix #4703: Shutdown cleanly when receiving SIGTERM
+
+ Previously, when restic received the SIGTERM signal it would terminate
+ immediately, skipping cleanup and potentially causing issues like stale locks
+ being left behind. This primarily affected containerized restic invocations that
+ use SIGTERM, but could also be triggered via a simple `killall restic`.
+
+ This has now been fixed, such that restic shuts down cleanly when receiving the
+ SIGTERM signal.
+
+ https://github.com/restic/restic/pull/4703
+
+ * Bugfix #4709: Correct `--no-lock` handling of `ls` and `tag` commands
+
+ The `ls` command never locked the repository. This has now been fixed, with the
+ old behavior still being supported using `ls --no-lock`. The latter invocation
+ also works with older restic versions.
+
+ The `tag` command erroneously accepted the `--no-lock` option. This command now
+ always requires an exclusive lock.
+
+ https://github.com/restic/restic/pull/4709
+
+ * Bugfix #4760: Fix possible error on concurrent cache cleanup
+
+ If multiple restic processes concurrently cleaned up no longer existing files
+ from the cache, this could cause some of the processes to fail with a `no such
+ file or directory` error. This has now been fixed.
+
+ https://github.com/restic/restic/issues/4760
+ https://github.com/restic/restic/pull/4761
+
+ * Bugfix #4850: Handle UTF-16 password files in `key` command correctly
+
+ Previously, `key add` and `key passwd` did not properly decode UTF-16 encoded
+ passwords read from a password file. This has now been fixed to correctly match
+ the encoding when opening a repository.
+
+ https://github.com/restic/restic/issues/4850
+ https://github.com/restic/restic/pull/4851
+
+ * Bugfix #4902: Update snapshot summary on `rewrite`
+
+ Restic previously did not recalculate the total number of files and bytes
+ processed when files were excluded from a snapshot by the `rewrite` command.
+ This has now been fixed.
+
+ https://github.com/restic/restic/issues/4902
+ https://github.com/restic/restic/pull/4905
+
+ * Change #956: Return exit code 10 and 11 for non-existing and locked repository
+
+ If a repository does not exist or cannot be locked, restic previously always
+ returned exit code 1. This made it difficult to distinguish these cases from
+ other errors.
+
+ Restic now returns exit code 10 if the repository does not exist, and exit code
+ 11 if the repository could not be locked due to a conflicting lock.
+
+ https://github.com/restic/restic/issues/956
+ https://github.com/restic/restic/pull/4884
+
+ * Change #4540: Require at least ARMv6 for ARM binaries
+
+ The official release binaries of restic now require at least ARMv6 support for
+ ARM platforms.
+
+ https://github.com/restic/restic/issues/4540
+ https://github.com/restic/restic/pull/4542
+
+ * Change #4602: Deprecate legacy index format and `s3legacy` repository layout
+
+ Support for the legacy index format used by restic before version 0.2.0 has been
+ deprecated and will be removed in the next minor restic version. You can use
+ `restic repair index` to update the index to the current format.
+
+ It is possible to temporarily reenable support for the legacy index format by
+ setting the environment variable `RESTIC_FEATURES=deprecate-legacy-index=false`.
+ Note that this feature flag will be removed in the next minor restic version.
+
+ Support for the `s3legacy` repository layout used for the S3 backend before
+ restic 0.7.0 has been deprecated and will be removed in the next minor restic
+ version. You can migrate your S3 repository to the current layout using
+ `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`.
+
+ It is possible to temporarily reenable support for the `s3legacy` layout by
+ setting the environment variable
+ `RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag
+ will be removed in the next minor restic version.
+
+ https://github.com/restic/restic/issues/4602
+ https://github.com/restic/restic/pull/4724
+ https://github.com/restic/restic/pull/4743
+
+ * Change #4627: Redesign backend error handling to improve reliability
+
+ Restic now downloads pack files in large chunks instead of using a streaming
+ download. This prevents failures due to interrupted streams. The `restore`
+ command now also retries downloading individual blobs that could not be
+ retrieved.
+
+ HTTP requests that are stuck for more than two minutes while uploading or
+ downloading are now forcibly interrupted. This ensures that stuck requests are
+ retried after a short timeout.
+
+ Attempts to access a missing or truncated file will no longer be retried. This
+ avoids unnecessary retries in those cases. All other backend requests are
+ retried for up to 15 minutes.
This ensures that temporarily interrupted network + connections can be tolerated. + + If a download yields a corrupt file or blob, then the download will be retried + once. + + Most parts of the new backend error handling can temporarily be disabled by + setting the environment variable `RESTIC_FEATURES=backend-error-redesign=false`. + Note that this feature flag will be removed in the next minor restic version. + + https://github.com/restic/restic/issues/4627 + https://github.com/restic/restic/issues/4193 + https://github.com/restic/restic/issues/4515 + https://github.com/restic/restic/issues/1523 + https://github.com/restic/restic/pull/4605 + https://github.com/restic/restic/pull/4792 + https://github.com/restic/restic/pull/4520 + https://github.com/restic/restic/pull/4800 + https://github.com/restic/restic/pull/4784 + https://github.com/restic/restic/pull/4844 + + * Change #4707: Disable S3 anonymous authentication by default + + When using the S3 backend with anonymous authentication, it continuously tried + to retrieve new authentication credentials, causing bad performance. + + Now, to use anonymous authentication, it is necessary to pass the extended + option `-o s3.unsafe-anonymous-auth=true` to restic. + + It is possible to temporarily revert to the old behavior by setting the + environment variable `RESTIC_FEATURES=explicit-s3-anonymous-auth=false`. Note + that this feature flag will be removed in the next minor restic version. + + https://github.com/restic/restic/issues/4707 + https://github.com/restic/restic/pull/4908 + + * Change #4744: Include full key ID in JSON output of `key list` + + The JSON output of the `key list` command has changed to include the full key ID + instead of just a shortened version of the ID, as the latter can be ambiguous in + some rare cases. To derive the short ID, please truncate the full ID down to + eight characters. + + https://github.com/restic/restic/issues/4744 + https://github.com/restic/restic/pull/4745 + + * Enhancement #662: Optionally skip snapshot creation if nothing changed + + The `backup` command always created a snapshot even if nothing in the backup set + changed compared to the parent snapshot. + + Restic now supports the `--skip-if-unchanged` option for the `backup` command, + which omits creating a snapshot if the new snapshot's content would be identical + to that of the parent snapshot. + + https://github.com/restic/restic/issues/662 + https://github.com/restic/restic/pull/4816 + + * Enhancement #693: Include snapshot size in `snapshots` output + + The `snapshots` command now prints the size for snapshots created using this or + a future restic version. To achieve this, the `backup` command now stores the + backup summary statistics in the snapshot. + + The text output of the `snapshots` command only shows the snapshot size. The + other statistics are only included in the JSON output. To inspect these + statistics use `restic snapshots --json` or `restic cat snapshot `. + + https://github.com/restic/restic/issues/693 + https://github.com/restic/restic/pull/4705 + https://github.com/restic/restic/pull/4913 + + * Enhancement #805: Add bitrot detection to `diff` command + + The output of the `diff` command now includes the modifier `?` for files to + indicate bitrot in backed up files. The `?` will appear whenever there is a + difference in content while the metadata is exactly the same. 
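+
+ For example, a sketch of how bitrot is reported by `diff` (the snapshot IDs and
+ the affected path shown here are purely illustrative):
+
+ ```
+ $ restic diff 5845b002 2ab627a6
+ ?    /home/user/photos/img.jpg
+ ```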
+
+ Since files with unchanged metadata are normally not read again when creating a
+ backup, the detection is only effective when the right-hand side of the diff has
+ been created with `backup --force`.
+
+ https://github.com/restic/restic/issues/805
+ https://github.com/restic/restic/pull/4526
+
+ * Enhancement #828: Improve features of the `repair packs` command
+
+ The `repair packs` command has been improved to also be able to process
+ truncated pack files. The `check` and `check --read-data` commands will provide
+ instructions on using the command if necessary to repair a repository. See the
+ guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html for
+ further instructions.
+
+ https://github.com/restic/restic/issues/828
+ https://github.com/restic/restic/pull/4644
+ https://github.com/restic/restic/pull/4882
+
+ * Enhancement #1786: Support repositories with empty password
+
+ Restic previously required a password to create or operate on repositories.
+ Using the new option `--insecure-no-password` it is now possible to disable this
+ requirement. Restic will not prompt for a password when using this option.
+
+ For security reasons, the option must always be specified when operating on
+ repositories with an empty password, and specifying `--insecure-no-password`
+ while also passing a password to restic via a CLI option or environment variable
+ results in an error.
+
+ The `init` and `copy` commands add the related `--from-insecure-no-password`
+ option, which applies to the source repository. The `key add` and `key passwd`
+ commands add the `--new-insecure-no-password` option to add or set an empty
+ password.
+
+ https://github.com/restic/restic/issues/1786
+ https://github.com/restic/restic/issues/4326
+ https://github.com/restic/restic/pull/4698
+ https://github.com/restic/restic/pull/4808
+
+ * Enhancement #2348: Add `--delete` option to `restore` command
+
+ The `restore` command now supports a `--delete` option that allows removing
+ files and directories from the target directory that do not exist in the
+ snapshot. This option also allows files in the snapshot to replace non-empty
+ directories having the same name.
+
+ To check that only expected files are deleted, add the `--dry-run --verbose=2`
+ options.
+
+ https://github.com/restic/restic/issues/2348
+ https://github.com/restic/restic/pull/4881
+
+ * Enhancement #3067: Add extended options to configure Windows Shadow Copy Service
+
+ Previously, restic always used a 120-second timeout and unconditionally created
+ VSS snapshots for all volume mount points on disk.
This behavior can now be
+ fine-tuned by the following new extended options (available only on Windows):
+
+ - `-o vss.timeout`: Time that VSS can spend creating snapshot before timing out (default: 120s)
+ - `-o vss.exclude-all-mount-points`: Exclude mountpoints from snapshotting on all volumes (default: false)
+ - `-o vss.exclude-volumes`: Semicolon-separated list of volumes to exclude from snapshotting
+ - `-o vss.provider`: VSS provider identifier which will be used for snapshotting
+
+ For example, change the VSS timeout to five minutes and disable snapshotting of
+ mount points on all volumes:
+
+ `restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true`
+
+ Exclude drive `d:`, mount point `c:\mnt` and a specific volume from
+ snapshotting:
+
+ `restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}"`
+
+ Use 'Microsoft Software Shadow Copy provider 1.0' instead of the default
+ provider:
+
+ `restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5}`
+
+ https://github.com/restic/restic/pull/3067
+
+ * Enhancement #3406: Improve `dump` performance for large files
+
+ The `dump` command now retrieves the data chunks for a file in parallel. This
+ improves the download performance by up to as many times as the configured
+ number of parallel backend connections.
+
+ https://github.com/restic/restic/issues/3406
+ https://github.com/restic/restic/pull/4796
+
+ * Enhancement #3806: Optimize and make `prune` command resumable
+
+ Previously, if the `prune` command was interrupted, a later `prune` run would
+ start repacking pack files from the start, as `prune` did not update the index
+ while repacking.
+
+ The `prune` command now supports resuming interrupted prune runs. The update of
+ the repository index has also been optimized to use less memory and only rewrite
+ parts of the index that have changed.
+
+ https://github.com/restic/restic/issues/3806
+ https://github.com/restic/restic/pull/4812
+
+ * Enhancement #4006: (alpha) Store deviceID only for hardlinks
+
+ Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature. The
+ feature flag will be removed after repository format version 3 becomes available
+ or be replaced with a different solution.
+
+ When creating backups from a filesystem snapshot, for example created using
+ BTRFS subvolumes, the deviceID of the filesystem changes compared to previous
+ snapshots. This prevented restic from deduplicating the directory metadata of a
+ snapshot.
+
+ When this alpha feature is enabled, the deviceID is only stored for hardlinks,
+ which significantly reduces the metadata duplication for most backups.
+
+ https://github.com/restic/restic/pull/4006
+
+ * Enhancement #4048: Add support for FUSE-T with `mount` on macOS
+
+ The restic `mount` command now supports creating FUSE mounts using FUSE-T on
+ macOS.
+
+ https://github.com/restic/restic/issues/4048
+ https://github.com/restic/restic/pull/4825
+
+ * Enhancement #4251: Support reading backup from a command's standard output
+
+ The `backup` command now supports the `--stdin-from-command` option. When using
+ this option, the arguments to `backup` are interpreted as a command instead of
+ paths to back up. `backup` then executes the given command and stores the
+ standard output from it in the backup, similar to what the `--stdin` option
+ does. This also enables restic to verify that the command completes with exit
+ code zero.
A non-zero exit code causes the backup to fail.
+
+ Note that the `--stdin` option does not have to be specified at the same time,
+ and that the `--stdin-filename` option also applies to `--stdin-from-command`.
+
+ Example: `restic backup --stdin-from-command --stdin-filename dump.sql mysqldump
+ [...]`
+
+ https://github.com/restic/restic/issues/4251
+ https://github.com/restic/restic/pull/4410
+
+ * Enhancement #4287: Support connection to rest-server using unix socket
+
+ Restic now supports using a unix socket to connect to a rest-server version
+ 0.13.0 or later. This allows running restic as follows:
+
+ ```
+ rest-server --listen unix:/tmp/rest.socket --data /path/to/data &
+ restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...]
+ ```
+
+ https://github.com/restic/restic/issues/4287
+ https://github.com/restic/restic/pull/4655
+
+ * Enhancement #4354: Significantly reduce `prune` memory usage
+
+ The `prune` command has been optimized to use up to 60% less memory. The memory
+ usage should now be roughly similar to creating a backup.
+
+ https://github.com/restic/restic/pull/4354
+ https://github.com/restic/restic/pull/4812
+
+ * Enhancement #4437: Make `check` command create non-existent cache directory
+
+ Previously, if a custom cache directory was specified for the `check` command,
+ but the directory did not exist, `check` continued with the cache disabled.
+
+ The `check` command now attempts to create the cache directory before
+ initializing the cache.
+
+ https://github.com/restic/restic/issues/4437
+ https://github.com/restic/restic/pull/4805
+ https://github.com/restic/restic/pull/4883
+
+ * Enhancement #4472: Support AWS Assume Role for S3 backend
+
+ Previously, only credentials discovered via the Minio discovery methods were used
+ to authenticate.
+
+ However, there are many circumstances where the discovered credentials have
+ lower permissions and need to assume a specific role. This is now possible using
+ the following new environment variables:
+
+ - RESTIC_AWS_ASSUME_ROLE_ARN
+ - RESTIC_AWS_ASSUME_ROLE_SESSION_NAME
+ - RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID
+ - RESTIC_AWS_ASSUME_ROLE_REGION (defaults to us-east-1)
+ - RESTIC_AWS_ASSUME_ROLE_POLICY
+ - RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT
+
+ https://github.com/restic/restic/issues/4472
+ https://github.com/restic/restic/pull/4474
+
+ * Enhancement #4547: Add `--json` option to `version` command
+
+ Restic now supports outputting the restic version along with the Go version, OS
+ and architecture used to build restic in JSON format using `version --json`.
+
+ https://github.com/restic/restic/issues/4547
+ https://github.com/restic/restic/pull/4553
+
+ * Enhancement #4549: Add `--ncdu` option to `ls` command
+
+ NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories. It has
+ an option to save a directory tree and analyse it later.
+
+ The `ls` command now supports outputting snapshot information in the NCDU format
+ using the `--ncdu` option. Example usage: `restic ls latest --ncdu | ncdu -f -`
+
+ https://github.com/restic/restic/issues/4549
+ https://github.com/restic/restic/pull/4550
+ https://github.com/restic/restic/pull/4911
+
+ * Enhancement #4573: Support rewriting host and time metadata in snapshots
+
+ The `rewrite` command now supports rewriting the host and/or time metadata of a
+ snapshot using the new `--new-host` and `--new-time` options.
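+
+ For example, a minimal sketch (the hostname, timestamp and snapshot ID are
+ placeholders; see `restic rewrite --help` for the exact time format accepted
+ by `--new-time`):
+
+ ```
+ restic rewrite --new-host laptop --new-time "2024-01-01 10:00:00" 12345678
+ ```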
+
+ https://github.com/restic/restic/pull/4573
+
+ * Enhancement #4583: Ignore `s3.storage-class` archive tiers for metadata
+
+ Restic used to store all files on S3 using the specified `s3.storage-class`.
+
+ Now, restic will only use non-archive storage tiers for metadata, to avoid
+ problems when accessing a repository. To restore any data, it is still necessary
+ to manually warm up the required data beforehand.
+
+ NOTE: There is no official cold storage support in restic, use this option at
+ your own risk.
+
+ https://github.com/restic/restic/issues/4583
+ https://github.com/restic/restic/pull/4584
+
+ * Enhancement #4590: Speed up `mount` command's error detection
+
+ The `mount` command now checks for the existence of the mountpoint before
+ opening the repository, leading to quicker error detection.
+
+ https://github.com/restic/restic/pull/4590
+
+ * Enhancement #4601: Add support for feature flags
+
+ Restic now supports feature flags that can be used to enable and disable
+ experimental features. The flags can be set using the environment variable
+ `RESTIC_FEATURES`. To get a list of currently supported feature flags, use the
+ `features` command.
+
+ https://github.com/restic/restic/issues/4601
+ https://github.com/restic/restic/pull/4666
+
+ * Enhancement #4611: Back up more file metadata on Windows
+
+ Previously, restic did not back up all common Windows-specific metadata.
+
+ Restic now stores file creation time and file attributes like the hidden,
+ read-only and encrypted flags when backing up files and folders on Windows.
+
+ https://github.com/restic/restic/pull/4611
+
+ * Enhancement #4664: Make `ls` use `message_type` field in JSON output
+
+ The `ls` command was the only restic command that used the `struct_type` field
+ in its JSON output format to specify the message type.
+
+ The JSON output of the `ls` command now also includes the `message_type` field,
+ which is consistent with other commands. The `struct_type` field is still
+ included, but now deprecated.
+
+ https://github.com/restic/restic/pull/4664
+
+ * Enhancement #4676: Make `key` command's actions separate sub-commands
+
+ Each of the `add`, `list`, `remove` and `passwd` actions provided by the `key`
+ command is now a separate sub-command and has its own documentation which can
+ be invoked using `restic key <sub-command> --help`.
+
+ https://github.com/restic/restic/issues/4676
+ https://github.com/restic/restic/pull/4685
+
+ * Enhancement #4678: Add `--target` option to the `dump` command
+
+ Restic `dump` always printed to the standard output. It now supports specifying
+ a `--target` file to write its output to.
+
+ https://github.com/restic/restic/issues/4678
+ https://github.com/restic/restic/pull/4682
+ https://github.com/restic/restic/pull/4692
+
+ * Enhancement #4708: Back up and restore SecurityDescriptors on Windows
+
+ Restic now backs up and restores SecurityDescriptors for files and folders on
+ Windows, which includes owner, group, discretionary access control list (DACL)
+ and system access control list (SACL).
+
+ This requires the user to be a member of backup operators or the application
+ must be run as admin. If that is not the case, only the current user's owner,
+ group and DACL will be backed up, and during restore only the DACL of the backed
+ up file will be restored, with the current user's owner and group being set on
+ the restored file.
+
+ https://github.com/restic/restic/pull/4708
+
+ * Enhancement #4733: Allow specifying `--host` via environment variable
+
+ Restic commands that operate on snapshots, such as `restic backup` and `restic
+ snapshots`, support the `--host` option to specify the hostname for grouping
+ snapshots.
+
+ Such commands now also support specifying the hostname via the environment
+ variable `RESTIC_HOST`. Note that `--host` still takes precedence over the
+ environment variable.
+
+ https://github.com/restic/restic/issues/4733
+ https://github.com/restic/restic/pull/4734
+
+ * Enhancement #4737: Include snapshot ID in `reason` field of `forget` JSON output
+
+ The JSON output of the `forget` command now includes `id` and `short_id` of
+ snapshots in the `reason` field.
+
+ https://github.com/restic/restic/pull/4737
+
+ * Enhancement #4764: Support forgetting all snapshots
+
+ The `forget` command now supports the `--unsafe-allow-remove-all` option, which
+ removes all snapshots in the repository.
+
+ This option must always be combined with a snapshot filter (by host, path or
+ tag). For example, the command `forget --tag example --unsafe-allow-remove-all`
+ removes all snapshots with the tag "example".
+
+ https://github.com/restic/restic/pull/4764
+
+ * Enhancement #4768: Allow specifying custom User-Agent for outgoing requests
+
+ Restic now supports setting a custom `User-Agent` for outgoing HTTP requests
+ using the global option `--http-user-agent` or the `RESTIC_HTTP_USER_AGENT`
+ environment variable.
+
+ https://github.com/restic/restic/issues/4768
+ https://github.com/restic/restic/pull/4810
+
+ * Enhancement #4781: Add `restore` options to read include/exclude patterns from files
+
+ Restic now supports reading include and exclude patterns from files using the
+ `--include-file`, `--exclude-file`, `--iinclude-file` and `--iexclude-file`
+ options of the `restore` command.
+
+ https://github.com/restic/restic/issues/4781
+ https://github.com/restic/restic/pull/4811
+
+ * Enhancement #4807: Support Extended Attributes on Windows NTFS
+
+ Restic now backs up and restores Extended Attributes for files and folders on
+ Windows NTFS.
+
+ https://github.com/restic/restic/pull/4807
+
+ * Enhancement #4817: Make overwrite behavior of `restore` customizable
+
+ The `restore` command now supports an `--overwrite` option to configure whether
+ already existing files are overwritten. The overwrite behavior can be configured
+ using the following option values:
+
+ - `--overwrite always` (default): Always overwrites already existing files. The `restore` command will verify the existing file content and only restore mismatching parts to minimize downloads. Updates the metadata of all files.
+ - `--overwrite if-changed`: Like `always`, but speeds up the file content check by assuming that files with matching size and modification time (mtime) are already up to date. In case of a mismatch, the full file content is verified like with `always`. Updates the metadata of all files.
+ - `--overwrite if-newer`: Like `always`, but only overwrites existing files when the file in the snapshot has a newer modification time (mtime) than the existing file.
+ - `--overwrite never`: Never overwrites existing files.
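+
+ For example, a sketch of a typical invocation (the snapshot reference and
+ target path are placeholders):
+
+ ```
+ restic restore latest --target /srv/restore --overwrite if-changed
+ ```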
+ + https://github.com/restic/restic/issues/4817 + https://github.com/restic/restic/issues/200 + https://github.com/restic/restic/issues/407 + https://github.com/restic/restic/issues/2662 + https://github.com/restic/restic/pull/4837 + https://github.com/restic/restic/pull/4838 + https://github.com/restic/restic/pull/4864 + https://github.com/restic/restic/pull/4921 + + * Enhancement #4839: Add dry-run support to `restore` command + + The `restore` command now supports the `--dry-run` option to perform a dry run. + Pass the `--verbose=2` option to see which files would remain unchanged, and + which would be updated or freshly restored. + + https://github.com/restic/restic/pull/4839 + + +# Changelog for restic 0.16.5 (2024-07-01) +The following sections list the changes in restic 0.16.5 relevant to +restic users. The changes are ordered by importance. + +## Summary + + * Enh #4799: Add option to force use of Azure CLI credential + * Enh #4873: Update dependencies + +## Details + + * Enhancement #4799: Add option to force use of Azure CLI credential + + A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the + use of Azure CLI credential, ignoring other credentials like managed identity. + + https://github.com/restic/restic/pull/4799 + + * Enhancement #4873: Update dependencies + + A few potentially vulnerable dependencies were updated. + + https://github.com/restic/restic/issues/4873 + https://github.com/restic/restic/pull/4878 + + # Changelog for restic 0.16.4 (2024-02-04) The following sections list the changes in restic 0.16.4 relevant to restic users. The changes are ordered by importance. @@ -596,7 +1711,7 @@ restic users. The changes are ordered by importance. * Enhancement #3941: Support `--group-by` for backup parent selection Previously, the `backup` command by default selected the parent snapshot based - on the hostname and the backup targets. When the backup path list changed, the + on the hostname and the backup paths. When the backup path list changed, the `backup` command was unable to determine a suitable parent snapshot and had to read all files again. @@ -2490,7 +3605,7 @@ restic users. The changes are ordered by importance. * Fix #3151: Don't create invalid snapshots when `backup` is interrupted * Fix #3152: Do not hang until foregrounded when completed in background * Fix #3166: Improve error handling in the `restore` command - * Fix #3232: Correct statistics for overlapping targets + * Fix #3232: Correct statistics for overlapping backup sources * Fix #3249: Improve error handling in `gs` backend * Chg #3095: Deleting files on Google Drive now moves them to the trash * Enh #909: Back up mountpoints as empty directories @@ -2657,10 +3772,10 @@ restic users. The changes are ordered by importance. https://github.com/restic/restic/issues/3166 https://github.com/restic/restic/pull/3207 - * Bugfix #3232: Correct statistics for overlapping targets + * Bugfix #3232: Correct statistics for overlapping backup sources A user reported that restic's statistics and progress information during backup - was not correctly calculated when the backup targets (files/dirs to save) + was not correctly calculated when the backup sources (files/dirs to save) overlap. For example, consider a directory `foo` which contains (among others) a file `foo/bar`. When `restic backup foo foo/bar` was run, restic counted the size of the file `foo/bar` twice, so the completeness percentage as well as the @@ -2925,7 +4040,7 @@ restic users. The changes are ordered by importance. 
* Enhancement #3106: Parallelize scan of snapshot content in `copy` and `prune` The `copy` and `prune` commands used to traverse the directories of snapshots - one by one to find used data. This snapshot traversal is now parallized which + one by one to find used data. This snapshot traversal is now parallelized which can speed up this step several times. In addition the `check` command now reports how many snapshots have already been @@ -3024,11 +4139,10 @@ restic users. The changes are ordered by importance. * Bugfix #1756: Mark repository files as read-only when using the local backend - Files stored in a local repository were marked as writeable on the filesystem - for non-Windows systems, which did not prevent accidental file modifications - outside of restic. In addition, the local backend did not work with certain - filesystems and network mounts which do not permit modifications of file - permissions. + Files stored in a local repository were marked as writable on the filesystem for + non-Windows systems, which did not prevent accidental file modifications outside + of restic. In addition, the local backend did not work with certain filesystems + and network mounts which do not permit modifications of file permissions. Restic now marks files stored in a local repository as read-only on the filesystem on non-Windows systems. The error handling is improved to support @@ -3122,7 +4236,7 @@ restic users. The changes are ordered by importance. was unable to backup those files before. This update enables backing up these files. - This needs to be enabled explicitely using the --use-fs-snapshot option of the + This needs to be enabled explicitly using the --use-fs-snapshot option of the backup command. https://github.com/restic/restic/issues/340 @@ -3332,8 +4446,8 @@ restic users. The changes are ordered by importance. * Bugfix #2668: Don't abort the stats command when data blobs are missing - Runing the stats command in the blobs-per-file mode on a repository with missing - data blobs previously resulted in a crash. + Running the stats command in the blobs-per-file mode on a repository with + missing data blobs previously resulted in a crash. https://github.com/restic/restic/pull/2668 @@ -3488,7 +4602,7 @@ restic users. The changes are ordered by importance. NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a file can be - written to the file before any of the preceeding file blobs. It is therefore + written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. @@ -3722,7 +4836,7 @@ restic users. The changes are ordered by importance. will be disabled if the --ignore-inode flag was given. If this change causes problems for you, please open an issue, and we can look in - to adding a seperate flag to disable just the ctime check. + to adding a separate flag to disable just the ctime check. https://github.com/restic/restic/issues/2179 https://github.com/restic/restic/pull/2212 @@ -4096,7 +5210,7 @@ restic users. The changes are ordered by importance. * Enhancement #1876: Display reason why forget keeps snapshots We've added a column to the list of snapshots `forget` keeps which details the - reasons to keep a particuliar snapshot. This makes debugging policies for forget + reasons to keep a particular snapshot. This makes debugging policies for forget much easier. 
   Please remember to always try things out with `--dry-run`!

   https://github.com/restic/restic/pull/1876
@@ -4409,7 +5523,7 @@ restic users. The changes are ordered by importance.
  * Enh #1665: Improve cache handling for `restic check`
  * Enh #1709: Improve messages `restic check` prints
  * Enh #1721: Add `cache` command to list cache dirs
- * Enh #1735: Allow keeping a time range of snaphots
+ * Enh #1735: Allow keeping a time range of snapshots
  * Enh #1758: Allow saving OneDrive folders in Windows
  * Enh #1782: Use default AWS credentials chain for S3 backend

@@ -4615,7 +5729,7 @@ restic users. The changes are ordered by importance.
   https://github.com/restic/restic/issues/1721
   https://github.com/restic/restic/pull/1749

- * Enhancement #1735: Allow keeping a time range of snaphots
+ * Enhancement #1735: Allow keeping a time range of snapshots

   We've added the `--keep-within` option to the `forget` command. It instructs
   restic to keep all snapshots within the given duration since the newest
@@ -4717,7 +5831,7 @@ restic users. The changes are ordered by importance.
   already exists.

   This is not accurate, the file could have been created between the HTTP request
-   testing for it, and when writing starts, so we've relaxed this requeriment,
+   testing for it, and when writing starts, so we've relaxed this requirement,
   which saves one additional HTTP request per newly added file.

   https://github.com/restic/restic/pull/1623
@@ -4737,7 +5851,7 @@ restic users. The changes are ordered by importance.

 ## Summary

- * Fix #1506: Limit bandwith at the http.RoundTripper for HTTP based backends
+ * Fix #1506: Limit bandwidth at the http.RoundTripper for HTTP based backends
  * Fix #1512: Restore directory permissions as the last step
  * Fix #1528: Correctly create missing subdirs in data/
  * Fix #1589: Complete intermediate index upload
@@ -4757,7 +5871,7 @@ restic users. The changes are ordered by importance.

 ## Details

- * Bugfix #1506: Limit bandwith at the http.RoundTripper for HTTP based backends
+ * Bugfix #1506: Limit bandwidth at the http.RoundTripper for HTTP based backends

   https://github.com/restic/restic/issues/1506
   https://github.com/restic/restic/pull/1511
@@ -4814,7 +5928,7 @@ restic users. The changes are ordered by importance.
  * Bugfix #1595: Backup: Remove bandwidth display

   This commit removes the bandwidth displayed during backup process. It is
-   misleading and seldomly correct, because it's neither the "read bandwidth" (only
+   misleading and seldom correct, because it's neither the "read bandwidth" (only
   for the very first backup) nor the "upload bandwidth". Many users are confused
   about (and rightly so), c.f. #1581, #1033, #1591
@@ -5087,7 +6201,7 @@ restic users. The changes are ordered by importance.
   We've added a local cache for metadata so that restic doesn't need to load all
   metadata (snapshots, indexes, ...) from the repo each time it starts. By default
   the cache is active, but there's a new global option `--no-cache` that can be
-   used to disable the cache. By deafult, the cache a standard cache folder for the
+   used to disable the cache. By default, the cache uses a standard cache folder for the
   OS, which can be overridden with `--cache-dir`. The cache will automatically
   populate, indexes and snapshots are saved as they are loaded. Cache directories
   for repos that haven't been used recently can automatically be removed by restic
@@ -5174,7 +6288,7 @@ restic users. The changes are ordered by importance.
* Enhancement #1319: Make `check` print `no errors found` explicitly - The `check` command now explicetly prints `No errors were found` when no errors + The `check` command now explicitly prints `No errors were found` when no errors could be found. https://github.com/restic/restic/issues/1303 diff --git a/README.md b/README.md index ad6b13cefa5..ef12f3e1b2a 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,7 @@ For detailed usage and installation instructions check out the [documentation](h You can ask questions in our [Discourse forum](https://forum.restic.net). -Quick start ------------ +## Quick start Once you've [installed](https://restic.readthedocs.io/en/latest/020_installation.html) restic, start off with creating a repository for your backups: @@ -59,7 +58,7 @@ Therefore, restic supports the following backends for storing backups natively: Restic is a program that does backups right and was designed with the following principles in mind: -- **Easy:** Doing backups should be a frictionless process, otherwise +- **Easy**: Doing backups should be a frictionless process, otherwise you might be tempted to skip it. Restic should be easy to configure and use, so that, in the event of a data loss, you can just restore it. Likewise, restoring data should not be complicated. @@ -92,20 +91,17 @@ reproduce a byte identical version from the source code for that release. Instructions on how to do that are contained in the [builder repository](https://github.com/restic/builder). -News ----- +## News -You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or by subscribing to +You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or subscribe to the [project blog](https://restic.net/blog/). -License -------- +## License Restic is licensed under [BSD 2-Clause License](https://opensource.org/licenses/BSD-2-Clause). You can find the -complete text in [``LICENSE``](LICENSE). +complete text in [`LICENSE`](LICENSE). -Sponsorship ------------ +## Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! diff --git a/VERSION b/VERSION index 5f2491c5adc..e2d1ad6ac90 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.16.4 +0.17.3-dev diff --git a/build.go b/build.go index b3b7f5eee13..5a4baf1c645 100644 --- a/build.go +++ b/build.go @@ -58,7 +58,7 @@ var config = Config{ Main: "./cmd/restic", // package name for the main package DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used Tests: []string{"./..."}, // tests to run - MinVersion: GoVersion{Major: 1, Minor: 18, Patch: 0}, // minimum Go version supported + MinVersion: GoVersion{Major: 1, Minor: 21, Patch: 0}, // minimum Go version supported } // Config configures the build. diff --git a/changelog/0.10.0_2020-09-19/pull-2195 b/changelog/0.10.0_2020-09-19/pull-2195 index a139aa4e120..7898568fa8e 100644 --- a/changelog/0.10.0_2020-09-19/pull-2195 +++ b/changelog/0.10.0_2020-09-19/pull-2195 @@ -10,7 +10,7 @@ https://github.com/restic/restic/issues/2244 NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a -file can be written to the file before any of the preceeding file blobs. +file can be written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. 
diff --git a/changelog/0.12.0_2021-02-14/issue-3232 b/changelog/0.12.0_2021-02-14/issue-3232 index 7d9f5c3b7ae..30b9ee29396 100644 --- a/changelog/0.12.0_2021-02-14/issue-3232 +++ b/changelog/0.12.0_2021-02-14/issue-3232 @@ -1,7 +1,7 @@ -Bugfix: Correct statistics for overlapping targets +Bugfix: Correct statistics for overlapping backup sources A user reported that restic's statistics and progress information during backup -was not correctly calculated when the backup targets (files/dirs to save) +was not correctly calculated when the backup sources (files/dirs to save) overlap. For example, consider a directory `foo` which contains (among others) a file `foo/bar`. When `restic backup foo foo/bar` was run, restic counted the size of the file `foo/bar` twice, so the completeness percentage as well as the diff --git a/changelog/0.16.0_2023-07-31/issue-3941 b/changelog/0.16.0_2023-07-31/issue-3941 index ff56d52ccc6..f1f02db9387 100644 --- a/changelog/0.16.0_2023-07-31/issue-3941 +++ b/changelog/0.16.0_2023-07-31/issue-3941 @@ -1,7 +1,7 @@ Enhancement: Support `--group-by` for backup parent selection Previously, the `backup` command by default selected the parent snapshot based -on the hostname and the backup targets. When the backup path list changed, the +on the hostname and the backup paths. When the backup path list changed, the `backup` command was unable to determine a suitable parent snapshot and had to read all files again. diff --git a/changelog/0.16.5_2024-07-01/issue-4873 b/changelog/0.16.5_2024-07-01/issue-4873 new file mode 100644 index 00000000000..22d25dff8dd --- /dev/null +++ b/changelog/0.16.5_2024-07-01/issue-4873 @@ -0,0 +1,6 @@ +Enhancement: Update dependencies + +A few potentially vulnerable dependencies were updated. + +https://github.com/restic/restic/issues/4873 +https://github.com/restic/restic/pull/4878 diff --git a/changelog/0.16.5_2024-07-01/pull-4799 b/changelog/0.16.5_2024-07-01/pull-4799 new file mode 100644 index 00000000000..0179bc51a91 --- /dev/null +++ b/changelog/0.16.5_2024-07-01/pull-4799 @@ -0,0 +1,5 @@ +Enhancement: Add option to force use of Azure CLI credential + +A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the use of Azure CLI credential, ignoring other credentials like managed identity. + +https://github.com/restic/restic/pull/4799 diff --git a/changelog/0.17.0_2024-07-26/issue-1786 b/changelog/0.17.0_2024-07-26/issue-1786 new file mode 100644 index 00000000000..41517f5dbca --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-1786 @@ -0,0 +1,20 @@ +Enhancement: Support repositories with empty password + +Restic previously required a password to create or operate on repositories. +Using the new option `--insecure-no-password` it is now possible to disable +this requirement. Restic will not prompt for a password when using this option. + +For security reasons, the option must always be specified when operating on +repositories with an empty password, and specifying `--insecure-no-password` +while also passing a password to restic via a CLI option or environment +variable results in an error. + +The `init` and `copy` commands add the related `--from-insecure-no-password` +option, which applies to the source repository. The `key add` and `key passwd` +commands add the `--new-insecure-no-password` option to add or set an empty +password. 
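+
+For example, the following illustrative command creates a repository without a
+password (the repository path is a placeholder):
+
+```
+restic init -r /srv/restic-repo --insecure-no-password
+```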
+ +https://github.com/restic/restic/issues/1786 +https://github.com/restic/restic/issues/4326 +https://github.com/restic/restic/pull/4698 +https://github.com/restic/restic/pull/4808 diff --git a/changelog/0.17.0_2024-07-26/issue-2348 b/changelog/0.17.0_2024-07-26/issue-2348 new file mode 100644 index 00000000000..c329ae0a293 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-2348 @@ -0,0 +1,12 @@ +Enhancement: Add `--delete` option to `restore` command + +The `restore` command now supports a `--delete` option that allows removing +files and directories from the target directory that do not exist in the +snapshot. This option also allows files in the snapshot to replace non-empty +directories having the same name. + +To check that only expected files are deleted, add the `--dry-run --verbose=2` +options. + +https://github.com/restic/restic/issues/2348 +https://github.com/restic/restic/pull/4881 diff --git a/changelog/0.17.0_2024-07-26/issue-3600 b/changelog/0.17.0_2024-07-26/issue-3600 new file mode 100644 index 00000000000..b972ecc647f --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-3600 @@ -0,0 +1,11 @@ +Bugfix: Handle unreadable xattrs in folders above `backup` source + +When backup sources are specified using absolute paths, `backup` also includes +information about the parent folders of the backup sources in the snapshot. + +If the extended attributes for some of these folders could not be read due to +missing permissions, this caused the backup to fail. This has now been fixed. + +https://github.com/restic/restic/issues/3600 +https://github.com/restic/restic/pull/4668 +https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216 diff --git a/changelog/0.17.0_2024-07-26/issue-3806 b/changelog/0.17.0_2024-07-26/issue-3806 new file mode 100644 index 00000000000..6b0663c9f95 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-3806 @@ -0,0 +1,12 @@ +Enhancement: Optimize and make `prune` command resumable + +Previously, if the `prune` command was interrupted, a later `prune` run would +start repacking pack files from the start, as `prune` did not update the index +while repacking. + +The `prune` command now supports resuming interrupted prune runs. The update +of the repository index has also been optimized to use less memory and only +rewrite parts of the index that have changed. + +https://github.com/restic/restic/issues/3806 +https://github.com/restic/restic/pull/4812 diff --git a/changelog/0.17.0_2024-07-26/issue-4048 b/changelog/0.17.0_2024-07-26/issue-4048 new file mode 100644 index 00000000000..3b9c61d2048 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4048 @@ -0,0 +1,6 @@ +Enhancement: Add support for FUSE-T with `mount` on macOS + +The restic `mount` command now supports creating FUSE mounts using FUSE-T on macOS. + +https://github.com/restic/restic/issues/4048 +https://github.com/restic/restic/pull/4825 diff --git a/changelog/0.17.0_2024-07-26/issue-4209 b/changelog/0.17.0_2024-07-26/issue-4209 new file mode 100644 index 00000000000..04eb8ef18b8 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4209 @@ -0,0 +1,7 @@ +Bugfix: Fix slow SFTP upload performance + +Since restic 0.12.1, the upload speed of the sftp backend to a remote server +has regressed significantly. This has now been fixed. 
+ +https://github.com/restic/restic/issues/4209 +https://github.com/restic/restic/pull/4782 diff --git a/changelog/unreleased/issue-4251 b/changelog/0.17.0_2024-07-26/issue-4251 similarity index 91% rename from changelog/unreleased/issue-4251 rename to changelog/0.17.0_2024-07-26/issue-4251 index d1d3f450842..5541f2d7ea0 100644 --- a/changelog/unreleased/issue-4251 +++ b/changelog/0.17.0_2024-07-26/issue-4251 @@ -1,4 +1,4 @@ -Enhancement: Support reading backup from a commands's standard output +Enhancement: Support reading backup from a command's standard output The `backup` command now supports the `--stdin-from-command` option. When using this option, the arguments to `backup` are interpreted as a command instead of diff --git a/changelog/0.17.0_2024-07-26/issue-4287 b/changelog/0.17.0_2024-07-26/issue-4287 new file mode 100644 index 00000000000..cd25a8deeaa --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4287 @@ -0,0 +1,12 @@ +Enhancement: Support connection to rest-server using unix socket + +Restic now supports using a unix socket to connect to a rest-server +version 0.13.0 or later. This allows running restic as follows: + +``` +rest-server --listen unix:/tmp/rest.socket --data /path/to/data & +restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...] +``` + +https://github.com/restic/restic/issues/4287 +https://github.com/restic/restic/pull/4655 diff --git a/changelog/0.17.0_2024-07-26/issue-4437 b/changelog/0.17.0_2024-07-26/issue-4437 new file mode 100644 index 00000000000..bc76c09835c --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4437 @@ -0,0 +1,11 @@ +Enhancement: Make `check` command create non-existent cache directory + +Previously, if a custom cache directory was specified for the `check` command, +but the directory did not exist, `check` continued with the cache disabled. + +The `check` command now attempts to create the cache directory before +initializing the cache. + +https://github.com/restic/restic/issues/4437 +https://github.com/restic/restic/pull/4805 +https://github.com/restic/restic/pull/4883 diff --git a/changelog/unreleased/issue-4472 b/changelog/0.17.0_2024-07-26/issue-4472 similarity index 84% rename from changelog/unreleased/issue-4472 rename to changelog/0.17.0_2024-07-26/issue-4472 index 3049fdf30f4..beb3612b858 100644 --- a/changelog/unreleased/issue-4472 +++ b/changelog/0.17.0_2024-07-26/issue-4472 @@ -1,11 +1,11 @@ -Enhancement: Allow AWS Assume Role to be used for S3 backend +Enhancement: Support AWS Assume Role for S3 backend Previously only credentials discovered via the Minio discovery methods were used to authenticate. However, there are many circumstances where the discovered credentials have lower permissions and need to assume a specific role. This is now possible -using the following new environment variables. +using the following new environment variables: - RESTIC_AWS_ASSUME_ROLE_ARN - RESTIC_AWS_ASSUME_ROLE_SESSION_NAME diff --git a/changelog/unreleased/issue-4540 b/changelog/0.17.0_2024-07-26/issue-4540 similarity index 59% rename from changelog/unreleased/issue-4540 rename to changelog/0.17.0_2024-07-26/issue-4540 index 9a706141e6c..25358c33236 100644 --- a/changelog/unreleased/issue-4540 +++ b/changelog/0.17.0_2024-07-26/issue-4540 @@ -1,6 +1,7 @@ Change: Require at least ARMv6 for ARM binaries -The official release binaries of restic now require at least ARMv6 support for ARM platforms. +The official release binaries of restic now require +at least ARMv6 support for ARM platforms. 
 https://github.com/restic/restic/issues/4540
 https://github.com/restic/restic/pull/4542
diff --git a/changelog/0.17.0_2024-07-26/issue-4547 b/changelog/0.17.0_2024-07-26/issue-4547
new file mode 100644
index 00000000000..bb69a59e6aa
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4547
@@ -0,0 +1,7 @@
+Enhancement: Add `--json` option to `version` command
+
+Restic now supports outputting the restic version along with the Go version, OS
+and architecture used to build restic in JSON format using `version --json`.
+
+https://github.com/restic/restic/issues/4547
+https://github.com/restic/restic/pull/4553
diff --git a/changelog/0.17.0_2024-07-26/issue-4549 b/changelog/0.17.0_2024-07-26/issue-4549
new file mode 100644
index 00000000000..245ed484ada
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4549
@@ -0,0 +1,11 @@
+Enhancement: Add `--ncdu` option to `ls` command
+
+NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories. It has
+an option to save a directory tree and analyse it later.
+
+The `ls` command now supports outputting snapshot information in the NCDU format
+using the `--ncdu` option. Example usage: `restic ls latest --ncdu | ncdu -f -`
+
+https://github.com/restic/restic/issues/4549
+https://github.com/restic/restic/pull/4550
+https://github.com/restic/restic/pull/4911
diff --git a/changelog/0.17.0_2024-07-26/issue-4568 b/changelog/0.17.0_2024-07-26/issue-4568
new file mode 100644
index 00000000000..00394fc449f
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4568
@@ -0,0 +1,16 @@
+Bugfix: Prevent `forget --keep-tags <invalid>` from deleting all snapshots
+
+Running `forget --keep-tags <invalid>`, where `<invalid>` is a tag that does
+not exist in the repository, would remove all snapshots. This is especially
+problematic if the tag name contains a typo.
+
+The `forget` command now fails with an error if all snapshots in a snapshot
+group would be deleted. This prevents the above example from deleting all
+snapshots.
+
+It is possible to temporarily disable the new check by setting the environment
+variable `RESTIC_FEATURES=safe-forget-keep-tags=false`. Note that this feature
+flag will be removed in the next minor restic version.
+
+https://github.com/restic/restic/pull/4568
+https://github.com/restic/restic/pull/4764
diff --git a/changelog/0.17.0_2024-07-26/issue-4583 b/changelog/0.17.0_2024-07-26/issue-4583
new file mode 100644
index 00000000000..bc1d030cc04
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4583
@@ -0,0 +1,13 @@
+Enhancement: Ignore `s3.storage-class` archive tiers for metadata
+
+Restic used to store all files on S3 using the specified `s3.storage-class`.
+
+Now, restic will only use non-archive storage tiers for metadata, to avoid
+problems when accessing a repository. To restore any data, it is still
+necessary to manually warm up the required data beforehand.
+
+NOTE: There is no official cold storage support in restic, use this option at
+your own risk.
+
+https://github.com/restic/restic/issues/4583
+https://github.com/restic/restic/pull/4584
diff --git a/changelog/0.17.0_2024-07-26/issue-4601 b/changelog/0.17.0_2024-07-26/issue-4601
new file mode 100644
index 00000000000..8efeba47f3c
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4601
@@ -0,0 +1,9 @@
+Enhancement: Add support for feature flags
+
+Restic now supports feature flags that can be used to enable and disable
+experimental features. The flags can be set using the environment variable
+`RESTIC_FEATURES`.
To get a list of currently supported feature flags, use +the `features` command. + +https://github.com/restic/restic/issues/4601 +https://github.com/restic/restic/pull/4666 diff --git a/changelog/0.17.0_2024-07-26/issue-4602 b/changelog/0.17.0_2024-07-26/issue-4602 new file mode 100644 index 00000000000..3fe19db7988 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4602 @@ -0,0 +1,22 @@ +Change: Deprecate legacy index format and `s3legacy` repository layout + +Support for the legacy index format used by restic before version 0.2.0 has +been deprecated and will be removed in the next minor restic version. You can +use `restic repair index` to update the index to the current format. + +It is possible to temporarily reenable support for the legacy index format by +setting the environment variable `RESTIC_FEATURES=deprecate-legacy-index=false`. +Note that this feature flag will be removed in the next minor restic version. + +Support for the `s3legacy` repository layout used for the S3 backend before +restic 0.7.0 has been deprecated and will be removed in the next minor restic +version. You can migrate your S3 repository to the current layout using +`RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. + +It is possible to temporarily reenable support for the `s3legacy` layout by +setting the environment variable `RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. +Note that this feature flag will be removed in the next minor restic version. + +https://github.com/restic/restic/issues/4602 +https://github.com/restic/restic/pull/4724 +https://github.com/restic/restic/pull/4743 diff --git a/changelog/0.17.0_2024-07-26/issue-4627 b/changelog/0.17.0_2024-07-26/issue-4627 new file mode 100644 index 00000000000..87a18560482 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4627 @@ -0,0 +1,33 @@ +Change: Redesign backend error handling to improve reliability + +Restic now downloads pack files in large chunks instead of using a streaming +download. This prevents failures due to interrupted streams. The `restore` +command now also retries downloading individual blobs that could not be +retrieved. + +HTTP requests that are stuck for more than two minutes while uploading or +downloading are now forcibly interrupted. This ensures that stuck requests are +retried after a short timeout. + +Attempts to access a missing or truncated file will no longer be retried. This +avoids unnecessary retries in those cases. All other backend requests are +retried for up to 15 minutes. This ensures that temporarily interrupted network +connections can be tolerated. + +If a download yields a corrupt file or blob, then the download will be retried +once. + +Most parts of the new backend error handling can temporarily be disabled by +setting the environment variable `RESTIC_FEATURES=backend-error-redesign=false`. +Note that this feature flag will be removed in the next minor restic version. 
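+
+For example, the following sketch runs a backup with the new error handling
+temporarily disabled (the backed up path is a placeholder):
+
+```
+RESTIC_FEATURES=backend-error-redesign=false restic backup /home/user
+```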
+
+https://github.com/restic/restic/issues/4627
+https://github.com/restic/restic/issues/4193
+https://github.com/restic/restic/pull/4605
+https://github.com/restic/restic/pull/4792
+https://github.com/restic/restic/issues/4515
+https://github.com/restic/restic/issues/1523
+https://github.com/restic/restic/pull/4520
+https://github.com/restic/restic/pull/4800
+https://github.com/restic/restic/pull/4784
+https://github.com/restic/restic/pull/4844
diff --git a/changelog/unreleased/issue-4656 b/changelog/0.17.0_2024-07-26/issue-4656
similarity index 54%
rename from changelog/unreleased/issue-4656
rename to changelog/0.17.0_2024-07-26/issue-4656
index 8d16f0b4885..ef8c1e12ae6 100644
--- a/changelog/unreleased/issue-4656
+++ b/changelog/0.17.0_2024-07-26/issue-4656
@@ -1,6 +1,6 @@
-Bugfix: Properly report the ID of newly added keys
+Bugfix: Properly report ID of newly added keys

-`restic key add` now reports the ID of a newly added key. This simplifies
+`restic key add` now reports the ID of the newly added key. This simplifies
 selecting a specific key using the `--key-hint key` option.

 https://github.com/restic/restic/issues/4656
diff --git a/changelog/0.17.0_2024-07-26/issue-4676 b/changelog/0.17.0_2024-07-26/issue-4676
new file mode 100644
index 00000000000..ecea793617c
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4676
@@ -0,0 +1,8 @@
+Enhancement: Make `key` command's actions separate sub-commands
+
+Each of the `add`, `list`, `remove` and `passwd` actions provided by the `key`
+command is now a separate sub-command and has its own documentation which can
+be invoked using `restic key --help`.
+
+https://github.com/restic/restic/issues/4676
+https://github.com/restic/restic/pull/4685
diff --git a/changelog/0.17.0_2024-07-26/issue-4678 b/changelog/0.17.0_2024-07-26/issue-4678
new file mode 100644
index 00000000000..401449bd2ee
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4678
@@ -0,0 +1,8 @@
+Enhancement: Add `--target` option to the `dump` command
+
+Restic `dump` always printed to the standard output. It now supports specifying
+a `--target` file to write its output to.
+
+https://github.com/restic/restic/issues/4678
+https://github.com/restic/restic/pull/4682
+https://github.com/restic/restic/pull/4692
diff --git a/changelog/0.17.0_2024-07-26/issue-4707 b/changelog/0.17.0_2024-07-26/issue-4707
new file mode 100644
index 00000000000..3c8f1a2f39a
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4707
@@ -0,0 +1,14 @@
+Change: Disable S3 anonymous authentication by default
+
+When using the S3 backend with anonymous authentication, restic continuously
+tried to retrieve new authentication credentials, causing bad performance.
+
+Now, to use anonymous authentication, it is necessary to pass the extended
+option `-o s3.unsafe-anonymous-auth=true` to restic.
+
+It is possible to temporarily revert to the old behavior by setting the
+environment variable `RESTIC_FEATURES=explicit-s3-anonymous-auth=false`. Note
+that this feature flag will be removed in the next minor restic version.
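+
+For example, the following illustrative command lists snapshots in a publicly
+readable bucket (the bucket URL is a placeholder):
+
+```
+restic -o s3.unsafe-anonymous-auth=true -r s3:s3.amazonaws.com/bucket snapshots
+```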
+
+https://github.com/restic/restic/issues/4707
+https://github.com/restic/restic/pull/4908
diff --git a/changelog/0.17.0_2024-07-26/issue-4733 b/changelog/0.17.0_2024-07-26/issue-4733
new file mode 100644
index 00000000000..fb5a072d68c
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4733
@@ -0,0 +1,12 @@
+Enhancement: Allow specifying `--host` via environment variable
+
+Restic commands that operate on snapshots, such as `restic backup` and
+`restic snapshots`, support the `--host` option to specify the hostname
+for grouping snapshots.
+
+Such commands now also support specifying the hostname via the environment
+variable `RESTIC_HOST`. Note that `--host` still takes precedence over the
+environment variable.
+
+https://github.com/restic/restic/issues/4733
+https://github.com/restic/restic/pull/4734
diff --git a/changelog/0.17.0_2024-07-26/issue-4744 b/changelog/0.17.0_2024-07-26/issue-4744
new file mode 100644
index 00000000000..b5c759bedfd
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4744
@@ -0,0 +1,9 @@
+Change: Include full key ID in JSON output of `key list`
+
+The JSON output of the `key list` command has changed to include the full key
+ID instead of just a shortened version of the ID, as the latter can be ambiguous
+in some rare cases. To derive the short ID, please truncate the full ID down to
+eight characters.
+
+https://github.com/restic/restic/issues/4744
+https://github.com/restic/restic/pull/4745
diff --git a/changelog/0.17.0_2024-07-26/issue-4760 b/changelog/0.17.0_2024-07-26/issue-4760
new file mode 100644
index 00000000000..e56f41a443d
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4760
@@ -0,0 +1,8 @@
+Bugfix: Fix possible error on concurrent cache cleanup
+
+If multiple restic processes concurrently cleaned up no longer existing files
+from the cache, this could cause some of the processes to fail with a `no such
+file or directory` error. This has now been fixed.
+
+https://github.com/restic/restic/issues/4760
+https://github.com/restic/restic/pull/4761
diff --git a/changelog/0.17.0_2024-07-26/issue-4768 b/changelog/0.17.0_2024-07-26/issue-4768
new file mode 100644
index 00000000000..9fb1a29de81
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4768
@@ -0,0 +1,8 @@
+Enhancement: Allow specifying custom User-Agent for outgoing requests
+
+Restic now supports setting a custom `User-Agent` for outgoing HTTP requests
+using the global option `--http-user-agent` or the `RESTIC_HTTP_USER_AGENT`
+environment variable.
+
+https://github.com/restic/restic/issues/4768
+https://github.com/restic/restic/pull/4810
\ No newline at end of file
diff --git a/changelog/0.17.0_2024-07-26/issue-4781 b/changelog/0.17.0_2024-07-26/issue-4781
new file mode 100644
index 00000000000..2c9584d7758
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-4781
@@ -0,0 +1,8 @@
+Enhancement: Add `restore` options to read include/exclude patterns from files
+
+Restic now supports reading include and exclude patterns from files using the
+`--include-file`, `--exclude-file`, `--iinclude-file` and `--iexclude-file`
+options of the `restore` command.
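+
+For example, the following sketch restores only the paths matching the
+patterns in a file (the file and target names are placeholders):
+
+```
+restic restore latest --target /restore --include-file include-patterns.txt
+```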
+ +https://github.com/restic/restic/issues/4781 +https://github.com/restic/restic/pull/4811 \ No newline at end of file diff --git a/changelog/0.17.0_2024-07-26/issue-4817 b/changelog/0.17.0_2024-07-26/issue-4817 new file mode 100644 index 00000000000..83c68277563 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4817 @@ -0,0 +1,26 @@ +Enhancement: Make overwrite behavior of `restore` customizable + +The `restore` command now supports an `--overwrite` option to configure whether +already existing files are overwritten. The overwrite behavior can be configured +using the following option values: + +- `--overwrite always` (default): Always overwrites already existing files. + The `restore` command will verify the existing file content and only restore + mismatching parts to minimize downloads. Updates the metadata of all files. +- `--overwrite if-changed`: Like `always`, but speeds up the file content check + by assuming that files with matching size and modification time (mtime) are + already up to date. In case of a mismatch, the full file content is verified + like with `always`. Updates the metadata of all files. +- `--overwrite if-newer`: Like `always`, but only overwrites existing files + when the file in the snapshot has a newer modification time (mtime) than the + existing file. +- `--overwrite never`: Never overwrites existing files. + +https://github.com/restic/restic/issues/4817 +https://github.com/restic/restic/issues/200 +https://github.com/restic/restic/issues/407 +https://github.com/restic/restic/issues/2662 +https://github.com/restic/restic/pull/4837 +https://github.com/restic/restic/pull/4838 +https://github.com/restic/restic/pull/4864 +https://github.com/restic/restic/pull/4921 diff --git a/changelog/0.17.0_2024-07-26/issue-4850 b/changelog/0.17.0_2024-07-26/issue-4850 new file mode 100644 index 00000000000..b04edd1599f --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4850 @@ -0,0 +1,8 @@ +Bugfix: Handle UTF-16 password files in `key` command correctly + +Previously, `key add` and `key passwd` did not properly decode UTF-16 +encoded passwords read from a password file. This has now been fixed +to correctly match the encoding when opening a repository. + +https://github.com/restic/restic/issues/4850 +https://github.com/restic/restic/pull/4851 diff --git a/changelog/0.17.0_2024-07-26/issue-4902 b/changelog/0.17.0_2024-07-26/issue-4902 new file mode 100644 index 00000000000..507d8abbea9 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-4902 @@ -0,0 +1,8 @@ +Bugfix: Update snapshot summary on `rewrite` + +Restic previously did not recalculate the total number of files and bytes +processed when files were excluded from a snapshot by the `rewrite` command. +This has now been fixed. + +https://github.com/restic/restic/issues/4902 +https://github.com/restic/restic/pull/4905 diff --git a/changelog/0.17.0_2024-07-26/issue-662 b/changelog/0.17.0_2024-07-26/issue-662 new file mode 100644 index 00000000000..9fd2f27d0df --- /dev/null +++ b/changelog/0.17.0_2024-07-26/issue-662 @@ -0,0 +1,11 @@ +Enhancement: Optionally skip snapshot creation if nothing changed + +The `backup` command always created a snapshot even if nothing in the +backup set changed compared to the parent snapshot. + +Restic now supports the `--skip-if-unchanged` option for the `backup` +command, which omits creating a snapshot if the new snapshot's content +would be identical to that of the parent snapshot. 
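+
+For example (the backed up path is a placeholder):
+
+```
+restic backup --skip-if-unchanged /home/user/documents
+```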
+
+https://github.com/restic/restic/issues/662
+https://github.com/restic/restic/pull/4816
diff --git a/changelog/0.17.0_2024-07-26/issue-693 b/changelog/0.17.0_2024-07-26/issue-693
new file mode 100644
index 00000000000..4a8c766a42a
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-693
@@ -0,0 +1,13 @@
+Enhancement: Include snapshot size in `snapshots` output
+
+The `snapshots` command now prints the size for snapshots created using this
+or a future restic version. To achieve this, the `backup` command now stores
+the backup summary statistics in the snapshot.
+
+The text output of the `snapshots` command only shows the snapshot size. The
+other statistics are only included in the JSON output. To inspect these
+statistics use `restic snapshots --json` or `restic cat snapshot <snapshot-id>`.
+
+https://github.com/restic/restic/issues/693
+https://github.com/restic/restic/pull/4705
+https://github.com/restic/restic/pull/4913
diff --git a/changelog/0.17.0_2024-07-26/issue-828 b/changelog/0.17.0_2024-07-26/issue-828
new file mode 100644
index 00000000000..72d66dae04c
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/issue-828
@@ -0,0 +1,11 @@
+Enhancement: Improve features of the `repair packs` command
+
+The `repair packs` command has been improved to also be able to process
+truncated pack files. The `check` and `check --read-data` commands will provide
+instructions on using the command if necessary to repair a repository. See the
+guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html for
+further instructions.
+
+https://github.com/restic/restic/issues/828
+https://github.com/restic/restic/pull/4644
+https://github.com/restic/restic/pull/4882
diff --git a/changelog/0.17.0_2024-07-26/pull-3067 b/changelog/0.17.0_2024-07-26/pull-3067
new file mode 100644
index 00000000000..9ecec4838d9
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-3067
@@ -0,0 +1,25 @@
+Enhancement: Add extended options to configure Windows Shadow Copy Service
+
+Previously, restic always used a 120-second timeout and unconditionally created
+VSS snapshots for all volume mount points on disk.
This behavior can now be +fine-tuned by the following new extended options (available only on Windows): + +- `-o vss.timeout`: Time that VSS can spend creating snapshot before timing out (default: 120s) +- `-o vss.exclude-all-mount-points`: Exclude mountpoints from snapshotting on all volumes (default: false) +- `-o vss.exclude-volumes`: Semicolon separated list of volumes to exclude from snapshotting +- `-o vss.provider`: VSS provider identifier which will be used for snapshotting + +For example, change VSS timeout to five minutes and disable snapshotting of +mount points on all volumes: + + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true + +Exclude drive `d:`, mount point `c:\mnt` and a specific volume from snapshotting: + + restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + +Uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider: + + restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} + +https://github.com/restic/restic/pull/3067 diff --git a/changelog/0.17.0_2024-07-26/pull-4006 b/changelog/0.17.0_2024-07-26/pull-4006 new file mode 100644 index 00000000000..3bfacb8a060 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4006 @@ -0,0 +1,15 @@ +Enhancement: (alpha) Store deviceID only for hardlinks + +Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature. +The feature flag will be removed after repository format version 3 becomes +available or be replaced with a different solution. + +When creating backups from a filesystem snapshot, for example created using +BTRFS subvolumes, the deviceID of the filesystem changes compared to previous +snapshots. This prevented restic from deduplicating the directory metadata of +a snapshot. + +When this alpha feature is enabled, the deviceID is only stored for hardlinks, +which significantly reduces the metadata duplication for most backups. + +https://github.com/restic/restic/pull/4006 diff --git a/changelog/0.17.0_2024-07-26/pull-4354 b/changelog/0.17.0_2024-07-26/pull-4354 new file mode 100644 index 00000000000..d3cf3324924 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4354 @@ -0,0 +1,7 @@ +Enhancement: Significantly reduce `prune` memory usage + +The `prune` command has been optimized to use up to 60% less memory. +The memory usage should now be roughly similar to creating a backup. + +https://github.com/restic/restic/pull/4354 +https://github.com/restic/restic/pull/4812 diff --git a/changelog/0.17.0_2024-07-26/pull-4503 b/changelog/0.17.0_2024-07-26/pull-4503 new file mode 100644 index 00000000000..549aa9f532b --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4503 @@ -0,0 +1,8 @@ +Bugfix: Correct hardlink handling in `stats` command + +If files on different devices had the same inode ID, the `stats` command +did not correctly calculate the snapshot size. This has now been fixed. + +https://forum.restic.net/t/possible-bug-in-stats/6461/8 +https://github.com/restic/restic/pull/4503 +https://github.com/restic/restic/pull/4006 diff --git a/changelog/0.17.0_2024-07-26/pull-4526 b/changelog/0.17.0_2024-07-26/pull-4526 new file mode 100644 index 00000000000..4d0fee69104 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4526 @@ -0,0 +1,12 @@ +Enhancement: Add bitrot detection to `diff` command + +The output of the `diff` command now includes the modifier `?` for files to +indicate bitrot in backed up files. 
The `?` will appear whenever there is a +difference in content while the metadata is exactly the same. + +Since files with unchanged metadata are normally not read again when creating +a backup, the detection is only effective when the right-hand side of the diff +has been created with `backup --force`. + +https://github.com/restic/restic/issues/805 +https://github.com/restic/restic/pull/4526 diff --git a/changelog/0.17.0_2024-07-26/pull-4573 b/changelog/0.17.0_2024-07-26/pull-4573 new file mode 100644 index 00000000000..36fc727bef0 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4573 @@ -0,0 +1,6 @@ +Enhancement: Support rewriting host and time metadata in snapshots + +The `rewrite` command now supports rewriting the host and/or time metadata of +a snapshot using the new `--new-host` and `--new-time` options. + +https://github.com/restic/restic/pull/4573 diff --git a/changelog/0.17.0_2024-07-26/pull-4590 b/changelog/0.17.0_2024-07-26/pull-4590 new file mode 100644 index 00000000000..7904c18af42 --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4590 @@ -0,0 +1,6 @@ +Enhancement: Speed up `mount` command's error detection + +The `mount` command now checks for the existence of the mountpoint before +opening the repository, leading to quicker error detection. + +https://github.com/restic/restic/pull/4590 diff --git a/changelog/0.17.0_2024-07-26/pull-4611 b/changelog/0.17.0_2024-07-26/pull-4611 new file mode 100644 index 00000000000..426ed590ffd --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4611 @@ -0,0 +1,9 @@ +Enhancement: Back up more file metadata on Windows + +Previously, restic did not back up all common Windows-specific metadata. + +Restic now stores file creation time and file attributes like the hidden, +read-only and encrypted flags when backing up files and folders on Windows. + +https://github.com/restic/restic/pull/4611 + diff --git a/changelog/unreleased/pull-4615 b/changelog/0.17.0_2024-07-26/pull-4615 similarity index 64% rename from changelog/unreleased/pull-4615 rename to changelog/0.17.0_2024-07-26/pull-4615 index 7e2d4a01712..a8916df3c8d 100644 --- a/changelog/unreleased/pull-4615 +++ b/changelog/0.17.0_2024-07-26/pull-4615 @@ -1,6 +1,6 @@ -Bugfix: `find` ignored directories in some cases +Bugfix: Make `find` not sometimes ignore directories In some cases, the `find` command ignored empty or moved directories. This has -been fixed. +now been fixed. https://github.com/restic/restic/pull/4615 diff --git a/changelog/0.17.0_2024-07-26/pull-4664 b/changelog/0.17.0_2024-07-26/pull-4664 new file mode 100644 index 00000000000..655ccd082bd --- /dev/null +++ b/changelog/0.17.0_2024-07-26/pull-4664 @@ -0,0 +1,10 @@ +Enhancement: Make `ls` use `message_type` field in JSON output + +The `ls` command was the only restic command that used the `struct_type` field +in its JSON output format to specify the message type. + +The JSON output of the `ls` command now also includes the `message_type` field, +which is consistent with other commands. The `struct_type` field is still +included, but now deprecated. 
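+
+An abbreviated, illustrative line of `restic ls latest --json` output (most
+fields omitted) might look like:
+
+```
+{"name":"file.txt","type":"file","message_type":"node","struct_type":"node"}
+```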
+
+https://github.com/restic/restic/pull/4664
diff --git a/changelog/0.17.0_2024-07-26/pull-4703 b/changelog/0.17.0_2024-07-26/pull-4703
new file mode 100644
index 00000000000..178842c6c62
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4703
@@ -0,0 +1,11 @@
+Bugfix: Shutdown cleanly when receiving SIGTERM
+
+Previously, when restic received the SIGTERM signal it would terminate
+immediately, skipping cleanup and potentially causing issues like stale locks
+being left behind. This primarily affected containerized restic invocations
+that use SIGTERM, but could also be triggered via a simple `killall restic`.
+
+This has now been fixed, such that restic shuts down cleanly when receiving
+the SIGTERM signal.
+
+https://github.com/restic/restic/pull/4703
diff --git a/changelog/0.17.0_2024-07-26/pull-4708 b/changelog/0.17.0_2024-07-26/pull-4708
new file mode 100644
index 00000000000..16bf33e5795
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4708
@@ -0,0 +1,13 @@
+Enhancement: Back up and restore SecurityDescriptors on Windows
+
+Restic now backs up and restores SecurityDescriptors for files and folders on
+Windows, which includes owner, group, discretionary access control list (DACL)
+and system access control list (SACL).
+
+This requires the user to be a member of backup operators or the application
+must be run as admin. If that is not the case, only the current user's owner,
+group and DACL will be backed up, and during restore only the DACL of the
+backed up file will be restored, with the current user's owner and group
+being set on the restored file.
+
+https://github.com/restic/restic/pull/4708
diff --git a/changelog/0.17.0_2024-07-26/pull-4709 b/changelog/0.17.0_2024-07-26/pull-4709
new file mode 100644
index 00000000000..62be8b54b31
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4709
@@ -0,0 +1,10 @@
+Bugfix: Correct `--no-lock` handling of `ls` and `tag` commands
+
+The `ls` command never locked the repository. This has now been fixed, with the
+old behavior still being supported using `ls --no-lock`. The latter invocation
+also works with older restic versions.
+
+The `tag` command erroneously accepted the `--no-lock` option. This command
+now always requires an exclusive lock.
+
+https://github.com/restic/restic/pull/4709
diff --git a/changelog/0.17.0_2024-07-26/pull-4737 b/changelog/0.17.0_2024-07-26/pull-4737
new file mode 100644
index 00000000000..bf528237db8
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4737
@@ -0,0 +1,6 @@
+Enhancement: Include snapshot ID in `reason` field of `forget` JSON output
+
+The JSON output of the `forget` command now includes `id` and `short_id` of
+snapshots in the `reason` field.
+
+https://github.com/restic/restic/pull/4737
diff --git a/changelog/0.17.0_2024-07-26/pull-4764 b/changelog/0.17.0_2024-07-26/pull-4764
new file mode 100644
index 00000000000..d85eadbc317
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4764
@@ -0,0 +1,10 @@
+Enhancement: Support forgetting all snapshots
+
+The `forget` command now supports the `--unsafe-allow-remove-all` option, which
+removes all snapshots in the repository.
+
+This option must always be combined with a snapshot filter (by host, path or
+tag). For example, the command `forget --tag example --unsafe-allow-remove-all`
+removes all snapshots with the tag "example".
+
+https://github.com/restic/restic/pull/4764
diff --git a/changelog/0.17.0_2024-07-26/pull-4796 b/changelog/0.17.0_2024-07-26/pull-4796
new file mode 100644
index 00000000000..2729c635e60
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4796
@@ -0,0 +1,8 @@
+Enhancement: Improve `dump` performance for large files
+
+The `dump` command now retrieves the data chunks for a file in
+parallel. This improves the download performance by up to as many
+times as the configured number of parallel backend connections.
+
+https://github.com/restic/restic/issues/3406
+https://github.com/restic/restic/pull/4796
diff --git a/changelog/0.17.0_2024-07-26/pull-4807 b/changelog/0.17.0_2024-07-26/pull-4807
new file mode 100644
index 00000000000..b5e5cd7fd2f
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4807
@@ -0,0 +1,6 @@
+Enhancement: Support Extended Attributes on Windows NTFS
+
+Restic now backs up and restores Extended Attributes for files
+and folders on Windows NTFS.
+
+https://github.com/restic/restic/pull/4807
diff --git a/changelog/0.17.0_2024-07-26/pull-4839 b/changelog/0.17.0_2024-07-26/pull-4839
new file mode 100644
index 00000000000..672ac2e69fb
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4839
@@ -0,0 +1,7 @@
+Enhancement: Add dry-run support to `restore` command
+
+The `restore` command now supports the `--dry-run` option to perform
+a dry run. Pass the `--verbose=2` option to see which files would
+remain unchanged, and which would be updated or freshly restored.
+
+https://github.com/restic/restic/pull/4839
diff --git a/changelog/0.17.0_2024-07-26/pull-4884 b/changelog/0.17.0_2024-07-26/pull-4884
new file mode 100644
index 00000000000..3a7e0d3423c
--- /dev/null
+++ b/changelog/0.17.0_2024-07-26/pull-4884
@@ -0,0 +1,11 @@
+Change: Return exit codes 10 and 11 for non-existing and locked repository
+
+If a repository does not exist or cannot be locked, restic previously always
+returned exit code 1. This made it difficult to distinguish these cases from
+other errors.
+
+Restic now returns exit code 10 if the repository does not exist, and exit code
+11 if the repository could not be locked due to a conflicting lock.
+
+https://github.com/restic/restic/issues/956
+https://github.com/restic/restic/pull/4884
diff --git a/changelog/0.17.1_2024-09-05/issue-2004 b/changelog/0.17.1_2024-09-05/issue-2004
new file mode 100644
index 00000000000..5372eeb8c2d
--- /dev/null
+++ b/changelog/0.17.1_2024-09-05/issue-2004
@@ -0,0 +1,18 @@
+Bugfix: Correctly handle volume names in `backup` command on Windows
+
+On Windows, when the specified backup target only included the volume
+name without a trailing slash, for example, `C:`, then restoring the
+resulting snapshot would result in an error. Note that using `C:\`
+as backup target worked correctly.
+
+Specifying volume names is now handled correctly. To restore snapshots
+created before this bugfix, use the <snapshot>:<subpath> syntax.
For +example, to restore a snapshot with ID `12345678` that backed up `C:`, +use the following command: + +``` +restic restore 12345678:/C/C:./ --target output/folder +``` + +https://github.com/restic/restic/issues/2004 +https://github.com/restic/restic/pull/5028 diff --git a/changelog/0.17.1_2024-09-05/issue-4795 b/changelog/0.17.1_2024-09-05/issue-4795 new file mode 100644 index 00000000000..ff86f09312c --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4795 @@ -0,0 +1,8 @@ +Enhancement: Display progress bar for `restore --verify` + +When the `restore` command is run with `--verify`, it now displays a progress +bar while the verification step is running. The progress bar is not shown when +the `--json` flag is specified. + +https://github.com/restic/restic/issues/4795 +https://github.com/restic/restic/pull/4989 diff --git a/changelog/0.17.1_2024-09-05/issue-4934 b/changelog/0.17.1_2024-09-05/issue-4934 new file mode 100644 index 00000000000..df77109a7d0 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4934 @@ -0,0 +1,11 @@ +Enhancement: Automatically clear removed snapshots from cache + +Previously, restic only removed snapshots from the cache on the host where the +`forget` command was executed. On other hosts that use the same repository, the +old snapshots remained in the cache. + +Restic now automatically clears old snapshots from the local cache of the +current host. + +https://github.com/restic/restic/issues/4934 +https://github.com/restic/restic/pull/4981 diff --git a/changelog/0.17.1_2024-09-05/issue-4944 b/changelog/0.17.1_2024-09-05/issue-4944 new file mode 100644 index 00000000000..95ae24c03d1 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4944 @@ -0,0 +1,9 @@ +Enhancement: Print JSON-formatted errors during `restore --json` + +Restic used to print any `restore` errors directly to the console as freeform +text messages, even when using the `--json` option. + +Now, when `--json` is specified, restic prints them as JSON formatted messages. + +https://github.com/restic/restic/issues/4944 +https://github.com/restic/restic/pull/4946 diff --git a/changelog/0.17.1_2024-09-05/issue-4945 b/changelog/0.17.1_2024-09-05/issue-4945 new file mode 100644 index 00000000000..a7a483fed53 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4945 @@ -0,0 +1,10 @@ +Bugfix: Include missing backup error text with `--json` + +Previously, when running a backup with the `--json` option, restic failed to +include the actual error message in the output, resulting in `"error": {}` +being displayed. + +This has now been fixed, and restic now includes the error text in JSON output. + +https://github.com/restic/restic/issues/4945 +https://github.com/restic/restic/pull/4946 diff --git a/changelog/0.17.1_2024-09-05/issue-4953 b/changelog/0.17.1_2024-09-05/issue-4953 new file mode 100644 index 00000000000..c542377fcba --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4953 @@ -0,0 +1,7 @@ +Bugfix: Correctly handle long paths on older Windows versions + +On older Windows versions, like Windows Server 2012, restic 0.17.0 failed to +back up files with long paths. This problem has now been resolved. 
+ +https://github.com/restic/restic/issues/4953 +https://github.com/restic/restic/pull/4954 diff --git a/changelog/0.17.1_2024-09-05/issue-4957 b/changelog/0.17.1_2024-09-05/issue-4957 new file mode 100644 index 00000000000..59c73b5c744 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4957 @@ -0,0 +1,8 @@ +Bugfix: Fix delayed cancellation of certain commands + +Since restic 0.17.0, some commands did not immediately respond to cancellation +via Ctrl-C (SIGINT) and continued running for a short period. The most affected +commands were `diff`,`find`, `ls`, `stats` and `rewrite`. This is now resolved. + +https://github.com/restic/restic/issues/4957 +https://github.com/restic/restic/pull/4960 diff --git a/changelog/0.17.1_2024-09-05/issue-4969 b/changelog/0.17.1_2024-09-05/issue-4969 new file mode 100644 index 00000000000..d92392a2050 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4969 @@ -0,0 +1,7 @@ +Bugfix: Correctly restore timestamp for files with resource forks on macOS + +On macOS, timestamps were not restored for files with resource forks. This has +now been fixed. + +https://github.com/restic/restic/issues/4969 +https://github.com/restic/restic/pull/5006 diff --git a/changelog/0.17.1_2024-09-05/issue-4970 b/changelog/0.17.1_2024-09-05/issue-4970 new file mode 100644 index 00000000000..422ae3c2579 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4970 @@ -0,0 +1,15 @@ +Enhancement: Make timeout for stuck requests customizable + +Restic monitors connections to the backend to detect stuck requests. If a +request does not return any data within five minutes, restic assumes the +request is stuck and retries it. However, for large repositories this timeout +might be insufficient to collect a list of all files, causing the following +error: + +`List(data) returned error, retrying after 1s: [...]: request timeout` + +It is now possible to increase the timeout using the `--stuck-request-timeout` +option. + +https://github.com/restic/restic/issues/4970 +https://github.com/restic/restic/pull/5014 diff --git a/changelog/0.17.1_2024-09-05/issue-4975 b/changelog/0.17.1_2024-09-05/issue-4975 new file mode 100644 index 00000000000..614642c0676 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-4975 @@ -0,0 +1,7 @@ +Bugfix: Prevent `backup --stdin-from-command` from panicking + +Restic would previously crash if `--stdin-from-command` was specified without +providing a command. This issue has now been fixed. + +https://github.com/restic/restic/issues/4975 +https://github.com/restic/restic/pull/4976 diff --git a/changelog/0.17.1_2024-09-05/issue-5004 b/changelog/0.17.1_2024-09-05/issue-5004 new file mode 100644 index 00000000000..72e98a9a4c6 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-5004 @@ -0,0 +1,12 @@ +Bugfix: Fix spurious "A Required Privilege Is Not Held by the Client" error + +On Windows, creating a backup could sometimes trigger the following error: + +``` +error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. +``` + +This has now been fixed. 
+ +https://github.com/restic/restic/issues/5004 +https://github.com/restic/restic/pull/5019 diff --git a/changelog/0.17.1_2024-09-05/issue-5005 b/changelog/0.17.1_2024-09-05/issue-5005 new file mode 100644 index 00000000000..16ac83b4abf --- /dev/null +++ b/changelog/0.17.1_2024-09-05/issue-5005 @@ -0,0 +1,16 @@ +Bugfix: Fix rare failures to retry locking a repository + +Restic 0.17.0 could in rare cases fail to retry locking a repository if one of +the lock files failed to load, resulting in the error: + +``` +unable to create lock in backend: circuit breaker open for file +``` + +This issue has now been addressed. The error handling now properly retries the +locking operation. In addition, restic waits a few seconds between locking +retries to increase the chances of successful locking. + +https://github.com/restic/restic/issues/5005 +https://github.com/restic/restic/pull/5011 +https://github.com/restic/restic/pull/5012 diff --git a/changelog/0.17.1_2024-09-05/pull-4958 b/changelog/0.17.1_2024-09-05/pull-4958 new file mode 100644 index 00000000000..dae9b2c8e97 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/pull-4958 @@ -0,0 +1,7 @@ +Bugfix: Don't ignore metadata-setting errors during restore + +Previously, restic ignored errors when setting timestamps, attributes, +or file modes during a restore. It now reports those errors, except for +permission-related errors when running without root privileges. + +https://github.com/restic/restic/pull/4958 diff --git a/changelog/0.17.1_2024-09-05/pull-4959 b/changelog/0.17.1_2024-09-05/pull-4959 new file mode 100644 index 00000000000..80b2780b22c --- /dev/null +++ b/changelog/0.17.1_2024-09-05/pull-4959 @@ -0,0 +1,6 @@ +Enhancement: Return exit code 12 for "bad password" errors + +Restic now returns exit code 12 when it cannot open the repository due to an +incorrect password. + +https://github.com/restic/restic/pull/4959 diff --git a/changelog/0.17.1_2024-09-05/pull-4977 b/changelog/0.17.1_2024-09-05/pull-4977 new file mode 100644 index 00000000000..781576a5696 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/pull-4977 @@ -0,0 +1,16 @@ +Change: Also back up files with incomplete metadata + +If restic failed to read extended metadata for a file or folder during a +backup, then the file or folder was not included in the resulting snapshot. +Instead, a warning message was printed and exit code 3 was returned once +the backup finished. + +Now, restic also includes in the snapshot those items for which the extended +metadata could not be read. The warning message has been updated to: + +``` +incomplete metadata for /path/to/file:
+``` + +https://github.com/restic/restic/issues/4953 +https://github.com/restic/restic/pull/4977 diff --git a/changelog/0.17.1_2024-09-05/pull-4980 b/changelog/0.17.1_2024-09-05/pull-4980 new file mode 100644 index 00000000000..b51ee8d5998 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/pull-4980 @@ -0,0 +1,12 @@ +Bugfix: Skip extended attribute processing on unsupported Windows volumes + +With restic 0.17.0, backups of certain Windows paths, such as network drives, +failed due to errors while fetching extended attributes. + +Restic now skips extended attribute processing for volumes where they are not +supported. + +https://github.com/restic/restic/pull/4980 +https://github.com/restic/restic/pull/4998 +https://github.com/restic/restic/issues/4955 +https://github.com/restic/restic/issues/4950 diff --git a/changelog/0.17.1_2024-09-05/pull-5018 b/changelog/0.17.1_2024-09-05/pull-5018 new file mode 100644 index 00000000000..ca600c3e1d2 --- /dev/null +++ b/changelog/0.17.1_2024-09-05/pull-5018 @@ -0,0 +1,13 @@ +Bugfix: Improve HTTP/2 support for REST backend + +If `rest-server` tried to gracefully shut down an HTTP/2 connection still in +use by the client, it could result in the following error: + +``` +http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error +``` + +This issue has now been resolved. + +https://github.com/restic/restic/pull/5018 +https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 diff --git a/changelog/0.17.2_2024-10-27/issue-4004 b/changelog/0.17.2_2024-10-27/issue-4004 new file mode 100644 index 00000000000..d95ad02e9ce --- /dev/null +++ b/changelog/0.17.2_2024-10-27/issue-4004 @@ -0,0 +1,12 @@ +Bugfix: Support container-level SAS/SAT tokens for Azure backend + +Restic previously expected SAS/SAT tokens to be generated at the account level, +which prevented tokens created at the container level from being used to +initialize a repository. This caused an error when attempting to initialize a +repository with container-level tokens. + +Restic now supports both account-level and container-level SAS/SAT tokens for +initializing a repository. + +https://github.com/restic/restic/issues/4004 +https://github.com/restic/restic/pull/5093 diff --git a/changelog/0.17.2_2024-10-27/issue-5050 b/changelog/0.17.2_2024-10-27/issue-5050 new file mode 100644 index 00000000000..34536f6dae3 --- /dev/null +++ b/changelog/0.17.2_2024-10-27/issue-5050 @@ -0,0 +1,7 @@ +Bugfix: Return error if `tag` fails to lock repository + +Since restic 0.17.0, the `tag` command did not return an error when it failed +to open or lock the repository. This issue has now been fixed. + +https://github.com/restic/restic/issues/5050 +https://github.com/restic/restic/pull/5056 diff --git a/changelog/0.17.2_2024-10-27/issue-5063 b/changelog/0.17.2_2024-10-27/issue-5063 new file mode 100644 index 00000000000..54f97f0af9b --- /dev/null +++ b/changelog/0.17.2_2024-10-27/issue-5063 @@ -0,0 +1,12 @@ +Bugfix: Correctly `backup` extended metadata when using VSS on Windows + +On Windows, when creating a backup with the `--use-fs-snapshot` option, restic +read extended metadata from the original filesystem path instead of from the +snapshot. This could result in errors if files were removed during the backup +process. + +This issue has now been resolved. 
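+ +For reference, this affected backups created with the `--use-fs-snapshot` option, along the lines of the following command (the path is illustrative): + +``` +restic backup --use-fs-snapshot C:\Users\example +```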
+ +https://github.com/restic/restic/issues/5063 +https://github.com/restic/restic/pull/5097 +https://github.com/restic/restic/pull/5099 diff --git a/changelog/0.17.2_2024-10-27/pull-5047 b/changelog/0.17.2_2024-10-27/pull-5047 new file mode 100644 index 00000000000..ace02c3b47a --- /dev/null +++ b/changelog/0.17.2_2024-10-27/pull-5047 @@ -0,0 +1,8 @@ +Bugfix: Resolve potential error during concurrent cache cleanup + +When multiple restic processes ran concurrently, they could compete to remove +obsolete snapshots from the local backend cache, sometimes leading to a "no +such file or directory" error. Restic now suppresses this error to prevent +issues during cache cleanup. + +https://github.com/restic/restic/pull/5047 diff --git a/changelog/0.17.2_2024-10-27/pull-5057 b/changelog/0.17.2_2024-10-27/pull-5057 new file mode 100644 index 00000000000..aba2992b756 --- /dev/null +++ b/changelog/0.17.2_2024-10-27/pull-5057 @@ -0,0 +1,24 @@ +Bugfix: Exclude irregular files from backups + +Since restic 0.17.1, files with the type `irregular` could mistakenly be included +in snapshots, especially when backing up special file types on Windows that +restic cannot process. This issue has now been fixed. + +Previously, this bug caused the `check` command to report errors like the +following one: + +``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" +``` + +To repair affected snapshots, upgrade to restic 0.17.2 and run: + +``` +restic repair snapshots --forget +``` + +This will remove the `irregular` files from the snapshots (creating +a new snapshot ID for each of the affected snapshots). + +https://github.com/restic/restic/pull/5057 +https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 diff --git a/changelog/0.17.3_2024-11-08/issue-4971 b/changelog/0.17.3_2024-11-08/issue-4971 new file mode 100644 index 00000000000..235d18cb53b --- /dev/null +++ b/changelog/0.17.3_2024-11-08/issue-4971 @@ -0,0 +1,7 @@ +Bugfix: Fix unusable `mount` on macOS Sonoma + +On macOS Sonoma when using FUSE-T, it was not possible to access files in +a mounted repository. This issue is now resolved. + +https://github.com/restic/restic/issues/4971 +https://github.com/restic/restic/pull/5048 diff --git a/changelog/0.17.3_2024-11-08/issue-5003 b/changelog/0.17.3_2024-11-08/issue-5003 new file mode 100644 index 00000000000..f88ed3113ed --- /dev/null +++ b/changelog/0.17.3_2024-11-08/issue-5003 @@ -0,0 +1,14 @@ +Bugfix: Fix metadata errors during backup of removable disks on Windows + +Since restic 0.17.0, backing up removable disks on Windows could report +errors when retrieving metadata, like the one shown below. + +``` +error: incomplete metadata for d:\filename: get named security info failed with: Access is denied. +``` + +This has now been fixed.
+ +https://github.com/restic/restic/issues/5003 +https://github.com/restic/restic/pull/5123 +https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444 diff --git a/changelog/0.17.3_2024-11-08/issue-5107 b/changelog/0.17.3_2024-11-08/issue-5107 new file mode 100644 index 00000000000..13bb380e4ef --- /dev/null +++ b/changelog/0.17.3_2024-11-08/issue-5107 @@ -0,0 +1,15 @@ +Bugfix: Fix metadata error on Windows for backups using VSS + +Since restic 0.17.2, when creating a backup on Windows using `--use-fs-snapshot`, +restic would report an error like the following: + +``` +error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process. +``` + +This has now been fixed by correctly handling paths that refer to volume +shadow copy snapshots. + +https://github.com/restic/restic/issues/5107 +https://github.com/restic/restic/pull/5110 +https://github.com/restic/restic/pull/5112 diff --git a/changelog/0.17.3_2024-11-08/pull-5096 b/changelog/0.17.3_2024-11-08/pull-5096 new file mode 100644 index 00000000000..b1cc6edd33a --- /dev/null +++ b/changelog/0.17.3_2024-11-08/pull-5096 @@ -0,0 +1,8 @@ +Enhancement: Allow `prune --dry-run` without lock + +Running `prune --dry-run --no-lock` now performs a dry run +without locking the repository. Note that if the repository is +modified concurrently, `prune` may return inaccurate statistics +or errors. + +https://github.com/restic/restic/pull/5096 diff --git a/changelog/0.17.3_2024-11-08/pull-5101 b/changelog/0.17.3_2024-11-08/pull-5101 new file mode 100644 index 00000000000..4152eb185f7 --- /dev/null +++ b/changelog/0.17.3_2024-11-08/pull-5101 @@ -0,0 +1,8 @@ +Bugfix: Do not retry load/list operation if SFTP connection is broken + +When using restic with the SFTP backend, backend operations that load a +file or list files were retried even if the SFTP connection was broken. +This has now been fixed. + +https://github.com/restic/restic/pull/5101 +https://forum.restic.net/t/restic-hanging-on-backup/8559 diff --git a/changelog/TEMPLATE b/changelog/TEMPLATE index 9304359b348..7d6065e046a 100644 --- a/changelog/TEMPLATE +++ b/changelog/TEMPLATE @@ -5,6 +5,8 @@ Enhancement: Allow custom bar in the foo command # Describe the problem in the past tense, the new behavior in the present # tense. Mention the affected commands, backends, operating systems, etc. +# If the problem description just says that a feature was missing, then +# only explain the new behavior. # Focus on user-facing behavior, not the implementation. # Use "Restic now ..." instead of "We have changed ...".
diff --git a/changelog/changelog-github.tmpl b/changelog/changelog-github.tmpl index d19788daf53..9936da8e609 100644 --- a/changelog/changelog-github.tmpl +++ b/changelog/changelog-github.tmpl @@ -15,7 +15,7 @@ Details {{ range $entry := .Entries }}{{ with $entry }} * {{ .Type }} #{{ .PrimaryID }}: {{ .Title }} {{ range $par := .Paragraphs }} - {{ $par }} +{{ indent 3 $par }} {{ end }} {{ range $id := .Issues -}} {{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}}) diff --git a/changelog/unreleased/issue-1843 b/changelog/unreleased/issue-1843 new file mode 100644 index 00000000000..1b891adc9c1 --- /dev/null +++ b/changelog/unreleased/issue-1843 @@ -0,0 +1,9 @@ +Bugfix: Correctly restore timestamp on long filepaths on old Windows versions + +The `restore` command did not restore timestamps on file paths longer than 256 +characters on Windows versions before Windows 10 1607. + +This issue is now resolved. + +https://github.com/restic/restic/issues/1843 +https://github.com/restic/restic/pull/5061 diff --git a/changelog/unreleased/issue-2165 b/changelog/unreleased/issue-2165 new file mode 100644 index 00000000000..12bc9dfd980 --- /dev/null +++ b/changelog/unreleased/issue-2165 @@ -0,0 +1,16 @@ +Bugfix: Ignore disappeared backup source files + +If files were removed during a backup, between restic listing the directory +contents and backing up the files in question, the following error could occur: + +``` +error: lstat /some/file/name: no such file or directory +``` + +The backup command now ignores this particular error and silently skips the +removed file. + +https://github.com/restic/restic/issues/2165 +https://github.com/restic/restic/issues/3098 +https://github.com/restic/restic/pull/5143 +https://github.com/restic/restic/pull/5145 diff --git a/changelog/unreleased/issue-2511 b/changelog/unreleased/issue-2511 new file mode 100644 index 00000000000..97b01ccde60 --- /dev/null +++ b/changelog/unreleased/issue-2511 @@ -0,0 +1,6 @@ +Enhancement: Allow generating shell completions to stdout + +Restic `generate` now supports passing `-` as the file name to the `--[shell]-completion` options, which writes the completion script to stdout (for example, `restic generate --bash-completion -`). + +https://github.com/restic/restic/issues/2511 +https://github.com/restic/restic/pull/5053 diff --git a/changelog/unreleased/issue-3697 b/changelog/unreleased/issue-3697 new file mode 100644 index 00000000000..514f9d70864 --- /dev/null +++ b/changelog/unreleased/issue-3697 @@ -0,0 +1,12 @@ +Enhancement: Allow excluding online-only cloud files (e.g. OneDrive) + +Restic treated OneDrive Files On-Demand as though they were regular files +during backup. This caused issues with VSS, could make backups extremely +slow (as OneDrive attempted to download files), or could fill the source +disk (e.g. 1TB of files in OneDrive on a 500GB disk). +Restic now allows the user to exclude these files when backing up with +the `--exclude-cloud-files` switch. + +https://github.com/restic/restic/issues/3697 +https://github.com/restic/restic/issues/4935 +https://github.com/restic/restic/pull/4990 \ No newline at end of file diff --git a/changelog/unreleased/issue-4515 b/changelog/unreleased/issue-4515 deleted file mode 100644 index 3832dc6050c..00000000000 --- a/changelog/unreleased/issue-4515 +++ /dev/null @@ -1,8 +0,0 @@ -Change: Don't retry to load files that don't exist - -Restic used to always retry to load -files. It now only retries to load -files if they exist.
- -https://github.com/restic/restic/issues/4515 -https://github.com/restic/restic/issues/1523 -https://github.com/restic/restic/pull/4520 diff --git a/changelog/unreleased/issue-4521 b/changelog/unreleased/issue-4521 new file mode 100644 index 00000000000..709741d1193 --- /dev/null +++ b/changelog/unreleased/issue-4521 @@ -0,0 +1,21 @@ +Enhancement: Add config option to set Microsoft Blob Storage Access Tier + +The `azure.access-tier` option can be passed to Restic (using `-o`, for +example `-o azure.access-tier=Cool`) to specify the access tier for +Microsoft Blob Storage objects created by Restic. + +The access tier is passed as-is to Microsoft Blob Storage, so it needs to be +understood by the API. The allowed values are `Hot`, `Cool`, or `Cold`. + +If unspecified, the default is inferred from the default configured on the +storage account. + +You can mix access tiers in the same container, and the setting isn't +stored in the restic repository, so be sure to specify it with each +command that writes to Microsoft Blob Storage. + +There is no official `Archive` storage support in restic; use this option at +your own risk. To restore any data, it is still necessary to manually warm up +the required data in the `Archive` tier. + +https://github.com/restic/restic/issues/4521 +https://github.com/restic/restic/pull/5046 \ No newline at end of file diff --git a/changelog/unreleased/issue-4529 b/changelog/unreleased/issue-4529 deleted file mode 100644 index fed726d2d1d..00000000000 --- a/changelog/unreleased/issue-4529 +++ /dev/null @@ -1,18 +0,0 @@ -Enhancement: Add extra verification of data integrity before upload - -Hardware issues, or a bug in restic or its dependencies, could previously cause -corruption in the files restic created and stored in the repository. Detecting -such corruption previously required explicitly running the `check --read-data` -or `check --read-data-subset` commands. - -To further ensure data integrity, even in the case of hardware issues or -software bugs, restic now performs additional verification of the files about -to be uploaded to the repository. - -These extra checks will increase CPU usage during backups. They can therefore, -if absolutely necessary, be disabled using the `--no-extra-verify` global -option. Please note that this should be combined with more active checking -using the previously mentioned check commands. - -https://github.com/restic/restic/issues/4529 -https://github.com/restic/restic/pull/4681 diff --git a/changelog/unreleased/issue-4547 b/changelog/unreleased/issue-4547 deleted file mode 100644 index edb1cf69329..00000000000 --- a/changelog/unreleased/issue-4547 +++ /dev/null @@ -1,7 +0,0 @@ -Enhancement: Add support for `--json` option to `version` command - -Restic now supports outputting restic version and used go version, OS and -architecture via JSON when using the version command. - -https://github.com/restic/restic/issues/4547 -https://github.com/restic/restic/pull/4553 diff --git a/changelog/unreleased/issue-4549 b/changelog/unreleased/issue-4549 deleted file mode 100644 index 4829a9881d4..00000000000 --- a/changelog/unreleased/issue-4549 +++ /dev/null @@ -1,11 +0,0 @@ -Enhancement: Add `--ncdu` option to `ls` command - -NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories. -It has an option to save a directory tree and analyse it later. -The `ls` command now supports the `--ncdu` option which outputs information -about a snapshot in the NCDU format.
- -You can use it as follows: `restic ls latest --ncdu | ncdu -f -` - -https://github.com/restic/restic/issues/4549 -https://github.com/restic/restic/pull/4550 diff --git a/changelog/unreleased/issue-4583 b/changelog/unreleased/issue-4583 deleted file mode 100644 index 97b0e6ba702..00000000000 --- a/changelog/unreleased/issue-4583 +++ /dev/null @@ -1,12 +0,0 @@ -Enhancement: Ignore s3.storage-class for metadata if archive tier is specified - -There is no official cold storage support in restic, use this option at your -own risk. - -Restic always stored all files on s3 using the specified `s3.storage-class`. -Now, restic will store metadata using a non-archive storage tier to avoid -problems when accessing a repository. To restore any data, it is still -necessary to manually warm up the required data beforehand. - -https://github.com/restic/restic/issues/4583 -https://github.com/restic/restic/pull/4584 diff --git a/changelog/unreleased/issue-4676 b/changelog/unreleased/issue-4676 deleted file mode 100644 index e95118e726a..00000000000 --- a/changelog/unreleased/issue-4676 +++ /dev/null @@ -1,8 +0,0 @@ -Enhancement: Move key add, list, remove and passwd as separate sub-commands - -Restic now provides usage documentation for the `key` command. Each sub-command; -`add`, `list`, `remove` and `passwd` now have their own sub-command documentation -which can be invoked using `restic key --help`. - -https://github.com/restic/restic/issues/4676 -https://github.com/restic/restic/pull/4685 diff --git a/changelog/unreleased/issue-4677 b/changelog/unreleased/issue-4677 deleted file mode 100644 index 8fa6cf65be4..00000000000 --- a/changelog/unreleased/issue-4677 +++ /dev/null @@ -1,19 +0,0 @@ -Bugfix: Downgrade zstd library to fix rare data corruption at max. compression - -In restic 0.16.3, backups where the compression level was set to `max` (using -`--compression max`) could in rare and very specific circumstances result in -data corruption due to a bug in the library used for compressing data. Restic -0.16.1 and 0.16.2 were not affected. - -Restic now uses the previous version of the library used to compress data, the -same version used by restic 0.16.2. Please note that the `auto` compression -level (which restic uses by default) was never affected, and even if you used -`max` compression, chances of being affected by this issue are small. - -To check a repository for any corruption, run `restic check --read-data`. This -will download and verify the whole repository and can be used at any time to -completely verify the integrity of a repository. If the `check` command detects -anomalies, follow the suggested steps. - -https://github.com/restic/restic/issues/4677 -https://github.com/restic/restic/pull/4679 diff --git a/changelog/unreleased/issue-4678 b/changelog/unreleased/issue-4678 deleted file mode 100644 index f98711ea420..00000000000 --- a/changelog/unreleased/issue-4678 +++ /dev/null @@ -1,7 +0,0 @@ -Enhancement: Add --target flag to the dump command - -Restic `dump` always printed to the standard output. It now permits to select a -`--target` file to write the output to. - -https://github.com/restic/restic/issues/4678 -https://github.com/restic/restic/pull/4682 diff --git a/changelog/unreleased/issue-4948 b/changelog/unreleased/issue-4948 new file mode 100644 index 00000000000..3fd350d0d7c --- /dev/null +++ b/changelog/unreleased/issue-4948 @@ -0,0 +1,6 @@ +Enhancement: Format exit errors as JSON with --json + +Restic now prints any exit error messages as JSON when requested. 
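+ +As an illustration, an exit error might then be printed along the following lines (a sketch based on restic's JSON message conventions; the exact fields and wording may differ): + +``` +{"message_type":"exit_error","code":1,"message":"unable to open repository"} +```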
+ +https://github.com/restic/restic/issues/4948 +https://github.com/restic/restic/pull/4952 diff --git a/changelog/unreleased/issue-4983 b/changelog/unreleased/issue-4983 new file mode 100644 index 00000000000..e5292cf5c5c --- /dev/null +++ b/changelog/unreleased/issue-4983 @@ -0,0 +1,8 @@ +Enhancement: Add SLSA provenance to the GHCR container images + +Restic's GitHub Container Registry (GHCR) image build workflow now includes +SLSA provenance generation. This enhancement improves the security and +traceability of images built and pushed to GHCR. + +https://github.com/restic/restic/issues/4983 +https://github.com/restic/restic/pull/4999 \ No newline at end of file diff --git a/changelog/unreleased/issue-5081 b/changelog/unreleased/issue-5081 new file mode 100644 index 00000000000..6cf1bf5929f --- /dev/null +++ b/changelog/unreleased/issue-5081 @@ -0,0 +1,7 @@ +Enhancement: Retry loading repository config + +Restic now retries loading the repository config file when opening a repository. +In addition, the `init` command now also retries backend operations. + +https://github.com/restic/restic/issues/5081 +https://github.com/restic/restic/pull/5095 diff --git a/changelog/unreleased/issue-5089 b/changelog/unreleased/issue-5089 new file mode 100644 index 00000000000..43c5c83667d --- /dev/null +++ b/changelog/unreleased/issue-5089 @@ -0,0 +1,13 @@ +Enhancement: Allow including/excluding extended file attributes during restore + +Restic's `restore` command previously attempted to restore all extended file +attributes. Two new command line flags, `--exclude-xattr` and +`--include-xattr`, now control which extended file attributes are restored. + +If neither flag is provided, restic defaults to restoring all extended file +attributes. + +https://github.com/restic/restic/issues/5089 +https://github.com/restic/restic/pull/5129 diff --git a/changelog/unreleased/issue-5092 b/changelog/unreleased/issue-5092 new file mode 100644 index 00000000000..b6a32b68bfc --- /dev/null +++ b/changelog/unreleased/issue-5092 @@ -0,0 +1,8 @@ +Enhancement: Indicate the number of deleted files/directories during restore + +Restic now indicates the number of deleted files/directories during restore. +The `--json` output now includes a `files_deleted` field that shows the number +of files and directories that were deleted during restore. + +https://github.com/restic/restic/issues/5092 +https://github.com/restic/restic/pull/5100 diff --git a/changelog/unreleased/issue-5131 b/changelog/unreleased/issue-5131 new file mode 100644 index 00000000000..fd38a216dd3 --- /dev/null +++ b/changelog/unreleased/issue-5131 @@ -0,0 +1,6 @@ +Enhancement: Add DragonflyBSD support + +Restic can now be compiled on DragonflyBSD. + +https://github.com/restic/restic/issues/5131 +https://github.com/restic/restic/pull/5138 diff --git a/changelog/unreleased/issue-5137 b/changelog/unreleased/issue-5137 new file mode 100644 index 00000000000..ba681202cd3 --- /dev/null +++ b/changelog/unreleased/issue-5137 @@ -0,0 +1,8 @@ +Enhancement: `tag` command returns the modified snapshot information + +The `tag` command now returns the modified snapshot information in its +output. A new `--json` option provides the output in JSON format for +scripting access.
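+ +For example, the following invocation (the tag name and snapshot selector are illustrative) prints the modified snapshot information as JSON: + +``` +restic tag --add nas-backup --json latest +```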
+ +https://github.com/restic/restic/issues/5137 +https://github.com/restic/restic/pull/5144 \ No newline at end of file diff --git a/changelog/unreleased/issue-5174 b/changelog/unreleased/issue-5174 new file mode 100644 index 00000000000..7f7922a7aa9 --- /dev/null +++ b/changelog/unreleased/issue-5174 @@ -0,0 +1,6 @@ +Enhancement: Enable xattr support on NetBSD 10+ + +Restic now supports backing up and restoring extended attributes on systems running NetBSD 10 or later. + +https://github.com/restic/restic/issues/5174 +https://github.com/restic/restic/pull/5180 diff --git a/changelog/unreleased/pull-4503 b/changelog/unreleased/pull-4503 deleted file mode 100644 index 3ce5c48e862..00000000000 --- a/changelog/unreleased/pull-4503 +++ /dev/null @@ -1,7 +0,0 @@ -Bugfix: Correct hardlink handling in `stats` command - -If files on different devices had the same inode id, then the `stats` command -did not correctly calculate the snapshot size. This has been fixed. - -https://github.com/restic/restic/pull/4503 -https://forum.restic.net/t/possible-bug-in-stats/6461/8 diff --git a/changelog/unreleased/pull-4526 b/changelog/unreleased/pull-4526 deleted file mode 100644 index 3a538f57a70..00000000000 --- a/changelog/unreleased/pull-4526 +++ /dev/null @@ -1,11 +0,0 @@ -Enhancement: Add bitrot detection to `diff` command - -The output of the `diff` command now includes the modifier `?` for files -to indicate bitrot in backed up files. It will appear whenever there is a -difference in content while the metadata is exactly the same. Since files with -unchanged metadata are normally not read again when creating a backup, the -detection is only effective if the right-hand side of the diff has been created -with "backup --force". - -https://github.com/restic/restic/issues/805 -https://github.com/restic/restic/pull/4526 diff --git a/changelog/unreleased/pull-4573 b/changelog/unreleased/pull-4573 deleted file mode 100644 index bd5c2c423cd..00000000000 --- a/changelog/unreleased/pull-4573 +++ /dev/null @@ -1,5 +0,0 @@ -Enhancement: Add `--new-host` and `--new-time` options to `rewrite` command - -`restic rewrite` now allows rewriting the host and / or time metadata of a snapshot. - -https://github.com/restic/restic/pull/4573 diff --git a/changelog/unreleased/pull-4590 b/changelog/unreleased/pull-4590 deleted file mode 100644 index 353d2161615..00000000000 --- a/changelog/unreleased/pull-4590 +++ /dev/null @@ -1,7 +0,0 @@ -Enhancement: `mount` tests mountpoint existence before opening the repository - -The restic `mount` command now checks for the existence of the -mountpoint before opening the repository, leading to quicker error -detection. - -https://github.com/restic/restic/pull/4590 diff --git a/changelog/unreleased/pull-4938 b/changelog/unreleased/pull-4938 new file mode 100644 index 00000000000..0fa876ca094 --- /dev/null +++ b/changelog/unreleased/pull-4938 @@ -0,0 +1,7 @@ +Change: Update dependencies and require Go 1.21 or newer + +We have updated all dependencies. Since some libraries require newer Go standard +library features, support for Go 1.19 and 1.20 has been dropped, which means that +restic now requires at least Go 1.21 to build.
+ +https://github.com/restic/restic/pull/4938 diff --git a/changelog/unreleased/pull-5054 b/changelog/unreleased/pull-5054 new file mode 100644 index 00000000000..6efd5882c44 --- /dev/null +++ b/changelog/unreleased/pull-5054 @@ -0,0 +1,7 @@ +Enhancement: Compress ZIP archives created by `dump` command + +Previously, restic did not compress the archives created by the `dump` +command. It now uses the DEFLATE algorithm for "zip" archives, which +saves some disk space when exporting them. + +https://github.com/restic/restic/pull/5054 diff --git a/changelog/unreleased/pull-5119 b/changelog/unreleased/pull-5119 new file mode 100644 index 00000000000..731e3ecd769 --- /dev/null +++ b/changelog/unreleased/pull-5119 @@ -0,0 +1,6 @@ +Enhancement: Include backup start and end in JSON output + +The JSON output of the backup command now also includes the `backup_start` +and `backup_end` timestamps. + +https://github.com/restic/restic/pull/5119 diff --git a/changelog/unreleased/pull-5141 b/changelog/unreleased/pull-5141 new file mode 100644 index 00000000000..7f71f2269c5 --- /dev/null +++ b/changelog/unreleased/pull-5141 @@ -0,0 +1,7 @@ +Enhancement: Provide clear error message if AZURE_ACCOUNT_NAME is not set + +If `AZURE_ACCOUNT_NAME` is not set, any command related to an Azure repository +would result in a misleading networking error. Restic now detects this and +provides a clear warning that the variable is not defined. + +https://github.com/restic/restic/pull/5141 diff --git a/changelog/unreleased/pull-5153 b/changelog/unreleased/pull-5153 new file mode 100644 index 00000000000..e76d7f2fc84 --- /dev/null +++ b/changelog/unreleased/pull-5153 @@ -0,0 +1,6 @@ +Bugfix: Include root tree when searching using `find --tree` + +`restic find --tree` didn't find trees referenced by `restic snapshots --json`. +It now correctly includes the root tree when searching. + +https://github.com/restic/restic/pull/5153 diff --git a/changelog/unreleased/pull-5162 b/changelog/unreleased/pull-5162 new file mode 100644 index 00000000000..18e6be2b604 --- /dev/null +++ b/changelog/unreleased/pull-5162 @@ -0,0 +1,7 @@ +Change: Promote feature flags + +The `explicit-s3-anonymous-auth` and `safe-forget-keep-tags` features are +now stable and can no longer be disabled. The feature flags will be removed +in restic 0.19.0. + +https://github.com/restic/restic/pull/5162 diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go index 75933fe966a..90ea93b9235 100644 --- a/cmd/restic/cleanup.go +++ b/cmd/restic/cleanup.go @@ -1,89 +1,41 @@ package main import ( + "context" "os" "os/signal" - "sync" "syscall" "github.com/restic/restic/internal/debug" ) -var cleanupHandlers struct { - sync.Mutex - list []func(code int) (int, error) - done bool - ch chan os.Signal -} - -func init() { - cleanupHandlers.ch = make(chan os.Signal, 1) - go CleanupHandler(cleanupHandlers.ch) - signal.Notify(cleanupHandlers.ch, syscall.SIGINT) -} +func createGlobalContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) -// AddCleanupHandler adds the function f to the list of cleanup handlers so -// that it is executed when all the cleanup handlers are run, e.g. when SIGINT -// is received.
-func AddCleanupHandler(f func(code int) (int, error)) { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() + ch := make(chan os.Signal, 1) + go cleanupHandler(ch, cancel) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) - // reset the done flag for integration tests - cleanupHandlers.done = false - - cleanupHandlers.list = append(cleanupHandlers.list, f) + return ctx } -// RunCleanupHandlers runs all registered cleanup handlers -func RunCleanupHandlers(code int) int { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() - - if cleanupHandlers.done { - return code - } - cleanupHandlers.done = true +// cleanupHandler handles the SIGINT and SIGTERM signals. +func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) { + s := <-c + debug.Log("signal %v received, cleaning up", s) + Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) - for _, f := range cleanupHandlers.list { - var err error - code, err = f(code) - if err != nil { - Warnf("error in cleanup handler: %v\n", err) - } + if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { + _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") + _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) + _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") } - cleanupHandlers.list = nil - return code -} - -// CleanupHandler handles the SIGINT signals. -func CleanupHandler(c <-chan os.Signal) { - for s := range c { - debug.Log("signal %v received, cleaning up", s) - Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) - - if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { - _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") - _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) - _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") - } - code := 0 - - if s == syscall.SIGINT { - code = 130 - } else { - code = 1 - } - - Exit(code) - } + cancel() } -// Exit runs the cleanup handlers and then terminates the process with the -// given exit code. +// Exit terminates the process with the given exit code. func Exit(code int) { - code = RunCleanupHandlers(code) debug.Log("exiting with status code %d", code) os.Exit(code) } diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index be7a2aa3fb9..93b4556c7eb 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -20,10 +20,12 @@ import ( "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/textfile" + "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/backup" "github.com/restic/restic/internal/ui/termstatus" ) @@ -41,8 +43,11 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, - PreRun: func(cmd *cobra.Command, args []string) { + PreRun: func(_ *cobra.Command, _ []string) { if backupOptions.Host == "" { hostname, err := os.Hostname() if err != nil { @@ -52,6 +57,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea backupOptions.Host = hostname } }, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() @@ -62,7 +68,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea // BackupOptions bundles all options for the backup command. type BackupOptions struct { - excludePatternOptions + filter.ExcludePatternOptions Parent string GroupBy restic.SnapshotGroupByOptions @@ -71,6 +77,7 @@ type BackupOptions struct { ExcludeIfPresent []string ExcludeCaches bool ExcludeLargerThan string + ExcludeCloudFiles bool Stdin bool StdinFilename string StdinCommand bool @@ -87,9 +94,11 @@ type BackupOptions struct { DryRun bool ReadConcurrency uint NoScan bool + SkipIfUnchanged bool } var backupOptions BackupOptions +var backupFSTestHook func(fs fs.FS) fs.FS // ErrInvalidSourceData is used to report an incomplete backup var ErrInvalidSourceData = errors.New("at least one source file could not be read") @@ -101,9 +110,9 @@ func init() { f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)") backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true} f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')") - f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`) + f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the source files/directories (overrides the "parent" flag)`) - initExcludePatternOptions(f, &backupOptions.excludePatternOptions) + backupOptions.ExcludePatternOptions.Add(f) f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems, don't cross filesystem boundaries and subvolumes") f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)") @@ -114,7 +123,7 @@ func init() { f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout") f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)") f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)") - f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag") + f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). 
To prevent an expensive rescan use the \"parent\" flag") f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually") err := f.MarkDeprecated("hostname", "use --host") if err != nil { @@ -132,11 +141,18 @@ func init() { f.BoolVar(&backupOptions.NoScan, "no-scan", false, "do not run scanner to estimate size of backup") if runtime.GOOS == "windows" { f.BoolVar(&backupOptions.UseFsSnapshot, "use-fs-snapshot", false, "use filesystem snapshot where possible (currently only Windows VSS)") + f.BoolVar(&backupOptions.ExcludeCloudFiles, "exclude-cloud-files", false, "excludes online-only cloud files (such as OneDrive Files On-Demand)") } + f.BoolVar(&backupOptions.SkipIfUnchanged, "skip-if-unchanged", false, "skip snapshot creation if identical to parent snapshot") // parse read concurrency from env, on error the default value will be used readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32) backupOptions.ReadConcurrency = uint(readConcurrency) + + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + backupOptions.Host = host + } } // filterExisting returns a slice of all existing items, or an error if no @@ -153,7 +169,7 @@ func filterExisting(items []string) (result []string, err error) { } if len(result) == 0 { - return nil, errors.Fatal("all target directories/files do not exist") + return nil, errors.Fatal("all source directories/files do not exist") } return @@ -252,7 +268,7 @@ func readFilenamesRaw(r io.Reader) (names []string, err error) { // Check returns an error when an invalid combination of options was set. func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { - if gopts.password == "" { + if gopts.password == "" && !gopts.InsecureNoPassword { if opts.Stdin { return errors.Fatal("cannot read both password and data from stdin") } @@ -286,9 +302,9 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { // collectRejectByNameFuncs returns a list of all functions which may reject data // from being saved in a snapshot based on path only -func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) { +func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []archiver.RejectByNameFunc, err error) { // exclude restic cache - if repo.Cache != nil { + if repo.Cache() != nil { f, err := rejectResticCache(repo) if err != nil { return nil, err @@ -297,49 +313,67 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) ( fs = append(fs, f) } - fsPatterns, err := opts.excludePatternOptions.CollectPatterns() + fsPatterns, err := opts.ExcludePatternOptions.CollectPatterns(Warnf) if err != nil { return nil, err } - fs = append(fs, fsPatterns...) 
+ for _, pat := range fsPatterns { + fs = append(fs, archiver.RejectByNameFunc(pat)) + } - if opts.ExcludeCaches { - opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") + return fs, nil +} + +// collectRejectFuncs returns a list of all functions which may reject data +// from being saved in a snapshot based on path and file info +func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs []archiver.RejectFunc, err error) { + // allowed devices + if opts.ExcludeOtherFS && !opts.Stdin && !opts.StdinCommand { + f, err := archiver.RejectByDevice(targets, fs) + if err != nil { + return nil, err + } + funcs = append(funcs, f) } - for _, spec := range opts.ExcludeIfPresent { - f, err := rejectIfPresent(spec) + if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin && !opts.StdinCommand { + maxSize, err := ui.ParseBytes(opts.ExcludeLargerThan) if err != nil { return nil, err } - fs = append(fs, f) + f, err := archiver.RejectBySize(maxSize) + if err != nil { + return nil, err + } + funcs = append(funcs, f) } - return fs, nil -} - -// collectRejectFuncs returns a list of all functions which may reject data -// from being saved in a snapshot based on path and file info -func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) { - // allowed devices - if opts.ExcludeOtherFS && !opts.Stdin { - f, err := rejectByDevice(targets) + if opts.ExcludeCloudFiles && !opts.Stdin && !opts.StdinCommand { + if runtime.GOOS != "windows" { + return nil, errors.Fatalf("exclude-cloud-files is only supported on Windows") + } + f, err := archiver.RejectCloudFiles(Warnf) if err != nil { return nil, err } - fs = append(fs, f) + funcs = append(funcs, f) + } + + if opts.ExcludeCaches { + opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") } - if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin { - f, err := rejectBySize(opts.ExcludeLargerThan) + for _, spec := range opts.ExcludeIfPresent { + f, err := archiver.RejectIfPresent(spec, Warnf) if err != nil { return nil, err } - fs = append(fs, f) + + funcs = append(funcs, f) } - return fs, nil + return funcs, nil } // collectTargets returns a list of target files/dirs from several sources. @@ -398,7 +432,7 @@ func collectTargets(opts BackupOptions, args []string) (targets []string, err er // and have the ability to use both files-from and args at the same time. targets = append(targets, args...) 
if len(targets) == 0 && !opts.Stdin { - return nil, errors.Fatal("nothing to backup, please specify target files/dirs") + return nil, errors.Fatal("nothing to backup, please specify source files/dirs") } targets, err = filterExisting(targets) @@ -440,7 +474,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o } func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - err := opts.Check(gopts, args) + var vsscfg fs.VSSConfig + var err error + + if runtime.GOOS == "windows" { + if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil { + return err + } + } + + err = opts.Check(gopts, args) if err != nil { return err } @@ -451,6 +494,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } timeStamp := time.Now() + backupStart := timeStamp if opts.TimeStamp != "" { timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local) if err != nil { @@ -462,10 +506,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter Verbosef("open repository\n") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun) if err != nil { return err } + defer unlock() var progressPrinter backup.ProgressPrinter if gopts.JSON { @@ -477,34 +522,12 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter calculateProgressInterval(!gopts.Quiet, gopts.JSON)) defer progressReporter.Done() - if opts.DryRun { - repo.SetDryRun() - } - - if !gopts.JSON { - progressPrinter.V("lock repository") - } - if !opts.DryRun { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - // rejectByNameFuncs collect functions that can reject items from the backup based on path only rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo) if err != nil { return err } - // rejectFuncs collect functions that can reject items from the backup based on path and file info - rejectFuncs, err := collectRejectFuncs(opts, targets) - if err != nil { - return err - } - var parentSnapshot *restic.Snapshot if !opts.Stdin { parentSnapshot, err = findParentSnapshot(ctx, repo, opts, targets, timeStamp) @@ -526,38 +549,19 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) - err = repo.LoadIndex(ctx, bar) if err != nil { return err } - selectByNameFilter := func(item string) bool { - for _, reject := range rejectByNameFuncs { - if reject(item) { - return false - } - } - return true - } - - selectFilter := func(item string, fi os.FileInfo) bool { - for _, reject := range rejectFuncs { - if reject(item, fi) { - return false - } - } - return true - } - var targetFS fs.FS = fs.Local{} if runtime.GOOS == "windows" && opts.UseFsSnapshot { if err = fs.HasSufficientPrivilegesForVSS(); err != nil { return err } - errorHandler := func(item string, err error) error { - return progressReporter.Error(item, err) + errorHandler := func(item string, err error) { + _ = progressReporter.Error(item, err) } messageHandler := func(msg string, args ...interface{}) { @@ -566,7 +570,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } } - localVss := fs.NewLocalVss(errorHandler, messageHandler) + localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg) defer 
localVss.DeleteSnapshots() targetFS = localVss } @@ -592,6 +596,19 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter targets = []string{filename} } + if backupFSTestHook != nil { + targetFS = backupFSTestHook(targetFS) + } + + // rejectFuncs collect functions that can reject items from the backup based on path and file info + rejectFuncs, err := collectRejectFuncs(opts, targets, targetFS) + if err != nil { + return err + } + + selectByNameFilter := archiver.CombineRejectByNames(rejectByNameFuncs) + selectFilter := archiver.CombineRejects(rejectFuncs) + wg, wgCtx := errgroup.WithContext(ctx) cancelCtx, cancel := context.WithCancel(wgCtx) defer cancel() @@ -638,18 +655,20 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } snapshotOpts := archiver.SnapshotOptions{ - Excludes: opts.Excludes, - Tags: opts.Tags.Flatten(), - Time: timeStamp, - Hostname: opts.Host, - ParentSnapshot: parentSnapshot, - ProgramVersion: "restic " + version, + Excludes: opts.Excludes, + Tags: opts.Tags.Flatten(), + BackupStart: backupStart, + Time: timeStamp, + Hostname: opts.Host, + ParentSnapshot: parentSnapshot, + ProgramVersion: "restic " + version, + SkipIfUnchanged: opts.SkipIfUnchanged, } if !gopts.JSON { progressPrinter.V("start backup on %v", targets) } - _, id, err := arch.Snapshot(ctx, targets, snapshotOpts) + _, id, summary, err := arch.Snapshot(ctx, targets, snapshotOpts) // cleanly shutdown all running goroutines cancel() @@ -663,10 +682,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } // Report finished execution - progressReporter.Finish(id, opts.DryRun) - if !gopts.JSON && !opts.DryRun { - progressPrinter.P("snapshot %s saved\n", id.Str()) - } + progressReporter.Finish(id, summary, opts.DryRun) if !success { return ErrInvalidSourceData } diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index c60e9c543be..06d71e345aa 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -8,8 +8,8 @@ import ( "path/filepath" "runtime" "testing" + "time" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -31,7 +31,7 @@ func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { err := testRunBackupAssumeFailure(t, dir, target, opts, gopts) - rtest.Assert(t, err == nil, "Error while backing up") + rtest.Assert(t, err == nil, "Error while backing up: %v", err) } func TestBackup(t *testing.T) { @@ -52,14 +52,14 @@ func testBackup(t *testing.T, useFsSnapshot bool) { opts := BackupOptions{UseFsSnapshot: useFsSnapshot} // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) stat1 := dirStats(env.repo) // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 2) stat2 := dirStats(env.repo) @@ -71,7 +71,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) // third backup, explicit incremental 
opts.Parent = snapshotIDs[0].String() - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs = testListSnapshots(t, env.gopts, 3) stat3 := dirStats(env.repo) @@ -84,7 +84,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()+":"+toPathInSnapshot(filepath.Dir(env.testdata))) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal: %v", diff) } @@ -92,6 +92,20 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) } +func toPathInSnapshot(path string) string { + // use path as is on most platforms, but convert it on windows + if runtime.GOOS == "windows" { + // the path generated by the test is always local so take the shortcut + vol := filepath.VolumeName(path) + if vol[len(vol)-1] != ':' { + panic(fmt.Sprintf("unexpected path: %q", path)) + } + path = vol[:len(vol)-1] + string(filepath.Separator) + path[len(vol)+1:] + path = filepath.ToSlash(path) + } + return path +} + func TestBackupWithRelativePath(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -112,6 +126,63 @@ func TestBackupWithRelativePath(t *testing.T) { rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) } +type vssDeleteOriginalFS struct { + fs.FS + testdata string + hasRemoved bool +} + +func (f *vssDeleteOriginalFS) Lstat(name string) (*fs.ExtendedFileInfo, error) { + if !f.hasRemoved { + // call Lstat to trigger snapshot creation + _, _ = f.FS.Lstat(name) + // nuke testdata + var err error + for i := 0; i < 3; i++ { + // The CI sometimes runs into "The process cannot access the file because it is being used by another process" errors + // thus try a few times to remove the data + err = os.RemoveAll(f.testdata) + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } + if err != nil { + return nil, err + } + f.hasRemoved = true + } + return f.FS.Lstat(name) +} + +func TestBackupVSS(t *testing.T) { + if runtime.GOOS != "windows" || fs.HasSufficientPrivilegesForVSS() != nil { + t.Skip("vss fs test can only be run on windows with admin privileges") + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{UseFsSnapshot: true} + + var testFS *vssDeleteOriginalFS + backupFSTestHook = func(fs fs.FS) fs.FS { + testFS = &vssDeleteOriginalFS{ + FS: fs, + testdata: env.testdata, + } + return testFS + } + defer func() { + backupFSTestHook = nil + }() + + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + rtest.Equals(t, true, testFS.hasRemoved, "testdata was not removed") +} + func TestBackupParentSelection(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -250,29 +321,18 @@ func TestBackupTreeLoadError(t *testing.T) { opts := BackupOptions{} // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) - - r, err := 
OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) + treePacks := listTreePacks(env.gopts, t) testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // delete the subdirectory pack first - for id := range treePacks { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) - } + removePacks(env.gopts, t, treePacks) testRunRebuildIndex(t, env.gopts) // now the repo is missing the tree blob in the index; check should report this testRunCheckMustFail(t, env.gopts) // second backup should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") testRunCheck(t, env.gopts) @@ -305,12 +365,7 @@ func TestBackupExclude(t *testing.T) { for _, filename := range backupExcludeFilenames { fp := filepath.Join(datadir, filename) rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) - - f, err := os.Create(fp) - rtest.OK(t, err) - - fmt.Fprint(f, filename) - rtest.OK(t, f.Close()) + rtest.OK(t, os.WriteFile(fp, []byte(filename), 0o666)) } snapshots := make(map[string]struct{}) @@ -406,6 +461,7 @@ func TestIncrementalBackup(t *testing.T) { t.Logf("repository grown by %d bytes", stat3.size-stat2.size) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupTags(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -441,6 +497,7 @@ func TestBackupTags(t *testing.T) { "expected parent to be %v, got %v", parent.ID, newest.Parent) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupProgramVersion(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -509,7 +566,7 @@ func TestHardLink(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal %v", diff) @@ -637,3 +694,32 @@ func TestStdinFromCommandFailNoOutputAndExitCode(t *testing.T) { testRunCheck(t, env.gopts) } + +func TestBackupEmptyPassword(t *testing.T) { + // basic sanity test that empty passwords work + env, cleanup := withTestEnvironment(t) + defer cleanup() + + env.gopts.password = "" + env.gopts.InsecureNoPassword = true + + testSetupBackupData(t, env) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{}, env.gopts) + testListSnapshots(t, env.gopts, 1) + testRunCheck(t, env.gopts) +} + +func TestBackupSkipIfUnchanged(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{SkipIfUnchanged: true} + + for i := 0; i < 3; i++ { + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, 
env.gopts) + testListSnapshots(t, env.gopts, 1) + } + + testRunCheck(t, env.gopts) +} diff --git a/cmd/restic/cmd_backup_test.go b/cmd/restic/cmd_backup_test.go index 5cbc42436c8..44e08ff9630 100644 --- a/cmd/restic/cmd_backup_test.go +++ b/cmd/restic/cmd_backup_test.go @@ -39,21 +39,24 @@ func TestCollectTargets(t *testing.T) { f1, err := os.Create(filepath.Join(dir, "fromfile")) rtest.OK(t, err) // Empty lines should be ignored. A line starting with '#' is a comment. - fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name()) + _, err = fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name()) + rtest.OK(t, err) rtest.OK(t, f1.Close()) f2, err := os.Create(filepath.Join(dir, "fromfile-verbatim")) rtest.OK(t, err) for _, filename := range []string{fooSpace, barStar} { // Empty lines should be ignored. CR+LF is allowed. - fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename)) + _, err = fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename)) + rtest.OK(t, err) } rtest.OK(t, f2.Close()) f3, err := os.Create(filepath.Join(dir, "fromfile-raw")) rtest.OK(t, err) for _, filename := range []string{"baz", "quux"} { - fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename)) + _, err = fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename)) + rtest.OK(t, err) } rtest.OK(t, err) rtest.OK(t, f3.Close()) diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index 4a10d102772..cd970b699e6 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -8,9 +8,8 @@ import ( "strings" "time" - "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/table" "github.com/spf13/cobra" @@ -25,10 +24,12 @@ The "cache" command allows listing and cleaning local cache directories. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runCache(cacheOptions, globalOptions, args) }, } @@ -87,7 +88,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error { for _, item := range oldDirs { dir := filepath.Join(cachedir, item.Name()) - err = fs.RemoveAll(dir) + err = os.RemoveAll(dir) if err != nil { Warnf("unable to remove %v: %v\n", dir, err) } diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 92f58b2e734..6160c54dfc2 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -7,12 +7,13 @@ import ( "github.com/spf13/cobra" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) +var catAllowedCmds = []string{"config", "index", "snapshot", "key", "masterkey", "lock", "pack", "blob", "tree"} + var cmdCat = &cobra.Command{ Use: "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]", Short: "Print internal objects to stdout", @@ -22,12 +23,18 @@ The "cat" command is used to print internal objects to stdout. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. 
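The cmd_backup_test.go hunks stop discarding the error returned by fmt.Fprintf, since a failed or short write would otherwise surface only later as a confusing test failure. The same pattern can be folded into a helper; mustFprintf is a hypothetical name, not part of restic's test package:

    package sketch

    import (
        "fmt"
        "io"
        "testing"
    )

    // mustFprintf fails the test immediately when the write fails, the
    // same effect as pairing each fmt.Fprintf with rtest.OK above.
    func mustFprintf(t testing.TB, w io.Writer, format string, args ...any) {
        t.Helper()
        if _, err := fmt.Fprintf(w, format, args...); err != nil {
            t.Fatalf("write failed: %v", err)
        }
    }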
+Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runCat(cmd.Context(), globalOptions, args) }, + ValidArgs: catAllowedCmds, } func init() { @@ -35,21 +42,19 @@ func init() { } func validateCatArgs(args []string) error { - var allowedCmds = []string{"config", "index", "snapshot", "key", "masterkey", "lock", "pack", "blob", "tree"} - if len(args) < 1 { return errors.Fatal("type not specified") } validType := false - for _, v := range allowedCmds { + for _, v := range catAllowedCmds { if v == args[0] { validType = true break } } if !validType { - return errors.Fatalf("invalid type %q, must be one of [%s]", args[0], strings.Join(allowedCmds, "|")) + return errors.Fatalf("invalid type %q, must be one of [%s]", args[0], strings.Join(catAllowedCmds, "|")) } if args[0] != "masterkey" && args[0] != "config" && len(args) != 2 { @@ -64,19 +69,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] @@ -154,9 +151,9 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return nil case "pack": - h := backend.Handle{Type: restic.PackFile, Name: id.String()} - buf, err := backend.LoadAll(ctx, nil, repo.Backend(), h) - if err != nil { + buf, err := repo.LoadRaw(ctx, restic.PackFile, id) + // allow returning broken pack files + if buf == nil { return err } @@ -176,8 +173,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { - bh := restic.BlobHandle{ID: id, Type: t} - if !repo.Index().Has(bh) { + if _, ok := repo.LookupBlobSize(t, id); !ok { continue } diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index f04a4fe71b1..8788b0caf33 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -11,12 +11,14 @@ import ( "github.com/spf13/cobra" - "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" + "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" ) var cmdCheck = &cobra.Command{ @@ -32,13 +34,20 @@ repository and not use a local cache. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
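runCat, like most commands in this diff, replaces the manual OpenRepository-plus-lock bookkeeping with one helper that hands back an unlock closure. The real signature lives in restic's cmd package; this sketch only shows the shape, with a stub repository type:

    package sketch

    import "context"

    type repository struct{} // stand-in for restic's repository type

    // openWithReadLock opens the repository and, unless noLock is set,
    // takes a read lock; either way the caller gets a single unlock
    // closure that is safe to defer unconditionally.
    func openWithReadLock(ctx context.Context, noLock bool) (context.Context, *repository, func(), error) {
        repo := &repository{}
        unlock := func() {} // no-op when no lock was taken
        if !noLock {
            // acquire the read lock here and capture its release
            unlock = func() { /* release the read lock */ }
        }
        return ctx, repo, unlock, nil
    }

One defer unlock() then covers every return path, removing the branch where a lock was taken but an early error return skipped the release.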
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runCheck(cmd.Context(), checkOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runCheck(cmd.Context(), checkOptions, globalOptions, args, term) }, - PreRunE: func(cmd *cobra.Command, args []string) error { + PreRunE: func(_ *cobra.Command, _ []string) error { return checkFlags(checkOptions) }, } @@ -154,7 +163,7 @@ func parsePercentage(s string) (float64, error) { // - if the user explicitly requested --no-cache, we don't use any cache // - if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check // - by default, we use a cache in a temporary directory that is deleted after the check -func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) { +func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions, printer progress.Printer) (cleanup func()) { cleanup = func() {} if opts.WithCache { // use the default cache, no setup needed @@ -171,53 +180,54 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) cachedir = cache.EnvDir() } - // use a cache in a temporary directory + if cachedir != "" { + // use a cache in a temporary directory + err := os.MkdirAll(cachedir, 0755) + if err != nil { + Warnf("unable to create cache directory %s, disabling cache: %v\n", cachedir, err) + gopts.NoCache = true + return cleanup + } + } tempdir, err := os.MkdirTemp(cachedir, "restic-check-cache-") if err != nil { // if an error occurs, don't use any cache - Warnf("unable to create temporary directory for cache during check, disabling cache: %v\n", err) + printer.E("unable to create temporary directory for cache during check, disabling cache: %v\n", err) gopts.NoCache = true return cleanup } gopts.CacheDir = tempdir - Verbosef("using temporary cache in %v\n", tempdir) + printer.P("using temporary cache in %v\n", tempdir) cleanup = func() { - err := fs.RemoveAll(tempdir) + err := os.RemoveAll(tempdir) if err != nil { - Warnf("error removing temporary cache directory: %v\n", err) + printer.E("error removing temporary cache directory: %v\n", err) } } return cleanup } -func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string) error { +func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) error { if len(args) != 0 { return errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags") } - cleanup := prepareCheckCache(opts, &gopts) - AddCleanupHandler(func(code int) (int, error) { - cleanup() - return code, nil - }) + printer := newTerminalProgressPrinter(gopts.verbosity, term) - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } + cleanup := prepareCheckCache(opts, &gopts, printer) + defer cleanup() if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + printer.P("create exclusive lock for repository\n") } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) + if err != nil { + return err + } + defer unlock() chkr := checker.New(repo, opts.CheckUnused) err = chkr.LoadSnapshots(ctx) @@ -225,71 +235,88 @@ func runCheck(ctx 
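The reworked prepareCheckCache first makes sure the configured cache directory exists, then carves a disposable subdirectory out of it, and on any failure falls back to running without a cache. Its core, as a standalone sketch using plain os calls:

    package sketch

    import "os"

    // tempCacheDir returns a throwaway cache directory inside base plus
    // a cleanup function; useCache is false when the cache must be
    // disabled, mirroring the fallback behaviour of prepareCheckCache.
    func tempCacheDir(base string) (dir string, cleanup func(), useCache bool) {
        cleanup = func() {}
        if base != "" {
            if err := os.MkdirAll(base, 0755); err != nil {
                return "", cleanup, false
            }
        }
        tmp, err := os.MkdirTemp(base, "restic-check-cache-")
        if err != nil {
            return "", cleanup, false
        }
        return tmp, func() { _ = os.RemoveAll(tmp) }, true
    }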
context.Context, opts CheckOptions, gopts GlobalOptions, args return err } - Verbosef("load indexes\n") - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + printer.P("load indexes\n") + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) hints, errs := chkr.LoadIndex(ctx, bar) + if ctx.Err() != nil { + return ctx.Err() + } errorsFound := false suggestIndexRebuild := false mixedFound := false for _, hint := range hints { switch hint.(type) { - case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat: - Printf("%v\n", hint) + case *checker.ErrDuplicatePacks: + term.Print(hint.Error()) suggestIndexRebuild = true case *checker.ErrMixedPack: - Printf("%v\n", hint) + term.Print(hint.Error()) mixedFound = true default: - Warnf("error: %v\n", hint) + printer.E("error: %v\n", hint) errorsFound = true } } if suggestIndexRebuild { - Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n") + term.Print("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") } if mixedFound { - Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") + term.Print("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") } if len(errs) > 0 { for _, err := range errs { - Warnf("error: %v\n", err) + printer.E("error: %v\n", err) } - return errors.Fatal("LoadIndex returned errors") + + printer.E("\nThe repository index is damaged and must be repaired. You must run `restic repair index' to correct this.\n\n") + return errors.Fatal("repository contains errors") } orphanedPacks := 0 errChan := make(chan error) + salvagePacks := restic.NewIDSet() - Verbosef("check all packs\n") + printer.P("check all packs\n") go chkr.Packs(ctx, errChan) for err := range errChan { - if checker.IsOrphanedPack(err) { - orphanedPacks++ - Verbosef("%v\n", err) - } else if err == checker.ErrLegacyLayout { - Verbosef("repository still uses the S3 legacy layout\nPlease run `restic migrate s3legacy` to correct this.\n") + var packErr *checker.PackError + if errors.As(err, &packErr) { + if packErr.Orphaned { + orphanedPacks++ + printer.V("%v\n", err) + } else { + if packErr.Truncated { + salvagePacks.Insert(packErr.ID) + } + errorsFound = true + printer.E("%v\n", err) + } } else { errorsFound = true - Warnf("%v\n", err) + printer.E("%v\n", err) } } - if orphanedPacks > 0 { - Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) + if orphanedPacks > 0 && !errorsFound { + // hide notice if repository is damaged + printer.P("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) + } + if ctx.Err() != nil { + return ctx.Err() } - Verbosef("check snapshots, trees and blobs\n") + printer.P("check snapshots, trees and blobs\n") errChan = make(chan error) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - bar := newProgressMax(!gopts.Quiet, 0, "snapshots") + bar := newTerminalProgressMax(!gopts.Quiet, 0, "snapshots", term) defer bar.Done() chkr.Structure(ctx, bar, errChan) }() @@ -297,16 +324,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for err := range errChan { errorsFound = true if e, ok := err.(*checker.TreeError); ok { - var clean string - if stdoutCanUpdateStatus() { - clean 
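The pack check now reports a single structured error type whose fields classify the failure, and errors.As recovers that classification at the call site. A sketch using the field names the hunk above relies on (ID, Orphaned, Truncated); everything else is assumed:

    package sketch

    import (
        "errors"
        "fmt"
    )

    // PackError carries classification flags instead of being one of
    // several sentinel errors.
    type PackError struct {
        ID        string
        Orphaned  bool
        Truncated bool
        Err       error
    }

    func (e *PackError) Error() string { return fmt.Sprintf("pack %v: %v", e.ID, e.Err) }
    func (e *PackError) Unwrap() error { return e.Err }

    // classify shows the errors.As pattern from the check loop above.
    func classify(err error) (orphaned, truncated bool) {
        var pe *PackError
        if errors.As(err, &pe) {
            return pe.Orphaned, pe.Truncated
        }
        return false, false
    }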
= clearLine(0) - } - Warnf(clean+"error for tree %v:\n", e.ID.Str()) + printer.E("error for tree %v:\n", e.ID.Str()) for _, treeErr := range e.Errors { - Warnf(" %v\n", treeErr) + printer.E(" %v\n", treeErr) } } else { - Warnf("error: %v\n", err) + printer.E("error: %v\n", err) } } @@ -314,10 +337,17 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args // Must happen after `errChan` is read from in the above loop to avoid // deadlocking in the case of errors. wg.Wait() + if ctx.Err() != nil { + return ctx.Err() + } if opts.CheckUnused { - for _, id := range chkr.UnusedBlobs(ctx) { - Verbosef("unused blob %v\n", id) + unused, err := chkr.UnusedBlobs(ctx) + if err != nil { + return err + } + for _, id := range unused { + printer.P("unused blob %v\n", id) errorsFound = true } } @@ -325,38 +355,24 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData := func(packs map[restic.ID]int64) { packCount := uint64(len(packs)) - p := newProgressMax(!gopts.Quiet, packCount, "packs") + p := newTerminalProgressMax(!gopts.Quiet, packCount, "packs", term) errChan := make(chan error) go chkr.ReadPacks(ctx, packs, p, errChan) - var salvagePacks restic.IDs - for err := range errChan { errorsFound = true - Warnf("%v\n", err) - if err, ok := err.(*checker.ErrPackData); ok { - if strings.Contains(err.Error(), "wrong data returned, hash is") { - salvagePacks = append(salvagePacks, err.PackID) - } + printer.E("%v\n", err) + if err, ok := err.(*repository.ErrPackData); ok { + salvagePacks.Insert(err.PackID) } } p.Done() - - if len(salvagePacks) > 0 { - Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands:\n\n") - var strIds []string - for _, id := range salvagePacks { - strIds = append(strIds, id.String()) - } - Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIds, " ")) - Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. 
Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") - } } switch { case opts.ReadData: - Verbosef("read all data\n") + printer.P("read all data\n") doReadData(selectPacksByBucket(chkr.GetPacks(), 1, 1)) case opts.ReadDataSubset != "": var packs map[restic.ID]int64 @@ -366,12 +382,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args totalBuckets := dataSubset[1] packs = selectPacksByBucket(chkr.GetPacks(), bucket, totalBuckets) packCount := uint64(len(packs)) - Verbosef("read group #%d of %d data packs (out of total %d packs in %d groups)\n", bucket, packCount, chkr.CountPacks(), totalBuckets) + printer.P("read group #%d of %d data packs (out of total %d packs in %d groups)\n", bucket, packCount, chkr.CountPacks(), totalBuckets) } else if strings.HasSuffix(opts.ReadDataSubset, "%") { percentage, err := parsePercentage(opts.ReadDataSubset) if err == nil { packs = selectRandomPacksByPercentage(chkr.GetPacks(), percentage) - Verbosef("read %.1f%% of data packs\n", percentage) + printer.P("read %.1f%% of data packs\n", percentage) } } else { repoSize := int64(0) @@ -387,7 +403,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args subsetSize = repoSize } packs = selectRandomPacksByFileSize(chkr.GetPacks(), subsetSize, repoSize) - Verbosef("read %d bytes of data packs\n", subsetSize) + printer.P("read %d bytes of data packs\n", subsetSize) } if packs == nil { return errors.Fatal("internal error: failed to select packs to check") @@ -395,11 +411,27 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData(packs) } + if len(salvagePacks) > 0 { + printer.E("\nThe repository contains damaged pack files. These damaged files must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") + var strIDs []string + for id := range salvagePacks { + strIDs = append(strIDs, id.String()) + } + printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) + printer.E("Damaged pack files can be caused by backend problems, hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") + } + + if ctx.Err() != nil { + return ctx.Err() + } + if errorsFound { + if len(salvagePacks) == 0 { + printer.E("\nThe repository is damaged and must be repaired. 
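--read-data-subset dispatches on the shape of its argument: "n/t" checks bucket n of t, "p%" samples packs randomly by percentage, and anything else is read as a byte size to sample by total file size. A simplified dispatcher; the real code also validates ranges and parses size suffixes like "500M":

    package sketch

    import (
        "strconv"
        "strings"
    )

    // parseSubset distinguishes the three accepted spellings.
    func parseSubset(s string) (kind string, a, b float64, ok bool) {
        if parts := strings.Split(s, "/"); len(parts) == 2 {
            x, err1 := strconv.ParseFloat(parts[0], 64)
            y, err2 := strconv.ParseFloat(parts[1], 64)
            return "bucket", x, y, err1 == nil && err2 == nil
        }
        if strings.HasSuffix(s, "%") {
            p, err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64)
            return "percent", p, 0, err == nil
        }
        // everything else is treated as a byte size; suffix parsing is
        // omitted in this sketch
        return "size", 0, 0, true
    }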
Please follow the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html .\n\n") + } return errors.Fatal("repository contains errors") } - - Verbosef("no errors were found\n") + printer.P("no errors were found\n") return nil } diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go index 9eb4fec62a1..f1e6517e093 100644 --- a/cmd/restic/cmd_check_integration_test.go +++ b/cmd/restic/cmd_check_integration_test.go @@ -1,10 +1,12 @@ package main import ( + "bytes" "context" "testing" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunCheck(t testing.TB, gopts GlobalOptions) { @@ -23,12 +25,14 @@ func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { } func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { - buf, err := withCaptureStdout(func() error { + buf := bytes.NewBuffer(nil) + gopts.stdout = buf + err := withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { opts := CheckOptions{ ReadData: true, CheckUnused: checkUnused, } - return runCheck(context.TODO(), opts, gopts, nil) + return runCheck(context.TODO(), opts, gopts, nil, term) }) return buf.String(), err } diff --git a/cmd/restic/cmd_check_test.go b/cmd/restic/cmd_check_test.go index 4d54488cdd5..18d607a14c3 100644 --- a/cmd/restic/cmd_check_test.go +++ b/cmd/restic/cmd_check_test.go @@ -1,12 +1,17 @@ package main import ( + "io/fs" "math" + "os" "reflect" + "strings" "testing" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" ) func TestParsePercentage(t *testing.T) { @@ -163,3 +168,79 @@ func TestSelectNoRandomPacksByFileSize(t *testing.T) { selectedPacks := selectRandomPacksByFileSize(testPacks, 10, 500) rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs") } + +func checkIfFileWithSimilarNameExists(files []fs.DirEntry, fileName string) bool { + found := false + for _, file := range files { + if file.IsDir() { + dirName := file.Name() + if strings.Contains(dirName, fileName) { + found = true + } + } + } + return found +} + +func TestPrepareCheckCache(t *testing.T) { + // Create a temporary directory for the cache + tmpDirBase := t.TempDir() + + testCases := []struct { + opts CheckOptions + withValidCache bool + }{ + {CheckOptions{WithCache: true}, true}, // Shouldn't create temp directory + {CheckOptions{WithCache: false}, true}, // Should create temp directory + {CheckOptions{WithCache: false}, false}, // Should create cache directory first, then temp directory + } + + for _, testCase := range testCases { + t.Run("", func(t *testing.T) { + if !testCase.withValidCache { + // remove tmpDirBase to simulate non-existing cache directory + err := os.Remove(tmpDirBase) + rtest.OK(t, err) + } + gopts := GlobalOptions{CacheDir: tmpDirBase} + cleanup := prepareCheckCache(testCase.opts, &gopts, &progress.NoopPrinter{}) + files, err := os.ReadDir(tmpDirBase) + rtest.OK(t, err) + + if !testCase.opts.WithCache { + // If using a temporary cache directory, the cache directory should exist + // listing all directories inside tmpDirBase (cacheDir) + // one directory should be tmpDir created by prepareCheckCache with 'restic-check-cache-' in path + found := checkIfFileWithSimilarNameExists(files, "restic-check-cache-") + if !found { + t.Errorf("Expected temporary directory to exist, but it does not") + } 
+ } else { + // If not using the cache, the temp directory should not exist + rtest.Assert(t, len(files) == 0, "expected cache directory not to exist, but it does: %v", files) + } + + // Call the cleanup function to remove the temporary cache directory + cleanup() + + // Verify that the cache directory has been removed + files, err = os.ReadDir(tmpDirBase) + rtest.OK(t, err) + rtest.Assert(t, len(files) == 0, "Expected cache directory to be removed, but it still exists: %v", files) + }) + } +} + +func TestPrepareDefaultCheckCache(t *testing.T) { + gopts := GlobalOptions{CacheDir: ""} + cleanup := prepareCheckCache(CheckOptions{}, &gopts, &progress.NoopPrinter{}) + _, err := os.ReadDir(gopts.CacheDir) + rtest.OK(t, err) + + // Call the cleanup function to remove the temporary cache directory + cleanup() + + // Verify that the cache directory has been removed + _, err = os.ReadDir(gopts.CacheDir) + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "Expected cache directory to be removed, but it still exists") +} diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 92922b42b62..cd92193ac38 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -30,7 +30,18 @@ This means that copied files, which existed in both the source and destination repository, /may occupy up to twice their space/ in the destination repository. This can be mitigated by the "--copy-chunker-params" option when initializing a new destination repository using the "init" command. + +EXIT STATUS +=========== + +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, + DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) }, @@ -53,7 +64,7 @@ func init() { } func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error { - secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination") + secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination") if err != nil { return err } @@ -62,30 +73,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] gopts, secondaryGopts = secondaryGopts, gopts } - srcRepo, err := OpenRepository(ctx, gopts) + ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() - dstRepo, err := OpenRepository(ctx, secondaryGopts) - if err != nil { - return err - } - - if !gopts.NoLock { - var srcLock *restic.Lock - srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(srcLock) - if err != nil { - return err - } - } - - dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(dstLock) + ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false) if err != nil { return err } + defer unlock() srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile) if err != nil { @@ -116,6 +114,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] // also consider identical snapshot copies dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn) } + if ctx.Err() != nil { + return ctx.Err() + } // remember already processed 
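The copy path records, for every blob missing from the destination, both the blob handle and (via an index lookup) each pack that contains it, so that data can later be fetched pack by pack instead of blob by blob. The bookkeeping reduces to roughly this, with stand-in types rather than restic's:

    package sketch

    type blobHandle struct{ typ, id string }

    type packedBlob struct{ packID string }

    // planCopy mirrors the enqueue closure in copyTree below: remember
    // the blob and every pack the index says contains it.
    func planCopy(lookup func(blobHandle) []packedBlob, missing []blobHandle) (map[blobHandle]bool, map[string]bool) {
        blobs := map[blobHandle]bool{}
        packs := map[string]bool{}
        for _, h := range missing {
            blobs[h] = true
            for _, pb := range lookup(h) {
                packs[pb.packID] = true
            }
        }
        return blobs, packs
    }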
trees across all snapshots visitedTrees := restic.NewIDSet() @@ -160,7 +161,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] } Verbosef("snapshot %s saved\n", newID.Str()) } - return nil + return ctx.Err() } func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool { @@ -197,7 +198,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep packList := restic.NewIDSet() enqueue := func(h restic.BlobHandle) { - pb := srcRepo.Index().Lookup(h) + pb := srcRepo.LookupBlob(h.Type, h.ID) copyBlobs.Insert(h) for _, p := range pb { packList.Insert(p.PackID) @@ -212,7 +213,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Do we already have this tree blob? treeHandle := restic.BlobHandle{ID: tree.ID, Type: restic.TreeBlob} - if !dstRepo.Index().Has(treeHandle) { + if _, ok := dstRepo.LookupBlobSize(treeHandle.Type, treeHandle.ID); !ok { // copy raw tree bytes to avoid problems if the serialization changes enqueue(treeHandle) } @@ -222,7 +223,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Copy the blobs for this file. for _, blobID := range entry.Content { h := restic.BlobHandle{Type: restic.DataBlob, ID: blobID} - if !dstRepo.Index().Has(h) { + if _, ok := dstRepo.LookupBlobSize(h.Type, h.ID); !ok { enqueue(h) } } diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go index 1c883769088..9ae78ba5090 100644 --- a/cmd/restic/cmd_copy_integration_test.go +++ b/cmd/restic/cmd_copy_integration_test.go @@ -13,10 +13,12 @@ func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) { gopts := srcGopts gopts.Repo = dstGopts.Repo gopts.password = dstGopts.password + gopts.InsecureNoPassword = dstGopts.InsecureNoPassword copyOpts := CopyOptions{ secondaryRepoOptions: secondaryRepoOptions{ - Repo: srcGopts.Repo, - password: srcGopts.password, + Repo: srcGopts.Repo, + password: srcGopts.password, + InsecureNoPassword: srcGopts.InsecureNoPassword, }, } @@ -60,11 +62,11 @@ func TestCopy(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) origRestores[restoredir] = struct{}{} - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) } for i, snapshotID := range copiedSnapshotIDs { restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i)) - testRunRestore(t, env2.gopts, restoredir, snapshotID) + testRunRestore(t, env2.gopts, restoredir, snapshotID.String()) foundMatch := false for cmpdir := range origRestores { diff := directoriesContentsDiff(restoredir, cmpdir) @@ -134,3 +136,22 @@ func TestCopyUnstableJSON(t *testing.T) { testRunCheck(t, env2.gopts) testListSnapshots(t, env2.gopts, 1) } + +func TestCopyToEmptyPassword(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + env2, cleanup2 := withTestEnvironment(t) + defer cleanup2() + env2.gopts.password = "" + env2.gopts.InsecureNoPassword = true + + testSetupBackupData(t, env) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, BackupOptions{}, env.gopts) + + testRunInit(t, env2.gopts) + testRunCopy(t, env.gopts, env2.gopts) + + testListSnapshots(t, env.gopts, 1) + testListSnapshots(t, env2.gopts, 1) + testRunCheck(t, env2.gopts) +} diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index a87e7a0c596..4ce17f899e2 100644 --- a/cmd/restic/cmd_debug.go +++ 
b/cmd/restic/cmd_debug.go @@ -20,18 +20,19 @@ import ( "github.com/spf13/cobra" "golang.org/x/sync/errgroup" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" ) var cmdDebug = &cobra.Command{ - Use: "debug", - Short: "Debug commands", + Use: "debug", + Short: "Debug commands", + GroupID: cmdGroupDefault, + DisableAutoGenTag: true, } var cmdDebugDump = &cobra.Command{ @@ -44,7 +45,11 @@ is used for debugging purposes only. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { @@ -138,7 +143,7 @@ func printPacks(ctx context.Context, repo *repository.Repository, wr io.Writer) } func dumpIndexes(ctx context.Context, repo restic.ListerLoaderUnpacked, wr io.Writer) error { - return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error { Printf("index_id: %v\n", id) if err != nil { return err @@ -153,19 +158,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] @@ -324,10 +321,11 @@ func loadBlobs(ctx context.Context, opts DebugExamineOptions, repo restic.Reposi if err != nil { panic(err) } - be := repo.Backend() - h := backend.Handle{ - Name: packID.String(), - Type: restic.PackFile, + + pack, err := repo.LoadRaw(ctx, restic.PackFile, packID) + // allow processing broken pack files + if pack == nil { + return err } wg, ctx := errgroup.WithContext(ctx) @@ -339,19 +337,11 @@ func loadBlobs(ctx context.Context, opts DebugExamineOptions, repo restic.Reposi wg.Go(func() error { for _, blob := range list { Printf(" loading blob %v at %v (length %v)\n", blob.ID, blob.Offset, blob.Length) - buf := make([]byte, blob.Length) - err := be.Load(ctx, h, int(blob.Length), int64(blob.Offset), func(rd io.Reader) error { - n, err := io.ReadFull(rd, buf) - if err != nil { - return fmt.Errorf("read error after %d bytes: %v", n, err) - } - return nil - }) - if err != nil { - Warnf("error read: %v\n", err) + if int(blob.Offset+blob.Length) > len(pack) { + Warnf("skipping truncated blob\n") continue } - + buf := pack[blob.Offset : blob.Offset+blob.Length] key := repo.Key() nonce, plaintext := buf[:key.NonceSize()], buf[key.NonceSize():] @@ -442,10 +432,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error { } func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts 
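Both debug paths above switch to repo.LoadRaw, whose contract as used here is that a damaged file can come back with the readable bytes and a non-nil error at the same time; only a nil buffer aborts. A caller that wants to examine partial data therefore branches on the buffer, not the error:

    package sketch

    import "fmt"

    // processPossiblyBroken keeps going when loadRaw returned partial
    // data, matching the "allow processing broken pack files" comments
    // in the hunks above. loadRaw is a stand-in for repo.LoadRaw.
    func processPossiblyBroken(loadRaw func(id string) ([]byte, error), id string) error {
        buf, err := loadRaw(id)
        if buf == nil {
            return err // nothing usable was read
        }
        if err != nil {
            fmt.Printf("pack %v is damaged, examining the readable part\n", id)
        }
        // ... inspect buf ...
        return nil
    }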
DebugExamineOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + if opts.ExtractPack && gopts.NoLock { + return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive") + } + + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() ids := make([]restic.ID, 0) for _, name := range args { @@ -464,15 +459,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine return errors.Fatal("no pack files to examine") } - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) if err != nil { @@ -494,20 +480,12 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repository, id restic.ID) error { Printf("examine %v\n", id) - h := backend.Handle{ - Type: restic.PackFile, - Name: id.String(), - } - fi, err := repo.Backend().Stat(ctx, h) - if err != nil { - return err - } - Printf(" file size is %v\n", fi.Size) - - buf, err := backend.LoadAll(ctx, nil, repo.Backend(), h) - if err != nil { + buf, err := repo.LoadRaw(ctx, restic.PackFile, id) + // also process damaged pack files + if buf == nil { return err } + Printf(" file size is %v\n", len(buf)) gotID := restic.Hash(buf) if !id.Equal(gotID) { Printf(" wanted hash %v, got %v\n", id, gotID) @@ -520,13 +498,13 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo blobsLoaded := false // examine all data the indexes have for the pack file - for b := range repo.Index().ListPacks(ctx, restic.NewIDSet(id)) { + for b := range repo.ListPacksFromIndex(ctx, restic.NewIDSet(id)) { blobs := b.Blobs if len(blobs) == 0 { continue } - checkPackSize(blobs, fi.Size) + checkPackSize(blobs, len(buf)) err = loadBlobs(ctx, opts, repo, id, blobs) if err != nil { @@ -539,11 +517,11 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo Printf(" ========================================\n") Printf(" inspect the pack itself\n") - blobs, _, err := repo.ListPack(ctx, id, fi.Size) + blobs, _, err := repo.ListPack(ctx, id, int64(len(buf))) if err != nil { return fmt.Errorf("pack %v: %v", id.Str(), err) } - checkPackSize(blobs, fi.Size) + checkPackSize(blobs, len(buf)) if !blobsLoaded { return loadBlobs(ctx, opts, repo, id, blobs) @@ -551,7 +529,7 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo return nil } -func checkPackSize(blobs []restic.Blob, fileSize int64) { +func checkPackSize(blobs []restic.Blob, fileSize int) { // track current size and offset var size, offset uint64 diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 06f2be2aeca..d1067b5ecd1 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -33,14 +33,19 @@ Metadata comparison will likely not work if a backup was created using the '--ignore-inode' or '--ignore-ctime' option. To only compare files in specific subfolders, you can use the -":" syntax, where "subfolder" is a path within the +"snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
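With the pack held in memory, each blob is sliced straight out of the buffer. The explicit bounds check matters because an index can describe more data than a truncated file actually contains; a compact version of that loop:

    package sketch

    type blobRef struct {
        Offset, Length uint
    }

    // sliceBlobs cuts each indexed blob out of a raw pack buffer and
    // skips entries pointing past the end of a truncated file, like the
    // "skipping truncated blob" branch above.
    func sliceBlobs(pack []byte, refs []blobRef) [][]byte {
        var out [][]byte
        for _, b := range refs {
            if int(b.Offset+b.Length) > len(pack) {
                continue // index promises more bytes than the file has
            }
            out = append(out, pack[b.Offset:b.Offset+b.Length])
        }
        return out
    }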
+Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runDiff(cmd.Context(), diffOptions, globalOptions, args) @@ -103,9 +108,9 @@ func (s *DiffStat) Add(node *restic.Node) { } switch node.Type { - case "file": + case restic.NodeTypeFile: s.Files++ - case "dir": + case restic.NodeTypeDir: s.Dirs++ default: s.Others++ @@ -119,7 +124,7 @@ func addBlobs(bs restic.BlobSet, node *restic.Node) { } switch node.Type { - case "file": + case restic.NodeTypeFile: for _, blob := range node.Content { h := restic.BlobHandle{ ID: blob, @@ -127,7 +132,7 @@ func addBlobs(bs restic.BlobSet, node *restic.Node) { } bs.Insert(h) } - case "dir": + case restic.NodeTypeDir: h := restic.BlobHandle{ ID: *node.Subtree, Type: restic.TreeBlob, @@ -156,7 +161,7 @@ func updateBlobs(repo restic.Loader, blobs restic.BlobSet, stats *DiffStat) { stats.TreeBlobs++ } - size, found := repo.LookupBlobSize(h.ID, h.Type) + size, found := repo.LookupBlobSize(h.Type, h.ID) if !found { Warnf("unable to find blob size for %v\n", h) continue @@ -174,23 +179,27 @@ func (c *Comparer) printDir(ctx context.Context, mode string, stats *DiffStat, b } for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + name := path.Join(prefix, node.Name) - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { name += "/" } c.printChange(NewChange(name, mode)) stats.Add(node) addBlobs(blobs, node) - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { err := c.printDir(ctx, mode, stats, blobs, name, *node.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } } - return nil + return ctx.Err() } func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id restic.ID) error { @@ -201,17 +210,21 @@ func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id rest } for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + addBlobs(blobs, node) - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { err := c.collectDir(ctx, blobs, *node.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } } - return nil + return ctx.Err() } func uniqueNodeNames(tree1, tree2 *restic.Tree) (tree1Nodes, tree2Nodes map[string]*restic.Node, uniqueNames []string) { @@ -252,6 +265,10 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref tree1Nodes, tree2Nodes, names := uniqueNodeNames(tree1, tree2) for _, name := range names { + if ctx.Err() != nil { + return ctx.Err() + } + node1, t1 := tree1Nodes[name] node2, t2 := tree2Nodes[name] @@ -267,12 +284,12 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref mod += "T" } - if node2.Type == "dir" { + if node2.Type == restic.NodeTypeDir { name += "/" } - if node1.Type == "file" && - node2.Type == "file" && + if node1.Type == restic.NodeTypeFile && + node2.Type == restic.NodeTypeFile && !reflect.DeepEqual(node1.Content, node2.Content) { mod += "M" stats.ChangedFiles++ @@ -294,49 +311,49 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref c.printChange(NewChange(name, mod)) } - if node1.Type == "dir" && node2.Type == "dir" { + if node1.Type == restic.NodeTypeDir && node2.Type == restic.NodeTypeDir { var err error if 
(*node1.Subtree).Equal(*node2.Subtree) { err = c.collectDir(ctx, stats.BlobsCommon, *node1.Subtree) } else { err = c.diffTree(ctx, stats, name, *node1.Subtree, *node2.Subtree) } - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } case t1 && !t2: prefix := path.Join(prefix, name) - if node1.Type == "dir" { + if node1.Type == restic.NodeTypeDir { prefix += "/" } c.printChange(NewChange(prefix, "-")) stats.Removed.Add(node1) - if node1.Type == "dir" { + if node1.Type == restic.NodeTypeDir { err := c.printDir(ctx, "-", &stats.Removed, stats.BlobsBefore, prefix, *node1.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } case !t1 && t2: prefix := path.Join(prefix, name) - if node2.Type == "dir" { + if node2.Type == restic.NodeTypeDir { prefix += "/" } c.printChange(NewChange(prefix, "+")) stats.Added.Add(node2) - if node2.Type == "dir" { + if node2.Type == restic.NodeTypeDir { err := c.printDir(ctx, "+", &stats.Added, stats.BlobsAfter, prefix, *node2.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } } } - return nil + return ctx.Err() } func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []string) error { @@ -344,19 +361,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] return errors.Fatalf("specify two snapshot IDs") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() // cache snapshots listing be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) @@ -418,7 +427,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] } if gopts.Quiet { - c.printChange = func(change *Change) {} + c.printChange = func(_ *Change) {} } stats := &DiffStatsContainer{ diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 1628d6a9533..6b7f8d01285 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -28,14 +28,19 @@ The special snapshotID "latest" can be used to use the latest snapshot in the repository. To include the folder content at the root of the archive, you can use the -":" syntax, where "subfolder" is a path within the +"snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runDump(cmd.Context(), dumpOptions, globalOptions, args) @@ -82,19 +87,23 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoade item := filepath.Join(prefix, pathComponents[0]) l := len(pathComponents) for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + // If dumping something in the highest level it will just take the // first item it finds and dump that according to the switch case below. 
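The repeated replacement of bare "dir" and "file" strings with restic.NodeTypeDir and restic.NodeTypeFile throughout this diff is the typed-string pattern: the encoding of a node stays a plain string, but Go code can no longer misspell a node type without the compiler objecting. In miniature:

    package sketch

    // NodeType is a typed string; its underlying representation (and any
    // JSON encoding) is unchanged from a plain string.
    type NodeType string

    const (
        NodeTypeFile NodeType = "file"
        NodeTypeDir  NodeType = "dir"
    )

    // isDir compiles only against the defined constants; a typo such as
    // NodeTypeDri is a build error rather than a silent mismatch.
    func isDir(t NodeType) bool { return t == NodeTypeDir }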
if node.Name == pathComponents[0] { switch { - case l == 1 && dump.IsFile(node): + case l == 1 && node.Type == restic.NodeTypeFile: return d.WriteNode(ctx, node) - case l > 1 && dump.IsDir(node): + case l > 1 && node.Type == restic.NodeTypeDir: subtree, err := restic.LoadTree(ctx, repo, *node.Subtree) if err != nil { return errors.Wrapf(err, "cannot load subtree for %q", item) } return printFromTree(ctx, subtree, repo, item, pathComponents[1:], d, canWriteArchiveFunc) - case dump.IsDir(node): + case node.Type == restic.NodeTypeDir: if err := canWriteArchiveFunc(); err != nil { return err } @@ -105,7 +114,7 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoade return d.DumpTree(ctx, subtree, item) case l > 1: return fmt.Errorf("%q should be a dir, but is a %q", item, node.Type) - case !dump.IsFile(node): + case node.Type != restic.NodeTypeFile: return fmt.Errorf("%q should be a file, but is a %q", item, node.Type) } } @@ -131,19 +140,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] splittedPath := splitPath(path.Clean(pathToPrint)) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, @@ -174,7 +175,7 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] canWriteArchiveFunc := checkStdoutArchive if opts.Target != "" { - file, err := os.OpenFile(opts.Target, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666) + file, err := os.Create(opts.Target) if err != nil { return fmt.Errorf("cannot dump to file: %w", err) } diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go new file mode 100644 index 00000000000..a2f04be311c --- /dev/null +++ b/cmd/restic/cmd_features.go @@ -0,0 +1,59 @@ +package main + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/ui/table" + + "github.com/spf13/cobra" +) + +var featuresCmd = &cobra.Command{ + Use: "features", + Short: "Print list of feature flags", + Long: ` +The "features" command prints a list of supported feature flags. + +To pass feature flags to restic, set the RESTIC_FEATURES environment variable +to "featureA=true,featureB=false". Specifying an unknown feature flag is an error. + +A feature can either be in alpha, beta, stable or deprecated state. +An _alpha_ feature is disabled by default and may change in arbitrary ways between restic versions or be removed. +A _beta_ feature is enabled by default, but still can change in minor ways or be removed. +A _stable_ feature is always enabled and cannot be disabled. The flag will be removed in a future restic version. +A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed in a future restic version. + +EXIT STATUS +=========== + +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
+`, + GroupID: cmdGroupAdvanced, + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.Fatal("the feature command expects no arguments") + } + + fmt.Printf("All Feature Flags:\n") + flags := feature.Flag.List() + + tab := table.New() + tab.AddColumn("Name", "{{ .Name }}") + tab.AddColumn("Type", "{{ .Type }}") + tab.AddColumn("Default", "{{ .Default }}") + tab.AddColumn("Description", "{{ .Description }}") + + for _, flag := range flags { + tab.AddRow(flag) + } + return tab.Write(globalOptions.stdout) + }, +} + +func init() { + cmdRoot.AddCommand(featuresCmd) +} diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 04e6ae3ddb7..2e06fa00c60 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -33,8 +33,13 @@ restic find --pack 025c1d06 EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runFind(cmd.Context(), findOptions, globalOptions, args) @@ -126,6 +131,7 @@ func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) { // Make the following attributes disappear Name byte `json:"name,omitempty"` ExtendedAttributes byte `json:"extended_attributes,omitempty"` + GenericAttributes byte `json:"generic_attributes,omitempty"` Device byte `json:"device,omitempty"` Content byte `json:"content,omitempty"` Subtree byte `json:"subtree,omitempty"` @@ -292,7 +298,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error } var errIfNoMatch error - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { var childMayMatch bool for _, pat := range f.pat.pattern { mayMatch, err := filter.ChildMatch(pat, normalizedNodepath) @@ -330,6 +336,26 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error }}) } +func (f *Finder) findTree(treeID restic.ID, nodepath string) error { + found := false + if _, ok := f.treeIDs[treeID.String()]; ok { + found = true + } else if _, ok := f.treeIDs[treeID.Str()]; ok { + found = true + } + if found { + f.out.PrintObject("tree", treeID.String(), nodepath, "", f.out.newsn) + f.itemsFound++ + // Terminate if we have found all trees (and we are not + // looking for blobs) + if f.itemsFound >= len(f.treeIDs) && f.blobIDs == nil { + // Return an error to terminate the Walk + return errors.New("OK") + } + } + return nil +} + func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { debug.Log("searching IDs in snapshot %s", sn.ID()) @@ -348,31 +374,26 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { } if node == nil { + if nodepath == "/" { + if err := f.findTree(parentTreeID, "/"); err != nil { + return err + } + } return nil } if node.Type == "dir" && f.treeIDs != nil { - treeID := node.Subtree - found := false - if _, ok := f.treeIDs[treeID.Str()]; ok { - found = true - } else if _, ok := f.treeIDs[treeID.String()]; ok { - found = true - } - if found { - f.out.PrintObject("tree", treeID.String(), nodepath, "", sn) - f.itemsFound++ - // Terminate if we have found all trees (and we are not - // looking for blobs) - if f.itemsFound >= len(f.treeIDs) && 
f.blobIDs == nil { - // Return an error to terminate the Walk - return errors.New("OK") - } + if err := f.findTree(*node.Subtree, nodepath); err != nil { + return err } } - if node.Type == "file" && f.blobIDs != nil { + if node.Type == restic.NodeTypeFile && f.blobIDs != nil { for _, id := range node.Content { + if ctx.Err() != nil { + return ctx.Err() + } + idStr := id.String() if _, ok := f.blobIDs[idStr]; !ok { // Look for short ID form @@ -438,7 +459,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { if err != errAllPacksFound { // try to resolve unknown pack ids from the index - packIDs = f.indexPacksToBlobs(ctx, packIDs) + packIDs, err = f.indexPacksToBlobs(ctx, packIDs) + if err != nil { + return err + } } if len(packIDs) > 0 { @@ -455,13 +479,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { return nil } -func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} { +func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) { wctx, cancel := context.WithCancel(ctx) defer cancel() // remember which packs were found in the index indexPackIDs := make(map[string]struct{}) - f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { + err := f.repo.ListBlobs(wctx, func(pb restic.PackedBlob) { idStr := pb.PackID.String() // keep entry in packIDs as Each() returns individual index entries matchingID := false @@ -480,6 +504,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc indexPackIDs[idStr] = struct{}{} } }) + if err != nil { + return nil, err + } for id := range indexPackIDs { delete(packIDs, id) @@ -492,19 +519,17 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc } Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list) } - return packIDs + return packIDs, nil } func (f *Finder) findObjectPack(id string, t restic.BlobType) { - idx := f.repo.Index() - rid, err := restic.ParseID(id) if err != nil { Printf("Note: cannot find pack for object '%s', unable to parse ID: %v\n", id, err) return } - blobs := idx.Lookup(restic.BlobHandle{ID: rid, Type: t}) + blobs := f.repo.LookupBlob(t, rid) if len(blobs) == 0 { Printf("Object %s not found in the index\n", rid.Str()) return @@ -562,19 +587,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] return errors.Fatal("cannot have several ID types") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -615,6 +632,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) { filteredSnapshots = append(filteredSnapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } sort.Slice(filteredSnapshots, func(i, j int) bool { return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go index dd8ab87fd7f..7e35cb141ac 100644 --- 
a/cmd/restic/cmd_find_integration_test.go +++ b/cmd/restic/cmd_find_integration_test.go @@ -10,11 +10,10 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { +func testRunFind(t testing.TB, wantJSON bool, opts FindOptions, gopts GlobalOptions, pattern string) []byte { buf, err := withCaptureStdout(func() error { gopts.JSON = wantJSON - opts := FindOptions{} return runFind(context.TODO(), opts, gopts, []string{pattern}) }) rtest.OK(t, err) @@ -31,14 +30,14 @@ func TestFind(t *testing.T) { testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) - results := testRunFind(t, false, env.gopts, "unexistingfile") + results := testRunFind(t, false, FindOptions{}, env.gopts, "unexistingfile") rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile) - results = testRunFind(t, false, env.gopts, "testfile") + results = testRunFind(t, false, FindOptions{}, env.gopts, "testfile") lines := strings.Split(string(results), "\n") rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile) - results = testRunFind(t, false, env.gopts, "testfile*") + results = testRunFind(t, false, FindOptions{}, env.gopts, "testfile*") lines = strings.Split(string(results), "\n") rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile) } @@ -67,21 +66,28 @@ func TestFindJSON(t *testing.T) { testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) + snapshot, _ := testRunSnapshots(t, env.gopts) - results := testRunFind(t, true, env.gopts, "unexistingfile") + results := testRunFind(t, true, FindOptions{}, env.gopts, "unexistingfile") matches := []testMatches{} rtest.OK(t, json.Unmarshal(results, &matches)) rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile) - results = testRunFind(t, true, env.gopts, "testfile") + results = testRunFind(t, true, FindOptions{}, env.gopts, "testfile") rtest.OK(t, json.Unmarshal(results, &matches)) rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile) rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile) - results = testRunFind(t, true, env.gopts, "testfile*") + results = testRunFind(t, true, FindOptions{}, env.gopts, "testfile*") rtest.OK(t, json.Unmarshal(results, &matches)) rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile) rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) + + results = testRunFind(t, true, FindOptions{TreeID: true}, env.gopts, snapshot.Tree.String()) + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", matches) + rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", matches[0].Matches) + rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) } diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 65ff449a38e..f9ae85cd158 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -3,11 +3,13 @@ package main import ( "context" "encoding/json" + "fmt" "io" "strconv" "github.com/restic/restic/internal/errors" 
"github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -18,6 +20,9 @@ var cmdForget = &cobra.Command{ The "forget" command removes snapshots according to a policy. All snapshots are first divided into groups according to "--group-by", and after that the policy specified by the "--keep-*" options is applied to each group individually. +If there are not enough snapshots to keep one for each duration related +"--keep-{within-,}*" option, the oldest snapshot in the group is kept +additionally. Please note that this command really only deletes the snapshot object in the repository, which is a reference to data stored there. In order to remove the @@ -29,11 +34,18 @@ security considerations. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, term, args) }, } @@ -88,6 +100,8 @@ type ForgetOptions struct { WithinYearly restic.Duration KeepTags restic.TagLists + UnsafeAllowRemoveAll bool + restic.SnapshotFilter Compact bool @@ -117,6 +131,7 @@ func init() { f.VarP(&forgetOptions.WithinMonthly, "keep-within-monthly", "", "keep monthly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot") f.VarP(&forgetOptions.WithinYearly, "keep-within-yearly", "", "keep yearly snapshots that are newer than `duration` (eg. 
1y5m7d2h) relative to the latest snapshot") f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)") + f.BoolVar(&forgetOptions.UnsafeAllowRemoveAll, "unsafe-allow-remove-all", false, "allow deleting all snapshots of a snapshot group") initMultiSnapshotFilter(f, &forgetOptions.SnapshotFilter, false) f.StringArrayVar(&forgetOptions.Hosts, "hostname", nil, "only consider snapshots with the given `hostname` (can be specified multiple times)") @@ -152,7 +167,7 @@ func verifyForgetOptions(opts *ForgetOptions) error { return nil } -func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error { +func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { err := verifyForgetOptions(&opts) if err != nil { return err @@ -163,23 +178,21 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - if gopts.NoLock && !opts.DryRun { return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command") } - if !opts.DryRun || !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) + if err != nil { + return err } + defer unlock() + + verbosity := gopts.verbosity + if gopts.JSON { + verbosity = 0 + } + printer := newTerminalProgressPrinter(verbosity, term) var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() @@ -187,6 +200,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } var jsonGroups []*ForgetGroup @@ -217,72 +233,91 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption Tags: opts.KeepTags, } - if policy.Empty() && len(args) == 0 { - if !gopts.JSON { - Verbosef("no policy was specified, no snapshots will be removed\n") + if policy.Empty() { + if opts.UnsafeAllowRemoveAll { + if opts.SnapshotFilter.Empty() { + return errors.Fatal("--unsafe-allow-remove-all is not allowed unless a snapshot filter option is specified") + } + // UnsafeAllowRemoveAll together with snapshot filter is fine + } else { + return errors.Fatal("no policy was specified, no snapshots will be removed") } } - if !policy.Empty() { - if !gopts.JSON { - Verbosef("Applying Policy: %v\n", policy) - } + printer.P("Applying Policy: %v\n", policy) - for k, snapshotGroup := range snapshotGroups { - if gopts.Verbose >= 1 && !gopts.JSON { - err = PrintSnapshotGroupHeader(globalOptions.stdout, k) - if err != nil { - return err - } - } + for k, snapshotGroup := range snapshotGroups { + if ctx.Err() != nil { + return ctx.Err() + } - var key restic.SnapshotGroupKey - if json.Unmarshal([]byte(k), &key) != nil { + if gopts.Verbose >= 1 && !gopts.JSON { + err = PrintSnapshotGroupHeader(globalOptions.stdout, k) + if err != nil { return err } + } - var fg ForgetGroup - fg.Tags = key.Tags - fg.Host = key.Hostname - fg.Paths = key.Paths + var key restic.SnapshotGroupKey + if json.Unmarshal([]byte(k), &key) != nil { + return err + } - keep, remove, 
reasons := restic.ApplyPolicy(snapshotGroup, policy) + var fg ForgetGroup + fg.Tags = key.Tags + fg.Host = key.Hostname + fg.Paths = key.Paths - if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("keep %d snapshots:\n", len(keep)) - PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) - Printf("\n") - } - addJSONSnapshots(&fg.Keep, keep) + keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) - if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("remove %d snapshots:\n", len(remove)) - PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) - Printf("\n") - } - addJSONSnapshots(&fg.Remove, remove) + if !policy.Empty() && len(keep) == 0 { + return fmt.Errorf("refusing to delete last snapshot of snapshot group \"%v\"", key.String()) + } + if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { + printer.P("keep %d snapshots:\n", len(keep)) + PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) + printer.P("\n") + } + fg.Keep = asJSONSnapshots(keep) + + if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { + printer.P("remove %d snapshots:\n", len(remove)) + PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) + printer.P("\n") + } + fg.Remove = asJSONSnapshots(remove) - fg.Reasons = reasons + fg.Reasons = asJSONKeeps(reasons) - jsonGroups = append(jsonGroups, &fg) + jsonGroups = append(jsonGroups, &fg) - for _, sn := range remove { - removeSnIDs.Insert(*sn.ID()) - } + for _, sn := range remove { + removeSnIDs.Insert(*sn.ID()) } } } + if ctx.Err() != nil { + return ctx.Err() + } + if len(removeSnIDs) > 0 { if !opts.DryRun { - err := DeleteFilesChecked(ctx, gopts, repo, removeSnIDs, restic.SnapshotFile) + bar := printer.NewCounter("files deleted") + err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.WriteableSnapshotFile, func(id restic.ID, err error) error { + if err != nil { + printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) + } else { + printer.VV("removed %v/%v\n", restic.SnapshotFile, id) + } + return nil + }, bar) + bar.Done() if err != nil { return err } } else { - if !gopts.JSON { - Printf("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) - } + printer.P("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) } } @@ -294,15 +329,13 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if len(removeSnIDs) > 0 && opts.Prune { - if !gopts.JSON { - if opts.DryRun { - Verbosef("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) - } else { - Verbosef("%d snapshots have been removed, running prune\n", len(removeSnIDs)) - } + if opts.DryRun { + printer.P("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) + } else { + printer.P("%d snapshots have been removed, running prune\n", len(removeSnIDs)) } pruneOptions.DryRun = opts.DryRun - return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs) + return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs, term) } return nil @@ -310,23 +343,47 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption // ForgetGroup helps to print what is forgotten in JSON. 
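// Illustrative sketch only (not verbatim restic output): given the json tags
// declared below, one serialized group would look roughly like
//
//	{"tags": null, "host": "example", "paths": ["/home"],
//	 "keep": [...], "remove": [...],
//	 "reasons": [{"snapshot": {...}, "matches": ["daily snapshot"]}]}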
type ForgetGroup struct { - Tags []string `json:"tags"` - Host string `json:"host"` - Paths []string `json:"paths"` - Keep []Snapshot `json:"keep"` - Remove []Snapshot `json:"remove"` - Reasons []restic.KeepReason `json:"reasons"` + Tags []string `json:"tags"` + Host string `json:"host"` + Paths []string `json:"paths"` + Keep []Snapshot `json:"keep"` + Remove []Snapshot `json:"remove"` + Reasons []KeepReason `json:"reasons"` } -func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) { +func asJSONSnapshots(list restic.Snapshots) []Snapshot { + var resultList []Snapshot for _, sn := range list { k := Snapshot{ Snapshot: sn, ID: sn.ID(), ShortID: sn.ID().Str(), } - *js = append(*js, k) + resultList = append(resultList, k) + } + return resultList +} + +// KeepReason helps to print KeepReasons as JSON with Snapshots with their ID included. +type KeepReason struct { + Snapshot Snapshot `json:"snapshot"` + Matches []string `json:"matches"` +} + +func asJSONKeeps(list []restic.KeepReason) []KeepReason { + var resultList []KeepReason + for _, keep := range list { + k := KeepReason{ + Snapshot: Snapshot{ + Snapshot: keep.Snapshot, + ID: keep.Snapshot.ID(), + ShortID: keep.Snapshot.ID().Str(), + }, + Matches: keep.Matches, + } + resultList = append(resultList, k) } + return resultList } func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error { diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go index 1c027a2408e..96dd7c63e29 100644 --- a/cmd/restic/cmd_forget_integration_test.go +++ b/cmd/restic/cmd_forget_integration_test.go @@ -2,15 +2,65 @@ package main import ( "context" + "path/filepath" + "strings" "testing" + "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) -func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { - opts := ForgetOptions{} +func testRunForgetMayFail(gopts GlobalOptions, opts ForgetOptions, args ...string) error { pruneOpts := PruneOptions{ MaxUnused: "5%", } - rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args)) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + }) +} + +func testRunForget(t testing.TB, gopts GlobalOptions, opts ForgetOptions, args ...string) { + rtest.OK(t, testRunForgetMayFail(gopts, opts, args...)) +} + +func TestRunForgetSafetyNet(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + + opts := BackupOptions{ + Host: "example", + } + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testListSnapshots(t, env.gopts, 2) + + // --keep-tags invalid + err := testRunForgetMayFail(env.gopts, ForgetOptions{ + KeepTags: restic.TagLists{restic.TagList{"invalid"}}, + GroupBy: restic.SnapshotGroupByOptions{Host: true, Path: true}, + }) + rtest.Assert(t, strings.Contains(err.Error(), `refusing to delete last snapshot of snapshot group "host example, path`), "wrong error message got %v", err) + + // disallow `forget --unsafe-allow-remove-all` + err = testRunForgetMayFail(env.gopts, ForgetOptions{ + UnsafeAllowRemoveAll: true, + }) + rtest.Assert(t, strings.Contains(err.Error(), `--unsafe-allow-remove-all is not allowed unless a snapshot filter option is specified`), "wrong error 
message got %v", err) + + // disallow `forget` without options + err = testRunForgetMayFail(env.gopts, ForgetOptions{}) + rtest.Assert(t, strings.Contains(err.Error(), `no policy was specified, no snapshots will be removed`), "wrong error message got %v", err) + + // `forget --host example --unsafe-allow-remove-all` should work + testRunForget(t, env.gopts, ForgetOptions{ + UnsafeAllowRemoveAll: true, + GroupBy: restic.SnapshotGroupByOptions{Host: true, Path: true}, + SnapshotFilter: restic.SnapshotFilter{ + Hosts: []string{opts.Host}, + }, + }) + testListSnapshots(t, env.gopts, 0) } diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go index 9d1652e936c..66b3fa7c52f 100644 --- a/cmd/restic/cmd_generate.go +++ b/cmd/restic/cmd_generate.go @@ -1,6 +1,8 @@ package main import ( + "io" + "os" "time" "github.com/restic/restic/internal/errors" @@ -18,10 +20,11 @@ and the auto-completion files for bash, fish and zsh). EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runGenerate(genOpts, args) }, } @@ -40,10 +43,10 @@ func init() { cmdRoot.AddCommand(cmdGenerate) fs := cmdGenerate.Flags() fs.StringVar(&genOpts.ManDir, "man", "", "write man pages to `directory`") - fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file`") - fs.StringVar(&genOpts.FishCompletionFile, "fish-completion", "", "write fish completion `file`") - fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file`") - fs.StringVar(&genOpts.PowerShellCompletionFile, "powershell-completion", "", "write powershell completion `file`") + fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file` (`-` for stdout)") + fs.StringVar(&genOpts.FishCompletionFile, "fish-completion", "", "write fish completion `file` (`-` for stdout)") + fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file` (`-` for stdout)") + fs.StringVar(&genOpts.PowerShellCompletionFile, "powershell-completion", "", "write powershell completion `file` (`-` for stdout)") } func writeManpages(dir string) error { @@ -64,32 +67,44 @@ func writeManpages(dir string) error { return doc.GenManTree(cmdRoot, header, dir) } -func writeBashCompletion(file string) error { +func writeCompletion(filename string, shell string, generate func(w io.Writer) error) (err error) { if stdoutIsTerminal() { - Verbosef("writing bash completion file to %v\n", file) + Verbosef("writing %s completion file to %v\n", shell, filename) } - return cmdRoot.GenBashCompletionFile(file) -} - -func writeFishCompletion(file string) error { - if stdoutIsTerminal() { - Verbosef("writing fish completion file to %v\n", file) + var outWriter io.Writer + if filename != "-" { + var outFile *os.File + outFile, err = os.Create(filename) + if err != nil { + return + } + defer func() { err = outFile.Close() }() + outWriter = outFile + } else { + outWriter = globalOptions.stdout } - return cmdRoot.GenFishCompletionFile(file, true) -} -func writeZSHCompletion(file string) error { - if stdoutIsTerminal() { - Verbosef("writing zsh completion file to %v\n", file) - } - return cmdRoot.GenZshCompletionFile(file) + err = generate(outWriter) + return } -func 
writePowerShellCompletion(file string) error { - if stdoutIsTerminal() { - Verbosef("writing powershell completion file to %v\n", file) +func checkStdoutForSingleShell(opts generateOptions) error { + completionFileOpts := []string{ + opts.BashCompletionFile, + opts.FishCompletionFile, + opts.ZSHCompletionFile, + opts.PowerShellCompletionFile, + } + seenIsStdout := false + for _, completionFileOpt := range completionFileOpts { + if completionFileOpt == "-" { + if seenIsStdout { + return errors.Fatal("the generate command can generate shell completions to stdout for a single shell only") + } + seenIsStdout = true + } } - return cmdRoot.GenPowerShellCompletionFile(file) + return nil } func runGenerate(opts generateOptions, args []string) error { @@ -104,29 +119,34 @@ func runGenerate(opts generateOptions, args []string) error { } } + err := checkStdoutForSingleShell(opts) + if err != nil { + return err + } + if opts.BashCompletionFile != "" { - err := writeBashCompletion(opts.BashCompletionFile) + err := writeCompletion(opts.BashCompletionFile, "bash", cmdRoot.GenBashCompletion) if err != nil { return err } } if opts.FishCompletionFile != "" { - err := writeFishCompletion(opts.FishCompletionFile) + err := writeCompletion(opts.FishCompletionFile, "fish", func(w io.Writer) error { return cmdRoot.GenFishCompletion(w, true) }) if err != nil { return err } } if opts.ZSHCompletionFile != "" { - err := writeZSHCompletion(opts.ZSHCompletionFile) + err := writeCompletion(opts.ZSHCompletionFile, "zsh", cmdRoot.GenZshCompletion) if err != nil { return err } } if opts.PowerShellCompletionFile != "" { - err := writePowerShellCompletion(opts.PowerShellCompletionFile) + err := writeCompletion(opts.PowerShellCompletionFile, "powershell", cmdRoot.GenPowerShellCompletion) if err != nil { return err } diff --git a/cmd/restic/cmd_generate_integration_test.go b/cmd/restic/cmd_generate_integration_test.go new file mode 100644 index 00000000000..0480abc04db --- /dev/null +++ b/cmd/restic/cmd_generate_integration_test.go @@ -0,0 +1,40 @@ +package main + +import ( + "bytes" + "strings" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestGenerateStdout(t *testing.T) { + testCases := []struct { + name string + opts generateOptions + }{ + {"bash", generateOptions{BashCompletionFile: "-"}}, + {"fish", generateOptions{FishCompletionFile: "-"}}, + {"zsh", generateOptions{ZSHCompletionFile: "-"}}, + {"powershell", generateOptions{PowerShellCompletionFile: "-"}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + err := runGenerate(tc.opts, []string{}) + rtest.OK(t, err) + completionString := buf.String() + rtest.Assert(t, strings.Contains(completionString, "# "+tc.name+" completion for restic"), "missing expected completion header") + }) + } + + t.Run("Generate shell completions to stdout for two shells", func(t *testing.T) { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + opts := generateOptions{BashCompletionFile: "-", FishCompletionFile: "-"} + err := runGenerate(opts, []string{}) + rtest.Assert(t, err != nil, "generating shell completions to stdout for two shells should fail") + }) +} diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 7154279e8ba..2a2aae1dc85 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -23,8 +23,10 @@ The "init" command initializes a new repository.
EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runInit(cmd.Context(), initOptions, globalOptions, args) @@ -80,7 +82,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] return err } - gopts.password, err = ReadPasswordTwice(gopts, + gopts.password, err = ReadPasswordTwice(ctx, gopts, "enter password for new repository: ", "enter password again: ") if err != nil { @@ -131,7 +133,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) { if opts.CopyChunkerParameters { - otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary") + otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary") if err != nil { return nil, err } diff --git a/cmd/restic/cmd_init_integration_test.go b/cmd/restic/cmd_init_integration_test.go index 9b5eed6e08e..4795d5510b9 100644 --- a/cmd/restic/cmd_init_integration_test.go +++ b/cmd/restic/cmd_init_integration_test.go @@ -2,6 +2,8 @@ package main import ( "context" + "os" + "path/filepath" "testing" "github.com/restic/restic/internal/repository" @@ -16,6 +18,11 @@ func testRunInit(t testing.TB, opts GlobalOptions) { rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil)) t.Logf("repository initialized at %v", opts.Repo) + + // create temporary junk files to verify that restic does not trip over them + for _, path := range []string{"index", "snapshots", "keys", "locks", filepath.Join("data", "00")} { + rtest.OK(t, os.WriteFile(filepath.Join(opts.Repo, path, "tmp12345"), []byte("junk file"), 0o600)) + } } func TestInitCopyChunkerParams(t *testing.T) { diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go index c687eca53d6..a94caa0d843 100644 --- a/cmd/restic/cmd_key.go +++ b/cmd/restic/cmd_key.go @@ -11,6 +11,8 @@ var cmdKey = &cobra.Command{ The "key" command allows you to set multiple access keys or passwords per repository. `, + DisableAutoGenTag: true, + GroupID: cmdGroupDefault, } func init() { diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 43a38f4ebbd..2737410a05b 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -3,12 +3,11 @@ package main import ( "context" "fmt" - "os" - "strings" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var cmdKeyAdd = &cobra.Command{ @@ -20,29 +19,37 @@ The "add" sub-command creates a new key and validates the key. Returns the new k EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runKeyAdd(cmd.Context(), globalOptions, keyAddOpts, args) - }, } type KeyAddOptions struct { - NewPasswordFile string - Username string - Hostname string + NewPasswordFile string + InsecureNoPassword bool + Username string + Hostname string } -var keyAddOpts KeyAddOptions +func (opts *KeyAddOptions) Add(flags *pflag.FlagSet) { + flags.StringVarP(&opts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password") + flags.BoolVar(&opts.InsecureNoPassword, "new-insecure-no-password", false, "add an empty password for the repository (insecure)") + flags.StringVarP(&opts.Username, "user", "", "", "the username for new key") + flags.StringVarP(&opts.Hostname, "host", "", "", "the hostname for new key") +} func init() { cmdKey.AddCommand(cmdKeyAdd) - flags := cmdKeyAdd.Flags() - flags.StringVarP(&keyAddOpts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password") - flags.StringVarP(&keyAddOpts.Username, "user", "", "", "the username for new key") - flags.StringVarP(&keyAddOpts.Hostname, "host", "", "", "the hostname for new key") + var keyAddOpts KeyAddOptions + keyAddOpts.Add(cmdKeyAdd.Flags()) + cmdKeyAdd.RunE = func(cmd *cobra.Command, args []string) error { + return runKeyAdd(cmd.Context(), globalOptions, keyAddOpts, args) + } } func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, args []string) error { @@ -50,22 +57,17 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return addKey(ctx, repo, gopts, opts) } func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile, opts.InsecureNoPassword) if err != nil { return err } @@ -88,33 +90,41 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption // testKeyNewPassword is used to set a new password during integration testing. var testKeyNewPassword string -func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) { +func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string, insecureNoPassword bool) (string, error) { if testKeyNewPassword != "" { return testKeyNewPassword, nil } + if insecureNoPassword { + if newPasswordFile != "" { + return "", fmt.Errorf("only either --new-password-file or --new-insecure-no-password may be specified") + } + return "", nil + } + if newPasswordFile != "" { - return loadPasswordFromFile(newPasswordFile) + password, err := loadPasswordFromFile(newPasswordFile) + if err != nil { + return "", err + } + if password == "" { + return "", fmt.Errorf("an empty password is not allowed by default. Pass the flag `--new-insecure-no-password` to restic to disable this check") + } + return password, nil } // Since we already have an open repository, temporary remove the password // to prompt the user for the passwd. 
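// Password source precedence in this function, for reference: the
// testKeyNewPassword override wins, then --new-insecure-no-password (an empty
// password, mutually exclusive with --new-password-file), then a non-empty
// password file, and finally the interactive double prompt below.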
newopts := gopts newopts.password = "" + // empty passwords are already handled above + newopts.InsecureNoPassword = false - return ReadPasswordTwice(newopts, + return ReadPasswordTwice(ctx, newopts, "enter new password: ", "enter password again: ") } -func loadPasswordFromFile(pwdFile string) (string, error) { - s, err := os.ReadFile(pwdFile) - if os.IsNotExist(err) { - return "", errors.Fatalf("%s does not exist", pwdFile) - } - return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") -} - func switchToNewKeyAndRemoveIfBroken(ctx context.Context, repo *repository.Repository, key *repository.Key, pw string) error { // Verify new key to make sure it really works. A broken key can render the // whole repository inaccessible diff --git a/cmd/restic/cmd_key_integration_test.go b/cmd/restic/cmd_key_integration_test.go index 16cc1bdad7f..0b453388725 100644 --- a/cmd/restic/cmd_key_integration_test.go +++ b/cmd/restic/cmd_key_integration_test.go @@ -3,6 +3,8 @@ package main import ( "bufio" "context" + "os" + "path/filepath" "regexp" "strings" "testing" @@ -109,6 +111,43 @@ func TestKeyAddRemove(t *testing.T) { testRunKeyAddNewKeyUserHost(t, env.gopts) } +func TestKeyAddInvalid(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + testRunInit(t, env.gopts) + + err := runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{ + NewPasswordFile: "some-file", + InsecureNoPassword: true, + }, []string{}) + rtest.Assert(t, strings.Contains(err.Error(), "only either"), "unexpected error message, got %q", err) + + pwfile := filepath.Join(t.TempDir(), "pwfile") + rtest.OK(t, os.WriteFile(pwfile, []byte{}, 0o666)) + + err = runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{ + NewPasswordFile: pwfile, + }, []string{}) + rtest.Assert(t, strings.Contains(err.Error(), "an empty password is not allowed by default"), "unexpected error message, got %q", err) +} + +func TestKeyAddEmpty(t *testing.T) { + env, cleanup := withTestEnvironment(t) + // must list keys more than once + env.gopts.backendTestHook = nil + defer cleanup() + testRunInit(t, env.gopts) + + rtest.OK(t, runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{ + InsecureNoPassword: true, + }, []string{})) + + env.gopts.password = "" + env.gopts.InsecureNoPassword = true + + testRunCheck(t, env.gopts) +} + type emptySaveBackend struct { backend.Backend } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 517b7c84b7b..1c70cce8a74 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -23,7 +23,11 @@ used to access the repository. EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { @@ -40,19 +44,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error { return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() return listKeys(ctx, repo, gopts) } @@ -61,6 +57,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions type keyInfo struct { Current bool `json:"current"` ID string `json:"id"` + ShortID string `json:"-"` UserName string `json:"userName"` HostName string `json:"hostName"` Created string `json:"created"` @@ -69,7 +66,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions var m sync.Mutex var keys []keyInfo - err := restic.ParallelList(ctx, s, restic.KeyFile, s.Connections(), func(ctx context.Context, id restic.ID, size int64) error { + err := restic.ParallelList(ctx, s, restic.KeyFile, s.Connections(), func(ctx context.Context, id restic.ID, _ int64) error { k, err := repository.LoadKey(ctx, s, id) if err != nil { Warnf("LoadKey() failed: %v\n", err) @@ -78,7 +75,8 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions key := keyInfo{ Current: id == s.KeyID(), - ID: id.Str(), + ID: id.String(), + ShortID: id.Str(), UserName: k.Username, HostName: k.Hostname, Created: k.Created.Local().Format(TimeFormat), @@ -99,7 +97,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions } tab := table.New() - tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}") + tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ShortID }}") tab.AddColumn("User", "{{ .UserName }}") tab.AddColumn("Host", "{{ .HostName }}") tab.AddColumn("Created", "{{ .Created }}") diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index cb916274cc1..9bb1417494d 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -19,27 +19,27 @@ Returns the new key ID. EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runKeyPasswd(cmd.Context(), globalOptions, keyPasswdOpts, args) - }, } type KeyPasswdOptions struct { KeyAddOptions } -var keyPasswdOpts KeyPasswdOptions - func init() { cmdKey.AddCommand(cmdKeyPasswd) - flags := cmdKeyPasswd.Flags() - flags.StringVarP(&keyPasswdOpts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password") - flags.StringVarP(&keyPasswdOpts.Username, "user", "", "", "the username for new key") - flags.StringVarP(&keyPasswdOpts.Hostname, "host", "", "", "the hostname for new key") + var keyPasswdOpts KeyPasswdOptions + keyPasswdOpts.KeyAddOptions.Add(cmdKeyPasswd.Flags()) + cmdKeyPasswd.RunE = func(cmd *cobra.Command, args []string) error { + return runKeyPasswd(cmd.Context(), globalOptions, keyPasswdOpts, args) + } } func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOptions, args []string) error { @@ -47,22 +47,17 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return changePassword(ctx, repo, gopts, opts) } func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile, opts.InsecureNoPassword) if err != nil { return err } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c8e303ffc80..3cb2e0bd789 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -20,7 +20,11 @@ removing the current key being used to access the repository. EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { @@ -37,20 +41,13 @@ func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error return fmt.Errorf("key remove expects one argument as the key id") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - idPrefix := args[0] - - return deleteKey(ctx, repo, idPrefix) + return deleteKey(ctx, repo, args[0]) } func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 8be99234fc3..d66cddc4f62 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -2,16 +2,20 @@ package main import ( "context" + "strings" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/spf13/cobra" ) +var listAllowedArgs = []string{"blobs", "packs", "index", "snapshots", "keys", "locks"} +var listAllowedArgsUseString = strings.Join(listAllowedArgs, "|") + var cmdList = &cobra.Command{ - Use: "list [flags] [blobs|packs|index|snapshots|keys|locks]", + Use: "list [flags] [" + listAllowedArgsUseString + "]", Short: "List objects in the repository", Long: ` The "list" command allows listing objects in the repository based on type. @@ -19,12 +23,19 @@ The "list" command allows listing objects in the repository based on type. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runList(cmd.Context(), globalOptions, args) }, + ValidArgs: listAllowedArgs, + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), } func init() { @@ -36,19 +47,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks") if err != nil { return err } - - if !gopts.NoLock && args[0] != "locks" { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var t restic.FileType switch args[0] { @@ -63,20 +66,19 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { case "locks": t = restic.LockFile case "blobs": - return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, err error) error { if err != nil { return err } - idx.Each(ctx, func(blobs restic.PackedBlob) { + return idx.Each(ctx, func(blobs restic.PackedBlob) { Printf("%v %v\n", blobs.Type, blobs.ID) }) - return nil }) default: return errors.Fatal("invalid type") } - return repo.List(ctx, t, func(id restic.ID, size int64) error { + return repo.List(ctx, t, func(id restic.ID, _ int64) error { Printf("%s\n", id) return nil }) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index f412546ae8f..06ae6cc2054 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -39,9 +39,14 @@ a path separator); paths use the forward slash '/' as separator. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runLs(cmd.Context(), lsOptions, globalOptions, args) }, @@ -70,41 +75,40 @@ func init() { } type lsPrinter interface { - Snapshot(sn *restic.Snapshot) - Node(path string, node *restic.Node) - LeaveDir(path string) - Close() + Snapshot(sn *restic.Snapshot) error + Node(path string, node *restic.Node, isPrefixDirectory bool) error + LeaveDir(path string) error + Close() error } type jsonLsPrinter struct { enc *json.Encoder } -func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) error { type lsSnapshot struct { *restic.Snapshot - ID *restic.ID `json:"id"` - ShortID string `json:"short_id"` - StructType string `json:"struct_type"` // "snapshot" + ID *restic.ID `json:"id"` + ShortID string `json:"short_id"` + MessageType string `json:"message_type"` // "snapshot" + StructType string `json:"struct_type"` // "snapshot", deprecated } - err := p.enc.Encode(lsSnapshot{ - Snapshot: sn, - ID: sn.ID(), - ShortID: sn.ID().Str(), - StructType: "snapshot", + return p.enc.Encode(lsSnapshot{ + Snapshot: sn, + ID: sn.ID(), + ShortID: sn.ID().Str(), + MessageType: "snapshot", + StructType: "snapshot", }) - if err != nil { - Warnf("JSON encode failed: %v\n", err) - } } // Print node in our custom JSON format, followed by a newline. -func (p *jsonLsPrinter) Node(path string, node *restic.Node) { - err := lsNodeJSON(p.enc, path, node) - if err != nil { - Warnf("JSON encode failed: %v\n", err) +func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { + if isPrefixDirectory { + return nil } + return lsNodeJSON(p.enc, path, node) } func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { @@ -121,12 +125,13 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { AccessTime time.Time `json:"atime,omitempty"` ChangeTime time.Time `json:"ctime,omitempty"` Inode uint64 `json:"inode,omitempty"` - StructType string `json:"struct_type"` // "node" + MessageType string `json:"message_type"` // "node" + StructType string `json:"struct_type"` // "node", deprecated size uint64 // Target for Size pointer. }{ Name: node.Name, - Type: node.Type, + Type: string(node.Type), Path: path, UID: node.UID, GID: node.GID, @@ -137,19 +142,20 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { AccessTime: node.AccessTime, ChangeTime: node.ChangeTime, Inode: node.Inode, + MessageType: "node", StructType: "node", } // Always print size for regular files, even when empty, // but never for other types. - if node.Type == "file" { + if node.Type == restic.NodeTypeFile { n.Size = &n.size } return enc.Encode(n) } -func (p *jsonLsPrinter) LeaveDir(_ string) {} -func (p *jsonLsPrinter) Close() {} +func (p *jsonLsPrinter) LeaveDir(_ string) error { return nil } +func (p *jsonLsPrinter) Close() error { return nil } type ncduLsPrinter struct { out io.Writer @@ -159,16 +165,17 @@ type ncduLsPrinter struct { // lsSnapshotNcdu prints a restic snapshot in Ncdu save format. // It opens the JSON list. Nodes are added with lsNodeNcdu and the list is closed by lsCloseNcdu. 
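// With the synthetic "/" root entry emitted below, the overall export has the
// shape [1, 2, {snapshot}, [{"name":"/"}, ...nodes...]], and dsize is the
// apparent size rounded up to whole 512-byte blocks (e.g. 42 -> 512 and
// 12345 -> 12800, matching the expectations in cmd_ls_test.go).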
// Format documentation: https://dev.yorhel.nl/ncdu/jsonfmt -func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) error { const NcduMajorVer = 1 const NcduMinorVer = 2 snapshotBytes, err := json.Marshal(sn) if err != nil { - Warnf("JSON encode failed: %v\n", err) + return err } p.depth++ - fmt.Fprintf(p.out, "[%d, %d, %s", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + _, err = fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"/\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + return err } func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { @@ -186,14 +193,17 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { Mtime int64 `json:"mtime"` } + const blockSize = 512 + outNode := NcduNode{ - Name: node.Name, - Asize: node.Size, - Dsize: node.Size, + Name: node.Name, + Asize: node.Size, + // round up to nearest full blocksize + Dsize: (node.Size + blockSize - 1) / blockSize * blockSize, Dev: node.DeviceID, Ino: node.Inode, NLink: node.Links, - NotReg: node.Type != "dir" && node.Type != "file", + NotReg: node.Type != restic.NodeTypeDir && node.Type != restic.NodeTypeFile, UID: node.UID, GID: node.GID, Mode: uint16(node.Mode & os.ModePerm), @@ -209,31 +219,38 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { if node.Mode&os.ModeSticky != 0 { outNode.Mode |= 0o1000 } + if outNode.Mtime < 0 { + // ncdu does not allow negative times + outNode.Mtime = 0 + } return json.Marshal(outNode) } -func (p *ncduLsPrinter) Node(path string, node *restic.Node) { +func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) error { out, err := lsNcduNode(path, node) if err != nil { - Warnf("JSON encode failed: %v\n", err) + return err } - if node.Type == "dir" { - fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) + if node.Type == restic.NodeTypeDir { + _, err = fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) p.depth++ } else { - fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out)) + _, err = fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out)) } + return err } -func (p *ncduLsPrinter) LeaveDir(_ string) { +func (p *ncduLsPrinter) LeaveDir(_ string) error { p.depth-- - fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth)) + _, err := fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth)) + return err } -func (p *ncduLsPrinter) Close() { - fmt.Fprint(p.out, "\n]\n") +func (p *ncduLsPrinter) Close() error { + _, err := fmt.Fprint(p.out, "\n]\n]\n") + return err } type textLsPrinter struct { @@ -242,15 +259,23 @@ type textLsPrinter struct { HumanReadable bool } -func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) error { Verbosef("%v filtered by %v:\n", sn, p.dirs) + return nil } -func (p *textLsPrinter) Node(path string, node *restic.Node) { - Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable)) +func (p *textLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { + if !isPrefixDirectory { + Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable)) + } + return nil } -func (p *textLsPrinter) LeaveDir(_ string) {} -func (p *textLsPrinter) Close() {} +func (p *textLsPrinter) LeaveDir(_ string) error { + return nil +} +func (p *textLsPrinter) Close() error { + return nil +} func runLs(ctx context.Context, opts LsOptions, gopts 
GlobalOptions, args []string) error { if len(args) == 0 { @@ -305,10 +330,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return false } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -352,7 +378,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - printer.Snapshot(sn) + if err := printer.Snapshot(sn); err != nil { + return err + } processNode := func(_ restic.ID, nodepath string, node *restic.Node, err error) error { if err != nil { @@ -362,9 +390,13 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return nil } + printedDir := false if withinDir(nodepath) { - // if we're within a dir, print the node - printer.Node(nodepath, node) + // if we're within a target path, print the node + if err := printer.Node(nodepath, node, false); err != nil { + return err + } + printedDir = true // if recursive listing is requested, signal the walker that it // should continue walking recursively @@ -376,12 +408,22 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri // if there's an upcoming match deeper in the tree (but we're not // there yet), signal the walker to descend into any subdirs if approachingMatchingTree(nodepath) { + // print node leading up to the target paths + if !printedDir { + return printer.Node(nodepath, node, true) + } return nil } // otherwise, signal the walker to not walk recursively into any // subdirs - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { + // immediately generate leaveDir if the directory is skipped + if printedDir { + if err := printer.LeaveDir(nodepath); err != nil { + return err + } + } return walker.ErrSkipNode } return nil @@ -389,11 +431,12 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri err = walker.Walk(ctx, repo, *sn.Tree, walker.WalkVisitor{ ProcessNode: processNode, - LeaveDir: func(path string) { + LeaveDir: func(path string) error { // the root path `/` has no corresponding node and is thus also skipped by processNode - if withinDir(path) && path != "/" { - printer.LeaveDir(path) + if path != "/" { + return printer.LeaveDir(path) } + return nil }, }) @@ -401,6 +444,5 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - printer.Close() - return nil + return printer.Close() } diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index 1b3c964e4ec..f5655bdff9b 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -3,7 +3,6 @@ package main import ( "context" "encoding/json" - "path/filepath" "strings" "testing" @@ -26,22 +25,27 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { func assertIsValidJSON(t *testing.T, data []byte) { // Sanity check: output must be valid JSON. 
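// The ncdu export is a four-element top-level array:
// [majorVer, minorVer, snapshot, list-of-root-entries].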
- var v interface{} + var v []any err := json.Unmarshal(data, &v) rtest.OK(t, err) + rtest.Assert(t, len(v) == 4, "invalid ncdu output, expected 4 array elements, got %v", len(v)) } func TestRunLsNcdu(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() - testRunInit(t, env.gopts) + testSetupBackupData(t, env) opts := BackupOptions{} - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - - ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, []string{"latest"}) - assertIsValidJSON(t, ncdu) - - ncdu = testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, []string{"latest", "/testdata"}) - assertIsValidJSON(t, ncdu) + // backup such that there are multiple toplevel elements + testRunBackup(t, env.testdata+"/0", []string{"."}, opts, env.gopts) + + for _, paths := range [][]string{ + {"latest"}, + {"latest", "/0"}, + {"latest", "/0", "/0/9"}, + } { + ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, paths) + assertIsValidJSON(t, ncdu) + } } diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 41c235eabad..3d4e1dbc7a2 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -23,7 +23,7 @@ var lsTestNodes = []lsTestNode{ path: "/bar/baz", Node: restic.Node{ Name: "baz", - Type: "file", + Type: restic.NodeTypeFile, Size: 12345, UID: 10000000, GID: 20000000, @@ -39,7 +39,7 @@ var lsTestNodes = []lsTestNode{ path: "/foo/empty", Node: restic.Node{ Name: "empty", - Type: "file", + Type: restic.NodeTypeFile, Size: 0, UID: 1001, GID: 1001, @@ -56,7 +56,7 @@ var lsTestNodes = []lsTestNode{ path: "/foo/link", Node: restic.Node{ Name: "link", - Type: "symlink", + Type: restic.NodeTypeSymlink, Mode: os.ModeSymlink | 0777, LinkTarget: "not printed", }, @@ -66,7 +66,7 @@ var lsTestNodes = []lsTestNode{ path: "/some/directory", Node: restic.Node{ Name: "directory", - Type: "dir", + Type: restic.NodeTypeDir, Mode: os.ModeDir | 0755, ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC), AccessTime: time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC), @@ -79,7 +79,7 @@ var lsTestNodes = []lsTestNode{ path: "/some/sticky", Node: restic.Node{ Name: "sticky", - Type: "dir", + Type: restic.NodeTypeDir, Mode: os.ModeDir | 0755 | os.ModeSetuid | os.ModeSetgid | os.ModeSticky, }, }, @@ -87,11 +87,11 @@ var lsTestNodes = []lsTestNode{ func TestLsNodeJSON(t *testing.T) { for i, expect := range []string{ - `{"name":"baz","type":"file","path":"/bar/baz","uid":10000000,"gid":20000000,"size":12345,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, - `{"name":"empty","type":"file","path":"/foo/empty","uid":1001,"gid":1001,"size":0,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, - `{"name":"link","type":"symlink","path":"/foo/link","uid":0,"gid":0,"mode":134218239,"permissions":"Lrwxrwxrwx","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, - `{"name":"directory","type":"dir","path":"/some/directory","uid":0,"gid":0,"mode":2147484141,"permissions":"drwxr-xr-x","mtime":"2020-01-02T03:04:05Z","atime":"2021-02-03T04:05:06.000000007Z","ctime":"2022-03-04T05:06:07.000000008Z","struct_type":"node"}`, - 
`{"name":"sticky","type":"dir","path":"/some/sticky","uid":0,"gid":0,"mode":2161115629,"permissions":"dugtrwxr-xr-x","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, + `{"name":"baz","type":"file","path":"/bar/baz","uid":10000000,"gid":20000000,"size":12345,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, + `{"name":"empty","type":"file","path":"/foo/empty","uid":1001,"gid":1001,"size":0,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, + `{"name":"link","type":"symlink","path":"/foo/link","uid":0,"gid":0,"mode":134218239,"permissions":"Lrwxrwxrwx","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, + `{"name":"directory","type":"dir","path":"/some/directory","uid":0,"gid":0,"mode":2147484141,"permissions":"drwxr-xr-x","mtime":"2020-01-02T03:04:05Z","atime":"2021-02-03T04:05:06.000000007Z","ctime":"2022-03-04T05:06:07.000000008Z","message_type":"node","struct_type":"node"}`, + `{"name":"sticky","type":"dir","path":"/some/sticky","uid":0,"gid":0,"mode":2161115629,"permissions":"dugtrwxr-xr-x","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, } { c := lsTestNodes[i] buf := new(bytes.Buffer) @@ -109,11 +109,11 @@ func TestLsNodeJSON(t *testing.T) { func TestLsNcduNode(t *testing.T) { for i, expect := range []string{ - `{"name":"baz","asize":12345,"dsize":12345,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":-62135596800}`, - `{"name":"empty","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":3840,"notreg":false,"uid":1001,"gid":1001,"mode":0,"mtime":-62135596800}`, - `{"name":"link","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":true,"uid":0,"gid":0,"mode":511,"mtime":-62135596800}`, + `{"name":"baz","asize":12345,"dsize":12800,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":0}`, + `{"name":"empty","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":3840,"notreg":false,"uid":1001,"gid":1001,"mode":0,"mtime":0}`, + `{"name":"link","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":true,"uid":0,"gid":0,"mode":511,"mtime":0}`, `{"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":493,"mtime":1577934245}`, - `{"name":"sticky","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":4077,"mtime":-62135596800}`, + `{"name":"sticky","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":4077,"mtime":0}`, } { c := lsTestNodes[i] out, err := lsNcduNode(c.path, &c.Node) @@ -132,28 +132,39 @@ func TestLsNcdu(t *testing.T) { printer := &ncduLsPrinter{ out: &buf, } + modTime := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC) - printer.Snapshot(&restic.Snapshot{ + rtest.OK(t, printer.Snapshot(&restic.Snapshot{ Hostname: "host", Paths: []string{"/example"}, - }) - printer.Node("/directory", &restic.Node{ - Type: "dir", - Name: "directory", - }) - printer.Node("/directory/data", &restic.Node{ - Type: "file", - Name: "data", - Size: 42, - }) - printer.LeaveDir("/directory") - printer.Close() - - rtest.Equals(t, `[1, 2, 
{"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, + })) + rtest.OK(t, printer.Node("/directory", &restic.Node{ + Type: restic.NodeTypeDir, + Name: "directory", + ModTime: modTime, + }, false)) + rtest.OK(t, printer.Node("/directory/data", &restic.Node{ + Type: restic.NodeTypeFile, + Name: "data", + Size: 42, + ModTime: modTime, + }, false)) + rtest.OK(t, printer.LeaveDir("/directory")) + rtest.OK(t, printer.Node("/file", &restic.Node{ + Type: restic.NodeTypeFile, + Name: "file", + Size: 12345, + ModTime: modTime, + }, false)) + rtest.OK(t, printer.Close()) + + rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":"/"}, [ - {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800}, - {"name":"data","asize":42,"dsize":42,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800} - ] + {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245}, + {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245} + ], + {"name":"file","asize":12345,"dsize":12800,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245} +] ] `, buf.String()) } diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index fd2e762c0f4..5c3e425edfc 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -5,6 +5,8 @@ import ( "github.com/restic/restic/internal/migrations" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -20,11 +22,18 @@ names are specified, these migrations are applied. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { - return runMigrate(cmd.Context(), migrateOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runMigrate(cmd.Context(), migrateOptions, globalOptions, args, term) }, } @@ -41,8 +50,8 @@ func init() { f.BoolVarP(&migrateOptions.Force, "force", "f", false, `apply a migration a second time`) } -func checkMigrations(ctx context.Context, repo restic.Repository) error { - Printf("available migrations:\n") +func checkMigrations(ctx context.Context, repo restic.Repository, printer progress.Printer) error { + printer.P("available migrations:\n") found := false for _, m := range migrations.All { @@ -52,23 +61,25 @@ func checkMigrations(ctx context.Context, repo restic.Repository) error { } if ok { - Printf(" %v\t%v\n", m.Name(), m.Desc()) + printer.P(" %v\t%v\n", m.Name(), m.Desc()) found = true } } if !found { - Printf("no migrations found\n") + printer.P("no migrations found\n") } return nil } -func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string) error { +func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string, term *termstatus.Terminal, printer progress.Printer) error { var firsterr error for _, name := range args { + found := false for _, m := range migrations.All { if m.Name() == name { + found = true ok, reason, err := m.Check(ctx, repo) if err != nil { return err @@ -79,58 +90,59 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio if reason == "" { reason = "check failed" } - Warnf("migration %v cannot be applied: %v\nIf you want to apply this migration anyway, re-run with option --force\n", m.Name(), reason) + printer.E("migration %v cannot be applied: %v\nIf you want to apply this migration anyway, re-run with option --force\n", m.Name(), reason) continue } - Warnf("check for migration %v failed, continuing anyway\n", m.Name()) + printer.E("check for migration %v failed, continuing anyway\n", m.Name()) } if m.RepoCheck() { - Printf("checking repository integrity...\n") + printer.P("checking repository integrity...\n") checkOptions := CheckOptions{} checkGopts := gopts // the repository is already locked checkGopts.NoLock = true - err = runCheck(ctx, checkOptions, checkGopts, []string{}) + + err = runCheck(ctx, checkOptions, checkGopts, []string{}, term) if err != nil { return err } } - Printf("applying migration %v...\n", m.Name()) + printer.P("applying migration %v...\n", m.Name()) if err = m.Apply(ctx, repo); err != nil { - Warnf("migration %v failed: %v\n", m.Name(), err) + printer.E("migration %v failed: %v\n", m.Name(), err) if firsterr == nil { firsterr = err } continue } - Printf("migration %v: success\n", m.Name()) + printer.P("migration %v: success\n", m.Name()) } } + if !found { + printer.E("unknown migration %v", name) + } } return firsterr } -func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } +func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) error { + printer := newTerminalProgressPrinter(gopts.verbosity, term) - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, 
gopts, false) if err != nil { return err } + defer unlock() if len(args) == 0 { - return checkMigrations(ctx, repo) + return checkMigrations(ctx, repo, printer) } - return applyMigrations(ctx, opts, gopts, repo, args) + return applyMigrations(ctx, opts, gopts, repo, args, term, printer) } diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 5fd81b3444f..b8a66dc908f 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -15,7 +15,6 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" - resticfs "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/fuse" systemFuse "github.com/anacrolix/fuse" @@ -64,9 +63,14 @@ The default path templates are: EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runMount(cmd.Context(), mountOptions, globalOptions, args) }, @@ -117,7 +121,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args // Check the existence of the mount point at the earliest stage to // prevent unnecessary computations while opening the repository. - if _, err := resticfs.Stat(mountpoint); errors.Is(err, os.ErrNotExist) { + if _, err := os.Stat(mountpoint); errors.Is(err, os.ErrNotExist) { Verbosef("Mountpoint %s doesn't exist\n", mountpoint) return err } @@ -125,19 +129,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args debug.Log("start mount") defer debug.Log("finish mount") - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) @@ -160,28 +156,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args } } - AddCleanupHandler(func(code int) (int, error) { - debug.Log("running umount cleanup handler for mount at %v", mountpoint) - err := umount(mountpoint) - if err != nil { - Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) - } - // replace error code of sigint - if code == 130 { - code = 0 - } - return code, nil - }) + systemFuse.Debug = func(msg interface{}) { + debug.Log("fuse: %v", msg) + } c, err := systemFuse.Mount(mountpoint, mountOptions...) 
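The rewritten mount path drops the global cleanup handler in favor of a common Go shape: serve in a goroutine, select on context cancellation, and unmount explicitly when interrupted. A stripped-down sketch of that shape, with a placeholder `serve` standing in for `fs.Serve`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// serve is a placeholder for a blocking call such as fs.Serve(c, root).
func serve(ctx context.Context) error {
	<-ctx.Done() // block until "unmounted"
	return nil
}

func runMount(ctx context.Context) error {
	var err error
	done := make(chan struct{})

	go func() {
		defer close(done)
		err = serve(ctx) // record the serve error in the enclosing scope
	}()

	select {
	case <-ctx.Done():
		// context cancelled (e.g. Ctrl-C): unmount and report success
		fmt.Println("unmounting")
		return nil
	case <-done:
		// serve returned on its own; fall through and propagate its error
	}
	return err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println("mount result:", runMount(ctx))
}
```

Reading `err` only after `<-done` keeps the access race-free: the channel close happens after the goroutine's final write.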
if err != nil { return err } - systemFuse.Debug = func(msg interface{}) { - debug.Log("fuse: %v", msg) - } - cfg := fuse.Config{ OwnerIsRoot: opts.OwnerRoot, Filter: opts.SnapshotFilter, @@ -195,15 +178,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n") debug.Log("serving mount at %v", mountpoint) - err = fs.Serve(c, root) - if err != nil { - return err - } - <-c.Ready - return c.MountError -} + done := make(chan struct{}) + + go func() { + defer close(done) + err = fs.Serve(c, root) + }() + + select { + case <-ctx.Done(): + debug.Log("running umount cleanup handler for mount at %v", mountpoint) + err := systemFuse.Unmount(mountpoint) + if err != nil { + Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) + } + + return ErrOK + case <-done: + // clean shutdown, nothing to do + } -func umount(mountpoint string) error { - return systemFuse.Unmount(mountpoint) + return err } diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index 1b069d58211..c5f4d193a26 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" + systemFuse "github.com/anacrolix/fuse" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -67,7 +67,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr func testRunUmount(t testing.TB, dir string) { var err error for i := 0; i < mountWait; i++ { - if err = umount(dir); err == nil { + if err = systemFuse.Unmount(dir); err == nil { t.Logf("directory %v umounted", dir) return } @@ -87,12 +87,12 @@ func listSnapshots(t testing.TB, dir string) []string { return names } -func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { +func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs) var wg sync.WaitGroup wg.Add(1) - go testRunMount(t, global, mountpoint, &wg) + go testRunMount(t, gopts, mountpoint, &wg) waitForMount(t, mountpoint) defer wg.Wait() defer testRunUmount(t, mountpoint) @@ -101,7 +101,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit t.Fatal(`virtual directory "snapshots" doesn't exist`) } - ids := listSnapshots(t, repodir) + ids := listSnapshots(t, gopts.Repo) t.Logf("found %v snapshots in repo: %v", len(ids), ids) namesInSnapshots := listSnapshots(t, mountpoint) @@ -125,6 +125,10 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit } } + _, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + for _, id := range snapshotIDs { snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) rtest.OK(t, err) @@ -160,11 +164,6 @@ func TestMount(t *testing.T) { t.Skip("Skipping fuse tests") } - debugEnabled := debug.TestLogToStderr(t) - if debugEnabled { - defer debug.TestDisableLog(t) - } - env, cleanup := withTestEnvironment(t) // must list snapshots more than once env.gopts.backendTestHook = nil @@ -172,10 +171,7 @@ func TestMount(t *testing.T) { testRunInit(t, env.gopts) - 
repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0) + checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0) rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) @@ -185,7 +181,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2) // second backup, implicit incremental testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) @@ -193,7 +189,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3) // third backup, explicit incremental bopts := BackupOptions{Parent: snapshotIDs[0].String()} @@ -202,7 +198,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 4) } func TestMountSameTimestamps(t *testing.T) { @@ -210,6 +206,11 @@ func TestMountSameTimestamps(t *testing.T) { t.Skip("Skipping fuse tests") } + debugEnabled := debug.TestLogToStderr(t) + if debugEnabled { + defer debug.TestDisableLog(t) + } + env, cleanup := withTestEnvironment(t) // must list snapshots more than once env.gopts.backendTestHook = nil @@ -217,14 +218,11 @@ func TestMountSameTimestamps(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz")) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - ids := []restic.ID{ restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"), restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"), restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"), } - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4) + checkSnapshots(t, env.gopts, env.mountpoint, ids, 4) } diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go index 471319dfb4c..9c07b262674 100644 --- a/cmd/restic/cmd_options.go +++ b/cmd/restic/cmd_options.go @@ -17,11 +17,12 @@ The "options" command prints a list of extended options. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
`, - Hidden: true, + GroupID: cmdGroupAdvanced, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { fmt.Printf("All Extended Options:\n") var maxLen int for _, opt := range options.List() { diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index efd8f6e3a38..fce109bddd6 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -4,26 +4,20 @@ import ( "context" "math" "runtime" - "sort" "strconv" "strings" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) -var errorIndexIncomplete = errors.Fatal("index is not complete") -var errorPacksMissing = errors.Fatal("packs from index missing in repo") -var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") - var cmdPrune = &cobra.Command{ Use: "prune [flags]", Short: "Remove unneeded data from the repository", @@ -34,11 +28,18 @@ referenced and therefore not needed any more. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runPrune(cmd.Context(), pruneOptions, globalOptions) + RunE: func(cmd *cobra.Command, _ []string) error { + term, cancel := setupTermstatus() + defer cancel() + return runPrune(cmd.Context(), pruneOptions, globalOptions, term) }, } @@ -55,9 +56,9 @@ type PruneOptions struct { MaxRepackSize string MaxRepackBytes uint64 - RepackCachableOnly bool - RepackSmall bool - RepackUncompressed bool + RepackCacheableOnly bool + RepackSmall bool + RepackUncompressed bool } var pruneOptions PruneOptions @@ -73,8 +74,8 @@ func init() { func addPruneOptions(c *cobra.Command, pruneOptions *PruneOptions) { f := c.Flags() f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") - f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") - f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") + f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "stop after repacking this much data in total (allowed suffixes for `size`: k/K, m/M, g/G, t/T)") + f.BoolVar(&pruneOptions.RepackCacheableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") f.BoolVar(&pruneOptions.RepackSmall, "repack-small", false, "repack pack files below 80% of target pack size") f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data") } @@ -101,7 +102,7 @@ func verifyPruneOptions(opts *PruneOptions) error { // parse MaxUnused either as unlimited, a percentage, or an absolute number of bytes switch { case 
maxUnused == "unlimited": - opts.maxUnusedBytes = func(used uint64) uint64 { + opts.maxUnusedBytes = func(_ uint64) uint64 { return math.MaxUint64 } @@ -130,7 +131,7 @@ func verifyPruneOptions(opts *PruneOptions) error { return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err) } - opts.maxUnusedBytes = func(used uint64) uint64 { + opts.maxUnusedBytes = func(_ uint64) uint64 { return uint64(size) } } @@ -138,7 +139,7 @@ func verifyPruneOptions(opts *PruneOptions) error { return nil } -func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error { +func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error { err := verifyPruneOptions(&opts) if err != nil { return err @@ -148,18 +149,15 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err + if gopts.NoLock && !opts.DryRun { + return errors.Fatal("--no-lock is only applicable in combination with --dry-run for prune command") } - if repo.Connections() < 2 { - return errors.Fatal("prune requires a backend connection limit of at least two") - } - - if repo.Config().Version < 2 && opts.RepackUncompressed { - return errors.Fatal("compression requires at least repository format version 2") + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) + if err != nil { + return err } + defer unlock() if opts.UnsafeNoSpaceRecovery != "" { repoID := repo.Config().ID @@ -169,648 +167,107 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) + return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet(), term) } -func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error { - // we do not need index updates while pruning! 
- repo.DisableAutoIndexUpdate() - - if repo.Cache == nil { +func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error { + if repo.Cache() == nil { Print("warning: running prune without a cache, this may be very slow!\n") } - Verbosef("loading indexes...\n") + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + printer.P("loading indexes...\n") // loading the index before the snapshots is ok, as we use an exclusive lock here - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err := repo.LoadIndex(ctx, bar) if err != nil { return err } - plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, gopts.Quiet) - if err != nil { - return err - } - - if opts.DryRun { - Verbosef("\nWould have made the following changes:") - } - - err = printPruneStats(stats) - if err != nil { - return err - } - - // Trigger GC to reset garbage collection threshold - runtime.GC() - - return doPrune(ctx, opts, gopts, repo, plan) -} - -type pruneStats struct { - blobs struct { - used uint - duplicate uint - unused uint - remove uint - repack uint - repackrm uint - } - size struct { - used uint64 - duplicate uint64 - unused uint64 - remove uint64 - repack uint64 - repackrm uint64 - unref uint64 - uncompressed uint64 - } - packs struct { - used uint - unused uint - partlyUsed uint - unref uint - keep uint - repack uint - remove uint - } -} - -type prunePlan struct { - removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) - repackPacks restic.IDSet // packs to repack - keepBlobs restic.CountedBlobSet // blobs to keep during repacking - removePacks restic.IDSet // packs to remove - ignorePacks restic.IDSet // packs to ignore when rebuilding the index -} - -type packInfo struct { - usedBlobs uint - unusedBlobs uint - usedSize uint64 - unusedSize uint64 - tpe restic.BlobType - uncompressed bool -} - -type packInfoWithID struct { - ID restic.ID - packInfo - mustCompress bool -} - -// planPrune selects which files to rewrite and which to delete and which blobs to keep. -// Also some summary statistics are returned. 
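Throughout this diff, direct `Printf`/`Warnf` calls give way to a printer with `P` (normal), `V` (verbose), and `E` (error) methods, which lets the same code run under termstatus or plain output. A rough sketch of such an interface — the method names mirror the diff, the rest is illustrative rather than restic's exact API:

```go
package main

import (
	"fmt"
	"os"
)

// Printer decouples message emission from the output mechanism.
type Printer interface {
	P(msg string, args ...interface{}) // normal output
	V(msg string, args ...interface{}) // verbose output
	E(msg string, args ...interface{}) // errors
}

// stdioPrinter gates V on a verbosity level and sends E to stderr.
type stdioPrinter struct{ verbosity uint }

func (p stdioPrinter) P(msg string, args ...interface{}) { fmt.Printf(msg, args...) }
func (p stdioPrinter) V(msg string, args ...interface{}) {
	if p.verbosity >= 2 {
		fmt.Printf(msg, args...)
	}
}
func (p stdioPrinter) E(msg string, args ...interface{}) { fmt.Fprintf(os.Stderr, msg, args...) }

func main() {
	var printer Printer = stdioPrinter{verbosity: 1}
	printer.P("loading indexes...\n")
	printer.V("this only appears with --verbose\n") // suppressed at verbosity 1
	printer.E("migration %v failed\n", "upgrade_repo_v2")
}
```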
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (prunePlan, pruneStats, error) { - var stats pruneStats + popts := repository.PruneOptions{ + DryRun: opts.DryRun, + UnsafeRecovery: opts.unsafeRecovery, - usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, quiet) - if err != nil { - return prunePlan{}, stats, err - } + MaxUnusedBytes: opts.maxUnusedBytes, + MaxRepackBytes: opts.MaxRepackBytes, - Verbosef("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats) - if err != nil { - return prunePlan{}, stats, err + RepackCacheableOnly: opts.RepackCacheableOnly, + RepackSmall: opts.RepackSmall, + RepackUncompressed: opts.RepackUncompressed, } - Verbosef("collecting packs for deletion and repacking\n") - plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, quiet) + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error { + return getUsedBlobs(ctx, repo, usedBlobs, ignoreSnapshots, printer) + }, printer) if err != nil { - return prunePlan{}, stats, err - } - - if len(plan.repackPacks) != 0 { - blobCount := keepBlobs.Len() - // when repacking, we do not want to keep blobs which are - // already contained in kept packs, so delete them from keepBlobs - repo.Index().Each(ctx, func(blob restic.PackedBlob) { - if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { - return - } - keepBlobs.Delete(blob.BlobHandle) - }) - - if keepBlobs.Len() < blobCount/2 { - // replace with copy to shrink map to necessary size if there's a chance to benefit - keepBlobs = keepBlobs.Copy() - } - } else { - // keepBlobs is only needed if packs are repacked - keepBlobs = nil - } - plan.keepBlobs = keepBlobs - - return plan, stats, nil -} - -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { - // iterate over all blobs in index to find out which blobs are duplicates - // The counter in usedBlobs describes how many instances of the blob exist in the repository index - // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - if ok { - if count < math.MaxUint8 { - // don't overflow, but saturate count at 255 - // this can lead to a non-optimal pack selection, but won't cause - // problems otherwise - count++ - } - - usedBlobs[bh] = count - } - }) - - // Check if all used blobs have been found in index - missingBlobs := restic.NewBlobSet() - for bh, count := range usedBlobs { - if count == 0 { - // blob does not exist in any pack files - missingBlobs.Insert(bh) - } - } - - if len(missingBlobs) != 0 { - Warnf("%v not found in the index\n\n"+ - "Integrity check failed: Data seems to be missing.\n"+ - "Will not start prune to prevent (additional) data loss!\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) - return nil, nil, errorIndexIncomplete - } - - indexPack := make(map[restic.ID]packInfo) - - // save computed pack header size - for pid, hdrSize := range pack.Size(ctx, idx, true) { - // initialize tpe with NumBlobTypes to indicate it's not set - indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} - } - 
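The removed duplicate-counting code saturates its per-blob counter at 255 so a `uint8` cannot overflow; 0 stays reserved for "missing", 1 for "exactly one copy", and ≥2 for duplicates. The saturation step in isolation:

```go
package main

import (
	"fmt"
	"math"
)

// bump increments a duplicate counter but saturates at 255. Saturation can
// make the later pack selection slightly non-optimal, but never incorrect.
func bump(count uint8) uint8 {
	if count < math.MaxUint8 {
		count++
	}
	return count
}

func main() {
	counts := map[string]uint8{"blob-a": 0, "blob-b": 254, "blob-c": 255}
	for name := range counts {
		counts[name] = bump(counts[name])
	}
	fmt.Println(counts) // map[blob-a:1 blob-b:255 blob-c:255]
}
```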
- hasDuplicates := false - // iterate over all blobs in index to generate packInfo - idx.Each(ctx, func(blob restic.PackedBlob) { - ip := indexPack[blob.PackID] - - // Set blob type if not yet set - if ip.tpe == restic.NumBlobTypes { - ip.tpe = blob.Type - } - - // mark mixed packs with "Invalid blob type" - if ip.tpe != blob.Type { - ip.tpe = restic.InvalidBlob - } - - bh := blob.BlobHandle - size := uint64(blob.Length) - dupCount := usedBlobs[bh] - switch { - case dupCount >= 2: - hasDuplicates = true - // mark as unused for now, we will later on select one copy - ip.unusedSize += size - ip.unusedBlobs++ - - // count as duplicate, will later on change one copy to be counted as used - stats.size.duplicate += size - stats.blobs.duplicate++ - case dupCount == 1: // used blob, not duplicate - ip.usedSize += size - ip.usedBlobs++ - - stats.size.used += size - stats.blobs.used++ - default: // unused blob - ip.unusedSize += size - ip.unusedBlobs++ - - stats.size.unused += size - stats.blobs.unused++ - } - if !blob.IsCompressed() { - ip.uncompressed = true - } - // update indexPack - indexPack[blob.PackID] = ip - }) - - // if duplicate blobs exist, those will be set to either "used" or "unused": - // - mark only one occurrence of duplicate blobs as used - // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" - // - if there are no used blobs in a pack, possibly mark duplicates as "unused" - if hasDuplicates { - // iterate again over all blobs in index (this is pretty cheap, all in-mem) - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - // skip non-duplicate, aka. normal blobs - // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining - if !ok || count == 1 { - return - } - - ip := indexPack[blob.PackID] - size := uint64(blob.Length) - switch { - case ip.usedBlobs > 0, count == 0: - // other used blobs in pack or "last" occurrence -> transition to used - ip.usedSize += size - ip.usedBlobs++ - ip.unusedSize -= size - ip.unusedBlobs-- - // same for the global statistics - stats.size.used += size - stats.blobs.used++ - stats.size.duplicate -= size - stats.blobs.duplicate-- - // let other occurrences remain marked as unused - usedBlobs[bh] = 1 - default: - // remain unused and decrease counter - count-- - if count == 1 { - // setting count to 1 would lead to forgetting that this blob had duplicates - // thus use the special value zero. This will select the last instance of the blob for keeping. - count = 0 - } - usedBlobs[bh] = count - } - // update indexPack - indexPack[blob.PackID] = ip - }) + return err } - - // Sanity check. If no duplicates exist, all blobs have value 1. After handling - // duplicates, this also applies to duplicates. 
- for _, count := range usedBlobs { - if count != 1 { - panic("internal error during blob selection") - } + if ctx.Err() != nil { + return ctx.Err() } - return usedBlobs, indexPack, nil -} - -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, quiet bool) (prunePlan, error) { - removePacksFirst := restic.NewIDSet() - removePacks := restic.NewIDSet() - repackPacks := restic.NewIDSet() - - var repackCandidates []packInfoWithID - var repackSmallCandidates []packInfoWithID - repoVersion := repo.Config().Version - // only repack very small files by default - targetPackSize := repo.PackSize() / 25 - if opts.RepackSmall { - // consider files with at least 80% of the target size as large enough - targetPackSize = repo.PackSize() / 5 * 4 + if popts.DryRun { + printer.P("\nWould have made the following changes:") } - // loop over all packs and decide what to do - bar := newProgressMax(!quiet, uint64(len(indexPack)), "packs processed") - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - p, ok := indexPack[id] - if !ok { - // Pack was not referenced in index and is not used => immediately remove! - Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str()) - removePacksFirst.Insert(id) - stats.size.unref += uint64(packSize) - return nil - } - - if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { - // Pack size does not fit and pack is needed => error - // If the pack is not needed, this is no error, the pack can - // and will be simply removed, see below. - Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", - id.Str(), p.unusedSize+p.usedSize, packSize) - return errorSizeNotMatching - } - - // statistics - switch { - case p.usedBlobs == 0: - stats.packs.unused++ - case p.unusedBlobs == 0: - stats.packs.used++ - default: - stats.packs.partlyUsed++ - } - - if p.uncompressed { - stats.size.uncompressed += p.unusedSize + p.usedSize - } - mustCompress := false - if repoVersion >= 2 { - // repo v2: always repack tree blobs if uncompressed - // compress data blobs if requested - mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed - } - - // decide what to do - switch { - case p.usedBlobs == 0: - // All blobs in pack are no longer used => remove pack! - removePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize - - case opts.RepackCachableOnly && p.tpe == restic.DataBlob: - // if this is a data pack and --repack-cacheable-only is set => keep pack! - stats.packs.keep++ - - case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: - if packSize >= int64(targetPackSize) { - // All blobs in pack are used and not mixed => keep pack! - stats.packs.keep++ - } else { - repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - default: - // all other packs are candidates for repacking - repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - delete(indexPack, id) - bar.Add(1) - return nil - }) - bar.Done() + err = printPruneStats(printer, plan.Stats()) if err != nil { - return prunePlan{}, err - } - - // At this point indexPacks contains only missing packs! 
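After the pack loop, `indexPack` holds only packs the index references but the repository no longer contains: those without used blobs are merely forgotten, while any missing pack that still holds used blobs aborts prune with `errorPacksMissing`. That partition, reduced to plain maps for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errPacksMissing = errors.New("packs from index missing in repo")

// partitionMissing splits missing packs into ignorable ones (no used blobs)
// and fails if any missing pack is still needed.
func partitionMissing(missing map[string]uint) ([]string, error) {
	var ignore []string
	needed := 0
	for id, usedBlobs := range missing {
		if usedBlobs == 0 {
			ignore = append(ignore, id) // safe to forget when rebuilding the index
		} else {
			needed++
		}
	}
	if needed > 0 {
		return nil, errPacksMissing
	}
	return ignore, nil
}

func main() {
	_, err := partitionMissing(map[string]uint{"pack1": 0, "pack2": 3})
	fmt.Println(err) // packs from index missing in repo
}
```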
- - // missing packs that are not needed can be ignored - ignorePacks := restic.NewIDSet() - for id, p := range indexPack { - if p.usedBlobs == 0 { - ignorePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize - delete(indexPack, id) - } - } - - if len(indexPack) != 0 { - Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) - for id := range indexPack { - Warnf(" %v\n", id) - } - return prunePlan{}, errorPacksMissing - } - if len(ignorePacks) != 0 { - Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n") - for id := range ignorePacks { - Warnf("will forget missing pack file %v\n", id) - } - } - - if len(repackSmallCandidates) < 10 { - // too few small files to be worth the trouble, this also prevents endlessly repacking - // if there is just a single pack file below the target size - stats.packs.keep += uint(len(repackSmallCandidates)) - } else { - repackCandidates = append(repackCandidates, repackSmallCandidates...) - } - - // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. - // This is equivalent to sorting by unused / total space. - // Instead of unused[i] / used[i] > unused[j] / used[j] we use - // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 - // Moreover packs containing trees and too small packs are sorted to the beginning - sort.Slice(repackCandidates, func(i, j int) bool { - pi := repackCandidates[i].packInfo - pj := repackCandidates[j].packInfo - switch { - case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: - return true - case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: - return false - case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): - return true - case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): - return false - } - return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize - }) - - repack := func(id restic.ID, p packInfo) { - repackPacks.Insert(id) - stats.blobs.repack += p.unusedBlobs + p.usedBlobs - stats.size.repack += p.unusedSize + p.usedSize - stats.blobs.repackrm += p.unusedBlobs - stats.size.repackrm += p.unusedSize - if p.uncompressed { - stats.size.uncompressed -= p.unusedSize + p.usedSize - } - } - - // calculate limit for number of unused bytes in the repo after repacking - maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used) - - for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter) - reachedRepackSize := stats.size.repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes - packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) - - switch { - case reachedRepackSize: - stats.packs.keep++ - - case p.tpe != restic.DataBlob, p.mustCompress: - // repacking non-data packs / uncompressed-trees is only limited by repackSize - repack(p.ID, p.packInfo) - - case reachedUnusedSizeAfter && packIsLargeEnough: - // for all other packs stop repacking if tolerated unused size is reached. 
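The removed sort orders repack candidates by their unused/used ratio without floating point: for positive values, `unused[i]/used[i] > unused[j]/used[j]` is equivalent to `unused[i]*used[j] > unused[j]*used[i]`, and (as the comment notes) the uint32-sized operands cannot overflow a uint64 product. Demonstrated standalone:

```go
package main

import (
	"fmt"
	"sort"
)

type pack struct {
	id     string
	unused uint64
	used   uint64
}

func main() {
	packs := []pack{
		{"a", 10, 90}, // 10% unused
		{"b", 50, 50}, // 50% unused
		{"c", 30, 70}, // 30% unused
	}
	// Highest unused/used ratio first, exact, with no float division:
	// unused_i/used_i > unused_j/used_j  <=>  unused_i*used_j > unused_j*used_i.
	sort.Slice(packs, func(i, j int) bool {
		return packs[i].unused*packs[j].used > packs[j].unused*packs[i].used
	})
	fmt.Println(packs) // [{b 50 50} {c 30 70} {a 10 90}]
}
```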
- stats.packs.keep++ - - default: - repack(p.ID, p.packInfo) - } + return err } - stats.packs.unref = uint(len(removePacksFirst)) - stats.packs.repack = uint(len(repackPacks)) - stats.packs.remove = uint(len(removePacks)) - - if repo.Config().Version < 2 { - // compression not supported for repository format version 1 - stats.size.uncompressed = 0 - } + // Trigger GC to reset garbage collection threshold + runtime.GC() - return prunePlan{removePacksFirst: removePacksFirst, - removePacks: removePacks, - repackPacks: repackPacks, - ignorePacks: ignorePacks, - }, nil + return plan.Execute(ctx, printer) } // printPruneStats prints out the statistics -func printPruneStats(stats pruneStats) error { - Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) - if stats.blobs.duplicate > 0 { - Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) - } - Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) - if stats.size.unref > 0 { - Verboseff("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) - } - totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate - totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref - unusedSize := stats.size.duplicate + stats.size.unused - Verboseff("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) - Verboseff("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) - - Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) - Verbosef("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) - Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) - totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref - Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) - if stats.size.uncompressed > 0 { - Verbosef("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) - } - Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) - unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm - Verbosef("unused size after prune: %s (%s of remaining size)\n", +func printPruneStats(printer progress.Printer, stats repository.PruneStats) error { + printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used)) + if stats.Blobs.Duplicate > 0 { + printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate)) + } + printer.V("unused: %10d blobs / %s\n", stats.Blobs.Unused, ui.FormatBytes(stats.Size.Unused)) + if stats.Size.Unref > 0 { + printer.V("unreferenced: %s\n", ui.FormatBytes(stats.Size.Unref)) + } + totalBlobs := stats.Blobs.Used + stats.Blobs.Unused + stats.Blobs.Duplicate + totalSize := stats.Size.Used + stats.Size.Duplicate + stats.Size.Unused + stats.Size.Unref + unusedSize := stats.Size.Duplicate + stats.Size.Unused + printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) + printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) + + printer.P("\nto repack: %10d blobs / %s\n", stats.Blobs.Repack, ui.FormatBytes(stats.Size.Repack)) + printer.P("this removes: %10d blobs / %s\n", stats.Blobs.Repackrm, 
ui.FormatBytes(stats.Size.Repackrm)) + printer.P("to delete: %10d blobs / %s\n", stats.Blobs.Remove, ui.FormatBytes(stats.Size.Remove+stats.Size.Unref)) + totalPruneSize := stats.Size.Remove + stats.Size.Repackrm + stats.Size.Unref + printer.P("total prune: %10d blobs / %s\n", stats.Blobs.Remove+stats.Blobs.Repackrm, ui.FormatBytes(totalPruneSize)) + if stats.Size.Uncompressed > 0 { + printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.Size.Uncompressed)) + } + printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.Blobs.Remove+stats.Blobs.Repackrm), ui.FormatBytes(totalSize-totalPruneSize)) + unusedAfter := unusedSize - stats.Size.Remove - stats.Size.Repackrm + printer.P("unused size after prune: %s (%s of remaining size)\n", ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize)) - Verbosef("\n") - Verboseff("totally used packs: %10d\n", stats.packs.used) - Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed) - Verboseff("unused packs: %10d\n\n", stats.packs.unused) - - Verboseff("to keep: %10d packs\n", stats.packs.keep) - Verboseff("to repack: %10d packs\n", stats.packs.repack) - Verboseff("to delete: %10d packs\n", stats.packs.remove) - if stats.packs.unref > 0 { - Verboseff("to delete: %10d unreferenced packs\n\n", stats.packs.unref) - } - return nil -} + printer.P("\n") + printer.V("totally used packs: %10d\n", stats.Packs.Used) + printer.V("partly used packs: %10d\n", stats.Packs.PartlyUsed) + printer.V("unused packs: %10d\n\n", stats.Packs.Unused) -// doPrune does the actual pruning: -// - remove unreferenced packs first -// - repack given pack files while keeping the given blobs -// - rebuild the index while ignoring all files that will be deleted -// - delete the files -// plan.removePacks and plan.ignorePacks are modified in this function. -func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo restic.Repository, plan prunePlan) (err error) { - if opts.DryRun { - if !gopts.JSON && gopts.verbosity >= 2 { - Printf("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") - if len(plan.removePacksFirst) > 0 { - Printf("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) - } - Printf("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) - Printf("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) - } - // Always quit here if DryRun was set! 
- return nil + printer.V("to keep: %10d packs\n", stats.Packs.Keep) + printer.V("to repack: %10d packs\n", stats.Packs.Repack) + printer.V("to delete: %10d packs\n", stats.Packs.Remove) + if stats.Packs.Unref > 0 { + printer.V("to delete: %10d unreferenced packs\n\n", stats.Packs.Unref) } - - // unreferenced packs can be safely deleted first - if len(plan.removePacksFirst) != 0 { - Verbosef("deleting unreferenced packs\n") - DeleteFiles(ctx, gopts, repo, plan.removePacksFirst, restic.PackFile) - } - - if len(plan.repackPacks) != 0 { - Verbosef("repacking packs\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(plan.repackPacks)), "packs repacked") - _, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) - bar.Done() - if err != nil { - return errors.Fatal(err.Error()) - } - - // Also remove repacked packs - plan.removePacks.Merge(plan.repackPacks) - - if len(plan.keepBlobs) != 0 { - Warnf("%v was not repacked\n\n"+ - "Integrity check failed.\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) - return errors.Fatal("internal error: blobs were not repacked") - } - - // allow GC of the blob set - plan.keepBlobs = nil - } - - if len(plan.ignorePacks) == 0 { - plan.ignorePacks = plan.removePacks - } else { - plan.ignorePacks.Merge(plan.removePacks) - } - - if opts.unsafeRecovery { - Verbosef("deleting index files\n") - indexFiles := repo.Index().(*index.MasterIndex).IDs() - err = DeleteFilesChecked(ctx, gopts, repo, indexFiles, restic.IndexFile) - if err != nil { - return errors.Fatalf("%s", err) - } - } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - if len(plan.removePacks) != 0 { - Verbosef("removing %d old packs\n", len(plan.removePacks)) - DeleteFiles(ctx, gopts, repo, plan.removePacks, restic.PackFile) - } - - if opts.unsafeRecovery { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - Verbosef("done\n") return nil } -func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error { - Verbosef("rebuilding index\n") - - bar := newProgressMax(!gopts.Quiet, 0, "packs processed") - return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return newProgressMax(!gopts.Quiet, 0, "old indexes deleted") - }, - DeleteReport: func(id restic.ID, err error) { - if gopts.verbosity > 2 { - Verbosef("removed index %v\n", id.String()) - } - }, - SkipDeletion: skipDeletion, - }) -} - -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet, ignoreSnapshots restic.IDSet, printer progress.Printer) error { var snapshotTrees restic.IDs - Verbosef("loading all snapshots...\n") - err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, + printer.P("loading all snapshots...\n") + err := restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, func(id restic.ID, sn *restic.Snapshot, err error) error { if err != nil { debug.Log("failed to load snapshot %v (error %v)", id, err) @@ 
-821,23 +278,14 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r return nil }) if err != nil { - return nil, errors.Fatalf("failed loading snapshot: %v", err) + return errors.Fatalf("failed loading snapshot: %v", err) } - Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) + printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) - usedBlobs = restic.NewCountedBlobSet() - - bar := newProgressMax(!quiet, uint64(len(snapshotTrees)), "snapshots") + bar := printer.NewCounter("snapshots") + bar.SetMax(uint64(len(snapshotTrees))) defer bar.Done() - err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) - if err != nil { - if repo.Backend().IsNotExist(err) { - return nil, errors.Fatal("unable to load a tree from the repository: " + err.Error()) - } - - return nil, err - } - return usedBlobs, nil + return restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) } diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index ebfa7ae4e30..536ec40d886 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -7,7 +7,9 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/repository" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { @@ -16,7 +18,9 @@ func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { defer func() { gopts.backendTestHook = oldHook }() - rtest.OK(t, runPrune(context.TODO(), opts, gopts)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), opts, gopts, term) + })) } func TestPrune(t *testing.T) { @@ -31,7 +35,7 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { } t.Run("0"+suffix, func(t *testing.T) { opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true, CheckUnused: true} + checkOpts := CheckOptions{ReadData: true, CheckUnused: !unsafeNoSpaceRecovery} testPrune(t, opts, checkOpts) }) @@ -47,8 +51,8 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { testPrune(t, opts, checkOpts) }) - t.Run("CachableOnly"+suffix, func(t *testing.T) { - opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery} + t.Run("CacheableOnly"+suffix, func(t *testing.T) { + opts := PruneOptions{MaxUnused: "5%", RepackCacheableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery} checkOpts := CheckOptions{ReadData: true} testPrune(t, opts, checkOpts) }) @@ -71,7 +75,7 @@ func createPrunableRepo(t *testing.T, env *testEnvironment) { testListSnapshots(t, env.gopts, 3) testRunForgetJSON(t, env.gopts) - testRunForget(t, env.gopts, firstSnapshot.String()) + testRunForget(t, env.gopts, ForgetOptions{}, firstSnapshot.String()) } func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { @@ -84,7 +88,9 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - return runForget(context.TODO(), opts, pruneOpts, gopts, args) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + }) }) rtest.OK(t, err) @@ -105,7 +111,9 @@ 
func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) { createPrunableRepo(t, env) testRunPrune(t, env.gopts, pruneOpts) - rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + })) } var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"} @@ -123,7 +131,7 @@ func TestPruneWithDamagedRepository(t *testing.T) { // create and delete snapshot to create unused blobs testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] - testRunForget(t, env.gopts, firstSnapshot.String()) + testRunForget(t, env.gopts, ForgetOptions{}, firstSnapshot.String()) oldPacks := listPacks(env.gopts, t) @@ -138,8 +146,9 @@ func TestPruneWithDamagedRepository(t *testing.T) { env.gopts.backendTestHook = oldHook }() // prune should fail - rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing, - "prune should have reported index not complete error") + rtest.Equals(t, repository.ErrPacksMissing, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) + }), "prune should have reported index not complete error") } // Test repos for edge cases @@ -210,7 +219,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o if checkOK { testRunCheck(t, env.gopts) } else { - rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runCheck(context.TODO(), optionsCheck, env.gopts, nil, term) + }) != nil, "check should have reported an error") } @@ -218,7 +229,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o testRunPrune(t, env.gopts, optionsPrune) testRunCheck(t, env.gopts) } else { - rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), optionsPrune, env.gopts, term) + }) != nil, "prune should have reported an error") } } diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 499abdf80f2..78fc2d14859 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -22,10 +22,15 @@ It can be used if, for example, a snapshot has been removed by accident with "fo EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
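Nearly every command in this diff documents the same exit codes: 1 for generic errors, 10 when the repository does not exist, 11 when it is already locked, 12 for a wrong password. One plausible way to centralize such a mapping — using hypothetical sentinel errors, since restic's actual error values are not shown in this diff — is `errors.Is` dispatch at the top level:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// Hypothetical sentinels; restic's real error types are not part of this diff.
var (
	errNoRepository  = errors.New("repository does not exist")
	errAlreadyLocked = errors.New("repository is already locked")
	errBadPassword   = errors.New("password is incorrect")
)

// exitCode maps an error to the documented exit statuses.
func exitCode(err error) int {
	switch {
	case err == nil:
		return 0
	case errors.Is(err, errNoRepository):
		return 10
	case errors.Is(err, errAlreadyLocked):
		return 11
	case errors.Is(err, errBadPassword):
		return 12
	default:
		return 1
	}
}

func main() {
	err := fmt.Errorf("open repository: %w", errAlreadyLocked)
	fmt.Println(exitCode(err)) // 11
	os.Exit(exitCode(nil))
}
```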
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return runRecover(cmd.Context(), globalOptions) }, } @@ -40,16 +45,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -66,23 +66,29 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { // tree. If it is not referenced, we have a root tree. trees := make(map[restic.ID]bool) - repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = repo.ListBlobs(ctx, func(blob restic.PackedBlob) { if blob.Type == restic.TreeBlob { trees[blob.Blob.ID] = false } }) + if err != nil { + return err + } Verbosef("load %d trees\n", len(trees)) bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded") for id := range trees { tree, err := restic.LoadTree(ctx, repo, id) + if ctx.Err() != nil { + return ctx.Err() + } if err != nil { Warnf("unable to load tree %v: %v\n", id.Str(), err) continue } for _, node := range tree.Nodes { - if node.Type == "dir" && node.Subtree != nil { + if node.Type == restic.NodeTypeDir && node.Subtree != nil { trees[*node.Subtree] = true } } @@ -91,7 +97,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { bar.Done() Verbosef("load snapshots\n") - err = restic.ForAllSnapshots(ctx, snapshotLister, repo, nil, func(id restic.ID, sn *restic.Snapshot, err error) error { + err = restic.ForAllSnapshots(ctx, snapshotLister, repo, nil, func(_ restic.ID, sn *restic.Snapshot, _ error) error { trees[*sn.Tree] = true return nil }) @@ -114,11 +120,15 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return nil } + if ctx.Err() != nil { + return ctx.Err() + } + tree := restic.NewTree(len(roots)) for id := range roots { var subtreeID = id node := restic.Node{ - Type: "dir", + Type: restic.NodeTypeDir, Name: id.Str(), Mode: 0755, Subtree: &subtreeID, @@ -158,7 +168,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { } -func createSnapshot(ctx context.Context, name, hostname string, tags []string, repo restic.SaverUnpacked, tree *restic.ID) error { +func createSnapshot(ctx context.Context, name, hostname string, tags []string, repo restic.SaverUnpacked[restic.WriteableFileType], tree *restic.ID) error { sn, err := restic.NewSnapshot([]string{name}, tags, hostname, time.Now()) if err != nil { return errors.Fatalf("unable to save snapshot: %v", err) diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go index aefe02f3c46..6a1a1f9dc34 100644 --- a/cmd/restic/cmd_repair.go +++ b/cmd/restic/cmd_repair.go @@ -5,8 +5,10 @@ import ( ) var cmdRepair = &cobra.Command{ - Use: "repair", - Short: "Repair the repository", + Use: "repair", + Short: "Repair the repository", + GroupID: cmdGroupDefault, + DisableAutoGenTag: true, } func init() { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index fc5506b340c..83c1bfa7f9b 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -3,10 +3,8 @@ package main import ( "context" - "github.com/restic/restic/internal/index" - 
"github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -21,11 +19,17 @@ repository. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions) + RunE: func(cmd *cobra.Command, _ []string) error { + term, cancel := setupTermstatus() + defer cancel() + return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions, term) }, } @@ -55,110 +59,22 @@ func init() { } } -func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { - repo, err := OpenRepository(ctx, gopts) +func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, term *termstatus.Terminal) error { + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - return rebuildIndex(ctx, opts, gopts, repo) -} - -func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error { - var obsoleteIndexes restic.IDs - packSizeFromList := make(map[restic.ID]int64) - packSizeFromIndex := make(map[restic.ID]int64) - removePacks := restic.NewIDSet() - - if opts.ReadAllPacks { - // get list of old index files but start with empty index - err := repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error { - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - }) - if err != nil { - return err - } - } else { - Verbosef("loading indexes...\n") - mi := index.NewMasterIndex() - err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { - if err != nil { - Warnf("removing invalid index %v: %v\n", id, err) - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - } - - mi.Insert(idx) - return nil - }) - if err != nil { - return err - } - - err = mi.MergeFinalIndexes() - if err != nil { - return err - } - - err = repo.SetIndex(mi) - if err != nil { - return err - } - packSizeFromIndex = pack.Size(ctx, repo.Index(), false) - } - - Verbosef("getting pack files to read...\n") - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - size, ok := packSizeFromIndex[id] - if !ok || size != packSize { - // Pack was not referenced in index or size does not match - packSizeFromList[id] = packSize - removePacks.Insert(id) - } - if !ok { - Warnf("adding pack file to index %v\n", id) - } else if size != packSize { - Warnf("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) - } - delete(packSizeFromIndex, id) - return nil - }) - if err != nil { - return err - } - for id := range packSizeFromIndex { - // forget pack files that are referenced in the index but do not exist - // when rebuilding the index - removePacks.Insert(id) - Warnf("removing not found pack file %v\n", id) - } 
- - if len(packSizeFromList) > 0 { - Verbosef("reading pack files\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs") - invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) - bar.Done() - if err != nil { - return err - } - - for _, id := range invalidFiles { - Verboseff("skipped incomplete pack file: %v\n", id) - } - } + printer := newTerminalProgressPrinter(gopts.verbosity, term) - err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false) + err = repository.RepairIndex(ctx, repo, repository.RepairIndexOptions{ + ReadAllPacks: opts.ReadAllPacks, + }, printer) if err != nil { return err } - Verbosef("done\n") + printer.P("done\n") return nil } diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index e3271361ae0..9bfc93b401a 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -10,15 +10,18 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { rtest.OK(t, withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts, term) + }) })) } @@ -65,7 +68,7 @@ func TestRebuildIndexAlwaysFull(t *testing.T) { defer func() { index.IndexFull = indexFull }() - index.IndexFull = func(*index.Index, bool) bool { return true } + index.IndexFull = func(*index.Index) bool { return true } testRebuildIndex(t, nil) } @@ -126,12 +129,13 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, datafile) err := withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { return &appendOnlyBackend{r}, nil } - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) + return withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + }) }) if err == nil { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 04b06c33b94..290c3734e74 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -1,11 +1,11 @@ package main import ( + "bytes" "context" "io" "os" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -17,15 +17,17 @@ var cmdRepairPacks = &cobra.Command{ Use: "packs [packIDs...]", Short: "Salvage damaged pack files", Long: ` -WARNING: The CLI for this command is experimental and will likely change in the future! - The "repair packs" command extracts intact blobs from the specified pack files, rebuilds the index to remove the damaged pack files and removes the pack files from the repository. 
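A pattern that recurs throughout this patch: the explicit `OpenRepository` + `lockRepoExclusive` + `defer unlockRepo(lock)` sequence is replaced by helpers such as `openWithExclusiveLock`, which return an `unlock` function. A hedged sketch of the shape of such a helper — the types are stand-ins, not restic's actual implementation:

```go
package main

import (
	"context"
	"fmt"
)

// Repo and Lock are stand-ins for the real repository and lock types.
type Repo struct{}
type Lock struct{}

func (l *Lock) Release() { fmt.Println("lock released") }

func acquireExclusive(_ context.Context) (*Lock, error) { return &Lock{}, nil }

// openWithExclusiveLock bundles "open, lock, remember to unlock" into one
// call. The returned unlock func is always non-nil on success, so call
// sites can simply `defer unlock()` — which is what makes them so short.
func openWithExclusiveLock(ctx context.Context, dryRun bool) (context.Context, *Repo, func(), error) {
	repo := &Repo{}
	if dryRun {
		// nothing to release in dry-run mode
		return ctx, repo, func() {}, nil
	}
	lock, err := acquireExclusive(ctx)
	if err != nil {
		return ctx, nil, nil, err
	}
	return ctx, repo, lock.Release, nil
}

func main() {
	ctx, repo, unlock, err := openWithExclusiveLock(context.Background(), false)
	if err != nil {
		panic(err)
	}
	defer unlock()
	_, _ = ctx, repo
}
```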
EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { @@ -40,13 +42,6 @@ func init() { } func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - // FIXME discuss and add proper feature flag mechanism - flag, _ := os.LookupEnv("RESTIC_FEATURES") - if flag != "repair-packs-v1" { - return errors.Fatal("This command is experimental and may change/be removed without notice between restic versions. " + - "Set the environment variable 'RESTIC_FEATURES=repair-packs-v1' to enable it.") - } - ids := restic.NewIDSet() for _, arg := range args { id, err := restic.ParseID(arg) @@ -59,41 +54,37 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return errors.Fatal("no ids specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + printer := newTerminalProgressPrinter(gopts.verbosity, term) - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err = repo.LoadIndex(ctx, bar) if err != nil { return errors.Fatalf("%s", err) } - printer := newTerminalProgressPrinter(gopts.verbosity, term) - printer.P("saving backup copies of pack files to current folder") for id := range ids { + buf, err := repo.LoadRaw(ctx, restic.PackFile, id) + // corrupted data is fine + if buf == nil { + return err + } + f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666) if err != nil { return err } - - err = repo.Backend().Load(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}, 0, 0, func(rd io.Reader) error { - _, err := f.Seek(0, 0) - if err != nil { - return err - } - _, err = io.Copy(f, rd) + if _, err := io.Copy(f, bytes.NewReader(buf)); err != nil { + _ = f.Close() return err - }) - if err != nil { + } + if err := f.Close(); err != nil { return err } } diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 19e457b1fff..ba952432afa 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -37,7 +37,11 @@ snapshot! EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
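The rewritten save loop in `runRepairPacks` is built on `repo.LoadRaw`, which (per the comment in the hunk) may return the buffer even when verification fails — corrupted data is exactly what this command wants to salvage. The backup copy is then written with `O_EXCL` so an existing file is never clobbered. A self-contained sketch of that step, with `loadRaw` as a stand-in for the repository method:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
)

// loadRaw stands in for repo.LoadRaw: it may return both data and an error
// when the content is corrupted but still readable.
func loadRaw(id string) ([]byte, error) {
	return []byte("pack bytes"), errors.New("ciphertext verification failed")
}

// savePackCopy writes a local backup of a (possibly corrupted) pack file.
func savePackCopy(id string) error {
	buf, err := loadRaw(id)
	// corrupted data is fine here: only give up if nothing was read at all
	if buf == nil {
		return err
	}
	f, err := os.OpenFile("pack-"+id, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, bytes.NewReader(buf)); err != nil {
		_ = f.Close()
		return err
	}
	return f.Close()
}

func main() {
	if err := savePackCopy("deadbeef"); err != nil {
		fmt.Println("error:", err)
	}
}
```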
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { @@ -66,22 +70,11 @@ func init() { } func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun) if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -99,7 +92,11 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt // - files whose contents are not fully available (-> file will be modified) rewriter := walker.NewTreeRewriter(walker.RewriteOpts{ RewriteNode: func(node *restic.Node, path string) *restic.Node { - if node.Type != "file" { + if node.Type == restic.NodeTypeIrregular || node.Type == restic.NodeTypeInvalid { + Verbosef(" file %q: removed node with invalid type %q\n", path, node.Type) + return nil + } + if node.Type != restic.NodeTypeFile { return node } @@ -108,7 +105,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt var newSize uint64 // check all contents and remove if not available for _, id := range node.Content { - if size, found := repo.LookupBlobSize(id, restic.DataBlob); !found { + if size, found := repo.LookupBlobSize(restic.DataBlob, id); !found { ok = false } else { newContent = append(newContent, id) @@ -125,7 +122,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt node.Size = newSize return node }, - RewriteFailedTree: func(nodeID restic.ID, path string, _ error) (restic.ID, error) { + RewriteFailedTree: func(_ restic.ID, path string, _ error) (restic.ID, error) { if path == "/" { Verbosef(" dir %q: not readable\n", path) // remove snapshots with invalid root node @@ -156,6 +153,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_repair_snapshots_integration_test.go b/cmd/restic/cmd_repair_snapshots_integration_test.go index 34cd186d3ff..9f65c9328a8 100644 --- a/cmd/restic/cmd_repair_snapshots_integration_test.go +++ b/cmd/restic/cmd_repair_snapshots_integration_test.go @@ -62,7 +62,7 @@ func TestRepairSnapshotsWithLostData(t *testing.T) { testRunCheckMustFail(t, env.gopts) // repository must be ok after removing the broken snapshots - testRunForget(t, env.gopts, snapshotIDs[0].String(), snapshotIDs[1].String()) + testRunForget(t, env.gopts, ForgetOptions{}, snapshotIDs[0].String(), snapshotIDs[1].String()) testListSnapshots(t, env.gopts, 2) _, err := testRunCheckOutput(env.gopts, false) rtest.OK(t, err) @@ -86,7 +86,7 @@ func TestRepairSnapshotsWithLostTree(t *testing.T) { // remove tree for foo/bar and the now completely broken first snapshot removePacks(env.gopts, t, restic.NewIDSet(oldPacks...)) - testRunForget(t, env.gopts, oldSnapshot[0].String()) + testRunForget(t, env.gopts, ForgetOptions{}, oldSnapshot[0].String()) testRunCheckMustFail(t, env.gopts) // repair diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 1208d30eba4..7a3b029daab 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -2,7 +2,7 
@@ package main import ( "context" - "strings" + "path/filepath" "time" "github.com/restic/restic/internal/debug" @@ -27,14 +27,19 @@ a directory. The special snapshotID "latest" can be used to restore the latest snapshot in the repository. -To only restore a specific subfolder, you can use the ":" +To only restore a specific subfolder, you can use the "snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() @@ -45,14 +50,17 @@ Exit status is 0 if the command was successful, and non-zero if there was any er // RestoreOptions collects all options for the restore command. type RestoreOptions struct { - Exclude []string - InsensitiveExclude []string - Include []string - InsensitiveInclude []string - Target string + filter.ExcludePatternOptions + filter.IncludePatternOptions + Target string restic.SnapshotFilter - Sparse bool - Verify bool + DryRun bool + Sparse bool + Verify bool + Overwrite restorer.OverwriteBehavior + Delete bool + ExcludeXattrPattern []string + IncludeXattrPattern []string } var restoreOptions RestoreOptions @@ -61,52 +69,37 @@ func init() { cmdRoot.AddCommand(cmdRestore) flags := cmdRestore.Flags() - flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") - flags.StringArrayVar(&restoreOptions.InsensitiveExclude, "iexclude", nil, "same as --exclude but ignores the casing of `pattern`") - flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)") - flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as --include but ignores the casing of `pattern`") flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to") + restoreOptions.ExcludePatternOptions.Add(flags) + restoreOptions.IncludePatternOptions.Add(flags) + + flags.StringArrayVar(&restoreOptions.ExcludeXattrPattern, "exclude-xattr", nil, "exclude xattr by `pattern` (can be specified multiple times)") + flags.StringArrayVar(&restoreOptions.IncludeXattrPattern, "include-xattr", nil, "include xattr by `pattern` (can be specified multiple times)") + initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) + flags.BoolVar(&restoreOptions.DryRun, "dry-run", false, "do not write any data, just show what would be done") flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content") + flags.Var(&restoreOptions.Overwrite, "overwrite", "overwrite behavior, one of (always|if-changed|if-newer|never) (default: always)") + flags.BoolVar(&restoreOptions.Delete, "delete", false, "delete files from target directory if they do not exist in snapshot. 
Use '--dry-run -vv' to check what would be deleted") } func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0 - hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0 - - // Validate provided patterns - if len(opts.Exclude) > 0 { - if err := filter.ValidatePatterns(opts.Exclude); err != nil { - return errors.Fatalf("--exclude: %s", err) - } - } - if len(opts.InsensitiveExclude) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveExclude); err != nil { - return errors.Fatalf("--iexclude: %s", err) - } - } - if len(opts.Include) > 0 { - if err := filter.ValidatePatterns(opts.Include); err != nil { - return errors.Fatalf("--include: %s", err) - } - } - if len(opts.InsensitiveInclude) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveInclude); err != nil { - return errors.Fatalf("--iinclude: %s", err) - } + excludePatternFns, err := opts.ExcludePatternOptions.CollectPatterns(Warnf) + if err != nil { + return err } - for i, str := range opts.InsensitiveExclude { - opts.InsensitiveExclude[i] = strings.ToLower(str) + includePatternFns, err := opts.IncludePatternOptions.CollectPatterns(Warnf) + if err != nil { + return err } - for i, str := range opts.InsensitiveInclude { - opts.InsensitiveInclude[i] = strings.ToLower(str) - } + hasExcludes := len(excludePatternFns) > 0 + hasIncludes := len(includePatternFns) > 0 switch { case len(args) == 0: @@ -123,23 +116,23 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return errors.Fatal("exclude and include patterns are mutually exclusive") } + if opts.DryRun && opts.Verify { + return errors.Fatal("--dry-run and --verify are mutually exclusive") + } + + if opts.Delete && filepath.Clean(opts.Target) == "/" && !hasExcludes && !hasIncludes { + return errors.Fatal("'--target / --delete' must be combined with an include or exclude filter") + } + snapshotIDString := args[0] debug.Log("restore %v to %v", snapshotIDString, opts.Target) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, @@ -164,59 +157,64 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg := ui.NewMessage(term, gopts.verbosity) var printer restoreui.ProgressPrinter if gopts.JSON { - printer = restoreui.NewJSONProgress(term) + printer = restoreui.NewJSONProgress(term, gopts.verbosity) } else { - printer = restoreui.NewTextProgress(term) + printer = restoreui.NewTextProgress(term, gopts.verbosity) } progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON)) - res := restorer.NewRestorer(repo, sn, opts.Sparse, progress) + res := restorer.NewRestorer(repo, sn, restorer.Options{ + DryRun: opts.DryRun, + Sparse: opts.Sparse, + Progress: progress, + Overwrite: opts.Overwrite, + Delete: opts.Delete, + }) totalErrors := 0 res.Error = func(location string, err error) error { - msg.E("ignoring error for %s: %s\n", location, err) totalErrors++ - return nil + return progress.Error(location, err) + } + res.Warn = func(message string) { + msg.E("Warning: %s\n", message) } - 
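The new sanity checks in `runRestore` deserve a note: besides rejecting `--dry-run` together with `--verify`, the command now refuses `--delete` against a target of the filesystem root unless an include or exclude filter narrows the restore. A minimal sketch of that guard, with the flag plumbing omitted:

```go
package main

import (
	"errors"
	"fmt"
	"path/filepath"
)

// validateDelete mirrors the guard added in runRestore: deleting files that
// are absent from the snapshot is too dangerous to point at "/" unless an
// include or exclude filter limits the blast radius.
func validateDelete(target string, deleteFlag, hasIncludes, hasExcludes bool) error {
	if deleteFlag && filepath.Clean(target) == "/" && !hasIncludes && !hasExcludes {
		return errors.New("'--target / --delete' must be combined with an include or exclude filter")
	}
	return nil
}

func main() {
	fmt.Println(validateDelete("/", true, false, false))    // refused
	fmt.Println(validateDelete("/srv", true, false, false)) // <nil>
}
```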
excludePatterns := filter.ParsePatterns(opts.Exclude) - insensitiveExcludePatterns := filter.ParsePatterns(opts.InsensitiveExclude) - selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - matched, err := filter.List(excludePatterns, item) - if err != nil { - msg.E("error for exclude pattern: %v", err) - } - - matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item)) - if err != nil { - msg.E("error for iexclude pattern: %v", err) + selectExcludeFilter := func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + matched := false + for _, rejectFn := range excludePatternFns { + matched = matched || rejectFn(item) + + // implementing a short-circuit here to improve the performance + // to prevent additional pattern matching once the first pattern + // matches. + if matched { + break + } } - // An exclude filter is basically a 'wildcard but foo', // so even if a childMayMatch, other children of a dir may not, // therefore childMayMatch does not matter, but we should not go down // unless the dir is selected for restore - selectedForRestore = !matched && !matchedInsensitive - childMayBeSelected = selectedForRestore && node.Type == "dir" + selectedForRestore = !matched + childMayBeSelected = selectedForRestore && isDir return selectedForRestore, childMayBeSelected } - includePatterns := filter.ParsePatterns(opts.Include) - insensitiveIncludePatterns := filter.ParsePatterns(opts.InsensitiveInclude) - selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - matched, childMayMatch, err := filter.ListWithChild(includePatterns, item) - if err != nil { - msg.E("error for include pattern: %v", err) + selectIncludeFilter := func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + selectedForRestore = false + childMayBeSelected = false + for _, includeFn := range includePatternFns { + matched, childMayMatch := includeFn(item) + selectedForRestore = selectedForRestore || matched + childMayBeSelected = childMayBeSelected || childMayMatch + + if selectedForRestore && childMayBeSelected { + break + } } - - matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item)) - if err != nil { - msg.E("error for iexclude pattern: %v", err) - } - - selectedForRestore = matched || matchedInsensitive - childMayBeSelected = (childMayMatch || childMayMatchInsensitive) && node.Type == "dir" + childMayBeSelected = childMayBeSelected && isDir return selectedForRestore, childMayBeSelected } @@ -227,11 +225,16 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, res.SelectFilter = selectIncludeFilter } + res.XattrSelectFilter, err = getXattrSelectFilter(opts) + if err != nil { + return err + } + if !gopts.JSON { msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target) } - err = res.RestoreTo(ctx, opts.Target) + countRestoredFiles, err := res.RestoreTo(ctx, opts.Target) if err != nil { return err } @@ -248,7 +251,8 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } var count int t0 := time.Now() - count, err = res.VerifyFiles(ctx, opts.Target) + bar := newTerminalProgressMax(!gopts.Quiet && !gopts.JSON && stdoutIsTerminal(), 0, "files verified", term) + count, err = res.VerifyFiles(ctx, opts.Target, countRestoredFiles, bar) if err != nil { return err } @@ -264,3 +268,38 
@@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return nil } + +func getXattrSelectFilter(opts RestoreOptions) (func(xattrName string) bool, error) { + hasXattrExcludes := len(opts.ExcludeXattrPattern) > 0 + hasXattrIncludes := len(opts.IncludeXattrPattern) > 0 + + if hasXattrExcludes && hasXattrIncludes { + return nil, errors.Fatal("exclude and include xattr patterns are mutually exclusive") + } + + if hasXattrExcludes { + if err := filter.ValidatePatterns(opts.ExcludeXattrPattern); err != nil { + return nil, errors.Fatalf("--exclude-xattr: %s", err) + } + + return func(xattrName string) bool { + shouldReject := filter.RejectByPattern(opts.ExcludeXattrPattern, Warnf)(xattrName) + return !shouldReject + }, nil + } + + if hasXattrIncludes { + // User has either input include xattr pattern(s) or we're using our default include pattern + if err := filter.ValidatePatterns(opts.IncludeXattrPattern); err != nil { + return nil, errors.Fatalf("--include-xattr: %s", err) + } + + return func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern(opts.IncludeXattrPattern, Warnf)(xattrName) + return shouldInclude + }, nil + } + + // default to including all xattrs + return func(_ string) bool { return true }, nil +} diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 2c7cbe1fb67..945c24a37e5 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -4,30 +4,30 @@ import ( "context" "fmt" "io" - mrand "math/rand" + "math/rand" "os" "path/filepath" + "strings" "syscall" "testing" "time" - "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" ) -func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) { +func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID string) { testRunRestoreExcludes(t, opts, dir, snapshotID, nil) } -func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { +func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID string, excludes []string) { opts := RestoreOptions{ - Target: dir, - Exclude: excludes, + Target: dir, } + opts.Excludes = excludes - rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID, opts, gopts)) } func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error { @@ -50,22 +50,132 @@ func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths [ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) { opts := RestoreOptions{ - Target: dir, - Include: includes, + Target: dir, + } + opts.Includes = includes + + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) +} + +func testRunRestoreIncludesFromFile(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includesFile string) { + opts := RestoreOptions{ + Target: dir, + } + opts.IncludeFiles = []string{includesFile} + + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) +} + +func testRunRestoreExcludesFromFile(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludesFile string) { + opts := RestoreOptions{ + Target: dir, } + 
opts.ExcludeFiles = []string{excludesFile} rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) } +func TestRestoreMustFailWhenUsingBothIncludesAndExcludes(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + // Add both include and exclude patterns + includePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} + excludePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} + + restoredir := filepath.Join(env.base, "restore") + + restoreOpts := RestoreOptions{ + Target: restoredir, + } + restoreOpts.Includes = includePatterns + restoreOpts.Excludes = excludePatterns + + err := testRunRestoreAssumeFailure("latest", restoreOpts, env.gopts) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "exclude and include patterns are mutually exclusive"), + "expected: %s error, got %v", "exclude and include patterns are mutually exclusive", err) +} + +func TestRestoreIncludes(t *testing.T) { + testfiles := []struct { + path string + size uint + include bool // Whether this file should be included in the restore + }{ + {"dir1/include_me.txt", 100, true}, + {"dir1/something_else.txt", 200, false}, + {"dir2/also_include_me.txt", 150, true}, + {"dir2/important_file.txt", 150, true}, + {"dir3/not_included.txt", 180, false}, + {"dir4/subdir/should_include_me.txt", 120, true}, + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + // Create test files and directories + for _, testFile := range testfiles { + fullPath := filepath.Join(env.testdata, testFile.path) + rtest.OK(t, os.MkdirAll(filepath.Dir(fullPath), 0755)) + rtest.OK(t, appendRandomData(fullPath, testFile.size)) + } + + opts := BackupOptions{} + + // Perform backup + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // Restore using includes + includePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} + restoredir := filepath.Join(env.base, "restore") + testRunRestoreIncludes(t, env.gopts, restoredir, snapshotID, includePatterns) + + testRestoreFileInclusions := func(t *testing.T) { + // Check that only the included files are restored + for _, testFile := range testfiles { + restoredFilePath := filepath.Join(restoredir, "testdata", testFile.path) + _, err := os.Stat(restoredFilePath) + if testFile.include { + rtest.OK(t, err) + } else { + rtest.Assert(t, os.IsNotExist(err), "File %s should not have been restored", testFile.path) + } + } + } + + testRestoreFileInclusions(t) + + // Create an include file with some patterns + patternsFile := env.base + "/patternsFile" + fileErr := os.WriteFile(patternsFile, []byte(strings.Join(includePatterns, "\n")), 0644) + if fileErr != nil { + t.Fatalf("Could not write include file: %v", fileErr) + } + + restoredir = filepath.Join(env.base, "restore-include-from-file") + + testRunRestoreIncludesFromFile(t, env.gopts, restoredir, snapshotID, patternsFile) + + testRestoreFileInclusions(t) +} + func TestRestoreFilter(t *testing.T) { testfiles := []struct { - name string - size uint + name string + size uint + exclude bool }{ - {"testfile1.c", 100}, - {"testfile2.exe", 101}, - {"subdir1/subdir2/testfile3.docx", 102}, - {"subdir1/subdir2/testfile4.c", 102}, + {"testfile1.c", 100, true}, + {"testfile2.exe", 101, true}, + {"subdir1/subdir2/testfile3.docx", 102, true}, + 
{"subdir1/subdir2/testfile4.c", 102, false}, } env, cleanup := withTestEnvironment(t) @@ -87,24 +197,43 @@ func TestRestoreFilter(t *testing.T) { snapshotID := testListSnapshots(t, env.gopts, 1)[0] // no restore filter should restore all files - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID.String()) for _, testFile := range testfiles { rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) } - for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} { - base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) - testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) + excludePatterns := []string{"testfile1.c", "*.exe", "*file3*"} + + // checks if the files are excluded correctly + testRestoredFileExclusions := func(t *testing.T, restoredir string) { for _, testFile := range testfiles { - err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) - if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { - rtest.OK(t, err) + restoredFilePath := filepath.Join(restoredir, "testdata", testFile.name) + _, err := os.Stat(restoredFilePath) + if testFile.exclude { + rtest.Assert(t, os.IsNotExist(err), "File %s should not have been restored", testFile.name) } else { - rtest.Assert(t, os.IsNotExist(err), - "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) + rtest.OK(t, testFileSize(restoredFilePath, int64(testFile.size))) } } } + + // restore with excludes + restoredir := filepath.Join(env.base, "restore-with-excludes") + testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID.String(), excludePatterns) + testRestoredFileExclusions(t, restoredir) + + // Create an exclude file with some patterns + patternsFile := env.base + "/patternsFile" + fileErr := os.WriteFile(patternsFile, []byte(strings.Join(excludePatterns, "\n")), 0644) + if fileErr != nil { + t.Fatalf("Could not write include file: %v", fileErr) + } + + // restore with excludes from file + restoredir = filepath.Join(env.base, "restore-with-exclude-from-file") + testRunRestoreExcludesFromFile(t, env.gopts, restoredir, snapshotID, patternsFile) + + testRestoredFileExclusions(t, restoredir) } func TestRestore(t *testing.T) { @@ -116,7 +245,7 @@ func TestRestore(t *testing.T) { for i := 0; i < 10; i++ { p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) + rtest.OK(t, appendRandomData(p, uint(rand.Intn(2<<21)))) } opts := BackupOptions{} @@ -210,7 +339,7 @@ func TestRestoreWithPermissionFailure(t *testing.T) { _ = withRestoreGlobalOptions(func() error { globalOptions.stderr = io.Discard - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0].String()) return nil }) @@ -273,35 +402,21 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { "meta data of intermediate directory hasn't been restore") } -func TestRestoreLocalLayout(t *testing.T) { +func TestRestoreDefaultLayout(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() - var tests = []struct { - filename string - layout string - }{ - {"repo-layout-default.tar.gz", ""}, - {"repo-layout-s3legacy.tar.gz", ""}, - {"repo-layout-default.tar.gz", "default"}, 
- {"repo-layout-s3legacy.tar.gz", "s3legacy"}, - } - - for _, test := range tests { - datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename) + datafile := filepath.Join("..", "..", "internal", "backend", "testdata", "repo-layout-default.tar.gz") - rtest.SetupTarTestFixture(t, env.base, datafile) - - env.gopts.extended["local.layout"] = test.layout + rtest.SetupTarTestFixture(t, env.base, datafile) - // check the repo - testRunCheck(t, env.gopts) + // check the repo + testRunCheck(t, env.gopts) - // restore latest snapshot - target := filepath.Join(env.base, "restore") - testRunRestoreLatest(t, env.gopts, target, nil, nil) + // restore latest snapshot + target := filepath.Join(env.base, "restore") + testRunRestoreLatest(t, env.gopts, target, nil, nil) - rtest.RemoveAll(t, filepath.Join(env.base, "repo")) - rtest.RemoveAll(t, target) - } + rtest.RemoveAll(t, filepath.Join(env.base, "repo")) + rtest.RemoveAll(t, target) } diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 8dad492afb4..707f8af9baa 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -2,15 +2,14 @@ package main import ( "context" - "fmt" "time" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/walker" @@ -39,8 +38,13 @@ use the "prune" command. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runRewrite(cmd.Context(), rewriteOptions, globalOptions, args) @@ -84,7 +88,7 @@ type RewriteOptions struct { Metadata snapshotMetadataArgs restic.SnapshotFilter - excludePatternOptions + filter.ExcludePatternOptions } var rewriteOptions RewriteOptions @@ -99,7 +103,7 @@ func init() { f.StringVar(&rewriteOptions.Metadata.Time, "new-time", "", "replace time of the backup") initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true) - initExcludePatternOptions(f, &rewriteOptions.excludePatternOptions) + rewriteOptions.ExcludePatternOptions.Add(f) } type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) @@ -109,7 +113,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti return false, errors.Errorf("snapshot %v has nil tree", sn.ID().Str()) } - rejectByNameFuncs, err := opts.excludePatternOptions.CollectPatterns() + rejectByNameFuncs, err := opts.ExcludePatternOptions.CollectPatterns(Warnf) if err != nil { return false, err } @@ -132,22 +136,31 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti return true } - rewriter := walker.NewTreeRewriter(walker.RewriteOpts{ - RewriteNode: func(node *restic.Node, path string) *restic.Node { - if selectByName(path) { - return node - } - Verbosef(fmt.Sprintf("excluding %s\n", path)) - return nil - }, - DisableNodeCache: true, - }) + rewriteNode := func(node *restic.Node, path string) *restic.Node { + if selectByName(path) { + return node + } + Verbosef("excluding %s\n", path) + return nil + } + + rewriter, querySize := walker.NewSnapshotSizeRewriter(rewriteNode) filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) { - return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) + id, err := rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) + if err != nil { + return restic.ID{}, err + } + ss := querySize() + if sn.Summary != nil { + sn.Summary.TotalFilesProcessed = ss.FileCount + sn.Summary.TotalBytesProcessed = ss.FileSize + } + return id, err } + } else { - filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) { + filter = func(_ context.Context, sn *restic.Snapshot) (restic.ID, error) { return *sn.Tree, nil } } @@ -181,8 +194,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r if dryRun { Verbosef("would delete empty snapshot\n") } else { - h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} - if err = repo.Backend().Remove(ctx, h); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("removed empty snapshot %v", sn.ID()) @@ -241,8 +253,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r Verbosef("saved new snapshot %v\n", id.Str()) if forget { - h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} - if err = repo.Backend().Remove(ctx, h); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("removed old snapshot %v", sn.ID()) @@ -252,31 +263,26 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r } func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, args []string) error { - if opts.excludePatternOptions.Empty() && opts.Metadata.empty() { + if opts.ExcludePatternOptions.Empty() && 
opts.Metadata.empty() { return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } + var ( + repo *repository.Repository + unlock func() + err error + ) - if !opts.DryRun { - var lock *restic.Lock - var err error - if opts.Forget { - Verbosef("create exclusive lock for repository\n") - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - } else { - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - } - defer unlockRepo(lock) - if err != nil { - return err - } + if opts.Forget { + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun) } else { - repo.SetDryRun() + ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun) } + if err != nil { + return err + } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -299,6 +305,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 532855f5776..6471d49ba21 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -5,13 +5,15 @@ import ( "path/filepath" "testing" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" ) func testRunRewriteExclude(t testing.TB, gopts GlobalOptions, excludes []string, forget bool, metadata snapshotMetadataArgs) { opts := RewriteOptions{ - excludePatternOptions: excludePatternOptions{ + ExcludePatternOptions: filter.ExcludePatternOptions{ Excludes: excludes, }, Forget: forget, @@ -33,6 +35,24 @@ func createBasicRewriteRepo(t testing.TB, env *testEnvironment) restic.ID { return snapshotIDs[0] } +func getSnapshot(t testing.TB, snapshotID restic.ID, env *testEnvironment) *restic.Snapshot { + t.Helper() + + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) + rtest.OK(t, err) + + for _, s := range snapshots { + if *s.ID() == snapshotID { + return s + } + } + return nil +} + func TestRewrite(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -63,10 +83,21 @@ func TestRewriteReplace(t *testing.T) { defer cleanup() snapshotID := createBasicRewriteRepo(t, env) + snapshot := getSnapshot(t, snapshotID, env) + // exclude some data testRunRewriteExclude(t, env.gopts, []string{"3"}, true, snapshotMetadataArgs{Hostname: "", Time: ""}) + bytesExcluded, err := ui.ParseBytes("16K") + rtest.OK(t, err) + newSnapshotIDs := testListSnapshots(t, env.gopts, 1) rtest.Assert(t, snapshotID != newSnapshotIDs[0], "snapshot id should have changed") + + newSnapshot := getSnapshot(t, newSnapshotIDs[0], env) + + rtest.Equals(t, snapshot.Summary.TotalFilesProcessed-1, newSnapshot.Summary.TotalFilesProcessed, "snapshot file count should have changed") + rtest.Equals(t, snapshot.Summary.TotalBytesProcessed-uint64(bytesExcluded), newSnapshot.Summary.TotalBytesProcessed, "snapshot size should have changed") + // check forbids unused blobs, thus remove them first testRunPrune(t, env.gopts, PruneOptions{MaxUnused: "0"}) 
testRunCheck(t, env.gopts) @@ -78,8 +109,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) { createBasicRewriteRepo(t, env) testRunRewriteExclude(t, env.gopts, []string{}, true, metadata) - repo, _ := OpenRepository(context.TODO(), env.gopts) - snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) rtest.OK(t, err) rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots)) newSnapshot := snapshots[0] diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go index 4b86c416f41..09c86bf2ca9 100644 --- a/cmd/restic/cmd_self_update.go +++ b/cmd/restic/cmd_self_update.go @@ -24,7 +24,11 @@ files. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index e94f2ed9ba3..f935cec86c7 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/table" "github.com/spf13/cobra" ) @@ -22,8 +23,13 @@ The "snapshots" command lists all snapshots stored in the repository. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runSnapshots(cmd.Context(), snapshotOptions, globalOptions, args) @@ -58,36 +64,35 @@ func init() { } func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var snapshots restic.Snapshots for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy) if err != nil { return err } for k, list := range snapshotGroups { + if ctx.Err() != nil { + return ctx.Err() + } + if opts.Last { // This branch should be removed in the same time // that --last. 
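The test above pins down the observable effect of the new `walker.NewSnapshotSizeRewriter` plumbing: after excluding one 16 KiB file, the rewritten snapshot's summary shrinks by exactly one file and 16 KiB. A simplified sketch of how a size-tracking rewriter can report totals back to the caller — the closure pair mirrors the `querySize` pattern in the hunk, with the types reduced to the essentials:

```go
package main

import "fmt"

type node struct {
	name string
	size uint64
}

type sizeStats struct {
	FileCount uint64
	FileSize  uint64
}

// newSizeRewriter wraps a node filter and counts what survives, so the
// caller can patch the snapshot summary after the walk, as runRewrite does.
func newSizeRewriter(keep func(n node) bool) (rewrite func(n node) *node, querySize func() sizeStats) {
	var ss sizeStats
	rewrite = func(n node) *node {
		if !keep(n) {
			return nil // excluded from the rewritten tree
		}
		ss.FileCount++
		ss.FileSize += n.size
		return &n
	}
	querySize = func() sizeStats { return ss }
	return rewrite, querySize
}

func main() {
	rewrite, querySize := newSizeRewriter(func(n node) bool { return n.name != "3" })
	for _, n := range []node{{"1", 8192}, {"2", 8192}, {"3", 16384}} {
		rewrite(n)
	}
	fmt.Printf("%+v\n", querySize()) // {FileCount:2 FileSize:16384}
}
```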
- list = FilterLastestSnapshots(list, 1) + list = FilterLatestSnapshots(list, 1) } else if opts.Latest > 0 { - list = FilterLastestSnapshots(list, opts.Latest) + list = FilterLatestSnapshots(list, opts.Latest) } sort.Sort(sort.Reverse(list)) snapshotGroups[k] = list @@ -102,6 +107,10 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions } for k, list := range snapshotGroups { + if ctx.Err() != nil { + return ctx.Err() + } + if grouped { err := PrintSnapshotGroupHeader(globalOptions.stdout, k) if err != nil { @@ -130,11 +139,11 @@ func newFilterLastSnapshotsKey(sn *restic.Snapshot) filterLastSnapshotsKey { return filterLastSnapshotsKey{sn.Hostname, strings.Join(paths, "|")} } -// FilterLastestSnapshots filters a list of snapshots to only return +// FilterLatestSnapshots filters a list of snapshots to only return // the limit last entries for each hostname and path. If the snapshot // contains multiple paths, they will be joined and treated as one // item. -func FilterLastestSnapshots(list restic.Snapshots, limit int) restic.Snapshots { +func FilterLatestSnapshots(list restic.Snapshots, limit int) restic.Snapshots { // Sort the snapshots so that the newer ones are listed first sort.SliceStable(list, func(i, j int) bool { return list[i].Time.After(list[j].Time) @@ -163,6 +172,11 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke keepReasons[*id] = reasons[i] } } + // check if any snapshot contains a summary + hasSize := false + for _, sn := range list { + hasSize = hasSize || (sn.Summary != nil) + } // always sort the snapshots so that the newer ones are listed last sort.SliceStable(list, func(i, j int) bool { @@ -189,6 +203,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke tab.AddColumn("Time", "{{ .Timestamp }}") tab.AddColumn("Host", "{{ .Hostname }}") tab.AddColumn("Tags ", `{{ join .Tags "\n" }}`) + if hasSize { + tab.AddColumn("Size", `{{ .Size }}`) + } } else { tab.AddColumn("ID", "{{ .ID }}") tab.AddColumn("Time", "{{ .Timestamp }}") @@ -198,6 +215,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke tab.AddColumn("Reasons", `{{ join .Reasons "\n" }}`) } tab.AddColumn("Paths", `{{ join .Paths "\n" }}`) + if hasSize { + tab.AddColumn("Size", `{{ .Size }}`) + } } type snapshot struct { @@ -207,6 +227,7 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke Tags []string Reasons []string Paths []string + Size string } var multiline bool @@ -228,6 +249,10 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke multiline = true } + if sn.Summary != nil { + data.Size = ui.FormatBytes(sn.Summary.TotalBytesProcessed) + } + tab.AddRow(data) } @@ -271,7 +296,9 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error { } // Info - fmt.Fprintf(stdout, "snapshots") + if _, err := fmt.Fprintf(stdout, "snapshots"); err != nil { + return err + } var infoStrings []string if key.Hostname != "" { infoStrings = append(infoStrings, "host ["+key.Hostname+"]") @@ -283,11 +310,13 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error { infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]") } if infoStrings != nil { - fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", ")) + if _, err := fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", ")); err != nil { + return err + } } - fmt.Fprintf(stdout, ":\n") + _, err = fmt.Fprintf(stdout, 
":\n") - return nil + return err } // Snapshot helps to print Snapshots as JSON with their ID included. @@ -298,13 +327,13 @@ type Snapshot struct { ShortID string `json:"short_id"` } -// SnapshotGroup helps to print SnaphotGroups as JSON with their GroupReasons included. +// SnapshotGroup helps to print SnapshotGroups as JSON with their GroupReasons included. type SnapshotGroup struct { GroupKey restic.SnapshotGroupKey `json:"group_key"` Snapshots []Snapshot `json:"snapshots"` } -// printSnapshotsJSON writes the JSON representation of list to stdout. +// printSnapshotGroupJSON writes the JSON representation of list to stdout. func printSnapshotGroupJSON(stdout io.Writer, snGroups map[string]restic.Snapshots, grouped bool) error { if grouped { snapshotGroups := []SnapshotGroup{} diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 1bece21d0af..e0b60a29ebf 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -2,6 +2,7 @@ package main import ( "context" + "crypto/sha256" "encoding/json" "fmt" "path/filepath" @@ -16,7 +17,6 @@ import ( "github.com/restic/restic/internal/ui/table" "github.com/restic/restic/internal/walker" - "github.com/minio/sha256-simd" "github.com/spf13/cobra" ) @@ -38,7 +38,7 @@ depending on what you are trying to calculate. The modes are: * restore-size: (default) Counts the size of the restored files. -* files-by-contents: Counts total size of files, where a file is +* files-by-contents: Counts total size of unique files, where a file is considered unique if it has unique contents. * raw-data: Counts the size of blobs in the repository, regardless of how many files reference them. @@ -49,8 +49,13 @@ Refer to the online manual for more details about each mode. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runStats(cmd.Context(), statsOptions, globalOptions, args) @@ -67,10 +72,20 @@ type StatsOptions struct { var statsOptions StatsOptions +func must(err error) { + if err != nil { + panic(fmt.Sprintf("error during setup: %v", err)) + } +} + func init() { cmdRoot.AddCommand(cmdStats) f := cmdStats.Flags() f.StringVar(&statsOptions.countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data") + must(cmdStats.RegisterFlagCompletionFunc("mode", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{countModeRestoreSize, countModeUniqueFilesByContents, countModeBlobsPerFile, countModeRawData}, cobra.ShellCompDirectiveDefault + })) + initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true) } @@ -80,19 +95,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -125,15 +132,14 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return fmt.Errorf("error walking snapshot: %v", err) } } - - if err != nil { - return err + if ctx.Err() != nil { + return ctx.Err() } if opts.countMode == countModeRawData { // the blob handles have been collected, but not yet counted for blobHandle := range stats.blobs { - pbs := repo.Index().Lookup(blobHandle) + pbs := repo.LookupBlob(blobHandle.Type, blobHandle.ID) if len(pbs) == 0 { return fmt.Errorf("blob %v not found", blobHandle) } @@ -247,7 +253,7 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, } if _, ok := stats.fileBlobs[nodePath][blobID]; !ok { // is always a data blob since we're accessing it via a file's Content array - blobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob) + blobSize, found := repo.LookupBlobSize(restic.DataBlob, blobID) if !found { return fmt.Errorf("blob %s not found for tree %s", blobID, parentTreeID) } @@ -270,11 +276,14 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, // will still be restored stats.TotalFileCount++ - // if inodes are present, only count each inode once - // (hard links do not increase restore size) - if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { - hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + if node.Links == 1 || node.Type == restic.NodeTypeDir { stats.TotalSize += node.Size + } else { + // if hardlinks are present only count each deviceID+inode once + if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { + hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + stats.TotalSize += node.Size + } } } @@ -357,7 +366,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error { Warnf("File Type: %v\n%v\n", t, hist) } - hist := statsDebugBlobs(ctx, repo) + hist, err := statsDebugBlobs(ctx, repo) + if err != nil { + return err + } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { Warnf("Blob Type: %v\n%v\n\n", t, hist[t]) } @@ 
-367,7 +379,7 @@ func statsDebug(ctx context.Context, repo restic.Repository) error { func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.FileType) (*sizeHistogram, error) { hist := newSizeHistogram(2 * repository.MaxPackSize) - err := repo.List(ctx, tpe, func(id restic.ID, size int64) error { + err := repo.List(ctx, tpe, func(_ restic.ID, size int64) error { hist.Add(uint64(size)) return nil }) @@ -375,17 +387,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File return hist, err } -func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram { +func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) { var hist [restic.NumBlobTypes]*sizeHistogram for i := 0; i < len(hist); i++ { hist[i] = newSizeHistogram(2 * chunker.MaxSize) } - repo.Index().Each(ctx, func(pb restic.PackedBlob) { + err := repo.ListBlobs(ctx, func(pb restic.PackedBlob) { hist[pb.Type].Add(uint64(pb.Length)) }) - return hist + return hist, err } type sizeClass struct { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 01f3ad8afef..f71e2556c45 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -5,11 +5,12 @@ import ( "github.com/spf13/cobra" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" + "github.com/restic/restic/internal/ui/termstatus" ) var cmdTag = &cobra.Command{ @@ -26,11 +27,18 @@ When no snapshotID is given, all snapshots matching the host, tag and path filte EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runTag(cmd.Context(), tagOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runTag(cmd.Context(), tagOptions, globalOptions, term, args) }, } @@ -54,7 +62,18 @@ func init() { initMultiSnapshotFilter(tagFlags, &tagOptions.SnapshotFilter, true) } -func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) { +type changedSnapshot struct { + MessageType string `json:"message_type"` // changed + OldSnapshotID restic.ID `json:"old_snapshot_id"` + NewSnapshotID restic.ID `json:"new_snapshot_id"` +} + +type changedSnapshotsSummary struct { + MessageType string `json:"message_type"` // summary + ChangedSnapshots int `json:"changed_snapshots"` +} + +func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string, printFunc func(changedSnapshot)) (bool, error) { var changed bool if len(setTags) != 0 { @@ -83,20 +102,21 @@ func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Sna return false, err } - debug.Log("new snapshot saved as %v", id) + debug.Log("old snapshot %v saved as a new snapshot %v", sn.ID(), id) // Remove the old snapshot. 
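Further down, the tag command gains structured output: one `"changed"` message per retagged snapshot plus a trailing `"summary"`, with the print functions swapped for JSON emitters when `--json` is set. A self-contained sketch of those message shapes and the print-function switch — plain `encoding/json` replaces `ui.ToJSONString` and the termstatus terminal here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type changedSnapshot struct {
	MessageType   string `json:"message_type"` // "changed"
	OldSnapshotID string `json:"old_snapshot_id"`
	NewSnapshotID string `json:"new_snapshot_id"`
}

type changedSnapshotsSummary struct {
	MessageType      string `json:"message_type"` // "summary"
	ChangedSnapshots int    `json:"changed_snapshots"`
}

func main() {
	jsonMode := true // stands in for gopts.JSON

	// default: human-readable output
	printChanged := func(c changedSnapshot) {
		fmt.Printf("old snapshot ID: %v -> new snapshot ID: %v\n", c.OldSnapshotID, c.NewSnapshotID)
	}
	if jsonMode {
		enc := json.NewEncoder(os.Stdout)
		printChanged = func(c changedSnapshot) { _ = enc.Encode(c) }
	}

	printChanged(changedSnapshot{MessageType: "changed", OldSnapshotID: "aaa", NewSnapshotID: "bbb"})
	_ = json.NewEncoder(os.Stdout).Encode(changedSnapshotsSummary{MessageType: "summary", ChangedSnapshots: 1})
}
```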
- h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} - if err = repo.Backend().Remove(ctx, h); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("old snapshot %v removed", sn.ID()) + + printFunc(changedSnapshot{MessageType: "changed", OldSnapshotID: *sn.ID(), NewSnapshotID: id}) } return changed, nil } -func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []string) error { +func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { if len(opts.SetTags) == 0 && len(opts.AddTags) == 0 && len(opts.RemoveTags) == 0 { return errors.Fatal("nothing to do!") } @@ -104,36 +124,51 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st return errors.Fatal("--set and --add/--remove cannot be given at the same time") } - repo, err := OpenRepository(ctx, gopts) + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err + printFunc := func(c changedSnapshot) { + Verboseff("old snapshot ID: %v -> new snapshot ID: %v\n", c.OldSnapshotID, c.NewSnapshotID) + } + + summary := changedSnapshotsSummary{MessageType: "summary", ChangedSnapshots: 0} + printSummary := func(c changedSnapshotsSummary) { + if c.ChangedSnapshots == 0 { + Verbosef("no snapshots were modified\n") + } else { + Verbosef("modified %v snapshots\n", c.ChangedSnapshots) + } + } + + if gopts.JSON { + printFunc = func(c changedSnapshot) { + term.Print(ui.ToJSONString(c)) + } + printSummary = func(c changedSnapshotsSummary) { + term.Print(ui.ToJSONString(c)) } } - changeCnt := 0 for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { - changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten()) + changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten(), printFunc) if err != nil { Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err) continue } if changed { - changeCnt++ + summary.ChangedSnapshots++ } } - if changeCnt == 0 { - Verbosef("no snapshots were modified\n") - } else { - Verbosef("modified tags on %v snapshots\n", changeCnt) + + if ctx.Err() != nil { + return ctx.Err() } + + printSummary(summary) + return nil } diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go index 3b902c51e2b..53360ca84a6 100644 --- a/cmd/restic/cmd_tag_integration_test.go +++ b/cmd/restic/cmd_tag_integration_test.go @@ -9,9 +9,10 @@ import ( ) func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { - rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) + rtest.OK(t, runTag(context.TODO(), opts, gopts, nil, []string{})) } +// nolint: staticcheck // false positive nil pointer dereference check func TestTag(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go index 7b449d949da..825eb815c10 100644 --- a/cmd/restic/cmd_unlock.go +++ b/cmd/restic/cmd_unlock.go @@ -3,7 +3,7 @@ package main import ( "context" - 
"github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/repository" "github.com/spf13/cobra" ) @@ -16,10 +16,12 @@ The "unlock" command removes stale locks that have been created by other restic EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return runUnlock(cmd.Context(), unlockOptions, globalOptions) }, } @@ -43,9 +45,9 @@ func runUnlock(ctx context.Context, opts UnlockOptions, gopts GlobalOptions) err return err } - fn := restic.RemoveStaleLocks + fn := repository.RemoveStaleLocks if opts.RemoveAll { - fn = restic.RemoveAllLocks + fn = repository.RemoveAllLocks } processed, err := fn(ctx, repo) diff --git a/cmd/restic/cmd_version.go b/cmd/restic/cmd_version.go index 73469750f75..daf984a8532 100644 --- a/cmd/restic/cmd_version.go +++ b/cmd/restic/cmd_version.go @@ -18,23 +18,26 @@ and the version of this software. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { if globalOptions.JSON { type jsonVersion struct { - Version string `json:"version"` - GoVersion string `json:"go_version"` - GoOS string `json:"go_os"` - GoArch string `json:"go_arch"` + MessageType string `json:"message_type"` // version + Version string `json:"version"` + GoVersion string `json:"go_version"` + GoOS string `json:"go_os"` + GoArch string `json:"go_arch"` } jsonS := jsonVersion{ - Version: version, - GoVersion: runtime.Version(), - GoOS: runtime.GOOS, - GoArch: runtime.GOARCH, + MessageType: "version", + Version: version, + GoVersion: runtime.Version(), + GoOS: runtime.GOOS, + GoArch: runtime.GOARCH, } err := json.NewEncoder(globalOptions.stdout).Encode(jsonS) diff --git a/cmd/restic/delete.go b/cmd/restic/delete.go deleted file mode 100644 index c3a7e039dd7..00000000000 --- a/cmd/restic/delete.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "context" - - "github.com/restic/restic/internal/restic" -) - -// DeleteFiles deletes the given fileList of fileType in parallel -// it will print a warning if there is an error, but continue deleting the remaining files -func DeleteFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) { - _ = deleteFiles(ctx, gopts, true, repo, fileList, fileType) -} - -// DeleteFilesChecked deletes the given fileList of fileType in parallel -// if an error occurs, it will cancel and return this error -func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - return deleteFiles(ctx, gopts, false, repo, fileList, fileType) -} - -// deleteFiles deletes the given fileList of fileType in parallel -// if ignoreError=true, it will print a warning if there was an error, else it will abort. 
-func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") - defer bar.Done() - - return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { - if err != nil { - if !gopts.JSON { - Warnf("unable to remove %v/%v from the repository\n", fileType, id) - } - if !ignoreError { - return err - } - } - if !gopts.JSON && gopts.verbosity > 2 { - Verbosef("removed %v/%v\n", fileType, id) - } - return nil - }, bar) -} diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 09594461096..1c05f4abba8 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -1,353 +1,22 @@ package main import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - + "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/textfile" - "github.com/restic/restic/internal/ui" - "github.com/spf13/pflag" ) -type rejectionCache struct { - m map[string]bool - mtx sync.Mutex -} - -// Lock locks the mutex in rc. -func (rc *rejectionCache) Lock() { - if rc != nil { - rc.mtx.Lock() - } -} - -// Unlock unlocks the mutex in rc. -func (rc *rejectionCache) Unlock() { - if rc != nil { - rc.mtx.Unlock() - } -} - -// Get returns the last stored value for dir and a second boolean that -// indicates whether that value was actually written to the cache. It is the -// callers responsibility to call rc.Lock and rc.Unlock before using this -// method, otherwise data races may occur. -func (rc *rejectionCache) Get(dir string) (bool, bool) { - if rc == nil || rc.m == nil { - return false, false - } - v, ok := rc.m[dir] - return v, ok -} - -// Store stores a new value for dir. It is the callers responsibility to call -// rc.Lock and rc.Unlock before using this method, otherwise data races may -// occur. -func (rc *rejectionCache) Store(dir string, rejected bool) { - if rc == nil { - return - } - if rc.m == nil { - rc.m = make(map[string]bool) - } - rc.m[dir] = rejected -} - -// RejectByNameFunc is a function that takes a filename of a -// file that would be included in the backup. The function returns true if it -// should be excluded (rejected) from the backup. -type RejectByNameFunc func(path string) bool - -// RejectFunc is a function that takes a filename and os.FileInfo of a -// file that would be included in the backup. The function returns true if it -// should be excluded (rejected) from the backup. -type RejectFunc func(path string, fi os.FileInfo) bool - -// rejectByPattern returns a RejectByNameFunc which rejects files that match -// one of the patterns. -func rejectByPattern(patterns []string) RejectByNameFunc { - parsedPatterns := filter.ParsePatterns(patterns) - return func(item string) bool { - matched, err := filter.List(parsedPatterns, item) - if err != nil { - Warnf("error for exclude pattern: %v", err) - } - - if matched { - debug.Log("path %q excluded by an exclude pattern", item) - return true - } - - return false - } -} - -// Same as `rejectByPattern` but case insensitive. 
-func rejectByInsensitivePattern(patterns []string) RejectByNameFunc { - for index, path := range patterns { - patterns[index] = strings.ToLower(path) - } - - rejFunc := rejectByPattern(patterns) - return func(item string) bool { - return rejFunc(strings.ToLower(item)) - } -} - -// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path -// should be excluded. The RejectByNameFunc considers a file to be excluded when -// it resides in a directory with an exclusion file, that is specified by -// excludeFileSpec in the form "filename[:content]". The returned error is -// non-nil if the filename component of excludeFileSpec is empty. If rc is -// non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation -// of a directory based on previous visits. -func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) { - if excludeFileSpec == "" { - return nil, errors.New("name for exclusion tagfile is empty") - } - colon := strings.Index(excludeFileSpec, ":") - if colon == 0 { - return nil, fmt.Errorf("no name for exclusion tagfile provided") - } - tf, tc := "", "" - if colon > 0 { - tf = excludeFileSpec[:colon] - tc = excludeFileSpec[colon+1:] - } else { - tf = excludeFileSpec - } - debug.Log("using %q as exclusion tagfile", tf) - rc := &rejectionCache{} - fn := func(filename string) bool { - return isExcludedByFile(filename, tf, tc, rc) - } - return fn, nil -} - -// isExcludedByFile interprets filename as a path and returns true if that file -// is in an excluded directory. A directory is identified as excluded if it contains a -// tagfile which bears the name specified in tagFilename and starts with -// header. If rc is non-nil, it is used to expedite the evaluation of a -// directory based on previous visits. -func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool { - if tagFilename == "" { - return false - } - dir, base := filepath.Split(filename) - if base == tagFilename { - return false // do not exclude the tagfile itself - } - rc.Lock() - defer rc.Unlock() - - rejected, visited := rc.Get(dir) - if visited { - return rejected - } - rejected = isDirExcludedByFile(dir, tagFilename, header) - rc.Store(dir, rejected) - return rejected -} - -func isDirExcludedByFile(dir, tagFilename, header string) bool { - tf := filepath.Join(dir, tagFilename) - _, err := fs.Lstat(tf) - if os.IsNotExist(err) { - return false - } - if err != nil { - Warnf("could not access exclusion tagfile: %v", err) - return false - } - // when no signature is given, the mere presence of tf is enough reason - // to exclude filename - if len(header) == 0 { - return true - } - // From this stage, errors mean tagFilename exists but it is malformed. - // Warnings will be generated so that the user is informed that the - // indented ignore-action is not performed. 
- f, err := os.Open(tf) - if err != nil { - Warnf("could not open exclusion tagfile: %v", err) - return false - } - defer func() { - _ = f.Close() - }() - buf := make([]byte, len(header)) - _, err = io.ReadFull(f, buf) - // EOF is handled with a dedicated message, otherwise the warning were too cryptic - if err == io.EOF { - Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf) - return false - } - if err != nil { - Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err) - return false - } - if !bytes.Equal(buf, []byte(header)) { - Warnf("invalid signature in exclusion tagfile %q\n", tf) - return false - } - return true -} - -// DeviceMap is used to track allowed source devices for backup. This is used to -// check for crossing mount points during backup (for --one-file-system). It -// maps the name of a source path to its device ID. -type DeviceMap map[string]uint64 - -// NewDeviceMap creates a new device map from the list of source paths. -func NewDeviceMap(allowedSourcePaths []string) (DeviceMap, error) { - deviceMap := make(map[string]uint64) - - for _, item := range allowedSourcePaths { - item, err := filepath.Abs(filepath.Clean(item)) - if err != nil { - return nil, err - } - - fi, err := fs.Lstat(item) - if err != nil { - return nil, err - } - - id, err := fs.DeviceID(fi) - if err != nil { - return nil, err - } - - deviceMap[item] = id - } - - if len(deviceMap) == 0 { - return nil, errors.New("zero allowed devices") - } - - return deviceMap, nil -} - -// IsAllowed returns true if the path is located on an allowed device. -func (m DeviceMap) IsAllowed(item string, deviceID uint64) (bool, error) { - for dir := item; ; dir = filepath.Dir(dir) { - debug.Log("item %v, test dir %v", item, dir) - - // find a parent directory that is on an allowed device (otherwise - // we would not traverse the directory at all) - allowedID, ok := m[dir] - if !ok { - if dir == filepath.Dir(dir) { - // arrived at root, no allowed device found. this should not happen. - break - } - continue - } - - // if the item has a different device ID than the parent directory, - // we crossed a file system boundary - if allowedID != deviceID { - debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID) - return false, nil - } - - // item is on allowed device, accept it - debug.Log("item %v allowed", item) - return true, nil - } - - return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m) -} - -// rejectByDevice returns a RejectFunc that rejects files which are on a -// different file systems than the files/dirs in samples. -func rejectByDevice(samples []string) (RejectFunc, error) { - deviceMap, err := NewDeviceMap(samples) - if err != nil { - return nil, err - } - debug.Log("allowed devices: %v\n", deviceMap) - - return func(item string, fi os.FileInfo) bool { - id, err := fs.DeviceID(fi) - if err != nil { - // This should never happen because gatherDevices() would have - // errored out earlier. If it still does that's a reason to panic. - panic(err) - } - - allowed, err := deviceMap.IsAllowed(filepath.Clean(item), id) - if err != nil { - // this should not happen - panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) - } - - if allowed { - // accept item - return false - } - - // reject everything except directories - if !fi.IsDir() { - return true - } - - // special case: make sure we keep mountpoints (directories which - // contain a mounted file system). 
Test this by checking if the parent - // directory would be included. - parentDir := filepath.Dir(filepath.Clean(item)) - - parentFI, err := fs.Lstat(parentDir) - if err != nil { - debug.Log("item %v: error running lstat() on parent directory: %v", item, err) - // if in doubt, reject - return true - } - - parentDeviceID, err := fs.DeviceID(parentFI) - if err != nil { - debug.Log("item %v: getting device ID of parent directory: %v", item, err) - // if in doubt, reject - return true - } - - parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID) - if err != nil { - debug.Log("item %v: error checking parent directory: %v", item, err) - // if in doubt, reject - return true - } - - if parentAllowed { - // we found a mount point, so accept the directory - return false - } - - // reject everything else - return true - }, nil -} - // rejectResticCache returns a RejectByNameFunc that rejects the restic cache // directory (if set). -func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { - if repo.Cache == nil { +func rejectResticCache(repo *repository.Repository) (archiver.RejectByNameFunc, error) { + if repo.Cache() == nil { return func(string) bool { return false }, nil } - cacheBase := repo.Cache.BaseDir() + cacheBase := repo.Cache().BaseDir() if cacheBase == "" { return nil, errors.New("cacheBase is empty string") @@ -362,137 +31,3 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { return false }, nil } - -func rejectBySize(maxSizeStr string) (RejectFunc, error) { - maxSize, err := ui.ParseBytes(maxSizeStr) - if err != nil { - return nil, err - } - - return func(item string, fi os.FileInfo) bool { - // directory will be ignored - if fi.IsDir() { - return false - } - - filesize := fi.Size() - if filesize > maxSize { - debug.Log("file %s is oversize: %d", item, filesize) - return true - } - - return false - }, nil -} - -// readExcludePatternsFromFiles reads all exclude files and returns the list of -// exclude patterns. For each line, leading and trailing white space is removed -// and comment lines are ignored. For each remaining pattern, environment -// variables are resolved. For adding a literal dollar sign ($), write $$ to -// the file. 
-func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) { - getenvOrDollar := func(s string) string { - if s == "$" { - return "$" - } - return os.Getenv(s) - } - - var excludes []string - for _, filename := range excludeFiles { - err := func() (err error) { - data, err := textfile.Read(filename) - if err != nil { - return err - } - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - - // ignore empty lines - if line == "" { - continue - } - - // strip comments - if strings.HasPrefix(line, "#") { - continue - } - - line = os.Expand(line, getenvOrDollar) - excludes = append(excludes, line) - } - return scanner.Err() - }() - if err != nil { - return nil, err - } - } - return excludes, nil -} - -type excludePatternOptions struct { - Excludes []string - InsensitiveExcludes []string - ExcludeFiles []string - InsensitiveExcludeFiles []string -} - -func initExcludePatternOptions(f *pflag.FlagSet, opts *excludePatternOptions) { - f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") - f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") -} - -func (opts *excludePatternOptions) Empty() bool { - return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0 -} - -func (opts excludePatternOptions) CollectPatterns() ([]RejectByNameFunc, error) { - var fs []RejectByNameFunc - // add patterns from file - if len(opts.ExcludeFiles) > 0 { - excludePatterns, err := readExcludePatternsFromFiles(opts.ExcludeFiles) - if err != nil { - return nil, err - } - - if err := filter.ValidatePatterns(excludePatterns); err != nil { - return nil, errors.Fatalf("--exclude-file: %s", err) - } - - opts.Excludes = append(opts.Excludes, excludePatterns...) - } - - if len(opts.InsensitiveExcludeFiles) > 0 { - excludes, err := readExcludePatternsFromFiles(opts.InsensitiveExcludeFiles) - if err != nil { - return nil, err - } - - if err := filter.ValidatePatterns(excludes); err != nil { - return nil, errors.Fatalf("--iexclude-file: %s", err) - } - - opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...) 
- } - - if len(opts.InsensitiveExcludes) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveExcludes); err != nil { - return nil, errors.Fatalf("--iexclude: %s", err) - } - - fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes)) - } - - if len(opts.Excludes) > 0 { - if err := filter.ValidatePatterns(opts.Excludes); err != nil { - return nil, errors.Fatalf("--exclude: %s", err) - } - - fs = append(fs, rejectByPattern(opts.Excludes)) - } - return fs, nil -} diff --git a/cmd/restic/find.go b/cmd/restic/find.go index a990b458d6f..faf7024e108 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -2,6 +2,7 @@ package main import ( "context" + "os" "github.com/restic/restic/internal/restic" "github.com/spf13/pflag" @@ -14,17 +15,27 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, if !addHostShorthand { hostShorthand = "" } - flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") - flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") + flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times, snapshots must include all specified paths)") + + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } } // initSingleSnapshotFilter is used for commands that work on a single snapshot // MUST be combined with restic.FindFilteredSnapshot func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) { - flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") - flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") + flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times, snapshots must include all specified paths)") + + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } } // FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots. 
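Reviewer note on the find.go hunk above: presetting filt.Hosts from $RESTIC_HOST before flag parsing acts as a true default because pflag's StringArray value replaces the preset slice the first time --host is parsed and only appends on later occurrences, so a value given on the command line always wins over the environment variable. A minimal standalone sketch of that behavior (illustration only, not part of the patch; it uses plain pflag rather than the initSingleSnapshotFilter helper above):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	_ = os.Setenv("RESTIC_HOST", "envhost")

	set := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	var hosts []string
	set.StringArrayVar(&hosts, "host", nil, "host filter")

	// preset from the environment, mirroring the hunk above
	if host := os.Getenv("RESTIC_HOST"); host != "" {
		hosts = []string{host}
	}

	// the first --host replaces the preset instead of appending to it
	_ = set.Parse([]string{"--host", "flaghost"})
	fmt.Println(hosts) // [flaghost]; without --host it prints [envhost]
}

This matches the expectations encoded in the new find_test.go that follows: the "both" case expects [abc], not [def abc].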
diff --git a/cmd/restic/find_test.go b/cmd/restic/find_test.go new file mode 100644 index 00000000000..a98a14f041d --- /dev/null +++ b/cmd/restic/find_test.go @@ -0,0 +1,61 @@ +package main + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/spf13/pflag" +) + +func TestSnapshotFilter(t *testing.T) { + for _, test := range []struct { + name string + args []string + expected []string + env string + }{ + { + "no value", + []string{}, + nil, + "", + }, + { + "args only", + []string{"--host", "abc"}, + []string{"abc"}, + "", + }, + { + "env default", + []string{}, + []string{"def"}, + "def", + }, + { + "both", + []string{"--host", "abc"}, + []string{"abc"}, + "def", + }, + } { + t.Run(test.name, func(t *testing.T) { + t.Setenv("RESTIC_HOST", test.env) + + for _, mode := range []bool{false, true} { + set := pflag.NewFlagSet("test", pflag.PanicOnError) + flt := &restic.SnapshotFilter{} + if mode { + initMultiSnapshotFilter(set, flt, false) + } else { + initSingleSnapshotFilter(set, flt) + } + err := set.Parse(test.args) + rtest.OK(t, err) + + rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts") + } + }) + } +} diff --git a/cmd/restic/format.go b/cmd/restic/format.go index 063cd4e7125..e7d178b4ef5 100644 --- a/cmd/restic/format.go +++ b/cmd/restic/format.go @@ -24,20 +24,20 @@ func formatNode(path string, n *restic.Node, long bool, human bool) string { } switch n.Type { - case "file": + case restic.NodeTypeFile: mode = 0 - case "dir": + case restic.NodeTypeDir: mode = os.ModeDir - case "symlink": + case restic.NodeTypeSymlink: mode = os.ModeSymlink target = fmt.Sprintf(" -> %v", n.LinkTarget) - case "dev": + case restic.NodeTypeDev: mode = os.ModeDevice - case "chardev": + case restic.NodeTypeCharDev: mode = os.ModeDevice | os.ModeCharDevice - case "fifo": + case restic.NodeTypeFifo: mode = os.ModeNamedPipe - case "socket": + case restic.NodeTypeSocket: mode = os.ModeSocket } diff --git a/cmd/restic/format_test.go b/cmd/restic/format_test.go index 689bd27a56a..e232a200b79 100644 --- a/cmd/restic/format_test.go +++ b/cmd/restic/format_test.go @@ -19,7 +19,7 @@ func TestFormatNode(t *testing.T) { testPath := "/test/path" node := restic.Node{ Name: "baz", - Type: "file", + Type: restic.NodeTypeFile, Size: 14680064, UID: 1000, GID: 2000, diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cc47496f36c..bea09837fbc 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -15,6 +15,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/limiter" "github.com/restic/restic/internal/backend/local" @@ -27,9 +28,7 @@ import ( "github.com/restic/restic/internal/backend/sema" "github.com/restic/restic/internal/backend/sftp" "github.com/restic/restic/internal/backend/swift" - "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -43,7 +42,11 @@ import ( "golang.org/x/term" ) -var version = "0.16.4-dev (compiled manually)" +// ErrNoRepository is used to report if opening a repository failed due +// to a missing backend storage location or config file +var
ErrNoRepository = errors.New("repository does not exist") + +var version = "0.17.3-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" @@ -52,22 +55,23 @@ type backendWrapper func(r backend.Backend) (backend.Backend, error) // GlobalOptions hold all global options for restic. type GlobalOptions struct { - Repo string - RepositoryFile string - PasswordFile string - PasswordCommand string - KeyHint string - Quiet bool - Verbose int - NoLock bool - RetryLock time.Duration - JSON bool - CacheDir string - NoCache bool - CleanupCache bool - Compression repository.CompressionMode - PackSize uint - NoExtraVerify bool + Repo string + RepositoryFile string + PasswordFile string + PasswordCommand string + KeyHint string + Quiet bool + Verbose int + NoLock bool + RetryLock time.Duration + JSON bool + CacheDir string + NoCache bool + CleanupCache bool + Compression repository.CompressionMode + PackSize uint + NoExtraVerify bool + InsecureNoPassword bool backend.TransportOptions limiter.Limits @@ -96,9 +100,6 @@ var globalOptions = GlobalOptions{ stderr: os.Stderr, } -var isReadingPassword bool -var internalGlobalCtx context.Context - func init() { backends := location.NewRegistry() backends.Register(azure.NewFactory()) @@ -112,15 +113,6 @@ func init() { backends.Register(swift.NewFactory()) globalOptions.backends = backends - var cancel context.CancelFunc - internalGlobalCtx, cancel = context.WithCancel(context.Background()) - AddCleanupHandler(func(code int) (int, error) { - // Must be called before the unlock cleanup handler to ensure that the latter is - // not blocked due to limited number of backend connections, see #1434 - cancel() - return code, nil - }) - f := cmdRoot.PersistentFlags() f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)") f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)") @@ -137,6 +129,7 @@ func init() { f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache") f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates or $RESTIC_CACERT)") f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)") + f.BoolVar(&globalOptions.InsecureNoPassword, "insecure-no-password", false, "use an empty password for the repository, must be passed to every restic command (insecure)") f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)") f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories") f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)") @@ -145,6 +138,8 @@ func init() { f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. 
(default: unlimited)") f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)") f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)") + f.StringVar(&globalOptions.HTTPUserAgent, "http-user-agent", "", "set a http user agent for outgoing http requests") + f.DurationVar(&globalOptions.StuckRequestTimeout, "stuck-request-timeout", 5*time.Minute, "`duration` after which to retry stuck requests") // Use our "generate" command instead of the cobra provided "completion" command cmdRoot.CompletionOptions.DisableDefaultCmd = true @@ -166,7 +161,9 @@ func init() { targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32) globalOptions.PackSize = uint(targetPackSize) - restoreTerminal() + if os.Getenv("RESTIC_HTTP_USER_AGENT") != "" { + globalOptions.HTTPUserAgent = os.Getenv("RESTIC_HTTP_USER_AGENT") + } } func stdinIsTerminal() bool { @@ -191,40 +188,6 @@ func stdoutTerminalWidth() int { return w } -// restoreTerminal installs a cleanup handler that restores the previous -// terminal state on exit. This handler is only intended to restore the -// terminal configuration if restic exits after receiving a signal. A regular -// program execution must revert changes to the terminal configuration itself. -// The terminal configuration is only restored while reading a password. -func restoreTerminal() { - if !term.IsTerminal(int(os.Stdout.Fd())) { - return - } - - fd := int(os.Stdout.Fd()) - state, err := term.GetState(fd) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) - return - } - - AddCleanupHandler(func(code int) (int, error) { - // Restoring the terminal configuration while restic runs in the - // background, causes restic to get stopped on unix systems with - // a SIGTTOU signal. Thus only restore the terminal settings if - // they might have been modified, which is the case while reading - // a password. - if !isReadingPassword { - return code, nil - } - err := term.Restore(fd, state) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) - } - return code, err - }) -} - // ClearLine creates a platform dependent string to clear the current // line, so it can be overwritten. // @@ -309,11 +272,7 @@ func resolvePassword(opts GlobalOptions, envStr string) (string, error) { return (strings.TrimSpace(string(output))), nil } if opts.PasswordFile != "" { - s, err := textfile.Read(opts.PasswordFile) - if errors.Is(err, os.ErrNotExist) { - return "", errors.Fatalf("%s does not exist", opts.PasswordFile) - } - return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") + return loadPasswordFromFile(opts.PasswordFile) } if pwd := os.Getenv(envStr); pwd != "" { @@ -323,34 +282,81 @@ func resolvePassword(opts GlobalOptions, envStr string) (string, error) { return "", nil } +// loadPasswordFromFile loads a password from a file while stripping a BOM and +// converting the password to UTF-8. +func loadPasswordFromFile(pwdFile string) (string, error) { + s, err := textfile.Read(pwdFile) + if errors.Is(err, os.ErrNotExist) { + return "", errors.Fatalf("%s does not exist", pwdFile) + } + return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") +} + // readPassword reads the password from the given reader directly. 
func readPassword(in io.Reader) (password string, err error) { sc := bufio.NewScanner(in) sc.Scan() - return sc.Text(), errors.Wrap(err, "Scan") + return sc.Text(), errors.WithStack(sc.Err()) } // readPasswordTerminal reads the password from the given reader which must be a // tty. Prompt is printed on the writer out before attempting to read the -// password. -func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) { - fmt.Fprint(out, prompt) - isReadingPassword = true - buf, err := term.ReadPassword(int(in.Fd())) - isReadingPassword = false - fmt.Fprintln(out) +// password. If the context is canceled, the function leaks the password reading +// goroutine. +func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) { + fd := int(out.Fd()) + state, err := term.GetState(fd) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + return "", err + } + + done := make(chan struct{}) + var buf []byte + + go func() { + defer close(done) + _, err = fmt.Fprint(out, prompt) + if err != nil { + return + } + buf, err = term.ReadPassword(int(in.Fd())) + if err != nil { + return + } + _, err = fmt.Fprintln(out) + }() + + select { + case <-ctx.Done(): + err := term.Restore(fd, state) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) + } + return "", ctx.Err() + case <-done: + // clean shutdown, nothing to do + } + if err != nil { return "", errors.Wrap(err, "ReadPassword") } - password = string(buf) - return password, nil + return string(buf), nil } // ReadPassword reads the password from a password file, the environment -// variable RESTIC_PASSWORD or prompts the user. -func ReadPassword(opts GlobalOptions, prompt string) (string, error) { +// variable RESTIC_PASSWORD or prompts the user. If the context is canceled, +// the function leaks the password reading goroutine. +func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) { + if opts.InsecureNoPassword { + if opts.password != "" { + return "", errors.Fatal("--insecure-no-password must not be specified together with providing a password via a cli option or environment variable") + } + return "", nil + } + if opts.password != "" { return opts.password, nil } @@ -361,7 +367,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { ) if stdinIsTerminal() { - password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) + password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt) } else { password, err = readPassword(os.Stdin) Verbosef("reading repository password from stdin\n") @@ -372,21 +378,22 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { } if len(password) == 0 { - return "", errors.Fatal("an empty password is not a password") + return "", errors.Fatal("an empty password is not allowed by default. Pass the flag `--insecure-no-password` to restic to disable this check") } return password, nil } // ReadPasswordTwice calls ReadPassword two times and returns an error when the -// passwords don't match. -func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) { - pw1, err := ReadPassword(gopts, prompt1) +// passwords don't match. If the context is canceled, the function leaks the +// password reading goroutine. 
+func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) { + pw1, err := ReadPassword(ctx, gopts, prompt1) if err != nil { return "", err } if stdinIsTerminal() { - pw2, err := ReadPassword(gopts, prompt2) + pw2, err := ReadPassword(ctx, gopts, prompt2) if err != nil { return "", err } @@ -438,22 +445,6 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi return nil, err } - report := func(msg string, err error, d time.Duration) { - Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) - } - success := func(msg string, retries int) { - Warnf("%v operation successful after %d retries\n", msg, retries) - } - be = retry.New(be, 10, report, success) - - // wrap backend if a test specified a hook - if opts.backendTestHook != nil { - be, err = opts.backendTestHook(be) - if err != nil { - return nil, err - } - } - s, err := repository.New(be, repository.Options{ Compression: opts.Compression, PackSize: opts.PackSize * 1024 * 1024, @@ -464,12 +455,15 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } passwordTriesLeft := 1 - if stdinIsTerminal() && opts.password == "" { + if stdinIsTerminal() && opts.password == "" && !opts.InsecureNoPassword { passwordTriesLeft = 3 } for ; passwordTriesLeft > 0; passwordTriesLeft-- { - opts.password, err = ReadPassword(opts, "enter password for repository: ") + opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ") + if ctx.Err() != nil { + return nil, ctx.Err() + } if err != nil && passwordTriesLeft > 1 { opts.password = "" fmt.Printf("%s. Try again\n", err) @@ -485,7 +479,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } } if err != nil { - if errors.IsFatal(err) { + if errors.IsFatal(err) || errors.Is(err, repository.ErrNoKeyFound) { return nil, err } return nil, errors.Fatalf("%s", err) @@ -539,7 +533,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } for _, item := range oldCacheDirs { dir := filepath.Join(c.Base, item.Name()) - err = fs.RemoveAll(dir) + err = os.RemoveAll(dir) if err != nil { Warnf("unable to remove %v: %v\n", dir, err) } @@ -570,16 +564,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro return cfg, nil } -// Open the backend specified by a location config. 
-func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { +func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) { debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) loc, err := location.Parse(gopts.backends, s) if err != nil { return nil, errors.Fatalf("parsing repository location failed: %v", err) } - var be backend.Backend - cfg, err := parseConfig(loc, opts) if err != nil { return nil, err @@ -599,7 +590,16 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) } - be, err = factory.Open(ctx, cfg, rt, lim) + var be backend.Backend + if create { + be, err = factory.Create(ctx, cfg, rt, lim) + } else { + be, err = factory.Open(ctx, cfg, rt, lim) + } + + if errors.Is(err, backend.ErrNoRepository) { + return nil, fmt.Errorf("Fatal: %w at %v: %v", ErrNoRepository, location.StripPassword(gopts.backends, s), err) + } if err != nil { return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err) } @@ -615,46 +615,53 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio } } - // check if config is there - fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile}) - if err != nil { - return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s)) + report := func(msg string, err error, d time.Duration) { + if d >= 0 { + Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) + } else { + Warnf("%v failed: %v\n", msg, err) + } } + success := func(msg string, retries int) { + Warnf("%v operation successful after %d retries\n", msg, retries) + } + be = retry.New(be, 15*time.Minute, report, success) - if fi.Size == 0 { - return nil, errors.New("config file has zero size, invalid repository?") + // wrap backend if a test specified a hook + if gopts.backendTestHook != nil { + be, err = gopts.backendTestHook(be) + if err != nil { + return nil, err + } } return be, nil } -// Create the backend specified by URI. -func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { - debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) - loc, err := location.Parse(gopts.backends, s) +// Open the backend specified by a location config. 
+func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { + be, err := innerOpen(ctx, s, gopts, opts, false) if err != nil { return nil, err } - cfg, err := parseConfig(loc, opts) - if err != nil { - return nil, err + // check if config is there + fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile}) + if be.IsNotExist(err) { + return nil, fmt.Errorf("Fatal: %w: unable to open config file: %v\nIs there a repository at the following location?\n%v", ErrNoRepository, err, location.StripPassword(gopts.backends, s)) } - - rt, err := backend.Transport(globalOptions.TransportOptions) if err != nil { - return nil, errors.Fatal(err.Error()) + return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s)) } - factory := gopts.backends.Lookup(loc.Scheme) - if factory == nil { - return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) + if fi.Size == 0 { + return nil, errors.New("config file has zero size, invalid repository?") } - be, err := factory.Create(ctx, cfg, rt, nil) - if err != nil { - return nil, err - } + return be, nil +} - return logger.New(sema.NewBackend(be)), nil +// Create the backend specified by URI. +func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { + return innerOpen(ctx, s, gopts, opts, true) } diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go index b798074d10b..502b2cf6ed3 100644 --- a/cmd/restic/global_debug.go +++ b/cmd/restic/global_debug.go @@ -15,23 +15,28 @@ import ( "github.com/pkg/profile" ) -var ( - listenProfile string - memProfilePath string - cpuProfilePath string - traceProfilePath string - blockProfilePath string - insecure bool -) +type ProfileOptions struct { + listen string + memPath string + cpuPath string + tracePath string + blockPath string + insecure bool +} + +var profileOpts ProfileOptions +var prof interface { + Stop() +} func init() { f := cmdRoot.PersistentFlags() - f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling") - f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`") - f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`") - f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`") - f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`") - f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings") + f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling") + f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`") + f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`") + f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`") + f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`") + f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings") } type fakeTestingTB struct{} @@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) { } func runDebug() error { - if listenProfile != "" { - fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile) + if profileOpts.listen != "" { + fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen) go func() { - err := http.ListenAndServe(listenProfile, 
nil) + err := http.ListenAndServe(profileOpts.listen, nil) if err != nil { fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err) } @@ -52,16 +57,16 @@ func runDebug() error { } profilesEnabled := 0 - if memProfilePath != "" { + if profileOpts.memPath != "" { profilesEnabled++ } - if cpuProfilePath != "" { + if profileOpts.cpuPath != "" { profilesEnabled++ } - if traceProfilePath != "" { + if profileOpts.tracePath != "" { profilesEnabled++ } - if blockProfilePath != "" { + if profileOpts.blockPath != "" { profilesEnabled++ } @@ -69,30 +74,25 @@ func runDebug() error { return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time") } - var prof interface { - Stop() + if profileOpts.memPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath)) + } else if profileOpts.cpuPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath)) + } else if profileOpts.tracePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath)) + } else if profileOpts.blockPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath)) } - if memProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath)) - } else if cpuProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath)) - } else if traceProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath)) - } else if blockProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath)) - } - - if prof != nil { - AddCleanupHandler(func(code int) (int, error) { - prof.Stop() - return code, nil - }) - } - - if insecure { + if profileOpts.insecure { repository.TestUseLowSecurityKDFParameters(fakeTestingTB{}) } return nil } + +func stopDebug() { + if prof != nil { + prof.Stop() + } +} diff --git a/cmd/restic/global_release.go b/cmd/restic/global_release.go index 7cb2e6caf3c..1dab5a293ac 100644 --- a/cmd/restic/global_release.go +++ b/cmd/restic/global_release.go @@ -5,3 +5,6 @@ package main // runDebug is a noop without the debug tag. func runDebug() error { return nil } + +// stopDebug is a noop without the debug tag. 
+func stopDebug() {} diff --git a/cmd/restic/global_test.go b/cmd/restic/global_test.go index 4f5c29e9a77..8e97ece29d5 100644 --- a/cmd/restic/global_test.go +++ b/cmd/restic/global_test.go @@ -1,10 +1,13 @@ package main import ( + "context" "os" "path/filepath" + "strings" "testing" + "github.com/restic/restic/internal/errors" rtest "github.com/restic/restic/internal/test" ) @@ -22,6 +25,16 @@ func Test_PrintFunctionsRespectsGlobalStdout(t *testing.T) { } } +type errorReader struct{ err error } + +func (r *errorReader) Read([]byte) (int, error) { return 0, r.err } + +func TestReadPassword(t *testing.T) { + want := errors.New("foo") + _, err := readPassword(&errorReader{want}) + rtest.Assert(t, errors.Is(err, want), "wrong error %v", err) +} + func TestReadRepo(t *testing.T) { tempDir := rtest.TempDir(t) @@ -50,3 +63,14 @@ func TestReadRepo(t *testing.T) { t.Fatal("must not read repository path from invalid file path") } } + +func TestReadEmptyPassword(t *testing.T) { + opts := GlobalOptions{InsecureNoPassword: true} + password, err := ReadPassword(context.TODO(), opts, "test") + rtest.OK(t, err) + rtest.Equals(t, "", password, "got unexpected password") + + opts.password = "invalid" + _, err = ReadPassword(context.TODO(), opts, "test") + rtest.Assert(t, strings.Contains(err.Error(), "must not be specified together with providing a password via a cli option or environment variable"), "unexpected error message, got %v", err) +} diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go index 2eacdeea98b..46badbe4fa2 100644 --- a/cmd/restic/integration_filter_pattern_test.go +++ b/cmd/restic/integration_filter_pattern_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/restic/restic/internal/filter" rtest "github.com/restic/restic/internal/test" ) @@ -17,14 +18,14 @@ func TestBackupFailsWhenUsingInvalidPatterns(t *testing.T) { var err error // Test --exclude - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided: *[._]log[.-][0-9] @@ -47,14 +48,14 @@ func TestBackupFailsWhenUsingInvalidPatternsFromFile(t *testing.T) { var err error // Test --exclude-file: - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: 
filter.ExcludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude-file - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{excludeFile}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludeFiles: []string{excludeFile}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] @@ -70,30 +71,64 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) { var err error // Test --exclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --include - err = testRunRestoreAssumeFailure("latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{Includes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iinclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{InsensitiveIncludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) } + +func TestRestoreFailsWhenUsingInvalidPatternsFromFile(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + // Create an include file with some invalid patterns + patternsFile := env.base + "/patternsFile" + fileErr := os.WriteFile(patternsFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) + if fileErr != nil { + t.Fatalf("Could not write include file: %v", fileErr) + } + + err := testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{IncludeFiles: []string{patternsFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --include-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) + + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: 
filter.ExcludePatternOptions{ExcludeFiles: []string{patternsFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) + + err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{InsensitiveIncludeFiles: []string{patternsFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --iinclude-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) + + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludeFiles: []string{patternsFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) +} diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 184609d40c5..21944a9ce3a 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "sync" "testing" @@ -168,6 +169,16 @@ type testEnvironment struct { gopts GlobalOptions } +type logOutputter struct { + t testing.TB +} + +func (l *logOutputter) Write(p []byte) (n int, err error) { + l.t.Helper() + l.t.Log(strings.TrimSuffix(string(p), "\n")) + return len(p), nil +} + // withTestEnvironment creates a test environment and returns a cleanup // function which removes it. func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { @@ -200,8 +211,11 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { Quiet: true, CacheDir: env.cache, password: rtest.TestPassword, - stdout: os.Stdout, - stderr: os.Stderr, + // stdout and stderr are written to by Warnf etc. That is, the written data + // usually consists of one or multiple lines and therefore can be handled well + // by t.Log.
+ stdout: &logOutputter{t}, + stderr: &logOutputter{t}, extended: make(options.Options), // replace this hook with "nil" if listing a filetype more than once is necessary @@ -232,47 +246,79 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string { } func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() packs := restic.NewIDSet() - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { packs.Insert(id) return nil })) return packs } +func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + + rtest.OK(t, r.LoadIndex(ctx, nil)) + treePacks := restic.NewIDSet() + rtest.OK(t, r.ListBlobs(ctx, func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + })) + + return treePacks +} + +func captureBackend(gopts *GlobalOptions) func() backend.Backend { + var be backend.Backend + gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { + be = r + return r, nil + } + return func() backend.Backend { + return be + } +} + func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { - r, err := OpenRepository(context.TODO(), gopts) + be := captureBackend(&gopts) + ctx, _, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() for id := range remove { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) + rtest.OK(t, be().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) } } func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { - r, err := OpenRepository(context.TODO(), gopts) + be := captureBackend(&gopts) + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() // Get all tree packs - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) + rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, r.ListBlobs(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } - }) + })) // remove all packs containing data blobs - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) + return be().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) })) } diff --git a/cmd/restic/integration_helpers_unix_test.go b/cmd/restic/integration_helpers_unix_test.go index df0c4fe63f1..30852a75341 100644 --- a/cmd/restic/integration_helpers_unix_test.go +++ b/cmd/restic/integration_helpers_unix_test.go @@ -13,17 +13,17 @@ import ( func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool { if e.path != other.path { - fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) + _, _ = fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) return false 
} if e.fi.Mode() != other.fi.Mode() { - fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) + _, _ = fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) return false } if !sameModTime(e.fi, other.fi) { - fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) + _, _ = fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) return false } @@ -31,17 +31,17 @@ func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool { stat2, _ := other.fi.Sys().(*syscall.Stat_t) if stat.Uid != stat2.Uid { - fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) + _, _ = fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) return false } if stat.Gid != stat2.Gid { - fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) + _, _ = fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) return false } if stat.Nlink != stat2.Nlink { - fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) + _, _ = fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) return false } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 7cf8396a321..777573f263a 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func TestCheckRestoreNoLock(t *testing.T) { @@ -34,7 +35,7 @@ func TestCheckRestoreNoLock(t *testing.T) { testRunCheck(t, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 4) - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0].String()) } // a listOnceBackend only allows listing once per filetype @@ -79,17 +80,22 @@ func TestListOnce(t *testing.T) { defer cleanup() env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { - return newListOnceBackend(r), nil + return newOrderedListOnceBackend(r), nil } pruneOpts := PruneOptions{MaxUnused: "0"} checkOpts := CheckOptions{ReadData: true, CheckUnused: true} createPrunableRepo(t, env) testRunPrune(t, env.gopts, pruneOpts) - rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) - - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)) - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts)) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + })) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + })) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts, term) + })) } type writeToOnly struct { @@ -142,7 +148,7 @@ func TestFindListOnce(t *testing.T) { defer cleanup() env.gopts.backendTestHook = func(r backend.Backend) 
(backend.Backend, error) {
-		return newListOnceBackend(r), nil
+		return newOrderedListOnceBackend(r), nil
 	}
 
 	testSetupBackupData(t, env)
@@ -154,12 +160,13 @@ func TestFindListOnce(t *testing.T) {
 	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
 	thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...)
 
-	repo, err := OpenRepository(context.TODO(), env.gopts)
+	ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false)
 	rtest.OK(t, err)
+	defer unlock()
 
 	snapshotIDs := restic.NewIDSet()
 	// specify the two oldest snapshots explicitly and use "latest" to reference the newest one
-	for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{
+	for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{
 		secondSnapshot[0].String(),
 		secondSnapshot[1].String()[:8],
 		"latest",
@@ -170,3 +177,47 @@ func TestFindListOnce(t *testing.T) {
 	// the snapshots can only be listed once, if both lists match then there has been only a single List() call
 	rtest.Equals(t, thirdSnapshot, snapshotIDs)
 }
+
+type failConfigOnceBackend struct {
+	backend.Backend
+	failedOnce bool
+}
+
+func (be *failConfigOnceBackend) Load(ctx context.Context, h backend.Handle,
+	length int, offset int64, fn func(rd io.Reader) error) error {
+
+	if !be.failedOnce && h.Type == restic.ConfigFile {
+		be.failedOnce = true
+		return fmt.Errorf("oops")
+	}
+	return be.Backend.Load(ctx, h, length, offset, fn)
+}
+
+func (be *failConfigOnceBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
+	if !be.failedOnce && h.Type == restic.ConfigFile {
+		be.failedOnce = true
+		return backend.FileInfo{}, fmt.Errorf("oops")
+	}
+	return be.Backend.Stat(ctx, h)
+}
+
+func TestBackendRetryConfig(t *testing.T) {
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+
+	var wrappedBackend *failConfigOnceBackend
+	// cause config loading to fail once
+	env.gopts.backendInnerTestHook = func(r backend.Backend) (backend.Backend, error) {
+		wrappedBackend = &failConfigOnceBackend{Backend: r}
+		return wrappedBackend, nil
+	}
+
+	testSetupBackupData(t, env)
+	rtest.Assert(t, wrappedBackend != nil, "backend not wrapped on init")
+	rtest.Assert(t, wrappedBackend != nil && wrappedBackend.failedOnce, "config loading was not retried on init")
+	wrappedBackend = nil
+
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, BackupOptions{}, env.gopts)
+	rtest.Assert(t, wrappedBackend != nil, "backend not wrapped on backup")
+	rtest.Assert(t, wrappedBackend != nil && wrappedBackend.failedOnce, "config loading was not retried on backup")
+}
diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go
index 600b7476f42..0e3dea6d54e 100644
--- a/cmd/restic/lock.go
+++ b/cmd/restic/lock.go
@@ -2,316 +2,47 @@ package main
 
 import (
 	"context"
-	"fmt"
-	"sync"
-	"time"
 
-	"github.com/restic/restic/internal/backend"
-	"github.com/restic/restic/internal/debug"
-	"github.com/restic/restic/internal/errors"
-	"github.com/restic/restic/internal/restic"
+	"github.com/restic/restic/internal/repository"
 )
 
-type lockContext struct {
-	lock      *restic.Lock
-	cancel    context.CancelFunc
-	refreshWG sync.WaitGroup
-}
-
-var globalLocks struct {
-	locks map[*restic.Lock]*lockContext
-	sync.Mutex
-	sync.Once
-}
-
-func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
-	return lockRepository(ctx, repo, false, retryLock, json)
-}
-
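// A minimal sketch of how command code uses the replacement helpers added
// later in this file (openWithReadLock / openWithExclusiveLock); the enclosing
// function name and its use of LoadIndex are assumptions for illustration:
func runExampleWithReadLock(ctx context.Context, gopts GlobalOptions) error {
	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
	if err != nil {
		return err
	}
	defer unlock()
	// the returned ctx is canceled once the repository lock is released or lost
	return repo.LoadIndex(ctx, nil)
}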
-func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, true, retryLock, json) -} - -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) - -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b -} - -// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked -// cancelling the original context also stops the lock refresh -func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(unlockAll) - }) - - lockFn := restic.NewLock - if exclusive { - lockFn = restic.NewExclusiveLock - } - - var lock *restic.Lock - var err error - - retrySleep := minDuration(retrySleepStart, retryLock) - retryMessagePrinted := false - retryTimeout := time.After(retryLock) - -retryLoop: - for { - lock, err = lockFn(ctx, repo) - if err != nil && restic.IsAlreadyLocked(err) { - - if !retryMessagePrinted { - if !json { - Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock) - } - retryMessagePrinted = true - } - - debug.Log("repo already locked, retrying in %v", retrySleep) - retrySleepCh := time.After(retrySleep) - - select { - case <-ctx.Done(): - return nil, ctx, ctx.Err() - case <-retryTimeout: - debug.Log("repo already locked, timeout expired") - // Last lock attempt - lock, err = lockFn(ctx, repo) - break retryLoop - case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) - } - } else { - // anything else, either a successful lock or another error - break retryLoop - } - } - if restic.IsInvalidLock(err) { - return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) - } +func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { + repo, err := OpenRepository(ctx, gopts) if err != nil { - return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) - } - debug.Log("create lock %p (exclusive %v)", lock, exclusive) - - ctx, cancel := context.WithCancel(ctx) - lockInfo := &lockContext{ - lock: lock, - cancel: cancel, - } - lockInfo.refreshWG.Add(2) - refreshChan := make(chan struct{}) - forceRefreshChan := make(chan refreshLockRequest) - - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan) - globalLocks.Unlock() - - return lock, ctx, err -} - -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 - -type refreshLockRequest struct { - result chan bool -} - -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) { - debug.Log("start") - lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) - lastRefresh := lock.Time - - defer func() { - ticker.Stop() - // ensure that the context was cancelled before removing the lock - lockInfo.cancel() - - // remove the lock from the repo - debug.Log("unlocking repository with lock %v", lock) - if err := lock.Unlock(); err != nil { - debug.Log("error while unlocking: %v", err) - Warnf("error while unlocking: %v", err) - } - - lockInfo.refreshWG.Done() - }() - - for { - select { - case <-ctx.Done(): - debug.Log("terminate") - return - - case req := <-forceRefresh: - debug.Log("trying to refresh stale lock") - // keep on going if our current lock still exists - success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel) - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case req.result <- success: - } - - if success { - // update lock refresh time - lastRefresh = lock.Time - } - - case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { - // the lock is too old, wait until the expiry monitor cancels the context - continue - } - - debug.Log("refreshing locks") - err := lock.Refresh(context.TODO()) - if err != nil { - Warnf("unable to refresh lock: %v\n", err) - } else { - lastRefresh = lock.Time - // inform monitor goroutine about successful refresh - select { - case <-ctx.Done(): - case refreshed <- struct{}{}: - } - } - } + return nil, nil, nil, err } -} -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) { - // time.Now() might use a monotonic timer which is paused during standby - // convert to unix time to ensure we compare real time values - lastRefresh := time.Now().UnixNano() - pollDuration := 1 * time.Second - if refreshInterval < pollDuration { - // require for TestLockFailedRefresh - pollDuration = refreshInterval / 5 - } - // timers are paused during standby, which is a problem as the refresh timeout - // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks - // https://github.com/golang/go/issues/35012 - ticker := time.NewTicker(pollDuration) - defer func() { - ticker.Stop() - lockInfo.cancel() - lockInfo.refreshWG.Done() - }() - - var refreshStaleLockResult chan bool + unlock := func() {} + if !dryRun { + var lock *repository.Unlocker - for { - select { - case <-ctx.Done(): - debug.Log("terminate expiry monitoring") - return - case <-refreshed: - if refreshStaleLockResult != nil { - // ignore delayed refresh notifications while the stale lock is refreshed - continue + lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { + if !gopts.JSON { + Verbosef("%s", msg) } - lastRefresh = time.Now().UnixNano() - case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { - continue - } - - debug.Log("trying to refreshStaleLock") - // keep on going if our current lock still exists - refreshReq := refreshLockRequest{ - result: make(chan bool), - } - refreshStaleLockResult = refreshReq.result - - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case forceRefresh <- refreshReq: - } - case success := <-refreshStaleLockResult: - if success { - lastRefresh = time.Now().UnixNano() - refreshStaleLockResult = nil - continue - } - - Warnf("Fatal: failed to refresh lock in time\n") - return + }, Warnf) + if err != nil { + return nil, nil, nil, err } - } -} - -func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool { - freeze := backend.AsBackend[backend.FreezeBackend](be) - if freeze != nil { - debug.Log("freezing backend") - freeze.Freeze() - defer freeze.Unfreeze() - } - err := lock.RefreshStaleLock(ctx) - if err != nil { - Warnf("failed to refresh stale lock: %v\n", err) - // cancel context while the backend is still frozen to prevent accidental modifications - cancel() - return false + unlock = lock.Unlock + } else { + repo.SetDryRun() } - return true + return ctx, repo, unlock, nil } -func unlockRepo(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() +func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) { + // TODO enforce read-only operations once the locking code has moved to the repository + return internalOpenWithLocked(ctx, gopts, noLock, false) } -func unlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil +func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { + // TODO enforce non-exclusive operations once the locking code has moved to the repository + return internalOpenWithLocked(ctx, gopts, dryRun, false) } -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) +func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, 
dryRun bool) (context.Context, *repository.Repository, func(), error) { + return internalOpenWithLocked(ctx, gopts, dryRun, true) } diff --git a/cmd/restic/lock_test.go b/cmd/restic/lock_test.go deleted file mode 100644 index bf22db699d9..00000000000 --- a/cmd/restic/lock_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package main - -import ( - "context" - "fmt" - "runtime" - "strings" - "sync" - "testing" - "time" - - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/location" - "github.com/restic/restic/internal/backend/mem" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" -) - -func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) { - env, cleanup := withTestEnvironment(t) - - reg := location.NewRegistry() - reg.Register(mem.NewFactory()) - env.gopts.backends = reg - env.gopts.Repo = "mem:" - - if wrapper != nil { - env.gopts.backendTestHook = wrapper - } - testRunInit(t, env.gopts) - - repo, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) - return repo, cleanup, env -} - -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON) - test.OK(t, err) - test.OK(t, wrappedCtx.Err()) - if lock.Stale() { - t.Fatal("lock returned stale lock") - } - return lock, wrappedCtx -} - -func TestLock(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - unlockRepo(lock) - if wrappedCtx.Err() == nil { - t.Fatal("unlock did not cancel context") - } -} - -func TestLockCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env) - cancel() - if wrappedCtx.Err() == nil { - t.Fatal("canceled parent context did not cancel context") - } - - // unlockRepo should not crash - unlockRepo(lock) -} - -func TestLockUnlockAll(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - _, err := unlockAll(0) - test.OK(t, err) - if wrappedCtx.Err() == nil { - t.Fatal("canceled parent context did not cancel context") - } - - // unlockRepo should not crash - unlockRepo(lock) -} - -func TestLockConflict(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - repo2, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) - - lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON) - test.OK(t, err) - defer unlockRepo(lock) - _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON) - if err == nil { - t.Fatal("second lock should have failed") - } - test.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err) -} - -type writeOnceBackend struct { - backend.Backend - written bool -} - -func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { - if b.written { - return fmt.Errorf("fail after first write") - } - b.written = true - return b.Backend.Save(ctx, h, rd) -} - -func TestLockFailedRefresh(t 
*testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { - return &writeOnceBackend{Backend: r}, nil - }) - defer cleanup() - - // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 20 * time.Millisecond - refreshabilityTimeout = 100 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - - select { - case <-wrappedCtx.Done(): - // expected lock refresh failure - case <-time.After(time.Second): - t.Fatal("failed lock refresh did not cause context cancellation") - } - // unlockRepo should not crash - unlockRepo(lock) -} - -type loggingBackend struct { - backend.Backend - t *testing.T -} - -func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { - b.t.Logf("save %v @ %v", h, time.Now()) - err := b.Backend.Save(ctx, h, rd) - b.t.Logf("save finished %v @ %v", h, time.Now()) - return err -} - -func TestLockSuccessfulRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { - return &loggingBackend{ - Backend: r, - t: t, - }, nil - }) - defer cleanup() - - t.Logf("test for successful lock refresh %v", time.Now()) - // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 60 * time.Millisecond - refreshabilityTimeout = 500 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - - select { - case <-wrappedCtx.Done(): - // don't call t.Fatal to allow the lock to be properly cleaned up - t.Error("lock refresh failed", time.Now()) - - // Dump full stacktrace - buf := make([]byte, 1024*1024) - n := runtime.Stack(buf, true) - buf = buf[:n] - t.Log(string(buf)) - - case <-time.After(2 * refreshabilityTimeout): - // expected lock refresh to work - } - // unlockRepo should not crash - unlockRepo(lock) -} - -type slowBackend struct { - backend.Backend - m sync.Mutex - sleep time.Duration -} - -func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { - b.m.Lock() - sleep := b.sleep - b.m.Unlock() - time.Sleep(sleep) - return b.Backend.Save(ctx, h, rd) -} - -func TestLockSuccessfulStaleRefresh(t *testing.T) { - var sb *slowBackend - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { - sb = &slowBackend{Backend: r} - return sb, nil - }) - defer cleanup() - - t.Logf("test for successful lock refresh %v", time.Now()) - // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 10 * time.Millisecond - refreshabilityTimeout = 50 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - // delay lock refreshing long enough that the lock would expire - sb.m.Lock() - sb.sleep = refreshabilityTimeout + refreshInterval - sb.m.Unlock() - - select { - case <-wrappedCtx.Done(): - // don't call t.Fatal to allow the lock to be properly cleaned up - t.Error("lock refresh failed", time.Now()) - - case <-time.After(refreshabilityTimeout): - } - // reset slow backend - sb.m.Lock() - sb.sleep = 0 - sb.m.Unlock() - debug.Log("normal lock period has 
expired") - - select { - case <-wrappedCtx.Done(): - // don't call t.Fatal to allow the lock to be properly cleaned up - t.Error("lock refresh failed", time.Now()) - - case <-time.After(3 * refreshabilityTimeout): - // expected lock refresh to work - } - - // unlockRepo should not crash - unlockRepo(lock) -} - -func TestLockWaitTimeout(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) - test.OK(t, err) - - retryLock := 200 * time.Millisecond - - start := time.Now() - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) - duration := time.Since(start) - - test.Assert(t, err != nil, - "create normal lock with exclusively locked repo didn't return an error") - test.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"), - "create normal lock with exclusively locked repo didn't return the correct error") - test.Assert(t, retryLock <= duration && duration < retryLock*3/2, - "create normal lock with exclusively locked repo didn't wait for the specified timeout") - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) -} - -func TestLockWaitCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) - test.OK(t, err) - - retryLock := 200 * time.Millisecond - cancelAfter := 40 * time.Millisecond - - start := time.Now() - ctx, cancel := context.WithCancel(context.TODO()) - time.AfterFunc(cancelAfter, cancel) - - lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON) - duration := time.Since(start) - - test.Assert(t, err != nil, - "create normal lock with exclusively locked repo didn't return an error") - test.Assert(t, strings.Contains(err.Error(), "context canceled"), - "create normal lock with exclusively locked repo didn't return the correct error") - test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, - "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) -} - -func TestLockWaitSuccess(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) - test.OK(t, err) - - retryLock := 200 * time.Millisecond - unlockAfter := 40 * time.Millisecond - - time.AfterFunc(unlockAfter, func() { - test.OK(t, elock.Unlock()) - }) - - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) - test.OK(t, err) - - test.OK(t, lock.Unlock()) -} diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 4595e81613b..096c5695c15 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -3,6 +3,8 @@ package main import ( "bufio" "bytes" + "context" + "encoding/json" "fmt" "log" "os" @@ -14,7 +16,9 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/options" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -23,6 +27,8 @@ func init() { _, _ = maxprocs.Set() } +var ErrOK = errors.New("ok") + // cmdRoot is the base command when no other command has been specified. 
var cmdRoot = &cobra.Command{ Use: "restic", @@ -37,7 +43,7 @@ The full documentation can be found at https://restic.readthedocs.io/ . SilenceUsage: true, DisableAutoGenTag: true, - PersistentPreRunE: func(c *cobra.Command, args []string) error { + PersistentPreRunE: func(c *cobra.Command, _ []string) error { // set verbosity, default is one globalOptions.verbosity = 1 if globalOptions.Quiet && globalOptions.Verbose > 0 { @@ -73,6 +79,25 @@ The full documentation can be found at https://restic.readthedocs.io/ . // enabled) return runDebug() }, + PersistentPostRun: func(_ *cobra.Command, _ []string) { + stopDebug() + }, +} + +var cmdGroupDefault = "default" +var cmdGroupAdvanced = "advanced" + +func init() { + cmdRoot.AddGroup( + &cobra.Group{ + ID: cmdGroupDefault, + Title: "Available Commands:", + }, + &cobra.Group{ + ID: cmdGroupAdvanced, + Title: "Advanced Options:", + }, + ) } // Distinguish commands that need the password from those that work without, @@ -87,8 +112,6 @@ func needsPassword(cmd string) bool { } } -var logBuffer = bytes.NewBuffer(nil) - func tweakGoGC() { // lower GOGC from 100 to 50, unless it was manually overwritten by the user oldValue := godebug.SetGCPercent(50) @@ -97,44 +120,101 @@ func tweakGoGC() { } } +func printExitError(code int, message string) { + if globalOptions.JSON { + type jsonExitError struct { + MessageType string `json:"message_type"` // exit_error + Code int `json:"code"` + Message string `json:"message"` + } + + jsonS := jsonExitError{ + MessageType: "exit_error", + Code: code, + Message: message, + } + + err := json.NewEncoder(globalOptions.stderr).Encode(jsonS) + if err != nil { + Warnf("JSON encode failed: %v\n", err) + return + } + } else { + _, _ = fmt.Fprintf(globalOptions.stderr, "%v\n", message) + } +} + func main() { tweakGoGC() // install custom global logger into a buffer, if an error occurs // we can show the logs + logBuffer := bytes.NewBuffer(nil) log.SetOutput(logBuffer) + err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { + _, _ = fmt.Fprintln(os.Stderr, s) + }) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + Exit(1) + } + debug.Log("main %#v", os.Args) debug.Log("restic %s compiled with %v on %v/%v", version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - err := cmdRoot.ExecuteContext(internalGlobalCtx) + ctx := createGlobalContext() + err = cmdRoot.ExecuteContext(ctx) + + if err == nil { + err = ctx.Err() + } else if err == ErrOK { + // ErrOK overwrites context cancelation errors + err = nil + } + + var exitMessage string switch { case restic.IsAlreadyLocked(err): - fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err) + exitMessage = fmt.Sprintf("%v\nthe `unlock` command can be used to remove stale locks", err) case err == ErrInvalidSourceData: - fmt.Fprintf(os.Stderr, "Warning: %v\n", err) + exitMessage = fmt.Sprintf("Warning: %v", err) case errors.IsFatal(err): - fmt.Fprintf(os.Stderr, "%v\n", err) + exitMessage = err.Error() + case errors.Is(err, repository.ErrNoKeyFound): + exitMessage = fmt.Sprintf("Fatal: %v", err) case err != nil: - fmt.Fprintf(os.Stderr, "%+v\n", err) + exitMessage = fmt.Sprintf("%+v", err) if logBuffer.Len() > 0 { - fmt.Fprintf(os.Stderr, "also, the following messages were logged by a library:\n") + exitMessage += "also, the following messages were logged by a library:\n" sc := bufio.NewScanner(logBuffer) for sc.Scan() { - fmt.Fprintln(os.Stderr, sc.Text()) + exitMessage += fmt.Sprintln(sc.Text()) } } } var exitCode int - switch 
err { - case nil: + switch { + case err == nil: exitCode = 0 - case ErrInvalidSourceData: + case err == ErrInvalidSourceData: exitCode = 3 + case errors.Is(err, ErrNoRepository): + exitCode = 10 + case restic.IsAlreadyLocked(err): + exitCode = 11 + case errors.Is(err, repository.ErrNoKeyFound): + exitCode = 12 + case errors.Is(err, context.Canceled): + exitCode = 130 default: exitCode = 1 } + + if exitCode != 0 { + printExitError(exitCode, exitMessage) + } Exit(exitCode) } diff --git a/cmd/restic/progress.go b/cmd/restic/progress.go index 48aa209a6a3..afd5d027f35 100644 --- a/cmd/restic/progress.go +++ b/cmd/restic/progress.go @@ -29,7 +29,7 @@ func calculateProgressInterval(show bool, json bool) time.Duration { return interval } -// newTerminalProgressMax returns a progress.Counter that prints to stdout or terminal if provided. +// newGenericProgressMax returns a progress.Counter that prints to stdout or terminal if provided. func newGenericProgressMax(show bool, max uint64, description string, print func(status string, final bool)) *progress.Counter { if !show { return nil @@ -53,7 +53,7 @@ func newGenericProgressMax(show bool, max uint64, description string, print func func newTerminalProgressMax(show bool, max uint64, description string, term *termstatus.Terminal) *progress.Counter { return newGenericProgressMax(show, max, description, func(status string, final bool) { if final { - term.SetStatus([]string{}) + term.SetStatus(nil) term.Print(status) } else { term.SetStatus([]string{status}) diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 4c46b60df44..44621afa18f 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "github.com/restic/restic/internal/errors" @@ -10,11 +11,12 @@ import ( type secondaryRepoOptions struct { password string // from-repo options - Repo string - RepositoryFile string - PasswordFile string - PasswordCommand string - KeyHint string + Repo string + RepositoryFile string + PasswordFile string + PasswordCommand string + KeyHint string + InsecureNoPassword bool // repo2 options LegacyRepo string LegacyRepositoryFile string @@ -48,6 +50,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo f.StringVarP(&opts.PasswordFile, "from-password-file", "", "", "`file` to read the source repository password from (default: $RESTIC_FROM_PASSWORD_FILE)") f.StringVarP(&opts.KeyHint, "from-key-hint", "", "", "key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT)") f.StringVarP(&opts.PasswordCommand, "from-password-command", "", "", "shell `command` to obtain the source repository password from (default: $RESTIC_FROM_PASSWORD_COMMAND)") + f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository (insecure)") opts.Repo = os.Getenv("RESTIC_FROM_REPOSITORY") opts.RepositoryFile = os.Getenv("RESTIC_FROM_REPOSITORY_FILE") @@ -56,13 +59,13 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND") } -func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { +func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && 
opts.LegacyRepositoryFile == "" {
 		return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
 	}
 	hasFromRepo := opts.Repo != "" || opts.RepositoryFile != "" || opts.PasswordFile != "" ||
-		opts.KeyHint != "" || opts.PasswordCommand != ""
+		opts.KeyHint != "" || opts.PasswordCommand != "" || opts.InsecureNoPassword
 	hasRepo2 := opts.LegacyRepo != "" || opts.LegacyRepositoryFile != "" || opts.LegacyPasswordFile != "" ||
 		opts.LegacyKeyHint != "" || opts.LegacyPasswordCommand != ""
@@ -84,6 +87,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep
 		dstGopts.PasswordFile = opts.PasswordFile
 		dstGopts.PasswordCommand = opts.PasswordCommand
 		dstGopts.KeyHint = opts.KeyHint
+		dstGopts.InsecureNoPassword = opts.InsecureNoPassword
 		pwdEnv = "RESTIC_FROM_PASSWORD"
 		repoPrefix = "source"
@@ -97,6 +101,8 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep
 		dstGopts.PasswordFile = opts.LegacyPasswordFile
 		dstGopts.PasswordCommand = opts.LegacyPasswordCommand
 		dstGopts.KeyHint = opts.LegacyKeyHint
+		// keep existing behavior for legacy options
+		dstGopts.InsecureNoPassword = false
 		pwdEnv = "RESTIC_PASSWORD2"
 	}
@@ -109,7 +115,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep
 			return GlobalOptions{}, false, err
 		}
 	}
-	dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ")
+	dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ")
 	if err != nil {
 		return GlobalOptions{}, false, err
 	}
diff --git a/cmd/restic/secondary_repo_test.go b/cmd/restic/secondary_repo_test.go
index ff1a10b03cb..aa511ca992a 100644
--- a/cmd/restic/secondary_repo_test.go
+++ b/cmd/restic/secondary_repo_test.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"testing"
@@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
 	// Test all valid cases
 	for _, testCase := range validSecondaryRepoTestCases {
-		DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
+		DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
 		rtest.OK(t, err)
 		rtest.Equals(t, DstGOpts, testCase.DstGOpts)
 		rtest.Equals(t, isFromRepo, testCase.FromRepo)
@@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
 	// Test all invalid cases
 	for _, testCase := range invalidSecondaryRepoTestCases {
-		_, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
+		_, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
 		rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
 	}
 }
diff --git a/cmd/restic/termstatus.go b/cmd/restic/termstatus.go
index cf3cd82ee9a..c0e9a045bfe 100644
--- a/cmd/restic/termstatus.go
+++ b/cmd/restic/termstatus.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"sync"
 
-	"github.com/restic/restic/internal/ui"
 	"github.com/restic/restic/internal/ui/termstatus"
 )
 
@@ -31,8 +30,7 @@ func setupTermstatus() (*termstatus.Terminal, func()) {
 	// use the termstatus for stdout/stderr
 	prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
-	stdioWrapper := ui.NewStdioWrapper(term)
-	globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
+	globalOptions.stdout, globalOptions.stderr = termstatus.WrapStdio(term)
 	return term, func() {
 		// shutdown termstatus
diff --git
a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz index 36aa62dbfb0..dc8e9bc80e9 100644 Binary files a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz and b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz differ diff --git a/doc/020_installation.rst b/doc/020_installation.rst index 0f1cd6c0481..8566c109eb6 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -77,8 +77,7 @@ avoid any conflicts: macOS ===== -If you are using macOS, you can install restic using the -`homebrew `__ package manager: +If you are using macOS, you can install restic using `Homebrew `__: .. code-block:: console @@ -285,8 +284,7 @@ From Source *********** restic is written in the Go programming language and you need at least -Go version 1.19. Building for Solaris requires at least Go version 1.20. -Building restic may also work with older versions of Go, +Go version 1.21. Building restic may also work with older versions of Go, but that's not supported. See the `Getting started `__ guide of the Go project for instructions how to install Go. @@ -363,3 +361,18 @@ Example for using sudo to write a zsh completion script directly to the system-w the operating system used, e.g. ``/usr/share/bash-completion/completions/restic`` in Debian and derivatives. Please look up the correct path in the appropriate documentation. + +Example for setting up a powershell completion script for the local user's profile: + +.. code-block:: pwsh-session + + # Create profile if one does not exist + PS> If (!(Test-Path $PROFILE.CurrentUserAllHosts)) {New-Item -Path $PROFILE.CurrentUserAllHosts -Force} + + PS> $ProfileDir = (Get-Item $PROFILE.CurrentUserAllHosts).Directory + + # Generate Restic completions in the same directory as the profile + PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1" + + # Append to the profile file the command to load Restic completions + PS> Add-Content -Path $PROFILE.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1" diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 8661f59046e..720bfc11d53 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -201,15 +201,16 @@ scheme like this: $ restic -r rest:http://host:8000/ init Depending on your REST server setup, you can use HTTPS protocol, -password protection, multiple repositories or any combination of -those features. The TCP/IP port is also configurable. Here -are some more examples: +unix socket, password protection, multiple repositories or any +combination of those features. The TCP/IP port is also configurable. +Here are some more examples: .. code-block:: console $ restic -r rest:https://host:8000/ init $ restic -r rest:https://user:pass@host:8000/ init $ restic -r rest:https://user:pass@host:8000/my_backup_repo/ init + $ restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ init The server username and password can be specified using environment variables as well: @@ -248,28 +249,22 @@ while creating the bucket. $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= +When using temporary credentials make sure to include the session token via +the environment variable ``AWS_SESSION_TOKEN``. + You can then easily initialize a repository that uses your Amazon S3 as -a backend. If the bucket does not exist it will be created in the -default location: +a backend. Make sure to use the endpoint for the correct region. 
The example +uses ``us-east-1``. If the bucket does not exist it will be created in that region: .. code-block:: console - $ restic -r s3:s3.amazonaws.com/bucket_name init + $ restic -r s3:s3.us-east-1.amazonaws.com/bucket_name init enter password for new repository: enter password again: - created restic repository eefee03bbd at s3:s3.amazonaws.com/bucket_name + created restic repository eefee03bbd at s3:s3.us-east-1.amazonaws.com/bucket_name Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. -If needed, you can manually specify the region to use by either setting the -environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option -parameter like ``-o s3.region="us-east-1"``. If the region is not specified, -the default region is used. Afterwards, the S3 server (at least for AWS, -``s3.amazonaws.com``) will redirect restic to the correct endpoint. - -When using temporary credentials make sure to include the session token via -then environment variable ``AWS_SESSION_TOKEN``. - Until version 0.8.0, restic used a default prefix of ``restic``, so the files in the bucket were placed in a directory named ``restic``. If you want to access a repository created with an older version of restic, specify the path @@ -277,25 +272,14 @@ after the bucket name like this: .. code-block:: console - $ restic -r s3:s3.amazonaws.com/bucket_name/restic [...] + $ restic -r s3:s3.us-east-1.amazonaws.com/bucket_name/restic [...] -For an S3-compatible server that is not Amazon (like Minio, see below), -or is only available via HTTP, you can specify the URL to the server -like this: ``s3:http://server:port/bucket_name``. - .. note:: restic expects `path-style URLs `__ - like for example ``s3.us-west-2.amazonaws.com/bucket_name``. + like for example ``s3.us-west-2.amazonaws.com/bucket_name`` for Amazon S3. Virtual-hosted–style URLs like ``bucket_name.s3.us-west-2.amazonaws.com``, where the bucket name is part of the hostname are not supported. These must be converted to path-style URLs instead, for example ``s3.us-west-2.amazonaws.com/bucket_name``. - -.. note:: Certain S3-compatible servers do not properly implement the - ``ListObjectsV2`` API, most notably Ceph versions before v14.2.5. On these - backends, as a temporary workaround, you can provide the - ``-o s3.list-objects-v1=true`` option to use the older - ``ListObjects`` API instead. This option may be removed in future - versions of restic. - + See below for configuration options for S3-compatible storage from other providers. Minio Server ************ @@ -320,81 +304,74 @@ this command. .. code-block:: console - $ ./restic -r s3:http://localhost:9000/restic init + $ restic -r s3:http://localhost:9000/restic init enter password for new repository: enter password again: - created restic repository 6ad29560f5 at s3:http://localhost:9000/restic1 + created restic repository 6ad29560f5 at s3:http://localhost:9000/restic Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. -Wasabi -************ - -`Wasabi `__ is a low cost Amazon S3 conformant object storage provider. -Due to its S3 conformance, Wasabi can be used as a storage provider for a restic repository. +S3-compatible Storage +********************* -- Create a Wasabi bucket using the `Wasabi Console `__. -- Determine the correct Wasabi service URL for your bucket `here `__. 
+For an S3-compatible storage service that is not Amazon, you can specify the URL to the server +like this: ``s3:https://server:port/bucket_name``. -You must first setup the following environment variables with the -credentials of your Wasabi account. +You must also set credentials for authentication to the service. .. code-block:: console - $ export AWS_ACCESS_KEY_ID= - $ export AWS_SECRET_ACCESS_KEY= + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ restic -r s3:https://server:port/bucket_name init -Now you can easily initialize restic to use Wasabi as a backend with -this command. +If needed, you can manually specify the region to use by either setting the +environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option +parameter like ``-o s3.region="us-east-1"``. If the region is not specified, +the default region ``us-east-1`` is used. -.. code-block:: console +To select between path-style and virtual-hosted access, the extended option +``-o s3.bucket-lookup=auto`` can be used. It supports the following values: - $ ./restic -r s3:https:/// init - enter password for new repository: - enter password again: - created restic repository xxxxxxxxxx at s3:https:/// - Please note that knowledge of your password is required to access - the repository. Losing your password means that your data is irrecoverably lost. +- ``auto``: Default behavior. Uses ``dns`` for Amazon and Google endpoints. Uses + ``path`` for all other endpoints +- ``dns``: Use virtual-hosted-style bucket access +- ``path``: Use path-style bucket access -Alibaba Cloud (Aliyun) Object Storage System (OSS) -************************************************** +Certain S3-compatible servers do not properly implement the ``ListObjectsV2`` API, +most notably Ceph versions before v14.2.5. On these backends, as a temporary +workaround, you can provide the ``-o s3.list-objects-v1=true`` option to use the +older ``ListObjects`` API instead. This option may be removed in future versions +of restic. -`Alibaba OSS `__ is an -encrypted, secure, cost-effective, and easy-to-use object storage -service that enables you to store, back up, and archive large amounts -of data in the cloud. +Wasabi +****** -Alibaba OSS is S3 compatible so it can be used as a storage provider -for a restic repository with a couple of extra parameters. +S3 storage from `Wasabi `__ can be used as follows. -- Determine the correct `Alibaba OSS region endpoint `__ - this will be something like ``oss-eu-west-1.aliyuncs.com`` -- You'll need the region name too - this will be something like ``oss-eu-west-1`` - -You must first setup the following environment variables with the -credentials of your Alibaba OSS account. +- Determine the correct Wasabi service URL for your bucket `here `__. +- Set environment variables with the necessary account credentials .. code-block:: console - $ export AWS_ACCESS_KEY_ID= - $ export AWS_SECRET_ACCESS_KEY= - -Now you can easily initialize restic to use Alibaba OSS as a backend with -this command. + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ restic -r s3:https:/// init -.. code-block:: console +Alibaba Cloud (Aliyun) Object Storage System (OSS) +************************************************** - $ ./restic -o s3.bucket-lookup=dns -o s3.region= -r s3:https:/// init - enter password for new backend: - enter password again: - created restic backend xxxxxxxxxx at s3:https:/// - Please note that knowledge of your password is required to access - the repository. 
Losing your password means that your data is irrecoverably lost. +S3 storage from `Alibaba OSS `__ can be used as follows. -For example with an actual endpoint: +- Determine the correct `Alibaba OSS region endpoint `__ - this will be something like ``oss-eu-west-1.aliyuncs.com`` +- You will need the region name too - this will be something like ``oss-eu-west-1`` +- Set environment variables with the necessary account credentials .. code-block:: console - $ restic -o s3.bucket-lookup=dns -o s3.region=oss-eu-west-1 -r s3:https://oss-eu-west-1.aliyuncs.com/bucketname init + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ restic -o s3.bucket-lookup=dns -o s3.region= -r s3:https:/// init OpenStack Swift *************** @@ -486,9 +463,11 @@ Backblaze B2 than using the Backblaze B2 backend directly. Different from the B2 backend, restic's S3 backend will only hide no longer - necessary files. Thus, make sure to setup lifecycle rules to eventually - delete hidden files. The lifecycle setting "Keep only the last version of the file" - will keep only the most current version of a file. Read the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). + necessary files. By default, Backblaze B2 retains all of the different versions of the + files and "hides" the older versions. Thus, to free space occupied by hidden files, + it is **recommended** to use the B2 lifecycle "Keep only the last version of the file". + The previous version of the file is "hidden" for one day and then deleted automatically + by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). Restic can backup data to any Backblaze B2 bucket. You need to first setup the following environment variables with the credentials you can find in the @@ -549,17 +528,23 @@ For authentication export one of the following variables: # For SAS $ export AZURE_ACCOUNT_SAS= -For authentication using ``az login`` set the resource group name and ensure the user has -the minimum permissions of the role assignment ``Storage Blob Data Contributor`` on Azure RBAC. +For authentication using ``az login`` ensure the user has +the minimum permissions of the role assignment ``Storage Blob Data Contributor`` on Azure RBAC +for the storage account. .. code-block:: console - $ export AZURE_RESOURCE_GROUP= $ az login -Alternatively, if run on Azure, restic will automatically uses service accounts configured +Alternatively, if run on Azure, restic will automatically use service accounts configured via the standard environment variables or Workload / Managed Identities. +To enforce the use of the Azure CLI credential when other credentials are present, set the following environment variable: + +.. code-block:: console + + $ export AZURE_FORCE_CLI_CREDENTIAL=true + Restic will by default use Azure's global domain ``core.windows.net`` as endpoint suffix. You can specify other suffixes as follows: @@ -583,6 +568,10 @@ The number of concurrent connections to the Azure Blob Storage service can be se ``-o azure.connections=10`` switch. By default, at most five parallel connections are established. +The access tier of the blobs uploaded to the Azure Blob Storage service can be set with the +``-o azure.access-tier=Cool`` switch. The allowed values are ``Hot``, ``Cool`` or ``Cold``. +If unspecified, the default is inferred from the default configured on the storage account. 
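For illustration, the Azure options described above can be combined in a single
invocation; the container name ``foo`` and the chosen values below are
placeholders, not prescribed settings:

.. code-block:: console

    $ restic -r azure:foo:/ -o azure.connections=10 -o azure.access-tier=Cool backup ~/work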
+
 Google Cloud Storage
 ********************
@@ -845,3 +834,26 @@ and then grants read/write permissions for group access.
 .. note:: To manage who has access to the repository you can use
    ``usermod`` on Linux systems, to change which group controls repository
    access ``chgrp -R`` is your friend.
+
+
+Repositories with empty password
+********************************
+
+Restic by default refuses to create or operate on repositories that use an
+empty password. Since restic 0.17.0, the option ``--insecure-no-password`` allows
+disabling this check. Restic will not prompt for a password when using this option.
+Specifying ``--insecure-no-password`` while also passing a password to restic
+via a CLI option or via environment variable results in an error.
+
+For security reasons, the option must always be specified when operating on
+repositories with an empty password. For example, to create a new repository
+with an empty password, use the following command.
+
+.. code-block:: console
+
+   restic init --insecure-no-password
+
+
+The ``init`` and ``copy`` commands also support the option ``--from-insecure-no-password``
+which applies to the source repository. The ``key add`` and ``key passwd`` commands
+include the ``--new-insecure-no-password`` option to add or set an empty password.
diff --git a/doc/040_backup.rst b/doc/040_backup.rst
index 550957eeb75..a30d80402be 100644
--- a/doc/040_backup.rst
+++ b/doc/040_backup.rst
@@ -24,16 +24,17 @@ again:
     $ restic -r /srv/restic-repo --verbose backup ~/work
     open repository
     enter password for repository:
-    password is correct
-    lock repository
+    repository a14e5863 opened (version 2, compression level auto)
     load index files
-    start scan
-    start backup
-    scan finished in 1.837s
-    processed 1.720 GiB in 0:12
+    start scan on [/home/user/work]
+    start backup on [/home/user/work]
+    scan finished in 1.837s: 5307 files, 1.720 GiB
+
     Files: 5307 new, 0 changed, 0 unmodified
     Dirs: 1867 new, 0 changed, 0 unmodified
-    Added: 1.200 GiB
+    Added to the repository: 1.200 GiB (1.103 GiB stored)
+
+    processed 5307 files, 1.720 GiB in 0:12
     snapshot 40dc1520 saved
 
 As you can see, restic created a backup of the directory and was pretty
@@ -44,6 +45,7 @@ You can see that restic tells us it processed 1.720 GiB of data, this is the
 size of the files and directories in ``~/work`` on the local file system. It
 also tells us that only 1.200 GiB was added to the repository. This means that
 some of the data was duplicate and restic was able to efficiently reduce it.
+The data compression also managed to compress the data down to 1.103 GiB.
 
 If you don't pass the ``--verbose`` option, restic will print less data. You'll
 still get a nice live status display. Be aware that the live status shows the
@@ -56,6 +58,39 @@ snapshot for each volume that contains files to backup. Files are read from the
 VSS snapshot instead of the regular filesystem. This allows to backup files that are
 exclusively locked by another process during the backup.
 
+You can use the following extended options to change the VSS behavior:
+
+ * ``-o vss.timeout`` specifies the timeout for VSS snapshot creation, the default value being 120 seconds
+ * ``-o vss.exclude-all-mount-points`` disables auto snapshotting of all volume mount points
+ * ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting
+ * ``-o vss.provider`` specifies the VSS provider used for snapshotting
+
+For example, a 2.5 minute timeout with snapshotting of mount points disabled can be specified as:
+
+.. 
code-block:: console + + -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true + +and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as: + +.. code-block:: console + + -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + +VSS provider can be specified by GUID: + +.. code-block:: console + + -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5} + +or by name: + +.. code-block:: console + + -o vss.provider="Hyper-V IC Software Shadow Copy Provider" + +Also, ``MS`` can be used as alias for ``Microsoft Software Shadow Copy provider 1.0``. + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. The files not to snapshot are configured in the Windows registry under the following key: @@ -76,17 +111,18 @@ repository (since all data is already there). This is de-duplication at work! $ restic -r /srv/restic-repo --verbose backup ~/work open repository enter password for repository: - password is correct - lock repository + repository a14e5863 opened (version 2, compression level auto) load index files - using parent snapshot d875ae93 - start scan - start backup - scan finished in 1.881s - processed 1.720 GiB in 0:03 + using parent snapshot 40dc1520 + start scan on [/home/user/work] + start backup on [/home/user/work] + scan finished in 1.881s: 5307 files, 1.720 GiB + Files: 0 new, 0 changed, 5307 unmodified Dirs: 0 new, 0 changed, 1867 unmodified - Added: 0 B + Added to the repository: 0 B (0 B stored) + + processed 5307 files, 1.720 GiB in 0:03 snapshot 79766175 saved You can even backup individual files in the same repository (not passing @@ -96,7 +132,6 @@ You can even backup individual files in the same repository (not passing $ restic -r /srv/restic-repo backup ~/work.txt enter password for repository: - password is correct snapshot 249d0210 saved If you're interested in what restic does, pass ``--verbose`` twice (or @@ -110,7 +145,6 @@ restic encounters: $ restic -r /srv/restic-repo --verbose --verbose backup ~/work.txt open repository enter password for repository: - password is correct lock repository load index files using parent snapshot f3f8d56b @@ -198,6 +232,40 @@ On **Windows**, a file is considered unchanged when its path, size and modification time match, and only ``--force`` has any effect. The other options are recognized but ignored. +Skip creating snapshots if unchanged +************************************ + +By default, restic always creates a new snapshot even if nothing has changed +compared to the parent snapshot. To omit the creation of a new snapshot in this +case, specify the ``--skip-if-unchanged`` option. + +Note that when using absolute paths to specify the backup source, then also +changes to the parent folders result in a changed snapshot. For example, a backup +of ``/home/user/work`` will create a new snapshot if the metadata of either +``/``, ``/home`` or ``/home/user`` change. To avoid this problem run restic from +the corresponding folder and use relative paths. + +.. code-block:: console + + $ cd /home/user/work && restic -r /srv/restic-repo backup . --skip-if-unchanged + + open repository + enter password for repository: + repository a14e5863 opened (version 2, compression level auto) + load index files + using parent snapshot 40dc1520 + start scan on [.] + start backup on [.] 
+
 By default VSS ignores Outlook OST files. This is not a restriction of restic
 but the default Windows VSS configuration. The files not to snapshot are
 configured in the Windows registry under the following key:
@@ -76,17 +111,18 @@ repository (since all data is already there). This is de-duplication at work!
     $ restic -r /srv/restic-repo --verbose backup ~/work
     open repository
     enter password for repository:
-    password is correct
-    lock repository
+    repository a14e5863 opened (version 2, compression level auto)
     load index files
-    using parent snapshot d875ae93
-    start scan
-    start backup
-    scan finished in 1.881s
-    processed 1.720 GiB in 0:03
+    using parent snapshot 40dc1520
+    start scan on [/home/user/work]
+    start backup on [/home/user/work]
+    scan finished in 1.881s: 5307 files, 1.720 GiB
+
     Files:           0 new,     0 changed,  5307 unmodified
     Dirs:            0 new,     0 changed,  1867 unmodified
-    Added:      0 B
+    Added to the repository: 0 B (0 B stored)
+
+    processed 5307 files, 1.720 GiB in 0:03
     snapshot 79766175 saved
 
 You can even backup individual files in the same repository (not passing
@@ -96,7 +132,6 @@ You can even backup individual files in the same repository (not passing
     $ restic -r /srv/restic-repo backup ~/work.txt
     enter password for repository:
-    password is correct
     snapshot 249d0210 saved
 
 If you're interested in what restic does, pass ``--verbose`` twice (or
@@ -110,7 +145,6 @@ restic encounters:
     $ restic -r /srv/restic-repo --verbose --verbose backup ~/work.txt
     open repository
     enter password for repository:
-    password is correct
     lock repository
     load index files
     using parent snapshot f3f8d56b
@@ -198,6 +232,40 @@ On **Windows**, a file is considered unchanged when its path, size
 and modification time match, and only ``--force`` has any effect. The other options are
 recognized but ignored.
 
+Skip creating snapshots if unchanged
+************************************
+
+By default, restic always creates a new snapshot even if nothing has changed
+compared to the parent snapshot. To omit the creation of a new snapshot in this
+case, specify the ``--skip-if-unchanged`` option.
+
+Note that when using absolute paths to specify the backup source, changes to
+the parent folders also result in a changed snapshot. For example, a backup
+of ``/home/user/work`` will create a new snapshot if the metadata of either
+``/``, ``/home`` or ``/home/user`` changes. To avoid this problem, run restic
+from the corresponding folder and use relative paths.
+
+.. code-block:: console
+
+    $ cd /home/user/work && restic -r /srv/restic-repo backup . --skip-if-unchanged
+
+    open repository
+    enter password for repository:
+    repository a14e5863 opened (version 2, compression level auto)
+    load index files
+    using parent snapshot 40dc1520
+    start scan on [.]
+    start backup on [.]
+    scan finished in 1.814s: 5307 files, 1.720 GiB
+
+    Files:           0 new,     0 changed,  5307 unmodified
+    Dirs:            0 new,     0 changed,  1867 unmodified
+    Added to the repository: 0 B (0 B stored)
+
+    processed 5307 files, 1.720 GiB in 0:03
+    skipped creating snapshot
+
 
 Dry Runs
 ********
@@ -229,7 +297,8 @@ the exclude options are:
 
 - ``--exclude-file`` Specified one or more times to exclude items listed in a given file
 - ``--iexclude-file`` Same as ``exclude-file`` but ignores cases like in ``--iexclude``
 - ``--exclude-if-present foo`` Specified one or more times to exclude a folder's content if it contains a file called ``foo`` (optionally having a given header, no wildcards for the file name supported)
-- ``--exclude-larger-than size`` Specified once to excludes files larger than the given size
+- ``--exclude-larger-than size`` Specified once to exclude files larger than the given size
+- ``--exclude-cloud-files`` Specified once to exclude online-only cloud files (such as OneDrive Files On-Demand), currently only supported on Windows
 
 Please see ``restic help backup`` for more specific information about each exclude option.
 
@@ -430,18 +499,17 @@ You can combine all three options with each other and with the normal file argum
 
 Comparing Snapshots
 *******************
 
-Restic has a `diff` command which shows the difference between two snapshots
+Restic has a ``diff`` command which shows the difference between two snapshots
 and displays a small statistic, just pass the command two snapshot IDs:
 
 .. code-block:: console
 
     $ restic -r /srv/restic-repo diff 5845b002 2ab627a6
-    password is correct
     comparing snapshot ea657ce5 to 2ab627a6:
 
-    C   /restic/cmd_diff.go
+    M   /restic/cmd_diff.go
     +    /restic/foo
-    C   /restic/restic
+    M   /restic/restic
 
     Files:           0 new,     0 removed,     2 changed
     Dirs:            1 new,     0 removed
@@ -460,6 +528,24 @@ folder, you could use the following command:
 
     $ restic -r /srv/restic-repo diff 5845b002:/restic 2ab627a6:/restic
 
+By default, the ``diff`` command only lists differences in file contents.
+The flag ``--metadata`` shows changes to file metadata, too.
+
+The characters left of the file path show what has changed for this file:
+
++-------+-----------------------+
+| ``+`` | added                 |
++-------+-----------------------+
+| ``-`` | removed               |
++-------+-----------------------+
+| ``T`` | entry type changed    |
++-------+-----------------------+
+| ``M`` | file content changed  |
++-------+-----------------------+
+| ``U`` | metadata changed      |
++-------+-----------------------+
+| ``?`` | bitrot detected       |
++-------+-----------------------+
 
 Backing up special items and metadata
 *************************************
@@ -481,13 +567,17 @@ written, and the next backup needs to write new metadata again. If you really
 want to save the access time for files and directories, you can pass the
 ``--with-atime`` option to the ``backup`` command.
 
+Backing up full security descriptors on Windows is only possible when the user
+has the ``SeBackupPrivilege`` privilege or is running as admin. This is a
+restriction of Windows, not restic.
+If neither of these conditions is met, only the owner, group and DACL will
+be backed up.
+
 Note that ``restic`` does not back up some metadata associated with files.
Of particular note are: * File creation date on Unix platforms * Inode flags on Unix platforms -* File ownership and ACLs on Windows -* The "hidden" flag on Windows Reading data from a command *************************** @@ -495,11 +585,13 @@ Reading data from a command Sometimes, it can be useful to directly save the output of a program, for example, ``mysqldump`` so that the SQL can later be restored. Restic supports this mode of operation; just supply the option ``--stdin-from-command`` when using the -``backup`` action, and write the command in place of the files/directories: +``backup`` action, and write the command in place of the files/directories. To prevent +restic from interpreting the arguments for the command, make sure to add ``--`` before +the command starts: .. code-block:: console - $ restic -r /srv/restic-repo backup --stdin-from-command mysqldump [...] + $ restic -r /srv/restic-repo backup --stdin-from-command -- mysqldump --host example mydb [...] This command creates a new snapshot based on the standard output of ``mysqldump``. By default, the command's standard output is saved in a file named ``stdin``. @@ -507,7 +599,7 @@ A different name can be specified with ``--stdin-filename``: .. code-block:: console - $ restic -r /srv/restic-repo backup --stdin-filename production.sql --stdin-from-command mysqldump [...] + $ restic -r /srv/restic-repo backup --stdin-filename production.sql --stdin-from-command -- mysqldump --host example mydb [...] Restic uses the command exit code to determine whether the command succeeded. A non-zero exit code from the command causes restic to cancel the backup. This causes @@ -595,6 +687,30 @@ created as it would only be written at the very (successful) end of the backup operation. Previous snapshots will still be there and will still work. +Exit status codes +***************** + +Restic returns an exit status code after the backup command is run: + +* 0 when the backup was successful (snapshot with all source files created) +* 1 when there was a fatal error (no snapshot created) +* 3 when some source files could not be read (incomplete snapshot with remaining files created) +* further exit codes are documented in :ref:`exit-codes`. + +Fatal errors occur for example when restic is unable to write to the backup destination, when +there are network connectivity issues preventing successful communication, or when an invalid +password or command line argument is provided. When restic returns this exit status code, one +should not expect a snapshot to have been created. + +Source file read errors occur when restic fails to read one or more files or directories that +it was asked to back up, e.g. due to permission problems. Restic displays the number of source +file read errors that occurred while running the backup. If there are errors of this type, +restic will still try to complete the backup run with all the other files, and create a +snapshot that then contains all but the unreadable files. + +For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. +To manually inspect the exit code in e.g. Linux, run ``echo $?``. + Environment Variables ********************* @@ -613,11 +729,13 @@ environment variables. 
The following lists these environment variables: RESTIC_TLS_CLIENT_CERT Location of TLS client certificate and private key (replaces --tls-client-cert) RESTIC_CACHE_DIR Location of the cache directory RESTIC_COMPRESSION Compression mode (only available for repository format version 2) + RESTIC_HOST Only consider snapshots for this host / Set the hostname for the snapshot manually (replaces --host) RESTIC_PROGRESS_FPS Frames per second by which the progress bar is updated RESTIC_PACK_SIZE Target size for pack files RESTIC_READ_CONCURRENCY Concurrency for file reads - TMPDIR Location for temporary files + TMPDIR Location for temporary files (except Windows) + TMP Location for temporary files (only Windows) AWS_ACCESS_KEY_ID Amazon S3 access key ID AWS_SECRET_ACCESS_KEY Amazon S3 secret access key @@ -636,6 +754,7 @@ environment variables. The following lists these environment variables: AZURE_ACCOUNT_KEY Account key for Azure AZURE_ACCOUNT_SAS Shared access signatures (SAS) for Azure AZURE_ENDPOINT_SUFFIX Endpoint suffix for Azure Storage (default: core.windows.net) + AZURE_FORCE_CLI_CREDENTIAL Force the use of Azure CLI credentials for authentication B2_ACCOUNT_ID Account ID or applicationKeyId for Backblaze B2 B2_ACCOUNT_KEY Account Key or applicationKey for Backblaze B2 @@ -680,26 +799,3 @@ See :ref:`caching` for the rules concerning cache locations when The external programs that restic may execute include ``rclone`` (for rclone backends) and ``ssh`` (for the SFTP backend). These may respond to further environment variables and configuration files; see their respective manuals. - -Exit status codes -***************** - -Restic returns one of the following exit status codes after the backup command is run: - -* 0 when the backup was successful (snapshot with all source files created) -* 1 when there was a fatal error (no snapshot created) -* 3 when some source files could not be read (incomplete snapshot with remaining files created) - -Fatal errors occur for example when restic is unable to write to the backup destination, when -there are network connectivity issues preventing successful communication, or when an invalid -password or command line argument is provided. When restic returns this exit status code, one -should not expect a snapshot to have been created. - -Source file read errors occur when restic fails to read one or more files or directories that -it was asked to back up, e.g. due to permission problems. Restic displays the number of source -file read errors that occurred while running the backup. If there are errors of this type, -restic will still try to complete the backup run with all the other files, and create a -snapshot that then contains all but the unreadable files. - -One can use these exit status codes in scripts and other automation tools, to make them aware of -the outcome of the backup run. To manually inspect the exit code in e.g. Linux, run ``echo $?``. diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 48e5985dc41..f31e75c8427 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -18,19 +18,21 @@ Working with repositories Listing all snapshots ===================== -Now, you can list all the snapshots stored in the repository: +Now, you can list all the snapshots stored in the repository. The size column +only exists for snapshots created using restic 0.17.0 or later. It reflects the +size of the contained files at the time when the snapshot was created. .. 
code-block:: console $ restic -r /srv/restic-repo snapshots enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work - 79766175 2015-05-08 21:40:19 kasimir /home/user/work - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 590c8fc8 2015-05-08 21:47:38 kazik /srv - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work 20.643GiB + 79766175 2015-05-08 21:40:19 kasimir /home/user/work 20.645GiB + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB You can filter the listing by directory path: @@ -38,10 +40,10 @@ You can filter the listing by directory path: $ restic -r /srv/restic-repo snapshots --path="/srv" enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 590c8fc8 2015-05-08 21:47:38 kazik /srv - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB Or filter by host: @@ -49,10 +51,10 @@ Or filter by host: $ restic -r /srv/restic-repo snapshots --host luigi enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB Combining filters is also possible. @@ -64,21 +66,21 @@ Furthermore you can group the output by the same filters (host, paths, tags): enter password for repository: snapshots for (host [kasimir]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work - 79766175 2015-05-08 21:40:19 kasimir /home/user/work + ID Date Host Tags Directory Size + ------------------------------------------------------------------------ + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work 20.643GiB + 79766175 2015-05-08 21:40:19 kasimir /home/user/work 20.645GiB 2 snapshots snapshots for (host [luigi]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB 2 snapshots snapshots for (host [kazik]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 590c8fc8 2015-05-08 21:47:38 kazik /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB 1 snapshots @@ -133,7 +135,7 @@ as separator. 
     /home/user
     /home/user/work.txt
 
-To show more details about the files in a snapshot, you can use the ``--long`` option. The colums include
+To show more details about the files in a snapshot, you can use the ``--long`` option. The columns include
 file permissions, UID, GID, file size, modification time and file path.
 For scripting usage, the ``ls`` command supports the ``--json`` flag; the JSON output format is
 described at :ref:`ls json`.
@@ -161,8 +163,8 @@ example from a local to a remote repository, you can use the ``copy`` command:
 .. code-block:: console
 
     $ restic -r /srv/restic-repo-copy copy --from-repo /srv/restic-repo
-    repository d6504c63 opened successfully, password is correct
-    repository 3dd0878c opened successfully, password is correct
+    repository d6504c63 opened successfully
+    repository 3dd0878c opened successfully
 
     snapshot 410b18a2 of [/home/user/work] at 2020-06-09 23:15:57.305305 +0200 CEST by user@kasimir
     copy started, this may take a while...
@@ -261,7 +263,7 @@ the unwanted files from affected snapshots by rewriting them using the
 .. code-block:: console
 
     $ restic -r /srv/restic-repo rewrite --exclude secret-file
-    repository c881945a opened (repository version 2) successfully, password is correct
+    repository c881945a opened (repository version 2) successfully
 
     snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST by user@kasimir
     excluding /home/user/work/secret-file
@@ -272,7 +274,7 @@ the unwanted files from affected snapshots by rewriting them using the
     modified 1 snapshots
 
     $ restic -r /srv/restic-repo rewrite --exclude secret-file 6160ddb2
-    repository c881945a opened (repository version 2) successfully, password is correct
+    repository c881945a opened (repository version 2) successfully
 
     snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST by user@kasimir
     excluding /home/user/work/secret-file
@@ -303,6 +305,13 @@ In order to preview the changes which ``rewrite`` would make, you can use the
    modifying the repository. Instead restic will only print the actions it
    would perform.
 
+.. note:: The ``rewrite`` command verifies that it does not modify snapshots in
+   unexpected ways and fails with a ``cannot encode tree at "[...]" without loosing information``
+   error otherwise. This can occur when rewriting a snapshot created by a newer
+   version of restic or some third-party implementation.
+
+   To convert a snapshot into the format expected by the ``rewrite`` command,
+   use ``restic repair snapshots ``.
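+
+For example, a cautious workflow could preview a rewrite before applying it,
+reusing the snapshot ID from the example above (a sketch):
+
+.. code-block:: console
+
+    $ restic -r /srv/restic-repo rewrite --dry-run --exclude secret-file 6160ddb2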
 
 Modifying metadata of snapshots
 ===============================
@@ -366,10 +375,22 @@ detect this and yield the same error as when you tried to restore:
 
     $ restic -r /srv/restic-repo check
     ...
     load indexes
-    error: error loading index de30f323: load : invalid data returned
-    Fatal: LoadIndex returned errors
+    error: error loading index de30f3231ca2e6a59af4aa84216dfe2ef7339c549dc11b09b84000997b139628: LoadRaw(): invalid data returned
+
+    The repository index is damaged and must be repaired. You must run `restic repair index' to correct this.
+
+    Fatal: repository contains errors
+
+.. warning::
+
+   If ``check`` reports an error in the repository, then you must repair the repository.
+   As long as a repository is damaged, restoring some files or directories will fail. New
+   snapshots are not guaranteed to be restorable either.
+
+   For instructions on how to repair a damaged repository, see the :ref:`troubleshooting`
+   section or follow the instructions provided by the ``check`` command.
 
-If the repository structure is intact, restic will show that no errors were found:
+If the repository structure is intact, restic will show that ``no errors were found``:
 
 .. code-block:: console
 
diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst
index d8fb2c9b671..650f111be8f 100644
--- a/doc/047_tuning_backup_parameters.rst
+++ b/doc/047_tuning_backup_parameters.rst
@@ -26,7 +26,8 @@ When you start a backup, restic will concurrently count the number of files and
 their total size, which is used to estimate how long it will take. This will
 cause some extra I/O, which can slow down backups of network file systems or
 FUSE mounts. To avoid this overhead at the cost of not seeing a progress
-estimate, use the ``--no-scan`` option which disables this file scanning.
+estimate, use the ``--no-scan`` option of the ``backup`` command which disables
+this file scanning.
 
 Backend Connections
 ===================
@@ -98,7 +99,8 @@ to a 16 MiB pack size.
 
 The side effect of increasing the pack size is requiring more disk space for temporary pack
 files created before uploading. The space must be available in the system default temp
-directory, unless overwritten by setting the ``$TMPDIR`` environment variable. In addition,
+directory, unless overridden by setting the ``$TMPDIR`` environment variable
+(on Windows, use ``$TMP`` or ``$TEMP`` instead). In addition,
 depending on the backend the memory usage can also increase by a similar amount. Restic
 requires temporary space according to the pack size, multiplied by the number
 of backend connections plus one. For example, if the backend uses 5 connections (the default
@@ -111,3 +113,28 @@ to disk. An operating system usually caches file write operations in memory and
 them to disk after a short delay. As larger pack files take longer to upload, this increases
 the chance of these files being written to disk. This can increase disk wear for SSDs.
+
+
+Feature Flags
+=============
+
+Feature flags allow disabling or enabling certain experimental restic features. The flags
+can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a
+comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature
+flag. The value is optional and can contain either the value ``true`` (default if omitted)
+or ``false``. The list of currently available feature flags is shown by the ``features``
+command.
+
+Restic will return an error if an invalid feature flag is specified. Feature flags that
+are no longer relevant may be removed in a future restic release, so make sure to stop
+specifying such flags once they become obsolete.
+
+A feature can either be in alpha, beta, stable or deprecated state.
+
+- An *alpha* feature is disabled by default and may change in arbitrary ways between restic
+  versions or be removed.
+- A *beta* feature is enabled by default, but still can change in minor ways or be removed.
+- A *stable* feature is always enabled and cannot be disabled. This allows for a transition
+  period after which the flag will be removed in a future restic version.
+- A *deprecated* feature is always disabled and cannot be enabled. The flag will be removed
+  in a future restic version.
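+
+For example, to disable a flag named ``example-feature`` for a single run (the
+flag name here is purely illustrative; list the real flags with the ``features``
+command):
+
+.. code-block:: console
+
+    $ RESTIC_FEATURES=example-feature=false restic backup ~/work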
 
diff --git a/doc/050_restore.rst b/doc/050_restore.rst
index 3cd05500b88..b37f3c4fb5a 100644
--- a/doc/050_restore.rst
+++ b/doc/050_restore.rst
@@ -68,10 +68,18 @@ There are case insensitive variants of ``--exclude`` and ``--include`` called
 ``--iexclude`` and ``--iinclude``.
 These options will behave the same way but ignore the casing of paths.
 
+There are also ``--include-file``, ``--exclude-file``, ``--iinclude-file`` and
+``--iexclude-file`` flags that read the include and exclude patterns from a file.
+
 Restoring symbolic links on windows is only possible when the user has
 ``SeCreateSymbolicLinkPrivilege`` privilege or is running as admin. This is
 a restriction of windows not restic.
 
+Restoring full security descriptors on Windows is only possible when the user has
+the ``SeRestorePrivilege``, ``SeSecurityPrivilege`` and ``SeTakeOwnershipPrivilege``
+privileges or is running as admin. This is a restriction of Windows, not restic.
+If neither of these conditions is met, only the DACL will be restored.
+
 By default, restic does not restore files as sparse. Use ``restore --sparse`` to
 enable the creation of sparse files if supported by the filesystem. Then restic
 will restore long runs of zero bytes as holes in the corresponding files.
@@ -80,6 +88,96 @@ disk space. Note that the exact location of the holes can differ from those in
 the original file, as their location is determined while restoring and is not
 stored explicitly.
 
+Restoring extended file attributes
+----------------------------------
+
+By default, all extended attributes for files are restored.
+
+Use only one of ``--exclude-xattr`` or ``--include-xattr`` to control which extended
+attributes are restored for files in the snapshot. For example, to restore
+user and security namespaced extended attributes for files:
+
+.. code-block:: console
+
+    $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work --include-xattr user.* --include-xattr security.*
+    enter password for repository:
+    restoring to /tmp/restore-work
+
+Restoring in-place
+------------------
+
+.. note::
+
+    Restoring data in-place can leave files in a partially restored state if the ``restore``
+    operation is interrupted. To ensure you can revert back to the previous state, create
+    a current ``backup`` before restoring a different snapshot.
+
+By default, the ``restore`` command overwrites already existing files at the target
+directory. This behavior can be configured via the ``--overwrite`` option. The following
+values are supported:
+
+* ``--overwrite always`` (default): always overwrites already existing files. ``restore``
+  will verify the existing file content and only restore mismatching parts to minimize
+  downloads. Updates the metadata of all files.
+* ``--overwrite if-changed``: like the previous case, but speeds up the file content check
+  by assuming that files with matching size and modification time (mtime) are already up to date.
+  In case of a mismatch, the full file content is verified. Updates the metadata of all files.
+* ``--overwrite if-newer``: only overwrite existing files if the file in the snapshot has a
+  newer modification time (mtime).
+* ``--overwrite never``: never overwrite existing files.
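+
+For instance, an in-place restore that skips files whose size and mtime already
+match might look like this (a sketch; paths are examples):
+
+.. code-block:: console
+
+    $ restic -r /srv/restic-repo restore latest --target /home/user/work --overwrite if-changed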
+
+Delete files not in snapshot
+----------------------------
+
+When restoring into a directory that already contains files, it can be useful to remove all
+files that do not exist in the snapshot. For this, pass the ``--delete`` option to the ``restore``
+command. The command will then **delete all files** from the target directory that do not
+exist in the snapshot.
+
+The ``--delete`` option also allows overwriting a non-empty directory if the snapshot contains a
+file with the same name.
+
+.. warning::
+
+    Always use the ``--dry-run -vv`` option to verify what would be deleted before running the actual
+    command.
+
+When specifying ``--include`` or ``--exclude`` options, only files or directories matched by those
+options will be deleted. For example, the command
+``restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo --delete``
+would only delete files within ``/tmp/restore-work/foo``.
+
+When using ``--target / --delete``, the ``restore`` command only works if either an ``--include``
+or ``--exclude`` option is also specified. This ensures that one cannot accidentally delete
+the whole system.
+
+Dry run
+-------
+
+As restore operations can take a long time, it can be useful to perform a dry-run to
+see what would be restored without having to run the full restore operation. The
+restore command supports the ``--dry-run`` option and prints information about the
+restored files when specifying ``--verbose=2``.
+
+.. code-block:: console
+
+    $ restic restore --target /tmp/restore-work --dry-run --verbose=2 latest
+
+    unchanged /restic/internal/walker/walker.go with size 2.812 KiB
+    updated   /restic/internal/walker/walker_test.go with size 11.143 KiB
+    restored  /restic/restic with size 35.318 MiB
+    restored  /restic
+    [...]
+    Summary: Restored 9072 files/dirs (153.597 MiB) in 0:00
+
+Files with already up to date content are reported as ``unchanged``. Files whose content
+was modified are ``updated`` and files that are new are shown as ``restored``. Directories
+and other file types like symlinks are always reported as ``restored``.
+
+To reliably determine which files would be updated, a dry-run also verifies the content of
+already existing files according to the specified overwrite behavior. To skip these checks,
+either specify ``--overwrite never`` or specify a non-existing ``--target`` directory.
 
 Restore using mount
 ===================
@@ -98,9 +196,9 @@ command to serve the repository with FUSE:
 
 Mounting repositories via FUSE is only possible on Linux, macOS and FreeBSD.
 On Linux, the ``fuse`` kernel module needs to be loaded and the ``fusermount``
-command needs to be in the ``PATH``. On macOS, you need `FUSE-T
-`__ or `FUSE for macOS `__.
-On FreeBSD, you may need to install FUSE and load the kernel module (``kldload fuse``).
 
 Restic supports storage and preservation of hard links. However, since
 hard links exist in the scope of a filesystem by definition, restoring
@@ -176,8 +274,8 @@ To include the folder content at the root of the archive, you can use the ``
 
     restore.tar
 
 It is also possible to ``dump`` the contents of a selected snapshot and folder
-structure to a file using the ``--target`` flag. The ``dump`` command will fail
-if the already file exists.
+structure to a file using the ``--target`` flag.
 
..
code-block:: console - $ restic -r /srv/restic-repo dump latest / --target /home/linux.user/output.tar -a tar \ No newline at end of file + + $ restic -r /srv/restic-repo dump latest / --target /home/linux.user/output.tar -a tar diff --git a/doc/060_forget.rst b/doc/060_forget.rst index caeb6313a13..b211148cb39 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -80,7 +80,7 @@ command must be run: $ restic -r /srv/restic-repo prune enter password for repository: - repository 33002c5e opened successfully, password is correct + repository 33002c5e opened successfully loading all snapshots... loading indexes... finding data that is still in use for 4 snapshots @@ -182,7 +182,9 @@ The ``forget`` command accepts the following policy options: - ``--keep-yearly n`` for the last ``n`` years which have one or more snapshots, keep only the most recent one for each year. - ``--keep-tag`` keep all snapshots which have all tags specified by - this option (can be specified multiple times). + this option (can be specified multiple times). The ``forget`` command will + exit with an error if all snapshots in a snapshot group would be removed + as none of them have the specified tags. - ``--keep-within duration`` keep all snapshots having a timestamp within the specified duration of the latest snapshot, where ``duration`` is a number of years, months, days, and hours. E.g. ``2y5m7d3h`` will keep all @@ -205,14 +207,15 @@ The ``forget`` command accepts the following policy options: natural time boundaries and *not* relative to when you run ``forget``. Weeks are Monday 00:00 to Sunday 23:59, days 00:00 to 23:59, hours :00 to :59, etc. They also only count hours/days/weeks/etc which have one or more snapshots. - A value of ``-1`` will be interpreted as "forever", i.e. "keep all". + A value of ``unlimited`` will be interpreted as "forever", i.e. "keep all". .. note:: All duration related options (``--keep-{within-,}*``) ignore snapshots with a timestamp in the future (relative to when the ``forget`` command is run) and these snapshots will hence not be removed. .. note:: If there are not enough snapshots to keep one for each duration related - ``--keep-{within-,}*`` option, the oldest snapshot is kept additionally. + ``--keep-{within-,}*`` option, the oldest snapshot is kept additionally and + marked as ``oldest`` in the output (e.g. ``oldest hourly snapshot``). .. note:: Specifying ``--keep-tag ''`` will match untagged snapshots only. @@ -263,7 +266,7 @@ Sunday for 12 weeks: .. code-block:: console $ restic snapshots - repository f00c6e2a opened successfully, password is correct + repository f00c6e2a opened successfully ID Time Host Tags Paths --------------------------------------------------------------- 0a1f9759 2019-09-01 11:00:00 mopped /home/user/work @@ -287,7 +290,7 @@ four Sundays, and remove the other snapshots: .. code-block:: console $ restic forget --keep-daily 4 --dry-run - repository f00c6e2a opened successfully, password is correct + repository f00c6e2a opened successfully Applying Policy: keep the last 4 daily snapshots keep 4 snapshots: ID Time Host Tags Reasons Paths @@ -336,12 +339,23 @@ year and yearly for the last 75 years, you can instead specify ``forget --keep-within-yearly 75y`` (note that `1w` is not a recognized duration, so you will have to specify `7d` instead). + +Removing all snapshots +====================== + For safety reasons, restic refuses to act on an "empty" policy. 
For example, if one were to specify ``--keep-last 0`` to forget *all*
snapshots in the repository, restic will respond that no snapshots will be
removed. To delete all snapshots, use ``--keep-last 1`` and then finally
remove the last snapshot manually (by passing the ID to ``forget``).
 
+Since restic 0.17.0, it is possible to delete all snapshots for a specific
+host, tag or path using the ``--unsafe-allow-remove-all`` option. The option
+must always be combined with a snapshot filter (by host, path or tag).
+For example, the command ``forget --tag example --unsafe-allow-remove-all``
+removes all snapshots with tag ``example``.
+
+
 Security considerations in append-only mode
 ===========================================
 
diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst
index 7279ee61474..57a8e2872f5 100644
--- a/doc/075_scripting.rst
+++ b/doc/075_scripting.rst
@@ -21,23 +21,55 @@ Check if a repository is already initialized
 ********************************************
 
 You may find a need to check if a repository is already initialized,
-perhaps to prevent your script from initializing a repository multiple
-times. The command ``cat config`` may be used for this purpose:
+perhaps to prevent your script from trying to initialize a repository multiple
+times (the ``init`` command contains a check to prevent overwriting existing
+repositories). The command ``cat config`` may be used for this purpose:
 
 .. code-block:: console
 
     $ restic -r /srv/restic-repo cat config
-    Fatal: unable to open config file: stat /srv/restic-repo/config: no such file or directory
+    Fatal: repository does not exist: unable to open config file: stat /srv/restic-repo/config: no such file or directory
     Is there a repository at the following location?
     /srv/restic-repo
 
-If a repository does not exist, restic will return a non-zero exit code
-and print an error message. Note that restic will also return a non-zero
-exit code if a different error is encountered (e.g.: incorrect password
-to ``cat config``) and it may print a different error message. If there
-are no errors, restic will return a zero exit code and print the repository
+If a repository does not exist, restic (since 0.17.0) will return exit code ``10``
+and print a corresponding error message. Older versions return exit code ``1``.
+Note that restic will also return exit code ``1`` if a different error is encountered
+(e.g.: incorrect password to ``cat config``) and it may print a different error message.
+If there are no errors, restic will return a zero exit code and print the repository
 metadata.
 
+.. _exit-codes:
+
+Exit codes
+**********
+
+Restic commands return an exit code that signals whether the command was successful.
+The following table provides a general description; see the help of each command for
+a more specific description.
+
+.. warning::
+    New exit codes will be added over time. If an unknown exit code is returned, then it
+    MUST be treated as a command failure.
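+
+For example, a wrapper script might branch on the exit code and treat any code it
+does not recognize as a failure (a sketch; the meanings of the codes are listed in
+the table below):
+
+.. code-block:: sh
+
+    restic -r /srv/restic-repo backup ~/work
+    case $? in
+        0) echo "backup finished" ;;
+        3) echo "backup incomplete: some source files could not be read" >&2 ;;
+        *) echo "backup failed" >&2; exit 1 ;;
+    esac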
+ ++-----+----------------------------------------------------+ +| 0 | Command was successful | ++-----+----------------------------------------------------+ +| 1 | Command failed, see command help for more details | ++-----+----------------------------------------------------+ +| 2 | Go runtime error | ++-----+----------------------------------------------------+ +| 3 | ``backup`` command could not read some source data | ++-----+----------------------------------------------------+ +| 10 | Repository does not exist (since restic 0.17.0) | ++-----+----------------------------------------------------+ +| 11 | Failed to lock repository (since restic 0.17.0) | ++-----+----------------------------------------------------+ +| 12 | Wrong password (since restic 0.17.1) | ++-----+----------------------------------------------------+ +| 130 | Restic was interrupted using SIGINT or SIGSTOP | ++-----+----------------------------------------------------+ + JSON output *********** @@ -55,12 +87,33 @@ JSON output of most restic commands are documented here. list of allowed values is documented may be extended at any time. +Exit errors +----------- + +Fatal errors will result in a final JSON message on ``stderr`` before the process exits. +It will hold the error message and the exit code. + +.. note:: + Some errors cannot be caught and reported this way, + such as Go runtime errors or command line parsing errors. + ++----------------------+-------------------------------------------+ +| ``message_type`` | Always "exit_error" | ++----------------------+-------------------------------------------+ +| ``code`` | Exit code (see above chart) | ++----------------------+-------------------------------------------+ +| ``message`` | Error message | ++----------------------+-------------------------------------------+ + Output formats -------------- -Currently only the output on ``stdout`` is JSON formatted. Errors printed on ``stderr`` -are still printed as plain text messages. The generated JSON output uses one of the -following two formats. +Commands print their main JSON output on ``stdout``. +The generated JSON output uses one of the following two formats. + +.. note:: + Not all messages and errors have been converted to JSON yet. + Feel free to submit a pull request! Single JSON document ^^^^^^^^^^^^^^^^^^^^ @@ -75,9 +128,6 @@ Several commands, in particular long running ones or those that generate a large use a format also known as JSON lines. It consists of a stream of new-line separated JSON messages. You can determine the nature of the message using the ``message_type`` field. -As an exception, the ``ls`` command uses the field ``struct_type`` instead. - - backup ------ @@ -111,10 +161,12 @@ Status Error ^^^^^ +These errors are printed on ``stderr``. + +----------------------+-------------------------------------------+ | ``message_type`` | Always "error" | +----------------------+-------------------------------------------+ -| ``error`` | Error message | +| ``error.message`` | Error message | +----------------------+-------------------------------------------+ | ``during`` | What restic was trying to do | +----------------------+-------------------------------------------+ @@ -162,19 +214,26 @@ Summary is the last output line in a successful backup. 
+---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | ++---------------------------+---------------------------------------------------------+ +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ -| ``data_added`` | Amount of data added, in bytes | +| ``data_added_packed`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ | ``total_bytes_processed`` | Total number of bytes processed | +---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ | ``total_duration`` | Total time it took for the operation to complete | +---------------------------+---------------------------------------------------------+ -| ``snapshot_id`` | ID of the new snapshot | +| ``snapshot_id`` | ID of the new snapshot. 
Field is omitted if snapshot | +| | creation was skipped | +---------------------------+---------------------------------------------------------+ @@ -368,13 +427,13 @@ Snapshot object Reason object -+----------------+---------------------------------------------------------+ -| ``snapshot`` | Snapshot object, without ``id`` and ``short_id`` fields | -+----------------+---------------------------------------------------------+ -| ``matches`` | Array containing descriptions of the matching criteria | -+----------------+---------------------------------------------------------+ -| ``counters`` | Object containing counters used by the policies | -+----------------+---------------------------------------------------------+ ++----------------+-----------------------------------------------------------+ +| ``snapshot`` | Snapshot object, including ``id`` and ``short_id`` fields | ++----------------+-----------------------------------------------------------+ +| ``matches`` | Array containing descriptions of the matching criteria | ++----------------+-----------------------------------------------------------+ +| ``counters`` | Object containing counters used by the policies | ++----------------+-----------------------------------------------------------+ init @@ -420,63 +479,67 @@ As an exception, the ``struct_type`` field is used to determine the message type snapshot ^^^^^^^^ -+----------------+--------------------------------------------------+ -| ``struct_type``| Always "snapshot" | -+----------------+--------------------------------------------------+ -| ``time`` | Timestamp of when the backup was started | -+----------------+--------------------------------------------------+ -| ``parent`` | ID of the parent snapshot | -+----------------+--------------------------------------------------+ -| ``tree`` | ID of the root tree blob | -+----------------+--------------------------------------------------+ -| ``paths`` | List of paths included in the backup | -+----------------+--------------------------------------------------+ -| ``hostname`` | Hostname of the backed up machine | -+----------------+--------------------------------------------------+ -| ``username`` | Username the backup command was run as | -+----------------+--------------------------------------------------+ -| ``uid`` | ID of owner | -+----------------+--------------------------------------------------+ -| ``gid`` | ID of group | -+----------------+--------------------------------------------------+ -| ``excludes`` | List of paths and globs excluded from the backup | -+----------------+--------------------------------------------------+ -| ``tags`` | List of tags for the snapshot in question | -+----------------+--------------------------------------------------+ -| ``id`` | Snapshot ID | -+----------------+--------------------------------------------------+ -| ``short_id`` | Snapshot ID, short form | -+----------------+--------------------------------------------------+ ++------------------+--------------------------------------------------+ +| ``message_type`` | Always "snapshot" | ++------------------+--------------------------------------------------+ +| ``struct_type`` | Always "snapshot" (deprecated) | ++------------------+--------------------------------------------------+ +| ``time`` | Timestamp of when the backup was started | ++------------------+--------------------------------------------------+ +| ``parent`` | ID of the parent snapshot | ++------------------+--------------------------------------------------+ +| 
``tree`` | ID of the root tree blob | ++------------------+--------------------------------------------------+ +| ``paths`` | List of paths included in the backup | ++------------------+--------------------------------------------------+ +| ``hostname`` | Hostname of the backed up machine | ++------------------+--------------------------------------------------+ +| ``username`` | Username the backup command was run as | ++------------------+--------------------------------------------------+ +| ``uid`` | ID of owner | ++------------------+--------------------------------------------------+ +| ``gid`` | ID of group | ++------------------+--------------------------------------------------+ +| ``excludes`` | List of paths and globs excluded from the backup | ++------------------+--------------------------------------------------+ +| ``tags`` | List of tags for the snapshot in question | ++------------------+--------------------------------------------------+ +| ``id`` | Snapshot ID | ++------------------+--------------------------------------------------+ +| ``short_id`` | Snapshot ID, short form | ++------------------+--------------------------------------------------+ node ^^^^ -+-----------------+--------------------------+ -| ``struct_type`` | Always "node" | -+-----------------+--------------------------+ -| ``name`` | Node name | -+-----------------+--------------------------+ -| ``type`` | Node type | -+-----------------+--------------------------+ -| ``path`` | Node path | -+-----------------+--------------------------+ -| ``uid`` | UID of node | -+-----------------+--------------------------+ -| ``gid`` | GID of node | -+-----------------+--------------------------+ -| ``size`` | Size in bytes | -+-----------------+--------------------------+ -| ``mode`` | Node mode | -+-----------------+--------------------------+ -| ``atime`` | Node access time | -+-----------------+--------------------------+ -| ``mtime`` | Node modification time | -+-----------------+--------------------------+ -| ``ctime`` | Node creation time | -+-----------------+--------------------------+ -| ``inode`` | Inode number of node | -+-----------------+--------------------------+ ++------------------+----------------------------+ +| ``message_type`` | Always "node" | ++------------------+----------------------------+ +| ``struct_type`` | Always "node" (deprecated) | ++------------------+----------------------------+ +| ``name`` | Node name | ++------------------+----------------------------+ +| ``type`` | Node type | ++------------------+----------------------------+ +| ``path`` | Node path | ++------------------+----------------------------+ +| ``uid`` | UID of node | ++------------------+----------------------------+ +| ``gid`` | GID of node | ++------------------+----------------------------+ +| ``size`` | Size in bytes | ++------------------+----------------------------+ +| ``mode`` | Node mode | ++------------------+----------------------------+ +| ``atime`` | Node access time | ++------------------+----------------------------+ +| ``mtime`` | Node modification time | ++------------------+----------------------------+ +| ``ctime`` | Node creation time | ++------------------+----------------------------+ +| ``inode`` | Inode number of node | ++------------------+----------------------------+ restore @@ -498,11 +561,47 @@ Status +----------------------+------------------------------------------------------------+ |``files_restored`` | Files restored | 
+----------------------+------------------------------------------------------------+ +|``files_skipped`` | Files skipped due to overwrite setting | ++----------------------+------------------------------------------------------------+ +|``files_deleted`` | Files deleted | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | +----------------------+------------------------------------------------------------+ +|``bytes_skipped`` | Total size of skipped files | ++----------------------+------------------------------------------------------------+ + +Error +^^^^^ + +These errors are printed on ``stderr``. + ++----------------------+-------------------------------------------+ +| ``message_type`` | Always "error" | ++----------------------+-------------------------------------------+ +| ``error.message`` | Error message | ++----------------------+-------------------------------------------+ +| ``during`` | Always "restore" | ++----------------------+-------------------------------------------+ +| ``item`` | Usually, the path of the problematic file | ++----------------------+-------------------------------------------+ +Verbose Status +^^^^^^^^^^^^^^ + +Verbose status provides details about the progress, including details about restored files. +Only printed if `--verbose=2` is specified. + ++----------------------+-----------------------------------------------------------+ +| ``message_type`` | Always "verbose_status" | ++----------------------+-----------------------------------------------------------+ +| ``action`` | Either "restored", "updated", "unchanged" or "deleted" | ++----------------------+-----------------------------------------------------------+ +| ``item`` | The item in question | ++----------------------+-----------------------------------------------------------+ +| ``size`` | Size of the item in bytes | ++----------------------+-----------------------------------------------------------+ Summary ^^^^^^^ @@ -516,10 +615,16 @@ Summary +----------------------+------------------------------------------------------------+ |``files_restored`` | Files restored | +----------------------+------------------------------------------------------------+ +|``files_skipped`` | Files skipped due to overwrite setting | ++----------------------+------------------------------------------------------------+ +|``files_deleted`` | Files deleted | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | +----------------------+------------------------------------------------------------+ +|``bytes_skipped`` | Total size of skipped files | ++----------------------+------------------------------------------------------------+ snapshots @@ -550,11 +655,48 @@ The snapshots command returns a single JSON object, an array with objects of the +---------------------+--------------------------------------------------+ | ``program_version`` | restic version used to create snapshot | +---------------------+--------------------------------------------------+ +| ``summary`` | Snapshot statistics, see "Summary object" | 
++---------------------+--------------------------------------------------+ | ``id`` | Snapshot ID | +---------------------+--------------------------------------------------+ | ``short_id`` | Snapshot ID, short form | +---------------------+--------------------------------------------------+ +Summary object + +The contained statistics reflect the information at the point in time when the snapshot +was created. + ++---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ +| ``files_new`` | Number of new files | ++---------------------------+---------------------------------------------------------+ +| ``files_changed`` | Number of files that changed | ++---------------------------+---------------------------------------------------------+ +| ``files_unmodified`` | Number of files that did not change | ++---------------------------+---------------------------------------------------------+ +| ``dirs_new`` | Number of new directories | ++---------------------------+---------------------------------------------------------+ +| ``dirs_changed`` | Number of directories that changed | ++---------------------------+---------------------------------------------------------+ +| ``dirs_unmodified`` | Number of directories that did not change | ++---------------------------+---------------------------------------------------------+ +| ``data_blobs`` | Number of data blobs added | ++---------------------------+---------------------------------------------------------+ +| ``tree_blobs`` | Number of tree blobs added | ++---------------------------+---------------------------------------------------------+ +| ``data_added`` | Amount of (uncompressed) data added, in bytes | ++---------------------------+---------------------------------------------------------+ +| ``data_added_packed`` | Amount of data added (after compression), in bytes | ++---------------------------+---------------------------------------------------------+ +| ``total_files_processed`` | Total number of files processed | ++---------------------------+---------------------------------------------------------+ +| ``total_bytes_processed`` | Total number of bytes processed | ++---------------------------+---------------------------------------------------------+ + stats ----- @@ -580,18 +722,44 @@ The stats command returns a single JSON object. | ``compression_space_saving`` | Overall space saving due to compression | +------------------------------+-----------------------------------------------------+ +tag +--- + +The ``tag`` command uses the JSON lines format with the following message types. 
+
+Changed
+^^^^^^^
+
++--------------------------+-------------------------------------------+
+| ``message_type``         | Always "changed"                          |
++--------------------------+-------------------------------------------+
+| ``old_snapshot_id``      | ID of the snapshot before the change      |
++--------------------------+-------------------------------------------+
+| ``new_snapshot_id``      | ID of the snapshot after the change       |
++--------------------------+-------------------------------------------+
+
+Summary
+^^^^^^^
+
++-----------------------------+-------------------------------------------+
+| ``message_type``            | Always "summary"                          |
++-----------------------------+-------------------------------------------+
+| ``changed_snapshot_count``  | Total number of changed snapshots         |
++-----------------------------+-------------------------------------------+
 
 version
 -------
 
 The version command returns a single JSON object.
 
-+----------------+--------------------+
-| ``version``    | restic version     |
-+----------------+--------------------+
-| ``go_version`` | Go compile version |
-+----------------+--------------------+
-| ``go_os``      | Go OS              |
-+----------------+--------------------+
-| ``go_arch``    | Go architecture    |
-+----------------+--------------------+
++------------------+--------------------+
+| ``message_type`` | Always "version"   |
++------------------+--------------------+
+| ``version``      | restic version     |
++------------------+--------------------+
+| ``go_version``   | Go compile version |
++------------------+--------------------+
+| ``go_os``        | Go OS              |
++------------------+--------------------+
+| ``go_arch``      | Go architecture    |
++------------------+--------------------+
diff --git a/doc/077_troubleshooting.rst b/doc/077_troubleshooting.rst
index 6a9a6ee15c0..36c9d63ecc4 100644
--- a/doc/077_troubleshooting.rst
+++ b/doc/077_troubleshooting.rst
@@ -10,6 +10,8 @@
    ^ for subsubsections
    " for paragraphs
 
+.. _troubleshooting:
+
 #########################
 Troubleshooting
 #########################
@@ -71,11 +73,15 @@ some blobs in the repository, then please ask for help in the forum or our IRC
 channel. These errors are often caused by hardware problems which **must** be
 investigated and fixed. Otherwise, the backup will be damaged again and again.
 
-Similarly, if a repository is repeatedly damaged, please open an `issue on Github
+Similarly, if a repository is repeatedly damaged, please open an `issue on GitHub
 `_ as this could indicate a bug somewhere. Please include the check output and
 additional information that might help locate the problem.
 
+If ``check`` detects damaged pack files, it will show instructions on how to repair
+them using the ``repair pack`` command. Use that command instead of the "Repair the
+index" section in this guide.
+
 2. Backup the repository
 ************************
 
@@ -98,12 +104,17 @@ remove data unexpectedly. Please take the time to understand what the commands
 described in the following do. If you are unsure, then ask for help in the forum
 or our IRC channel. Search whether your issue is already known and solved. Please
 take a look at the
-`forum`_ and `Github issues `_.
+`forum`_ and `GitHub issues `_.
 
 3. Repair the index
 *******************
 
+.. note::
+
+   If the ``check`` command tells you to run ``restic repair pack``, then use that
+   command instead. It will repair the damaged pack files and also update the index.
+
 Restic relies on its index to contain correct information about what data
 is stored in the repository.
Thus, the first step to repair a repository is to repair the index: diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh index cae37a6ca2d..985d0e369bc 100644 --- a/doc/bash-completion.sh +++ b/doc/bash-completion.sh @@ -49,7 +49,7 @@ __restic_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly restic allows to handle aliases + # Calling ${words[0]} instead of directly restic allows handling aliases args=("${words[@]:1}") # Disable ActiveHelp which is not supported for bash completion v1 requestComp="RESTIC_ACTIVE_HELP=0 ${words[0]} __completeNoDesc ${args[*]}" @@ -456,12 +456,16 @@ _restic_backup() two_word_flags+=("--read-concurrency") local_nonpersistent_flags+=("--read-concurrency") local_nonpersistent_flags+=("--read-concurrency=") + flags+=("--skip-if-unchanged") + local_nonpersistent_flags+=("--skip-if-unchanged") flags+=("--stdin") local_nonpersistent_flags+=("--stdin") flags+=("--stdin-filename=") two_word_flags+=("--stdin-filename") local_nonpersistent_flags+=("--stdin-filename") local_nonpersistent_flags+=("--stdin-filename=") + flags+=("--stdin-from-command") + local_nonpersistent_flags+=("--stdin-from-command") flags+=("--tag=") two_word_flags+=("--tag") local_nonpersistent_flags+=("--tag") @@ -479,6 +483,9 @@ _restic_backup() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -509,6 +516,8 @@ _restic_backup() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -552,6 +561,9 @@ _restic_cache() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -582,6 +594,8 @@ _restic_cache() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -617,6 +631,9 @@ _restic_cat() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -647,6 +664,8 @@ _restic_cat() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -654,6 +673,15 @@ _restic_cat() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("blob") + must_have_one_noun+=("config") + must_have_one_noun+=("index") + must_have_one_noun+=("key") + must_have_one_noun+=("lock") + must_have_one_noun+=("masterkey") + must_have_one_noun+=("pack") + must_have_one_noun+=("snapshot") + must_have_one_noun+=("tree") 
noun_aliases=() } @@ -690,6 +718,9 @@ _restic_check() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -720,6 +751,8 @@ _restic_check() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -744,6 +777,8 @@ _restic_copy() flags_with_completion=() flags_completion=() + flags+=("--from-insecure-no-password") + local_nonpersistent_flags+=("--from-insecure-no-password") flags+=("--from-key-hint=") two_word_flags+=("--from-key-hint") local_nonpersistent_flags+=("--from-key-hint") @@ -789,6 +824,9 @@ _restic_copy() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -819,6 +857,8 @@ _restic_copy() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -856,6 +896,9 @@ _restic_diff() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -886,6 +929,8 @@ _restic_diff() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -934,6 +979,82 @@ _restic_dump() two_word_flags+=("--tag") local_nonpersistent_flags+=("--tag") local_nonpersistent_flags+=("--tag=") + flags+=("--target=") + two_word_flags+=("--target") + two_word_flags+=("-t") + local_nonpersistent_flags+=("--target") + local_nonpersistent_flags+=("--target=") + local_nonpersistent_flags+=("-t") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + 
two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_features() +{ + last_command="restic_features" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") flags+=("--cacert=") two_word_flags+=("--cacert") flags+=("--cache-dir=") @@ -941,6 +1062,9 @@ _restic_dump() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -971,6 +1095,8 @@ _restic_dump() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1056,6 +1182,9 @@ _restic_find() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1086,6 +1215,8 @@ _restic_find() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1174,6 +1305,8 @@ _restic_forget() two_word_flags+=("--keep-tag") local_nonpersistent_flags+=("--keep-tag") local_nonpersistent_flags+=("--keep-tag=") + flags+=("--unsafe-allow-remove-all") + local_nonpersistent_flags+=("--unsafe-allow-remove-all") flags+=("--host=") two_word_flags+=("--host") local_nonpersistent_flags+=("--host") @@ -1227,6 +1360,9 @@ _restic_forget() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1257,6 +1393,8 @@ _restic_forget() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1312,6 +1450,9 @@ _restic_generate() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1342,6 +1483,8 @@ _restic_generate() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1373,6 +1516,9 @@ 
_restic_help() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1403,6 +1549,8 @@ _restic_help() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1430,6 +1578,8 @@ _restic_init() flags+=("--copy-chunker-params") local_nonpersistent_flags+=("--copy-chunker-params") + flags+=("--from-insecure-no-password") + local_nonpersistent_flags+=("--from-insecure-no-password") flags+=("--from-key-hint=") two_word_flags+=("--from-key-hint") local_nonpersistent_flags+=("--from-key-hint") @@ -1465,6 +1615,9 @@ _restic_init() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1495,6 +1648,8 @@ _restic_init() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1505,9 +1660,230 @@ _restic_init() noun_aliases=() } -_restic_key() +_restic_key_add() { - last_command="restic_key" + last_command="restic_key_add" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--host=") + two_word_flags+=("--host") + local_nonpersistent_flags+=("--host") + local_nonpersistent_flags+=("--host=") + flags+=("--new-insecure-no-password") + local_nonpersistent_flags+=("--new-insecure-no-password") + flags+=("--new-password-file=") + two_word_flags+=("--new-password-file") + local_nonpersistent_flags+=("--new-password-file") + local_nonpersistent_flags+=("--new-password-file=") + flags+=("--user=") + two_word_flags+=("--user") + local_nonpersistent_flags+=("--user") + local_nonpersistent_flags+=("--user=") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + 
flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key_help() +{ + last_command="restic_key_help" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + has_completion_function=1 + noun_aliases=() +} + +_restic_key_list() +{ + last_command="restic_key_list" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + 
flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key_passwd() +{ + last_command="restic_key_passwd" command_aliases=() @@ -1527,6 +1903,8 @@ _restic_key() two_word_flags+=("--host") local_nonpersistent_flags+=("--host") local_nonpersistent_flags+=("--host=") + flags+=("--new-insecure-no-password") + local_nonpersistent_flags+=("--new-insecure-no-password") flags+=("--new-password-file=") two_word_flags+=("--new-password-file") local_nonpersistent_flags+=("--new-password-file") @@ -1542,6 +1920,154 @@ _restic_key() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key_remove() +{ + last_command="restic_key_remove" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") 
+ two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key() +{ + last_command="restic_key" + + command_aliases=() + + commands=() + commands+=("add") + commands+=("help") + commands+=("list") + commands+=("passwd") + commands+=("remove") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1572,6 +2098,8 @@ _restic_key() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1607,6 +2135,9 @@ _restic_list() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1637,6 +2168,8 @@ _restic_list() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1644,6 +2177,12 @@ _restic_list() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("blobs") + must_have_one_noun+=("index") + must_have_one_noun+=("keys") + must_have_one_noun+=("locks") + must_have_one_noun+=("packs") + must_have_one_noun+=("snapshots") noun_aliases=() } @@ -1677,6 +2216,8 @@ _restic_ls() flags+=("-l") local_nonpersistent_flags+=("--long") local_nonpersistent_flags+=("-l") + flags+=("--ncdu") + local_nonpersistent_flags+=("--ncdu") flags+=("--path=") two_word_flags+=("--path") local_nonpersistent_flags+=("--path") @@ -1694,6 +2235,9 @@ _restic_ls() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1724,6 +2268,8 @@ _restic_ls() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1763,6 +2309,9 @@ _restic_migrate() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ 
-1793,6 +2342,8 @@ _restic_migrate() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1856,6 +2407,79 @@ _restic_mount() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_options() +{ + last_command="restic_options" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1886,6 +2510,8 @@ _restic_mount() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1943,6 +2569,9 @@ _restic_prune() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1973,6 +2602,8 @@ _restic_prune() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2008,6 +2639,9 @@ _restic_recover() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") 
flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2038,6 +2672,8 @@ _restic_recover() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2069,6 +2705,9 @@ _restic_repair_help() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2099,6 +2738,8 @@ _restic_repair_help() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2137,6 +2778,9 @@ _restic_repair_index() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2167,6 +2811,8 @@ _restic_repair_index() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2202,6 +2848,9 @@ _restic_repair_packs() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2232,6 +2881,8 @@ _restic_repair_packs() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2287,6 +2938,9 @@ _restic_repair_snapshots() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2317,6 +2971,8 @@ _restic_repair_snapshots() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2356,6 +3012,9 @@ _restic_repair() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2386,6 +3045,8 @@ _restic_repair() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2410,12 +3071,20 @@ _restic_restore() flags_with_completion=() 
flags_completion=() + flags+=("--delete") + local_nonpersistent_flags+=("--delete") + flags+=("--dry-run") + local_nonpersistent_flags+=("--dry-run") flags+=("--exclude=") two_word_flags+=("--exclude") two_word_flags+=("-e") local_nonpersistent_flags+=("--exclude") local_nonpersistent_flags+=("--exclude=") local_nonpersistent_flags+=("-e") + flags+=("--exclude-file=") + two_word_flags+=("--exclude-file") + local_nonpersistent_flags+=("--exclude-file") + local_nonpersistent_flags+=("--exclude-file=") flags+=("--help") flags+=("-h") local_nonpersistent_flags+=("--help") @@ -2430,16 +3099,32 @@ _restic_restore() two_word_flags+=("--iexclude") local_nonpersistent_flags+=("--iexclude") local_nonpersistent_flags+=("--iexclude=") + flags+=("--iexclude-file=") + two_word_flags+=("--iexclude-file") + local_nonpersistent_flags+=("--iexclude-file") + local_nonpersistent_flags+=("--iexclude-file=") flags+=("--iinclude=") two_word_flags+=("--iinclude") local_nonpersistent_flags+=("--iinclude") local_nonpersistent_flags+=("--iinclude=") + flags+=("--iinclude-file=") + two_word_flags+=("--iinclude-file") + local_nonpersistent_flags+=("--iinclude-file") + local_nonpersistent_flags+=("--iinclude-file=") flags+=("--include=") two_word_flags+=("--include") two_word_flags+=("-i") local_nonpersistent_flags+=("--include") local_nonpersistent_flags+=("--include=") local_nonpersistent_flags+=("-i") + flags+=("--include-file=") + two_word_flags+=("--include-file") + local_nonpersistent_flags+=("--include-file") + local_nonpersistent_flags+=("--include-file=") + flags+=("--overwrite=") + two_word_flags+=("--overwrite") + local_nonpersistent_flags+=("--overwrite") + local_nonpersistent_flags+=("--overwrite=") flags+=("--path=") two_word_flags+=("--path") local_nonpersistent_flags+=("--path") @@ -2465,6 +3150,9 @@ _restic_restore() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2495,6 +3183,8 @@ _restic_restore() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2553,6 +3243,14 @@ _restic_rewrite() two_word_flags+=("--iexclude-file") local_nonpersistent_flags+=("--iexclude-file") local_nonpersistent_flags+=("--iexclude-file=") + flags+=("--new-host=") + two_word_flags+=("--new-host") + local_nonpersistent_flags+=("--new-host") + local_nonpersistent_flags+=("--new-host=") + flags+=("--new-time=") + two_word_flags+=("--new-time") + local_nonpersistent_flags+=("--new-time") + local_nonpersistent_flags+=("--new-time=") flags+=("--path=") two_word_flags+=("--path") local_nonpersistent_flags+=("--path") @@ -2568,6 +3266,9 @@ _restic_rewrite() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2598,6 +3299,8 @@ _restic_rewrite() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") 
flags+=("--verbose") @@ -2637,6 +3340,9 @@ _restic_self-update() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2667,6 +3373,8 @@ _restic_self-update() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2730,6 +3438,9 @@ _restic_snapshots() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2760,6 +3471,8 @@ _restic_snapshots() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2796,6 +3509,8 @@ _restic_stats() local_nonpersistent_flags+=("-H") flags+=("--mode=") two_word_flags+=("--mode") + flags_with_completion+=("--mode") + flags_completion+=("__restic_handle_go_custom_completion") local_nonpersistent_flags+=("--mode") local_nonpersistent_flags+=("--mode=") flags+=("--path=") @@ -2813,6 +3528,9 @@ _restic_stats() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2843,6 +3561,8 @@ _restic_stats() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2904,6 +3624,9 @@ _restic_tag() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2934,6 +3657,8 @@ _restic_tag() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2971,6 +3696,9 @@ _restic_unlock() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -3001,6 +3729,8 @@ _restic_unlock() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3036,6 +3766,9 @@ _restic_version() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + 
two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -3066,6 +3799,8 @@ _restic_version() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3090,6 +3825,7 @@ _restic_root_command() commands+=("copy") commands+=("diff") commands+=("dump") + commands+=("features") commands+=("find") commands+=("forget") commands+=("generate") @@ -3100,6 +3836,7 @@ _restic_root_command() commands+=("ls") commands+=("migrate") commands+=("mount") + commands+=("options") commands+=("prune") commands+=("recover") commands+=("repair") @@ -3129,6 +3866,9 @@ _restic_root_command() flags+=("-h") local_nonpersistent_flags+=("--help") local_nonpersistent_flags+=("-h") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -3159,6 +3899,8 @@ _restic_root_command() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") diff --git a/doc/design.rst b/doc/design.rst index b80029d10c6..62b7e9bf967 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -119,15 +119,10 @@ A local repository can be initialized with the ``restic init`` command, e.g.: $ restic -r /tmp/restic-repo init -The local and sftp backends will auto-detect and accept all layouts described -in the following sections, so that remote repositories mounted locally e.g. via -fuse can be accessed. The layout auto-detection can be overridden by specifying -the option ``-o local.layout=default``, valid values are ``default`` and -``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the -s3 backend ``s3.layout``. +S3 Legacy Layout (deprecated) +----------------------------- -S3 Legacy Layout ----------------- +Restic 0.17 is the last version that supports the legacy layout. Unfortunately during development the Amazon S3 backend uses slightly different paths (directory names use singular instead of plural for ``key``, @@ -152,9 +147,6 @@ the ``data`` directory. The S3 Legacy repository layout looks like this: /snapshot └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec -The S3 backend understands and accepts both forms, new backends are -always created with the default layout for compatibility reasons. - Pack Format =========== @@ -234,7 +226,9 @@ Individual files for the index, locks or snapshots are encrypted and authenticated like Data and Tree Blobs, so the outer structure is ``IV || Ciphertext || MAC`` again. In repository format version 1 the plaintext always consists of a JSON document which must either be an -object or an array. +object or an array. The JSON encoder must deterministically encode the +document and should match the behavior of the Go standard library implementation +in ``encoding/json``. Repository format version 2 adds support for compression. The plaintext now starts with a header to indicate the encoding version to distinguish @@ -296,8 +290,8 @@ of a JSON document like the following: } This JSON document lists Packs and the blobs contained therein. 
In this
-example, the Pack ``73d04e61`` contains two data Blobs and one Tree
-blob, the plaintext hashes are listed afterwards. The ``length`` field
+example, the Pack ``73d04e61`` contains three data Blobs; the
+plaintext hashes are listed afterwards. The ``length`` field
 corresponds to ``Length(encrypted_blob)`` in the pack file header. Field
 ``uncompressed_length`` is only present for compressed blobs and therefore
 is never present in version 1 of the repository format. It is
@@ -473,6 +467,10 @@ A snapshot references a tree by the SHA-256 hash of the JSON string
 representation of its contents. Trees and data are saved in pack files
 in a subdirectory of the directory ``data``.
 
+The JSON encoder must deterministically encode the document and should
+match the behavior of the Go standard library implementation in ``encoding/json``.
+This ensures that trees can be properly deduplicated.
+
 The command ``restic cat blob`` can be used to inspect the tree referenced
 above (piping the output of the command to ``jq .`` so that the JSON is
@@ -507,12 +505,11 @@ this metadata is generated:
 
 - The name is quoted using `strconv.Quote <https://pkg.go.dev/strconv#Quote>`__ before
   being saved. This handles non-unicode names, but also changes the
   representation of names containing ``"`` or ``\``.
-
 - The filemode saved is the mode defined by `fs.FileMode <https://pkg.go.dev/io/fs#FileMode>`__
   masked by ``os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky``
-
-When the entry references a directory, the field ``subtree`` contains the plain text
-ID of another tree object.
+- When the entry references a directory, the field ``subtree`` contains the plain text
+  ID of another tree object.
+- Check the implementation for a full struct definition.
 
 When the command ``restic cat blob`` is used, the plaintext ID is needed to
 print a tree. The tree referenced above can be dumped as follows:
diff --git a/doc/developer_information.rst b/doc/developer_information.rst
index c7757e087e8..f1eae3a678e 100644
--- a/doc/developer_information.rst
+++ b/doc/developer_information.rst
@@ -113,6 +113,34 @@ The following steps are necessary to build the binaries:
        restic/builder \
        go run helpers/build-release-binaries/main.go --version 0.14.0 --verbose
 
+Verifying SLSA Provenance for Docker Images
+*******************************************
+
+Our Docker images are built with SLSA (Supply-chain Levels for Software Artifacts)
+provenance.
+
+To verify this provenance:
+
+1. Install the ``slsa-verifier`` tool from https://github.com/slsa-framework/slsa-verifier
+
+2. Run the following command:
+
+   .. code-block:: console
+
+      $ slsa-verifier verify-image \
+          --source-uri github.com/restic/restic \
+          --source-tag <version> \
+          <image>@<digest>
+
+   Replace ``<version>`` with the Git tag of the release you're verifying, ``<image>``
+   with the full name of the Docker image (including the registry), and ``<digest>``
+   with the SHA256 digest of the image.
+
+3. If the verification is successful, you'll see output indicating that the
+   provenance is valid.
+
+This verification ensures that the Docker image was built by our official GitHub
+Actions workflow and has not been tampered with since its creation.
+
 Verifying the Official Binaries
 *******************************
 
@@ -123,7 +151,7 @@ The specified go compiler version must match the one used to build the
 official binaries. For example, for restic 0.16.2 the command would be
 ``helpers/verify-release-binaries.sh 0.16.2 1.21.3``.
 
-The script requires bash, curl, docker, git, gpg, shasum and tar.
+The script requires bash, curl, docker (version >= 25.0), git, gpg, shasum and tar.
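+
+For example, a verification run for the release mentioned above might look like
+this (a sketch; it assumes the command is run from the root of a restic checkout
+and that the listed tools are installed):
+
+.. code-block:: console
+
+    $ helpers/verify-release-binaries.sh 0.16.2 1.21.3
+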
The script first downloads all release binaries, checks the SHASUM256 file and
 its signature. Afterwards it checks that the tarball matches the restic git
 repository
diff --git a/doc/faq.rst b/doc/faq.rst
index e8ef2de5e56..74dd77d7143 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -74,7 +74,7 @@
     $ restic backup --exclude "~/documents" ~
 
 This command will result in a complete backup of the current logged in user's home directory and it won't exclude the folder ``~/documents/`` - which is not what the user wanted to achieve. The problem is how the path to ``~/documents`` is passed to restic.
 
-In order to spot an issue like this, you can make use of the following ruby command preceeding your restic command.
+In order to spot an issue like this, you can make use of the following ruby command preceding your restic command.
 
 ::
 
@@ -90,7 +90,7 @@ The error here is that the tilde ``~`` in ``"~/documents"`` didn't get expanded
     /home/john/documents
 
     $ echo "~/documents"
-    ~/document
+    ~/documents
 
     $ echo "$HOME/documents"
     /home/john/documents
 
@@ -100,7 +100,7 @@ Restic handles globbing and expansion in the following ways:
 
 - Globbing is only expanded for lines read via ``--files-from``
 - Environment variables are not expanded in the file read via ``--files-from``
 - ``*`` is expanded for paths read via ``--files-from``
-- e.g. For backup targets given to restic as arguments on the shell, neither glob expansion nor shell variable replacement is done. If restic is called as ``restic backup '*' '$HOME'``, it will try to backup the literal file(s)/dir(s) ``*`` and ``$HOME``
+- e.g. For backup sources given to restic as arguments on the shell, neither glob expansion nor shell variable replacement is done. If restic is called as ``restic backup '*' '$HOME'``, it will try to back up the literal file(s)/dir(s) ``*`` and ``$HOME``
 - Double-asterisk ``**`` only works in exclude patterns as this is a custom extension built into restic; the shell must not expand it
@@ -228,3 +228,17 @@ Restic backup command fails to find a valid file in Windows
 
 If the name of a file in Windows contains an invalid character, Restic will
 not be able to read the file. To solve this issue, consider renaming the
 particular file.
+
+What can I do in case of "request timeout" errors?
+--------------------------------------------------
+
+Restic monitors connections to the backend to detect stuck requests. If a request
+does not return any data within five minutes, restic assumes the request is stuck and
+retries it. However, for large repositories it sometimes takes longer than that to
+collect a list of all files, causing the following error:
+
+::
+
+    List(data) returned error, retrying after 1s: [...]: request timeout
+
+In this case, you can increase the timeout using the ``--stuck-request-timeout`` option.
diff --git a/doc/man/restic-backup.1 b/doc/man/restic-backup.1
index 730685271d1..a84b955bac7 100644
--- a/doc/man/restic-backup.1
+++ b/doc/man/restic-backup.1
@@ -22,6 +22,9 @@ given as the arguments.
 Exit status is 0 if the command was successful.
 Exit status is 1 if there was a fatal error (no snapshot created).
 Exit status is 3 if some source data could not be read (incomplete snapshot created).
+Exit status is 10 if the repository does not exist.
+Exit status is 11 if the repository is already locked.
+Exit status is 12 if the password is incorrect.
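+
+.PP
+For example, a wrapper script might branch on these codes roughly as follows
+(a sketch, not part of restic; the backup path is illustrative):
+.PP
+.RS
+.nf
+restic backup /home/user
+case $? in
+    0)  echo "backup finished" ;;
+    3)  echo "snapshot created, but some source files could not be read" ;;
+    10|11|12) echo "repository missing, locked, or wrong password" ;;
+    *)  echo "fatal error, no snapshot created" ;;
+esac
+.fi
+.RE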
.SH OPTIONS @@ -63,7 +66,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea .PP \fB-f\fP, \fB--force\fP[=false] - force re-reading the target files/directories (overrides the "parent" flag) + force re-reading the source files/directories (overrides the "parent" flag) .PP \fB-g\fP, \fB--group-by\fP=host,paths @@ -75,7 +78,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea .PP \fB-H\fP, \fB--host\fP="" - set the \fBhostname\fR for the snapshot manually. To prevent an expensive rescan use the "parent" flag + set the \fBhostname\fR for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the "parent" flag .PP \fB--iexclude\fP=[] @@ -91,7 +94,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea .PP \fB--ignore-inode\fP[=false] - ignore inode number changes when checking for modified files + ignore inode number and ctime changes when checking for modified files .PP \fB--no-scan\fP[=false] @@ -109,6 +112,10 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--read-concurrency\fP=0 read \fBn\fR files concurrently (default: $RESTIC_READ_CONCURRENCY or 2) +.PP +\fB--skip-if-unchanged\fP[=false] + skip snapshot creation if identical to parent snapshot + .PP \fB--stdin\fP[=false] read backup from stdin @@ -117,6 +124,10 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--stdin-filename\fP="stdin" \fBfilename\fR to use when reading from stdin +.PP +\fB--stdin-from-command\fP[=false] + interpret arguments as command to execute and store its stdout + .PP \fB--tag\fP=[] add \fBtags\fR for the new snapshot in the format \fBtag[,tag,...]\fR (can be specified multiple times) @@ -147,6 +158,14 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -211,6 +230,10 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-cache.1 b/doc/man/restic-cache.1 index c170c16242f..fb23fe8a9d6 100644 --- a/doc/man/restic-cache.1 +++ b/doc/man/restic-cache.1 @@ -18,7 +18,8 @@ The "cache" command allows listing and cleaning local cache directories. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
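+
+.PP
+For example (a sketch; which cache directories exist depends on the local machine):
+.PP
+.RS
+.nf
+# list local cache directories
+restic cache
+# remove old cache directories
+restic cache --cleanup
+.fi
+.RE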
.SH OPTIONS @@ -56,6 +57,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -120,6 +129,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-cat.1 b/doc/man/restic-cat.1 index b42a58e149e..cab1b85a5b1 100644 --- a/doc/man/restic-cat.1 +++ b/doc/man/restic-cat.1 @@ -18,7 +18,11 @@ The "cat" command is used to print internal objects to stdout. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -44,6 +48,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -108,6 +120,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-check.1 b/doc/man/restic-check.1 index 9c1dc77e504..60d17a3139e 100644 --- a/doc/man/restic-check.1 +++ b/doc/man/restic-check.1 @@ -23,7 +23,11 @@ repository and not use a local cache. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
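+
+.PP
+For example (a sketch; the repository location is illustrative):
+.PP
+.RS
+.nf
+# verify the structural integrity of the repository
+restic -r /srv/restic-repo check
+# additionally read and verify all data blobs
+restic -r /srv/restic-repo check --read-data
+.fi
+.RE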
.SH OPTIONS @@ -61,6 +65,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -125,6 +137,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-copy.1 b/doc/man/restic-copy.1 index bd9795f4447..96c39413947 100644 --- a/doc/man/restic-copy.1 +++ b/doc/man/restic-copy.1 @@ -30,7 +30,20 @@ This can be mitigated by the "--copy-chunker-params" option when initializing a new destination repository using the "init" command. +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. + + .SH OPTIONS +.PP +\fB--from-insecure-no-password\fP[=false] + use an empty password for the source repository (insecure) + .PP \fB--from-key-hint\fP="" key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT) @@ -57,11 +70,11 @@ new destination repository using the "init" command. .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -85,6 +98,14 @@ new destination repository using the "init" command. \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -149,6 +170,10 @@ new destination repository using the "init" command. 
\fB--retry-lock\fP=0s
	retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
 
+.PP
+\fB--stuck-request-timeout\fP=5m0s
+	\fBduration\fR after which to retry stuck requests
+
 .PP
 \fB--tls-client-cert\fP=""
 	path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
diff --git a/doc/man/restic-diff.1 b/doc/man/restic-diff.1
index 28f3a483887..f4c8a1d144b 100644
--- a/doc/man/restic-diff.1
+++ b/doc/man/restic-diff.1
@@ -28,18 +28,28 @@ U  The metadata (access mode, timestamps, ...) for the item was updated
 M  The file's content was modified
 .IP \(bu 2
 T  The type was changed, e.g. a file was made a symlink
+.IP \(bu 2
+?  Bitrot detected: The file's content has changed but all metadata is the same
 
 .RE
 
+.PP
+Metadata comparison will likely not work if a backup was created using the
+\&'--ignore-inode' or '--ignore-ctime' option.
+
 .PP
 To only compare files in specific subfolders, you can use the
-"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
+"snapshotID:subfolder" syntax, where "subfolder" is a path within the
 snapshot.
 
 
 .SH EXIT STATUS
 .PP
-Exit status is 0 if the command was successful, and non-zero if there was any error.
+Exit status is 0 if the command was successful.
+Exit status is 1 if there was any error.
+Exit status is 10 if the repository does not exist.
+Exit status is 11 if the repository is already locked.
+Exit status is 12 if the password is incorrect.
 
 
 .SH OPTIONS
@@ -69,6 +79,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
 \fB--compression\fP=auto
 	compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
 
+.PP
+\fB--http-user-agent\fP=""
+	set a http user agent for outgoing http requests
+
+.PP
+\fB--insecure-no-password\fP[=false]
+	use an empty password for the repository, must be passed to every restic command (insecure)
+
 .PP
 \fB--insecure-tls\fP[=false]
 	skip TLS certificate verification when connecting to the repository (insecure)
@@ -133,6 +151,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
 \fB--retry-lock\fP=0s
 	retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
 
+.PP
+\fB--stuck-request-timeout\fP=5m0s
+	\fBduration\fR after which to retry stuck requests
+
 .PP
 \fB--tls-client-cert\fP=""
 	path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
diff --git a/doc/man/restic-dump.1 b/doc/man/restic-dump.1
index 7fa3f777de1..657570f6d41 100644
--- a/doc/man/restic-dump.1
+++ b/doc/man/restic-dump.1
@@ -24,13 +24,17 @@ repository.
 
 .PP
 To include the folder content at the root of the archive, you can use the
-"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
+"snapshotID:subfolder" syntax, where "subfolder" is a path within the
 snapshot.
 
 
 .SH EXIT STATUS
 .PP
-Exit status is 0 if the command was successful, and non-zero if there was any error.
+Exit status is 0 if the command was successful.
+Exit status is 1 if there was any error.
+Exit status is 10 if the repository does not exist.
+Exit status is 11 if the repository is already locked.
+Exit status is 12 if the password is incorrect.
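+
+.PP
+For example, to dump a subfolder of the latest snapshot as a tar archive
+(a sketch; the snapshot selector and paths are illustrative):
+.PP
+.RS
+.nf
+restic -r /srv/restic-repo dump latest:/home/user/work / --target work.tar
+.fi
+.RE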
.SH OPTIONS @@ -44,16 +48,20 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] only consider snapshots including \fBtag[,tag,...]\fR, when snapshot ID "latest" is given (can be specified multiple times) +.PP +\fB-t\fP, \fB--target\fP="" + write the output to target \fBpath\fR + .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP @@ -72,6 +80,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -136,6 +152,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-features.1 b/doc/man/restic-features.1 new file mode 100644 index 00000000000..b288f655ab0 --- /dev/null +++ b/doc/man/restic-features.1 @@ -0,0 +1,146 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-features - Print list of feature flags + + +.SH SYNOPSIS +.PP +\fBrestic features [flags]\fP + + +.SH DESCRIPTION +.PP +The "features" command prints a list of supported feature flags. + +.PP +To pass feature flags to restic, set the RESTIC_FEATURES environment variable +to "featureA=true,featureB=false". Specifying an unknown feature flag is an error. + +.PP +A feature can either be in alpha, beta, stable or deprecated state. +An \fIalpha\fP feature is disabled by default and may change in arbitrary ways between restic versions or be removed. +A \fIbeta\fP feature is enabled by default, but still can change in minor ways or be removed. +A \fIstable\fP feature is always enabled and cannot be disabled. The flag will be removed in a future restic version. +A \fIdeprecated\fP feature is always disabled and cannot be enabled. The flag will be removed in a future restic version. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
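.PP
A usage sketch grounded in the description above; "featureA" and "featureB" are the
placeholder names from that description, so substitute flag names printed by the
command (specifying an unknown feature flag is an error).
.EX
# list all feature flags and their states
$ restic features

# toggle flags for a single invocation (placeholder names)
$ RESTIC_FEATURES="featureA=true,featureB=false" restic features
.EE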
+ + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for features + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-find.1 b/doc/man/restic-find.1 index c3297c43f10..e8d974527d2 100644 --- a/doc/man/restic-find.1 +++ b/doc/man/restic-find.1 @@ -29,7 +29,7 @@ It can also be used to search for restic blobs or trees for troubleshooting. 
.PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--human-readable\fP[=false] @@ -57,7 +57,7 @@ It can also be used to search for restic blobs or trees for troubleshooting. .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--show-pack-id\fP[=false] @@ -93,6 +93,14 @@ It can also be used to search for restic blobs or trees for troubleshooting. \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -157,6 +165,10 @@ It can also be used to search for restic blobs or trees for troubleshooting. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) @@ -178,8 +190,11 @@ restic find --pack 025c1d06 EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. - +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .EE diff --git a/doc/man/restic-forget.1 b/doc/man/restic-forget.1 index d0c4cfc74da..058dbee25a8 100644 --- a/doc/man/restic-forget.1 +++ b/doc/man/restic-forget.1 @@ -15,7 +15,10 @@ restic-forget - Remove snapshots from the repository .PP The "forget" command removes snapshots according to a policy. All snapshots are first divided into groups according to "--group-by", and after that the policy -specified by the "--keep-*" options is applied to each group individually. +specified by the "--keep-*" options is applied to each group individually. +If there are not enough snapshots to keep one for each duration related +"--keep-{within-,}*" option, the oldest snapshot in the group is kept +additionally. .PP Please note that this command really only deletes the snapshot object in the @@ -29,7 +32,11 @@ security considerations. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect.
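.PP
To make the policy wording concrete, a hypothetical invocation (the retention counts
are illustrative; the keep flags are the usual restic policy options):
.EX
# per host/path group, keep 7 daily, 8 weekly and 12 monthly snapshots
$ restic forget --keep-daily 7 --keep-weekly 8 --keep-monthly 12 \
    --group-by host,paths
.EE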
.SH OPTIONS @@ -85,9 +92,13 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--keep-tag\fP=[] keep snapshots with this \fBtaglist\fR (can be specified multiple times) +.PP +\fB--unsafe-allow-remove-all\fP[=false] + allow deleting all snapshots of a snapshot group + .PP \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--tag\fP=[] @@ -95,7 +106,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB-c\fP, \fB--compact\fP[=false] @@ -155,6 +166,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -219,6 +238,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-generate.1 b/doc/man/restic-generate.1 index 84f659ef2f6..f17a6fcd030 100644 --- a/doc/man/restic-generate.1 +++ b/doc/man/restic-generate.1 @@ -19,7 +19,8 @@ and the auto-completion files for bash, fish and zsh). .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
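.PP
A sketch of typical usage, assuming --man and --bash-completion accept the output
locations (the paths are hypothetical):
.EX
# write man pages and a bash completion script
$ restic generate --man /usr/local/share/man/man1 \
    --bash-completion /etc/bash_completion.d/restic
.EE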
.SH OPTIONS @@ -65,6 +66,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -129,6 +138,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-init.1 b/doc/man/restic-init.1 index 5f19c8f8cc8..50fa00b7101 100644 --- a/doc/man/restic-init.1 +++ b/doc/man/restic-init.1 @@ -18,7 +18,8 @@ The "init" command initializes a new repository. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. .SH OPTIONS @@ -26,6 +27,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--copy-chunker-params\fP[=false] copy chunker parameters from the secondary repository (useful with the copy command) +.PP +\fB--from-insecure-no-password\fP[=false] + use an empty password for the source repository (insecure) + .PP \fB--from-key-hint\fP="" key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT) @@ -72,6 +77,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -136,6 +149,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-key-add.1 b/doc/man/restic-key-add.1 new file mode 100644 index 00000000000..ff33408b47a --- /dev/null +++ b/doc/man/restic-key-add.1 @@ -0,0 +1,154 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-add - Add a new key (password) to the repository; returns the new key ID + + +.SH SYNOPSIS +.PP +\fBrestic key add [flags]\fP + + +.SH DESCRIPTION +.PP +The "add" sub-command creates a new key and validates the key. Returns the new key ID. 
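.PP
An illustrative invocation using the flags listed below (the password file path is
hypothetical):
.EX
# add a second key, reading its password from a file
$ restic key add --user alice --host laptop \
    --new-password-file /run/secrets/new-repo-password
.EE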
+ + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for add + +.PP +\fB--host\fP="" + the hostname for new key + +.PP +\fB--new-insecure-no-password\fP[=false] + add an empty password for the repository (insecure) + +.PP +\fB--new-password-file\fP="" + \fBfile\fR from which to read the new password + +.PP +\fB--user\fP="" + the username for new key + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. 
(default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key-list.1 b/doc/man/restic-key-list.1 new file mode 100644 index 00000000000..7deb05793b0 --- /dev/null +++ b/doc/man/restic-key-list.1 @@ -0,0 +1,140 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-list - List keys (passwords) + + +.SH SYNOPSIS +.PP +\fBrestic key list [flags]\fP + + +.SH DESCRIPTION +.PP +The "list" sub-command lists all the keys (passwords) associated with the repository. +Returns the key ID, username, hostname, created time and if it's the current key being +used to access the repository. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for list + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. 
(default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key-passwd.1 b/doc/man/restic-key-passwd.1 new file mode 100644 index 00000000000..68e81edd984 --- /dev/null +++ b/doc/man/restic-key-passwd.1 @@ -0,0 +1,155 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-passwd - Change key (password); creates a new key ID and removes the old key ID, returns new key ID + + +.SH SYNOPSIS +.PP +\fBrestic key passwd [flags]\fP + + +.SH DESCRIPTION +.PP +The "passwd" sub-command creates a new key, validates the key and removes the old key ID. +Returns the new key ID. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect.
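.PP
A minimal sketch, assuming the new password is supplied via the --new-password-file
flag listed below (the path is hypothetical):
.EX
# rotate the current key's password non-interactively
$ restic key passwd --new-password-file /run/secrets/rotated-password
.EE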
+ + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for passwd + +.PP +\fB--host\fP="" + the hostname for new key + +.PP +\fB--new-insecure-no-password\fP[=false] + add an empty password for the repository (insecure) + +.PP +\fB--new-password-file\fP="" + \fBfile\fR from which to read the new password + +.PP +\fB--user\fP="" + the username for new key + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key-remove.1 b/doc/man/restic-key-remove.1 new file mode 100644 index 00000000000..ff1a0ceb917 --- /dev/null +++ b/doc/man/restic-key-remove.1 @@ -0,0 +1,139 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME 
+.PP +restic-key-remove - Remove key ID (password) from the repository. + + +.SH SYNOPSIS +.PP +\fBrestic key remove [ID] [flags]\fP + + +.SH DESCRIPTION +.PP +The "remove" sub-command removes the selected key ID. The "remove" command does not allow +removing the current key being used to access the repository. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for remove + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. 
(default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key.1 b/doc/man/restic-key.1 index 8d181318821..4fd1f6caf42 100644 --- a/doc/man/restic-key.1 +++ b/doc/man/restic-key.1 @@ -8,17 +8,13 @@ restic-key - Manage keys (passwords) .SH SYNOPSIS .PP -\fBrestic key [flags] [list|add|remove|passwd] [ID]\fP +\fBrestic key [flags]\fP .SH DESCRIPTION .PP -The "key" command manages keys (passwords) for accessing the repository. - - -.SH EXIT STATUS -.PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +The "key" command allows you to set multiple access keys or passwords +per repository. 
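.PP
A hedged sketch of the sub-commands referenced under SEE ALSO (the key ID shown is a
hypothetical placeholder):
.EX
$ restic key list
$ restic key add --user alice --host laptop
$ restic key remove 7f29a358
.EE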
.SH OPTIONS @@ -26,18 +22,6 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB-h\fP, \fB--help\fP[=false] help for key -.PP -\fB--host\fP="" - the hostname for new keys - -.PP -\fB--new-password-file\fP="" - \fBfile\fR from which to read the new password - -.PP -\fB--user\fP="" - the username for new keys - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP @@ -56,6 +40,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -120,6 +112,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) @@ -131,4 +127,4 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .SH SEE ALSO .PP -\fBrestic(1)\fP +\fBrestic(1)\fP, \fBrestic-key-add(1)\fP, \fBrestic-key-list(1)\fP, \fBrestic-key-passwd(1)\fP, \fBrestic-key-remove(1)\fP diff --git a/doc/man/restic-list.1 b/doc/man/restic-list.1 index e399038a219..29945e859d4 100644 --- a/doc/man/restic-list.1 +++ b/doc/man/restic-list.1 @@ -18,7 +18,11 @@ The "list" command allows listing objects in the repository based on type. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
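.PP
A usage sketch, assuming the usual repository object types (snapshots, packs, index,
blobs, keys, locks) are accepted as the type argument:
.EX
# list all snapshot IDs, then all pack file IDs
$ restic list snapshots
$ restic list packs
.EE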
.SH OPTIONS @@ -44,6 +48,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -108,6 +120,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-ls.1 b/doc/man/restic-ls.1 index 10b0657a372..b990d2ec880 100644 --- a/doc/man/restic-ls.1 +++ b/doc/man/restic-ls.1 @@ -33,7 +33,11 @@ a path separator); paths use the forward slash '/' as separator. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -43,7 +47,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--human-readable\fP[=false] @@ -53,9 +57,13 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB-l\fP, \fB--long\fP[=false] use a long listing format showing size and mode +.PP +\fB--ncdu\fP[=false] + output NCDU export format (pipe into 'ncdu -f -') + .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times, snapshots must include all specified paths) .PP \fB--recursive\fP[=false] @@ -83,6 +91,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -147,6 +163,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s 
+ \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-migrate.1 b/doc/man/restic-migrate.1 index 7e48f726c6a..c0fa2dbc1ed 100644 --- a/doc/man/restic-migrate.1 +++ b/doc/man/restic-migrate.1 @@ -20,7 +20,11 @@ names are specified, these migrations are applied. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -50,6 +54,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -114,6 +126,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-mount.1 b/doc/man/restic-mount.1 index aab607fcfe5..5ec59391d85 100644 --- a/doc/man/restic-mount.1 +++ b/doc/man/restic-mount.1 @@ -28,7 +28,6 @@ Example time template without colons: .EX --time-template "2006-01-02_15-04-05" - .EE .PP @@ -36,7 +35,6 @@ You need to specify a sample format for exactly the following timestamp: .EX Mon Jan 2 15:04:05 -0700 MST 2006 - .EE .PP @@ -62,7 +60,11 @@ The default path templates are: .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
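.PP
A hypothetical mount invocation (the mountpoint is a placeholder) using the sample
time template shown above:
.EX
$ mkdir -p /mnt/restic
$ restic mount --time-template "2006-01-02_15-04-05" /mnt/restic
.EE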
.SH OPTIONS @@ -76,7 +78,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--no-default-permissions\fP[=false] @@ -88,7 +90,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--path-template\fP=[] @@ -120,6 +122,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -184,6 +194,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-options.1 b/doc/man/restic-options.1 new file mode 100644 index 00000000000..8ea8bea63c7 --- /dev/null +++ b/doc/man/restic-options.1 @@ -0,0 +1,135 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-options - Print list of extended options + + +.SH SYNOPSIS +.PP +\fBrestic options [flags]\fP + + +.SH DESCRIPTION +.PP +The "options" command prints a list of extended options. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for options + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. 
(default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-prune.1 b/doc/man/restic-prune.1 index c54d5d7ff0e..1ee262b61a1 100644 --- a/doc/man/restic-prune.1 +++ b/doc/man/restic-prune.1 @@ -19,7 +19,11 @@ referenced and therefore not needed any more. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
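.PP
A minimal sketch; --max-unused is an assumption about a tuning flag not shown in this
hunk, so verify it against the full OPTIONS list:
.EX
# drop unreferenced data, tolerating up to 5% unused space in kept pack files
$ restic -r /srv/restic-repo prune --max-unused 5%
.EE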
.SH OPTIONS @@ -73,6 +77,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -137,6 +149,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-recover.1 b/doc/man/restic-recover.1 index 010fbafd77f..382a91cebd0 100644 --- a/doc/man/restic-recover.1 +++ b/doc/man/restic-recover.1 @@ -20,7 +20,11 @@ It can be used if, for example, a snapshot has been removed by accident with "fo .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -46,6 +50,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -110,6 +122,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair-index.1 b/doc/man/restic-repair-index.1 index f06be64c04d..341f90d5926 100644 --- a/doc/man/restic-repair-index.1 +++ b/doc/man/restic-repair-index.1 @@ -19,7 +19,11 @@ repository. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
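.PP
A hedged sketch; --read-all-packs is an assumption about a flag that forces a full
re-read of every pack file instead of an index-only rebuild:
.EX
# rebuild the index from pack file headers
$ restic repair index

# assumed variant: re-read all pack files completely
$ restic repair index --read-all-packs
.EE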
.SH OPTIONS @@ -49,6 +53,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -113,6 +125,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair-packs.1 b/doc/man/restic-repair-packs.1 index f3671fe1803..d0091725bc2 100644 --- a/doc/man/restic-repair-packs.1 +++ b/doc/man/restic-repair-packs.1 @@ -12,9 +12,6 @@ restic-repair-packs - Salvage damaged pack files .SH DESCRIPTION -.PP -WARNING: The CLI for this command is experimental and will likely change in the future! - .PP The "repair packs" command extracts intact blobs from the specified pack files, rebuilds the index to remove the damaged pack files and removes the pack files from the repository. @@ -22,7 +19,11 @@ the index to remove the damaged pack files and removes the pack files from the r .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -48,6 +49,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -112,6 +121,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair-snapshots.1 b/doc/man/restic-repair-snapshots.1 index 9369f25f245..d9e12ddf104 100644 --- a/doc/man/restic-repair-snapshots.1 +++ b/doc/man/restic-repair-snapshots.1 @@ -37,7 +37,11 @@ snapshot! .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. 
+Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -55,11 +59,11 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -83,6 +87,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -147,6 +159,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair.1 b/doc/man/restic-repair.1 index 77aecc17333..b0656248681 100644 --- a/doc/man/restic-repair.1 +++ b/doc/man/restic-repair.1 @@ -39,6 +39,14 @@ Repair the repository \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -103,6 +111,10 @@ Repair the repository \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-restore.1 b/doc/man/restic-restore.1 index 4635b1e43b9..e9ef4ef94c3 100644 --- a/doc/man/restic-restore.1 +++ b/doc/man/restic-restore.1 @@ -21,43 +21,75 @@ The special snapshotID "latest" can be used to restore the latest snapshot in th repository. .PP -To only restore a specific subfolder, you can use the ":" +To only restore a specific subfolder, you can use the "snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. 
+Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS +.PP +\fB--delete\fP[=false] + delete files from target directory if they do not exist in snapshot. Use '--dry-run -vv' to check what would be deleted + +.PP +\fB--dry-run\fP[=false] + do not write any data, just show what would be done + .PP \fB-e\fP, \fB--exclude\fP=[] exclude a \fBpattern\fR (can be specified multiple times) +.PP +\fB--exclude-file\fP=[] + read exclude patterns from a \fBfile\fR (can be specified multiple times) + .PP \fB-h\fP, \fB--help\fP[=false] help for restore .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--iexclude\fP=[] - same as --exclude but ignores the casing of \fBpattern\fR + same as --exclude \fBpattern\fR but ignores the casing of filenames + +.PP +\fB--iexclude-file\fP=[] + same as --exclude-file but ignores casing of \fBfile\fRnames in patterns .PP \fB--iinclude\fP=[] - same as --include but ignores the casing of \fBpattern\fR + same as --include \fBpattern\fR but ignores the casing of filenames + +.PP +\fB--iinclude-file\fP=[] + same as --include-file but ignores casing of \fBfile\fRnames in patterns .PP \fB-i\fP, \fB--include\fP=[] - include a \fBpattern\fR, exclude everything else (can be specified multiple times) + include a \fBpattern\fR (can be specified multiple times) + +.PP +\fB--include-file\fP=[] + read include patterns from a \fBfile\fR (can be specified multiple times) + +.PP +\fB--overwrite\fP=always + overwrite behavior, one of (always|if-changed|if-newer|never) (default: always) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times, snapshots must include all specified paths) .PP \fB--sparse\fP[=false] @@ -93,6 +125,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -157,6 +197,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-rewrite.1 b/doc/man/restic-rewrite.1 index d63c653e6fb..c0d4a7e1a8c 100644 --- a/doc/man/restic-rewrite.1 +++ b/doc/man/restic-rewrite.1 @@ -35,7 
+35,11 @@ use the "prune" command. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -61,7 +65,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--iexclude\fP=[] @@ -71,9 +75,17 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--iexclude-file\fP=[] same as --exclude-file but ignores casing of \fBfile\fRnames in patterns +.PP +\fB--new-host\fP="" + replace hostname + +.PP +\fB--new-time\fP="" + replace time of the backup + .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -97,6 +109,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -161,6 +181,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-self-update.1 b/doc/man/restic-self-update.1 index 92ab5add372..d475f13cb00 100644 --- a/doc/man/restic-self-update.1 +++ b/doc/man/restic-self-update.1 @@ -21,7 +21,11 @@ files. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
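[Editor's note: the numbered exit codes introduced throughout these man pages (1, 10, 11, 12) replace the former catch-all "non-zero" wording, so calling scripts can branch on specific failure modes. A minimal sketch, assuming a POSIX shell; the command choice and messages are illustrative, but the codes are exactly those documented above:]

.. code-block:: sh

    restic snapshots
    case $? in
        0)  echo "ok" ;;
        10) echo "repository does not exist" >&2 ;;
        11) echo "repository is already locked" >&2 ;;
        12) echo "incorrect password" >&2 ;;
        *)  echo "other error" >&2 ;;
    esac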
.SH OPTIONS @@ -51,6 +55,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -115,6 +127,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-snapshots.1 b/doc/man/restic-snapshots.1 index 6203bbf2b25..f59240b444e 100644 --- a/doc/man/restic-snapshots.1 +++ b/doc/man/restic-snapshots.1 @@ -18,7 +18,11 @@ The "snapshots" command lists all snapshots stored in the repository. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -36,7 +40,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--latest\fP=0 @@ -44,7 +48,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -68,6 +72,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -132,6 +144,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-stats.1 b/doc/man/restic-stats.1 index 9d37163defb..1e6e79dac85 
100644 --- a/doc/man/restic-stats.1 +++ b/doc/man/restic-stats.1 @@ -32,7 +32,7 @@ The modes are: .IP \(bu 2 restore-size: (default) Counts the size of the restored files. .IP \(bu 2 -files-by-contents: Counts total size of files, where a file is +files-by-contents: Counts total size of unique files, where a file is considered unique if it has unique contents. .IP \(bu 2 raw-data: Counts the size of blobs in the repository, regardless of @@ -48,7 +48,11 @@ Refer to the online manual for more details about each mode. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -58,7 +62,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--mode\fP="restore-size" @@ -66,7 +70,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -90,6 +94,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -154,6 +166,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-tag.1 b/doc/man/restic-tag.1 index b1468c74d76..89c677867e3 100644 --- a/doc/man/restic-tag.1 +++ b/doc/man/restic-tag.1 @@ -25,7 +25,11 @@ When no snapshotID is given, all snapshots matching the host, tag and path filte .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
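[Editor's note: each command in this set also gains the same new global flags (``--http-user-agent``, ``--insecure-no-password``, ``--stuck-request-timeout``) alongside the existing ``--retry-lock``. A hedged console sketch of how they combine; the repository path and values are illustrative, and note that per the flag description ``--insecure-no-password`` must accompany every invocation against such a repository:]

.. code-block:: console

    $ restic -r /srv/restic-repo snapshots --insecure-no-password \
          --http-user-agent "restic-cron/1.0" \
          --retry-lock 5m --stuck-request-timeout 10m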
.SH OPTIONS @@ -39,11 +43,11 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--remove\fP=[] @@ -75,6 +79,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -139,6 +151,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-unlock.1 b/doc/man/restic-unlock.1 index 0b3b43f2acc..74679ef915f 100644 --- a/doc/man/restic-unlock.1 +++ b/doc/man/restic-unlock.1 @@ -18,7 +18,8 @@ The "unlock" command removes stale locks that have been created by other restic .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. .SH OPTIONS @@ -48,6 +49,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -112,6 +121,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-version.1 b/doc/man/restic-version.1 index ccc23038f75..8d5fe6c65b6 100644 --- a/doc/man/restic-version.1 +++ b/doc/man/restic-version.1 @@ -19,7 +19,8 @@ and the version of this software. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. 
+Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. .SH OPTIONS @@ -45,6 +46,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -109,6 +118,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic.1 b/doc/man/restic.1 index 333eab76a41..bd8009aac43 100644 --- a/doc/man/restic.1 +++ b/doc/man/restic.1 @@ -41,6 +41,14 @@ The full documentation can be found at https://restic.readthedocs.io/ . \fB-h\fP, \fB--help\fP[=false] help for restic +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -105,6 +113,10 @@ The full documentation can be found at https://restic.readthedocs.io/ . \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) @@ -116,4 +128,4 @@ The full documentation can be found at https://restic.readthedocs.io/ . 
.SH SEE ALSO .PP -\fBrestic-backup(1)\fP, \fBrestic-cache(1)\fP, \fBrestic-cat(1)\fP, \fBrestic-check(1)\fP, \fBrestic-copy(1)\fP, \fBrestic-diff(1)\fP, \fBrestic-dump(1)\fP, \fBrestic-find(1)\fP, \fBrestic-forget(1)\fP, \fBrestic-generate(1)\fP, \fBrestic-init(1)\fP, \fBrestic-key(1)\fP, \fBrestic-list(1)\fP, \fBrestic-ls(1)\fP, \fBrestic-migrate(1)\fP, \fBrestic-mount(1)\fP, \fBrestic-prune(1)\fP, \fBrestic-recover(1)\fP, \fBrestic-repair(1)\fP, \fBrestic-restore(1)\fP, \fBrestic-rewrite(1)\fP, \fBrestic-self-update(1)\fP, \fBrestic-snapshots(1)\fP, \fBrestic-stats(1)\fP, \fBrestic-tag(1)\fP, \fBrestic-unlock(1)\fP, \fBrestic-version(1)\fP +\fBrestic-backup(1)\fP, \fBrestic-cache(1)\fP, \fBrestic-cat(1)\fP, \fBrestic-check(1)\fP, \fBrestic-copy(1)\fP, \fBrestic-diff(1)\fP, \fBrestic-dump(1)\fP, \fBrestic-features(1)\fP, \fBrestic-find(1)\fP, \fBrestic-forget(1)\fP, \fBrestic-generate(1)\fP, \fBrestic-init(1)\fP, \fBrestic-key(1)\fP, \fBrestic-list(1)\fP, \fBrestic-ls(1)\fP, \fBrestic-migrate(1)\fP, \fBrestic-mount(1)\fP, \fBrestic-options(1)\fP, \fBrestic-prune(1)\fP, \fBrestic-recover(1)\fP, \fBrestic-repair(1)\fP, \fBrestic-restore(1)\fP, \fBrestic-rewrite(1)\fP, \fBrestic-self-update(1)\fP, \fBrestic-snapshots(1)\fP, \fBrestic-stats(1)\fP, \fBrestic-tag(1)\fP, \fBrestic-unlock(1)\fP, \fBrestic-version(1)\fP diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index bf9554e046a..d1e5817f324 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -8,11 +8,13 @@ Usage help is available: .. code-block:: console - $ ./restic --help + $ restic --help restic is a backup program which allows saving multiple revisions of files and directories in an encrypted repository stored on different backends. + The full documentation can be found at https://restic.readthedocs.io/ . + Usage: restic [command] @@ -26,8 +28,6 @@ Usage help is available: dump Print a backed-up file to stdout find Find a file, a directory or restic IDs forget Remove snapshots from the repository - generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell) - help Help about any command init Initialize a new repository key Manage keys (passwords) list List objects in the repository @@ -39,25 +39,36 @@ Usage help is available: repair Repair the repository restore Extract the data from a snapshot rewrite Rewrite snapshots to exclude unwanted files - self-update Update the restic binary snapshots List all snapshots stats Scan the repository and show basic statistics tag Modify tags on snapshots unlock Remove locks other processes created + + Advanced Options: + features Print list of feature flags + options Print list of extended options + + Additional Commands: + generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell) + help Help about any command + self-update Update the restic binary version Print version information Flags: - --cacert file file to load root certificates from (default: use system certificates) + --cacert file file to load root certificates from (default: use system certificates or $RESTIC_CACERT) --cache-dir directory set the cache directory. 
(default: use system default cache directory) --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) -h, --help help for restic + --http-user-agent string set a http user agent for outgoing http requests + --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it --key-hint key key ID of key to try decrypting first (default: $RESTIC_KEY_HINT) --limit-download rate limits downloads to a maximum rate in KiB/s. (default: unlimited) --limit-upload rate limits uploads to a maximum rate in KiB/s. (default: unlimited) --no-cache do not use a local cache + --no-extra-verify skip additional verification of data before upload (see documentation) --no-lock do not lock the repository, this allows some operations on read-only repositories -o, --option key=value set extended option (key=value, can be specified multiple times) --pack-size size set target pack size in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) @@ -67,7 +78,7 @@ Usage help is available: -r, --repo repository repository to backup to or restore from (default: $RESTIC_REPOSITORY) --repository-file file file to read the repository location from (default: $RESTIC_REPOSITORY_FILE) --retry-lock duration retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) - --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key + --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) -v, --verbose be verbose (specify multiple times or a level using --verbose=n, max level/times is 2) Use "restic [command] --help" for more information about a command. @@ -80,7 +91,7 @@ command: .. code-block:: console - $ ./restic backup --help + $ restic backup --help The "backup" command creates a new snapshot and saves the files and directories given as the arguments. @@ -105,10 +116,10 @@ command: --files-from file read the files to backup from file (can be combined with file args; can be specified multiple times) --files-from-raw file read the files to backup from file (can be combined with file args; can be specified multiple times) --files-from-verbatim file read the files to backup from file (can be combined with file args; can be specified multiple times) - -f, --force force re-reading the target files/directories (overrides the "parent" flag) + -f, --force force re-reading the source files/directories (overrides the "parent" flag) -g, --group-by group group snapshots by host, paths and/or tags, separated by comma (disable grouping with '') (default host,paths) -h, --help help for backup - -H, --host hostname set the hostname for the snapshot manually. To prevent an expensive rescan use the "parent" flag + -H, --host hostname set the hostname for the snapshot manually (default: $RESTIC_HOST). 
To prevent an expensive rescan use the "parent" flag --iexclude pattern same as --exclude pattern but ignores the casing of filenames --iexclude-file file same as --exclude-file but ignores casing of filenames in patterns --ignore-ctime ignore ctime changes when checking for modified files @@ -117,24 +128,29 @@ command: -x, --one-file-system exclude other file systems, don't cross filesystem boundaries and subvolumes --parent snapshot use this parent snapshot (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time) --read-concurrency n read n files concurrently (default: $RESTIC_READ_CONCURRENCY or 2) + --skip-if-unchanged skip snapshot creation if identical to parent snapshot --stdin read backup from stdin --stdin-filename filename filename to use when reading from stdin (default "stdin") + --stdin-from-command interpret arguments as command to execute and store its stdout --tag tags add tags for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times) (default []) --time time time of the backup (ex. '2012-11-01 22:08:41') (default: now) --use-fs-snapshot use filesystem snapshot where possible (currently only Windows VSS) --with-atime store the atime for all files and directories Global Flags: - --cacert file file to load root certificates from (default: use system certificates) + --cacert file file to load root certificates from (default: use system certificates or $RESTIC_CACERT) --cache-dir directory set the cache directory. (default: use system default cache directory) --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) + --http-user-agent string set a http user agent for outgoing http requests + --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it --key-hint key key ID of key to try decrypting first (default: $RESTIC_KEY_HINT) --limit-download rate limits downloads to a maximum rate in KiB/s. (default: unlimited) --limit-upload rate limits uploads to a maximum rate in KiB/s. 
(default: unlimited) --no-cache do not use a local cache + --no-extra-verify skip additional verification of data before upload (see documentation) --no-lock do not lock the repository, this allows some operations on read-only repositories -o, --option key=value set extended option (key=value, can be specified multiple times) --pack-size size set target pack size in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) @@ -144,7 +160,7 @@ command: -r, --repo repository repository to backup to or restore from (default: $RESTIC_REPOSITORY) --repository-file file file to read the repository location from (default: $RESTIC_REPOSITORY_FILE) --retry-lock duration retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) - --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key + --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) -v, --verbose be verbose (specify multiple times or a level using --verbose=n, max level/times is 2) Subcommands that support showing progress information such as ``backup``, @@ -322,7 +338,6 @@ required to restore the latest snapshot (from any host that made it): .. code-block:: console $ restic stats latest - password is correct Total File Count: 10538 Total Size: 37.824 GiB @@ -333,7 +348,6 @@ host by using the ``--host`` flag: .. code-block:: console $ restic stats --host myserver latest - password is correct Total File Count: 21766 Total Size: 481.783 GiB @@ -350,7 +364,6 @@ has restic's deduplication helped? We can check: .. code-block:: console $ restic stats --host myserver --mode raw-data latest - password is correct Total Blob Count: 340847 Total Size: 458.663 GiB @@ -408,9 +421,12 @@ Temporary files During some operations (e.g. ``backup`` and ``prune``) restic uses temporary files to store data. These files will, by default, be saved to the system's temporary directory, on Linux this is usually located in -``/tmp/``. The environment variable ``TMPDIR`` can be used to specify a -different directory, e.g. to use the directory ``/var/tmp/restic-tmp`` -instead of the default, set the environment variable like this: +``/tmp/``. To specify a different directory for temporary files, set +the appropriate environment variable. On non-Windows operating systems, +use the ``TMPDIR`` environment variable. On Windows, use either the +``TMP`` or ``TEMP`` environment variable. For example, to use the +directory ``/var/tmp/restic-tmp`` instead of the default, set the +environment variable as follows: .. 
code-block:: console diff --git a/doc/powershell-completion.ps1 b/doc/powershell-completion.ps1 index d8aa5a1af81..033477e7bed 100644 --- a/doc/powershell-completion.ps1 +++ b/doc/powershell-completion.ps1 @@ -10,7 +10,7 @@ filter __restic_escapeStringWithSpecialChars { $_ -replace '\s|#|@|\$|;|,|''|\{|\}|\(|\)|"|`|\||<|>|&','`$&' } -[scriptblock]$__resticCompleterBlock = { +[scriptblock]${__resticCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -85,7 +85,7 @@ filter __restic_escapeStringWithSpecialChars { __restic_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:RESTIC_ACTIVE_HELP=0 + ${env:RESTIC_ACTIVE_HELP}=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -242,4 +242,4 @@ filter __restic_escapeStringWithSpecialChars { } } -Register-ArgumentCompleter -CommandName 'restic' -ScriptBlock $__resticCompleterBlock +Register-ArgumentCompleter -CommandName 'restic' -ScriptBlock ${__resticCompleterBlock} diff --git a/docker/Dockerfile b/docker/Dockerfile index 978da796040..4c031ebacb4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS builder +FROM golang:1.23-alpine AS builder WORKDIR /go/src/github.com/restic/restic diff --git a/go.mod b/go.mod index afcbc427b99..2193e738c40 100644 --- a/go.mod +++ b/go.mod @@ -1,89 +1,85 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.37.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 - github.com/Backblaze/blazer v0.6.1 - github.com/anacrolix/fuse v0.2.0 - github.com/cenkalti/backoff/v4 v4.2.1 - github.com/cespare/xxhash/v2 v2.2.0 + cloud.google.com/go/storage v1.43.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 + github.com/Backblaze/blazer v0.7.1 + github.com/Microsoft/go-winio v0.6.2 + github.com/anacrolix/fuse v0.3.1 + github.com/cenkalti/backoff/v4 v4.3.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/elithrar/simple-scrypt v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.6 - github.com/minio/minio-go/v7 v7.0.66 - github.com/minio/sha256-simd v1.0.1 - github.com/ncw/swift/v2 v2.0.2 + github.com/klauspost/compress v1.17.11 + github.com/minio/minio-go/v7 v7.0.77 + github.com/ncw/swift/v2 v2.0.3 + github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 - github.com/pkg/sftp v1.13.6 - github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 + github.com/pkg/sftp v1.13.7 + github.com/pkg/xattr v0.4.10 github.com/restic/chunker v0.4.0 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.18.0 - golang.org/x/net v0.20.0 - golang.org/x/oauth2 v0.16.0 - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 - golang.org/x/term v0.16.0 - golang.org/x/text v0.14.0 - golang.org/x/time v0.5.0 - google.golang.org/api v0.157.0 + go.uber.org/automaxprocs v1.6.0 + golang.org/x/crypto v0.32.0 + golang.org/x/net v0.34.0 + golang.org/x/oauth2 v0.23.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.29.0 + golang.org/x/term v0.28.0 + 
golang.org/x/text v0.21.0 + golang.org/x/time v0.7.0 + google.golang.org/api v0.204.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/rs/xid v1.5.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // 
indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) -go 1.19 +go 1.21 diff --git a/go.sum b/go.sum index fb70ac9d518..ef829bd9021 100644 --- a/go.sum +++ b/go.sum @@ -1,53 +1,70 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= -github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth 
v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Backblaze/blazer v0.7.1 h1:J43PbFj6hXLg1jvCNr+rQoAsxzKK0IP7ftl1ReCwpcQ= +github.com/Backblaze/blazer v0.7.1/go.mod h1:MhntL1nMpIuoqrPP6TnZu/xTydMgOAe/Xm6KongbjKs= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= -github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do= -github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= 
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= +github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= +github.com/anacrolix/fuse v0.3.1 h1:oT8s3B5HFkBdLe/WKJO5MNo9iIyEtc+BhvTZYp4jhDM= +github.com/anacrolix/fuse v0.3.1/go.mod h1:vN3X/6E+uHNjg5F8Oy9FD9I+pYxeDWeB8mNjIoxL5ds= +github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633 h1:TO3pytMIJ98CO1nYtqbFx/iuTHi4OgIUoE2wNfDdKxw= +github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.14.1 h1:j2FcIpYZ5FbANetUcm5JNu+zUBGADSp/VbjhUPrAY0k= +github.com/anacrolix/log v0.14.1/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elithrar/simple-scrypt v1.3.0 h1:KIlOlxdoQf9JWKl5lMAJ28SY2URB0XTRDn2TckyzAZg= github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1qPwrl/EJwEqnZo= @@ -55,20 +72,24 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -83,90 +104,99 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= 
-github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= -github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw= +github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= +github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= +github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/ff/v3 v3.3.1/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/peterbourgon/unixtransport v0.0.4 h1:UTF0FxXCAglvoZz9jaGPYjEg52DjBLDYGMJvJni6Tfw= +github.com/peterbourgon/unixtransport v0.0.4/go.mod h1:o8aUkOCa8W/BIXpi15uKvbSabjtBh0JhSOJGSfoOhAU= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 h1:aqByeeNnF7NiEbXCi7nBxZ272+6f6FUBmj/dUzWCdvc= -github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= +github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= +github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= +github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= @@ -178,127 +208,146 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.18.0 
h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 
h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 
h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -308,15 +357,15 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/helpers/build-release-binaries/main.go b/helpers/build-release-binaries/main.go index 81d126b0053..8fe8c24fb9b 100644 --- a/helpers/build-release-binaries/main.go +++ b/helpers/build-release-binaries/main.go @@ -243,14 +243,15 @@ func buildTargets(sourceDir, outputDir string, targets map[string][]string) { } var defaultBuildTargets = map[string][]string{ - "aix": {"ppc64"}, - "darwin": {"amd64", "arm64"}, - "freebsd": {"386", "amd64", "arm"}, - "linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"}, - "netbsd": {"386", "amd64"}, - "openbsd": {"386", "amd64"}, - "windows": {"386", "amd64"}, - 
"solaris": {"amd64"}, + "aix": {"ppc64"}, + "darwin": {"amd64", "arm64"}, + "dragonfly": {"amd64"}, + "freebsd": {"386", "amd64", "arm"}, + "linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"}, + "netbsd": {"386", "amd64"}, + "openbsd": {"386", "amd64"}, + "windows": {"386", "amd64"}, + "solaris": {"amd64"}, } func downloadModules(sourceDir string) { diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index baf8aa2baeb..607d16936eb 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -31,7 +31,7 @@ var opts = struct { var versionRegex = regexp.MustCompile(`^\d+\.\d+\.\d+$`) func init() { - pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'") + pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches than 'master'") pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes") pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md") pflag.BoolVar(&opts.IgnoreChangelogReleaseDate, "ignore-changelog-release-date", false, "ignore missing subdir with date in changelog/") @@ -128,17 +128,22 @@ func uncommittedChanges(dirs ...string) string { return string(changes) } -func preCheckBranchMaster() { - if opts.IgnoreBranchName { - return - } - +func getBranchName() string { branch, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() if err != nil { die("error running 'git': %v", err) } - if strings.TrimSpace(string(branch)) != "master" { + return strings.TrimSpace(string(branch)) +} + +func preCheckBranchMaster() { + if opts.IgnoreBranchName { + return + } + + branch := getBranchName() + if branch != "master" { die("wrong branch: %s", branch) } } @@ -323,6 +328,11 @@ func updateVersion() { } func updateVersionDev() { + err := os.WriteFile("VERSION", []byte(opts.Version+"-dev\n"), 0644) + if err != nil { + die("unable to write version to file: %v", err) + } + newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version) replace(versionCodeFile, versionPattern, newVersion) @@ -444,6 +454,7 @@ func main() { } preCheckBranchMaster() + branch := getBranchName() preCheckUncommittedChanges() preCheckVersionExists() preCheckDockerBuilderGoVersion() @@ -480,5 +491,5 @@ func main() { msg("done, output dir is %v", opts.OutputDir) - msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, sourceDir) + msg("now run:\n\ngit push --tags origin %s\n%s\n\nrm -rf %q", branch, dockerCmds, sourceDir) } diff --git a/helpers/verify-release-binaries.sh b/helpers/verify-release-binaries.sh index 4e80528e2ba..5ac57027bbb 100755 --- a/helpers/verify-release-binaries.sh +++ b/helpers/verify-release-binaries.sh @@ -89,13 +89,14 @@ extract_docker() { restic_platform=$3 out=restic_${restic_version}_linux_${restic_platform}.bz2 + # requires at least docker 25.0 docker image pull --platform "linux/${docker_platform}" ${image}:${restic_version} > /dev/null docker image save ${image}:${restic_version} -o docker.tar mkdir img - tar xvf docker.tar -C img --wildcards \*/layer.tar > /dev/null + tar xvf docker.tar -C img --wildcards blobs/sha256/\* > /dev/null rm docker.tar - for i in img/*/layer.tar; do + for i in img/blobs/sha256/*; do tar -xvf "$i" -C img usr/bin/restic 2> /dev/null 1>&2 || true if [[ -f img/usr/bin/restic ]]; then 
if [[ -f restic-docker ]]; then diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index f2c481b3208..0b71cbacf0d 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -8,10 +8,12 @@ import ( "runtime" "sort" "strings" + "sync" "time" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" "golang.org/x/sync/errgroup" @@ -23,7 +25,7 @@ type SelectByNameFunc func(item string) bool // SelectFunc returns true for all items that should be included (files and // dirs). If false is returned, files are ignored and dirs are not even walked. -type SelectFunc func(item string, fi os.FileInfo) bool +type SelectFunc func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool // ErrorFunc is called when an error during archiving occurs. When nil is // returned, the archiver continues, otherwise it aborts and passes the error @@ -40,6 +42,20 @@ type ItemStats struct { TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead) } +type ChangeStats struct { + New uint + Changed uint + Unchanged uint +} + +type Summary struct { + BackupStart time.Time + BackupEnd time.Time + Files, Dirs ChangeStats + ProcessedBytes uint64 + ItemStats +} + // Add adds other to the current ItemStats. func (s *ItemStats) Add(other ItemStats) { s.DataBlobs += other.DataBlobs @@ -50,17 +66,42 @@ func (s *ItemStats) Add(other ItemStats) { s.TreeSizeInRepo += other.TreeSizeInRepo } +// ToNoder returns a restic.Node for a File. +type ToNoder interface { + ToNode(ignoreXattrListError bool) (*restic.Node, error) +} + +type archiverRepo interface { + restic.Loader + restic.BlobSaver + restic.SaverUnpacked[restic.WriteableFileType] + + Config() restic.Config + StartPackUploader(ctx context.Context, wg *errgroup.Group) + Flush(ctx context.Context) error +} + // Archiver saves a directory structure to the repo. +// +// An Archiver has a number of worker goroutines handling saving the different +// data structures to the repository, the details are implemented by the +// fileSaver, blobSaver, and treeSaver types. +// +// The main goroutine (the one calling Snapshot()) traverses the directory tree +// and delegates all work to these worker pools. They return a futureNode which +// can be resolved later, by calling Wait() on it. type Archiver struct { - Repo restic.Repository + Repo archiverRepo SelectByName SelectByNameFunc Select SelectFunc FS fs.FS Options Options - blobSaver *BlobSaver - fileSaver *FileSaver - treeSaver *TreeSaver + blobSaver *blobSaver + fileSaver *fileSaver + treeSaver *treeSaver + mu sync.Mutex + summary *Summary // Error is called for all errors that occur during backup. Error ErrorFunc @@ -134,7 +175,7 @@ func (o Options) ApplyDefaults() Options { if o.SaveTreeConcurrency == 0 { // can either wait for a file, wait for a tree, serialize a tree or wait for saveblob // the last two are cpu-bound and thus mutually exclusive. - // Also allow waiting for FileReadConcurrency files, this is the maximum of FutureFiles + // Also allow waiting for FileReadConcurrency files, this is the maximum of files // which currently can be in progress. The main backup loop blocks when trying to queue // more files to read. 
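		// For example, with GOMAXPROCS(0) == 8 and the default ReadConcurrency of 2, this allows 8 + 2 = 10 tree save operations in flight.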
o.SaveTreeConcurrency = uint(runtime.GOMAXPROCS(0)) + o.ReadConcurrency @@ -144,12 +185,12 @@ func (o Options) ApplyDefaults() Options { } // New initializes a new archiver. -func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver { +func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver { arch := &Archiver{ Repo: repo, - SelectByName: func(item string) bool { return true }, - Select: func(item string, fi os.FileInfo) bool { return true }, - FS: fs, + SelectByName: func(_ string) bool { return true }, + Select: func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, + FS: filesystem, Options: opts.ApplyDefaults(), CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {}, @@ -182,16 +223,64 @@ func (arch *Archiver) error(item string, err error) error { return errf } +func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { + arch.CompleteItem(item, previous, current, s, d) + + arch.mu.Lock() + defer arch.mu.Unlock() + + arch.summary.ItemStats.Add(s) + + if current != nil { + arch.summary.ProcessedBytes += current.Size + } else { + // last item or an error occurred + return + } + + switch current.Type { + case restic.NodeTypeDir: + switch { + case previous == nil: + arch.summary.Dirs.New++ + case previous.Equals(*current): + arch.summary.Dirs.Unchanged++ + default: + arch.summary.Dirs.Changed++ + } + + case restic.NodeTypeFile: + switch { + case previous == nil: + arch.summary.Files.New++ + case previous.Equals(*current): + arch.summary.Files.Unchanged++ + default: + arch.summary.Files.Changed++ + } + } +} + // nodeFromFileInfo returns the restic node from an os.FileInfo. -func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - node, err := restic.NodeFromFileInfo(filename, fi) +func (arch *Archiver) nodeFromFileInfo(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) { + node, err := meta.ToNode(ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } + if feature.Flag.Enabled(feature.DeviceIDForHardlinks) { + if node.Links == 1 || node.Type == restic.NodeTypeDir { + // the DeviceID is only necessary for hardlinked files + // when using subvolumes or snapshots their deviceIDs tend to change which causes + // restic to upload new tree blobs + node.DeviceID = 0 + } + } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) - if err != nil { - return node, fmt.Errorf("nodeFromFileInfo %v: %w", filename, err) + // do not filter error for nodes of irregular or invalid type + if node.Type != restic.NodeTypeIrregular && node.Type != restic.NodeTypeInvalid && err != nil { + err = fmt.Errorf("incomplete metadata for %v: %w", filename, err) + return node, arch.error(filename, err) } return node, err } @@ -199,7 +288,7 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) // loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned. // If there is no node to load, then nil is returned without an error. 
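// The subtree comes from the parent snapshot; save uses it to look up the previous node for each entry so that unchanged files can reuse their existing blobs.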
func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*restic.Tree, error) { - if node == nil || node.Type != "dir" || node.Subtree == nil { + if node == nil || node.Type != restic.NodeTypeDir || node.Subtree == nil { return nil, nil } @@ -214,7 +303,7 @@ func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*rest } func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { - if arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) { + if _, ok := arch.Repo.LookupBlobSize(restic.TreeBlob, id); ok { err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err) } else { err = errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id) @@ -222,35 +311,29 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { return err } -// SaveDir stores a directory in the repo and returns the node. snPath is the +// saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. -func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, meta fs.File, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) { debug.Log("%v %v", snPath, dir) - treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi) - if err != nil { - return FutureNode{}, err - } - - names, err := readdirnames(arch.FS, dir, fs.O_NOFOLLOW) + treeNode, names, err := arch.dirToNodeAndEntries(snPath, dir, meta) if err != nil { - return FutureNode{}, err + return futureNode{}, err } - sort.Strings(names) - nodes := make([]FutureNode, 0, len(names)) + nodes := make([]futureNode, 0, len(names)) for _, name := range names { // test if context has been cancelled if ctx.Err() != nil { debug.Log("context has been cancelled, aborting") - return FutureNode{}, ctx.Err() + return futureNode{}, ctx.Err() } pathname := arch.FS.Join(dir, name) oldNode := previous.Find(name) snItem := join(snPath, name) - fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode) + fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode) // return error early if possible if err != nil { @@ -260,7 +343,7 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi continue } - return FutureNode{}, err + return futureNode{}, err } if excluded { @@ -275,11 +358,34 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi return fn, nil } -// FutureNode holds a reference to a channel that returns a FutureNodeResult +func (arch *Archiver) dirToNodeAndEntries(snPath, dir string, meta fs.File) (node *restic.Node, names []string, err error) { + err = meta.MakeReadable() + if err != nil { + return nil, nil, fmt.Errorf("openfile for readdirnames failed: %w", err) + } + + node, err = arch.nodeFromFileInfo(snPath, dir, meta, false) + if err != nil { + return nil, nil, err + } + if node.Type != restic.NodeTypeDir { + return nil, nil, fmt.Errorf("directory %q changed type, refusing to archive", snPath) + } + + names, err = meta.Readdirnames(-1) + if err != nil { + return nil, nil, fmt.Errorf("readdirnames %v failed: %w", dir, err) + } + sort.Strings(names) + + return node, names, nil +} + +// futureNode holds a reference to a channel that returns a FutureNodeResult // or a reference to an already 
existing result. If the result is available // immediately, then storing a reference directly requires less memory than // using the indirection via a channel. -type FutureNode struct { +type futureNode struct { ch <-chan futureNodeResult res *futureNodeResult } @@ -292,18 +398,18 @@ type futureNodeResult struct { err error } -func newFutureNode() (FutureNode, chan<- futureNodeResult) { +func newFutureNode() (futureNode, chan<- futureNodeResult) { ch := make(chan futureNodeResult, 1) - return FutureNode{ch: ch}, ch + return futureNode{ch: ch}, ch } -func newFutureNodeWithResult(res futureNodeResult) FutureNode { - return FutureNode{ +func newFutureNodeWithResult(res futureNodeResult) futureNode { + return futureNode{ res: &res, } } -func (fn *FutureNode) take(ctx context.Context) futureNodeResult { +func (fn *futureNode) take(ctx context.Context) futureNodeResult { if fn.res != nil { res := fn.res // free result @@ -318,6 +424,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { return res } case <-ctx.Done(): + return futureNodeResult{err: ctx.Err()} } return futureNodeResult{err: errors.Errorf("no result")} } @@ -327,52 +434,78 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { // check if all blobs are contained in index for _, id := range previous.Content { - if !arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.DataBlob}) { + if _, ok := arch.Repo.LookupBlobSize(restic.DataBlob, id); !ok { return false } } return true } -// Save saves a target (file or directory) to the repo. If the item is +// save saves a target (file or directory) to the repo. If the item is // excluded, this function returns a nil node and error, with excluded set to // true. // // Errors and completion needs to be handled by the caller. // // snPath is the path within the current snapshot. 
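// The result is delivered asynchronously; resolve the returned futureNode by calling take() on it.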
-func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { +func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn futureNode, excluded bool, err error) { start := time.Now() debug.Log("%v target %q, previous %v", snPath, target, previous) abstarget, err := arch.FS.Abs(target) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } + filterError := func(err error) (futureNode, bool, error) { + err = arch.error(abstarget, err) + if err != nil { + return futureNode{}, false, errors.WithStack(err) + } + return futureNode{}, true, nil + } + filterNotExist := func(err error) error { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } // exclude files by path before running Lstat to reduce number of lstat calls if !arch.SelectByName(abstarget) { debug.Log("%v is excluded by path", target) - return FutureNode{}, true, nil + return futureNode{}, true, nil } + meta, err := arch.FS.OpenFile(target, fs.O_NOFOLLOW, true) + if err != nil { + debug.Log("open metadata for %v returned error: %v", target, err) + // ignore if file disappeared since it was returned by readdir + return filterError(filterNotExist(err)) + } + closeFile := true + defer func() { + if closeFile { + cerr := meta.Close() + if err == nil { + err = cerr + } + } + }() + // get file info and run remaining select functions that require file information - fi, err := arch.FS.Lstat(target) + fi, err := meta.Stat() if err != nil { debug.Log("lstat() for %v returned error: %v", target, err) - err = arch.error(abstarget, err) - if err != nil { - return FutureNode{}, false, errors.WithStack(err) - } - return FutureNode{}, true, nil + // ignore if file disappeared since it was returned by readdir + return filterError(filterNotExist(err)) } - if !arch.Select(abstarget, fi) { + if !arch.Select(abstarget, fi, arch.FS) { debug.Log("%v is excluded", target) - return FutureNode{}, true, nil + return futureNode{}, true, nil } switch { - case fs.IsRegularFile(fi): + case fi.Mode.IsRegular(): debug.Log(" %v regular file", target) // check if the file has not changed before performing a fopen operation (more expensive, specially @@ -380,11 +513,11 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) - arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start)) + arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, meta, false) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } // copy list of blobs @@ -403,54 +536,42 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target) err = arch.error(abstarget, err) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } } // reopen file and do an fstat() on the open file to check it is still // a file (and has not been exchanged for e.g. 
a symlink) - file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + err := meta.MakeReadable() if err != nil { - debug.Log("Openfile() for %v returned error: %v", target, err) - err = arch.error(abstarget, err) - if err != nil { - return FutureNode{}, false, errors.WithStack(err) - } - return FutureNode{}, true, nil + debug.Log("MakeReadable() for %v returned error: %v", target, err) + return filterError(err) } - fi, err = file.Stat() + fi, err := meta.Stat() if err != nil { debug.Log("stat() on opened file %v returned error: %v", target, err) - _ = file.Close() - err = arch.error(abstarget, err) - if err != nil { - return FutureNode{}, false, errors.WithStack(err) - } - return FutureNode{}, true, nil + return filterError(err) } // make sure it's still a file - if !fs.IsRegularFile(fi) { - err = errors.Errorf("file %v changed type, refusing to archive", fi.Name()) - _ = file.Close() - err = arch.error(abstarget, err) - if err != nil { - return FutureNode{}, false, err - } - return FutureNode{}, true, nil + if !fi.Mode.IsRegular() { + err = errors.Errorf("file %q changed type, refusing to archive", target) + return filterError(err) } + closeFile = false + // Save will close the file, we don't need to do that - fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() { + fn = arch.fileSaver.Save(ctx, snPath, target, meta, func() { arch.StartFile(snPath) }, func() { - arch.CompleteItem(snPath, nil, nil, ItemStats{}, 0) + arch.trackItem(snPath, nil, nil, ItemStats{}, 0) }, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snPath, previous, node, stats, time.Since(start)) + arch.trackItem(snPath, previous, node, stats, time.Since(start)) }) - case fi.IsDir(): + case fi.Mode.IsDir(): debug.Log(" %v dir", target) snItem := snPath + "/" @@ -459,28 +580,28 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous err = arch.error(abstarget, err) } if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } - fn, err = arch.SaveDir(ctx, snPath, target, fi, oldSubtree, + fn, err = arch.saveDir(ctx, snPath, target, meta, oldSubtree, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snItem, previous, node, stats, time.Since(start)) + arch.trackItem(snItem, previous, node, stats, time.Since(start)) }) if err != nil { debug.Log("SaveDir for %v returned error: %v", snPath, err) - return FutureNode{}, false, err + return futureNode{}, false, err } - case fi.Mode()&os.ModeSocket > 0: + case fi.Mode&os.ModeSocket > 0: debug.Log(" %v is a socket, ignoring", target) - return FutureNode{}, true, nil + return futureNode{}, true, nil default: debug.Log(" %v other", target) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, meta, false) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } fn = newFutureNodeWithResult(futureNodeResult{ snPath: snPath, @@ -497,27 +618,26 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous // fileChanged tries to detect whether a file's content has changed compared // to the contents of node, which describes the same path in the parent backup. // It should only be run for regular files. 
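// A file is treated as changed if its size or modification time differs from the parent node; change time and inode are compared as well unless disabled via ignoreFlags.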
-func fileChanged(fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool { +func fileChanged(fi *fs.ExtendedFileInfo, node *restic.Node, ignoreFlags uint) bool { switch { case node == nil: return true - case node.Type != "file": + case node.Type != restic.NodeTypeFile: // We're only called for regular files, so this is a type change. return true - case uint64(fi.Size()) != node.Size: + case uint64(fi.Size) != node.Size: return true - case !fi.ModTime().Equal(node.ModTime): + case !fi.ModTime.Equal(node.ModTime): return true } checkCtime := ignoreFlags&ChangeIgnoreCtime == 0 checkInode := ignoreFlags&ChangeIgnoreInode == 0 - extFI := fs.ExtendedStat(fi) switch { - case checkCtime && !extFI.ChangeTime.Equal(node.ChangeTime): + case checkCtime && !fi.ChangeTime.Equal(node.ChangeTime): return true - case checkInode && node.Inode != extFI.Inode: + case checkInode && node.Inode != fi.Inode: return true } @@ -529,41 +649,20 @@ func join(elem ...string) string { return path.Join(elem...) } -// statDir returns the file info for the directory. Symbolic links are -// resolved. If the target directory is not a directory, an error is returned. -func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { - fi, err := arch.FS.Stat(dir) - if err != nil { - return nil, errors.WithStack(err) - } - - tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice) - if tpe != os.ModeDir { - return fi, errors.Errorf("path is not a directory: %v", dir) - } - - return fi, nil -} - -// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path +// saveTree stores a Tree in the repo, returned is the tree. snPath is the path // within the current snapshot. -func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { +func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *restic.Tree, complete fileCompleteFunc) (futureNode, int, error) { var node *restic.Node if snPath != "/" { if atree.FileInfoPath == "" { - return FutureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath) - } - - fi, err := arch.statDir(atree.FileInfoPath) - if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath) } - debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi) + var err error + node, err = arch.dirPathToNode(snPath, atree.FileInfoPath) if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } } else { // fake root node @@ -572,7 +671,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous) nodeNames := atree.NodeNames() - nodes := make([]FutureNode, 0, len(nodeNames)) + nodes := make([]futureNode, 0, len(nodeNames)) // iterate over the nodes of atree in lexicographic (=deterministic) order for _, name := range nodeNames { @@ -580,12 +679,12 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, // test if context has been cancelled if ctx.Err() != nil { - return FutureNode{}, 0, ctx.Err() + return futureNode{}, 0, ctx.Err() } // this is a leaf node if subatree.Leaf() { - fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) + fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) if err != nil { err = 
arch.error(subatree.Path, err) @@ -593,11 +692,11 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, // ignore error continue } - return FutureNode{}, 0, err + return futureNode{}, 0, err } if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } if !excluded { @@ -615,15 +714,15 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, err = arch.error(join(snPath, name), err) } if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } // not a leaf node, archive subtree - fn, _, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { - arch.CompleteItem(snItem, oldNode, n, is, time.Since(start)) + fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { + arch.trackItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } nodes = append(nodes, fn) } @@ -632,25 +731,29 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, return fn, len(nodes), nil } -// flags are passed to fs.OpenFile. O_RDONLY is implied. -func readdirnames(filesystem fs.FS, dir string, flags int) ([]string, error) { - f, err := filesystem.OpenFile(dir, fs.O_RDONLY|flags, 0) - if err != nil { - return nil, errors.WithStack(err) - } - - entries, err := f.Readdirnames(-1) +func (arch *Archiver) dirPathToNode(snPath, target string) (node *restic.Node, err error) { + meta, err := arch.FS.OpenFile(target, 0, true) if err != nil { - _ = f.Close() - return nil, errors.Wrapf(err, "Readdirnames %v failed", dir) + return nil, err } + defer func() { + cerr := meta.Close() + if err == nil { + err = cerr + } + }() - err = f.Close() + debug.Log("%v, reading dir node data from %v", snPath, target) + // in some cases reading xattrs for directories above the backup source is not allowed + // thus ignore errors for such folders. + node, err = arch.nodeFromFileInfo(snPath, target, meta, true) if err != nil { return nil, err } - - return entries, nil + if node.Type != restic.NodeTypeDir { + return nil, errors.Errorf("path is not a directory: %v", target) + } + return node, err } // resolveRelativeTargets replaces targets that only contain relative @@ -660,7 +763,12 @@ func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) { debug.Log("targets before resolving: %v", targets) result := make([]string, 0, len(targets)) for _, target := range targets { - target = filesys.Clean(target) + if target != "" && filesys.VolumeName(target) == target { + // special case to allow users to also specify a volume name "C:" instead of a path "C:\" + target = target + filesys.Separator() + } else { + target = filesys.Clean(target) + } pc, _ := pathComponents(filesys, target, false) if len(pc) > 0 { result = append(result, target) @@ -668,7 +776,7 @@ func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) { } debug.Log("replacing %q with readdir(%q)", target, target) - entries, err := readdirnames(filesys, target, fs.O_NOFOLLOW) + entries, err := fs.Readdirnames(filesys, target, fs.O_NOFOLLOW) if err != nil { return nil, err } @@ -688,9 +796,12 @@ type SnapshotOptions struct { Tags restic.TagList Hostname string Excludes []string + BackupStart time.Time Time time.Time ParentSnapshot *restic.Snapshot ProgramVersion string + // SkipIfUnchanged omits the snapshot creation if it is identical to the parent snapshot. 
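// In that case Snapshot returns a nil snapshot and a zero ID together with the backup summary.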
+ SkipIfUnchanged bool } // loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned. @@ -716,16 +827,16 @@ func (arch *Archiver) loadParentTree(ctx context.Context, sn *restic.Snapshot) * // runWorkers starts the worker pools, which are stopped when the context is cancelled. func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group) { - arch.blobSaver = NewBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency) + arch.blobSaver = newBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency) - arch.fileSaver = NewFileSaver(ctx, wg, + arch.fileSaver = newFileSaver(ctx, wg, arch.blobSaver.Save, arch.Repo.Config().ChunkerPolynomial, arch.Options.ReadConcurrency, arch.Options.SaveBlobConcurrency) arch.fileSaver.CompleteBlob = arch.CompleteBlob arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo - arch.treeSaver = NewTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error) + arch.treeSaver = newTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error) } func (arch *Archiver) stopWorkers() { @@ -738,15 +849,19 @@ func (arch *Archiver) stopWorkers() { } // Snapshot saves several targets and returns a snapshot. -func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) { +func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) { + arch.summary = &Summary{ + BackupStart: opts.BackupStart, + } + cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } - atree, err := NewTree(arch.FS, cleanTargets) + atree, err := newTree(arch.FS, cleanTargets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } var rootTreeID restic.ID @@ -762,8 +877,8 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps arch.runWorkers(wgCtx, wg) debug.Log("starting snapshot") - fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(n *restic.Node, is ItemStats) { - arch.CompleteItem("/", nil, nil, is, time.Since(start)) + fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { + arch.trackItem("/", nil, nil, is, time.Since(start)) }) if err != nil { return err @@ -799,12 +914,19 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps }) err = wgUp.Wait() if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err + } + + if opts.ParentSnapshot != nil && opts.SkipIfUnchanged { + ps := opts.ParentSnapshot + if ps.Tree != nil && rootTreeID.Equal(*ps.Tree) { + return nil, restic.ID{}, arch.summary, nil + } } sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn.ProgramVersion = opts.ProgramVersion @@ -813,11 +935,29 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps sn.Parent = opts.ParentSnapshot.ID() } sn.Tree = &rootTreeID + arch.summary.BackupEnd = time.Now() + sn.Summary = &restic.SnapshotSummary{ + BackupStart: arch.summary.BackupStart, + BackupEnd: arch.summary.BackupEnd, + + FilesNew: arch.summary.Files.New, + FilesChanged: arch.summary.Files.Changed, + FilesUnmodified: 
arch.summary.Files.Unchanged, + DirsNew: arch.summary.Dirs.New, + DirsChanged: arch.summary.Dirs.Changed, + DirsUnmodified: arch.summary.Dirs.Unchanged, + DataBlobs: arch.summary.ItemStats.DataBlobs, + TreeBlobs: arch.summary.ItemStats.TreeBlobs, + DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize, + DataAddedPacked: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, + TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged, + TotalBytesProcessed: arch.summary.ProcessedBytes, + } id, err := restic.SaveSnapshot(ctx, arch.Repo, sn) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } - return sn, id, nil + return sn, id, arch.summary, nil } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 46ef4425184..fcc3d465da4 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -3,6 +3,7 @@ package archiver import ( "bytes" "context" + "fmt" "io" "os" "path/filepath" @@ -19,15 +20,16 @@ import ( "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := repository.TestRepository(t) TestCreateFiles(t, tempdir, src) @@ -35,7 +37,7 @@ func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository return tempdir, repo } -func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem fs.FS) (*restic.Node, ItemStats) { +func saveFile(t testing.TB, repo archiverRepo, filename string, filesystem fs.FS) (*restic.Node, ItemStats) { wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) @@ -74,17 +76,12 @@ func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem startCallback = true } - file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + file, err := arch.FS.OpenFile(filename, fs.O_NOFOLLOW, false) if err != nil { t.Fatal(err) } - fi, err := file.Stat() - if err != nil { - t.Fatal(err) - } - - res := arch.fileSaver.Save(ctx, "/", filename, file, fi, start, completeReading, complete) + res := arch.fileSaver.Save(ctx, "/", filename, file, start, completeReading, complete) fnr := res.take(ctx) if fnr.err != nil { @@ -132,7 +129,7 @@ func TestArchiverSaveFile(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -165,7 +162,7 @@ func TestArchiverSaveFileReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -207,7 +204,7 @@ func TestArchiverSave(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, 
} for _, testfile := range tests { @@ -226,8 +223,9 @@ func TestArchiverSave(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil) + node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil) if err != nil { t.Fatal(err) } @@ -275,7 +273,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -303,8 +301,9 @@ func TestArchiverSaveReaderFS(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - node, excluded, err := arch.Save(ctx, "/", filename, nil) + node, excluded, err := arch.save(ctx, "/", filename, nil) t.Logf("Save returned %v %v", node, err) if err != nil { t.Fatal(err) @@ -351,7 +350,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { func BenchmarkArchiverSaveFileSmall(b *testing.B) { const fileSize = 4 * 1024 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -383,7 +382,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) { func BenchmarkArchiverSaveFileLarge(b *testing.B) { const fileSize = 40*1024*1024 + 1287898 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -413,14 +412,14 @@ func BenchmarkArchiverSaveFileLarge(b *testing.B) { } type blobCountingRepo struct { - restic.Repository + archiverRepo m sync.Mutex saved map[restic.BlobHandle]uint } func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) { - id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate) + id, exists, size, err := repo.archiverRepo.SaveBlob(ctx, t, buf, id, storeDuplicate) if exists { return id, exists, size, err } @@ -432,7 +431,7 @@ func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, b } func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) { - id, err := restic.SaveTree(ctx, repo.Repository, t) + id, err := restic.SaveTree(ctx, repo.archiverRepo, t) h := restic.BlobHandle{ID: id, Type: restic.TreeBlob} repo.m.Lock() repo.saved[h]++ @@ -459,14 +458,14 @@ func appendToFile(t testing.TB, filename string, data []byte) { } func TestArchiverSaveFileIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ - Repository: repository.TestRepository(t), - saved: make(map[restic.BlobHandle]uint), + archiverRepo: repository.TestRepository(t), + saved: make(map[restic.BlobHandle]uint), } - data := restictest.Random(23, 512*1024+887898) + data := rtest.Random(23, 512*1024+887898) testfile := filepath.Join(tempdir, "testfile") for i := 0; i < 3; i++ { @@ -509,21 +508,21 @@ func chmodTwice(t testing.TB, name string) { // POSIX says that ctime is updated "even if the file status does not // change", but let's make sure it does change, just in case. 
err := os.Chmod(name, 0700) - restictest.OK(t, err) + rtest.OK(t, err) sleep() err = os.Chmod(name, 0600) - restictest.OK(t, err) + rtest.OK(t, err) } -func lstat(t testing.TB, name string) os.FileInfo { +func lstat(t testing.TB, name string) *fs.ExtendedFileInfo { fi, err := os.Lstat(name) if err != nil { t.Fatal(err) } - return fi + return fs.ExtendedStat(fi) } func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) { @@ -552,11 +551,12 @@ func rename(t testing.TB, oldname, newname string) { } } -func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { - node, err := restic.NodeFromFileInfo(filename, fi) - if err != nil { - t.Fatal(err) - } +func nodeFromFile(t testing.TB, localFs fs.FS, filename string) *restic.Node { + meta, err := localFs.OpenFile(filename, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + node, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) return node } @@ -660,7 +660,7 @@ func TestFileChanged(t *testing.T) { rename(t, filename, tempname) save(t, filename, defaultContent) remove(t, tempname) - setTimestamp(t, filename, fi.ModTime(), fi.ModTime()) + setTimestamp(t, filename, fi.ModTime, fi.ModTime) }, ChangeIgnore: ChangeIgnoreCtime | ChangeIgnoreInode, SameFile: true, @@ -673,7 +673,7 @@ func TestFileChanged(t *testing.T) { t.Skip("don't run test on Windows") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := defaultContent @@ -682,8 +682,10 @@ func TestFileChanged(t *testing.T) { } save(t, filename, content) - fiBefore := lstat(t, filename) - node := nodeFromFI(t, filename, fiBefore) + fs := &fs.Local{} + fiBefore, err := fs.Lstat(filename) + rtest.OK(t, err) + node := nodeFromFile(t, fs, filename) if fileChanged(fiBefore, node, 0) { t.Fatalf("unchanged file detected as changed") @@ -709,7 +711,7 @@ func TestFileChanged(t *testing.T) { } func TestFilChangedSpecialCases(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := []byte("foobar") @@ -724,8 +726,8 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("type-change", func(t *testing.T) { fi := lstat(t, filename) - node := nodeFromFI(t, filename, fi) - node.Type = "symlink" + node := nodeFromFile(t, &fs.Local{}, filename) + node.Type = restic.NodeTypeSymlink if !fileChanged(fi, node, 0) { t.Fatal("node with changed type detected as unchanged") } @@ -743,12 +745,12 @@ func TestArchiverSaveDir(t *testing.T) { }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, target: ".", want: TestDir{ "targetdir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, }, }, @@ -758,8 +760,8 @@ func TestArchiverSaveDir(t *testing.T) { "foo": TestFile{Content: "foo"}, "emptyfile": TestFile{Content: ""}, "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, - "largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, - "largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))}, + "largefile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, + "largerfile": TestFile{Content: string(rtest.Random(234, 5*1024*1024+5000))}, }, }, target: "targetdir", @@ -829,26 +831,24 @@ func TestArchiverSaveDir(t *testing.T) 
{ wg, ctx := errgroup.WithContext(context.Background()) repo.StartPackUploader(ctx, wg) - arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + testFS := fs.Track{FS: fs.Local{}} + arch := New(repo, testFS, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} chdir := tempdir if test.chdir != "" { chdir = filepath.Join(chdir, test.chdir) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() - fi, err := fs.Lstat(test.target) - if err != nil { - t.Fatal(err) - } - - ft, err := arch.SaveDir(ctx, "/", test.target, fi, nil, nil) - if err != nil { - t.Fatal(err) - } + meta, err := testFS.OpenFile(test.target, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + ft, err := arch.saveDir(ctx, "/", test.target, meta, nil, nil) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) fnr := ft.take(ctx) node, stats := fnr.node, fnr.stats @@ -895,11 +895,11 @@ func TestArchiverSaveDir(t *testing.T) { } func TestArchiverSaveDirIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ - Repository: repository.TestRepository(t), - saved: make(map[restic.BlobHandle]uint), + archiverRepo: repository.TestRepository(t), + saved: make(map[restic.BlobHandle]uint), } appendToFile(t, filepath.Join(tempdir, "testfile"), []byte("foobar")) @@ -910,18 +910,16 @@ func TestArchiverSaveDirIncremental(t *testing.T) { wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) - arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + testFS := fs.Track{FS: fs.Local{}} + arch := New(repo, testFS, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - fi, err := fs.Lstat(tempdir) - if err != nil { - t.Fatal(err) - } - - ft, err := arch.SaveDir(ctx, "/", tempdir, fi, nil, nil) - if err != nil { - t.Fatal(err) - } + meta, err := testFS.OpenFile(tempdir, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + ft, err := arch.saveDir(ctx, "/", tempdir, meta, nil, nil) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) fnr := ft.take(ctx) node, stats := fnr.node, fnr.stats @@ -982,9 +980,9 @@ func TestArchiverSaveDirIncremental(t *testing.T) { // bothZeroOrNeither fails the test if only one of exp, act is zero. 
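// For instance (illustrative): bothZeroOrNeither(t, 0, 0) and
// bothZeroOrNeither(t, 3, 7) pass, while bothZeroOrNeither(t, 0, 7) fails.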
func bothZeroOrNeither(tb testing.TB, exp, act uint64) { + tb.Helper() if (exp == 0 && act != 0) || (exp != 0 && act == 0) { - _, file, line, _ := runtime.Caller(1) - tb.Fatalf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + rtest.Equals(tb, exp, act) } } @@ -1004,7 +1002,7 @@ func TestArchiverSaveTree(t *testing.T) { prepare func(t testing.TB) targets []string want TestDir - stat ItemStats + stat Summary }{ { src: TestDir{ @@ -1014,7 +1012,12 @@ func TestArchiverSaveTree(t *testing.T) { want: TestDir{ "targetfile": TestFile{Content: string("foobar")}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1026,7 +1029,12 @@ func TestArchiverSaveTree(t *testing.T) { "targetfile": TestFile{Content: string("foobar")}, "filesymlink": TestSymlink{Target: "targetfile"}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1046,7 +1054,12 @@ func TestArchiverSaveTree(t *testing.T) { "symlink": TestSymlink{Target: "subdir"}, }, }, - stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + stat: Summary{ + ItemStats: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + ProcessedBytes: 0, + Files: ChangeStats{0, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + }, }, { src: TestDir{ @@ -1070,7 +1083,12 @@ func TestArchiverSaveTree(t *testing.T) { }, }, }, - stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{3, 0, 0}, + }, }, } @@ -1082,32 +1100,25 @@ func TestArchiverSaveTree(t *testing.T) { arch := New(repo, testFS, Options{}) - var stat ItemStats - lock := &sync.Mutex{} - arch.CompleteItem = func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { - lock.Lock() - defer lock.Unlock() - stat.Add(s) - } - wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { test.prepare(t) } - atree, err := NewTree(testFS, test.targets) + atree, err := newTree(testFS, test.targets) if err != nil { t.Fatal(err) } - fn, _, err := arch.SaveTree(ctx, "/", atree, nil, nil) + fn, _, err := arch.saveTree(ctx, "/", atree, nil, nil) if err != nil { t.Fatal(err) } @@ -1134,11 +1145,15 @@ func TestArchiverSaveTree(t *testing.T) { want = test.src } TestEnsureTree(context.TODO(), t, "/", repo, treeID, want) + stat := arch.summary bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs)) bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs)) bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize) bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo) bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo) + rtest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) + rtest.Equals(t, test.stat.Files, stat.Files) + rtest.Equals(t, test.stat.Dirs, stat.Dirs) }) } } @@ -1386,7 +1401,7 @@ func TestArchiverSnapshot(t *testing.T) { chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir)) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() var targets 
[]string @@ -1395,7 +1410,7 @@ func TestArchiverSnapshot(t *testing.T) { } t.Logf("targets: %v", targets) - sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + sn, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1408,7 +1423,7 @@ func TestArchiverSnapshot(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) // check that the snapshot contains the targets with absolute paths for i, target := range sn.Paths { @@ -1425,6 +1440,66 @@ func TestArchiverSnapshot(t *testing.T) { } } +func TestResolveRelativeTargetsSpecial(t *testing.T) { + var tests = []struct { + name string + targets []string + expected []string + win bool + }{ + { + name: "basic relative path", + targets: []string{filepath.FromSlash("some/path")}, + expected: []string{filepath.FromSlash("some/path")}, + }, + { + name: "partial relative path", + targets: []string{filepath.FromSlash("../some/path")}, + expected: []string{filepath.FromSlash("../some/path")}, + }, + { + name: "basic absolute path", + targets: []string{filepath.FromSlash("/some/path")}, + expected: []string{filepath.FromSlash("/some/path")}, + }, + { + name: "volume name", + targets: []string{"C:"}, + expected: []string{"C:\\"}, + win: true, + }, + { + name: "volume root path", + targets: []string{"C:\\"}, + expected: []string{"C:\\"}, + win: true, + }, + { + name: "UNC path", + targets: []string{"\\\\server\\volume"}, + expected: []string{"\\\\server\\volume\\"}, + win: true, + }, + { + name: "UNC path with trailing slash", + targets: []string{"\\\\server\\volume\\"}, + expected: []string{"\\\\server\\volume\\"}, + win: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + targets, err := resolveRelativeTargets(&fs.Local{}, test.targets) + rtest.OK(t, err) + rtest.Equals(t, test.expected, targets) + }) + } +} + func TestArchiverSnapshotSelect(t *testing.T) { var tests = []struct { name string @@ -1446,7 +1521,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, }, @@ -1463,7 +1538,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return false }, err: "snapshot is empty", @@ -1490,7 +1565,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return filepath.Ext(item) != ".txt" }, }, @@ -1514,8 +1589,8 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { - return filepath.Base(item) != "subdir" + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + return fs.Base(item) != "subdir" }, }, { @@ -1523,8 +1598,8 @@ func TestArchiverSnapshotSelect(t *testing.T) { src: TestDir{ "foo": TestFile{Content: "foo"}, }, - selFn: func(item string, fi os.FileInfo) bool { - return filepath.IsAbs(item) + selFn: func(item string, fi *fs.ExtendedFileInfo, fs 
fs.FS) bool { + return fs.IsAbs(item) }, }, } @@ -1539,11 +1614,11 @@ func TestArchiverSnapshotSelect(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Select = test.selFn - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() targets := []string{"."} - _, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if test.err != "" { if err == nil { t.Fatalf("expected error not found, got %v, wanted %q", err, test.err) @@ -1568,7 +1643,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1581,17 +1656,8 @@ type MockFS struct { bytesRead map[string]int // tracks bytes read from all opened files } -func (m *MockFS) Open(name string) (fs.File, error) { - f, err := m.FS.Open(name) - if err != nil { - return f, err - } - - return MockFile{File: f, fs: m, filename: name}, nil -} - -func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { - f, err := m.FS.OpenFile(name, flag, perm) +func (m *MockFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, metadataOnly) if err != nil { return f, err } @@ -1616,17 +1682,88 @@ func (f MockFile) Read(p []byte) (int, error) { return n, err } +func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { + t.Helper() + rtest.Equals(t, stat.BackupStart, sn.Summary.BackupStart, "BackupStart") + // BackupEnd is set to time.Now() and can't be compared to a fixed value + rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew, "FilesNew") + rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged, "FilesChanged") + rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified, "FilesUnmodified") + rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew, "DirsNew") + rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged, "DirsChanged") + rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified, "DirsUnmodified") + rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed, "TotalBytesProcessed") + rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed, "TotalFilesProcessed") + bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) + bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) + bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) + bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked)) +} + func TestArchiverParent(t *testing.T) { var tests = []struct { - src TestDir - read map[string]int // tracks number of times a file must have been read + src TestDir + modify func(path string) + statInitial Summary + statSecond Summary }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, + }, + statInitial: Summary{ + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + ProcessedBytes: 2102152, + ItemStats: ItemStats{3, 0x201593, 0x201632, 1, 0, 0}, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 1}, + Dirs: ChangeStats{0, 0, 0}, + ProcessedBytes: 2102152, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: 
string(rtest.Random(888, 1234))}, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, + }, + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe1c, 0xcd9, 2, 0, 0}, }, - read: map[string]int{ - "targetfile": 1, + statSecond: Summary{ + Files: ChangeStats{0, 0, 2}, + Dirs: ChangeStats{0, 0, 1}, + ProcessedBytes: 2469, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, + }, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, + }, + modify: func(path string) { + remove(t, filepath.Join(path, "targetDir", "targetfile")) + save(t, filepath.Join(path, "targetfile2"), []byte("foobar")) + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe13, 0xcf8, 2, 0, 0}, + }, + statSecond: Summary{ + Files: ChangeStats{0, 1, 0}, + Dirs: ChangeStats{0, 1, 0}, + ProcessedBytes: 6, + ItemStats: ItemStats{1, 0x305, 0x233, 2, 0, 0}, }, }, } @@ -1645,10 +1782,10 @@ func TestArchiverParent(t *testing.T) { arch := New(repo, testFS, Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() - firstSnapshot, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1673,38 +1810,38 @@ func TestArchiverParent(t *testing.T) { } return nil }) + rtest.Equals(t, test.statInitial.Files, summary.Files) + rtest.Equals(t, test.statInitial.Dirs, summary.Dirs) + rtest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, firstSnapshot, test.statInitial) + + if test.modify != nil { + test.modify(tempdir) + } opts := SnapshotOptions{ Time: time.Now(), ParentSnapshot: firstSnapshot, } - _, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts) + testFS.bytesRead = map[string]int{} + secondSnapshot, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } - // check that all files still been read exactly once - TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { - file, ok := item.(TestFile) - if !ok { - return nil - } - - n, ok := testFS.bytesRead[filename] - if !ok { - t.Fatalf("file %v was not read at all", filename) - } - - if n != len(file.Content) { - t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content)) - } - return nil - }) + if test.modify == nil { + // check that no files were read this time + rtest.Equals(t, map[string]int{}, testFS.bytesRead) + } + rtest.Equals(t, test.statSecond.Files, summary.Files) + rtest.Equals(t, test.statSecond.Dirs, summary.Dirs) + rtest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, secondSnapshot, test.statSecond) t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1804,7 +1941,7 @@ func TestArchiverErrorReporting(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1814,7 +1951,7 @@ func TestArchiverErrorReporting(t *testing.T) { arch := New(repo, 
fs.Track{FS: fs.Local{}}, Options{}) arch.Error = test.errFn - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if test.mustError { if err != nil { t.Logf("found expected error (%v), skipping further checks", err) @@ -1837,7 +1974,7 @@ func TestArchiverErrorReporting(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1874,20 +2011,20 @@ func TestArchiverContextCanceled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, TestDir{ "targetfile": TestFile{Content: "foobar"}, }) // Ensure that the archiver itself reports the canceled context and not just the backend - repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) + repo, _ := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Logf("found expected error (%v)", err) @@ -1910,24 +2047,16 @@ type TrackFS struct { m sync.Mutex } -func (m *TrackFS) Open(name string) (fs.File, error) { - m.m.Lock() - m.opened[name]++ - m.m.Unlock() - - return m.FS.Open(name) -} - -func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { +func (m *TrackFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { m.m.Lock() m.opened[name]++ m.m.Unlock() - return m.FS.OpenFile(name, flag, perm) + return m.FS.OpenFile(name, flag, metadataOnly) } type failSaveRepo struct { - restic.Repository + archiverRepo failAfter int32 cnt int32 err error @@ -1939,7 +2068,7 @@ func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []by return restic.Hash(buf), false, 0, f.err } - return f.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate) + return f.archiverRepo.SaveBlob(ctx, t, buf, id, storeDuplicate) } func TestArchiverAbortEarlyOnError(t *testing.T) { @@ -1968,16 +2097,16 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { { src: TestDir{ "dir": TestDir{ - "file0": TestFile{Content: string(restictest.Random(0, 1024))}, - "file1": TestFile{Content: string(restictest.Random(1, 1024))}, - "file2": TestFile{Content: string(restictest.Random(2, 1024))}, - "file3": TestFile{Content: string(restictest.Random(3, 1024))}, - "file4": TestFile{Content: string(restictest.Random(4, 1024))}, - "file5": TestFile{Content: string(restictest.Random(5, 1024))}, - "file6": TestFile{Content: string(restictest.Random(6, 1024))}, - "file7": TestFile{Content: string(restictest.Random(7, 1024))}, - "file8": TestFile{Content: string(restictest.Random(8, 1024))}, - "file9": TestFile{Content: string(restictest.Random(9, 1024))}, + "file0": TestFile{Content: string(rtest.Random(0, 1024))}, + "file1": TestFile{Content: string(rtest.Random(1, 1024))}, + "file2": TestFile{Content: string(rtest.Random(2, 1024))}, + "file3": TestFile{Content: string(rtest.Random(3, 1024))}, + "file4": TestFile{Content: string(rtest.Random(4, 1024))}, + "file5": 
TestFile{Content: string(rtest.Random(5, 1024))}, + "file6": TestFile{Content: string(rtest.Random(6, 1024))}, + "file7": TestFile{Content: string(rtest.Random(7, 1024))}, + "file8": TestFile{Content: string(rtest.Random(8, 1024))}, + "file9": TestFile{Content: string(rtest.Random(9, 1024))}, }, }, wantOpen: map[string]uint{ @@ -2002,7 +2131,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() testFS := &TrackFS{ @@ -2015,9 +2144,9 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { } testRepo := &failSaveRepo{ - Repository: repo, - failAfter: int32(test.failAfter), - err: test.err, + archiverRepo: repo, + failAfter: int32(test.failAfter), + err: test.err, } // at most two files may be queued @@ -2026,7 +2155,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { SaveBlobConcurrency: 1, }) - _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if !errors.Is(err, test.err) { t.Errorf("expected error (%v) not found, got %v", test.err, err) } @@ -2044,7 +2173,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { } } -func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Snapshot, filename string) (*restic.Snapshot, *restic.Node) { +func snapshot(t testing.TB, repo archiverRepo, fs fs.FS, parent *restic.Snapshot, filename string) (*restic.Snapshot, *restic.Node) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2054,7 +2183,7 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Sna Time: time.Now(), ParentSnapshot: parent, } - snapshot, _, err := arch.Snapshot(ctx, []string{filename}, sopts) + snapshot, _, _, err := arch.Snapshot(ctx, []string{filename}, sopts) if err != nil { t.Fatal(err) } @@ -2072,48 +2201,51 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Sna return snapshot, node } -// StatFS allows overwriting what is returned by the Lstat function. 
-type StatFS struct { +type overrideFS struct { fs.FS - - OverrideLstat map[string]os.FileInfo - OnlyOverrideStat bool + overrideFI *fs.ExtendedFileInfo + resetFIOnRead bool + overrideNode *restic.Node + overrideErr error } -func (fs *StatFS) Lstat(name string) (os.FileInfo, error) { - if !fs.OnlyOverrideStat { - if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - return fi, nil - } +func (m *overrideFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, metadataOnly) + if err != nil { + return f, err } - return fs.FS.Lstat(name) + if filepath.Base(name) == "testfile" || filepath.Base(name) == "testdir" { + return &overrideFile{f, m}, nil + } + return f, nil } -func (fs *StatFS) OpenFile(name string, flags int, perm os.FileMode) (fs.File, error) { - if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - f, err := fs.FS.OpenFile(name, flags, perm) - if err != nil { - return nil, err - } +type overrideFile struct { + fs.File + ofs *overrideFS +} - wrappedFile := fileStat{ - File: f, - fi: fi, - } - return wrappedFile, nil +func (f overrideFile) Stat() (*fs.ExtendedFileInfo, error) { + if f.ofs.overrideFI == nil { + return f.File.Stat() } + return f.ofs.overrideFI, nil - return fs.FS.OpenFile(name, flags, perm) } -type fileStat struct { - fs.File - fi os.FileInfo +func (f overrideFile) MakeReadable() error { + if f.ofs.resetFIOnRead { + f.ofs.overrideFI = nil + } + return f.File.MakeReadable() } -func (f fileStat) Stat() (os.FileInfo, error) { - return f.fi, nil +func (f overrideFile) ToNode(ignoreXattrListError bool) (*restic.Node, error) { + if f.ofs.overrideNode == nil { + return f.File.ToNode(ignoreXattrListError) + } + return f.ofs.overrideNode, f.ofs.overrideErr } // used by wrapFileInfo, use untyped const in order to avoid having a version @@ -2125,6 +2257,8 @@ const ( ) func TestMetadataChanged(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + files := TestDir{ "testfile": TestFile{ Content: "foo bar test file", @@ -2133,26 +2267,29 @@ func TestMetadataChanged(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata fi := lstat(t, "testfile") - want, err := restic.NodeFromFileInfo("testfile", fi) - if err != nil { - t.Fatal(err) - } + localFS := &fs.Local{} + meta, err := localFS.OpenFile("testfile", fs.O_NOFOLLOW, true) + rtest.OK(t, err) + want, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) - fs := &StatFS{ - FS: fs.Local{}, - OverrideLstat: map[string]os.FileInfo{ - "testfile": fi, - }, + fs := &overrideFS{ + FS: localFS, + overrideFI: fi, + overrideNode: &restic.Node{}, } + *fs.overrideNode = *want sn, node2 := snapshot(t, repo, fs, nil, "testfile") // set some values so we can then compare the nodes + want.DeviceID = 0 want.Content = node2.Content want.Path = "" if len(want.ExtendedAttributes) == 0 { @@ -2166,26 +2303,31 @@ func TestMetadataChanged(t *testing.T) { t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node2)) } - // modify the mode by wrapping it in a new struct, uses the consts defined above - fs.OverrideLstat["testfile"] = wrapFileInfo(fi) + // modify the mode and UID/GID + modFI := *fi + modFI.Mode = mockFileInfoMode + if runtime.GOOS != "windows" { + modFI.UID = mockFileInfoUID + modFI.GID = mockFileInfoGID + } + + fs.overrideFI = &modFI + rtest.Assert(t, !fileChanged(fs.overrideFI, node2, 0), "testfile must not be 
considered as changed") // set the override values in the 'want' node which - want.Mode = 0400 + want.Mode = mockFileInfoMode // ignore UID and GID on Windows if runtime.GOOS != "windows" { - want.UID = 51234 - want.GID = 51235 + want.UID = mockFileInfoUID + want.GID = mockFileInfoGID } - // no user and group name - want.User = "" - want.Group = "" + // update mock node accordingly + fs.overrideNode.Mode = want.Mode + fs.overrideNode.UID = want.UID + fs.overrideNode.GID = want.GID // make another snapshot _, node3 := snapshot(t, repo, fs, sn, "testfile") - // Override username and group to empty string - in case underlying system has user with UID 51234 - // See https://github.com/restic/restic/issues/2372 - node3.User = "" - node3.Group = "" // make sure that metadata was recorded successfully if !cmp.Equal(want, node3) { @@ -2195,53 +2337,200 @@ func TestMetadataChanged(t *testing.T) { // make sure the content matches TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile)) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } -func TestRacyFileSwap(t *testing.T) { +func TestRacyFileTypeSwap(t *testing.T) { files := TestDir{ - "file": TestFile{ + "testfile": TestFile{ Content: "foo bar test file", }, + "testdir": TestDir{}, } - tempdir, repo := prepareTempdirRepoSrc(t, files) + for _, dirError := range []bool{false, true} { + desc := "file changed type" + if dirError { + desc = "dir changed type" + } + t.Run(desc, func(t *testing.T) { + tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) - defer back() + back := rtest.Chdir(t, tempdir) + defer back() - // get metadata of current folder - fi := lstat(t, ".") - tempfile := filepath.Join(tempdir, "file") + // get metadata of current folder + var fakeName, realName string + if dirError { + // lstat claims this is a directory, but it's actually a file + fakeName = "testdir" + realName = "testfile" + } else { + fakeName = "testfile" + realName = "testdir" + } + fakeFI := lstat(t, fakeName) + tempfile := filepath.Join(tempdir, realName) - statfs := &StatFS{ - FS: fs.Local{}, - OverrideLstat: map[string]os.FileInfo{ - tempfile: fi, - }, - OnlyOverrideStat: true, + statfs := &overrideFS{ + FS: fs.Local{}, + overrideFI: fakeFI, + resetFIOnRead: true, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wg, ctx := errgroup.WithContext(ctx) + repo.StartPackUploader(ctx, wg) + + arch := New(repo, fs.Track{FS: statfs}, Options{}) + arch.Error = func(item string, err error) error { + t.Logf("archiver error as expected for %v: %v", item, err) + return err + } + arch.runWorkers(ctx, wg) + + // fs.Track will panic if the file was not closed + _, excluded, err := arch.save(ctx, "/", tempfile, nil) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "changed type, refusing to archive"), "save() returned wrong error: %v", err) + tpe := "file" + if dirError { + tpe = "directory" + } + rtest.Assert(t, strings.Contains(err.Error(), tpe+" "), "unexpected item type in error: %v", err) + rtest.Assert(t, !excluded, "Save() excluded the node, that's unexpected") + }) } +} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +type mockToNoder struct { + node *restic.Node + err error +} - wg, ctx := errgroup.WithContext(ctx) - repo.StartPackUploader(ctx, wg) +func (m *mockToNoder) ToNode(_ bool) (*restic.Node, error) { + return m.node, m.err +} - arch := New(repo, fs.Track{FS: statfs}, Options{}) +func 
TestMetadataBackupErrorFiltering(t *testing.T) { + tempdir := t.TempDir() + filename := filepath.Join(tempdir, "file") + repo := repository.TestRepository(t) + + arch := New(repo, fs.Local{}, Options{}) + + var filteredErr error + replacementErr := fmt.Errorf("replacement") arch.Error = func(item string, err error) error { - t.Logf("archiver error as expected for %v: %v", item, err) - return err + filteredErr = err + return replacementErr } - arch.runWorkers(ctx, wg) - // fs.Track will panic if the file was not closed - _, excluded, err := arch.Save(ctx, "/", tempfile, nil) + nonExistNoder := &mockToNoder{ + node: &restic.Node{Type: restic.NodeTypeFile}, + err: fmt.Errorf("not found"), + } + + // check that errors from reading extended metadata are properly filtered + node, err := arch.nodeFromFileInfo("file", filename+"invalid", nonExistNoder, false) + rtest.Assert(t, node != nil, "node is missing") + rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err) + rtest.Assert(t, filteredErr != nil, "missing inner error") + + // check that errors from reading irregular file are not filtered + filteredErr = nil + nonExistNoder = &mockToNoder{ + node: &restic.Node{Type: restic.NodeTypeIrregular}, + err: fmt.Errorf(`unsupported file type "irregular"`), + } + node, err = arch.nodeFromFileInfo("file", filename, nonExistNoder, false) + rtest.Assert(t, node != nil, "node is missing") + rtest.Assert(t, filteredErr == nil, "error for irregular node should not have been filtered") + rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) +} + +func TestIrregularFile(t *testing.T) { + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + } + tempdir, repo := prepareTempdirRepoSrc(t, files) + + back := rtest.Chdir(t, tempdir) + defer back() + + tempfile := filepath.Join(tempdir, "testfile") + fi := lstat(t, "testfile") + // patch mode to irregular + fi.Mode = (fi.Mode &^ os.ModeType) | os.ModeIrregular + + override := &overrideFS{ + FS: fs.Local{}, + overrideFI: fi, + overrideNode: &restic.Node{ + Type: restic.NodeTypeIrregular, + }, + overrideErr: fmt.Errorf(`unsupported file type "irregular"`), + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + arch := New(repo, fs.Track{FS: override}, Options{}) + _, excluded, err := arch.save(ctx, "/", tempfile, nil) if err == nil { - t.Errorf("Save() should have failed") + t.Fatalf("Save() should have failed") } + rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) if excluded { t.Errorf("Save() excluded the node, that's unexpected") } } + +type missingFS struct { + fs.FS + errorOnOpen bool +} + +func (fs *missingFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + if fs.errorOnOpen { + return nil, os.ErrNotExist + } + + return &missingFile{}, nil +} + +type missingFile struct { + fs.File +} + +func (f *missingFile) Stat() (*fs.ExtendedFileInfo, error) { + return nil, os.ErrNotExist +} + +func (f *missingFile) Close() error { + // prevent segfault in test + return nil +} + +func TestDisappearedFile(t *testing.T) { + tempdir, repo := prepareTempdirRepoSrc(t, TestDir{}) + + back := rtest.Chdir(t, tempdir) + defer back() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // depending on the underlying FS implementation a missing file may be detected by OpenFile or + // the subsequent 
file.Stat() call. Thus test both cases. + for _, errorOnOpen := range []bool{false, true} { + arch := New(repo, fs.Track{FS: &missingFS{FS: &fs.Local{}, errorOnOpen: errorOnOpen}}, Options{}) + _, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "testdir"), nil) + rtest.OK(t, err) + rtest.Assert(t, excluded, "testfile should have been excluded") + } +} diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 7523f074983..b6cc1ba4ecc 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -4,38 +4,49 @@ package archiver import ( - "os" - "syscall" + "testing" + + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" ) -type wrappedFileInfo struct { - os.FileInfo - sys interface{} - mode os.FileMode +func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { + want := nodeFromFile(t, &fs.Local{}, name) + _, node := snapshot(t, repo, &fs.Local{}, nil, name) + return want, node } -func (fi wrappedFileInfo) Sys() interface{} { - return fi.sys -} +func TestHardlinkMetadata(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + "linktarget": TestFile{ + Content: "test file", + }, + "testlink": TestHardlink{ + Target: "./linktarget", + }, + "testdir": TestDir{}, + } -func (fi wrappedFileInfo) Mode() os.FileMode { - return fi.mode -} + tempdir, repo := prepareTempdirRepoSrc(t, files) -// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. -func wrapFileInfo(fi os.FileInfo) os.FileInfo { - // get the underlying stat_t and modify the values - stat := fi.Sys().(*syscall.Stat_t) - stat.Mode = mockFileInfoMode - stat.Uid = mockFileInfoUID - stat.Gid = mockFileInfoGID - - // wrap the os.FileInfo so we can return a modified stat_t - res := wrappedFileInfo{ - FileInfo: fi, - sys: stat, - mode: mockFileInfoMode, - } + back := rtest.Chdir(t, tempdir) + defer back() + + want, node := statAndSnapshot(t, repo, "testlink") + rtest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) + rtest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) + rtest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) + + _, node = statAndSnapshot(t, repo, "testfile") + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) - return res + _, node = statAndSnapshot(t, repo, "testdir") + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) } diff --git a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go deleted file mode 100644 index e1195030f3d..00000000000 --- a/internal/archiver/archiver_windows_test.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows -// +build windows - -package archiver - -import ( - "os" -) - -type wrappedFileInfo struct { - os.FileInfo - mode os.FileMode -} - -func (fi wrappedFileInfo) Mode() os.FileMode { - return fi.mode -} - -// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. 
-func wrapFileInfo(fi os.FileInfo) os.FileInfo { - // wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows - res := wrappedFileInfo{ - FileInfo: fi, - mode: mockFileInfoMode, - } - - return res -} diff --git a/internal/archiver/blob_saver.go b/internal/archiver/blob_saver.go index ae4879ff430..356a32ce290 100644 --- a/internal/archiver/blob_saver.go +++ b/internal/archiver/blob_saver.go @@ -2,28 +2,29 @@ package archiver import ( "context" + "fmt" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" "golang.org/x/sync/errgroup" ) -// Saver allows saving a blob. -type Saver interface { +// saver allows saving a blob. +type saver interface { SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) } -// BlobSaver concurrently saves incoming blobs to the repo. -type BlobSaver struct { - repo Saver +// blobSaver concurrently saves incoming blobs to the repo. +type blobSaver struct { + repo saver ch chan<- saveBlobJob } -// NewBlobSaver returns a new blob. A worker pool is started, it is stopped +// newBlobSaver returns a new blob saver. A worker pool is started; it is stopped // when ctx is cancelled. -func NewBlobSaver(ctx context.Context, wg *errgroup.Group, repo Saver, workers uint) *BlobSaver { +func newBlobSaver(ctx context.Context, wg *errgroup.Group, repo saver, workers uint) *blobSaver { ch := make(chan saveBlobJob) - s := &BlobSaver{ + s := &blobSaver{ repo: repo, ch: ch, } @@ -37,15 +38,15 @@ func NewBlobSaver(ctx context.Context, wg *errgroup.Group, repo Saver, workers u return s } -func (s *BlobSaver) TriggerShutdown() { +func (s *blobSaver) TriggerShutdown() { close(s.ch) } // Save stores a blob in the repo. It checks the index and the known blobs // before saving anything. It takes ownership of the buffer passed in.
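//
// A hedged usage sketch (buffer and callback are illustrative); the callback
// is invoked by a worker once the blob has been stored:
//
//	buf := pool.Get()
//	s.Save(ctx, restic.DataBlob, buf, "path/of/file", func(res saveBlobResponse) {
//		// res.id, res.length, res.sizeInRepo and res.known are now valid
//	})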
-func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) { +func (s *blobSaver) Save(ctx context.Context, t restic.BlobType, buf *buffer, filename string, cb func(res saveBlobResponse)) { select { - case s.ch <- saveBlobJob{BlobType: t, buf: buf, cb: cb}: + case s.ch <- saveBlobJob{BlobType: t, buf: buf, fn: filename, cb: cb}: case <-ctx.Done(): debug.Log("not sending job, context is cancelled") } @@ -53,25 +54,26 @@ func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, cb type saveBlobJob struct { restic.BlobType - buf *Buffer - cb func(res SaveBlobResponse) + buf *buffer + fn string + cb func(res saveBlobResponse) } -type SaveBlobResponse struct { +type saveBlobResponse struct { id restic.ID length int sizeInRepo int known bool } -func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (SaveBlobResponse, error) { +func (s *blobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) { id, known, sizeInRepo, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false) if err != nil { - return SaveBlobResponse{}, err + return saveBlobResponse{}, err } - return SaveBlobResponse{ + return saveBlobResponse{ id: id, length: len(buf), sizeInRepo: sizeInRepo, @@ -79,7 +81,7 @@ func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) }, nil } -func (s *BlobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { +func (s *blobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { for { var job saveBlobJob var ok bool @@ -95,7 +97,7 @@ func (s *BlobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { res, err := s.saveBlob(ctx, job.BlobType, job.buf.Data) if err != nil { debug.Log("saveBlob returned error, exiting: %v", err) - return err + return fmt.Errorf("failed to save blob from file %q: %w", job.fn, err) } job.cb(res) job.buf.Release() diff --git a/internal/archiver/blob_saver_test.go b/internal/archiver/blob_saver_test.go index 1996c35b843..e23ed12e5e0 100644 --- a/internal/archiver/blob_saver_test.go +++ b/internal/archiver/blob_saver_test.go @@ -4,20 +4,20 @@ import ( "context" "fmt" "runtime" + "strings" "sync" "sync/atomic" "testing" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) var errTest = errors.New("test error") type saveFail struct { - idx restic.MasterIndex cnt int32 failAt int32 } @@ -31,33 +31,27 @@ func (b *saveFail) SaveBlob(_ context.Context, _ restic.BlobType, _ []byte, id r return id, false, 0, nil } -func (b *saveFail) Index() restic.MasterIndex { - return b.idx -} - func TestBlobSaver(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg, ctx := errgroup.WithContext(ctx) - saver := &saveFail{ - idx: index.NewMasterIndex(), - } + saver := &saveFail{} - b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) + b := newBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) var wait sync.WaitGroup - var results []SaveBlobResponse + var results []saveBlobResponse var lock sync.Mutex wait.Add(20) for i := 0; i < 20; i++ { - buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} + buf := &buffer{Data: []byte(fmt.Sprintf("foo%d", i))} idx := i lock.Lock() - results = append(results, SaveBlobResponse{}) + results = append(results, saveBlobResponse{}) lock.Unlock() - b.Save(ctx, restic.DataBlob, 
buf, func(res SaveBlobResponse) { + b.Save(ctx, restic.DataBlob, buf, "file", func(res saveBlobResponse) { lock.Lock() results[idx] = res lock.Unlock() @@ -98,15 +92,14 @@ func TestBlobSaverError(t *testing.T) { wg, ctx := errgroup.WithContext(ctx) saver := &saveFail{ - idx: index.NewMasterIndex(), failAt: int32(test.failAt), } - b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) + b := newBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) for i := 0; i < test.blobs; i++ { - buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} - b.Save(ctx, restic.DataBlob, buf, func(res SaveBlobResponse) {}) + buf := &buffer{Data: []byte(fmt.Sprintf("foo%d", i))} + b.Save(ctx, restic.DataBlob, buf, "errfile", func(res saveBlobResponse) {}) } b.TriggerShutdown() @@ -116,9 +109,8 @@ func TestBlobSaverError(t *testing.T) { t.Errorf("expected error not found") } - if err != errTest { - t.Fatalf("unexpected error found: %v", err) - } + rtest.Assert(t, errors.Is(err, errTest), "unexpected error %v", err) + rtest.Assert(t, strings.Contains(err.Error(), "errfile"), "expected error to contain 'errfile' got: %v", err) }) } } diff --git a/internal/archiver/buffer.go b/internal/archiver/buffer.go index 39bda26682d..d5bfb46b393 100644 --- a/internal/archiver/buffer.go +++ b/internal/archiver/buffer.go @@ -1,14 +1,14 @@ package archiver -// Buffer is a reusable buffer. After the buffer has been used, Release should +// buffer is a reusable buffer. After the buffer has been used, Release should // be called so the underlying slice is put back into the pool. -type Buffer struct { +type buffer struct { Data []byte - pool *BufferPool + pool *bufferPool } // Release puts the buffer back into the pool it came from. -func (b *Buffer) Release() { +func (b *buffer) Release() { pool := b.pool if pool == nil || cap(b.Data) > pool.defaultSize { return @@ -20,32 +20,32 @@ func (b *Buffer) Release() { } } -// BufferPool implements a limited set of reusable buffers. -type BufferPool struct { - ch chan *Buffer +// bufferPool implements a limited set of reusable buffers. +type bufferPool struct { + ch chan *buffer defaultSize int } -// NewBufferPool initializes a new buffer pool. The pool stores at most max +// newBufferPool initializes a new buffer pool. The pool stores at most max // items. New buffers are created with defaultSize. Buffers that have grown // larger are not put back. -func NewBufferPool(max int, defaultSize int) *BufferPool { - b := &BufferPool{ - ch: make(chan *Buffer, max), +func newBufferPool(max int, defaultSize int) *bufferPool { + b := &bufferPool{ + ch: make(chan *buffer, max), defaultSize: defaultSize, } return b } // Get returns a new buffer, either from the pool or newly allocated. -func (pool *BufferPool) Get() *Buffer { +func (pool *bufferPool) Get() *buffer { select { case buf := <-pool.ch: return buf default: } - b := &Buffer{ + b := &buffer{ Data: make([]byte, pool.defaultSize), pool: pool, } diff --git a/internal/archiver/doc.go b/internal/archiver/doc.go index 928145aa203..1b9603975cf 100644 --- a/internal/archiver/doc.go +++ b/internal/archiver/doc.go @@ -1,12 +1,3 @@ // Package archiver contains the code which reads files, splits them into // chunks and saves the data to the repository. -// -// An Archiver has a number of worker goroutines handling saving the different -// data structures to the repository, the details are implemented by the -// FileSaver, BlobSaver, and TreeSaver types. 
-// -// The main goroutine (the one calling Snapshot()) traverses the directory tree -// and delegates all work to these worker pools. They return a type -// (FutureFile, FutureBlob, and FutureTree) which can be resolved later, by -// calling Wait() on it. package archiver diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go new file mode 100644 index 00000000000..c7dff0acb1a --- /dev/null +++ b/internal/archiver/exclude.go @@ -0,0 +1,336 @@ +package archiver + +import ( + "bytes" + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" +) + +// RejectByNameFunc is a function that takes a filename of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectByNameFunc func(path string) bool + +// RejectFunc is a function that takes a filename, the extended file info and +// the filesystem of a file that would be included in the backup. The function +// returns true if it should be excluded (rejected) from the backup. +type RejectFunc func(path string, fi *fs.ExtendedFileInfo, fs fs.FS) bool + +func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc { + return func(item string) bool { + for _, reject := range funcs { + if reject(item) { + return false + } + } + return true + } +} + +func CombineRejects(funcs []RejectFunc) SelectFunc { + return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + for _, reject := range funcs { + if reject(item, fi, fs) { + return false + } + } + return true + } +} + +type rejectionCache struct { + m map[string]bool + mtx sync.Mutex +} + +func newRejectionCache() *rejectionCache { + return &rejectionCache{m: make(map[string]bool)} +} + +// Lock locks the mutex in rc. +func (rc *rejectionCache) Lock() { + rc.mtx.Lock() +} + +// Unlock unlocks the mutex in rc. +func (rc *rejectionCache) Unlock() { + rc.mtx.Unlock() +} + +// Get returns the last stored value for dir and a second boolean that +// indicates whether that value was actually written to the cache. It is the +// caller's responsibility to call rc.Lock and rc.Unlock before using this +// method, otherwise data races may occur. +func (rc *rejectionCache) Get(dir string) (bool, bool) { + v, ok := rc.m[dir] + return v, ok +} + +// Store stores a new value for dir. It is the caller's responsibility to call +// rc.Lock and rc.Unlock before using this method, otherwise data races may +// occur. +func (rc *rejectionCache) Store(dir string, rejected bool) { + rc.m[dir] = rejected +} + +// RejectIfPresent returns a RejectFunc which itself returns whether a path +// should be excluded. The RejectFunc considers a file to be excluded when +// it resides in a directory with an exclusion file, which is specified by +// excludeFileSpec in the form "filename[:content]". The returned error is +// non-nil if the filename component of excludeFileSpec is empty. An internal +// rejection cache is used to expedite the evaluation +// of a directory based on previous visits.
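+//
+// A hedged usage sketch; the tagfile spec follows the CACHEDIR.TAG
+// convention, and the warnf callback is illustrative:
+//
+//	reject, err := RejectIfPresent("CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55", warnf)
+//	if err == nil && reject(filename, fi, filesystem) {
+//		// filename resides in a directory tagged as a cache and is skipped
+//	}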
+func RejectIfPresent(excludeFileSpec string, warnf func(msg string, args ...interface{})) (RejectFunc, error) { + if excludeFileSpec == "" { + return nil, errors.New("name for exclusion tagfile is empty") + } + colon := strings.Index(excludeFileSpec, ":") + if colon == 0 { + return nil, fmt.Errorf("no name for exclusion tagfile provided") + } + tf, tc := "", "" + if colon > 0 { + tf = excludeFileSpec[:colon] + tc = excludeFileSpec[colon+1:] + } else { + tf = excludeFileSpec + } + debug.Log("using %q as exclusion tagfile", tf) + rc := newRejectionCache() + return func(filename string, _ *fs.ExtendedFileInfo, fs fs.FS) bool { + return isExcludedByFile(filename, tf, tc, rc, fs, warnf) + }, nil +} + +// isExcludedByFile interprets filename as a path and returns true if that file +// is in an excluded directory. A directory is identified as excluded if it contains a +// tagfile which bears the name specified in tagFilename and whose content starts with +// header. If rc is non-nil, it is used to expedite the evaluation of a +// directory based on previous visits. +func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, fs fs.FS, warnf func(msg string, args ...interface{})) bool { + if tagFilename == "" { + return false + } + + if fs.Base(filename) == tagFilename { + return false // do not exclude the tagfile itself + } + rc.Lock() + defer rc.Unlock() + + dir := fs.Dir(filename) + rejected, visited := rc.Get(dir) + if visited { + return rejected + } + rejected = isDirExcludedByFile(dir, tagFilename, header, fs, warnf) + rc.Store(dir, rejected) + return rejected +} + +func isDirExcludedByFile(dir, tagFilename, header string, fsInst fs.FS, warnf func(msg string, args ...interface{})) bool { + tf := fsInst.Join(dir, tagFilename) + _, err := fsInst.Lstat(tf) + if errors.Is(err, os.ErrNotExist) { + return false + } + if err != nil { + warnf("could not access exclusion tagfile: %v", err) + return false + } + // when no signature is given, the mere presence of tf is enough reason + // to exclude filename + if len(header) == 0 { + return true + } + // From this stage, errors mean tagFilename exists but it is malformed. + // Warnings will be generated so that the user is informed that the + // intended ignore-action is not performed. + f, err := fsInst.OpenFile(tf, fs.O_RDONLY, false) + if err != nil { + warnf("could not open exclusion tagfile: %v", err) + return false + } + defer func() { + _ = f.Close() + }() + buf := make([]byte, len(header)) + _, err = io.ReadFull(f, buf) + // EOF is handled with a dedicated message, otherwise the warning would be too cryptic + if err == io.EOF { + warnf("invalid (too short) signature in exclusion tagfile %q\n", tf) + return false + } + if err != nil { + warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err) + return false + } + if !bytes.Equal(buf, []byte(header)) { + warnf("invalid signature in exclusion tagfile %q\n", tf) + return false + } + return true +} + +// deviceMap is used to track allowed source devices for backup. This is used to +// check for crossing mount points during backup (for --one-file-system). It +// maps the name of a source path to its device ID. +type deviceMap map[string]uint64 + +// newDeviceMap creates a new device map from the list of source paths.
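The tagfile check above reduces to: a directory is excluded when it contains a file with the configured name whose content begins with the expected signature; for the Cache Directory Tagging Standard that signature is "Signature: 8a477f597d28d172789f06886806bc55". A stripped-down, stdlib-only sketch of the check, without the rejection cache (names are illustrative, not the package's API):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// dirExcludedByTagfile reports whether dir contains tagName and, if header is
// non-empty, whether that file's content starts with the signature.
func dirExcludedByTagfile(dir, tagName, header string) bool {
	f, err := os.Open(filepath.Join(dir, tagName))
	if err != nil {
		return false // missing or unreadable tagfile means "not excluded"
	}
	defer f.Close()

	if header == "" {
		return true // mere presence of the tagfile is enough
	}

	buf := make([]byte, len(header))
	if _, err := io.ReadFull(f, buf); err != nil {
		return false // too short or unreadable: treat as malformed, do not exclude
	}
	return bytes.Equal(buf, []byte(header))
}

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	defer os.RemoveAll(dir)

	sig := "Signature: 8a477f597d28d172789f06886806bc55"
	_ = os.WriteFile(filepath.Join(dir, "CACHEDIR.TAG"), []byte(sig+"\n"), 0o600)

	fmt.Println(dirExcludedByTagfile(dir, "CACHEDIR.TAG", sig)) // true
}
```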
+func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) { + if runtime.GOOS == "windows" { + return nil, errors.New("Device IDs are not supported on Windows") + } + + deviceMap := make(map[string]uint64) + + for _, item := range allowedSourcePaths { + item, err := fs.Abs(fs.Clean(item)) + if err != nil { + return nil, err + } + + fi, err := fs.Lstat(item) + if err != nil { + return nil, err + } + + deviceMap[item] = fi.DeviceID + } + + if len(deviceMap) == 0 { + return nil, errors.New("zero allowed devices") + } + + return deviceMap, nil +} + +// IsAllowed returns true if the path is located on an allowed device. +func (m deviceMap) IsAllowed(item string, deviceID uint64, fs fs.FS) (bool, error) { + for dir := item; ; dir = fs.Dir(dir) { + debug.Log("item %v, test dir %v", item, dir) + + // find a parent directory that is on an allowed device (otherwise + // we would not traverse the directory at all) + allowedID, ok := m[dir] + if !ok { + if dir == fs.Dir(dir) { + // arrived at root, no allowed device found. this should not happen. + break + } + continue + } + + // if the item has a different device ID than the parent directory, + // we crossed a file system boundary + if allowedID != deviceID { + debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID) + return false, nil + } + + // item is on allowed device, accept it + debug.Log("item %v allowed", item) + return true, nil + } + + return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m) +} + +// RejectByDevice returns a RejectFunc that rejects files which are on a +// different file system than the files/dirs in samples. +func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { + deviceMap, err := newDeviceMap(samples, filesystem) + if err != nil { + return nil, err + } + debug.Log("allowed devices: %v\n", deviceMap) + + return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + allowed, err := deviceMap.IsAllowed(fs.Clean(item), fi.DeviceID, fs) + if err != nil { + // this should not happen + panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) + } + + if allowed { + // accept item + return false + } + + // reject everything except directories + if !fi.Mode.IsDir() { + return true + } + + // special case: make sure we keep mountpoints (directories which + // contain a mounted file system). Test this by checking if the parent + // directory would be included.
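The device check used by --one-file-system compares a file's device ID with that of an allowed parent directory, walking upwards until a known path is found. On Unix the ID ultimately comes from the stat structure; restic reads it via fs.ExtendedFileInfo, but the underlying lookup can be sketched with the standard library alone (Unix-only, illustrative):

```go
//go:build unix

package main

import (
	"fmt"
	"os"
	"syscall"
)

// deviceID returns the device number the given path resides on (Unix only).
func deviceID(path string) (uint64, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("no stat_t for %v", path)
	}
	return uint64(st.Dev), nil
}

func main() {
	root, _ := deviceID("/")
	tmp, _ := deviceID("/tmp")
	// A differing device ID means /tmp is a separate file system (e.g. a
	// tmpfs mount), which --one-file-system would refuse to cross.
	fmt.Println(root == tmp)
}
```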
+ parentDir := fs.Dir(fs.Clean(item)) + + parentFI, err := fs.Lstat(parentDir) + if err != nil { + debug.Log("item %v: error running lstat() on parent directory: %v", item, err) + // if in doubt, reject + return true + } + + parentAllowed, err := deviceMap.IsAllowed(parentDir, parentFI.DeviceID, fs) + if err != nil { + debug.Log("item %v: error checking parent directory: %v", item, err) + // if in doubt, reject + return true + } + + if parentAllowed { + // we found a mount point, so accept the directory + return false + } + + // reject everything else + return true + }, nil +} + +func RejectBySize(maxSize int64) (RejectFunc, error) { + return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { + // directory will be ignored + if fi.Mode.IsDir() { + return false + } + + filesize := fi.Size + if filesize > maxSize { + debug.Log("file %s is oversize: %d", item, filesize) + return true + } + + return false + }, nil +} + +// RejectCloudFiles returns a func which rejects files which are online-only cloud files +func RejectCloudFiles(warnf func(msg string, args ...interface{})) (RejectFunc, error) { + return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { + recall, err := fi.RecallOnDataAccess() + if err != nil { + warnf("item %v: error checking online-only status: %v", item, err) + return false + } + + if recall { + debug.Log("rejecting online-only cloud file %s", item) + return true + } + + return false + }, nil +} diff --git a/cmd/restic/exclude_test.go b/internal/archiver/exclude_test.go similarity index 78% rename from cmd/restic/exclude_test.go rename to internal/archiver/exclude_test.go index 9a24418ae32..9bfa5d83fe6 100644 --- a/cmd/restic/exclude_test.go +++ b/internal/archiver/exclude_test.go @@ -1,67 +1,14 @@ -package main +package archiver import ( "os" "path/filepath" "testing" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/test" ) -func TestRejectByPattern(t *testing.T) { - var tests = []struct { - filename string - reject bool - }{ - {filename: "/home/user/foo.go", reject: true}, - {filename: "/home/user/foo.c", reject: false}, - {filename: "/home/user/foobar", reject: false}, - {filename: "/home/user/foobar/x", reject: true}, - {filename: "/home/user/README", reject: false}, - {filename: "/home/user/README.md", reject: true}, - } - - patterns := []string{"*.go", "README.md", "/home/user/foobar/*"} - - for _, tc := range tests { - t.Run("", func(t *testing.T) { - reject := rejectByPattern(patterns) - res := reject(tc.filename) - if res != tc.reject { - t.Fatalf("wrong result for filename %v: want %v, got %v", - tc.filename, tc.reject, res) - } - }) - } -} - -func TestRejectByInsensitivePattern(t *testing.T) { - var tests = []struct { - filename string - reject bool - }{ - {filename: "/home/user/foo.GO", reject: true}, - {filename: "/home/user/foo.c", reject: false}, - {filename: "/home/user/foobar", reject: false}, - {filename: "/home/user/FOObar/x", reject: true}, - {filename: "/home/user/README", reject: false}, - {filename: "/home/user/readme.md", reject: true}, - } - - patterns := []string{"*.go", "README.md", "/home/user/foobar/*"} - - for _, tc := range tests { - t.Run("", func(t *testing.T) { - reject := rejectByInsensitivePattern(patterns) - res := reject(tc.filename) - if res != tc.reject { - t.Fatalf("wrong result for filename %v: want %v, got %v", - tc.filename, tc.reject, res) - } - }) - } -} - func TestIsExcludedByFile(t *testing.T) { const ( tagFilename = "CACHEDIR.TAG" @@ -102,7 +49,7 @@ func TestIsExcludedByFile(t 
*testing.T) { if tc.content == "" { h = "" } - if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got { + if got := isExcludedByFile(foo, tagFilename, h, newRejectionCache(), &fs.Local{}, func(msg string, args ...interface{}) { t.Logf(msg, args...) }); tc.want != got { t.Fatalf("expected %v, got %v", tc.want, got) } }) @@ -153,8 +100,8 @@ func TestMultipleIsExcludedByFile(t *testing.T) { // create two rejection functions, one that tests for the NOFOO file // and one for the NOBAR file - fooExclude, _ := rejectIfPresent("NOFOO") - barExclude, _ := rejectIfPresent("NOBAR") + fooExclude, _ := RejectIfPresent("NOFOO", nil) + barExclude, _ := RejectIfPresent("NOBAR", nil) // To mock the archiver scanning walk, we create filepath.WalkFn // that tests against the two rejection functions and stores @@ -164,8 +111,8 @@ func TestMultipleIsExcludedByFile(t *testing.T) { if err != nil { return err } - excludedByFoo := fooExclude(p) - excludedByBar := barExclude(p) + excludedByFoo := fooExclude(p, nil, &fs.Local{}) + excludedByBar := barExclude(p, nil, &fs.Local{}) excluded := excludedByFoo || excludedByBar // the log message helps debugging in case the test fails t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded) @@ -192,9 +139,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) { func TestIsExcludedByFileSize(t *testing.T) { tempDir := test.TempDir(t) - // Max size of file is set to be 1k - maxSizeStr := "1k" - // Create some files in a temporary directory. // Files in UPPERCASE will be used as exclusion triggers later on. // We will test the inclusion later, so we add the expected value as @@ -238,7 +182,7 @@ func TestIsExcludedByFileSize(t *testing.T) { test.OKs(t, errs) // see if anything went wrong during the creation // create rejection function - sizeExclude, _ := rejectBySize(maxSizeStr) + sizeExclude, _ := RejectBySize(1024) // To mock the archiver scanning walk, we create filepath.WalkFn // that tests against the two rejection functions and stores @@ -249,7 +193,7 @@ func TestIsExcludedByFileSize(t *testing.T) { return err } - excluded := sizeExclude(p, fi) + excluded := sizeExclude(p, fs.ExtendedStat(fi), nil) // the log message helps debugging in case the test fails t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) m[p] = !excluded @@ -268,7 +212,7 @@ func TestIsExcludedByFileSize(t *testing.T) { } func TestDeviceMap(t *testing.T) { - deviceMap := DeviceMap{ + deviceMap := deviceMap{ filepath.FromSlash("/"): 1, filepath.FromSlash("/usr/local"): 5, } @@ -299,7 +243,7 @@ func TestDeviceMap(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID) + res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID, &fs.Local{}) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index 724f5e620ce..ca8ec2fbb50 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "os" "sync" "github.com/restic/chunker" @@ -15,13 +14,13 @@ import ( "golang.org/x/sync/errgroup" ) -// SaveBlobFn saves a blob to a repo. -type SaveBlobFn func(context.Context, restic.BlobType, *Buffer, func(res SaveBlobResponse)) +// saveBlobFn saves a blob to a repo. +type saveBlobFn func(context.Context, restic.BlobType, *buffer, string, func(res saveBlobResponse)) -// FileSaver concurrently saves incoming files to the repo. 
-type FileSaver struct { - saveFilePool *BufferPool - saveBlob SaveBlobFn +// fileSaver concurrently saves incoming files to the repo. +type fileSaver struct { + saveFilePool *bufferPool + saveBlob saveBlobFn pol chunker.Pol @@ -29,21 +28,21 @@ type FileSaver struct { CompleteBlob func(bytes uint64) - NodeFromFileInfo func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) + NodeFromFileInfo func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) } -// NewFileSaver returns a new file saver. A worker pool with fileWorkers is +// newFileSaver returns a new file saver. A worker pool with fileWorkers is // started, it is stopped when ctx is cancelled. -func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver { +func newFileSaver(ctx context.Context, wg *errgroup.Group, save saveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *fileSaver { ch := make(chan saveFileJob) debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers) poolSize := fileWorkers + blobWorkers - s := &FileSaver{ + s := &fileSaver{ saveBlob: save, - saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize), + saveFilePool: newBufferPool(int(poolSize), chunker.MaxSize), pol: pol, ch: ch, @@ -60,24 +59,23 @@ func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol return s } -func (s *FileSaver) TriggerShutdown() { +func (s *fileSaver) TriggerShutdown() { close(s.ch) } -// CompleteFunc is called when the file has been saved. -type CompleteFunc func(*restic.Node, ItemStats) +// fileCompleteFunc is called when the file has been saved. +type fileCompleteFunc func(*restic.Node, ItemStats) // Save stores the file f and returns the data once it has been completed. The // file is closed by Save. completeReading is only called if the file was read // successfully. complete is always called. If completeReading is called, then // this will always happen before calling complete. -func (s *FileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete CompleteFunc) FutureNode { +func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, start func(), completeReading func(), complete fileCompleteFunc) futureNode { fn, ch := newFutureNode() job := saveFileJob{ snPath: snPath, target: target, file: file, - fi: fi, ch: ch, start: start, @@ -100,16 +98,15 @@ type saveFileJob struct { snPath string target string file fs.File - fi os.FileInfo ch chan<- futureNodeResult start func() completeReading func() - complete CompleteFunc + complete fileCompleteFunc } // saveFile stores the file f in the repo, then closes it. 
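All of the saver types hand results back through the same future pattern: Save enqueues a job together with a single-element channel and returns a futureNode immediately; the worker resolves it exactly once, and the caller blocks only when it actually takes the result. The essence of that pattern as a self-contained sketch (generic, illustrative names, not the package's API):

```go
package main

import "fmt"

// future delivers exactly one result; take blocks until the worker sends it.
type future[T any] struct{ ch chan T }

func newFuture[T any]() (future[T], chan<- T) {
	ch := make(chan T, 1) // buffered so the worker never blocks on completion
	return future[T]{ch: ch}, ch
}

func (f future[T]) take() T { return <-f.ch }

func main() {
	fut, ch := newFuture[string]()

	go func() {
		// worker goroutine: do the work, then resolve the future once
		ch <- "saved"
	}()

	fmt.Println(fut.take()) // blocks until the worker is done
}
```

The one-element buffer is what lets the worker complete the future and move on without waiting for the consumer.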
-func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) { +func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, start func(), finishReading func(), finish func(res futureNodeResult)) { start() fnr := futureNodeResult{ @@ -156,14 +153,14 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, f.Name(), fi) + node, err := s.NodeFromFileInfo(snPath, target, f, false) if err != nil { _ = f.Close() completeError(err) return } - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { _ = f.Close() completeError(errors.Errorf("node type %q is wrong", node.Type)) return @@ -205,7 +202,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat node.Content = append(node.Content, restic.ID{}) lock.Unlock() - s.saveBlob(ctx, restic.DataBlob, buf, func(sbr SaveBlobResponse) { + s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr saveBlobResponse) { lock.Lock() if !sbr.known { fnr.stats.DataBlobs++ @@ -246,7 +243,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat completeBlob() } -func (s *FileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { +func (s *fileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { // a worker has one chunker which is reused for each file (because it contains a rather large buffer) chnker := chunker.New(nil, s.pol) @@ -262,7 +259,7 @@ func (s *FileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { } } - s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.fi, job.start, func() { + s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.start, func() { if job.completeReading != nil { job.completeReading() } diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index b088eeeedb1..ce862f6feb6 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -30,11 +30,11 @@ func createTestFiles(t testing.TB, num int) (files []string) { return files } -func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Context, *errgroup.Group) { +func startFileSaver(ctx context.Context, t testing.TB, fsInst fs.FS) (*fileSaver, context.Context, *errgroup.Group) { wg, ctx := errgroup.WithContext(ctx) - saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer, cb func(SaveBlobResponse)) { - cb(SaveBlobResponse{ + saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) { + cb(saveBlobResponse{ id: restic.Hash(buf.Data), length: len(buf.Data), sizeInRepo: len(buf.Data), @@ -48,9 +48,9 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont t.Fatal(err) } - s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) - s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - return restic.NodeFromFileInfo(filename, fi) + s := newFileSaver(ctx, wg, saveBlob, pol, workers, workers) + s.NodeFromFileInfo = func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) { + return meta.ToNode(ignoreXattrListError) } return s, ctx, wg @@ -67,22 +67,17 @@ func TestFileSaver(t *testing.T) { completeFn := func(*restic.Node, ItemStats) {} testFs := 
fs.Local{} - s, ctx, wg := startFileSaver(ctx, t) + s, ctx, wg := startFileSaver(ctx, t, testFs) - var results []FutureNode + var results []futureNode for _, filename := range files { - f, err := testFs.Open(filename) + f, err := testFs.OpenFile(filename, os.O_RDONLY, false) if err != nil { t.Fatal(err) } - fi, err := f.Stat() - if err != nil { - t.Fatal(err) - } - - ff := s.Save(ctx, filename, filename, f, fi, startFn, completeReadingFn, completeFn) + ff := s.Save(ctx, filename, filename, f, startFn, completeReadingFn, completeFn) results = append(results, ff) } diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index 6ce2a47000b..2e6b7210ce3 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -2,8 +2,6 @@ package archiver import ( "context" - "os" - "path/filepath" "sort" "github.com/restic/restic/internal/debug" @@ -22,13 +20,13 @@ type Scanner struct { } // NewScanner initializes a new Scanner. -func NewScanner(fs fs.FS) *Scanner { +func NewScanner(filesystem fs.FS) *Scanner { return &Scanner{ - FS: fs, - SelectByName: func(item string) bool { return true }, - Select: func(item string, fi os.FileInfo) bool { return true }, - Error: func(item string, err error) error { return err }, - Result: func(item string, s ScanStats) {}, + FS: filesystem, + SelectByName: func(_ string) bool { return true }, + Select: func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, + Error: func(_ string, err error) error { return err }, + Result: func(_ string, _ ScanStats) {}, } } @@ -38,7 +36,7 @@ type ScanStats struct { Bytes uint64 } -func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree Tree) (ScanStats, error) { +func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree tree) (ScanStats, error) { // traverse the path in the file system for all leaf nodes if tree.Leaf() { abstarget, err := s.FS.Abs(tree.Path) @@ -83,7 +81,7 @@ func (s *Scanner) Scan(ctx context.Context, targets []string) error { debug.Log("clean targets %v", cleanTargets) // we're using the same tree representation as the archiver does - tree, err := NewTree(s.FS, cleanTargets) + tree, err := newTree(s.FS, cleanTargets) if err != nil { return err } @@ -115,23 +113,23 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca } // run remaining select functions that require file information - if !s.Select(target, fi) { + if !s.Select(target, fi, s.FS) { return stats, nil } switch { - case fi.Mode().IsRegular(): + case fi.Mode.IsRegular(): stats.Files++ - stats.Bytes += uint64(fi.Size()) - case fi.Mode().IsDir(): - names, err := readdirnames(s.FS, target, fs.O_NOFOLLOW) + stats.Bytes += uint64(fi.Size) + case fi.Mode.IsDir(): + names, err := fs.Readdirnames(s.FS, target, fs.O_NOFOLLOW) if err != nil { return stats, s.Error(target, err) } sort.Strings(names) for _, name := range names { - stats, err = s.scan(ctx, stats, filepath.Join(target, name)) + stats, err = s.scan(ctx, stats, s.FS.Join(target, name)) if err != nil { return stats, err } diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index 1b4cd1f7f2e..a47952388ff 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -9,7 +9,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestScanner(t *testing.T) { @@ -56,8 +56,8 @@ func TestScanner(t *testing.T) { }, }, 
}, - selFn: func(item string, fi os.FileInfo) bool { - if fi.IsDir() { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + if fi.Mode.IsDir() { return true } @@ -81,10 +81,10 @@ func TestScanner(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -216,10 +216,10 @@ func TestScannerError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -288,10 +288,10 @@ func TestScannerCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 111c1e68cdc..e555a70d627 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -6,11 +6,11 @@ import ( "path" "path/filepath" "runtime" + "sort" "strings" "testing" "time" - "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" @@ -25,13 +25,13 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res Tags: []string{"test"}, } if parent != nil { - sn, err := restic.LoadSnapshot(context.TODO(), arch.Repo, *parent) + sn, err := restic.LoadSnapshot(context.TODO(), repo, *parent) if err != nil { t.Fatal(err) } opts.ParentSnapshot = sn } - sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) + sn, _, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) if err != nil { t.Fatal(err) } @@ -63,11 +63,29 @@ func (s TestSymlink) String() string { return "" } +// TestHardlink describes a hardlink created for a test. +type TestHardlink struct { + Target string +} + +func (s TestHardlink) String() string { + return "" +} + // TestCreateFiles creates a directory structure described by dir at target, // which must already exist. On Windows, symlinks aren't created. 
func TestCreateFiles(t testing.TB, target string, dir TestDir) { t.Helper() - for name, item := range dir { + + // ensure a stable order such that it can be guaranteed that a hardlink target already exists + var names []string + for name := range dir { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + item := dir[name] targetPath := filepath.Join(target, name) switch it := item.(type) { @@ -77,12 +95,17 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) { t.Fatal(err) } case TestSymlink: - err := fs.Symlink(filepath.FromSlash(it.Target), targetPath) + err := os.Symlink(filepath.FromSlash(it.Target), targetPath) + if err != nil { + t.Fatal(err) + } + case TestHardlink: + err := os.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath) if err != nil { t.Fatal(err) } case TestDir: - err := fs.Mkdir(targetPath, 0755) + err := os.Mkdir(targetPath, 0755) if err != nil { t.Fatal(err) } @@ -134,7 +157,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { // first, test that all items are there TestWalkFiles(t, target, dir, func(path string, item interface{}) error { - fi, err := fs.Lstat(path) + fi, err := os.Lstat(path) if err != nil { return err } @@ -146,7 +169,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { } return nil case TestFile: - if !fs.IsRegularFile(fi) { + if !fi.Mode().IsRegular() { t.Errorf("is not a regular file: %v", path) return nil } @@ -165,7 +188,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { return nil } - target, err := fs.Readlink(path) + target, err := os.Readlink(path) if err != nil { return err } @@ -185,7 +208,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { }) // then, traverse the directory again, looking for additional files - err := fs.Walk(target, func(path string, fi os.FileInfo, err error) error { + err := filepath.Walk(target, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } @@ -215,7 +238,7 @@ func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.BlobLo return } - content := make([]byte, crypto.CiphertextLength(len(file.Content))) + content := make([]byte, len(file.Content)) pos := 0 for _, id := range node.Content { part, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:]) @@ -266,7 +289,7 @@ func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo resti switch e := entry.(type) { case TestDir: - if node.Type != "dir" { + if node.Type != restic.NodeTypeDir { t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir") return } @@ -278,13 +301,13 @@ func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo resti TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e) case TestFile: - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") } TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e) case TestSymlink: - if node.Type != "symlink" { - t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") + if node.Type != restic.NodeTypeSymlink { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "symlink") } if e.Target != node.LinkTarget { diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index ada7261f15d..a217abe2531 100644 --- a/internal/archiver/testing_test.go +++ 
b/internal/archiver/testing_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // MockT passes through all logging functions from T, but catches Fail(), @@ -54,7 +54,7 @@ func (t *MockT) Errorf(msg string, args ...interface{}) { func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) { for name, item := range files { target := filepath.Join(targetdir, filepath.FromSlash(name)) - err := fs.MkdirAll(filepath.Dir(target), 0700) + err := os.MkdirAll(filepath.Dir(target), 0700) if err != nil { t.Fatal(err) } @@ -66,7 +66,7 @@ func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) t.Fatal(err) } case TestSymlink: - err := fs.Symlink(filepath.FromSlash(it.Target), target) + err := os.Symlink(filepath.FromSlash(it.Target), target) if err != nil { t.Fatal(err) } @@ -101,11 +101,11 @@ func TestTestCreateFiles(t *testing.T) { } for i, test := range tests { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) t.Run("", func(t *testing.T) { tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) - err := fs.MkdirAll(tempdir, 0700) + err := os.MkdirAll(tempdir, 0700) if err != nil { t.Fatal(err) } @@ -114,7 +114,7 @@ func TestTestCreateFiles(t *testing.T) { for name, item := range test.files { targetPath := filepath.Join(tempdir, filepath.FromSlash(name)) - fi, err := fs.Lstat(targetPath) + fi, err := os.Lstat(targetPath) if err != nil { t.Error(err) continue @@ -122,7 +122,7 @@ func TestTestCreateFiles(t *testing.T) { switch node := item.(type) { case TestFile: - if !fs.IsRegularFile(fi) { + if !fi.Mode().IsRegular() { t.Errorf("is not regular file: %v", name) continue } @@ -142,7 +142,7 @@ func TestTestCreateFiles(t *testing.T) { continue } - target, err := fs.Readlink(targetPath) + target, err := os.Readlink(targetPath) if err != nil { t.Error(err) continue @@ -191,7 +191,7 @@ func TestTestWalkFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) got := make(map[string]string) @@ -321,7 +321,7 @@ func TestTestEnsureFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) createFilesAt(t, tempdir, test.files) subtestT := testing.TB(t) @@ -452,17 +452,17 @@ func TestTestEnsureSnapshot(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) targetDir := filepath.Join(tempdir, "target") - err := fs.Mkdir(targetDir, 0700) + err := os.Mkdir(targetDir, 0700) if err != nil { t.Fatal(err) } createFilesAt(t, targetDir, test.files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() repo := repository.TestRepository(t) @@ -473,7 +473,7 @@ func TestTestEnsureSnapshot(t *testing.T) { Hostname: "localhost", Tags: []string{"test"}, } - _, id, err := arch.Snapshot(ctx, []string{"."}, opts) + _, id, _, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/tree.go b/internal/archiver/tree.go index 16a78ee70ac..f4eb1abde62 100644 --- a/internal/archiver/tree.go +++ b/internal/archiver/tree.go @@ -9,7 +9,7 @@ import ( "github.com/restic/restic/internal/fs" ) -// Tree recursively defines how 
a snapshot should look when +// archived. // // When `Path` is set, this is a leaf node and the contents of `Path` should be @@ -20,8 +20,8 @@ import ( // // `FileInfoPath` is used to extract metadata for intermediate (=non-leaf) // trees. -type Tree struct { - Nodes map[string]Tree +type tree struct { + Nodes map[string]tree Path string // where the files/dirs to be saved are found FileInfoPath string // where the dir can be found that is not included itself, but its subdirs Root string // parent directory of the tree @@ -95,13 +95,13 @@ func rootDirectory(fs fs.FS, target string) string { } // Add adds a new file or directory to the tree. -func (t *Tree) Add(fs fs.FS, path string) error { +func (t *tree) Add(fs fs.FS, path string) error { if path == "" { panic("invalid path (empty string)") } if t.Nodes == nil { - t.Nodes = make(map[string]Tree) + t.Nodes = make(map[string]tree) } pc, virtualPrefix := pathComponents(fs, path, false) @@ -111,7 +111,7 @@ func (t *Tree) Add(fs fs.FS, path string) error { name := pc[0] root := rootDirectory(fs, path) - tree := Tree{Root: root} + tree := tree{Root: root} origName := name i := 0 @@ -152,63 +152,63 @@ func (t *Tree) Add(fs fs.FS, path string) error { } // add adds a new target path into the tree. -func (t *Tree) add(fs fs.FS, target, root string, pc []string) error { +func (t *tree) add(fs fs.FS, target, root string, pc []string) error { if len(pc) == 0 { return errors.Errorf("invalid path %q", target) } if t.Nodes == nil { - t.Nodes = make(map[string]Tree) + t.Nodes = make(map[string]tree) } name := pc[0] if len(pc) == 1 { - tree, ok := t.Nodes[name] + node, ok := t.Nodes[name] if !ok { - t.Nodes[name] = Tree{Path: target} + t.Nodes[name] = tree{Path: target} return nil } - if tree.Path != "" { + if node.Path != "" { return errors.Errorf("path is already set for target %v", target) } - tree.Path = target - t.Nodes[name] = tree + node.Path = target + t.Nodes[name] = node return nil } - tree := Tree{} + node := tree{} if other, ok := t.Nodes[name]; ok { - tree = other + node = other } subroot := fs.Join(root, name) - tree.FileInfoPath = subroot + node.FileInfoPath = subroot - err := tree.add(fs, target, subroot, pc[1:]) + err := node.add(fs, target, subroot, pc[1:]) if err != nil { return err } - t.Nodes[name] = tree + t.Nodes[name] = node return nil } -func (t Tree) String() string { +func (t tree) String() string { return formatTree(t, "") } // Leaf returns true if this is a leaf node, which means Path is set to a // non-empty string and the contents of Path should be inserted at this point // in the tree. -func (t Tree) Leaf() bool { +func (t tree) Leaf() bool { return t.Path != "" } // NodeNames returns the sorted list of subtree names. -func (t Tree) NodeNames() []string { +func (t tree) NodeNames() []string { // iterate over the nodes of a tree in lexicographic (=deterministic) order names := make([]string, 0, len(t.Nodes)) for name := range t.Nodes { @@ -219,7 +219,7 @@ func (t Tree) NodeNames() []string { } // formatTree returns a text representation of the tree t.
-func formatTree(t Tree, indent string) (s string) { +func formatTree(t tree, indent string) (s string) { for name, node := range t.Nodes { s += fmt.Sprintf("%v/%v, root %q, path %q, meta %q\n", indent, name, node.Root, node.Path, node.FileInfoPath) s += formatTree(node, indent+" ") @@ -228,12 +228,12 @@ func formatTree(t Tree, indent string) (s string) { } // unrollTree unrolls the tree so that only leaf nodes have Path set. -func unrollTree(f fs.FS, t *Tree) error { +func unrollTree(f fs.FS, t *tree) error { // if the current tree is a leaf node (Path is set) and has additional // nodes, add the contents of Path to the nodes. if t.Path != "" && len(t.Nodes) > 0 { debug.Log("resolve path %v", t.Path) - entries, err := readdirnames(f, t.Path, 0) + entries, err := fs.Readdirnames(f, t.Path, 0) if err != nil { return err } @@ -252,7 +252,7 @@ func unrollTree(f fs.FS, t *Tree) error { return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry)) } - t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)} + t.Nodes[entry] = tree{Path: f.Join(t.Path, entry)} } t.Path = "" } @@ -269,10 +269,10 @@ func unrollTree(f fs.FS, t *Tree) error { return nil } -// NewTree creates a Tree from the target files/directories. -func NewTree(fs fs.FS, targets []string) (*Tree, error) { +// newTree creates a Tree from the target files/directories. +func newTree(fs fs.FS, targets []string) (*tree, error) { debug.Log("targets: %v", targets) - tree := &Tree{} + tree := &tree{} seen := make(map[string]struct{}) for _, target := range targets { target = fs.Clean(target) diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go index a7dae387360..aeedefef5ae 100644 --- a/internal/archiver/tree_saver.go +++ b/internal/archiver/tree_saver.go @@ -9,20 +9,20 @@ import ( "golang.org/x/sync/errgroup" ) -// TreeSaver concurrently saves incoming trees to the repo. -type TreeSaver struct { - saveBlob func(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) +// treeSaver concurrently saves incoming trees to the repo. +type treeSaver struct { + saveBlob saveBlobFn errFn ErrorFunc ch chan<- saveTreeJob } -// NewTreeSaver returns a new tree saver. A worker pool with treeWorkers is +// newTreeSaver returns a new tree saver. A worker pool with treeWorkers is // started, it is stopped when ctx is cancelled. -func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob func(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)), errFn ErrorFunc) *TreeSaver { +func newTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob saveBlobFn, errFn ErrorFunc) *treeSaver { ch := make(chan saveTreeJob) - s := &TreeSaver{ + s := &treeSaver{ ch: ch, saveBlob: saveBlob, errFn: errFn, @@ -37,12 +37,12 @@ func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, sav return s } -func (s *TreeSaver) TriggerShutdown() { +func (s *treeSaver) TriggerShutdown() { close(s.ch) } // Save stores the dir d and returns the data once it has been completed. 
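A small behavioral change hides in the tree saver's save method below: a child error equal to context.Canceled is now returned directly instead of being offered to the error callback, which is allowed to swallow errors. Sketched as a standalone rule (errFn stands in for the archiver's ErrorFunc; errors.Is replaces the direct comparison used in the diff):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// filterError lets an error callback downgrade ordinary errors to warnings,
// but never lets it swallow a cancellation.
func filterError(err error, errFn func(error) error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, context.Canceled) {
		return err // cancellation must always propagate
	}
	return errFn(err) // may return nil to ignore the error
}

func main() {
	ignoreAll := func(error) error { return nil }

	fmt.Println(filterError(errors.New("read failed"), ignoreAll)) // <nil>
	fmt.Println(filterError(context.Canceled, ignoreAll))          // context canceled
}
```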
-func (s *TreeSaver) Save(ctx context.Context, snPath string, target string, node *restic.Node, nodes []FutureNode, complete CompleteFunc) FutureNode { +func (s *treeSaver) Save(ctx context.Context, snPath string, target string, node *restic.Node, nodes []futureNode, complete fileCompleteFunc) futureNode { fn, ch := newFutureNode() job := saveTreeJob{ snPath: snPath, @@ -66,13 +66,13 @@ type saveTreeJob struct { snPath string target string node *restic.Node - nodes []FutureNode + nodes []futureNode ch chan<- futureNodeResult - complete CompleteFunc + complete fileCompleteFunc } // save stores the nodes as a tree in the repo. -func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, ItemStats, error) { +func (s *treeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, ItemStats, error) { var stats ItemStats node := job.node nodes := job.nodes @@ -84,12 +84,16 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I for i, fn := range nodes { // fn is a copy, so clear the original value explicitly - nodes[i] = FutureNode{} + nodes[i] = futureNode{} fnr := fn.take(ctx) // return the error if it wasn't ignored if fnr.err != nil { debug.Log("err for %v: %v", fnr.snPath, fnr.err) + if fnr.err == context.Canceled { + return nil, stats, fnr.err + } + fnr.err = s.errFn(fnr.target, fnr.err) if fnr.err == nil { // ignore error @@ -124,9 +128,9 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I return nil, stats, err } - b := &Buffer{Data: buf} - ch := make(chan SaveBlobResponse, 1) - s.saveBlob(ctx, restic.TreeBlob, b, func(res SaveBlobResponse) { + b := &buffer{Data: buf} + ch := make(chan saveBlobResponse, 1) + s.saveBlob(ctx, restic.TreeBlob, b, job.target, func(res saveBlobResponse) { ch <- res }) @@ -145,7 +149,7 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I } } -func (s *TreeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error { +func (s *treeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error { for { var job saveTreeJob var ok bool diff --git a/internal/archiver/tree_saver_test.go b/internal/archiver/tree_saver_test.go index 5de4375d695..4aa4c51f165 100644 --- a/internal/archiver/tree_saver_test.go +++ b/internal/archiver/tree_saver_test.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sync/errgroup" ) -func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) { - cb(SaveBlobResponse{ +func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *buffer, _ string, cb func(res saveBlobResponse)) { + cb(saveBlobResponse{ id: restic.NewRandomID(), known: false, length: len(buf.Data), @@ -21,7 +21,7 @@ func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, cb func(r }) } -func setupTreeSaver() (context.Context, context.CancelFunc, *TreeSaver, func() error) { +func setupTreeSaver() (context.Context, context.CancelFunc, *treeSaver, func() error) { ctx, cancel := context.WithCancel(context.Background()) wg, ctx := errgroup.WithContext(ctx) @@ -29,7 +29,7 @@ func setupTreeSaver() (context.Context, context.CancelFunc, *TreeSaver, func() e return err } - b := NewTreeSaver(ctx, wg, uint(runtime.NumCPU()), treeSaveHelper, errFn) + b := newTreeSaver(ctx, wg, uint(runtime.NumCPU()), treeSaveHelper, errFn) shutdown := func() error { b.TriggerShutdown() @@ -43,7 +43,7 @@ func TestTreeSaver(t *testing.T) { ctx, cancel, b, shutdown := setupTreeSaver() defer cancel() - var results []FutureNode + var 
results []futureNode for i := 0; i < 20; i++ { node := &restic.Node{ @@ -83,13 +83,13 @@ func TestTreeSaverError(t *testing.T) { ctx, cancel, b, shutdown := setupTreeSaver() defer cancel() - var results []FutureNode + var results []futureNode for i := 0; i < test.trees; i++ { node := &restic.Node{ Name: fmt.Sprintf("file-%d", i), } - nodes := []FutureNode{ + nodes := []futureNode{ newFutureNodeWithResult(futureNodeResult{node: &restic.Node{ Name: fmt.Sprintf("child-%d", i), }}), @@ -128,7 +128,7 @@ func TestTreeSaverDuplicates(t *testing.T) { node := &restic.Node{ Name: "file", } - nodes := []FutureNode{ + nodes := []futureNode{ newFutureNodeWithResult(futureNodeResult{node: &restic.Node{ Name: "child", }}), diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go index 7852a4c2ec6..c9fe776b13e 100644 --- a/internal/archiver/tree_test.go +++ b/internal/archiver/tree_test.go @@ -8,11 +8,11 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // debug.Log requires Tree.String. -var _ fmt.Stringer = Tree{} +var _ fmt.Stringer = tree{} func TestPathComponents(t *testing.T) { var tests = []struct { @@ -142,20 +142,20 @@ func TestTree(t *testing.T) { var tests = []struct { targets []string src TestDir - want Tree + want tree unix bool win bool mustError bool }{ { targets: []string{"foo"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": {Path: "foo", Root: "."}, }}, }, { targets: []string{"foo", "bar", "baz"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": {Path: "foo", Root: "."}, "bar": {Path: "bar", Root: "."}, "baz": {Path: "baz", Root: "."}, @@ -163,8 +163,8 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/user1", "foo/user2", "foo/other"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "user2": {Path: filepath.FromSlash("foo/user2")}, "other": {Path: filepath.FromSlash("foo/other")}, @@ -173,9 +173,9 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/work/user1", "foo/work/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/work/user1")}, "user2": {Path: filepath.FromSlash("foo/work/user2")}, }}, @@ -184,50 +184,50 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/user1", "bar/user1", "foo/other"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "other": {Path: filepath.FromSlash("foo/other")}, }}, - "bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{ + "bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("bar/user1")}, }}, }}, }, { targets: []string{"../work"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: 
map[string]tree{ "work": {Root: "..", Path: filepath.FromSlash("../work")}, }}, }, { targets: []string{"../work/other"}, - want: Tree{Nodes: map[string]Tree{ - "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("../work/other")}, }}, }}, }, { targets: []string{"foo/user1", "../work/other", "foo/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "user2": {Path: filepath.FromSlash("foo/user2")}, }}, - "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("../work/other")}, }}, }}, }, { targets: []string{"foo/user1", "../foo/other", "foo/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "user2": {Path: filepath.FromSlash("foo/user2")}, }}, - "foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{ + "foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("../foo/other")}, }}, }}, @@ -240,11 +240,11 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo", "foo/work"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": { Root: ".", FileInfoPath: "foo", - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "file": {Path: filepath.FromSlash("foo/file")}, "work": {Path: filepath.FromSlash("foo/work")}, }, @@ -261,11 +261,11 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work", "foo"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": { Root: ".", FileInfoPath: "foo", - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "file": {Path: filepath.FromSlash("foo/file")}, "work": {Path: filepath.FromSlash("foo/work")}, }, @@ -282,11 +282,11 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work", "foo/work/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "work": { FileInfoPath: filepath.FromSlash("foo/work"), - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/work/user1")}, "user2": {Path: filepath.FromSlash("foo/work/user2")}, }, @@ -304,10 +304,10 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work/user2", "foo/work"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "work": {FileInfoPath: filepath.FromSlash("foo/work"), - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/work/user1")}, "user2": {Path: filepath.FromSlash("foo/work/user2")}, }, @@ -332,12 +332,12 @@ func TestTree(t *testing.T) { }, }, targets: 
[]string{"foo/work/user2/data/secret", "foo"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("foo/other")}, - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ - "user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]Tree{ - "data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]Tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ + "user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]tree{ + "data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]tree{ "secret": { Path: filepath.FromSlash("foo/work/user2/data/secret"), }, @@ -368,10 +368,10 @@ func TestTree(t *testing.T) { }, unix: true, targets: []string{"mnt/driveA", "mnt/driveA/work/driveB"}, - want: Tree{Nodes: map[string]Tree{ - "mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]Tree{ - "driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]tree{ + "driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]tree{ "driveB": { Path: filepath.FromSlash("mnt/driveA/work/driveB"), }, @@ -384,9 +384,9 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/work/user", "foo/work/user"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ "user": {Path: filepath.FromSlash("foo/work/user")}, }}, }}, @@ -394,9 +394,9 @@ func TestTree(t *testing.T) { }, { targets: []string{"./foo/work/user", "foo/work/user"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ "user": {Path: filepath.FromSlash("foo/work/user")}, }}, }}, @@ -405,10 +405,10 @@ func TestTree(t *testing.T) { { win: true, targets: []string{`c:\users\foobar\temp`}, - want: Tree{Nodes: map[string]Tree{ - "c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{ - "users": {FileInfoPath: `c:\users`, Nodes: map[string]Tree{ - "foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]tree{ + "users": {FileInfoPath: `c:\users`, Nodes: map[string]tree{ + "foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]tree{ "temp": {Path: `c:\users\foobar\temp`}, }}, }}, @@ -439,13 +439,13 @@ func TestTree(t *testing.T) { t.Skip("skip test on unix") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, 
tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() - tree, err := NewTree(fs.Local{}, test.targets) + tree, err := newTree(fs.Local{}, test.targets) if test.mustError { if err == nil { t.Fatal("expected error, got nil") diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index adaa37d976d..27390ee13fe 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -37,6 +37,8 @@ type Backend struct { prefix string listMaxItems int layout.Layout + + accessTier blob.AccessTier } const saveLargeSize = 256 * 1024 * 1024 @@ -60,6 +62,11 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } else { endpointSuffix = "core.windows.net" } + + if cfg.AccountName == "" { + return nil, errors.Fatalf("unable to open Azure backend: Account name ($AZURE_ACCOUNT_NAME) is empty") + } + url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container) opts := &azContainer.ClientOptions{ ClientOptions: azcore.ClientOptions{ @@ -102,10 +109,20 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken") } } else { - debug.Log(" - using DefaultAzureCredential") - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - return nil, errors.Wrap(err, "NewDefaultAzureCredential") + var cred azcore.TokenCredential + + if cfg.ForceCliCredential { + debug.Log(" - using AzureCLICredential") + cred, err = azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewAzureCLICredential") + } + } else { + debug.Log(" - using DefaultAzureCredential") + cred, err = azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewDefaultAzureCredential") + } } client, err = azContainer.NewClient(url, cred, opts) @@ -114,20 +131,33 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } } + var accessTier blob.AccessTier + // if the access tier is not supported, then we will not set the access tier; during the upload process, + // the value will be inferred from the default configured on the storage account. + for _, tier := range supportedAccessTiers() { + if strings.EqualFold(string(tier), cfg.AccessTier) { + accessTier = tier + debug.Log(" - using access tier %v", accessTier) + break + } + } + be := &Backend{ - container: client, - cfg: cfg, - connections: cfg.Connections, - Layout: &layout.DefaultLayout{ - Path: cfg.Prefix, - Join: path.Join, - }, + container: client, + cfg: cfg, + connections: cfg.Connections, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, + accessTier: accessTier, } return be, nil } + +func supportedAccessTiers() []blob.AccessTier { + return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive} +} + // Open opens the Azure backend at the specified container. func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) { return open(cfg, rt) @@ -150,8 +180,14 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er if err != nil { return nil, errors.Wrap(err, "container.Create") } + } else if err != nil && bloberror.HasCode(err, bloberror.AuthorizationFailure) { + // We ignore this authorization failure, as it is related to the type + // of SAS/SAT, not an actual failure. If the token is invalid, we + // fail later on anyway. + // For details see Issue #4004.
+ debug.Log("Ignoring AuthorizationFailure when calling GetProperties") } else if err != nil { - return be, err + return be, errors.Wrap(err, "container.GetProperties") } return be, nil @@ -167,20 +203,24 @@ func (be *Backend) IsNotExist(err error) bool { return bloberror.HasCode(err, bloberror.BlobNotFound) } -// Join combines path components with slashes. -func (be *Backend) Join(p ...string) string { - return path.Join(p...) +func (be *Backend) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var aerr *azcore.ResponseError + if errors.As(err, &aerr) { + if aerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || aerr.StatusCode == http.StatusUnauthorized || aerr.StatusCode == http.StatusForbidden { + return true + } + } + return false } func (be *Backend) Connections() uint { return be.connections } -// Location returns this backend's location (the container name). -func (be *Backend) Location() string { - return be.Join(be.cfg.AccountName, be.cfg.Prefix) -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *Backend) Hasher() hash.Hash { return md5.New() @@ -196,25 +236,39 @@ func (be *Backend) Path() string { return be.prefix } +// useAccessTier determines whether to apply the configured access tier to a given file. +// For archive access tier, only data files are stored using that class; metadata +// must remain instantly accessible. +func (be *Backend) useAccessTier(h backend.Handle) bool { + notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive") + isDataFile := h.Type == backend.PackFile && !h.IsMetadata + return isDataFile || notArchiveClass +} + // Save stores data in the backend at the handle. func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { objName := be.Filename(h) debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName) + var accessTier blob.AccessTier + if be.useAccessTier(h) { + accessTier = be.accessTier + } + var err error if rd.Length() < saveLargeSize { // if it's smaller than 256miB, then just create the file directly from the reader - err = be.saveSmall(ctx, objName, rd) + err = be.saveSmall(ctx, objName, rd, accessTier) } else { // otherwise use the more complicated method - err = be.saveLarge(ctx, objName, rd) + err = be.saveLarge(ctx, objName, rd, accessTier) } return err } -func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error { +func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error { blockBlobClient := be.container.NewBlockBlobClient(objName) // upload it as a new "block", use the base64 hash for the ID @@ -235,11 +289,13 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.Rew } blocks := []string{id} - _, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{}) + _, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{ + Tier: &accessTier, + }) return errors.Wrap(err, "CommitBlockList") } -func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error { +func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error { blockBlobClient := be.container.NewBlockBlobClient(objName) buf := make([]byte, 100*1024*1024) @@ -286,7 +342,9 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.Rew return 
errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length()) } - _, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{}) + _, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{ + Tier: &accessTier, + }) debug.Log("uploaded %d parts: %v", len(blocks), blocks) return errors.Wrap(err, "CommitBlockList") @@ -313,6 +371,11 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, err } + if length > 0 && (resp.ContentLength == nil || *resp.ContentLength != int64(length)) { + _ = resp.Body.Close() + return nil, &azcore.ResponseError{ErrorCode: "restic-file-too-short", StatusCode: http.StatusRequestedRangeNotSatisfiable} + } + return resp.Body, err } diff --git a/internal/backend/azure/azure_test.go b/internal/backend/azure/azure_test.go index 7df27d325a3..adafb6b030a 100644 --- a/internal/backend/azure/azure_test.go +++ b/internal/backend/azure/azure_test.go @@ -80,6 +80,91 @@ func BenchmarkBackendAzure(t *testing.B) { newAzureTestSuite().RunBenchmarks(t) } +// TestBackendAzureAccountToken tests that a Storage Account SAS/SAT token can authorize. +// This test ensures that restic can use a token that was generated using the storage +// account keys can be used to authorize the azure connection. +// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the +// RESTIC_TEST_AZURE_ACCOUNT_SAS environment variables to be set, otherwise this test +// will be skipped. +func TestBackendAzureAccountToken(t *testing.T) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_REPOSITORY", + "RESTIC_TEST_AZURE_ACCOUNT_SAS", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("set %v to test SAS/SAT Token Authentication", v) + return + } + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + t.Fatal(err) + } + + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_SAS")) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + _, err = azure.Create(ctx, *cfg, tr) + if err != nil { + t.Fatal(err) + } +} + +// TestBackendAzureContainerToken tests that a container SAS/SAT token can authorize. +// This test ensures that restic can use a token that was generated using a user +// delegation key against the container we are storing data in can be used to +// authorize the azure connection. +// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the +// RESTIC_TEST_AZURE_CONTAINER_SAS environment variables to be set, otherwise this test +// will be skipped. 
+func TestBackendAzureContainerToken(t *testing.T) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_REPOSITORY", + "RESTIC_TEST_AZURE_CONTAINER_SAS", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("set %v to test SAS/SAT Token Authentication", v) + return + } + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + t.Fatal(err) + } + + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_CONTAINER_SAS")) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + _, err = azure.Create(ctx, *cfg, tr) + if err != nil { + t.Fatal(err) + } +} + func TestUploadLargeFile(t *testing.T) { if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" { t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads") diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go index 6ae431f65ea..ee7ac51d8f0 100644 --- a/internal/backend/azure/config.go +++ b/internal/backend/azure/config.go @@ -3,6 +3,7 @@ package azure import ( "os" "path" + "strconv" "strings" "github.com/restic/restic/internal/backend" @@ -13,14 +14,16 @@ import ( // Config contains all configuration necessary to connect to an azure compatible // server. type Config struct { - AccountName string - AccountSAS options.SecretString - AccountKey options.SecretString - EndpointSuffix string - Container string - Prefix string + AccountName string + AccountSAS options.SecretString + AccountKey options.SecretString + ForceCliCredential bool + EndpointSuffix string + Container string + Prefix string - Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + AccessTier string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"` } // NewConfig returns a new Config with the default values filled in. 
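The new access-tier option above is matched case-insensitively against the tiers the backend supports; any other value leaves the tier unset, so uploads inherit the storage account's default. A minimal sketch of that selection logic, using plain strings in place of the Azure SDK's blob.AccessTier values (the names here are illustrative, not the backend's actual API):

```go
package main

import (
	"fmt"
	"strings"
)

// supported mirrors the tiers the backend accepts; the real code compares
// against blob.AccessTier values from the Azure SDK instead of strings.
var supported = []string{"Hot", "Cool", "Cold", "Archive"}

// pickAccessTier returns the matching supported tier, or "" when the
// configured value is unknown and the account default should apply.
func pickAccessTier(configured string) string {
	for _, tier := range supported {
		if strings.EqualFold(tier, configured) {
			return tier
		}
	}
	return ""
}

func main() {
	fmt.Println(pickAccessTier("archive")) // "Archive": matching ignores case
	fmt.Println(pickAccessTier("premium")) // "": unknown tier, use account default
}
```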
@@ -73,6 +76,11 @@ func (cfg *Config) ApplyEnvironment(prefix string) { cfg.AccountSAS = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_SAS")) } + var forceCliCred, err = strconv.ParseBool(os.Getenv(prefix + "AZURE_FORCE_CLI_CREDENTIAL")) + if err == nil { + cfg.ForceCliCredential = forceCliCred + } + if cfg.EndpointSuffix == "" { cfg.EndpointSuffix = os.Getenv(prefix + "AZURE_ENDPOINT_SUFFIX") } diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index bc6ef1a4d15..3ef2bcbe3d1 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -2,6 +2,7 @@ package b2 import ( "context" + "fmt" "hash" "io" "net/http" @@ -31,6 +32,8 @@ type b2Backend struct { canDelete bool } +var errTooShort = fmt.Errorf("file is too short") + // Billing happens in 1000 item granularity, but we are more interested in reducing the number of network round trips const defaultListMaxItems = 10 * 1000 @@ -97,18 +100,17 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen } bucket, err := client.Bucket(ctx, cfg.Bucket) - if err != nil { + if b2.IsNotExist(err) { + return nil, backend.ErrNoRepository + } else if err != nil { return nil, errors.Wrap(err, "Bucket") } be := &b2Backend{ - client: client, - bucket: bucket, - cfg: cfg, - Layout: &layout.DefaultLayout{ - Join: path.Join, - Path: cfg.Prefix, - }, + client: client, + bucket: bucket, + cfg: cfg, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, canDelete: true, } @@ -138,13 +140,10 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back } be := &b2Backend{ - client: client, - bucket: bucket, - cfg: cfg, - Layout: &layout.DefaultLayout{ - Join: path.Join, - Path: cfg.Prefix, - }, + client: client, + bucket: bucket, + cfg: cfg, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, } return be, nil @@ -159,11 +158,6 @@ func (be *b2Backend) Connections() uint { return be.cfg.Connections } -// Location returns the location for the backend. -func (be *b2Backend) Location() string { - return be.cfg.Bucket -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *b2Backend) Hasher() hash.Hash { return nil @@ -186,13 +180,36 @@ func (be *b2Backend) IsNotExist(err error) bool { return false } +func (be *b2Backend) IsPermanentError(err error) bool { + // the library unfortunately endlessly retries authentication errors + return be.IsNotExist(err) || errors.Is(err, errTooShort) +} + // Load runs fn with a reader that yields the contents of the file at h at the // given offset. 
func (be *b2Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn) + return util.DefaultLoad(ctx, h, length, offset, be.openReader, func(rd io.Reader) error { + if length == 0 { + return fn(rd) + } + + // there is no direct way to efficiently check whether the file is too short + // use a LimitedReader to track the number of bytes read + limrd := &io.LimitedReader{R: rd, N: int64(length)} + err := fn(limrd) + + // check the underlying reader to be agnostic to however fn() handles the returned error + _, rderr := rd.Read([]byte{0}) + if rderr == io.EOF && limrd.N != 0 { + // file is too short + return fmt.Errorf("%w: %v", errTooShort, err) + } + + return err + }) } func (be *b2Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { diff --git a/internal/backend/backend.go b/internal/backend/backend.go index aa9920f9b88..f606e1123e8 100644 --- a/internal/backend/backend.go +++ b/internal/backend/backend.go @@ -2,10 +2,13 @@ package backend import ( "context" + "fmt" "hash" "io" ) +var ErrNoRepository = fmt.Errorf("repository does not exist") + // Backend is used to store and access data. // // Backend operations that return an error will be retried when a Backend is @@ -14,10 +17,6 @@ import ( // the context package need not be wrapped, as context cancellation is checked // separately by the retrying logic. type Backend interface { - // Location returns a string that describes the type and location of the - // repository. - Location() string - // Connections returns the maximum number of concurrent backend operations. Connections() uint @@ -38,7 +37,9 @@ type Backend interface { // Load runs fn with a reader that yields the contents of the file at h at the // given offset. If length is larger than zero, only a portion of the file - // is read. + // is read. If the length is larger than zero and the file is too short to return + // the requested length bytes, then an error MUST be returned that is recognized + // by IsPermanentError(). // // The function fn may be called multiple times during the same Load invocation // and therefore must be idempotent. @@ -66,6 +67,12 @@ type Backend interface { // for unwrapping it. IsNotExist(err error) bool + // IsPermanentError returns true if the error can very likely not be resolved + // by retrying the operation. Backends should return true if the file is missing, + // the requested range does not (completely) exist in the file or the user is + // not authorized to perform the requested operation. + IsPermanentError(err error) bool + // Delete removes all data in the backend. Delete(ctx context.Context) error } diff --git a/internal/cache/backend.go b/internal/backend/cache/backend.go similarity index 76% rename from internal/cache/backend.go rename to internal/backend/cache/backend.go index 5cbdb544465..3754266ba96 100644 --- a/internal/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -2,11 +2,14 @@ package cache import ( "context" + "fmt" "io" + "os" "sync" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" ) // Backend wraps a restic.Backend and adds a cache. 
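The b2 backend's too-short detection above rests on a small reader trick: cap the reader at the requested length, let the consumer run, then probe the underlying reader once. If the probe reports EOF while the limit is not yet exhausted, the file ended before the requested range did. A self-contained sketch of that pattern, under the assumption of a generic io.Reader (checkLength and errTooShort are illustrative stand-ins for the backend's types):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errTooShort = errors.New("file is too short")

// checkLength runs fn on at most length bytes of rd and reports
// errTooShort when rd ends before length bytes were available.
func checkLength(rd io.Reader, length int, fn func(io.Reader) error) error {
	limrd := &io.LimitedReader{R: rd, N: int64(length)}
	err := fn(limrd)

	// probe the underlying reader: EOF with a non-zero remainder means
	// the source was shorter than the requested range
	_, rderr := rd.Read([]byte{0})
	if rderr == io.EOF && limrd.N != 0 {
		return fmt.Errorf("%w: %v", errTooShort, err)
	}
	return err
}

func main() {
	err := checkLength(strings.NewReader("abc"), 10, func(r io.Reader) error {
		_, err := io.ReadAll(r)
		return err
	})
	fmt.Println(errors.Is(err, errTooShort)) // true: only 3 of 10 bytes exist
}
```

Probing the wrapped reader rather than the LimitedReader keeps the check independent of how fn() consumed the data or what error it returned.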
@@ -40,7 +43,8 @@ func (b *Backend) Remove(ctx context.Context, h backend.Handle) error { return err } - return b.Cache.remove(h) + _, err = b.Cache.remove(h) + return err } func autoCacheTypes(h backend.Handle) bool { @@ -79,10 +83,9 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR return err } - err = b.Cache.Save(h, rd) + err = b.Cache.save(h, rd) if err != nil { debug.Log("unable to save %v to cache: %v", h, err) - _ = b.Cache.remove(h) return err } @@ -120,11 +123,11 @@ func (b *Backend) cacheFile(ctx context.Context, h backend.Handle) error { if !b.Cache.Has(h) { // nope, it's still not in the cache, pull it from the repo and save it err := b.Backend.Load(ctx, h, 0, 0, func(rd io.Reader) error { - return b.Cache.Save(h, rd) + return b.Cache.save(h, rd) }) if err != nil { // try to remove from the cache, ignore errors - _ = b.Cache.remove(h) + _, _ = b.Cache.remove(h) } return err } @@ -134,9 +137,9 @@ func (b *Backend) cacheFile(ctx context.Context, h backend.Handle) error { // loadFromCache will try to load the file from the cache. func (b *Backend) loadFromCache(h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) { - rd, err := b.Cache.load(h, length, offset) + rd, inCache, err := b.Cache.load(h, length, offset) if err != nil { - return false, err + return inCache, err } err = consumer(rd) @@ -162,14 +165,12 @@ func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset // try loading from cache without checking that the handle is actually cached inCache, err := b.loadFromCache(h, length, offset, consumer) if inCache { - if err == nil { - return nil + if err != nil { + debug.Log("error loading %v from cache: %v", h, err) } - - // drop from cache and retry once - _ = b.Cache.remove(h) + // the caller must explicitly use cache.Forget() to remove the cache entry + return err } - debug.Log("error loading %v from cache: %v", h, err) // if we don't automatically cache this file type, fall back to the backend if !autoCacheTypes(h) { @@ -185,6 +186,9 @@ func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset inCache, err = b.loadFromCache(h, length, offset, consumer) if inCache { + if err != nil { + debug.Log("error loading %v from cache: %v", h, err) + } return err } @@ -198,13 +202,9 @@ func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, debug.Log("cache Stat(%v)", h) fi, err := b.Backend.Stat(ctx, h) - if err != nil { - if b.Backend.IsNotExist(err) { - // try to remove from the cache, ignore errors - _ = b.Cache.remove(h) - } - - return fi, err + if err != nil && b.Backend.IsNotExist(err) { + // try to remove from the cache, ignore errors + _, _ = b.Cache.remove(h) } return fi, err @@ -218,3 +218,43 @@ func (b *Backend) IsNotExist(err error) bool { func (b *Backend) Unwrap() backend.Backend { return b.Backend } + +func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backend.FileInfo) error) error { + if !b.Cache.canBeCached(t) { + return b.Backend.List(ctx, t, fn) + } + + // will contain the IDs of the files that are in the repository + ids := restic.NewIDSet() + + // wrap the original function to also add the file to the ids set + wrapFn := func(f backend.FileInfo) error { + id, err := restic.ParseID(f.Name) + if err != nil { + // ignore files with invalid name + return nil + } + + ids.Insert(id) + + // execute the original function + return fn(f) + } + + err := b.Backend.List(ctx, t, wrapFn) + if err != nil { + 
return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + + // clear the cache for files that are not in the repo anymore, ignore errors + err = b.Cache.Clear(t, ids) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing %s files in cache: %v\n", t.String(), err) + } + + return nil +} diff --git a/internal/backend/cache/backend_test.go b/internal/backend/cache/backend_test.go new file mode 100644 index 00000000000..7f83e40cbcd --- /dev/null +++ b/internal/backend/cache/backend_test.go @@ -0,0 +1,315 @@ +package cache + +import ( + "bytes" + "context" + "io" + "math/rand" + "strings" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/mem" + backendtest "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +func loadAndCompare(t testing.TB, be backend.Backend, h backend.Handle, data []byte) { + buf, err := backendtest.LoadAll(context.TODO(), be, h) + if err != nil { + t.Fatal(err) + } + + if len(buf) != len(data) { + t.Fatalf("wrong number of bytes read, want %v, got %v", len(data), len(buf)) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[:16], buf[:16]) + } +} + +func save(t testing.TB, be backend.Backend, h backend.Handle, data []byte) { + err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher())) + if err != nil { + t.Fatal(err) + } +} + +func remove(t testing.TB, be backend.Backend, h backend.Handle) { + err := be.Remove(context.TODO(), h) + if err != nil { + t.Fatal(err) + } +} + +func randomData(n int) (backend.Handle, []byte) { + data := test.Random(rand.Int(), n) + id := restic.Hash(data) + h := backend.Handle{ + Type: backend.IndexFile, + Name: id.String(), + } + return h, data +} + +func list(t testing.TB, be backend.Backend, fn func(backend.FileInfo) error) { + err := be.List(context.TODO(), backend.IndexFile, fn) + if err != nil { + t.Fatal(err) + } +} + +func TestBackend(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + wbe := c.Wrap(be) + + h, data := randomData(5234142) + + // save directly in backend + save(t, be, h, data) + if c.Has(h) { + t.Errorf("cache has file too early") + } + + // load data via cache + loadAndCompare(t, wbe, h, data) + if !c.Has(h) { + t.Errorf("cache doesn't have file after load") + } + + // remove via cache + remove(t, wbe, h) + if c.Has(h) { + t.Errorf("cache has file after remove") + } + + // save via cache + save(t, wbe, h, data) + if !c.Has(h) { + t.Errorf("cache doesn't have file after load") + } + + // load data directly from backend + loadAndCompare(t, be, h, data) + + // load data via cache + loadAndCompare(t, wbe, h, data) + + // remove directly + remove(t, be, h) + if !c.Has(h) { + t.Errorf("file not in cache any more") + } + + // run stat + _, err := wbe.Stat(context.TODO(), h) + if err == nil { + t.Errorf("expected error for removed file not found, got nil") + } + + if !wbe.IsNotExist(err) { + t.Errorf("Stat() returned error that does not match IsNotExist(): %v", err) + } + + if c.Has(h) { + t.Errorf("removed file still in cache after stat") + } +} + +type loadCountingBackend struct { + backend.Backend + ctr int +} + +func (l *loadCountingBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + l.ctr++ + return l.Backend.Load(ctx, h, length, offset, fn) +} + +func TestOutOfBoundsAccess(t *testing.T) { + 
be := &loadCountingBackend{Backend: mem.New()} + c := TestNewCache(t) + wbe := c.Wrap(be) + + h, data := randomData(50) + save(t, be, h, data) + + // load out of bounds + err := wbe.Load(context.TODO(), h, 100, 100, func(rd io.Reader) error { + t.Error("cache returned non-existent file section") + return errors.New("broken") + }) + test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err) + test.Equals(t, 1, be.ctr, "expected file to be loaded only once") + // file must nevertheless get cached + if !c.Has(h) { + t.Errorf("cache doesn't have file after load") + } + + // start within bounds, but request too large chunk + err = wbe.Load(context.TODO(), h, 100, 0, func(rd io.Reader) error { + t.Error("cache returned non-existent file section") + return errors.New("broken") + }) + test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err) + test.Equals(t, 1, be.ctr, "expected file to be loaded only once") +} + +func TestForget(t *testing.T) { + be := &loadCountingBackend{Backend: mem.New()} + c := TestNewCache(t) + wbe := c.Wrap(be) + + h, data := randomData(50) + save(t, be, h, data) + + loadAndCompare(t, wbe, h, data) + test.Equals(t, 1, be.ctr, "expected file to be loaded once") + + // must still exist even if load returns an error + exp := errors.New("error") + err := wbe.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { + return exp + }) + test.Equals(t, exp, err, "wrong error") + test.Assert(t, c.Has(h), "missing cache entry") + + test.OK(t, c.Forget(h)) + test.Assert(t, !c.Has(h), "cache entry should have been removed") + + // cache it again + loadAndCompare(t, wbe, h, data) + test.Assert(t, c.Has(h), "missing cache entry") + + // forget must delete file only once + err = c.Forget(h) + test.Assert(t, strings.Contains(err.Error(), "circuit breaker prevents repeated deletion of cached file"), "wrong error message %q", err) + test.Assert(t, c.Has(h), "cache entry should still exist") +} + +type loadErrorBackend struct { + backend.Backend + loadError error +} + +func (be loadErrorBackend) Load(_ context.Context, _ backend.Handle, _ int, _ int64, _ func(rd io.Reader) error) error { + time.Sleep(10 * time.Millisecond) + return be.loadError +} + +func TestErrorBackend(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + h, data := randomData(5234142) + + // save directly in backend + save(t, be, h, data) + + testErr := errors.New("test error") + errBackend := loadErrorBackend{ + Backend: be, + loadError: testErr, + } + + loadTest := func(wg *sync.WaitGroup, be backend.Backend) { + defer wg.Done() + + buf, err := backendtest.LoadAll(context.TODO(), be, h) + if err == testErr { + return + } + + if err != nil { + t.Error(err) + return + } + + if !bytes.Equal(buf, data) { + t.Errorf("data does not match") + } + time.Sleep(time.Millisecond) + } + + wrappedBE := c.Wrap(errBackend) + var wg sync.WaitGroup + for i := 0; i < 5; i++ { + wg.Add(1) + go loadTest(&wg, wrappedBE) + } + + wg.Wait() +} + +func TestAutomaticCacheClear(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + wbe := c.Wrap(be) + + // add two handles h1 and h2 + h1, data := randomData(2000) + // save h1 directly to the backend + save(t, be, h1, data) + if c.Has(h1) { + t.Errorf("cache has file1 too early") + } + + h2, data2 := randomData(3000) + + // save h2 directly to the backend + save(t, be, h2, data2) + if c.Has(h2) { + t.Errorf("cache has file2 too early") + } + + loadAndCompare(t, wbe, h1, data) + if !c.Has(h1) { + 
t.Errorf("cache doesn't have file1 after load") + } + + loadAndCompare(t, wbe, h2, data2) + if !c.Has(h2) { + t.Errorf("cache doesn't have file2 after load") + } + + // remove h1 directly from the backend + remove(t, be, h1) + if !c.Has(h1) { + t.Errorf("file1 not in cache any more, should be removed from cache only after list") + } + + // list all files in the backend + list(t, wbe, func(_ backend.FileInfo) error { return nil }) + + // h1 should be removed from the cache + if c.Has(h1) { + t.Errorf("cache has file1 after remove") + } + + // h2 should still be in the cache + if !c.Has(h2) { + t.Errorf("cache doesn't have file2 after list") + } +} + +func TestAutomaticCacheClearInvalidFilename(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + + data := test.Random(rand.Int(), 42) + h := backend.Handle{ + Type: backend.IndexFile, + Name: "tmp12345", + } + save(t, be, h, data) + + wbe := c.Wrap(be) + + // list all files in the backend + list(t, wbe, func(_ backend.FileInfo) error { return nil }) +} diff --git a/internal/cache/cache.go b/internal/backend/cache/cache.go similarity index 94% rename from internal/cache/cache.go rename to internal/backend/cache/cache.go index 19b3182df5e..2893df5014a 100644 --- a/internal/cache/cache.go +++ b/internal/backend/cache/cache.go @@ -6,12 +6,12 @@ import ( "path/filepath" "regexp" "strconv" + "sync" "time" "github.com/pkg/errors" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -20,6 +20,8 @@ type Cache struct { path string Base string Created bool + + forgotten sync.Map } const dirMode = 0700 @@ -51,7 +53,7 @@ const cachedirTagSignature = "Signature: 8a477f597d28d172789f06886806bc55\n" func writeCachedirTag(dir string) error { tagfile := filepath.Join(dir, "CACHEDIR.TAG") - f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode) + f, err := os.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode) if err != nil { if errors.Is(err, os.ErrExist) { return nil @@ -82,7 +84,7 @@ func New(id string, basedir string) (c *Cache, err error) { } } - err = fs.MkdirAll(basedir, dirMode) + err = os.MkdirAll(basedir, dirMode) if err != nil { return nil, errors.WithStack(err) } @@ -110,7 +112,7 @@ func New(id string, basedir string) (c *Cache, err error) { case errors.Is(err, os.ErrNotExist): // Create the repo cache dir. The parent exists, so Mkdir suffices. - err := fs.Mkdir(cachedir, dirMode) + err := os.Mkdir(cachedir, dirMode) switch { case err == nil: created = true @@ -131,7 +133,7 @@ func New(id string, basedir string) (c *Cache, err error) { } for _, p := range cacheLayoutPaths { - if err = fs.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil { + if err = os.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil { return nil, errors.WithStack(err) } } @@ -149,7 +151,7 @@ func New(id string, basedir string) (c *Cache, err error) { // directory d to the current time. func updateTimestamp(d string) error { t := time.Now() - return fs.Chtimes(d, t, t) + return os.Chtimes(d, t, t) } // MaxCacheAge is the default age (30 days) after which cache directories are considered old. @@ -162,7 +164,7 @@ func validCacheDirName(s string) bool { // listCacheDirs returns the list of cache directories. 
func listCacheDirs(basedir string) ([]os.FileInfo, error) { - f, err := fs.Open(basedir) + f, err := os.Open(basedir) if err != nil { if errors.Is(err, os.ErrNotExist) { err = nil diff --git a/internal/cache/cache_test.go b/internal/backend/cache/cache_test.go similarity index 100% rename from internal/cache/cache_test.go rename to internal/backend/cache/cache_test.go diff --git a/internal/cache/dir.go b/internal/backend/cache/dir.go similarity index 100% rename from internal/cache/dir.go rename to internal/backend/cache/dir.go diff --git a/internal/cache/dir_test.go b/internal/backend/cache/dir_test.go similarity index 100% rename from internal/cache/dir_test.go rename to internal/backend/cache/dir_test.go diff --git a/internal/cache/file.go b/internal/backend/cache/file.go similarity index 63% rename from internal/cache/file.go rename to internal/backend/cache/file.go index 48a38c1d387..062d6ea3f64 100644 --- a/internal/cache/file.go +++ b/internal/backend/cache/file.go @@ -1,6 +1,7 @@ package cache import ( + "fmt" "io" "os" "path/filepath" @@ -8,9 +9,9 @@ import ( "github.com/pkg/errors" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -31,54 +32,54 @@ func (c *Cache) canBeCached(t backend.FileType) bool { return ok } -// Load returns a reader that yields the contents of the file with the +// load returns a reader that yields the contents of the file with the // given handle. rd must be closed after use. If an error is returned, the -// ReadCloser is nil. -func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, error) { +// ReadCloser is nil. The bool return value indicates whether the requested +// file exists in the cache. It can be true even when no reader is returned +// because length or offset are out of bounds +func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, bool, error) { debug.Log("Load(%v, %v, %v) from cache", h, length, offset) if !c.canBeCached(h.Type) { - return nil, errors.New("cannot be cached") + return nil, false, errors.New("cannot be cached") } - f, err := fs.Open(c.filename(h)) + f, err := os.Open(c.filename(h)) if err != nil { - return nil, errors.WithStack(err) + return nil, false, errors.WithStack(err) } fi, err := f.Stat() if err != nil { _ = f.Close() - return nil, errors.WithStack(err) + return nil, true, errors.WithStack(err) } size := fi.Size() if size <= int64(crypto.CiphertextLength(0)) { _ = f.Close() - _ = c.remove(h) - return nil, errors.Errorf("cached file %v is truncated, removing", h) + return nil, true, errors.Errorf("cached file %v is truncated", h) } if size < offset+int64(length) { _ = f.Close() - _ = c.remove(h) - return nil, errors.Errorf("cached file %v is too small, removing", h) + return nil, true, errors.Errorf("cached file %v is too short", h) } if offset > 0 { if _, err = f.Seek(offset, io.SeekStart); err != nil { _ = f.Close() - return nil, err + return nil, true, err } } if length <= 0 { - return f, nil + return f, true, nil } - return backend.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), true, nil } -// Save saves a file in the cache. -func (c *Cache) Save(h backend.Handle, rd io.Reader) error { +// save saves a file in the cache. 
+func (c *Cache) save(h backend.Handle, rd io.Reader) error { debug.Log("Save to cache: %v", h) if rd == nil { return errors.New("Save() called with nil reader") @@ -89,7 +90,7 @@ func (c *Cache) Save(h backend.Handle, rd io.Reader) error { finalname := c.filename(h) dir := filepath.Dir(finalname) - err := fs.Mkdir(dir, 0700) + err := os.Mkdir(dir, 0700) if err != nil && !errors.Is(err, os.ErrExist) { return err } @@ -104,26 +105,26 @@ func (c *Cache) Save(h backend.Handle, rd io.Reader) error { n, err := io.Copy(f, rd) if err != nil { _ = f.Close() - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) return errors.Wrap(err, "Copy") } if n <= int64(crypto.CiphertextLength(0)) { _ = f.Close() - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) debug.Log("trying to cache truncated file %v, removing", h) return nil } // Close, then rename. Windows doesn't like the reverse order. if err = f.Close(); err != nil { - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) return errors.WithStack(err) } - err = fs.Rename(f.Name(), finalname) + err = os.Rename(f.Name(), finalname) if err != nil { - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) } if runtime.GOOS == "windows" && errors.Is(err, os.ErrPermission) { // On Windows, renaming over an existing file is ok @@ -138,13 +139,34 @@ func (c *Cache) Save(h backend.Handle, rd io.Reader) error { return errors.WithStack(err) } -// Remove deletes a file. When the file is not cache, no error is returned. -func (c *Cache) remove(h backend.Handle) error { - if !c.Has(h) { - return nil +func (c *Cache) Forget(h backend.Handle) error { + h.IsMetadata = false + + if _, ok := c.forgotten.Load(h); ok { + // Delete a file at most once while restic runs. + // This prevents repeatedly caching and forgetting broken files + return fmt.Errorf("circuit breaker prevents repeated deletion of cached file %v", h) } - return fs.Remove(c.filename(h)) + removed, err := c.remove(h) + if removed { + c.forgotten.Store(h, struct{}{}) + } + return err +} + +// remove deletes a file. When the file is not cached, no error is returned. 
+func (c *Cache) remove(h backend.Handle) (bool, error) { + if !c.canBeCached(h.Type) { + return false, nil + } + + err := os.Remove(c.filename(h)) + removed := err == nil + if errors.Is(err, os.ErrNotExist) { + err = nil + } + return removed, err } // Clear removes all files of type t from the cache that are not contained in @@ -165,7 +187,8 @@ func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error { continue } - if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil { + // ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently + if err = os.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) { return err } } @@ -187,6 +210,10 @@ func (c *Cache) list(t restic.FileType) (restic.IDSet, error) { dir := filepath.Join(c.path, cacheLayoutPaths[t]) err := filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error { if err != nil { + // ignore ErrNotExist to gracefully handle multiple processes clearing the cache + if errors.Is(err, os.ErrNotExist) { + return nil + } return errors.Wrap(err, "Walk") } @@ -212,6 +239,6 @@ func (c *Cache) Has(h backend.Handle) bool { return false } - _, err := fs.Stat(c.filename(h)) + _, err := os.Stat(c.filename(h)) return err == nil } diff --git a/internal/cache/file_test.go b/internal/backend/cache/file_test.go similarity index 82% rename from internal/cache/file_test.go rename to internal/backend/cache/file_test.go index 7935f9806cf..942f71f91b8 100644 --- a/internal/cache/file_test.go +++ b/internal/backend/cache/file_test.go @@ -12,17 +12,16 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) -func generateRandomFiles(t testing.TB, tpe backend.FileType, c *Cache) restic.IDSet { +func generateRandomFiles(t testing.TB, random *rand.Rand, tpe backend.FileType, c *Cache) restic.IDSet { ids := restic.NewIDSet() - for i := 0; i < rand.Intn(15)+10; i++ { - buf := test.Random(rand.Int(), 1<<19) + for i := 0; i < random.Intn(15)+10; i++ { + buf := rtest.Random(random.Int(), 1<<19) id := restic.Hash(buf) h := backend.Handle{Type: tpe, Name: id.String()} @@ -30,7 +29,7 @@ func generateRandomFiles(t testing.TB, tpe backend.FileType, c *Cache) restic.ID t.Errorf("index %v present before save", id) } - err := c.Save(h, bytes.NewReader(buf)) + err := c.save(h, bytes.NewReader(buf)) if err != nil { t.Fatal(err) } @@ -48,10 +47,11 @@ func randomID(s restic.IDSet) restic.ID { } func load(t testing.TB, c *Cache, h backend.Handle) []byte { - rd, err := c.load(h, 0, 0) + rd, inCache, err := c.load(h, 0, 0) if err != nil { t.Fatal(err) } + rtest.Equals(t, true, inCache, "expected inCache flag to be true") if rd == nil { t.Fatalf("load() returned nil reader") @@ -87,7 +87,7 @@ func clearFiles(t testing.TB, c *Cache, tpe restic.FileType, valid restic.IDSet) func TestFiles(t *testing.T) { seed := time.Now().Unix() t.Logf("seed is %v", seed) - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) c := TestNewCache(t) @@ -99,7 +99,7 @@ func TestFiles(t *testing.T) { for _, tpe := range tests { t.Run(tpe.String(), func(t *testing.T) { - ids := generateRandomFiles(t, tpe, c) + ids := generateRandomFiles(t, random, tpe, c) id := randomID(ids) h := backend.Handle{Type: tpe, Name: id.String()} 
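The Forget circuit breaker introduced in cache/file.go above amounts to a concurrency-safe set of already-deleted handles: the first call deletes the file and records the handle, and every later call for the same handle is refused, which prevents cycles of re-caching and re-deleting a broken file. A reduced sketch of the idea, with the actual removal injected as a function (forgetter is a hypothetical name, not the cache's API):

```go
package main

import (
	"fmt"
	"sync"
)

// forgetter deletes each entry at most once per process lifetime, mirroring
// the cache's protection against repeatedly caching and forgetting a file.
type forgetter struct {
	forgotten sync.Map
	removeFn  func(name string) error // the actual file removal, injected for this sketch
}

func (f *forgetter) Forget(name string) error {
	if _, ok := f.forgotten.Load(name); ok {
		return fmt.Errorf("circuit breaker prevents repeated deletion of %v", name)
	}
	if err := f.removeFn(name); err != nil {
		return err
	}
	f.forgotten.Store(name, struct{}{})
	return nil
}

func main() {
	f := &forgetter{removeFn: func(string) error { return nil }}
	fmt.Println(f.Forget("abc123")) // <nil>: first deletion succeeds
	fmt.Println(f.Forget("abc123")) // refused: the handle was already forgotten
}
```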
@@ -139,19 +139,19 @@ func TestFiles(t *testing.T) { func TestFileLoad(t *testing.T) { seed := time.Now().Unix() t.Logf("seed is %v", seed) - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) c := TestNewCache(t) // save about 5 MiB of data in the cache - data := test.Random(rand.Int(), 5234142) + data := rtest.Random(random.Int(), 5234142) id := restic.ID{} copy(id[:], data) h := backend.Handle{ Type: restic.PackFile, Name: id.String(), } - if err := c.Save(h, bytes.NewReader(data)); err != nil { + if err := c.save(h, bytes.NewReader(data)); err != nil { t.Fatalf("Save() returned error: %v", err) } @@ -169,10 +169,11 @@ func TestFileLoad(t *testing.T) { for _, test := range tests { t.Run(fmt.Sprintf("%v/%v", test.length, test.offset), func(t *testing.T) { - rd, err := c.load(h, test.length, test.offset) + rd, inCache, err := c.load(h, test.length, test.offset) if err != nil { t.Fatal(err) } + rtest.Equals(t, true, inCache, "expected inCache flag to be true") buf, err := io.ReadAll(rd) if err != nil { @@ -221,15 +222,20 @@ func TestFileSaveConcurrent(t *testing.T) { t.Skip("may not work due to FILE_SHARE_DELETE issue") } + seed := time.Now().Unix() + t.Logf("seed is %v", seed) + random := rand.New(rand.NewSource(seed)) + const nproc = 40 var ( c = TestNewCache(t) - data = test.Random(1, 10000) + data = rtest.Random(1, 10000) g errgroup.Group id restic.ID ) - rand.Read(id[:]) + + random.Read(id[:]) h := backend.Handle{ Type: restic.PackFile, @@ -237,7 +243,7 @@ func TestFileSaveConcurrent(t *testing.T) { } for i := 0; i < nproc/2; i++ { - g.Go(func() error { return c.Save(h, bytes.NewReader(data)) }) + g.Go(func() error { return c.save(h, bytes.NewReader(data)) }) // Can't use load because only the main goroutine may call t.Fatal. g.Go(func() error { @@ -245,7 +251,7 @@ func TestFileSaveConcurrent(t *testing.T) { // ensure is ENOENT or nil error. time.Sleep(time.Duration(100+rand.Intn(200)) * time.Millisecond) - f, err := c.load(h, 0, 0) + f, _, err := c.load(h, 0, 0) t.Logf("Load error: %v", err) switch { case err == nil: @@ -264,23 +270,23 @@ func TestFileSaveConcurrent(t *testing.T) { }) } - test.OK(t, g.Wait()) + rtest.OK(t, g.Wait()) saved := load(t, c, h) - test.Equals(t, data, saved) + rtest.Equals(t, data, saved) } func TestFileSaveAfterDamage(t *testing.T) { c := TestNewCache(t) - test.OK(t, fs.RemoveAll(c.path)) + rtest.OK(t, os.RemoveAll(c.path)) // save a few bytes of data in the cache - data := test.Random(123456789, 42) + data := rtest.Random(123456789, 42) id := restic.Hash(data) h := backend.Handle{ Type: restic.PackFile, Name: id.String(), } - if err := c.Save(h, bytes.NewReader(data)); err == nil { + if err := c.save(h, bytes.NewReader(data)); err == nil { t.Fatal("Missing error when saving to deleted cache directory") } } diff --git a/internal/cache/testing.go b/internal/backend/cache/testing.go similarity index 100% rename from internal/cache/testing.go rename to internal/backend/cache/testing.go diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go index b3db0210f7e..8af0ce9adc4 100644 --- a/internal/backend/dryrun/dry_backend.go +++ b/internal/backend/dryrun/dry_backend.go @@ -46,11 +46,6 @@ func (be *Backend) Connections() uint { return be.b.Connections() } -// Location returns the location of the backend. -func (be *Backend) Location() string { - return "DRY:" + be.b.Location() -} - // Delete removes all data in the backend. 
func (be *Backend) Delete(_ context.Context) error { return nil @@ -72,6 +67,10 @@ func (be *Backend) IsNotExist(err error) bool { return be.b.IsNotExist(err) } +func (be *Backend) IsPermanentError(err error) bool { + return be.b.IsPermanentError(err) +} + func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { return be.b.List(ctx, t, fn) } diff --git a/internal/backend/dryrun/dry_backend_test.go b/internal/backend/dryrun/dry_backend_test.go index 56962107d73..be98f531099 100644 --- a/internal/backend/dryrun/dry_backend_test.go +++ b/internal/backend/dryrun/dry_backend_test.go @@ -36,7 +36,6 @@ func TestDry(t *testing.T) { content string wantErr string }{ - {d, "loc", "", "DRY:RAM", ""}, {d, "delete", "", "", ""}, {d, "stat", "a", "", "not found"}, {d, "list", "", "", ""}, @@ -76,11 +75,6 @@ func TestDry(t *testing.T) { if files != step.content { t.Errorf("%d. List = %q, want %q", i, files, step.content) } - case "loc": - loc := step.be.Location() - if loc != step.content { - t.Errorf("%d. Location = %q, want %q", i, loc, step.content) - } case "delete": err = step.be.Delete(ctx) case "remove": @@ -96,7 +90,7 @@ func TestDry(t *testing.T) { } case "load": data := "" - err = step.be.Load(ctx, handle, 100, 0, func(rd io.Reader) error { + err = step.be.Load(ctx, handle, 0, 0, func(rd io.Reader) error { buf, err := io.ReadAll(rd) data = string(buf) return err diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index 77d20e05656..ad50f194b52 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -12,12 +12,13 @@ import ( "strings" "cloud.google.com/go/storage" - "github.com/pkg/errors" + "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -104,17 +105,14 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } be := &Backend{ - gcsClient: gcsClient, - projectID: cfg.ProjectID, - connections: cfg.Connections, - bucketName: cfg.Bucket, - region: cfg.Region, - bucket: gcsClient.Bucket(cfg.Bucket), - prefix: cfg.Prefix, - Layout: &layout.DefaultLayout{ - Path: cfg.Prefix, - Join: path.Join, - }, + gcsClient: gcsClient, + projectID: cfg.ProjectID, + connections: cfg.Connections, + bucketName: cfg.Bucket, + region: cfg.Region, + bucket: gcsClient.Bucket(cfg.Bucket), + prefix: cfg.Prefix, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, } @@ -134,7 +132,7 @@ func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) { be, err := open(cfg, rt) if err != nil { - return nil, errors.Wrap(err, "open") + return nil, err } // Try to determine if the bucket exists. If it does not, try to create it. @@ -145,7 +143,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back // however, the client doesn't have storage.bucket.get permission return be, nil } - return nil, errors.Wrap(err, "service.Buckets.Get") + return nil, errors.WithStack(err) } if !exists { @@ -155,7 +153,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back // Bucket doesn't exist, try to create it. 
if err := be.bucket.Create(ctx, be.projectID, bucketAttrs); err != nil { // Always an error, as the bucket definitely doesn't exist. - return nil, errors.Wrap(err, "service.Buckets.Insert") + return nil, errors.WithStack(err) } } @@ -173,20 +171,25 @@ func (be *Backend) IsNotExist(err error) bool { return errors.Is(err, storage.ErrObjectNotExist) } -// Join combines path components with slashes. -func (be *Backend) Join(p ...string) string { - return path.Join(p...) +func (be *Backend) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var gerr *googleapi.Error + if errors.As(err, &gerr) { + if gerr.Code == http.StatusRequestedRangeNotSatisfiable || gerr.Code == http.StatusUnauthorized || gerr.Code == http.StatusForbidden { + return true + } + } + + return false } func (be *Backend) Connections() uint { return be.connections } -// Location returns this backend's location (the bucket name). -func (be *Backend) Location() string { - return be.Join(be.bucketName, be.prefix) -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *Backend) Hasher() hash.Hash { return md5.New() @@ -241,7 +244,7 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind } if err != nil { - return errors.Wrap(err, "service.Objects.Insert") + return errors.WithStack(err) } // sanity check @@ -273,6 +276,11 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, err } + if length > 0 && r.Attrs.Size < offset+int64(length) { + _ = r.Close() + return nil, &googleapi.Error{Code: http.StatusRequestedRangeNotSatisfiable, Message: "restic-file-too-short"} + } + return r, err } @@ -283,7 +291,7 @@ func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileI attr, err := be.bucket.Object(objName).Attrs(ctx) if err != nil { - return backend.FileInfo{}, errors.Wrap(err, "service.Objects.Get") + return backend.FileInfo{}, errors.WithStack(err) } return backend.FileInfo{Size: attr.Size, Name: h.Name}, nil @@ -299,7 +307,7 @@ func (be *Backend) Remove(ctx context.Context, h backend.Handle) error { err = nil } - return errors.Wrap(err, "client.RemoveObject") + return errors.WithStack(err) } // List runs fn for each file in the backend which has the type t. 
When an diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 9ee1c91f112..5a3856e412e 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -10,8 +10,11 @@ import ( "strings" "time" + "github.com/peterbourgon/unixtransport" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" + "golang.org/x/net/http2" ) // TransportOptions collects various options which can be set for an HTTP based @@ -25,6 +28,12 @@ type TransportOptions struct { // Skip TLS certificate verification InsecureTLS bool + + // Specify Custom User-Agent for the http Client + HTTPUserAgent string + + // Timeout after which to retry stuck requests + StuckRequestTimeout time.Duration } // readPEMCertKey reads a file and returns the PEM encoded certificate and key @@ -73,7 +82,6 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { KeepAlive: 30 * time.Second, DualStack: true, }).DialContext, - ForceAttemptHTTP2: true, MaxIdleConns: 100, MaxIdleConnsPerHost: 100, IdleConnTimeout: 90 * time.Second, @@ -82,6 +90,19 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { TLSClientConfig: &tls.Config{}, } + // ensure that http2 connections are closed if they are broken + h2, err := http2.ConfigureTransports(tr) + if err != nil { + panic(err) + } + if feature.Flag.Enabled(feature.BackendErrorRedesign) { + h2.WriteByteTimeout = 120 * time.Second + h2.ReadIdleTimeout = 60 * time.Second + h2.PingTimeout = 60 * time.Second + } + + unixtransport.Register(tr) + if opts.InsecureTLS { tr.TLSClientConfig.InsecureSkipVerify = true } @@ -116,6 +137,22 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { tr.TLSClientConfig.RootCAs = pool } + rt := http.RoundTripper(tr) + + // if the userAgent is set in the Transport Options, wrap the + // http.RoundTripper + if opts.HTTPUserAgent != "" { + rt = newCustomUserAgentRoundTripper(rt, opts.HTTPUserAgent) + } + + if feature.Flag.Enabled(feature.BackendErrorRedesign) { + if opts.StuckRequestTimeout == 0 { + opts.StuckRequestTimeout = 5 * time.Minute + } + + rt = newWatchdogRoundtripper(rt, opts.StuckRequestTimeout, 128*1024) + } + // wrap in the debug round tripper (if active) - return debug.RoundTripper(tr), nil + return debug.RoundTripper(rt), nil } diff --git a/internal/backend/httpuseragent_roundtripper.go b/internal/backend/httpuseragent_roundtripper.go new file mode 100644 index 00000000000..6272aa41ace --- /dev/null +++ b/internal/backend/httpuseragent_roundtripper.go @@ -0,0 +1,25 @@ +package backend + +import "net/http" + +// httpUserAgentRoundTripper is a custom http.RoundTripper that modifies the User-Agent header +// of outgoing HTTP requests. +type httpUserAgentRoundTripper struct { + userAgent string + rt http.RoundTripper +} + +func newCustomUserAgentRoundTripper(rt http.RoundTripper, userAgent string) *httpUserAgentRoundTripper { + return &httpUserAgentRoundTripper{ + rt: rt, + userAgent: userAgent, + } +} + +// RoundTrip modifies the User-Agent header of the request and then delegates the request +// to the underlying RoundTripper. 
+func (c *httpUserAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = req.Clone(req.Context()) + req.Header.Set("User-Agent", c.userAgent) + return c.rt.RoundTrip(req) +} diff --git a/internal/backend/httpuseragent_roundtripper_test.go b/internal/backend/httpuseragent_roundtripper_test.go new file mode 100644 index 00000000000..0a81c418af1 --- /dev/null +++ b/internal/backend/httpuseragent_roundtripper_test.go @@ -0,0 +1,50 @@ +package backend + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestCustomUserAgentTransport(t *testing.T) { + // Create a mock HTTP handler that checks the User-Agent header + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userAgent := r.Header.Get("User-Agent") + if userAgent != "TestUserAgent" { + t.Errorf("Expected User-Agent: TestUserAgent, got: %s", userAgent) + } + w.WriteHeader(http.StatusOK) + }) + + // Create a test server with the mock handler + server := httptest.NewServer(handler) + defer server.Close() + + // Create a custom user agent transport + customUserAgent := "TestUserAgent" + transport := &httpUserAgentRoundTripper{ + userAgent: customUserAgent, + rt: http.DefaultTransport, + } + + // Create an HTTP client with the custom transport + client := &http.Client{ + Transport: transport, + } + + // Make a request to the test server + resp, err := client.Get(server.URL) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + t.Log("failed to close response body") + } + }() + + // Check the response status code + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected status code: %d, got: %d", http.StatusOK, resp.StatusCode) + } +} diff --git a/internal/backend/layout/layout.go b/internal/backend/layout/layout.go index b600566a47f..cd69efc34de 100644 --- a/internal/backend/layout/layout.go +++ b/internal/backend/layout/layout.go @@ -1,17 +1,7 @@ package layout import ( - "context" - "fmt" - "os" - "path/filepath" - "regexp" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/restic" ) // Layout computes paths for file name storage. @@ -22,149 +12,3 @@ type Layout interface { Paths() []string Name() string } - -// Filesystem is the abstraction of a file system used for a backend. -type Filesystem interface { - Join(...string) string - ReadDir(context.Context, string) ([]os.FileInfo, error) - IsNotExist(error) bool -} - -// ensure statically that *LocalFilesystem implements Filesystem. -var _ Filesystem = &LocalFilesystem{} - -// LocalFilesystem implements Filesystem in a local path. -type LocalFilesystem struct { -} - -// ReadDir returns all entries of a directory. -func (l *LocalFilesystem) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) { - f, err := fs.Open(dir) - if err != nil { - return nil, err - } - - entries, err := f.Readdir(-1) - if err != nil { - return nil, errors.Wrap(err, "Readdir") - } - - err = f.Close() - if err != nil { - return nil, errors.Wrap(err, "Close") - } - - return entries, nil -} - -// Join combines several path components to one. -func (l *LocalFilesystem) Join(paths ...string) string { - return filepath.Join(paths...) -} - -// IsNotExist returns true for errors that are caused by not existing files. 
-func (l *LocalFilesystem) IsNotExist(err error) bool { - return os.IsNotExist(err) -} - -var backendFilenameLength = len(restic.ID{}) * 2 -var backendFilename = regexp.MustCompile(fmt.Sprintf("^[a-fA-F0-9]{%d}$", backendFilenameLength)) - -func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error) { - entries, err := fs.ReadDir(ctx, dir) - if err != nil && fs.IsNotExist(err) { - return false, nil - } - - if err != nil { - return false, errors.Wrap(err, "ReadDir") - } - - for _, e := range entries { - if backendFilename.MatchString(e.Name()) { - return true, nil - } - } - - return false, nil -} - -// ErrLayoutDetectionFailed is returned by DetectLayout() when the layout -// cannot be detected automatically. -var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed") - -// DetectLayout tries to find out which layout is used in a local (or sftp) -// filesystem at the given path. If repo is nil, an instance of LocalFilesystem -// is used. -func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, error) { - debug.Log("detect layout at %v", dir) - if repo == nil { - repo = &LocalFilesystem{} - } - - // key file in the "keys" dir (DefaultLayout) - foundKeysFile, err := hasBackendFile(ctx, repo, repo.Join(dir, defaultLayoutPaths[backend.KeyFile])) - if err != nil { - return nil, err - } - - // key file in the "key" dir (S3LegacyLayout) - foundKeyFile, err := hasBackendFile(ctx, repo, repo.Join(dir, s3LayoutPaths[backend.KeyFile])) - if err != nil { - return nil, err - } - - if foundKeysFile && !foundKeyFile { - debug.Log("found default layout at %v", dir) - return &DefaultLayout{ - Path: dir, - Join: repo.Join, - }, nil - } - - if foundKeyFile && !foundKeysFile { - debug.Log("found s3 layout at %v", dir) - return &S3LegacyLayout{ - Path: dir, - Join: repo.Join, - }, nil - } - - debug.Log("layout detection failed") - return nil, ErrLayoutDetectionFailed -} - -// ParseLayout parses the config string and returns a Layout. When layout is -// the empty string, DetectLayout is used. If that fails, defaultLayout is used. -func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, path string) (l Layout, err error) { - debug.Log("parse layout string %q for backend at %v", layout, path) - switch layout { - case "default": - l = &DefaultLayout{ - Path: path, - Join: repo.Join, - } - case "s3legacy": - l = &S3LegacyLayout{ - Path: path, - Join: repo.Join, - } - case "": - l, err = DetectLayout(ctx, repo, path) - - // use the default layout if auto detection failed - if errors.Is(err, ErrLayoutDetectionFailed) && defaultLayout != "" { - debug.Log("error: %v, use default layout %v", err, defaultLayout) - return ParseLayout(ctx, repo, defaultLayout, "", path) - } - - if err != nil { - return nil, err - } - debug.Log("layout detected: %v", l) - default: - return nil, errors.Errorf("unknown backend layout string %q, may be one of: default, s3legacy", layout) - } - - return l, nil -} diff --git a/internal/backend/layout/layout_default.go b/internal/backend/layout/layout_default.go index 9a8419f10a8..d2c4634d395 100644 --- a/internal/backend/layout/layout_default.go +++ b/internal/backend/layout/layout_default.go @@ -11,8 +11,8 @@ import ( // subdirs, two characters each (taken from the first two characters of the // file name). 
type DefaultLayout struct { - Path string - Join func(...string) string + path string + join func(...string) string } var defaultLayoutPaths = map[backend.FileType]string{ @@ -23,6 +23,13 @@ var defaultLayoutPaths = map[backend.FileType]string{ backend.KeyFile: "keys", } +func NewDefaultLayout(path string, join func(...string) string) *DefaultLayout { + return &DefaultLayout{ + path: path, + join: join, + } +} + func (l *DefaultLayout) String() string { return "" } @@ -37,32 +44,32 @@ func (l *DefaultLayout) Dirname(h backend.Handle) string { p := defaultLayoutPaths[h.Type] if h.Type == backend.PackFile && len(h.Name) > 2 { - p = l.Join(p, h.Name[:2]) + "/" + p = l.join(p, h.Name[:2]) + "/" } - return l.Join(l.Path, p) + "/" + return l.join(l.path, p) + "/" } // Filename returns a path to a file, including its name. func (l *DefaultLayout) Filename(h backend.Handle) string { name := h.Name if h.Type == backend.ConfigFile { - return l.Join(l.Path, "config") + return l.join(l.path, "config") } - return l.Join(l.Dirname(h), name) + return l.join(l.Dirname(h), name) } // Paths returns all directory names needed for a repo. func (l *DefaultLayout) Paths() (dirs []string) { for _, p := range defaultLayoutPaths { - dirs = append(dirs, l.Join(l.Path, p)) + dirs = append(dirs, l.join(l.path, p)) } // also add subdirs for i := 0; i < 256; i++ { subdir := hex.EncodeToString([]byte{byte(i)}) - dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[backend.PackFile], subdir)) + dirs = append(dirs, l.join(l.path, defaultLayoutPaths[backend.PackFile], subdir)) } return dirs @@ -74,6 +81,6 @@ func (l *DefaultLayout) Basedir(t backend.FileType) (dirname string, subdirs boo subdirs = true } - dirname = l.Join(l.Path, defaultLayoutPaths[t]) + dirname = l.join(l.path, defaultLayoutPaths[t]) return } diff --git a/internal/backend/layout/layout_rest.go b/internal/backend/layout/layout_rest.go index 822dd4a7e23..78fc6c82623 100644 --- a/internal/backend/layout/layout_rest.go +++ b/internal/backend/layout/layout_rest.go @@ -1,18 +1,24 @@ package layout import ( + "path" + "github.com/restic/restic/internal/backend" ) // RESTLayout implements the default layout for the REST protocol. type RESTLayout struct { - URL string - Path string - Join func(...string) string + url string } var restLayoutPaths = defaultLayoutPaths +func NewRESTLayout(url string) *RESTLayout { + return &RESTLayout{ + url: url, + } +} + func (l *RESTLayout) String() string { return "" } @@ -25,10 +31,10 @@ func (l *RESTLayout) Name() string { // Dirname returns the directory path for a given file type and name. func (l *RESTLayout) Dirname(h backend.Handle) string { if h.Type == backend.ConfigFile { - return l.URL + l.Join(l.Path, "/") + return l.url + "/" } - return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type]) + "/" + return l.url + path.Join("/", restLayoutPaths[h.Type]) + "/" } // Filename returns a path to a file, including its name. @@ -39,18 +45,18 @@ func (l *RESTLayout) Filename(h backend.Handle) string { name = "config" } - return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type], name) + return l.url + path.Join("/", restLayoutPaths[h.Type], name) } // Paths returns all directory names func (l *RESTLayout) Paths() (dirs []string) { for _, p := range restLayoutPaths { - dirs = append(dirs, l.URL+l.Join(l.Path, p)) + dirs = append(dirs, l.url+path.Join("/", p)) } return dirs } // Basedir returns the base dir name for files of type t. 
func (l *RESTLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) { - return l.URL + l.Join(l.Path, restLayoutPaths[t]), false + return l.url + path.Join("/", restLayoutPaths[t]), false } diff --git a/internal/backend/layout/layout_s3legacy.go b/internal/backend/layout/layout_s3legacy.go deleted file mode 100644 index 8b90789d84f..00000000000 --- a/internal/backend/layout/layout_s3legacy.go +++ /dev/null @@ -1,79 +0,0 @@ -package layout - -import ( - "github.com/restic/restic/internal/backend" -) - -// S3LegacyLayout implements the old layout used for s3 cloud storage backends, as -// described in the Design document. -type S3LegacyLayout struct { - URL string - Path string - Join func(...string) string -} - -var s3LayoutPaths = map[backend.FileType]string{ - backend.PackFile: "data", - backend.SnapshotFile: "snapshot", - backend.IndexFile: "index", - backend.LockFile: "lock", - backend.KeyFile: "key", -} - -func (l *S3LegacyLayout) String() string { - return "" -} - -// Name returns the name for this layout. -func (l *S3LegacyLayout) Name() string { - return "s3legacy" -} - -// join calls Join with the first empty elements removed. -func (l *S3LegacyLayout) join(url string, items ...string) string { - for len(items) > 0 && items[0] == "" { - items = items[1:] - } - - path := l.Join(items...) - if path == "" || path[0] != '/' { - if url != "" && url[len(url)-1] != '/' { - url += "/" - } - } - - return url + path -} - -// Dirname returns the directory path for a given file type and name. -func (l *S3LegacyLayout) Dirname(h backend.Handle) string { - if h.Type == backend.ConfigFile { - return l.URL + l.Join(l.Path, "/") - } - - return l.join(l.URL, l.Path, s3LayoutPaths[h.Type]) + "/" -} - -// Filename returns a path to a file, including its name. -func (l *S3LegacyLayout) Filename(h backend.Handle) string { - name := h.Name - - if h.Type == backend.ConfigFile { - name = "config" - } - - return l.join(l.URL, l.Path, s3LayoutPaths[h.Type], name) -} - -// Paths returns all directory names -func (l *S3LegacyLayout) Paths() (dirs []string) { - for _, p := range s3LayoutPaths { - dirs = append(dirs, l.Join(l.Path, p)) - } - return dirs -} - -// Basedir returns the base dir name for type t. 
-func (l *S3LegacyLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) { - return l.Join(l.Path, s3LayoutPaths[t]), false -} diff --git a/internal/backend/layout/layout_test.go b/internal/backend/layout/layout_test.go index 998f5aeb694..af5105c207a 100644 --- a/internal/backend/layout/layout_test.go +++ b/internal/backend/layout/layout_test.go @@ -1,12 +1,12 @@ package layout import ( - "context" "fmt" "path" "path/filepath" "reflect" "sort" + "strings" "testing" "github.com/restic/restic/internal/backend" @@ -98,8 +98,8 @@ func TestDefaultLayout(t *testing.T) { t.Run("Paths", func(t *testing.T) { l := &DefaultLayout{ - Path: tempdir, - Join: filepath.Join, + path: tempdir, + join: filepath.Join, } dirs := l.Paths() @@ -127,8 +127,8 @@ func TestDefaultLayout(t *testing.T) { for _, test := range tests { t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { l := &DefaultLayout{ - Path: test.path, - Join: test.join, + path: test.path, + join: test.join, } filename := l.Filename(test.Handle) @@ -140,7 +140,7 @@ func TestDefaultLayout(t *testing.T) { } func TestRESTLayout(t *testing.T) { - path := rtest.TempDir(t) + url := `https://hostname.foo` var tests = []struct { backend.Handle @@ -148,44 +148,43 @@ func TestRESTLayout(t *testing.T) { }{ { backend.Handle{Type: backend.PackFile, Name: "0123456"}, - filepath.Join(path, "data", "0123456"), + strings.Join([]string{url, "data", "0123456"}, "/"), }, { backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, - filepath.Join(path, "config"), + strings.Join([]string{url, "config"}, "/"), }, { backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, - filepath.Join(path, "snapshots", "123456"), + strings.Join([]string{url, "snapshots", "123456"}, "/"), }, { backend.Handle{Type: backend.IndexFile, Name: "123456"}, - filepath.Join(path, "index", "123456"), + strings.Join([]string{url, "index", "123456"}, "/"), }, { backend.Handle{Type: backend.LockFile, Name: "123456"}, - filepath.Join(path, "locks", "123456"), + strings.Join([]string{url, "locks", "123456"}, "/"), }, { backend.Handle{Type: backend.KeyFile, Name: "123456"}, - filepath.Join(path, "keys", "123456"), + strings.Join([]string{url, "keys", "123456"}, "/"), }, } l := &RESTLayout{ - Path: path, - Join: filepath.Join, + url: url, } t.Run("Paths", func(t *testing.T) { dirs := l.Paths() want := []string{ - filepath.Join(path, "data"), - filepath.Join(path, "snapshots"), - filepath.Join(path, "index"), - filepath.Join(path, "locks"), - filepath.Join(path, "keys"), + strings.Join([]string{url, "data"}, "/"), + strings.Join([]string{url, "snapshots"}, "/"), + strings.Join([]string{url, "index"}, "/"), + strings.Join([]string{url, "locks"}, "/"), + strings.Join([]string{url, "keys"}, "/"), } sort.Strings(want) @@ -214,59 +213,23 @@ func TestRESTLayoutURLs(t *testing.T) { dir string }{ { - &RESTLayout{URL: "https://hostname.foo", Path: "", Join: path.Join}, + &RESTLayout{url: "https://hostname.foo"}, backend.Handle{Type: backend.PackFile, Name: "foobar"}, "https://hostname.foo/data/foobar", "https://hostname.foo/data/", }, { - &RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + &RESTLayout{url: "https://hostname.foo:1234/prefix/repo"}, backend.Handle{Type: backend.LockFile, Name: "foobar"}, "https://hostname.foo:1234/prefix/repo/locks/foobar", "https://hostname.foo:1234/prefix/repo/locks/", }, { - &RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + &RESTLayout{url: 
"https://hostname.foo:1234/prefix/repo"}, backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, "https://hostname.foo:1234/prefix/repo/config", "https://hostname.foo:1234/prefix/repo/", }, - { - &S3LegacyLayout{URL: "https://hostname.foo", Path: "/", Join: path.Join}, - backend.Handle{Type: backend.PackFile, Name: "foobar"}, - "https://hostname.foo/data/foobar", - "https://hostname.foo/data/", - }, - { - &S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "", Join: path.Join}, - backend.Handle{Type: backend.LockFile, Name: "foobar"}, - "https://hostname.foo:1234/prefix/repo/lock/foobar", - "https://hostname.foo:1234/prefix/repo/lock/", - }, - { - &S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, - backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, - "https://hostname.foo:1234/prefix/repo/config", - "https://hostname.foo:1234/prefix/repo/", - }, - { - &S3LegacyLayout{URL: "", Path: "", Join: path.Join}, - backend.Handle{Type: backend.PackFile, Name: "foobar"}, - "data/foobar", - "data/", - }, - { - &S3LegacyLayout{URL: "", Path: "", Join: path.Join}, - backend.Handle{Type: backend.LockFile, Name: "foobar"}, - "lock/foobar", - "lock/", - }, - { - &S3LegacyLayout{URL: "", Path: "/", Join: path.Join}, - backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, - "/config", - "/", - }, } for _, test := range tests { @@ -283,163 +246,3 @@ func TestRESTLayoutURLs(t *testing.T) { }) } } - -func TestS3LegacyLayout(t *testing.T) { - path := rtest.TempDir(t) - - var tests = []struct { - backend.Handle - filename string - }{ - { - backend.Handle{Type: backend.PackFile, Name: "0123456"}, - filepath.Join(path, "data", "0123456"), - }, - { - backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, - filepath.Join(path, "config"), - }, - { - backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, - filepath.Join(path, "snapshot", "123456"), - }, - { - backend.Handle{Type: backend.IndexFile, Name: "123456"}, - filepath.Join(path, "index", "123456"), - }, - { - backend.Handle{Type: backend.LockFile, Name: "123456"}, - filepath.Join(path, "lock", "123456"), - }, - { - backend.Handle{Type: backend.KeyFile, Name: "123456"}, - filepath.Join(path, "key", "123456"), - }, - } - - l := &S3LegacyLayout{ - Path: path, - Join: filepath.Join, - } - - t.Run("Paths", func(t *testing.T) { - dirs := l.Paths() - - want := []string{ - filepath.Join(path, "data"), - filepath.Join(path, "snapshot"), - filepath.Join(path, "index"), - filepath.Join(path, "lock"), - filepath.Join(path, "key"), - } - - sort.Strings(want) - sort.Strings(dirs) - - if !reflect.DeepEqual(dirs, want) { - t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) - } - }) - - for _, test := range tests { - t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { - filename := l.Filename(test.Handle) - if filename != test.filename { - t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) - } - }) - } -} - -func TestDetectLayout(t *testing.T) { - path := rtest.TempDir(t) - - var tests = []struct { - filename string - want string - }{ - {"repo-layout-default.tar.gz", "*layout.DefaultLayout"}, - {"repo-layout-s3legacy.tar.gz", "*layout.S3LegacyLayout"}, - } - - var fs = &LocalFilesystem{} - for _, test := range tests { - for _, fs := range []Filesystem{fs, nil} { - t.Run(fmt.Sprintf("%v/fs-%T", test.filename, fs), func(t *testing.T) { - rtest.SetupTarTestFixture(t, path, filepath.Join("../testdata", test.filename)) - - layout, err := 
DetectLayout(context.TODO(), fs, filepath.Join(path, "repo")) - if err != nil { - t.Fatal(err) - } - - if layout == nil { - t.Fatal("wanted some layout, but detect returned nil") - } - - layoutName := fmt.Sprintf("%T", layout) - if layoutName != test.want { - t.Fatalf("want layout %v, got %v", test.want, layoutName) - } - - rtest.RemoveAll(t, filepath.Join(path, "repo")) - }) - } - } -} - -func TestParseLayout(t *testing.T) { - path := rtest.TempDir(t) - - var tests = []struct { - layoutName string - defaultLayoutName string - want string - }{ - {"default", "", "*layout.DefaultLayout"}, - {"s3legacy", "", "*layout.S3LegacyLayout"}, - {"", "", "*layout.DefaultLayout"}, - } - - rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", "repo-layout-default.tar.gz")) - - for _, test := range tests { - t.Run(test.layoutName, func(t *testing.T) { - layout, err := ParseLayout(context.TODO(), &LocalFilesystem{}, test.layoutName, test.defaultLayoutName, filepath.Join(path, "repo")) - if err != nil { - t.Fatal(err) - } - - if layout == nil { - t.Fatal("wanted some layout, but detect returned nil") - } - - // test that the functions work (and don't panic) - _ = layout.Dirname(backend.Handle{Type: backend.PackFile}) - _ = layout.Filename(backend.Handle{Type: backend.PackFile, Name: "1234"}) - _ = layout.Paths() - - layoutName := fmt.Sprintf("%T", layout) - if layoutName != test.want { - t.Fatalf("want layout %v, got %v", test.want, layoutName) - } - }) - } -} - -func TestParseLayoutInvalid(t *testing.T) { - path := rtest.TempDir(t) - - var invalidNames = []string{ - "foo", "bar", "local", - } - - for _, name := range invalidNames { - t.Run(name, func(t *testing.T) { - layout, err := ParseLayout(context.TODO(), nil, name, "", path) - if err == nil { - t.Fatalf("expected error not found for layout name %v, layout is %v", name, layout) - } - }) - } -} diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go index dc5e7948cfa..782f132d06d 100644 --- a/internal/backend/local/config.go +++ b/internal/backend/local/config.go @@ -9,8 +9,7 @@ import ( // Config holds all information needed to open a local repository. 
type Config struct { - Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Path string Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"` } diff --git a/internal/backend/local/layout_test.go b/internal/backend/local/layout_test.go index 46f3996bb72..cac89e55261 100644 --- a/internal/backend/local/layout_test.go +++ b/internal/backend/local/layout_test.go @@ -14,20 +14,14 @@ func TestLayout(t *testing.T) { var tests = []struct { filename string - layout string failureExpected bool packfiles map[string]bool }{ - {"repo-layout-default.tar.gz", "", false, map[string]bool{ + {"repo-layout-default.tar.gz", false, map[string]bool{ "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, }}, - {"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{ - "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, - "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, - "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, - }}, } for _, test := range tests { @@ -37,7 +31,6 @@ func TestLayout(t *testing.T) { repo := filepath.Join(path, "repo") be, err := Open(context.TODO(), Config{ Path: repo, - Layout: test.layout, Connections: 2, }) if err != nil { diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index b89f2ff446f..ee87ae5d6e6 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -2,6 +2,7 @@ package local import ( "context" + "fmt" "hash" "io" "os" @@ -30,19 +31,16 @@ type Local struct { // ensure statically that *Local implements backend.Backend. var _ backend.Backend = &Local{} +var errTooShort = fmt.Errorf("file is too short") + func NewFactory() location.Factory { return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } -const defaultLayout = "default" - -func open(ctx context.Context, cfg Config) (*Local, error) { - l, err := layout.ParseLayout(ctx, &layout.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path) - if err != nil { - return nil, err - } +func open(cfg Config) (*Local, error) { + l := layout.NewDefaultLayout(cfg.Path, filepath.Join) - fi, err := fs.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile})) + fi, err := os.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile})) m := util.DeriveModesFromFileInfo(fi, err) debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir) @@ -54,30 +52,30 @@ func open(ctx context.Context, cfg Config) (*Local, error) { } // Open opens the local backend as specified by config. -func Open(ctx context.Context, cfg Config) (*Local, error) { - debug.Log("open local backend at %v (layout %q)", cfg.Path, cfg.Layout) - return open(ctx, cfg) +func Open(_ context.Context, cfg Config) (*Local, error) { + debug.Log("open local backend at %v", cfg.Path) + return open(cfg) } // Create creates all the necessary files and directories for a new local // backend at dir. Afterwards a new config blob should be created. 
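Note how much open() shrinks above: without layout auto-detection there is no context or extra I/O needed, so opening a local repository reduces to a path plus a connection limit. A minimal sketch, mirroring the updated test code (the path is illustrative):

	be, err := local.Open(context.TODO(), local.Config{
		Path:        "/srv/restic-repo",
		Connections: 2,
	})
	if err != nil {
		return err
	}
	defer func() { _ = be.Close() }()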
-func Create(ctx context.Context, cfg Config) (*Local, error) { - debug.Log("create local backend at %v (layout %q)", cfg.Path, cfg.Layout) +func Create(_ context.Context, cfg Config) (*Local, error) { + debug.Log("create local backend at %v", cfg.Path) - be, err := open(ctx, cfg) + be, err := open(cfg) if err != nil { return nil, err } // test if config file already exists - _, err = fs.Lstat(be.Filename(backend.Handle{Type: backend.ConfigFile})) + _, err = os.Lstat(be.Filename(backend.Handle{Type: backend.ConfigFile})) if err == nil { return nil, errors.New("config file already exists") } // create paths for data and refs for _, d := range be.Paths() { - err := fs.MkdirAll(d, be.Modes.Dir) + err := os.MkdirAll(d, be.Modes.Dir) if err != nil { return nil, errors.WithStack(err) } @@ -90,11 +88,6 @@ func (b *Local) Connections() uint { return b.Config.Connections } -// Location returns this backend's location (the directory name). -func (b *Local) Location() string { - return b.Path -} - // Hasher may return a hash function for calculating a content hash for the backend func (b *Local) Hasher() hash.Hash { return nil @@ -110,6 +103,10 @@ func (b *Local) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) } +func (b *Local) IsPermanentError(err error) bool { + return b.IsNotExist(err) || errors.Is(err, errTooShort) || errors.Is(err, os.ErrPermission) +} + // Save stores data in the backend at the handle. func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReader) (err error) { finalname := b.Filename(h) @@ -130,7 +127,7 @@ func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReade debug.Log("error %v: creating dir", err) // error is caused by a missing directory, try to create it - mkdirErr := fs.MkdirAll(dir, b.Modes.Dir) + mkdirErr := os.MkdirAll(dir, b.Modes.Dir) if mkdirErr != nil { debug.Log("error creating dir %v: %v", dir, mkdirErr) } else { @@ -150,7 +147,7 @@ func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReade // temporary's name and no other goroutine will get the same data to // Save, so the temporary name should never be reused by another // goroutine. - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) } }(f) @@ -214,11 +211,23 @@ func (b *Local) Load(ctx context.Context, h backend.Handle, length int, offset i } func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { - f, err := fs.Open(b.Filename(h)) + f, err := os.Open(b.Filename(h)) if err != nil { return nil, err } + fi, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + + size := fi.Size() + if size < offset+int64(length) { + _ = f.Close() + return nil, errTooShort + } + if offset > 0 { _, err = f.Seek(offset, 0) if err != nil { @@ -228,7 +237,7 @@ func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offs } if length > 0 { - return backend.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), nil } return f, nil @@ -236,7 +245,7 @@ func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offs // Stat returns information about a blob. 
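The stat check added to openReader above is what feeds the new IsPermanentError: a read past the end of a file now fails up front with errTooShort instead of surfacing later as a short read. The same guard in isolation (a sketch reusing the errTooShort declared earlier in this patch, not the literal restic code):

	func checkBounds(f *os.File, offset int64, length int) error {
		fi, err := f.Stat()
		if err != nil {
			return err
		}
		if fi.Size() < offset+int64(length) {
			// permanent: retrying will not make the file on disk any longer
			return errTooShort
		}
		return nil
	}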
func (b *Local) Stat(_ context.Context, h backend.Handle) (backend.FileInfo, error) { - fi, err := fs.Stat(b.Filename(h)) + fi, err := os.Stat(b.Filename(h)) if err != nil { return backend.FileInfo{}, errors.WithStack(err) } @@ -249,12 +258,12 @@ func (b *Local) Remove(_ context.Context, h backend.Handle) error { fn := b.Filename(h) // reset read-only flag - err := fs.Chmod(fn, 0666) + err := os.Chmod(fn, 0666) if err != nil && !os.IsPermission(err) { return errors.WithStack(err) } - return fs.Remove(fn) + return os.Remove(fn) } // List runs fn for each file in the backend which has the type t. When an @@ -280,7 +289,7 @@ func (b *Local) List(ctx context.Context, t backend.FileType, fn func(backend.Fi // Also, visitDirs assumes it sees a directory full of directories, while // visitFiles wants a directory full or regular files. func visitDirs(ctx context.Context, dir string, fn func(backend.FileInfo) error) error { - d, err := fs.Open(dir) + d, err := os.Open(dir) if err != nil { return err } @@ -307,7 +316,7 @@ func visitDirs(ctx context.Context, dir string, fn func(backend.FileInfo) error) } func visitFiles(ctx context.Context, dir string, fn func(backend.FileInfo) error, ignoreNotADirectory bool) error { - d, err := fs.Open(dir) + d, err := os.Open(dir) if err != nil { return err } @@ -353,7 +362,7 @@ func visitFiles(ctx context.Context, dir string, fn func(backend.FileInfo) error // Delete removes the repository and all files. func (b *Local) Delete(_ context.Context) error { - return fs.RemoveAll(b.Path) + return os.RemoveAll(b.Path) } // Close closes all open files. diff --git a/internal/backend/local/local_unix.go b/internal/backend/local/local_unix.go index e3256ed7a6b..e525874564d 100644 --- a/internal/backend/local/local_unix.go +++ b/internal/backend/local/local_unix.go @@ -8,8 +8,6 @@ import ( "os" "runtime" "syscall" - - "github.com/restic/restic/internal/fs" ) // fsyncDir flushes changes to the directory dir. @@ -45,5 +43,5 @@ func isMacENOTTY(err error) bool { // set file to readonly func setFileReadonly(f string, mode os.FileMode) error { - return fs.Chmod(f, mode&^0222) + return os.Chmod(f, mode&^0222) } diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 2698a82755a..981c0a18250 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/base64" + "fmt" "hash" "io" "net/http" @@ -28,7 +29,7 @@ func NewFactory() location.Factory { return location.NewHTTPBackendFactory[struct{}, *MemoryBackend]( "mem", - func(s string) (*struct{}, error) { + func(_ string) (*struct{}, error) { return &struct{}{}, nil }, location.NoPassword, @@ -41,7 +42,8 @@ func NewFactory() location.Factory { ) } -var errNotFound = errors.New("not found") +var errNotFound = fmt.Errorf("not found") +var errTooSmall = errors.New("access beyond end of file") const connectionCount = 2 @@ -68,6 +70,10 @@ func (be *MemoryBackend) IsNotExist(err error) bool { return errors.Is(err, errNotFound) } +func (be *MemoryBackend) IsPermanentError(err error) bool { + return be.IsNotExist(err) || errors.Is(err, errTooSmall) +} + // Save adds new Data to the backend. 
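The local and in-memory backends now agree on the contract: missing files and out-of-range reads are permanent failures. Callers can short-circuit on it, roughly the way the retry wrapper later in this patch does (sketch, names as in the surrounding diff):

	err := be.Load(ctx, h, length, offset, consumer)
	if err != nil && be.IsPermanentError(err) {
		return err // a retry cannot fix a permanent error
	}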
func (be *MemoryBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { be.m.Lock() @@ -130,12 +136,12 @@ func (be *MemoryBackend) openReader(ctx context.Context, h backend.Handle, lengt } buf := be.data[h] - if offset > int64(len(buf)) { - return nil, errors.New("offset beyond end of file") + if offset+int64(length) > int64(len(buf)) { + return nil, errTooSmall } buf = buf[offset:] - if length > 0 && len(buf) > length { + if length > 0 { buf = buf[:length] } @@ -216,11 +222,6 @@ func (be *MemoryBackend) Connections() uint { return connectionCount } -// Location returns the location of the backend (RAM). -func (be *MemoryBackend) Location() string { - return "RAM" -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *MemoryBackend) Hasher() hash.Hash { return xxhash.New() diff --git a/internal/backend/mock/backend.go b/internal/backend/mock/backend.go index 57b1ede1993..a0319844333 100644 --- a/internal/backend/mock/backend.go +++ b/internal/backend/mock/backend.go @@ -13,6 +13,7 @@ import ( type Backend struct { CloseFn func() error IsNotExistFn func(err error) bool + IsPermanentErrorFn func(err error) bool SaveFn func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error OpenReaderFn func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) StatFn func(ctx context.Context, h backend.Handle) (backend.FileInfo, error) @@ -20,7 +21,6 @@ type Backend struct { RemoveFn func(ctx context.Context, h backend.Handle) error DeleteFn func(ctx context.Context) error ConnectionsFn func() uint - LocationFn func() string HasherFn func() hash.Hash HasAtomicReplaceFn func() bool } @@ -48,15 +48,6 @@ func (m *Backend) Connections() uint { return m.ConnectionsFn() } -// Location returns a location string. -func (m *Backend) Location() string { - if m.LocationFn == nil { - return "" - } - - return m.LocationFn() -} - // Hasher may return a hash function for calculating a content hash for the backend func (m *Backend) Hasher() hash.Hash { if m.HasherFn == nil { @@ -83,6 +74,14 @@ func (m *Backend) IsNotExist(err error) bool { return m.IsNotExistFn(err) } +func (m *Backend) IsPermanentError(err error) bool { + if m.IsPermanentErrorFn == nil { + return false + } + + return m.IsPermanentErrorFn(err) +} + // Save data in the backend. 
func (m *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { if m.SaveFn == nil { diff --git a/internal/backend/rclone/backend.go b/internal/backend/rclone/backend.go index 416162364d6..8294aa8c419 100644 --- a/internal/backend/rclone/backend.go +++ b/internal/backend/rclone/backend.go @@ -94,7 +94,7 @@ func run(command string, args ...string) (*StdioConn, *sync.WaitGroup, chan stru err = errW } if err != nil { - if util.IsErrDot(err) { + if errors.Is(err, exec.ErrDot) { return nil, nil, nil, nil, errors.Errorf("cannot implicitly run relative executable %v found in current directory, use -o rclone.program=./ to override", cmd.Path) } return nil, nil, nil, nil, err @@ -183,7 +183,7 @@ func newBackend(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, dialCount := 0 tr := &http2.Transport{ AllowHTTP: true, // this is not really HTTP, just stdin/stdout - DialTLS: func(network, address string, cfg *tls.Config) (net.Conn, error) { + DialTLS: func(network, address string, _ *tls.Config) (net.Conn, error) { debug.Log("new connection requested, %v %v", network, address) if dialCount > 0 { // the connection to the child process is already closed diff --git a/internal/backend/rest/config_test.go b/internal/backend/rest/config_test.go index 23ea9095b11..13a1ebb13c0 100644 --- a/internal/backend/rest/config_test.go +++ b/internal/backend/rest/config_test.go @@ -31,6 +31,13 @@ var configTests = []test.ConfigTestData[Config]{ Connections: 5, }, }, + { + S: "rest:http+unix:///tmp/rest.socket:/my_backup_repo/", + Cfg: Config{ + URL: parseURL("http+unix:///tmp/rest.socket:/my_backup_repo/"), + Connections: 5, + }, + }, } func TestParseConfig(t *testing.T) { diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index d8171d90e87..7bdedff397f 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -8,7 +8,6 @@ import ( "io" "net/http" "net/url" - "path" "strings" "github.com/restic/restic/internal/backend" @@ -17,6 +16,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" ) // make sure the rest backend implements backend.Backend @@ -30,6 +30,20 @@ type Backend struct { layout.Layout } +// restError is returned whenever the server returns a non-successful HTTP status. 
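The rclone change above swaps a util helper for the standard library: since Go 1.19, os/exec refuses to implicitly run a binary that was only found via the current directory and marks the command with the exec.ErrDot sentinel. A standalone illustration (hypothetical lookup, not restic code):

	cmd := exec.Command("rclone", "version")
	if errors.Is(cmd.Err, exec.ErrDot) {
		// lookup resolved through ".", demand an explicit ./rclone instead
	}

This is why the error message points users at -o rclone.program=./ as the override.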
+type restError struct { + backend.Handle + StatusCode int + Status string +} + +func (e *restError) Error() string { + if e.StatusCode == http.StatusNotFound && e.Handle.Type.String() != "invalid" { + return fmt.Sprintf("%v does not exist", e.Handle) + } + return fmt.Sprintf("unexpected HTTP response (%v): %v", e.StatusCode, e.Status) +} + func NewFactory() location.Factory { return location.NewHTTPBackendFactory("rest", ParseConfig, StripPassword, Create, Open) } @@ -51,7 +65,7 @@ func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) be := &Backend{ url: cfg.URL, client: http.Client{Transport: rt}, - Layout: &layout.RESTLayout{URL: url, Join: path.Join}, + Layout: layout.NewRESTLayout(url), connections: cfg.Connections, } @@ -96,7 +110,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode) + return nil, &restError{backend.Handle{}, resp.StatusCode, resp.Status} } return be, nil @@ -106,11 +120,6 @@ func (b *Backend) Connections() uint { return b.connections } -// Location returns this backend's location (the server's URL). -func (b *Backend) Location() string { - return b.url.String() -} - // Hasher may return a hash function for calculating a content hash for the backend func (b *Backend) Hasher() hash.Hash { return nil @@ -133,6 +142,12 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR if err != nil { return errors.WithStack(err) } + req.GetBody = func() (io.ReadCloser, error) { + if err := rd.Rewind(); err != nil { + return nil, err + } + return io.NopCloser(rd), nil + } req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Accept", ContentTypeV2) @@ -150,26 +165,31 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR } if resp.StatusCode != http.StatusOK { - return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode) + return &restError{h, resp.StatusCode, resp.Status} } return nil } -// notExistError is returned whenever the requested file does not exist on the -// server. -type notExistError struct { - backend.Handle +// IsNotExist returns true if the error was caused by a non-existing file. +func (b *Backend) IsNotExist(err error) bool { + var e *restError + return errors.As(err, &e) && e.StatusCode == http.StatusNotFound } -func (e *notExistError) Error() string { - return fmt.Sprintf("%v does not exist", e.Handle) -} +func (b *Backend) IsPermanentError(err error) bool { + if b.IsNotExist(err) { + return true + } -// IsNotExist returns true if the error was caused by a non-existing file. 
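One subtlety in the Save change above: req.GetBody tells net/http how to produce a fresh copy of the body, which the transport needs whenever it has to replay a request (for instance after the server closes a kept-alive connection). For plain in-memory readers the standard library wires this up itself; the explicit closure is only needed because RewindReader is a custom type that must be rewound first. For comparison (payload and the URL are placeholders):

	body := bytes.NewReader(payload)
	req, err := http.NewRequest(http.MethodPost, "https://example.com/upload", body)
	// http.NewRequest sets req.GetBody automatically for *bytes.Reader,
	// *bytes.Buffer and *strings.Reader bodies.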
-func (b *Backend) IsNotExist(err error) bool {
-	var e *notExistError
-	return errors.As(err, &e)
+	var rerr *restError
+	if errors.As(err, &rerr) {
+		if rerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || rerr.StatusCode == http.StatusUnauthorized || rerr.StatusCode == http.StatusForbidden {
+			return true
+		}
+	}
+
+	return false
 }
 
 // Load runs fn with a reader that yields the contents of the file at h at the
@@ -221,14 +241,13 @@ func (b *Backend) openReader(ctx context.Context, h backend.Handle, length int,
 		return nil, errors.Wrap(err, "client.Do")
 	}
 
-	if resp.StatusCode == http.StatusNotFound {
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
 		_ = drainAndClose(resp)
-		return nil, &notExistError{h}
+		return nil, &restError{h, resp.StatusCode, resp.Status}
 	}
 
-	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
-		_ = drainAndClose(resp)
-		return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
+	if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 && resp.ContentLength != int64(length) {
+		_ = drainAndClose(resp)
+		return nil, &restError{h, http.StatusRequestedRangeNotSatisfiable, "partial out of bounds read"}
 	}
 
 	return resp.Body, nil
@@ -251,12 +270,8 @@ func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo,
 		return backend.FileInfo{}, err
 	}
 
-	if resp.StatusCode == http.StatusNotFound {
-		return backend.FileInfo{}, &notExistError{h}
-	}
-
 	if resp.StatusCode != http.StatusOK {
-		return backend.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
+		return backend.FileInfo{}, &restError{h, resp.StatusCode, resp.Status}
 	}
 
 	if resp.ContentLength < 0 {
@@ -288,12 +303,8 @@ func (b *Backend) Remove(ctx context.Context, h backend.Handle) error {
 		return err
 	}
 
-	if resp.StatusCode == http.StatusNotFound {
-		return &notExistError{h}
-	}
-
 	if resp.StatusCode != http.StatusOK {
-		return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
+		return &restError{h, resp.StatusCode, resp.Status}
 	}
 
 	return nil
@@ -330,7 +341,7 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.
 	if resp.StatusCode != http.StatusOK {
 		_ = drainAndClose(resp)
-		return errors.Errorf("List failed, server response: %v (%v)", resp.Status, resp.StatusCode)
+		return &restError{backend.Handle{Type: t}, resp.StatusCode, resp.Status}
 	}
 
 	if resp.Header.Get("Content-Type") == ContentTypeV2 {
diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go
index 6a5b4f8a58f..50560f66d49 100644
--- a/internal/backend/rest/rest_test.go
+++ b/internal/backend/rest/rest_test.go
@@ -1,11 +1,15 @@
 package rest_test
 
 import (
+	"bufio"
 	"context"
-	"net"
+	"fmt"
 	"net/url"
 	"os"
 	"os/exec"
+	"regexp"
+	"strings"
+	"syscall"
 	"testing"
 	"time"
 
@@ -14,54 +18,133 @@ import (
 	rtest "github.com/restic/restic/internal/test"
 )
 
-func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, func()) {
+var (
+	serverStartedRE = regexp.MustCompile("^start server on (.*)$")
+)
+
+func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) (*url.URL, func()) {
 	srv, err := exec.LookPath("rest-server")
 	if err != nil {
 		t.Skip(err)
 	}
 
-	cmd := exec.CommandContext(ctx, srv, "--no-auth", "--path", dir)
+	// create our own context, so that our cleanup can cancel and wait for completion;
+	// this will ensure any open ports, open unix sockets etc. are properly closed
+	processCtx, cancel := context.WithCancel(ctx)
+	cmd := exec.CommandContext(processCtx, srv, "--no-auth", "--path", dir, "--listen", reqListenAddr)
+
+	// this cancel func is called when the process context is done
+	cmd.Cancel = func() error {
+		// we execute in a goroutine as we know the caller will
+		// be waiting on a .Wait() regardless
+		go func() {
+			// try to send a graceful termination signal
+			if cmd.Process.Signal(syscall.SIGTERM) == nil {
+				// if we succeed, then wait a few seconds
+				time.Sleep(2 * time.Second)
+			}
+			// and then make sure it's killed either way, ignoring any error code
+			_ = cmd.Process.Kill()
+		}()
+		return nil
+	}
+
+	// this is the cleanup function that we return to the caller,
+	// which will cancel our process context, and then wait for it to finish
+	cleanup := func() {
+		cancel()
+		_ = cmd.Wait()
+	}
+
+	// but in case we don't finish this method, e.g. by calling t.Fatal(),
+	// we also defer a call to clean it up ourselves, guarded by a flag to
+	// indicate that we returned the function to the caller to deal with.
+	callerWillCleanUp := false
+	defer func() {
+		if !callerWillCleanUp {
+			cleanup()
+		}
+	}()
+
+	// send stdout to our stdout
 	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stdout
-	if err := cmd.Start(); err != nil {
+
+	// capture stderr with a pipe, as we want to examine this output
+	// to determine when the server is started and listening.
+	cmdErr, err := cmd.StderrPipe()
+	if err != nil {
 		t.Fatal(err)
 	}
 
-	// wait until the TCP port is reachable
-	var success bool
-	for i := 0; i < 10; i++ {
-		time.Sleep(200 * time.Millisecond)
+	// start the rest-server
+	if err := cmd.Start(); err != nil {
+		t.Fatal(err)
+	}
 
-		c, err := net.Dial("tcp", "localhost:8000")
-		if err != nil {
-			continue
+	// create a channel to receive the actual listen address on
+	listenAddrCh := make(chan string)
+	go func() {
+		defer close(listenAddrCh)
+		matched := false
+		br := bufio.NewReader(cmdErr)
+		for {
+			line, err := br.ReadString('\n')
+			if err != nil {
+				// we ignore errors, as code that relies on this
+				// will happily fail via timeout and empty closed
+				// channel.
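+				// (the select below will then fail the test through its
+				// timeout or the closed-channel case, so nothing needs to
+				// be reported from here)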
+				return
+			}
+
+			line = strings.Trim(line, "\r\n")
+			if !matched {
+				// look for the server started message, and return the address
+				// that it's listening on
+				matchedServerListen := serverStartedRE.FindSubmatch([]byte(line))
+				if len(matchedServerListen) == 2 {
+					listenAddrCh <- string(matchedServerListen[1])
+					matched = true
+				}
+			}
+			_, _ = fmt.Fprintln(os.Stdout, line) // print all output to console
 		}
+	}()
 
-		success = true
-		if err := c.Close(); err != nil {
-			t.Fatal(err)
+	// wait for us to get an address,
+	// or the parent context to cancel,
+	// or for us to timeout
+	var actualListenAddr string
+	select {
+	case <-processCtx.Done():
+		t.Fatal(context.Canceled)
+	case <-time.NewTimer(2 * time.Second).C:
+		t.Fatal(context.DeadlineExceeded)
+	case a, ok := <-listenAddrCh:
+		if !ok {
+			t.Fatal(context.Canceled)
 		}
+		actualListenAddr = a
 	}
 
-	if !success {
-		t.Fatal("unable to connect to rest server")
-		return nil, nil
+	// this translates the address that the server is listening on
+	// to a URL suitable for us to connect to
+	var addrToConnectTo string
+	if strings.HasPrefix(reqListenAddr, "unix:") {
+		addrToConnectTo = fmt.Sprintf("http+unix://%s:/restic-test/", actualListenAddr)
+	} else {
+		// while we may listen on 0.0.0.0, we connect to localhost
+		addrToConnectTo = fmt.Sprintf("http://%s/restic-test/", strings.Replace(actualListenAddr, "0.0.0.0", "localhost", 1))
 	}
 
-	url, err := url.Parse("http://localhost:8000/restic-test/")
+	// parse to a URL
+	url, err := url.Parse(addrToConnectTo)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	cleanup := func() {
-		if err := cmd.Process.Kill(); err != nil {
-			t.Fatal(err)
-		}
-
-		// ignore errors, we've killed the process
-		_ = cmd.Wait()
-	}
-
+	// indicate that we've completed successfully, and that the caller
+	// is responsible for calling cleanup
+	callerWillCleanUp = true
 	return url, cleanup
 }
 
@@ -91,7 +174,7 @@ func TestBackendREST(t *testing.T) {
 	defer cancel()
 
 	dir := rtest.TempDir(t)
-	serverURL, cleanup := runRESTServer(ctx, t, dir)
+	serverURL, cleanup := runRESTServer(ctx, t, dir, ":0")
 	defer cleanup()
 
 	newTestSuite(serverURL, false).RunTests(t)
@@ -116,7 +199,7 @@ func BenchmarkBackendREST(t *testing.B) {
 	defer cancel()
 
 	dir := rtest.TempDir(t)
-	serverURL, cleanup := runRESTServer(ctx, t, dir)
+	serverURL, cleanup := runRESTServer(ctx, t, dir, ":0")
 	defer cleanup()
 
 	newTestSuite(serverURL, false).RunBenchmarks(t)
diff --git a/internal/backend/rest/rest_unix_test.go b/internal/backend/rest/rest_unix_test.go
new file mode 100644
index 00000000000..c4f08df0ef4
--- /dev/null
+++ b/internal/backend/rest/rest_unix_test.go
@@ -0,0 +1,30 @@
+//go:build !windows
+// +build !windows
+
+package rest_test
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"testing"
+
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func TestBackendRESTWithUnixSocket(t *testing.T) {
+	defer func() {
+		if t.Skipped() {
+			rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
+		}
+	}()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dir := rtest.TempDir(t)
+	serverURL, cleanup := runRESTServer(ctx, t, path.Join(dir, "data"), fmt.Sprintf("unix:%s", path.Join(dir, "sock")))
+	defer cleanup()
+
+	newTestSuite(serverURL, false).RunTests(t)
+}
diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go
index c63338fb664..de8a520ec9e 100644
--- a/internal/backend/retry/backend_retry.go
+++ b/internal/backend/retry/backend_retry.go
@@ -2,22 +2,27 @@ package retry
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
+
"sync" "time" "github.com/cenkalti/backoff/v4" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/feature" ) // Backend retries operations on the backend in case of an error with a // backoff. type Backend struct { backend.Backend - MaxTries int - Report func(string, error, time.Duration) - Success func(string, int) + MaxElapsedTime time.Duration + Report func(string, error, time.Duration) + Success func(string, int) + + failedLoads sync.Map } // statically ensure that RetryBackend implements backend.Backend. @@ -27,32 +32,64 @@ var _ backend.Backend = &Backend{} // backoff. report is called with a description and the error, if one occurred. // success is called with the number of retries before a successful operation // (it is not called if it succeeded on the first try) -func New(be backend.Backend, maxTries int, report func(string, error, time.Duration), success func(string, int)) *Backend { +func New(be backend.Backend, maxElapsedTime time.Duration, report func(string, error, time.Duration), success func(string, int)) *Backend { return &Backend{ - Backend: be, - MaxTries: maxTries, - Report: report, - Success: success, + Backend: be, + MaxElapsedTime: maxElapsedTime, + Report: report, + Success: success, } } // retryNotifyErrorWithSuccess is an extension of backoff.RetryNotify with notification of success after an error. // success is NOT notified on the first run of operation (only after an error). -func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOff, notify backoff.Notify, success func(retries int)) error { +func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOffContext, notify backoff.Notify, success func(retries int)) error { + var operationWrapper backoff.Operation if success == nil { - return backoff.RetryNotify(operation, b, notify) - } - retries := 0 - operationWrapper := func() error { - err := operation() - if err != nil { - retries++ - } else if retries > 0 { - success(retries) + operationWrapper = operation + } else { + retries := 0 + operationWrapper = func() error { + err := operation() + if err != nil { + retries++ + } else if retries > 0 { + success(retries) + } + return err } - return err } - return backoff.RetryNotify(operationWrapper, b, notify) + err := backoff.RetryNotify(operationWrapper, b, notify) + + if err != nil && notify != nil && b.Context().Err() == nil { + // log final error, unless the context was canceled + notify(err, -1) + } + return err +} + +func withRetryAtLeastOnce(delegate *backoff.ExponentialBackOff) *retryAtLeastOnce { + return &retryAtLeastOnce{delegate: delegate} +} + +type retryAtLeastOnce struct { + delegate *backoff.ExponentialBackOff + numTries uint64 +} + +func (b *retryAtLeastOnce) NextBackOff() time.Duration { + delay := b.delegate.NextBackOff() + + b.numTries++ + if b.numTries == 1 && b.delegate.Stop == delay { + return b.delegate.InitialInterval + } + return delay +} + +func (b *retryAtLeastOnce) Reset() { + b.numTries = 0 + b.delegate.Reset() } var fastRetries = false @@ -69,13 +106,38 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error } bo := backoff.NewExponentialBackOff() + bo.MaxElapsedTime = be.MaxElapsedTime + + if feature.Flag.Enabled(feature.BackendErrorRedesign) { + bo.InitialInterval = 1 * time.Second + bo.Multiplier = 2 + } if fastRetries { // speed up integration tests bo.InitialInterval = 1 * time.Millisecond + maxElapsedTime := 200 * time.Millisecond + if 
bo.MaxElapsedTime > maxElapsedTime { + bo.MaxElapsedTime = maxElapsedTime + } } - err := retryNotifyErrorWithSuccess(f, - backoff.WithContext(backoff.WithMaxRetries(bo, uint64(be.MaxTries)), ctx), + var b backoff.BackOff = withRetryAtLeastOnce(bo) + if !feature.Flag.Enabled(feature.BackendErrorRedesign) { + // deprecated behavior + b = backoff.WithMaxRetries(b, 10) + } + + err := retryNotifyErrorWithSuccess( + func() error { + err := f() + // don't retry permanent errors as those very likely cannot be fixed by retrying + // TODO remove IsNotExist(err) special cases when removing the feature flag + if feature.Flag.Enabled(feature.BackendErrorRedesign) && !errors.Is(err, &backoff.PermanentError{}) && be.Backend.IsPermanentError(err) { + return backoff.Permanent(err) + } + return err + }, + backoff.WithContext(b, ctx), func(err error, d time.Duration) { if be.Report != nil { be.Report(msg, err, d) @@ -121,29 +183,57 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind }) } +// Failed loads expire after an hour +var failedLoadExpiry = time.Hour + // Load returns a reader that yields the contents of the file at h at the // given offset. If length is larger than zero, only a portion of the file // is returned. rd must be closed after use. If an error is returned, the // ReadCloser must be nil. func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (err error) { - return be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset), + key := h + key.IsMetadata = false + + // Implement the circuit breaker pattern for files that exhausted all retries due to a non-permanent error + if v, ok := be.failedLoads.Load(key); ok { + if time.Since(v.(time.Time)) > failedLoadExpiry { + be.failedLoads.Delete(key) + } else { + // fail immediately if the file was already problematic during the last hour + return fmt.Errorf("circuit breaker open for file %v", h) + } + } + + err = be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset), func() error { - err := be.Backend.Load(ctx, h, length, offset, consumer) - if be.Backend.IsNotExist(err) { - return backoff.Permanent(err) - } - return err + return be.Backend.Load(ctx, h, length, offset, consumer) }) + + if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && ctx.Err() == nil && !be.IsPermanentError(err) { + // We've exhausted the retries, the file is likely inaccessible. By excluding permanent + // errors, not found or truncated files are not recorded. Also ignore errors if the context + // was canceled. + be.failedLoads.LoadOrStore(key, time.Now()) + } + + return err } // Stat returns information about the File identified by h. func (be *Backend) Stat(ctx context.Context, h backend.Handle) (fi backend.FileInfo, err error) { - err = be.retry(ctx, fmt.Sprintf("Stat(%v)", h), + // see the call to `cancel()` below for why this context exists + statCtx, cancel := context.WithCancel(ctx) + defer cancel() + + err = be.retry(statCtx, fmt.Sprintf("Stat(%v)", h), func() error { var innerError error fi, innerError = be.Backend.Stat(ctx, h) if be.Backend.IsNotExist(innerError) { + // stat is only used to check the existence of the config file. + // cancel the context to suppress the final error message if the file is not found. 
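+				// (retryNotifyErrorWithSuccess only reports the final error
+				// while its context is still live, so canceling here keeps an
+				// expected "does not exist" probe out of the error log)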
+				cancel()
 				// do not retry if file is not found, as stat is usually used to check whether a file exists
 				return backoff.Permanent(innerError)
 			}
diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go
index 405cdfa5923..9259144d483 100644
--- a/internal/backend/retry/backend_retry_test.go
+++ b/internal/backend/retry/backend_retry_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"io"
+	"strings"
 	"testing"
 	"time"
 
@@ -192,8 +193,9 @@ func TestBackendListRetryErrorBackend(t *testing.T) {
 	}
 
 	TestFastRetries(t)
-	const maxRetries = 2
-	retryBackend := New(be, maxRetries, nil, nil)
+	const maxElapsedTime = 10 * time.Millisecond
+	now := time.Now()
+	retryBackend := New(be, maxElapsedTime, nil, nil)
 
 	var listed []string
 	err := retryBackend.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
@@ -206,8 +208,9 @@
 		t.Fatalf("wrong error returned, want %v, got %v", ErrBackendTest, err)
 	}
 
-	if retries != maxRetries+1 {
-		t.Fatalf("List was called %d times, wanted %v", retries, maxRetries+1)
+	duration := time.Since(now)
+	if duration > 100*time.Millisecond {
+		t.Fatalf("list retries took %v, expected at most 10ms", duration)
 	}
 
 	test.Equals(t, names[:2], listed)
@@ -289,7 +292,7 @@
 		}
 		return nil, notFound
 	}
-	be.IsNotExistFn = func(err error) bool {
+	be.IsPermanentErrorFn = func(err error) bool {
 		return errors.Is(err, notFound)
 	}
 
@@ -299,10 +302,85 @@
 	err := retryBackend.Load(context.TODO(), backend.Handle{}, 0, 0, func(rd io.Reader) (err error) {
 		return nil
 	})
-	test.Assert(t, be.IsNotExistFn(err), "unexpected error %v", err)
+	test.Assert(t, be.IsPermanentErrorFn(err), "unexpected error %v", err)
 	test.Equals(t, 1, attempt)
 }
 
+func TestBackendLoadCircuitBreaker(t *testing.T) {
+	// repeated non-permanent failures should trip the circuit breaker and make later Loads fail fast
+	notFound := errors.New("not found")
+	otherError := errors.New("something")
+	attempt := 0
+
+	be := mock.NewBackend()
+	be.IsPermanentErrorFn = func(err error) bool {
+		return errors.Is(err, notFound)
+	}
+	be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
+		attempt++
+		return nil, otherError
+	}
+	nilRd := func(rd io.Reader) (err error) {
+		return nil
+	}
+
+	TestFastRetries(t)
+	retryBackend := New(be, 2, nil, nil)
+	// trip the circuit breaker for file "other"
+	err := retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd)
+	test.Equals(t, otherError, err, "unexpected error")
+	test.Equals(t, 2, attempt)
+
+	attempt = 0
+	err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd)
+	test.Assert(t, strings.Contains(err.Error(), "circuit breaker open for file"), "expected circuit breaker error, got %v", err)
+	test.Equals(t, 0, attempt)
+
+	// don't trip for permanent errors
+	be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
+		attempt++
+		return nil, notFound
+	}
+	err = retryBackend.Load(context.TODO(), backend.Handle{Name: "notfound"}, 0, 0, nilRd)
+	test.Equals(t, notFound, err, "expected circuit breaker to only affect the other file")
+	err = retryBackend.Load(context.TODO(), backend.Handle{Name: "notfound"}, 0, 0, nilRd)
+	test.Equals(t, notFound, err, "permanent error must not trigger the circuit breaker")
+
+	// wait for circuit breaker to expire
+	time.Sleep(5 * time.Millisecond)
+	old := failedLoadExpiry
+	defer func() {
+		failedLoadExpiry = old
+	}()
+	failedLoadExpiry = 3 * time.Millisecond
+	err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd)
+	test.Equals(t, notFound, err, "expected circuit breaker to reset")
+}
+
+func TestBackendLoadCircuitBreakerCancel(t *testing.T) {
+	cctx, cancel := context.WithCancel(context.Background())
+	be := mock.NewBackend()
+	be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
+		cancel()
+		return nil, errors.New("something")
+	}
+	nilRd := func(rd io.Reader) (err error) {
+		return nil
+	}
+
+	TestFastRetries(t)
+	retryBackend := New(be, 2, nil, nil)
+	// canceling the context should not trip the circuit breaker
+	err := retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd)
+	test.Equals(t, context.Canceled, err, "unexpected error")
+
+	// reset context and check that the circuit breaker does not return an error
+	cctx, cancel = context.WithCancel(context.Background())
+	defer cancel()
+	err = retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd)
+	test.Equals(t, context.Canceled, err, "unexpected error")
+}
+
 func TestBackendStatNotExists(t *testing.T) {
 	// stat should not retry if the error matches IsNotExist
 	notFound := errors.New("not found")
@@ -322,13 +400,47 @@
 	}
 
 	TestFastRetries(t)
-	retryBackend := New(be, 10, nil, nil)
+	retryBackend := New(be, 10, func(s string, err error, d time.Duration) {
+		t.Fatalf("unexpected error output %v", s)
+	}, func(s string, i int) {
+		t.Fatalf("unexpected log output %v", s)
+	})
 
 	_, err := retryBackend.Stat(context.TODO(), backend.Handle{})
 	test.Assert(t, be.IsNotExistFn(err), "unexpected error %v", err)
 	test.Equals(t, 1, attempt)
 }
 
+func TestBackendRetryPermanent(t *testing.T) {
+	// retry should not retry if the error matches IsPermanentError
+	notFound := errors.New("not found")
+	attempt := 0
+
+	be := mock.NewBackend()
+	be.IsPermanentErrorFn = func(err error) bool {
+		return errors.Is(err, notFound)
+	}
+
+	TestFastRetries(t)
+	retryBackend := New(be, 2, nil, nil)
+	err := retryBackend.retry(context.TODO(), "test", func() error {
+		attempt++
+		return notFound
+	})
+
+	test.Assert(t, be.IsPermanentErrorFn(err), "unexpected error %v", err)
+	test.Equals(t, 1, attempt)
+
+	attempt = 0
+	err = retryBackend.retry(context.TODO(), "test", func() error {
+		attempt++
+		return errors.New("something")
+	})
+	test.Assert(t, !be.IsPermanentErrorFn(err), "error unexpectedly considered permanent %v", err)
+	test.Equals(t, 2, attempt)
+}
+
 func assertIsCanceled(t *testing.T, err error) {
 	test.Assert(t, err == context.Canceled, "got unexpected err %v", err)
 }
@@ -376,7 +488,7 @@
 		t.Fatal("Success should not have been called")
 	}
 
-	err := retryNotifyErrorWithSuccess(operation, &backoff.ZeroBackOff{}, notify, success)
+	err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, context.Background()), notify, success)
 	if err != nil {
 		t.Fatal("retry should not have returned an error")
 	}
@@ -402,7 +514,7 @@
 		successCalled++
 	}
 
-	err := retryNotifyErrorWithSuccess(operation, &backoff.ZeroBackOff{}, notify, success)
+	err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, context.Background()), notify, success)
 	if err != nil {
 		t.Fatal("retry should not 
have returned an error") } @@ -415,3 +527,83 @@ func TestNotifyWithSuccessIsCalled(t *testing.T) { t.Fatalf("Success should have been called only once, but was called %d times instead", successCalled) } } + +func TestNotifyWithSuccessFinalError(t *testing.T) { + operation := func() error { + return errors.New("expected error in test") + } + + notifyCalled := 0 + notify := func(error, time.Duration) { + notifyCalled++ + } + + successCalled := 0 + success := func(retries int) { + successCalled++ + } + + err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(backoff.WithMaxRetries(&backoff.ZeroBackOff{}, 5), context.Background()), notify, success) + test.Assert(t, err.Error() == "expected error in test", "wrong error message %v", err) + test.Equals(t, 6, notifyCalled, "notify should have been called 6 times") + test.Equals(t, 0, successCalled, "success should not have been called") +} + +func TestNotifyWithCancelError(t *testing.T) { + operation := func() error { + return errors.New("expected error in test") + } + + notify := func(error, time.Duration) { + t.Error("unexpected call to notify") + } + + success := func(retries int) { + t.Error("unexpected call to success") + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, ctx), notify, success) + test.Assert(t, err == context.Canceled, "wrong error message %v", err) +} + +type testClock struct { + Time time.Time +} + +func (c *testClock) Now() time.Time { + return c.Time +} + +func TestRetryAtLeastOnce(t *testing.T) { + expBackOff := backoff.NewExponentialBackOff() + expBackOff.InitialInterval = 500 * time.Millisecond + expBackOff.RandomizationFactor = 0 + expBackOff.MaxElapsedTime = 5 * time.Second + expBackOff.Multiplier = 2 // guarantee numerical stability + clock := &testClock{Time: time.Now()} + expBackOff.Clock = clock + expBackOff.Reset() + + retry := withRetryAtLeastOnce(expBackOff) + + // expire backoff + clock.Time = clock.Time.Add(10 * time.Second) + delay := retry.NextBackOff() + test.Equals(t, expBackOff.InitialInterval, delay, "must retry at least once") + + delay = retry.NextBackOff() + test.Equals(t, expBackOff.Stop, delay, "must not retry more than once") + + // test reset behavior + retry.Reset() + test.Equals(t, uint64(0), retry.numTries, "numTries should be reset to 0") + + // Verify that after reset, NextBackOff returns the initial interval again + delay = retry.NextBackOff() + test.Equals(t, expBackOff.InitialInterval, delay, "retries must work after reset") + + delay = retry.NextBackOff() + test.Equals(t, expBackOff.InitialInterval*time.Duration(expBackOff.Multiplier), delay, "retries must work after reset") +} diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go index b4d44399fea..be2a78ce5cc 100644 --- a/internal/backend/s3/config.go +++ b/internal/backend/s3/config.go @@ -20,14 +20,15 @@ type Config struct { Secret options.SecretString Bucket string Prefix string - Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"` StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"` - Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` - MaxRetries uint `option:"retries" help:"set the number of 
retries attempted"` - Region string `option:"region" help:"set region"` - BucketLookup string `option:"bucket-lookup" help:"bucket lookup style: 'auto', 'dns', or 'path'"` - ListObjectsV1 bool `option:"list-objects-v1" help:"use deprecated V1 api for ListObjects calls"` + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + MaxRetries uint `option:"retries" help:"set the number of retries attempted"` + Region string `option:"region" help:"set region"` + BucketLookup string `option:"bucket-lookup" help:"bucket lookup style: 'auto', 'dns', or 'path'"` + ListObjectsV1 bool `option:"list-objects-v1" help:"use deprecated V1 api for ListObjects calls"` + UnsafeAnonymousAuth bool `option:"unsafe-anonymous-auth" help:"use anonymous authentication"` } // NewConfig returns a new Config with the default values filled in. diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index d41f4479df6..e3d4cc499dc 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -9,7 +9,6 @@ import ( "os" "path" "strings" - "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" @@ -17,6 +16,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -36,9 +36,7 @@ func NewFactory() location.Factory { return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open) } -const defaultLayout = "default" - -func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) { +func open(cfg Config, rt http.RoundTripper) (*Backend, error) { debug.Log("open, config %#v", cfg) if cfg.KeyID == "" && cfg.Secret.String() != "" { @@ -51,7 +49,7 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro minio.MaxRetry = int(cfg.MaxRetries) } - creds, err := getCredentials(cfg) + creds, err := getCredentials(cfg, rt) if err != nil { return nil, errors.Wrap(err, "s3.getCredentials") } @@ -82,21 +80,19 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro be := &Backend{ client: client, cfg: cfg, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), } - l, err := layout.ParseLayout(ctx, be, cfg.Layout, defaultLayout, cfg.Prefix) - if err != nil { - return nil, err - } - - be.Layout = l - return be, nil } // getCredentials -- runs through the various credential types and returns the first one that works. // additionally if the user has specified a role to assume, it will do that as well. -func getCredentials(cfg Config) (*credentials.Credentials, error) { +func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, error) { + if cfg.UnsafeAnonymousAuth { + return credentials.New(&credentials.Static{}), nil + } + // Chains all credential types, in the following order: // - Static credentials provided by user // - AWS env vars (i.e. 
AWS_ACCESS_KEY_ID)
 @@ -119,7 +115,7 @@ func getCredentials(cfg Config) (*credentials.Credentials, error) {
 &credentials.FileMinioClient{},
 &credentials.IAM{
 Client: &http.Client{
- Transport: http.DefaultTransport,
+ Transport: tr,
 },
 },
 })
 @@ -130,7 +126,10 @@ func getCredentials(cfg Config) (*credentials.Credentials, error) {
 }
 if c.SignerType == credentials.SignatureAnonymous {
- debug.Log("using anonymous access for %#v", cfg.Endpoint)
+ // Fail if no credentials were found to prevent repeated attempts to (unsuccessfully) retrieve new credentials.
+ // The first attempt still has to time out, which slows down restic usage considerably. Thus, migrate towards forcing
+ // users to explicitly decide between authenticated and anonymous access.
+ return nil, fmt.Errorf("no credentials found. Use `-o s3.unsafe-anonymous-auth=true` for anonymous authentication")
 }
 roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN")
 @@ -181,14 +180,14 @@ func getCredentials(cfg Config) (*credentials.Credentials, error) {
 // Open opens the S3 backend at bucket and region. The bucket is created if it
 // does not exist yet.
-func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
- return open(ctx, cfg, rt)
+func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
+ return open(cfg, rt)
 }
 // Create opens the S3 backend at bucket and region and creates the bucket if
 // it does not exist yet.
 func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
- be, err := open(ctx, cfg, rt)
+ be, err := open(cfg, rt)
 if err != nil {
 return nil, errors.Wrap(err, "open")
 }
 @@ -229,87 +228,25 @@ func (be *Backend) IsNotExist(err error) bool {
 return errors.As(err, &e) && e.Code == "NoSuchKey"
 }
-// Join combines path components with slashes.
-func (be *Backend) Join(p ...string) string {
- return path.Join(p...)
-}
-
-type fileInfo struct {
- name string
- size int64
- mode os.FileMode
- modTime time.Time
- isDir bool
-}
-
-func (fi *fileInfo) Name() string { return fi.name } // base name of the file
-func (fi *fileInfo) Size() int64 { return fi.size } // length in bytes for regular files; system-dependent for others
-func (fi *fileInfo) Mode() os.FileMode { return fi.mode } // file mode bits
-func (fi *fileInfo) ModTime() time.Time { return fi.modTime } // modification time
-func (fi *fileInfo) IsDir() bool { return fi.isDir } // abbreviation for Mode().IsDir()
-func (fi *fileInfo) Sys() interface{} { return nil } // underlying data source (can return nil)
-
-// ReadDir returns the entries for a directory.
-func (be *Backend) ReadDir(ctx context.Context, dir string) (list []os.FileInfo, err error) {
- debug.Log("ReadDir(%v)", dir)
-
- // make sure dir ends with a slash
- if dir[len(dir)-1] != '/' {
- dir += "/"
+func (be *Backend) IsPermanentError(err error) bool {
+ if be.IsNotExist(err) {
+ return true
 }
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)
-
- for obj := range be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
- Prefix: dir,
- Recursive: false,
- UseV1: be.cfg.ListObjectsV1,
- }) {
- if obj.Err != nil {
- return nil, err
- }
-
- if obj.Key == "" {
- continue
- }
-
- name := strings.TrimPrefix(obj.Key, dir)
- // Sometimes s3 returns an entry for the dir itself. Ignore it.
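
// For illustration: a minimal sketch of the credential selection introduced in
// the hunk above, assuming the minio-go v7 credentials package; the parameter
// names here are hypothetical stand-ins, not restic's actual config plumbing.
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func pickCredentials(anonymous bool, keyID, secret string, rt http.RoundTripper) (*credentials.Credentials, error) {
	if anonymous {
		// an empty Static provider yields unsigned (anonymous) requests
		return credentials.New(&credentials.Static{}), nil
	}

	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},
		&credentials.Static{Value: credentials.Value{AccessKeyID: keyID, SecretAccessKey: secret}},
		&credentials.EnvMinio{},
		// reuse the backend's RoundTripper instead of http.DefaultTransport,
		// so IAM credential lookups honor custom TLS and proxy settings
		&credentials.IAM{Client: &http.Client{Transport: rt}},
	})

	c, err := creds.Get()
	if err != nil {
		return nil, err
	}
	if c.SignerType == credentials.SignatureAnonymous {
		// fail fast rather than silently falling back to anonymous access
		return nil, fmt.Errorf("no credentials found; request anonymous access explicitly")
	}
	return creds, nil
}
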
- if name == "" { - continue + var merr minio.ErrorResponse + if errors.As(err, &merr) { + if merr.Code == "InvalidRange" || merr.Code == "AccessDenied" { + return true } - entry := &fileInfo{ - name: name, - size: obj.Size, - modTime: obj.LastModified, - } - - if name[len(name)-1] == '/' { - entry.isDir = true - entry.mode = os.ModeDir | 0755 - entry.name = name[:len(name)-1] - } else { - entry.mode = 0644 - } - - list = append(list, entry) } - return list, nil + return false } func (be *Backend) Connections() uint { return be.cfg.Connections } -// Location returns this backend's location (the bucket name). -func (be *Backend) Location() string { - return be.Join(be.cfg.Bucket, be.cfg.Prefix) -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *Backend) Hasher() hash.Hash { return nil @@ -384,11 +321,18 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, } coreClient := minio.Core{Client: be.client} - rd, _, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts) + rd, info, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts) if err != nil { return nil, err } + if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 { + if info.Size > 0 && info.Size != int64(length) { + _ = rd.Close() + return nil, minio.ErrorResponse{Code: "InvalidRange", Message: "restic-file-too-short"} + } + } + return rd, err } @@ -496,40 +440,3 @@ func (be *Backend) Delete(ctx context.Context) error { // Close does nothing func (be *Backend) Close() error { return nil } - -// Rename moves a file based on the new layout l. -func (be *Backend) Rename(ctx context.Context, h backend.Handle, l layout.Layout) error { - debug.Log("Rename %v to %v", h, l) - oldname := be.Filename(h) - newname := l.Filename(h) - - if oldname == newname { - debug.Log(" %v is already renamed", newname) - return nil - } - - debug.Log(" %v -> %v", oldname, newname) - - src := minio.CopySrcOptions{ - Bucket: be.cfg.Bucket, - Object: oldname, - } - - dst := minio.CopyDestOptions{ - Bucket: be.cfg.Bucket, - Object: newname, - } - - _, err := be.client.CopyObject(ctx, dst, src) - if err != nil && be.IsNotExist(err) { - debug.Log("copy failed: %v, seems to already have been renamed", err) - return nil - } - - if err != nil { - debug.Log("copy failed: %v", err) - return err - } - - return be.client.RemoveObject(ctx, be.cfg.Bucket, oldname, minio.RemoveObjectOptions{}) -} diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go index 65af50d196a..daefbf441d9 100644 --- a/internal/backend/sftp/config.go +++ b/internal/backend/sftp/config.go @@ -13,7 +13,6 @@ import ( type Config struct { User, Host, Port, Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` Command string `option:"command" help:"specify command to create sftp connection"` Args string `option:"args" help:"specify arguments for ssh"` diff --git a/internal/backend/sftp/layout_test.go b/internal/backend/sftp/layout_test.go index 9cf24a75365..9e143d4fdd2 100644 --- a/internal/backend/sftp/layout_test.go +++ b/internal/backend/sftp/layout_test.go @@ -20,20 +20,14 @@ func TestLayout(t *testing.T) { var tests = []struct { filename string - layout string failureExpected bool packfiles map[string]bool }{ - {"repo-layout-default.tar.gz", "", false, map[string]bool{ + {"repo-layout-default.tar.gz", false, map[string]bool{ "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, 
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, }}, - {"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{ - "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, - "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, - "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, - }}, } for _, test := range tests { @@ -44,7 +38,6 @@ func TestLayout(t *testing.T) { be, err := sftp.Open(context.TODO(), sftp.Config{ Command: fmt.Sprintf("%q -e", sftpServer), Path: repo, - Layout: test.layout, Connections: 5, }) if err != nil { diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 0a94e4aa3c9..14819a2df6f 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -20,6 +20,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/cenkalti/backoff/v4" "github.com/pkg/sftp" @@ -43,12 +44,12 @@ type SFTP struct { var _ backend.Backend = &SFTP{} +var errTooShort = fmt.Errorf("file is too short") + func NewFactory() location.Factory { return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } -const defaultLayout = "default" - func startClient(cfg Config) (*SFTP, error) { program, args, err := buildSSHCommand(cfg) if err != nil { @@ -85,7 +86,7 @@ func startClient(cfg Config) (*SFTP, error) { bg, err := util.StartForeground(cmd) if err != nil { - if util.IsErrDot(err) { + if errors.Is(err, exec.ErrDot) { return nil, errors.Errorf("cannot implicitly run relative executable %v found in current directory, use -o sftp.command=./ to override", cmd.Path) } return nil, err @@ -102,7 +103,12 @@ func startClient(cfg Config) (*SFTP, error) { }() // open the SFTP session - client, err := sftp.NewClientPipe(rd, wr) + client, err := sftp.NewClientPipe(rd, wr, + // write multiple packets (32kb) in parallel per file + // not strictly necessary as we use ReadFromWithConcurrency + sftp.UseConcurrentWrites(true), + // increase send buffer per file to 4MB + sftp.MaxConcurrentRequestsPerFile(128)) if err != nil { return nil, errors.Errorf("unable to start the sftp session, error: %v", err) } @@ -113,7 +119,13 @@ func startClient(cfg Config) (*SFTP, error) { } _, posixRename := client.HasExtension("posix-rename@openssh.com") - return &SFTP{c: client, cmd: cmd, result: ch, posixRename: posixRename}, nil + return &SFTP{ + c: client, + cmd: cmd, + result: ch, + posixRename: posixRename, + Layout: layout.NewDefaultLayout(cfg.Path, path.Join), + }, nil } // clientError returns an error if the client has exited. Otherwise, nil is @@ -131,7 +143,7 @@ func (r *SFTP) clientError() error { // Open opens an sftp backend as described by the config by running // "ssh" with the appropriate arguments (or cfg.Command, if set). 
-func Open(ctx context.Context, cfg Config) (*SFTP, error) { +func Open(_ context.Context, cfg Config) (*SFTP, error) { debug.Log("open backend with config %#v", cfg) sftp, err := startClient(cfg) @@ -140,18 +152,10 @@ func Open(ctx context.Context, cfg Config) (*SFTP, error) { return nil, err } - return open(ctx, sftp, cfg) + return open(sftp, cfg) } -func open(ctx context.Context, sftp *SFTP, cfg Config) (*SFTP, error) { - var err error - sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path) - if err != nil { - return nil, err - } - - debug.Log("layout: %v\n", sftp.Layout) - +func open(sftp *SFTP, cfg Config) (*SFTP, error) { fi, err := sftp.c.Stat(sftp.Layout.Filename(backend.Handle{Type: backend.ConfigFile})) m := util.DeriveModesFromFileInfo(fi, err) debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir) @@ -187,26 +191,15 @@ func (r *SFTP) mkdirAllDataSubdirs(ctx context.Context, nconn uint) error { return g.Wait() } -// Join combines path components with slashes (according to the sftp spec). -func (r *SFTP) Join(p ...string) string { - return path.Join(p...) -} - -// ReadDir returns the entries for a directory. -func (r *SFTP) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) { - fi, err := r.c.ReadDir(dir) - - // sftp client does not specify dir name on error, so add it here - err = errors.Wrapf(err, "(%v)", dir) - - return fi, err -} - // IsNotExist returns true if the error is caused by a not existing file. func (r *SFTP) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) } +func (r *SFTP) IsPermanentError(err error) bool { + return r.IsNotExist(err) || errors.Is(err, errTooShort) || errors.Is(err, os.ErrPermission) +} + func buildSSHCommand(cfg Config) (cmd string, args []string, err error) { if cfg.Command != "" { args, err := backend.SplitShellStrings(cfg.Command) @@ -254,11 +247,6 @@ func Create(ctx context.Context, cfg Config) (*SFTP, error) { return nil, err } - sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path) - if err != nil { - return nil, err - } - sftp.Modes = util.DefaultModes // test if config file already exists @@ -273,18 +261,13 @@ func Create(ctx context.Context, cfg Config) (*SFTP, error) { } // repurpose existing connection - return open(ctx, sftp, cfg) + return open(sftp, cfg) } func (r *SFTP) Connections() uint { return r.Config.Connections } -// Location returns this backend's location (the directory name). -func (r *SFTP) Location() string { - return r.p -} - // Hasher may return a hash function for calculating a content hash for the backend func (r *SFTP) Hasher() hash.Hash { return nil @@ -295,12 +278,6 @@ func (r *SFTP) HasAtomicReplace() bool { return r.posixRename } -// Join joins the given paths and cleans them afterwards. This always uses -// forward slashes, which is required by sftp. 
-func Join(parts ...string) string { - return path.Clean(path.Join(parts...)) -} - // tempSuffix generates a random string suffix that should be sufficiently long // to avoid accidental conflicts func tempSuffix() string { @@ -359,7 +336,7 @@ func (r *SFTP) Save(_ context.Context, h backend.Handle, rd backend.RewindReader }() // save data, make sure to use the optimized sftp upload method - wbytes, err := f.ReadFrom(rd) + wbytes, err := f.ReadFromWithConcurrency(rd, 0) if err != nil { _ = f.Close() err = r.checkNoSpace(dirname, rd.Length(), err) @@ -414,7 +391,28 @@ func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error { // Load runs fn with a reader that yields the contents of the file at h at the // given offset. func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { - return util.DefaultLoad(ctx, h, length, offset, r.openReader, fn) + if err := r.clientError(); err != nil { + return err + } + + return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error { + if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) { + return fn(rd) + } + + // there is no direct way to efficiently check whether the file is too short + // rd is already a LimitedReader which can be used to track the number of bytes read + err := fn(rd) + + // check the underlying reader to be agnostic to however fn() handles the returned error + _, rderr := rd.Read([]byte{0}) + if rderr == io.EOF && rd.(*util.LimitedReadCloser).N != 0 { + // file is too short + return fmt.Errorf("%w: %v", errTooShort, err) + } + + return err + }) } func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { @@ -434,7 +432,7 @@ func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offse if length > 0 { // unlimited reads usually use io.Copy which needs WriteTo support at the underlying reader // limited reads are usually combined with io.ReadFull which reads all required bytes into a buffer in one go - return backend.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), nil } return f, nil @@ -466,6 +464,10 @@ func (r *SFTP) Remove(_ context.Context, h backend.Handle) error { // List runs fn for each file in the backend which has the type t. When an // error occurs (or fn returns an error), List stops and returns it. 
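
// For illustration: the core of the too-short detection added to Load above,
// reduced to plain io primitives. A sketch only; restic's version works on its
// util.LimitedReadCloser and the errTooShort sentinel defined earlier.
package main

import (
	"errors"
	"fmt"
	"io"
)

var errFileTooShort = errors.New("file is too short")

// consumeExactly runs fn on a reader limited to length bytes and reports
// errFileTooShort if the source hit EOF before delivering all of them.
func consumeExactly(src io.Reader, length int64, fn func(io.Reader) error) error {
	lr := &io.LimitedReader{R: src, N: length}
	err := fn(lr)

	// probe the limited reader: EOF while the limit has not been exhausted
	// means the file was shorter than requested, regardless of how fn
	// handled its own read errors
	if _, rderr := lr.Read([]byte{0}); rderr == io.EOF && lr.N != 0 {
		return fmt.Errorf("%w: %v", errFileTooShort, err)
	}
	return err
}
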
func (r *SFTP) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { + if err := r.clientError(); err != nil { + return err + } + basedir, subdirs := r.Basedir(t) walker := r.c.Walk(basedir) for { @@ -548,13 +550,17 @@ func (r *SFTP) Close() error { } func (r *SFTP) deleteRecursive(ctx context.Context, name string) error { - entries, err := r.ReadDir(ctx, name) + entries, err := r.c.ReadDir(name) if err != nil { - return errors.Wrap(err, "ReadDir") + return errors.Wrapf(err, "ReadDir(%v)", name) } for _, fi := range entries { - itemName := r.Join(name, fi.Name()) + if ctx.Err() != nil { + return ctx.Err() + } + + itemName := path.Join(name, fi.Name()) if fi.IsDir() { err := r.deleteRecursive(ctx, itemName) if err != nil { diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index 6943f018079..dfa2055cdf1 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -19,6 +19,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/ncw/swift/v2" ) @@ -71,10 +72,7 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen connections: cfg.Connections, container: cfg.Container, prefix: cfg.Prefix, - Layout: &layout.DefaultLayout{ - Path: cfg.Prefix, - Join: path.Join, - }, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), } // Authenticate if needed @@ -117,11 +115,6 @@ func (be *beSwift) Connections() uint { return be.connections } -// Location returns this backend's location (the container name). -func (be *beSwift) Location() string { - return be.container -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *beSwift) Hasher() hash.Hash { return md5.New() @@ -153,7 +146,18 @@ func (be *beSwift) openReader(ctx context.Context, h backend.Handle, length int, obj, _, err := be.conn.ObjectOpen(ctx, be.container, objName, false, headers) if err != nil { - return nil, errors.Wrap(err, "conn.ObjectOpen") + return nil, fmt.Errorf("conn.ObjectOpen: %w", err) + } + + if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 { + // get response length, but don't cause backend calls + cctx, cancel := context.WithCancel(context.Background()) + cancel() + objLength, e := obj.Length(cctx) + if e == nil && objLength != int64(length) { + _ = obj.Close() + return nil, &swift.Error{StatusCode: http.StatusRequestedRangeNotSatisfiable, Text: "restic-file-too-short"} + } } return obj, nil @@ -242,6 +246,21 @@ func (be *beSwift) IsNotExist(err error) bool { return errors.As(err, &e) && e.StatusCode == http.StatusNotFound } +func (be *beSwift) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var serr *swift.Error + if errors.As(err, &serr) { + if serr.StatusCode == http.StatusRequestedRangeNotSatisfiable || serr.StatusCode == http.StatusUnauthorized || serr.StatusCode == http.StatusForbidden { + return true + } + } + + return false +} + // Delete removes all restic objects in the container. // It will not remove the container itself. 
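
// For illustration: what IsPermanentError buys the caller. A hypothetical retry
// helper could stop immediately on errors that retrying cannot fix (missing
// files, out-of-range reads, failed authentication) instead of burning through
// its retry budget. A sketch with a fixed delay for brevity.
package main

import (
	"context"
	"time"
)

type permanentErrorChecker interface {
	IsPermanentError(err error) bool
}

func retryOp(ctx context.Context, be permanentErrorChecker, op func() error) error {
	for {
		err := op()
		if err == nil || be.IsPermanentError(err) {
			return err
		}
		select {
		case <-time.After(time.Second):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
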
func (be *beSwift) Delete(ctx context.Context) error { diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go index 414bf1c3bef..add2f531afc 100644 --- a/internal/backend/test/tests.go +++ b/internal/backend/test/tests.go @@ -3,6 +3,7 @@ package test import ( "bytes" "context" + "crypto/sha256" "fmt" "io" "math/rand" @@ -12,7 +13,6 @@ import ( "testing" "time" - "github.com/minio/sha256-simd" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" @@ -21,10 +21,11 @@ import ( "github.com/restic/restic/internal/backend" ) -func seedRand(t testing.TB) { +func seedRand(t testing.TB) *rand.Rand { seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand initialized with seed %d", seed) + return random } func beTest(ctx context.Context, be backend.Backend, h backend.Handle) (bool, error) { @@ -36,6 +37,19 @@ func beTest(ctx context.Context, be backend.Backend, h backend.Handle) (bool, er return err == nil, err } +func LoadAll(ctx context.Context, be backend.Backend, h backend.Handle) ([]byte, error) { + var buf []byte + err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error { + var err error + buf, err = io.ReadAll(rd) + return err + }) + if err != nil { + return nil, err + } + return buf, nil +} + // TestStripPasswordCall tests that the StripPassword method of a factory can be called without crashing. // It does not verify whether passwords are removed correctly func (s *Suite[C]) TestStripPasswordCall(_ *testing.T) { @@ -75,17 +89,6 @@ func (s *Suite[C]) TestCreateWithConfig(t *testing.T) { } } -// TestLocation tests that a location string is returned. -func (s *Suite[C]) TestLocation(t *testing.T) { - b := s.open(t) - defer s.close(t, b) - - l := b.Location() - if l == "" { - t.Fatalf("invalid location string %q", l) - } -} - // TestConfig saves and loads a config from the backend. func (s *Suite[C]) TestConfig(t *testing.T) { b := s.open(t) @@ -94,11 +97,12 @@ func (s *Suite[C]) TestConfig(t *testing.T) { var testString = "Config" // create config and read it back - _, err := backend.LoadAll(context.TODO(), nil, b, backend.Handle{Type: backend.ConfigFile}) + _, err := LoadAll(context.TODO(), b, backend.Handle{Type: backend.ConfigFile}) if err == nil { t.Fatalf("did not get expected error for non-existing config") } test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize error from LoadAll(): %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize error from LoadAll(): %v", err) err = b.Save(context.TODO(), backend.Handle{Type: backend.ConfigFile}, backend.NewByteReader([]byte(testString), b.Hasher())) if err != nil { @@ -109,7 +113,7 @@ func (s *Suite[C]) TestConfig(t *testing.T) { // same config for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { h := backend.Handle{Type: backend.ConfigFile, Name: name} - buf, err := backend.LoadAll(context.TODO(), nil, b, h) + buf, err := LoadAll(context.TODO(), b, h) if err != nil { t.Fatalf("unable to read config with name %q: %+v", name, err) } @@ -125,7 +129,7 @@ func (s *Suite[C]) TestConfig(t *testing.T) { // TestLoad tests the backend's Load function. 
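
// For illustration: the seedRand change above replaces the global rand.Seed
// (deprecated since Go 1.20) with a per-test *rand.Rand. Each test gets its own
// deterministic source, and logging the seed lets a failing run be replayed
// without tests interfering with each other's random state. A minimal sketch:
package main

import (
	"math/rand"
	"testing"
	"time"
)

func newTestRand(t testing.TB) *rand.Rand {
	seed := time.Now().UnixNano()
	// log the seed so a failure can be reproduced with a fixed source
	t.Logf("rand initialized with seed %d", seed)
	return rand.New(rand.NewSource(seed))
}
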
func (s *Suite[C]) TestLoad(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer s.close(t, b) @@ -135,8 +139,9 @@ func (s *Suite[C]) TestLoad(t *testing.T) { t.Fatalf("Load() did not return an error for non-existing blob") } test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize non-existing blob: %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize non-existing blob: %v", err) - length := rand.Intn(1<<24) + 2000 + length := random.Intn(1<<24) + 2000 data := test.Random(23, length) id := restic.Hash(data) @@ -169,8 +174,8 @@ func (s *Suite[C]) TestLoad(t *testing.T) { } for i := 0; i < loadTests; i++ { - l := rand.Intn(length + 2000) - o := rand.Intn(length + 2000) + l := random.Intn(length + 2000) + o := random.Intn(length + 2000) d := data if o < len(d) { @@ -181,8 +186,12 @@ func (s *Suite[C]) TestLoad(t *testing.T) { } getlen := l - if l >= len(d) && rand.Float32() >= 0.5 { - getlen = 0 + if l >= len(d) { + if random.Float32() >= 0.5 { + getlen = 0 + } else { + getlen = len(d) + } } if l > 0 && l < len(d) { @@ -225,6 +234,18 @@ func (s *Suite[C]) TestLoad(t *testing.T) { } } + // test error checking for partial and fully out of bounds read + // only test for length > 0 as we currently do not need strict out of bounds handling for length==0 + for _, offset := range []int{length - 99, length - 50, length, length + 100} { + err = b.Load(context.TODO(), handle, 100, int64(offset), func(rd io.Reader) (ierr error) { + _, ierr = io.ReadAll(rd) + return ierr + }) + test.Assert(t, err != nil, "Load() did not return error on out of bounds read! o %v, l %v, filelength %v", offset, 100, length) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize out of range read: %v", err) + test.Assert(t, !b.IsNotExist(err), "IsNotExist() must not recognize out of range read: %v", err) + } + test.OK(t, b.Remove(context.TODO(), handle)) } @@ -234,9 +255,9 @@ type setter interface { // TestList makes sure that the backend implements List() pagination correctly. func (s *Suite[C]) TestList(t *testing.T) { - seedRand(t) + random := seedRand(t) - numTestFiles := rand.Intn(20) + 20 + numTestFiles := random.Intn(20) + 20 b := s.open(t) defer s.close(t, b) @@ -257,7 +278,7 @@ func (s *Suite[C]) TestList(t *testing.T) { list1 := make(map[restic.ID]int64) for i := 0; i < numTestFiles; i++ { - data := test.Random(rand.Int(), rand.Intn(100)+55) + data := test.Random(random.Int(), random.Intn(100)+55) id := restic.Hash(data) h := backend.Handle{Type: backend.PackFile, Name: id.String()} err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) @@ -333,8 +354,6 @@ func (s *Suite[C]) TestList(t *testing.T) { // TestListCancel tests that the context is respected and the error is returned by List. func (s *Suite[C]) TestListCancel(t *testing.T) { - seedRand(t) - numTestFiles := 5 b := s.open(t) @@ -478,7 +497,7 @@ func (ec errorCloser) Rewind() error { // TestSave tests saving data in the backend. 
func (s *Suite[C]) TestSave(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer s.close(t, b) @@ -490,7 +509,7 @@ func (s *Suite[C]) TestSave(t *testing.T) { } for i := 0; i < saveTests; i++ { - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(23, length) id = sha256.Sum256(data) @@ -501,7 +520,7 @@ func (s *Suite[C]) TestSave(t *testing.T) { err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) test.OK(t, err) - buf, err := backend.LoadAll(context.TODO(), nil, b, h) + buf, err := LoadAll(context.TODO(), b, h) test.OK(t, err) if len(buf) != len(data) { t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) @@ -534,7 +553,7 @@ func (s *Suite[C]) TestSave(t *testing.T) { t.Fatal(err) } - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(23, length) id = sha256.Sum256(data) @@ -594,7 +613,7 @@ func (r *incompleteByteReader) Length() int64 { // TestSaveError tests saving data in the backend. func (s *Suite[C]) TestSaveError(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer func() { @@ -603,7 +622,7 @@ func (s *Suite[C]) TestSaveError(t *testing.T) { _ = b.Close() }() - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(24, length) var id restic.ID copy(id[:], data) @@ -633,7 +652,7 @@ func (b *wrongByteReader) Hash() []byte { // TestSaveWrongHash tests that uploads with a wrong hash fail func (s *Suite[C]) TestSaveWrongHash(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer s.close(t, b) @@ -642,7 +661,7 @@ func (s *Suite[C]) TestSaveWrongHash(t *testing.T) { return } - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(25, length) var id restic.ID copy(id[:], data) @@ -762,6 +781,7 @@ func (s *Suite[C]) TestBackend(t *testing.T) { defer s.close(t, b) test.Assert(t, !b.IsNotExist(nil), "IsNotExist() recognized nil error") + test.Assert(t, !b.IsPermanentError(nil), "IsPermanentError() recognized nil error") for _, tpe := range []backend.FileType{ backend.PackFile, backend.KeyFile, backend.LockFile, @@ -782,11 +802,13 @@ func (s *Suite[C]) TestBackend(t *testing.T) { _, err = b.Stat(context.TODO(), h) test.Assert(t, err != nil, "blob data could be extracted before creation") test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Stat() error: %v", err) // try to read not existing blob err = testLoad(b, h) test.Assert(t, err != nil, "blob could be read before creation") test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Load() error: %v", err) // try to get string out, should fail ret, err = beTest(context.TODO(), b, h) @@ -800,7 +822,7 @@ func (s *Suite[C]) TestBackend(t *testing.T) { // test Load() h := backend.Handle{Type: tpe, Name: ts.id} - buf, err := backend.LoadAll(context.TODO(), nil, b, h) + buf, err := LoadAll(context.TODO(), b, h) test.OK(t, err) test.Equals(t, ts.data, string(buf)) diff --git a/internal/backend/testdata/repo-layout-s3legacy.tar.gz b/internal/backend/testdata/repo-layout-s3legacy.tar.gz deleted file mode 100644 index 2b7d852cc9a..00000000000 Binary files a/internal/backend/testdata/repo-layout-s3legacy.tar.gz and /dev/null differ diff --git 
a/internal/backend/util/errdot_119.go b/internal/backend/util/errdot_119.go
deleted file mode 100644
index e20ed47b7df..00000000000
--- a/internal/backend/util/errdot_119.go
+++ /dev/null
@@ -1,20 +0,0 @@
-//go:build go1.19
-// +build go1.19
-
-// This file provides a function to check whether an error from cmd.Start() is
-// exec.ErrDot which was introduced in Go 1.19.
-// This function is needed so that we can perform this check only for Go 1.19 and
-// up, whereas for older versions we use a dummy/stub in the file errdot_old.go.
-// Once the minimum Go version restic supports is 1.19, remove this file and
-// replace any calls to it with the corresponding code as per below.
-
-package util
-
-import (
- "errors"
- "os/exec"
-)
-
-func IsErrDot(err error) bool {
- return errors.Is(err, exec.ErrDot)
-}
diff --git a/internal/backend/util/errdot_old.go b/internal/backend/util/errdot_old.go
deleted file mode 100644
index 4f7a0b40b3c..00000000000
--- a/internal/backend/util/errdot_old.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build !go1.19
-// +build !go1.19
-
-// This file provides a stub for IsErrDot() for Go versions below 1.19.
-// See the corresponding file errdot_119.go for more information.
-// Once the minimum Go version restic supports is 1.19, remove this file
-// and perform the actions listed in errdot_119.go.
-
-package util
-
-func IsErrDot(err error) bool {
- return false
-}
diff --git a/internal/backend/util/foreground.go b/internal/backend/util/foreground.go
index 35cbada1aae..477fc890022
--- a/internal/backend/util/foreground.go
+++ b/internal/backend/util/foreground.go
@@ -11,6 +11,9 @@ import (
 // to the previous process group.
 //
 // The command's environment has all RESTIC_* variables removed.
+//
+// Returns exec.ErrDot if it would implicitly run an executable from the current
+// directory.
 func StartForeground(cmd *exec.Cmd) (bg func() error, err error) {
 env := os.Environ() // Returns a copy that we can modify.
diff --git a/internal/backend/util/limited_reader.go b/internal/backend/util/limited_reader.go
new file mode 100644
index 00000000000..fdee1c06a98
--- /dev/null
+++ b/internal/backend/util/limited_reader.go
@@ -0,0 +1,15 @@
+package util
+
+import "io"
+
+// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
+type LimitedReadCloser struct {
+ io.Closer
+ io.LimitedReader
+}
+
+// LimitReadCloser returns a new reader that wraps r in an io.LimitedReader, but also
+// exposes the Close() method.
+func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser {
+ return &LimitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}}
+}
diff --git a/internal/backend/utils.go b/internal/backend/utils.go
deleted file mode 100644
index 16160829574..00000000000
--- a/internal/backend/utils.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package backend
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "fmt"
- "io"
-
- "github.com/minio/sha256-simd"
-
- "github.com/restic/restic/internal/debug"
- "github.com/restic/restic/internal/errors"
-)
-
-func verifyContentMatchesName(s string, data []byte) (bool, error) {
- if len(s) != hex.EncodedLen(sha256.Size) {
- return false, fmt.Errorf("invalid length for ID: %q", s)
- }
-
- b, err := hex.DecodeString(s)
- if err != nil {
- return false, fmt.Errorf("invalid ID: %s", err)
- }
- var id [sha256.Size]byte
- copy(id[:], b)
-
- hashed := sha256.Sum256(data)
- return id == hashed, nil
-}
-
-// LoadAll reads all data stored in the backend for the handle into the given
-// buffer, which is truncated.
If the buffer is not large enough or nil, a new -// one is allocated. -func LoadAll(ctx context.Context, buf []byte, be Backend, h Handle) ([]byte, error) { - retriedInvalidData := false - err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error { - // make sure this is idempotent, in case an error occurs this function may be called multiple times! - wr := bytes.NewBuffer(buf[:0]) - _, cerr := io.Copy(wr, rd) - if cerr != nil { - return cerr - } - buf = wr.Bytes() - - // retry loading damaged data only once. If a file fails to download correctly - // the second time, then it is likely corrupted at the backend. Return the data - // to the caller in that case to let it decide what to do with the data. - if !retriedInvalidData && h.Type != ConfigFile { - if matches, err := verifyContentMatchesName(h.Name, buf); err == nil && !matches { - debug.Log("retry loading broken blob %v", h) - retriedInvalidData = true - return errors.Errorf("loadAll(%v): invalid data returned", h) - } - } - return nil - }) - - if err != nil { - return nil, err - } - - return buf, nil -} - -// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method. -type LimitedReadCloser struct { - io.Closer - io.LimitedReader -} - -// LimitReadCloser returns a new reader wraps r in an io.LimitedReader, but also -// exposes the Close() method. -func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser { - return &LimitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}} -} diff --git a/internal/backend/utils_test.go b/internal/backend/utils_test.go deleted file mode 100644 index ad9540e5417..00000000000 --- a/internal/backend/utils_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package backend_test - -import ( - "bytes" - "context" - "io" - "math/rand" - "testing" - - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/mem" - "github.com/restic/restic/internal/backend/mock" - "github.com/restic/restic/internal/restic" - rtest "github.com/restic/restic/internal/test" -) - -const KiB = 1 << 10 -const MiB = 1 << 20 - -func TestLoadAll(t *testing.T) { - b := mem.New() - var buf []byte - - for i := 0; i < 20; i++ { - data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB) - - id := restic.Hash(data) - h := backend.Handle{Name: id.String(), Type: backend.PackFile} - err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) - rtest.OK(t, err) - - buf, err := backend.LoadAll(context.TODO(), buf, b, backend.Handle{Type: backend.PackFile, Name: id.String()}) - rtest.OK(t, err) - - if len(buf) != len(data) { - t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) - continue - } - - if !bytes.Equal(buf, data) { - t.Errorf("wrong data returned") - continue - } - } -} - -func save(t testing.TB, be backend.Backend, buf []byte) backend.Handle { - id := restic.Hash(buf) - h := backend.Handle{Name: id.String(), Type: backend.PackFile} - err := be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher())) - if err != nil { - t.Fatal(err) - } - return h -} - -type quickRetryBackend struct { - backend.Backend -} - -func (be *quickRetryBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { - err := be.Backend.Load(ctx, h, length, offset, fn) - if err != nil { - // retry - err = be.Backend.Load(ctx, h, length, offset, fn) - } - return err -} - -func TestLoadAllBroken(t *testing.T) { - b := mock.NewBackend() - - data := rtest.Random(23, rand.Intn(MiB)+500*KiB) - id := 
restic.Hash(data)
- // damage buffer
- data[0] ^= 0xff
-
- b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
- return io.NopCloser(bytes.NewReader(data)), nil
- }
-
- // must fail on first try
- _, err := backend.LoadAll(context.TODO(), nil, b, backend.Handle{Type: backend.PackFile, Name: id.String()})
- if err == nil {
- t.Fatalf("missing expected error")
- }
-
- // must return the broken data after a retry
- be := &quickRetryBackend{Backend: b}
- buf, err := backend.LoadAll(context.TODO(), nil, be, backend.Handle{Type: backend.PackFile, Name: id.String()})
- rtest.OK(t, err)
-
- if !bytes.Equal(buf, data) {
- t.Fatalf("wrong data returned")
- }
-}
-
-func TestLoadAllAppend(t *testing.T) {
- b := mem.New()
-
- h1 := save(t, b, []byte("foobar test string"))
- randomData := rtest.Random(23, rand.Intn(MiB)+500*KiB)
- h2 := save(t, b, randomData)
-
- var tests = []struct {
- handle backend.Handle
- buf []byte
- want []byte
- }{
- {
- handle: h1,
- buf: nil,
- want: []byte("foobar test string"),
- },
- {
- handle: h1,
- buf: []byte("xxx"),
- want: []byte("foobar test string"),
- },
- {
- handle: h2,
- buf: nil,
- want: randomData,
- },
- {
- handle: h2,
- buf: make([]byte, 0, 200),
- want: randomData,
- },
- {
- handle: h2,
- buf: []byte("foobarbaz"),
- want: randomData,
- },
- }
-
- for _, test := range tests {
- t.Run("", func(t *testing.T) {
- buf, err := backend.LoadAll(context.TODO(), test.buf, b, test.handle)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(buf, test.want) {
- t.Errorf("wrong data returned, want %q, got %q", test.want, buf)
- }
- })
- }
-}
diff --git a/internal/backend/watchdog_roundtriper.go b/internal/backend/watchdog_roundtriper.go
new file mode 100644
index 00000000000..dc270b974f3
--- /dev/null
+++ b/internal/backend/watchdog_roundtriper.go
@@ -0,0 +1,122 @@
+package backend
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sync/atomic"
+ "time"
+)
+
+var errRequestTimeout = fmt.Errorf("request timeout")
+
+// watchdogRoundtripper cancels an http request if an upload or download does not make progress
+// within the timeout. The time between fully sending the request and receiving a response is also
+// limited by this timeout. This ensures that stuck requests are cancelled after some time.
+//
+// The roundtripper assumes that the upload and download happen continuously. In particular,
+// the caller must not make long pauses between individual read requests from the response body.
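//
// For illustration, a hypothetical wiring of the watchdog into an HTTP client
// (the type and constructor follow below); the timeout and chunk size shown
// here are made-up values, not restic's defaults:
//
//	rt := newWatchdogRoundtripper(http.DefaultTransport, 2*time.Minute, 128*1024)
//	client := &http.Client{Transport: rt}
//
// Each chunk-sized read of the request or response body resets the timer, so a
// slow-but-moving transfer survives while a stalled one fails with
// errRequestTimeout.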
+type watchdogRoundtripper struct { + rt http.RoundTripper + timeout time.Duration + chunkSize int +} + +var _ http.RoundTripper = &watchdogRoundtripper{} + +func newWatchdogRoundtripper(rt http.RoundTripper, timeout time.Duration, chunkSize int) *watchdogRoundtripper { + return &watchdogRoundtripper{ + rt: rt, + timeout: timeout, + chunkSize: chunkSize, + } +} + +func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) { + timer := time.NewTimer(w.timeout) + ctx, cancel := context.WithCancel(req.Context()) + timedOut := &atomic.Bool{} + + // cancel context if timer expires + go func() { + defer timer.Stop() + select { + case <-timer.C: + timedOut.Store(true) + cancel() + case <-ctx.Done(): + } + }() + + kick := func() { + timer.Reset(w.timeout) + } + isTimeout := func(err error) bool { + return timedOut.Load() && errors.Is(err, context.Canceled) + } + + req = req.Clone(ctx) + if req.Body != nil { + // kick watchdog timer as long as uploading makes progress + req.Body = newWatchdogReadCloser(req.Body, w.chunkSize, kick, nil, isTimeout) + } + + resp, err := w.rt.RoundTrip(req) + if err != nil { + if isTimeout(err) { + err = errRequestTimeout + } + return nil, err + } + + // kick watchdog timer as long as downloading makes progress + // cancel context to stop goroutine once response body is closed + resp.Body = newWatchdogReadCloser(resp.Body, w.chunkSize, kick, cancel, isTimeout) + return resp, nil +} + +func newWatchdogReadCloser(rc io.ReadCloser, chunkSize int, kick func(), close func(), isTimeout func(err error) bool) *watchdogReadCloser { + return &watchdogReadCloser{ + rc: rc, + chunkSize: chunkSize, + kick: kick, + close: close, + isTimeout: isTimeout, + } +} + +type watchdogReadCloser struct { + rc io.ReadCloser + chunkSize int + kick func() + close func() + isTimeout func(err error) bool +} + +var _ io.ReadCloser = &watchdogReadCloser{} + +func (w *watchdogReadCloser) Read(p []byte) (n int, err error) { + w.kick() + + // Read is not required to fill the whole passed in byte slice + // Thus, keep things simple and just stay within our chunkSize. 
+ if len(p) > w.chunkSize { + p = p[:w.chunkSize] + } + n, err = w.rc.Read(p) + w.kick() + + if err != nil && w.isTimeout(err) { + err = errRequestTimeout + } + return n, err +} + +func (w *watchdogReadCloser) Close() error { + if w.close != nil { + w.close() + } + return w.rc.Close() +} diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go new file mode 100644 index 00000000000..f7f90259cea --- /dev/null +++ b/internal/backend/watchdog_roundtriper_test.go @@ -0,0 +1,204 @@ +package backend + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + rtest "github.com/restic/restic/internal/test" +) + +func TestRead(t *testing.T) { + data := []byte("abcdef") + var ctr int + kick := func() { + ctr++ + } + var closed bool + onClose := func() { + closed = true + } + isTimeout := func(err error) bool { + return false + } + + wd := newWatchdogReadCloser(io.NopCloser(bytes.NewReader(data)), 1, kick, onClose, isTimeout) + + out, err := io.ReadAll(wd) + rtest.OK(t, err) + rtest.Equals(t, data, out, "data mismatch") + // the EOF read also triggers the kick function + rtest.Equals(t, len(data)*2+2, ctr, "unexpected number of kick calls") + + rtest.Equals(t, false, closed, "close function called too early") + rtest.OK(t, wd.Close()) + rtest.Equals(t, true, closed, "close function not called") +} + +func TestRoundtrip(t *testing.T) { + t.Parallel() + + // at the higher delay values, it takes longer to transmit the request/response body + // than the roundTripper timeout + for _, delay := range []int{0, 1, 10, 20} { + t.Run(fmt.Sprintf("%v", delay), func(t *testing.T) { + msg := []byte("ping-pong-data") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + data, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + + // slowly send the reply + for len(data) >= 2 { + _, _ = w.Write(data[:2]) + w.(http.Flusher).Flush() + data = data[2:] + time.Sleep(time.Duration(delay) * time.Millisecond) + } + _, _ = w.Write(data) + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 100*time.Millisecond, 2) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), time.Duration(delay)*time.Millisecond))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.OK(t, err) + rtest.Equals(t, 200, resp.StatusCode, "unexpected status code") + + response, err := io.ReadAll(resp.Body) + rtest.OK(t, err) + rtest.Equals(t, msg, response, "unexpected response") + + rtest.OK(t, resp.Body.Close()) + }) + } +} + +func TestCanceledRoundtrip(t *testing.T) { + rt := newWatchdogRoundtripper(http.DefaultTransport, time.Second, 2) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + req, err := http.NewRequestWithContext(ctx, "GET", "http://some.random.url.dfdgsfg", nil) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.Equals(t, context.Canceled, err) + // make linter happy + if resp != nil { + rtest.OK(t, resp.Body.Close()) + } +} + +type slowReader struct { + data io.Reader + delay time.Duration +} + +func newSlowReader(data io.Reader, delay time.Duration) *slowReader { + return &slowReader{ + data: data, + delay: delay, + } +} + +func (s *slowReader) Read(p []byte) (n int, err error) { + time.Sleep(s.delay) + return s.data.Read(p) +} + +func TestUploadTimeout(t *testing.T) { + t.Parallel() + + msg := []byte("ping") + srv 
:= httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + t.Error("upload should have been canceled") + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), 100*time.Millisecond))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.Equals(t, errRequestTimeout, err) + // make linter happy + if resp != nil { + rtest.OK(t, resp.Body.Close()) + } +} + +func TestProcessingTimeout(t *testing.T) { + t.Parallel() + + msg := []byte("ping") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + time.Sleep(100 * time.Millisecond) + w.WriteHeader(200) + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.Equals(t, errRequestTimeout, err) + // make linter happy + if resp != nil { + rtest.OK(t, resp.Body.Close()) + } +} + +func TestDownloadTimeout(t *testing.T) { + t.Parallel() + + msg := []byte("ping") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + data, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + _, _ = w.Write(data[:2]) + w.(http.Flusher).Flush() + data = data[2:] + + time.Sleep(100 * time.Millisecond) + _, _ = w.Write(data) + + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 25*time.Millisecond, 1024) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.OK(t, err) + rtest.Equals(t, 200, resp.StatusCode, "unexpected status code") + + _, err = io.ReadAll(resp.Body) + rtest.Equals(t, errRequestTimeout, err, "response download not canceled") + rtest.OK(t, resp.Body.Close()) +} diff --git a/internal/bloblru/cache.go b/internal/bloblru/cache.go index 302ecc769b9..9981f8a87be 100644 --- a/internal/bloblru/cache.go +++ b/internal/bloblru/cache.go @@ -20,13 +20,15 @@ type Cache struct { c *simplelru.LRU[restic.ID, []byte] free, size int // Current and max capacity, in bytes. + inProgress map[restic.ID]chan struct{} } // New constructs a blob cache that stores at most size bytes worth of blobs. func New(size int) *Cache { c := &Cache{ - free: size, - size: size, + free: size, + size: size, + inProgress: make(map[restic.ID]chan struct{}), } // NewLRU wants us to specify some max. number of entries, else it errors. 
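
// For illustration: the next hunk adds Cache.GetOrCompute, a small single-flight
// mechanism (the same idea as golang.org/x/sync/singleflight): concurrent
// requests for the same blob share one download. Stripped of the LRU
// bookkeeping, the coordination pattern looks roughly like this sketch:
package main

import "sync"

type cache struct {
	mu         sync.Mutex
	values     map[string][]byte
	inProgress map[string]chan struct{}
}

func newCache() *cache {
	return &cache{values: map[string][]byte{}, inProgress: map[string]chan struct{}{}}
}

func (c *cache) get(key string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.values[key]
	return v, ok
}

func (c *cache) getOrCompute(key string, compute func() ([]byte, error)) ([]byte, error) {
	if v, ok := c.get(key); ok {
		return v, nil
	}

	// register our computation or find a running one
	finish := make(chan struct{})
	c.mu.Lock()
	wait, busy := c.inProgress[key]
	if !busy {
		c.inProgress[key] = finish
	}
	c.mu.Unlock()

	if busy {
		<-wait // a parallel computation is running; wait for it to finish
	} else {
		defer func() {
			c.mu.Lock()
			delete(c.inProgress, key)
			c.mu.Unlock()
			close(finish) // wake waiters only after the entry is removed
		}()
	}

	// re-check in both cases: the value may have been stored between the
	// first lookup and the registration above (see the comment in the hunk)
	if v, ok := c.get(key); ok {
		return v, nil
	}

	v, err := compute()
	if err == nil {
		c.mu.Lock()
		c.values[key] = v
		c.mu.Unlock()
	}
	return v, err
}
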
@@ -85,6 +87,57 @@ func (c *Cache) Get(id restic.ID) ([]byte, bool) { return blob, ok } +func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]byte, error) { + // check if already cached + blob, ok := c.Get(id) + if ok { + return blob, nil + } + + // check for parallel download or start our own + finish := make(chan struct{}) + c.mu.Lock() + waitForResult, isComputing := c.inProgress[id] + if !isComputing { + c.inProgress[id] = finish + } + c.mu.Unlock() + + if isComputing { + // wait for result of parallel download + <-waitForResult + } else { + // remove progress channel once finished here + defer func() { + c.mu.Lock() + delete(c.inProgress, id) + c.mu.Unlock() + close(finish) + }() + } + + // try again. This is necessary independent of whether isComputing is true or not. + // The calls to `c.Get()` and checking/adding the entry in `c.inProgress` are not atomic, + // thus the item might have been computed in the meantime. + // The following scenario would compute() the value multiple times otherwise: + // Goroutine A does not find a value in the initial call to `c.Get`, then goroutine B + // takes over, caches the computed value and cleans up its channel in c.inProgress. + // Then goroutine A continues, does not detect a parallel computation and would try + // to call compute() again. + blob, ok = c.Get(id) + if ok { + return blob, nil + } + + // download it + blob, err := compute() + if err == nil { + c.Add(id, blob) + } + + return blob, err +} + func (c *Cache) evict(key restic.ID, blob []byte) { debug.Log("bloblru.Cache: evict %v, %d bytes", key, cap(blob)) c.free += cap(blob) + overhead diff --git a/internal/bloblru/cache_test.go b/internal/bloblru/cache_test.go index aa6f4465c43..d25daf764ff 100644 --- a/internal/bloblru/cache_test.go +++ b/internal/bloblru/cache_test.go @@ -1,11 +1,14 @@ package bloblru import ( + "context" + "fmt" "math/rand" "testing" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "golang.org/x/sync/errgroup" ) func TestCache(t *testing.T) { @@ -52,6 +55,70 @@ func TestCache(t *testing.T) { rtest.Equals(t, cacheSize, c.free) } +func TestCacheGetOrCompute(t *testing.T) { + var id1, id2 restic.ID + id1[0] = 1 + id2[0] = 2 + + const ( + kiB = 1 << 10 + cacheSize = 64*kiB + 3*overhead + ) + + c := New(cacheSize) + + e := fmt.Errorf("broken") + _, err := c.GetOrCompute(id1, func() ([]byte, error) { + return nil, e + }) + rtest.Equals(t, e, err, "expected error was not returned") + + // fill buffer + data1 := make([]byte, 10*kiB) + blob, err := c.GetOrCompute(id1, func() ([]byte, error) { + return data1, nil + }) + rtest.OK(t, err) + rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returned") + + // now the buffer should be returned without calling the compute function + blob, err = c.GetOrCompute(id1, func() ([]byte, error) { + return nil, e + }) + rtest.OK(t, err) + rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returned") + + // check concurrency + wg, _ := errgroup.WithContext(context.TODO()) + wait := make(chan struct{}) + calls := make(chan struct{}, 10) + + // start a bunch of blocking goroutines + for i := 0; i < 10; i++ { + wg.Go(func() error { + buf, err := c.GetOrCompute(id2, func() ([]byte, error) { + // block to ensure that multiple requests are waiting in parallel + <-wait + calls <- struct{}{} + return make([]byte, 42), nil + }) + if len(buf) != 42 { + return fmt.Errorf("wrong buffer") + } + return err + }) + } + + close(wait) + rtest.OK(t, wg.Wait()) + close(calls) + count := 0 
+ for range calls { + count++ + } + rtest.Equals(t, 1, count, "expected exactly one call of the compute function") +} + func BenchmarkAdd(b *testing.B) { const ( MiB = 1 << 20 diff --git a/internal/cache/backend_test.go b/internal/cache/backend_test.go deleted file mode 100644 index 68fbb02b3f9..00000000000 --- a/internal/cache/backend_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package cache - -import ( - "bytes" - "context" - "io" - "math/rand" - "sync" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/mem" - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" -) - -func loadAndCompare(t testing.TB, be backend.Backend, h backend.Handle, data []byte) { - buf, err := backend.LoadAll(context.TODO(), nil, be, h) - if err != nil { - t.Fatal(err) - } - - if len(buf) != len(data) { - t.Fatalf("wrong number of bytes read, want %v, got %v", len(data), len(buf)) - } - - if !bytes.Equal(buf, data) { - t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[:16], buf[:16]) - } -} - -func save(t testing.TB, be backend.Backend, h backend.Handle, data []byte) { - err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher())) - if err != nil { - t.Fatal(err) - } -} - -func remove(t testing.TB, be backend.Backend, h backend.Handle) { - err := be.Remove(context.TODO(), h) - if err != nil { - t.Fatal(err) - } -} - -func randomData(n int) (backend.Handle, []byte) { - data := test.Random(rand.Int(), n) - id := restic.Hash(data) - h := backend.Handle{ - Type: backend.IndexFile, - Name: id.String(), - } - return h, data -} - -func TestBackend(t *testing.T) { - be := mem.New() - c := TestNewCache(t) - wbe := c.Wrap(be) - - h, data := randomData(5234142) - - // save directly in backend - save(t, be, h, data) - if c.Has(h) { - t.Errorf("cache has file too early") - } - - // load data via cache - loadAndCompare(t, wbe, h, data) - if !c.Has(h) { - t.Errorf("cache doesn't have file after load") - } - - // remove via cache - remove(t, wbe, h) - if c.Has(h) { - t.Errorf("cache has file after remove") - } - - // save via cache - save(t, wbe, h, data) - if !c.Has(h) { - t.Errorf("cache doesn't have file after load") - } - - // load data directly from backend - loadAndCompare(t, be, h, data) - - // load data via cache - loadAndCompare(t, be, h, data) - - // remove directly - remove(t, be, h) - if !c.Has(h) { - t.Errorf("file not in cache any more") - } - - // run stat - _, err := wbe.Stat(context.TODO(), h) - if err == nil { - t.Errorf("expected error for removed file not found, got nil") - } - - if !wbe.IsNotExist(err) { - t.Errorf("Stat() returned error that does not match IsNotExist(): %v", err) - } - - if c.Has(h) { - t.Errorf("removed file still in cache after stat") - } -} - -type loadErrorBackend struct { - backend.Backend - loadError error -} - -func (be loadErrorBackend) Load(_ context.Context, _ backend.Handle, _ int, _ int64, _ func(rd io.Reader) error) error { - time.Sleep(10 * time.Millisecond) - return be.loadError -} - -func TestErrorBackend(t *testing.T) { - be := mem.New() - c := TestNewCache(t) - h, data := randomData(5234142) - - // save directly in backend - save(t, be, h, data) - - testErr := errors.New("test error") - errBackend := loadErrorBackend{ - Backend: be, - loadError: testErr, - } - - loadTest := func(wg *sync.WaitGroup, be backend.Backend) { - defer wg.Done() - - buf, err := backend.LoadAll(context.TODO(), nil, be, h) - if err == testErr { - return - 
} - - if err != nil { - t.Error(err) - return - } - - if !bytes.Equal(buf, data) { - t.Errorf("data does not match") - } - time.Sleep(time.Millisecond) - } - - wrappedBE := c.Wrap(errBackend) - var wg sync.WaitGroup - for i := 0; i < 5; i++ { - wg.Add(1) - go loadTest(&wg, wrappedBE) - } - - wg.Wait() -} - -func TestBackendRemoveBroken(t *testing.T) { - be := mem.New() - c := TestNewCache(t) - - h, data := randomData(5234142) - // save directly in backend - save(t, be, h, data) - - // prime cache with broken copy - broken := append([]byte{}, data...) - broken[0] ^= 0xff - err := c.Save(h, bytes.NewReader(broken)) - test.OK(t, err) - - // loadall retries if broken data was returned - buf, err := backend.LoadAll(context.TODO(), nil, c.Wrap(be), h) - test.OK(t, err) - - if !bytes.Equal(buf, data) { - t.Fatalf("wrong data returned") - } - - // check that the cache now contains the correct data - rd, err := c.load(h, 0, 0) - defer func() { - _ = rd.Close() - }() - test.OK(t, err) - cached, err := io.ReadAll(rd) - test.OK(t, err) - if !bytes.Equal(cached, data) { - t.Fatalf("wrong data cache") - } -} diff --git a/internal/checker/checker.go b/internal/checker/checker.go index df865cb412a..12020891a13 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -2,24 +2,17 @@ package checker import ( "bufio" - "bytes" "context" "fmt" - "io" "runtime" - "sort" "sync" "github.com/klauspost/compress/zstd" - "github.com/minio/sha256-simd" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/hashing" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" @@ -58,9 +51,6 @@ func New(repo restic.Repository, trackUnused bool) *Checker { return c } -// ErrLegacyLayout is returned when the repository uses the S3 legacy layout. -var ErrLegacyLayout = errors.New("repository uses S3 legacy layout") - // ErrDuplicatePacks is returned when a pack is found in more than one index. type ErrDuplicatePacks struct { PackID restic.ID @@ -80,35 +70,15 @@ func (e *ErrMixedPack) Error() string { return fmt.Sprintf("pack %v contains a mix of tree and data blobs", e.PackID.Str()) } -// ErrOldIndexFormat is returned when an index with the old format is -// found. 
-type ErrOldIndexFormat struct { - restic.ID -} - -func (err *ErrOldIndexFormat) Error() string { - return fmt.Sprintf("index %v has old format", err.ID) -} - -// ErrPackData is returned if errors are discovered while verifying a packfile -type ErrPackData struct { - PackID restic.ID - errs []error -} - -func (e *ErrPackData) Error() string { - return fmt.Sprintf("pack %v contains %v errors: %v", e.PackID, len(e.errs), e.errs) -} - func (c *Checker) LoadSnapshots(ctx context.Context) error { var err error c.snapshots, err = restic.MemorizeList(ctx, c.repo, restic.SnapshotFile) return err } -func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID]restic.BlobType { +func computePackTypes(ctx context.Context, idx restic.ListBlobser) (map[restic.ID]restic.BlobType, error) { packs := make(map[restic.ID]restic.BlobType) - idx.Each(ctx, func(pb restic.PackedBlob) { + err := idx.ListBlobs(ctx, func(pb restic.PackedBlob) { tpe, exists := packs[pb.PackID] if exists { if pb.Type != tpe { @@ -119,45 +89,16 @@ func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID } packs[pb.PackID] = tpe }) - return packs + return packs, err } // LoadIndex loads all index files. func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []error, errs []error) { debug.Log("Start") - indexList, err := restic.MemorizeList(ctx, c.repo, restic.IndexFile) - if err != nil { - // abort if an error occurs while listing the indexes - return hints, append(errs, err) - } - - if p != nil { - var numIndexFiles uint64 - err := indexList.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error { - numIndexFiles++ - return nil - }) - if err != nil { - return hints, append(errs, err) - } - p.SetMax(numIndexFiles) - defer p.Done() - } - packToIndex := make(map[restic.ID]restic.IDSet) - err = index.ForAllIndexes(ctx, indexList, c.repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error { + err := c.masterIndex.Load(ctx, c.repo, p, func(id restic.ID, idx *index.Index, err error) error { debug.Log("process index %v, err %v", id, err) - - if p != nil { - p.Add(1) - } - - if oldFormat { - debug.Log("index %v has old format", id) - hints = append(hints, &ErrOldIndexFormat{id}) - } - err = errors.Wrapf(err, "error loading index %v", id) if err != nil { @@ -165,11 +106,9 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return nil } - c.masterIndex.Insert(index) - debug.Log("process blobs") cnt := 0 - index.Each(ctx, func(blob restic.PackedBlob) { + err = idx.Each(ctx, func(blob restic.PackedBlob) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { @@ -179,22 +118,28 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e }) debug.Log("%d blobs processed", cnt) - return nil + return err }) if err != nil { - errs = append(errs, err) + // failed to load the index + return hints, append(errs, err) } - // Merge index before computing pack sizes, as this needs removed duplicates - err = c.masterIndex.MergeFinalIndexes() + err = c.repo.SetIndex(c.masterIndex) if err != nil { - // abort if an error occurs merging the indexes - return hints, append(errs, err) + debug.Log("SetIndex returned error: %v", err) + errs = append(errs, err) } // compute pack size using index entries - c.packs = pack.Size(ctx, c.masterIndex, false) - packTypes := computePackTypes(ctx, c.masterIndex) + c.packs, err = pack.Size(ctx, c.repo, false) + if err != nil { + return hints, append(errs, err) + } + packTypes, err := 
computePackTypes(ctx, c.repo) + if err != nil { + return hints, append(errs, err) + } debug.Log("checking for duplicate packs") for packID := range c.packs { @@ -212,48 +157,26 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e } } - err = c.repo.SetIndex(c.masterIndex) - if err != nil { - debug.Log("SetIndex returned error: %v", err) - errs = append(errs, err) - } - return hints, errs } // PackError describes an error with a specific pack. type PackError struct { - ID restic.ID - Orphaned bool - Err error + ID restic.ID + Orphaned bool + Truncated bool + Err error } func (e *PackError) Error() string { return "pack " + e.ID.String() + ": " + e.Err.Error() } -// IsOrphanedPack returns true if the error describes a pack which is not -// contained in any index. -func IsOrphanedPack(err error) bool { - var e *PackError - return errors.As(err, &e) && e.Orphaned -} - -func isS3Legacy(b backend.Backend) bool { - be := backend.AsBackend[*s3.Backend](b) - return be != nil && be.Layout.Name() == "s3legacy" -} - // Packs checks that all packs referenced in the index are still available and // there are no packs that aren't in an index. errChan is closed after all // packs have been checked. func (c *Checker) Packs(ctx context.Context, errChan chan<- error) { defer close(errChan) - - if isS3Legacy(c.repo.Backend()) { - errChan <- ErrLegacyLayout - } - debug.Log("checking for %d packs", len(c.packs)) debug.Log("listing repository packs") @@ -288,7 +211,7 @@ func (c *Checker) Packs(ctx context.Context, errChan chan<- error) { select { case <-ctx.Done(): return - case errChan <- &PackError{ID: id, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}: + case errChan <- &PackError{ID: id, Truncated: true, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}: } } } @@ -421,7 +344,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { for _, node := range tree.Nodes { switch node.Type { - case "file": + case restic.NodeTypeFile: if node.Content == nil { errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)}) } @@ -436,7 +359,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { // unfortunately fails in some cases that are not resolvable // by users, so we omit this check, see #1887 - _, found := c.repo.LookupBlobSize(blobID, restic.DataBlob) + _, found := c.repo.LookupBlobSize(restic.DataBlob, blobID) if !found { debug.Log("tree %v references blob %v which isn't contained in index", id, blobID) errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q blob %v not found in index", node.Name, blobID)}) @@ -457,7 +380,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { c.blobRefs.Unlock() } - case "dir": + case restic.NodeTypeDir: if node.Subtree == nil { errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)}) continue @@ -468,7 +391,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { continue } - case "symlink", "socket", "chardev", "dev", "fifo": + case restic.NodeTypeSymlink, restic.NodeTypeSocket, restic.NodeTypeCharDev, restic.NodeTypeDev, restic.NodeTypeFifo: // nothing to check default: @@ -484,7 +407,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { } // UnusedBlobs returns all blobs that have never been referenced. 
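UnusedBlobs, whose updated two-value signature appears in the next hunk, now surfaces index-listing failures instead of dropping them. A hedged caller sketch follows (the reportUnusedBlobs helper, chkr, and the surrounding wiring are assumptions for illustration, not part of this diff):

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/checker"
)

// reportUnusedBlobs is an illustrative helper: it assumes chkr was created
// with checker.New(repo, true) so that blob references are tracked.
func reportUnusedBlobs(ctx context.Context, chkr *checker.Checker) error {
	blobs, err := chkr.UnusedBlobs(ctx)
	if err != nil {
		return err // listing the index failed; previously this was silently dropped
	}
	for _, h := range blobs {
		fmt.Printf("unused blob %v\n", h) // h is a restic.BlobHandle
	}
	return nil
}
```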
-func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { +func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) { if !c.trackUnused { panic("only works when tracking blob references") } @@ -495,7 +418,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { ctx, cancel := context.WithCancel(ctx) defer cancel() - c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = c.repo.ListBlobs(ctx, func(blob restic.PackedBlob) { h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if !c.blobRefs.M.Has(h) { debug.Log("blob %v not referenced", h) @@ -503,7 +426,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { } }) - return blobs + return blobs, err } // CountPacks returns the number of packs in the repository. @@ -516,126 +439,13 @@ func (c *Checker) GetPacks() map[restic.ID]int64 { return c.packs } -// checkPack reads a pack and checks the integrity of all blobs. -func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { - debug.Log("checking pack %v", id.String()) - - if len(blobs) == 0 { - return errors.Errorf("pack %v is empty or not indexed", id) - } - - // sanity check blobs in index - sort.Slice(blobs, func(i, j int) bool { - return blobs[i].Offset < blobs[j].Offset - }) - idxHdrSize := pack.CalculateHeaderSize(blobs) - lastBlobEnd := 0 - nonContinuousPack := false - for _, blob := range blobs { - if lastBlobEnd != int(blob.Offset) { - nonContinuousPack = true - } - lastBlobEnd = int(blob.Offset + blob.Length) - } - // size was calculated by masterindex.PackSize, thus there's no need to recalculate it here - - var errs []error - if nonContinuousPack { - debug.Log("Index for pack contains gaps / overlaps, blobs: %v", blobs) - errs = append(errs, errors.New("Index for pack contains gaps / overlapping blobs")) - } - - // calculate hash on-the-fly while reading the pack and capture pack header - var hash restic.ID - var hdrBuf []byte - h := backend.Handle{Type: backend.PackFile, Name: id.String()} - err := r.Backend().Load(ctx, h, int(size), 0, func(rd io.Reader) error { - hrd := hashing.NewReader(rd, sha256.New()) - bufRd.Reset(hrd) - - it := repository.NewPackBlobIterator(id, bufRd, 0, blobs, r.Key(), dec) - for { - val, err := it.Next() - if err == repository.ErrPackEOF { - break - } else if err != nil { - return err - } - debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle) - if val.Err != nil { - debug.Log(" error verifying blob %v: %v", val.Handle.ID, err) - errs = append(errs, errors.Errorf("blob %v: %v", val.Handle.ID, err)) - } - } - - // skip enough bytes until we reach the possible header start - curPos := lastBlobEnd - minHdrStart := int(size) - pack.MaxHeaderSize - if minHdrStart > curPos { - _, err := bufRd.Discard(minHdrStart - curPos) - if err != nil { - return err - } - } - - // read remainder, which should be the pack header - var err error - hdrBuf, err = io.ReadAll(bufRd) - if err != nil { - return err - } - - hash = restic.IDFromHash(hrd.Sum(nil)) - return nil - }) - if err != nil { - // failed to load the pack file, return as further checks cannot succeed anyways - debug.Log(" error streaming pack: %v", err) - return errors.Errorf("pack %v failed to download: %v", id, err) - } - if !hash.Equal(id) { - debug.Log("Pack ID does not match, want %v, got %v", id, hash) - return errors.Errorf("Pack ID does not match, want %v, got %v", id, hash) - } - - 
blobs, hdrSize, err := pack.List(r.Key(), bytes.NewReader(hdrBuf), int64(len(hdrBuf))) - if err != nil { - return err - } - - if uint32(idxHdrSize) != hdrSize { - debug.Log("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize) - errs = append(errs, errors.Errorf("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) - } - - idx := r.Index() - for _, blob := range blobs { - // Check if blob is contained in index and position is correct - idxHas := false - for _, pb := range idx.Lookup(blob.BlobHandle) { - if pb.PackID == id && pb.Blob == blob { - idxHas = true - break - } - } - if !idxHas { - errs = append(errs, errors.Errorf("Blob %v is not contained in index or position is incorrect", blob.ID)) - continue - } - } - - if len(errs) > 0 { - return &ErrPackData{PackID: id, errs: errs} - } - - return nil -} - // ReadData loads all data from the repository and checks the integrity. func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) { c.ReadPacks(ctx, c.packs, nil, errChan) } +const maxStreamBufferSize = 4 * 1024 * 1024 + // ReadPacks loads data from specified packs and checks the integrity. func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *progress.Counter, errChan chan<- error) { defer close(errChan) @@ -653,9 +463,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p // run workers for i := 0; i < workerCount; i++ { g.Go(func() error { - // create a buffer that is large enough to be reused by repository.StreamPack - // this ensures that we can read the pack header later on - bufRd := bufio.NewReaderSize(nil, repository.MaxStreamBufferSize) + bufRd := bufio.NewReaderSize(nil, maxStreamBufferSize) dec, err := zstd.NewReader(nil) if err != nil { panic(dec) @@ -674,7 +482,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p } } - err := checkPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec) + err := repository.CheckPack(ctx, c.repo.(*repository.Repository), ps.id, ps.blobs, ps.size, bufRd, dec) p.Add(1) if err == nil { continue @@ -695,7 +503,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p } // push packs to ch - for pbs := range c.repo.Index().ListPacks(ctx, packSet) { + for pbs := range c.repo.ListPacksFromIndex(ctx, packSet) { size := packs[pbs.PackID] debug.Log("listed %v", pbs.PackID) select { diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index cca5a582cdd..92bbb1da660 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "sort" "strconv" + "strings" "sync" "testing" "time" @@ -16,8 +17,8 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/hashing" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/hashing" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" @@ -72,11 +73,9 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) { } func TestCheckRepo(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(errs) > 0 { @@ -92,16 
+91,11 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - - packHandle := backend.Handle{ - Type: restic.PackFile, - Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", - } - test.OK(t, repo.Backend().Remove(context.TODO(), packHandle)) + packID := restic.TestParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6") + test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: packID.String()})) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -116,25 +110,20 @@ func TestMissingPack(t *testing.T) { "expected exactly one error, got %v", len(errs)) if err, ok := errs[0].(*checker.PackError); ok { - test.Equals(t, packHandle.Name, err.ID.String()) + test.Equals(t, packID, err.ID) } else { t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) } } func TestUnreferencedPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" - indexHandle := backend.Handle{ - Type: restic.IndexFile, - Name: "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44", - } - test.OK(t, repo.Backend().Remove(context.TODO(), indexHandle)) + indexID := restic.TestParseID("3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44") + test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: indexID.String()})) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -156,16 +145,11 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - - snapshotHandle := backend.Handle{ - Type: restic.SnapshotFile, - Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", - } - test.OK(t, repo.Backend().Remove(context.TODO(), snapshotHandle)) + snapshotID := restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02") + test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.SnapshotFile, Name: snapshotID.String()})) unusedBlobsBySnapshot := restic.BlobHandles{ restic.TestParseHandle("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849", restic.DataBlob), @@ -188,18 +172,17 @@ func TestUnreferencedBlobs(t *testing.T) { test.OKs(t, checkPacks(chkr)) test.OKs(t, checkStruct(chkr)) - blobs := chkr.UnusedBlobs(context.TODO()) + blobs, err := chkr.UnusedBlobs(context.TODO()) + test.OK(t, err) sort.Sort(blobs) test.Equals(t, unusedBlobsBySnapshot, blobs) } func TestModifiedIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - done := make(chan struct{}) defer close(done) @@ -225,13 +208,13 @@ func TestModifiedIndex(t *testing.T) { }() wr := io.Writer(tmpfile) var hw *hashing.Writer - if repo.Backend().Hasher() != nil { - hw = 
hashing.NewWriter(wr, repo.Backend().Hasher()) + if be.Hasher() != nil { + hw = hashing.NewWriter(wr, be.Hasher()) wr = hw } // read the file from the backend - err = repo.Backend().Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { + err = be.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { _, err := io.Copy(wr, rd) return err }) @@ -253,7 +236,7 @@ func TestModifiedIndex(t *testing.T) { t.Fatal(err) } - err = repo.Backend().Save(context.TODO(), h2, rd) + err = be.Save(context.TODO(), h2, rd) if err != nil { t.Fatal(err) } @@ -274,11 +257,9 @@ func TestModifiedIndex(t *testing.T) { var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(hints) == 0 { @@ -336,44 +317,91 @@ func induceError(data []byte) { data[pos] ^= 1 } +// errorOnceBackend randomly modifies data when reading a file for the first time. +type errorOnceBackend struct { + backend.Backend + m sync.Map +} + +func (b *errorOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + _, isRetry := b.m.LoadOrStore(h, struct{}{}) + return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { + if !isRetry && h.Type != restic.ConfigFile { + return consumer(errorReadCloser{rd}) + } + return consumer(rd) + }) +} + func TestCheckerModifiedData(t *testing.T) { - repo := repository.TestRepository(t) + repo, _, be := repository.TestRepositoryWithVersion(t, 0) sn := archiver.TestSnapshot(t, repo, ".", nil) t.Logf("archived as %v", sn.ID().Str()) - beError := &errorBackend{Backend: repo.Backend()} - checkRepo, err := repository.New(beError, repository.Options{}) - test.OK(t, err) - test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, "")) - - chkr := checker.New(checkRepo, false) - - hints, errs := chkr.LoadIndex(context.TODO(), nil) - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } - - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } - - beError.ProduceErrors = true - errFound := false - for _, err := range checkPacks(chkr) { - t.Logf("pack error: %v", err) - } - - for _, err := range checkStruct(chkr) { - t.Logf("struct error: %v", err) - } - - for _, err := range checkData(chkr) { - t.Logf("data error: %v", err) - errFound = true - } - - if !errFound { - t.Fatal("no error found, checker is broken") + errBe := &errorBackend{Backend: be} + + for _, test := range []struct { + name string + be backend.Backend + damage func() + check func(t *testing.T, err error) + }{ + { + "errorBackend", + errBe, + func() { + errBe.ProduceErrors = true + }, + func(t *testing.T, err error) { + if err == nil { + t.Fatal("no error found, checker is broken") + } + }, + }, + { + "errorOnceBackend", + &errorOnceBackend{Backend: be}, + func() {}, + func(t *testing.T, err error) { + if !strings.Contains(err.Error(), "check successful on second attempt, original error pack") { + t.Fatalf("wrong error found, got %v", err) + } + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + checkRepo := repository.TestOpenBackend(t, test.be) + + chkr := checker.New(checkRepo, false) + + hints, errs 
:= chkr.LoadIndex(context.TODO(), nil) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + test.damage() + var err error + for _, err := range checkPacks(chkr) { + t.Logf("pack error: %v", err) + } + + for _, err := range checkStruct(chkr) { + t.Logf("struct error: %v", err) + } + + for _, cerr := range checkData(chkr) { + t.Logf("data error: %v", cerr) + if err == nil { + err = cerr + } + } + + test.check(t, err) + }) } } @@ -399,10 +427,8 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (* } func TestCheckerNoDuplicateTreeDecodes(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) checkRepo := &loadTreesOnceRepository{ Repository: repo, loadedTrees: restic.NewIDSet(), @@ -435,11 +461,11 @@ func (r *delayRepository) LoadTree(ctx context.Context, id restic.ID) (*restic.T return restic.LoadTree(ctx, r.Repository, id) } -func (r *delayRepository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, bool) { +func (r *delayRepository) LookupBlobSize(t restic.BlobType, id restic.ID) (uint, bool) { if id == r.DelayTree && t == restic.DataBlob { r.Unblock() } - return r.Repository.LookupBlobSize(id, t) + return r.Repository.LookupBlobSize(t, id) } func (r *delayRepository) Unblock() { @@ -456,7 +482,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { damagedNode := &restic.Node{ Name: "damaged", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, Size: 42, Content: restic.IDs{restic.TestParseID("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")}, @@ -481,14 +507,14 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { malNode := &restic.Node{ Name: "aaaaa", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, Size: uint64(len(buf)), Content: restic.IDs{id}, } dirNode := &restic.Node{ Name: "bbbbb", - Type: "dir", + Type: restic.NodeTypeDir, Mode: 0755, Subtree: &id, } @@ -549,9 +575,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { } func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) { - repodir, cleanup := test.Env(t, checkerTestData) - - repo := repository.TestOpenLocal(t, repodir) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/checker/testing.go b/internal/checker/testing.go index fe1679393ba..d0014398ff7 100644 --- a/internal/checker/testing.go +++ b/internal/checker/testing.go @@ -8,7 +8,7 @@ import ( ) // TestCheckRepo runs the checker on repo. 
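TestCheckRepo gains a skipStructure parameter in the hunk below. A hedged usage sketch (the repository setup and test name are assumptions; only the new third argument comes from this diff):

```go
package sketch

import (
	"testing"

	"github.com/restic/restic/internal/checker"
	"github.com/restic/restic/internal/repository"
)

func TestSomething(t *testing.T) {
	repo := repository.TestRepository(t) // assumed test helper
	// ... create snapshots to check ...

	// Pass false to also run the structure and unused-blob passes;
	// true skips them, e.g. for intentionally incomplete repositories.
	checker.TestCheckRepo(t, repo, false)
}
```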
-func TestCheckRepo(t testing.TB, repo restic.Repository) {
+func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) {
 	chkr := New(repo, true)
 
 	hints, errs := chkr.LoadIndex(context.TODO(), nil)
@@ -33,18 +33,23 @@ func TestCheckRepo(t testing.TB, repo restic.Repository) {
 		t.Error(err)
 	}
 
-	// structure
-	errChan = make(chan error)
-	go chkr.Structure(context.TODO(), nil, errChan)
-
-	for err := range errChan {
-		t.Error(err)
-	}
-
-	// unused blobs
-	blobs := chkr.UnusedBlobs(context.TODO())
-	if len(blobs) > 0 {
-		t.Errorf("unused blobs found: %v", blobs)
+	if !skipStructure {
+		// structure
+		errChan = make(chan error)
+		go chkr.Structure(context.TODO(), nil, errChan)
+
+		for err := range errChan {
+			t.Error(err)
+		}
+
+		// unused blobs
+		blobs, err := chkr.UnusedBlobs(context.TODO())
+		if err != nil {
+			t.Error(err)
+		}
+		if len(blobs) > 0 {
+			t.Errorf("unused blobs found: %v", blobs)
+		}
 	}
 
 	// read data
diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go
index 0f9179207d1..d7ac9c3d4a8 100644
--- a/internal/crypto/crypto.go
+++ b/internal/crypto/crypto.go
@@ -299,7 +299,7 @@ func (k *Key) Open(dst, nonce, ciphertext, _ []byte) ([]byte, error) {
 
 	// check for plausible length
 	if len(ciphertext) < k.Overhead() {
-		return nil, errors.Errorf("trying to decrypt invalid data: ciphertext too small")
+		return nil, errors.Errorf("trying to decrypt invalid data: ciphertext too short")
 	}
 
 	l := len(ciphertext) - macSize
diff --git a/internal/debug/debug.go b/internal/debug/debug.go
index 62c145e1a03..a09d6e74a3a 100644
--- a/internal/debug/debug.go
+++ b/internal/debug/debug.go
@@ -8,8 +8,6 @@ import (
 	"path/filepath"
 	"runtime"
 	"strings"
-
-	"github.com/restic/restic/internal/fs"
 )
 
 var opts struct {
@@ -46,7 +44,7 @@ func initDebugLogger() {
 
 	fmt.Fprintf(os.Stderr, "debug log file %v\n", debugfile)
 
-	f, err := fs.OpenFile(debugfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+	f, err := os.OpenFile(debugfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "unable to open debug log file: %v\n", err)
 		os.Exit(2)
@@ -122,7 +120,7 @@ func goroutineNum() int {
 	runtime.Stack(b, false)
 
 	var num int
-	fmt.Sscanf(string(b), "goroutine %d ", &num)
+	_, _ = fmt.Sscanf(string(b), "goroutine %d ", &num)
 	return num
 }
diff --git a/internal/debug/round_tripper.go b/internal/debug/round_tripper.go
index 9dced95c62d..4afab729897 100644
--- a/internal/debug/round_tripper.go
+++ b/internal/debug/round_tripper.go
@@ -42,7 +42,7 @@ func (rd *eofDetectReader) Close() error {
 			msg += fmt.Sprintf(", body: %q", buf)
 		}
 
-		fmt.Fprintln(os.Stderr, msg)
+		_, _ = fmt.Fprintln(os.Stderr, msg)
 		Log("%s: %+v", msg, errors.New("Close()"))
 	}
 	return rd.rd.Close()
diff --git a/internal/dump/common.go b/internal/dump/common.go
index 0163288357d..b4741302e38 100644
--- a/internal/dump/common.go
+++ b/internal/dump/common.go
@@ -6,9 +6,9 @@ import (
 	"path"
 
 	"github.com/restic/restic/internal/bloblru"
-	"github.com/restic/restic/internal/errors"
 	"github.com/restic/restic/internal/restic"
 	"github.com/restic/restic/internal/walker"
+	"golang.org/x/sync/errgroup"
 )
 
 // A Dumper writes trees and files from a repository to a Writer
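The writeNode rewrite further down in this file loads blobs concurrently while still writing them strictly in order, by pushing one buffered result channel per blob into a queue that a single writer drains. A self-contained sketch of that pattern (generic names such as orderedFanOut and load are illustrative, not restic API):

```go
package sketch

import (
	"context"
	"io"

	"golang.org/x/sync/errgroup"
)

// orderedFanOut loads items concurrently (at most limit+1 in flight, one of
// them held by the writer) and writes the results to w in input order.
func orderedFanOut(ctx context.Context, ids []string, limit int, load func(string) ([]byte, error), w io.Writer) error {
	wg, ctx := errgroup.WithContext(ctx)
	results := make(chan (<-chan []byte), limit)

	// The single writer preserves ordering by draining result channels FIFO.
	wg.Go(func() error {
		for ch := range results {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case b := <-ch:
				if _, err := w.Write(b); err != nil {
					return err
				}
			}
		}
		return nil
	})

loop:
	for _, id := range ids {
		id := id                   // capture loop variable (pre-Go 1.22 semantics)
		ch := make(chan []byte, 1) // buffered, so loaders never wait for the writer
		wg.Go(func() error {
			b, err := load(id)
			if err == nil {
				ch <- b
			}
			return err
		})
		select {
		case results <- ch:
		case <-ctx.Done():
			break loop
		}
	}

	close(results)
	return wg.Wait()
}
```

The buffered per-blob channel is the key design choice: a loader can finish and exit even when the writer has not yet reached its slot, which is the point the writeNode comment below makes as well.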
type Dumper struct { cache *bloblru.Cache format string - repo restic.BlobLoader + repo restic.Loader w io.Writer } -func New(format string, repo restic.BlobLoader, w io.Writer) *Dumper { +func New(format string, repo restic.Loader, w io.Writer) *Dumper { return &Dumper{ cache: bloblru.New(64 << 20), format: format, @@ -66,7 +66,7 @@ func sendNodes(ctx context.Context, repo restic.BlobLoader, root *restic.Node, c } // If this is no directory we are finished - if !IsDir(root) { + if root.Type != restic.NodeTypeDir { return nil } @@ -80,7 +80,7 @@ func sendNodes(ctx context.Context, repo restic.BlobLoader, root *restic.Node, c node.Path = path.Join(root.Path, nodepath) - if !IsFile(node) && !IsDir(node) && !IsLink(node) { + if node.Type != restic.NodeTypeFile && node.Type != restic.NodeTypeDir && node.Type != restic.NodeTypeSymlink { return nil } @@ -103,40 +103,50 @@ func (d *Dumper) WriteNode(ctx context.Context, node *restic.Node) error { } func (d *Dumper) writeNode(ctx context.Context, w io.Writer, node *restic.Node) error { - var ( - buf []byte - err error - ) - for _, id := range node.Content { - blob, ok := d.cache.Get(id) - if !ok { - blob, err = d.repo.LoadBlob(ctx, restic.DataBlob, id, buf) - if err != nil { - return err + wg, ctx := errgroup.WithContext(ctx) + limit := d.repo.Connections() - 1 // See below for the -1. + blobs := make(chan (<-chan []byte), limit) + + wg.Go(func() error { + for ch := range blobs { + select { + case <-ctx.Done(): + return ctx.Err() + case blob := <-ch: + if _, err := w.Write(blob); err != nil { + return err + } } - - buf = d.cache.Add(id, blob) // Reuse evicted buffer. } + return nil + }) - if _, err := w.Write(blob); err != nil { - return errors.Wrap(err, "Write") - } - } + // Start short-lived goroutines to load blobs. + // There will be at most 1+cap(blobs) calling LoadBlob at any moment. +loop: + for _, id := range node.Content { + // This needs to be buffered, so that loaders can quit + // without waiting for the writer. + ch := make(chan []byte, 1) - return nil -} + wg.Go(func() error { + blob, err := d.cache.GetOrCompute(id, func() ([]byte, error) { + return d.repo.LoadBlob(ctx, restic.DataBlob, id, nil) + }) -// IsDir checks if the given node is a directory. -func IsDir(node *restic.Node) bool { - return node.Type == "dir" -} + if err == nil { + ch <- blob + } + return err + }) -// IsLink checks if the given node as a link. -func IsLink(node *restic.Node) bool { - return node.Type == "symlink" -} + select { + case blobs <- ch: + case <-ctx.Done(): + break loop + } + } -// IsFile checks if the given node is a file. 
-func IsFile(node *restic.Node) bool { - return node.Type == "file" + close(blobs) + return wg.Wait() } diff --git a/internal/dump/common_test.go b/internal/dump/common_test.go index 3ee9112af34..afd19df6372 100644 --- a/internal/dump/common_test.go +++ b/internal/dump/common_test.go @@ -78,7 +78,7 @@ func WriteTest(t *testing.T, format string, cd CheckDump) { back := rtest.Chdir(t, tmpdir) defer back() - sn, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) + sn, _, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) rtest.OK(t, err) tree, err := restic.LoadTree(ctx, repo, *sn.Tree) diff --git a/internal/dump/tar.go b/internal/dump/tar.go index e8f34deb15a..c5933d4f8ac 100644 --- a/internal/dump/tar.go +++ b/internal/dump/tar.go @@ -79,16 +79,16 @@ func (d *Dumper) dumpNodeTar(ctx context.Context, node *restic.Node, w *tar.Writ header.Mode |= cISVTX } - if IsFile(node) { + if node.Type == restic.NodeTypeFile { header.Typeflag = tar.TypeReg } - if IsLink(node) { + if node.Type == restic.NodeTypeSymlink { header.Typeflag = tar.TypeSymlink header.Linkname = node.LinkTarget } - if IsDir(node) { + if node.Type == restic.NodeTypeDir { header.Typeflag = tar.TypeDir header.Name += "/" } diff --git a/internal/dump/tar_test.go b/internal/dump/tar_test.go index 3556e6aeb30..cb3cb08c438 100644 --- a/internal/dump/tar_test.go +++ b/internal/dump/tar_test.go @@ -13,7 +13,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -83,7 +82,7 @@ func checkTar(t *testing.T, testDir string, srcTar *bytes.Buffer) error { return fmt.Errorf("foldernames must end with separator got %v", hdr.Name) } case tar.TypeSymlink: - target, err := fs.Readlink(matchPath) + target, err := os.Readlink(matchPath) if err != nil { return err } @@ -124,7 +123,7 @@ func TestFieldTooLong(t *testing.T) { node := restic.Node{ Name: "file_with_xattr", Path: "/file_with_xattr", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, ExtendedAttributes: []restic.ExtendedAttribute{ { diff --git a/internal/dump/zip.go b/internal/dump/zip.go index e5ef5c95b78..17aeb4829a1 100644 --- a/internal/dump/zip.go +++ b/internal/dump/zip.go @@ -39,8 +39,11 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri Modified: node.ModTime, } header.SetMode(node.Mode) + if node.Type == restic.NodeTypeFile { + header.Method = zip.Deflate + } - if IsDir(node) { + if node.Type == restic.NodeTypeDir { header.Name += "/" } @@ -49,7 +52,7 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri return errors.Wrap(err, "ZipHeader") } - if IsLink(node) { + if node.Type == restic.NodeTypeSymlink { if _, err = w.Write([]byte(node.LinkTarget)); err != nil { return errors.Wrap(err, "Write") } diff --git a/internal/dump/zip_test.go b/internal/dump/zip_test.go index 0c304d3da56..c6eb0420616 100644 --- a/internal/dump/zip_test.go +++ b/internal/dump/zip_test.go @@ -9,8 +9,6 @@ import ( "strings" "testing" "time" - - "github.com/restic/restic/internal/fs" ) func TestWriteZip(t *testing.T) { @@ -91,7 +89,7 @@ func checkZip(t *testing.T, testDir string, srcZip *bytes.Buffer) error { return fmt.Errorf("foldernames must end with separator got %v", f.Name) } case f.Mode()&os.ModeSymlink != 0: - target, err := fs.Readlink(matchPath) + target, err := os.Readlink(matchPath) if err != nil { return err } @@ -103,6 +101,9 @@ func checkZip(t *testing.T, testDir string, srcZip 
*bytes.Buffer) error { return fmt.Errorf("symlink target does not match, got %s want %s", string(linkName), target) } default: + if f.Method != zip.Deflate { + return fmt.Errorf("expected compression method got %v want %v", f.Method, zip.Deflate) + } if uint64(match.Size()) != f.UncompressedSize64 { return fmt.Errorf("size does not match got %v want %v", f.UncompressedSize64, match.Size()) } diff --git a/internal/errors/errors.go b/internal/errors/errors.go index 0327ea0da40..96e5b82bb3c 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -22,12 +22,24 @@ var Wrap = errors.Wrap // nil, Wrapf returns nil. var Wrapf = errors.Wrapf +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. var WithStack = errors.WithStack // Go 1.13-style error handling. +// As finds the first error in err's tree that matches target, and if one is found, +// sets target to that error value and returns true. Otherwise, it returns false. func As(err error, tgt interface{}) bool { return stderrors.As(err, tgt) } +// Is reports whether any error in err's tree matches target. func Is(x, y error) bool { return stderrors.Is(x, y) } +func Join(errs ...error) error { return stderrors.Join(errs...) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's type contains +// an Unwrap method returning error. Otherwise, Unwrap returns nil. +// +// Unwrap only calls a method of the form "Unwrap() error". In particular Unwrap does not +// unwrap errors returned by [Join]. func Unwrap(err error) error { return stderrors.Unwrap(err) } diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 00000000000..e3b625e928b --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,140 @@ +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +type state string +type FlagName string + +const ( + // Alpha features are disabled by default. They do not guarantee any backwards compatibility and may change in arbitrary ways between restic versions. + Alpha state = "alpha" + // Beta features are enabled by default. They may still change, but incompatible changes should be avoided. 
+ Beta state = "beta" + // Stable features are always enabled + Stable state = "stable" + // Deprecated features are always disabled + Deprecated state = "deprecated" +) + +type FlagDesc struct { + Type state + Description string +} + +type FlagSet struct { + flags map[FlagName]*FlagDesc + enabled map[FlagName]bool +} + +func New() *FlagSet { + return &FlagSet{} +} + +func getDefault(phase state) bool { + switch phase { + case Alpha, Deprecated: + return false + case Beta, Stable: + return true + default: + panic("unknown feature phase") + } +} + +func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) { + f.flags = map[FlagName]*FlagDesc{} + f.enabled = map[FlagName]bool{} + + for name, flag := range flags { + fcopy := flag + f.flags[name] = &fcopy + f.enabled[name] = getDefault(fcopy.Type) + } +} + +func (f *FlagSet) Apply(flags string, logWarning func(string)) error { + if flags == "" { + return nil + } + + selection := make(map[string]bool) + + for _, flag := range strings.Split(flags, ",") { + parts := strings.SplitN(flag, "=", 2) + + name := parts[0] + value := "true" + if len(parts) == 2 { + value = parts[1] + } + + isEnabled, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to parse value %q for feature flag %v: %w", value, name, err) + } + + selection[name] = isEnabled + } + + for name, value := range selection { + fname := FlagName(name) + flag := f.flags[fname] + if flag == nil { + return fmt.Errorf("unknown feature flag %q", name) + } + + switch flag.Type { + case Alpha, Beta: + f.enabled[fname] = value + case Stable: + logWarning(fmt.Sprintf("feature flag %q is always enabled and will be removed in a future release", fname)) + case Deprecated: + logWarning(fmt.Sprintf("feature flag %q is always disabled and will be removed in a future release", fname)) + default: + panic("unknown feature phase") + } + } + + return nil +} + +func (f *FlagSet) Enabled(name FlagName) bool { + isEnabled, ok := f.enabled[name] + if !ok { + panic(fmt.Sprintf("unknown feature flag %v", name)) + } + + return isEnabled +} + +// Help contains information about a feature. 
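Apply, defined above, parses a comma-separated list of name[=bool] pairs and only allows toggling alpha and beta flags. A hedged wiring sketch (reading the list from a RESTIC_FEATURES environment variable is an assumption for illustration, as is running this inside the restic module where internal packages are importable; the flag names come from the registry added later in this diff):

```go
package main

import (
	"fmt"
	"os"

	"github.com/restic/restic/internal/feature"
)

func main() {
	warn := func(msg string) { fmt.Fprintln(os.Stderr, "warning:", msg) }

	// e.g. RESTIC_FEATURES="device-id-for-hardlinks,backend-error-redesign=false"
	if err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), warn); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if feature.Flag.Enabled(feature.DeviceIDForHardlinks) {
		fmt.Println("device-id-for-hardlinks is enabled")
	}
}
```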
+type Help struct { + Name string + Type string + Default bool + Description string +} + +func (f *FlagSet) List() []Help { + var help []Help + + for name, flag := range f.flags { + help = append(help, Help{ + Name: string(name), + Type: string(flag.Type), + Default: getDefault(flag.Type), + Description: flag.Description, + }) + } + + sort.Slice(help, func(i, j int) bool { + return strings.Compare(help[i].Name, help[j].Name) < 0 + }) + + return help +} diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 00000000000..f5d405fa7d4 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,151 @@ +package feature_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +var ( + alpha = feature.FlagName("alpha-feature") + beta = feature.FlagName("beta-feature") + stable = feature.FlagName("stable-feature") + deprecated = feature.FlagName("deprecated-feature") +) + +var testFlags = map[feature.FlagName]feature.FlagDesc{ + alpha: { + Type: feature.Alpha, + Description: "alpha", + }, + beta: { + Type: feature.Beta, + Description: "beta", + }, + stable: { + Type: feature.Stable, + Description: "stable", + }, + deprecated: { + Type: feature.Deprecated, + Description: "deprecated", + }, +} + +func buildTestFlagSet() *feature.FlagSet { + flags := feature.New() + flags.SetFlags(testFlags) + return flags +} + +func TestFeatureDefaults(t *testing.T) { + flags := buildTestFlagSet() + for _, exp := range []struct { + flag feature.FlagName + value bool + }{ + {alpha, false}, + {beta, true}, + {stable, true}, + {deprecated, false}, + } { + rtest.Assert(t, flags.Enabled(exp.flag) == exp.value, "expected flag %v to have value %v got %v", exp.flag, exp.value, flags.Enabled(exp.flag)) + } +} + +func panicIfCalled(msg string) { + panic(msg) +} + +func TestEmptyApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply("", panicIfCalled)) + + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled") +} + +func TestFeatureApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply(string(alpha), panicIfCalled)) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha), panicIfCalled)) + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha), panicIfCalled)) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta), panicIfCalled)) + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") + + logMsg := "" + log := func(msg string) { + logMsg = msg + } + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable), log)) + rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled") + rtest.Assert(t, strings.Contains(logMsg, string(stable)), "unexpected log message for stable flag: %v", logMsg) + + logMsg = "" + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated), log)) + rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled") + rtest.Assert(t, strings.Contains(logMsg, string(deprecated)), "unexpected log message for deprecated flag: %v", logMsg) +} + +func TestFeatureMultipleApply(t *testing.T) { 
+ flags := buildTestFlagSet() + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta), panicIfCalled)) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") +} + +func TestFeatureApplyInvalid(t *testing.T) { + flags := buildTestFlagSet() + + err := flags.Apply("invalid-flag", panicIfCalled) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err) + + err = flags.Apply(fmt.Sprintf("%v=invalid", alpha), panicIfCalled) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err) +} + +func assertPanic(t *testing.T) { + if r := recover(); r == nil { + t.Fatal("should have panicked") + } +} + +func TestFeatureQueryInvalid(t *testing.T) { + defer assertPanic(t) + + flags := buildTestFlagSet() + flags.Enabled("invalid-flag") +} + +func TestFeatureSetInvalidPhase(t *testing.T) { + defer assertPanic(t) + + flags := feature.New() + flags.SetFlags(map[feature.FlagName]feature.FlagDesc{ + "invalid": { + Type: "invalid", + }, + }) +} + +func TestFeatureList(t *testing.T) { + flags := buildTestFlagSet() + + rtest.Equals(t, []feature.Help{ + {string(alpha), string(feature.Alpha), false, "alpha"}, + {string(beta), string(feature.Beta), true, "beta"}, + {string(deprecated), string(feature.Deprecated), false, "deprecated"}, + {string(stable), string(feature.Stable), true, "stable"}, + }, flags.List()) +} diff --git a/internal/feature/registry.go b/internal/feature/registry.go new file mode 100644 index 00000000000..0773ea13650 --- /dev/null +++ b/internal/feature/registry.go @@ -0,0 +1,21 @@ +package feature + +// Flag is named such that checking for a feature uses `feature.Flag.Enabled(feature.ExampleFeature)`. +var Flag = New() + +// flag names are written in kebab-case +const ( + BackendErrorRedesign FlagName = "backend-error-redesign" + DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" + ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth" + SafeForgetKeepTags FlagName = "safe-forget-keep-tags" +) + +func init() { + Flag.SetFlags(map[FlagName]FlagDesc{ + BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."}, + DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, + ExplicitS3AnonymousAuth: {Type: Stable, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, + SafeForgetKeepTags: {Type: Stable, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, + }) +} diff --git a/internal/feature/testing.go b/internal/feature/testing.go new file mode 100644 index 00000000000..b796e89b5b9 --- /dev/null +++ b/internal/feature/testing.go @@ -0,0 +1,33 @@ +package feature + +import ( + "fmt" + "testing" +) + +// TestSetFlag temporarily sets a feature flag to the given value until the +// returned function is called. 
+// +// Usage +// ``` +// defer TestSetFlag(t, features.Flags, features.ExampleFlag, true)() +// ``` +func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() { + current := f.Enabled(flag) + + panicIfCalled := func(msg string) { + panic(msg) + } + + if err := f.Apply(fmt.Sprintf("%s=%v", flag, value), panicIfCalled); err != nil { + // not reachable + panic(err) + } + + return func() { + if err := f.Apply(fmt.Sprintf("%s=%v", flag, current), panicIfCalled); err != nil { + // not reachable + panic(err) + } + } +} diff --git a/internal/feature/testing_test.go b/internal/feature/testing_test.go new file mode 100644 index 00000000000..f11b4bae40d --- /dev/null +++ b/internal/feature/testing_test.go @@ -0,0 +1,19 @@ +package feature_test + +import ( + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +func TestSetFeatureFlag(t *testing.T) { + flags := buildTestFlagSet() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + restore := feature.TestSetFlag(t, flags, alpha, true) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + restore() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled again") +} diff --git a/internal/filter/exclude.go b/internal/filter/exclude.go new file mode 100644 index 00000000000..48ecdfddf77 --- /dev/null +++ b/internal/filter/exclude.go @@ -0,0 +1,162 @@ +package filter + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/textfile" + "github.com/spf13/pflag" +) + +// RejectByNameFunc is a function that takes a filename of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectByNameFunc func(path string) bool + +// RejectByPattern returns a RejectByNameFunc which rejects files that match +// one of the patterns. +func RejectByPattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc { + parsedPatterns := ParsePatterns(patterns) + return func(item string) bool { + matched, err := List(parsedPatterns, item) + if err != nil { + warnf("error for exclude pattern: %v", err) + } + + if matched { + debug.Log("path %q excluded by an exclude pattern", item) + return true + } + + return false + } +} + +// RejectByInsensitivePattern is like RejectByPattern but case insensitive. +func RejectByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc { + for index, path := range patterns { + patterns[index] = strings.ToLower(path) + } + + rejFunc := RejectByPattern(patterns, warnf) + return func(item string) bool { + return rejFunc(strings.ToLower(item)) + } +} + +// readPatternsFromFiles reads all files and returns the list of +// patterns. For each line, leading and trailing white space is removed +// and comment lines are ignored. For each remaining pattern, environment +// variables are resolved. For adding a literal dollar sign ($), write $$ to +// the file. 
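A pattern file consistent with the rules in the comment above could look like this (the paths are illustrative):

```
# comment lines and blank lines are ignored
*.tmp
$HOME/.cache
/work/$PROJECT/build
$$cost-report.txt
```

The last line stores the literal name `$cost-report.txt`, since `$$` escapes the dollar sign before environment expansion runs.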
+func readPatternsFromFiles(files []string) ([]string, error) { + getenvOrDollar := func(s string) string { + if s == "$" { + return "$" + } + return os.Getenv(s) + } + + var patterns []string + for _, filename := range files { + err := func() (err error) { + data, err := textfile.Read(filename) + if err != nil { + return err + } + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // ignore empty lines + if line == "" { + continue + } + + // strip comments + if strings.HasPrefix(line, "#") { + continue + } + + line = os.Expand(line, getenvOrDollar) + patterns = append(patterns, line) + } + return scanner.Err() + }() + if err != nil { + return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err) + } + } + return patterns, nil +} + +type ExcludePatternOptions struct { + Excludes []string + InsensitiveExcludes []string + ExcludeFiles []string + InsensitiveExcludeFiles []string +} + +func (opts *ExcludePatternOptions) Add(f *pflag.FlagSet) { + f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") + f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") + f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") + f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") +} + +func (opts *ExcludePatternOptions) Empty() bool { + return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0 +} + +func (opts ExcludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]RejectByNameFunc, error) { + var fs []RejectByNameFunc + // add patterns from file + if len(opts.ExcludeFiles) > 0 { + excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles) + if err != nil { + return nil, err + } + + if err := ValidatePatterns(excludePatterns); err != nil { + return nil, errors.Fatalf("--exclude-file: %s", err) + } + + opts.Excludes = append(opts.Excludes, excludePatterns...) + } + + if len(opts.InsensitiveExcludeFiles) > 0 { + excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles) + if err != nil { + return nil, err + } + + if err := ValidatePatterns(excludes); err != nil { + return nil, errors.Fatalf("--iexclude-file: %s", err) + } + + opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...) 
+ } + + if len(opts.InsensitiveExcludes) > 0 { + if err := ValidatePatterns(opts.InsensitiveExcludes); err != nil { + return nil, errors.Fatalf("--iexclude: %s", err) + } + + fs = append(fs, RejectByInsensitivePattern(opts.InsensitiveExcludes, warnf)) + } + + if len(opts.Excludes) > 0 { + if err := ValidatePatterns(opts.Excludes); err != nil { + return nil, errors.Fatalf("--exclude: %s", err) + } + + fs = append(fs, RejectByPattern(opts.Excludes, warnf)) + } + return fs, nil +} diff --git a/internal/filter/exclude_test.go b/internal/filter/exclude_test.go new file mode 100644 index 00000000000..738fb216d18 --- /dev/null +++ b/internal/filter/exclude_test.go @@ -0,0 +1,59 @@ +package filter + +import ( + "testing" +) + +func TestRejectByPattern(t *testing.T) { + var tests = []struct { + filename string + reject bool + }{ + {filename: "/home/user/foo.go", reject: true}, + {filename: "/home/user/foo.c", reject: false}, + {filename: "/home/user/foobar", reject: false}, + {filename: "/home/user/foobar/x", reject: true}, + {filename: "/home/user/README", reject: false}, + {filename: "/home/user/README.md", reject: true}, + } + + patterns := []string{"*.go", "README.md", "/home/user/foobar/*"} + + for _, tc := range tests { + t.Run("", func(t *testing.T) { + reject := RejectByPattern(patterns, nil) + res := reject(tc.filename) + if res != tc.reject { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.reject, res) + } + }) + } +} + +func TestRejectByInsensitivePattern(t *testing.T) { + var tests = []struct { + filename string + reject bool + }{ + {filename: "/home/user/foo.GO", reject: true}, + {filename: "/home/user/foo.c", reject: false}, + {filename: "/home/user/foobar", reject: false}, + {filename: "/home/user/FOObar/x", reject: true}, + {filename: "/home/user/README", reject: false}, + {filename: "/home/user/readme.md", reject: true}, + } + + patterns := []string{"*.go", "README.md", "/home/user/foobar/*"} + + for _, tc := range tests { + t.Run("", func(t *testing.T) { + reject := RejectByInsensitivePattern(patterns, nil) + res := reject(tc.filename) + if res != tc.reject { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.reject, res) + } + }) + } +} diff --git a/internal/filter/include.go b/internal/filter/include.go new file mode 100644 index 00000000000..87d5f12079b --- /dev/null +++ b/internal/filter/include.go @@ -0,0 +1,99 @@ +package filter + +import ( + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/spf13/pflag" +) + +// IncludeByNameFunc is a function that takes a filename that should be included +// in the restore process and returns whether it should be included. 
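The childMayMatch return value in the signature below is what lets a tree walker prune entire subtrees early. A hedged sketch of the expected behaviour (under the ListWithChild semantics used by IncludeByPattern further down; assumes it runs inside the restic module):

```go
package main

import (
	"fmt"

	"github.com/restic/restic/internal/filter"
)

func main() {
	inc := filter.IncludeByPattern([]string{"/home/user/*.go"}, func(msg string, args ...interface{}) {
		fmt.Printf("warning: "+msg+"\n", args...)
	})

	matched, childMayMatch := inc("/home/user")
	fmt.Println(matched, childMayMatch) // expected: false true (descend, a child may still match)

	matched, childMayMatch = inc("/etc")
	fmt.Println(matched, childMayMatch) // expected: false false (prune this subtree)
}
```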
+type IncludeByNameFunc func(item string) (matched bool, childMayMatch bool)
+
+type IncludePatternOptions struct {
+	Includes                []string
+	InsensitiveIncludes     []string
+	IncludeFiles            []string
+	InsensitiveIncludeFiles []string
+}
+
+func (opts *IncludePatternOptions) Add(f *pflag.FlagSet) {
+	f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)")
+	f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames")
+	f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)")
+	f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns")
+}
+
+func (opts IncludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]IncludeByNameFunc, error) {
+	var fs []IncludeByNameFunc
+	if len(opts.IncludeFiles) > 0 {
+		includePatterns, err := readPatternsFromFiles(opts.IncludeFiles)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := ValidatePatterns(includePatterns); err != nil {
+			return nil, errors.Fatalf("--include-file: %s", err)
+		}
+
+		opts.Includes = append(opts.Includes, includePatterns...)
+	}
+
+	if len(opts.InsensitiveIncludeFiles) > 0 {
+		includePatterns, err := readPatternsFromFiles(opts.InsensitiveIncludeFiles)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := ValidatePatterns(includePatterns); err != nil {
+			return nil, errors.Fatalf("--iinclude-file: %s", err)
+		}
+
+		opts.InsensitiveIncludes = append(opts.InsensitiveIncludes, includePatterns...)
+	}
+
+	if len(opts.InsensitiveIncludes) > 0 {
+		if err := ValidatePatterns(opts.InsensitiveIncludes); err != nil {
+			return nil, errors.Fatalf("--iinclude: %s", err)
+		}
+
+		fs = append(fs, IncludeByInsensitivePattern(opts.InsensitiveIncludes, warnf))
+	}
+
+	if len(opts.Includes) > 0 {
+		if err := ValidatePatterns(opts.Includes); err != nil {
+			return nil, errors.Fatalf("--include: %s", err)
+		}
+
+		fs = append(fs, IncludeByPattern(opts.Includes, warnf))
+	}
+	return fs, nil
+}
+
+// IncludeByPattern returns an IncludeByNameFunc which includes files that match
+// one of the patterns.
+func IncludeByPattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc {
+	parsedPatterns := ParsePatterns(patterns)
+	return func(item string) (matched bool, childMayMatch bool) {
+		matched, childMayMatch, err := ListWithChild(parsedPatterns, item)
+		if err != nil {
+			warnf("error for include pattern: %v", err)
+		}
+
+		return matched, childMayMatch
+	}
+}
+
+// IncludeByInsensitivePattern returns an IncludeByNameFunc which includes files that match
+// one of the patterns, ignoring the casing of the filenames.
+func IncludeByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc { + for index, path := range patterns { + patterns[index] = strings.ToLower(path) + } + + includeFunc := IncludeByPattern(patterns, warnf) + return func(item string) (matched bool, childMayMatch bool) { + return includeFunc(strings.ToLower(item)) + } +} diff --git a/internal/filter/include_test.go b/internal/filter/include_test.go new file mode 100644 index 00000000000..2f474622cdb --- /dev/null +++ b/internal/filter/include_test.go @@ -0,0 +1,59 @@ +package filter + +import ( + "testing" +) + +func TestIncludeByPattern(t *testing.T) { + var tests = []struct { + filename string + include bool + }{ + {filename: "/home/user/foo.go", include: true}, + {filename: "/home/user/foo.c", include: false}, + {filename: "/home/user/foobar", include: false}, + {filename: "/home/user/foobar/x", include: false}, + {filename: "/home/user/README", include: false}, + {filename: "/home/user/README.md", include: true}, + } + + patterns := []string{"*.go", "README.md"} + + for _, tc := range tests { + t.Run(tc.filename, func(t *testing.T) { + includeFunc := IncludeByPattern(patterns, nil) + matched, _ := includeFunc(tc.filename) + if matched != tc.include { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.include, matched) + } + }) + } +} + +func TestIncludeByInsensitivePattern(t *testing.T) { + var tests = []struct { + filename string + include bool + }{ + {filename: "/home/user/foo.GO", include: true}, + {filename: "/home/user/foo.c", include: false}, + {filename: "/home/user/foobar", include: false}, + {filename: "/home/user/FOObar/x", include: false}, + {filename: "/home/user/README", include: false}, + {filename: "/home/user/readme.MD", include: true}, + } + + patterns := []string{"*.go", "README.md"} + + for _, tc := range tests { + t.Run(tc.filename, func(t *testing.T) { + includeFunc := IncludeByInsensitivePattern(patterns, nil) + matched, _ := includeFunc(tc.filename) + if matched != tc.include { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.include, matched) + } + }) + } +} diff --git a/internal/fs/const_unix.go b/internal/fs/const_unix.go index fe84cda176d..e570c255370 100644 --- a/internal/fs/const_unix.go +++ b/internal/fs/const_unix.go @@ -7,3 +7,6 @@ import "syscall" // O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file. const O_NOFOLLOW int = syscall.O_NOFOLLOW + +// O_DIRECTORY instructs the kernel to only open directories. +const O_DIRECTORY int = syscall.O_DIRECTORY diff --git a/internal/fs/const_windows.go b/internal/fs/const_windows.go index f1b263a54a4..b2b1bab86b6 100644 --- a/internal/fs/const_windows.go +++ b/internal/fs/const_windows.go @@ -3,5 +3,12 @@ package fs -// O_NOFOLLOW is a noop on Windows. -const O_NOFOLLOW int = 0 +// TODO honor flags when opening files + +// O_NOFOLLOW is currently only interpreted by FS.OpenFile in metadataOnly mode and ignored by OpenFile. +// The value of the constant is invented and only for use within this fs package. It must not be used in other contexts. +// It must not conflict with the other O_* values from go/src/syscall/types_windows.go +const O_NOFOLLOW int = 0x40000000 + +// O_DIRECTORY is a noop on Windows. 
+const O_DIRECTORY int = 0 diff --git a/internal/fs/deviceid_unix.go b/internal/fs/deviceid_unix.go deleted file mode 100644 index c366221ab29..00000000000 --- a/internal/fs/deviceid_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !windows -// +build !windows - -package fs - -import ( - "os" - "syscall" - - "github.com/restic/restic/internal/errors" -) - -// DeviceID extracts the device ID from an os.FileInfo object by casting it -// to syscall.Stat_t -func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { - if fi == nil { - return 0, errors.New("unable to determine device: fi is nil") - } - - if fi.Sys() == nil { - return 0, errors.New("unable to determine device: fi.Sys() is nil") - } - - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - // st.Dev is uint32 on Darwin and uint64 on Linux. Just cast - // everything to uint64. - return uint64(st.Dev), nil - } - - return 0, errors.New("Could not cast to syscall.Stat_t") -} diff --git a/internal/fs/deviceid_windows.go b/internal/fs/deviceid_windows.go deleted file mode 100644 index 42355817d5f..00000000000 --- a/internal/fs/deviceid_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build windows -// +build windows - -package fs - -import ( - "os" - - "github.com/restic/restic/internal/errors" -) - -// DeviceID extracts the device ID from an os.FileInfo object by casting it -// to syscall.Stat_t -func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { - return 0, errors.New("Device IDs are not supported on Windows") -} diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go new file mode 100644 index 00000000000..27c2fcdf151 --- /dev/null +++ b/internal/fs/ea_windows.go @@ -0,0 +1,177 @@ +//go:build windows +// +build windows + +package fs + +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +// extendedAttribute is a type alias for winio.ExtendedAttribute +type extendedAttribute = winio.ExtendedAttribute + +// encodeExtendedAttributes encodes the extended attributes to a byte slice. +func encodeExtendedAttributes(attrs []extendedAttribute) ([]byte, error) { + return winio.EncodeExtendedAttributes(attrs) +} + +// decodeExtendedAttributes decodes the extended attributes from a byte slice. +func decodeExtendedAttributes(data []byte) ([]extendedAttribute, error) { + return winio.DecodeExtendedAttributes(data) +} + +// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/pipe.go under MIT license. + +// The MIT License (MIT) + +// Copyright (c) 2015 Microsoft + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+type ntStatus int32
+
+func (status ntStatus) Err() error {
+	if status >= 0 {
+		return nil
+	}
+	return rtlNtStatusToDosError(status)
+}
+
+// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go under MIT license.
+
+// ioStatusBlock represents the IO_STATUS_BLOCK struct defined here:
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_io_status_block
+type ioStatusBlock struct {
+	Status, Information uintptr
+}
+
+var (
+	modntdll                       = windows.NewLazySystemDLL("ntdll.dll")
+	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+)
+
+func rtlNtStatusToDosError(status ntStatus) (winerr error) {
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
+	if r0 != 0 {
+		winerr = syscall.Errno(r0)
+	}
+	return
+}
+
+// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea.go
+// under MIT license.
+
+var (
+	procNtQueryEaFile = modntdll.NewProc("NtQueryEaFile")
+	procNtSetEaFile   = modntdll.NewProc("NtSetEaFile")
+)
+
+const (
+	// STATUS_NO_EAS_ON_FILE indicates that EAs were requested for the file, but the file has no EAs.
+	// Windows NTSTATUS value: STATUS_NO_EAS_ON_FILE=0xC0000052
+	STATUS_NO_EAS_ON_FILE = -1073741742
+)
+
+// fgetEA retrieves the extended attributes for the file represented by `handle`. The
+// `handle` must have been opened with file access flag FILE_READ_EA (0x8).
+// The extended file attribute names in Windows are case-insensitive, and when fetching
+// the attributes the names are generally returned in UPPER case.
+func fgetEA(handle windows.Handle) ([]extendedAttribute, error) {
+	// default buffer size to start with
+	bufLen := 1024
+	buf := make([]byte, bufLen)
+	var iosb ioStatusBlock
+	// keep increasing the buffer size until it is large enough
+	for {
+		status := getFileEA(handle, &iosb, &buf[0], uint32(bufLen), false, 0, 0, nil, true)
+
+		if status == STATUS_NO_EAS_ON_FILE {
+			// If status is STATUS_NO_EAS_ON_FILE (-1073741742), no extended attributes were found
+			return nil, nil
+		}
+		err := status.Err()
+		if err != nil {
+			// convert the NTSTATUS code to a windows error
+			if err == windows.ERROR_INSUFFICIENT_BUFFER || err == windows.ERROR_MORE_DATA {
+				bufLen *= 2
+				buf = make([]byte, bufLen)
+				continue
+			}
+			return nil, fmt.Errorf("get file EA failed with: %w", err)
+		}
+		break
+	}
+	return decodeExtendedAttributes(buf)
+}
+
+// fsetEA sets the extended attributes for the file represented by `handle`. The
+// handle must have been opened with the file access flag FILE_WRITE_EA (0x10).
+func fsetEA(handle windows.Handle, attrs []extendedAttribute) error {
+	encodedEA, err := encodeExtendedAttributes(attrs)
+	if err != nil {
+		return fmt.Errorf("failed to encode extended attributes: %w", err)
+	}
+
+	var iosb ioStatusBlock
+
+	return setFileEA(handle, &iosb, &encodedEA[0], uint32(len(encodedEA))).Err()
+}
+
+// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/zsyscall_windows.go
+// under MIT license.
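+//
+// A minimal usage sketch (hypothetical; `handle` stands in for a real handle
+// opened with FILE_READ_EA|FILE_WRITE_EA access, e.g. via openHandleForEA):
+//
+//	attrs := []extendedAttribute{{Name: "SAMPLE", Value: []byte("42")}}
+//	if err := fsetEA(handle, attrs); err != nil { /* handle error */ }
+//	readBack, err := fgetEA(handle) // names are returned upper-cased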
+ +func getFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen uint32, returnSingleEntry bool, eaList uintptr, eaListLen uint32, eaIndex *uint32, restartScan bool) (status ntStatus) { + var _p0 uint32 + if returnSingleEntry { + _p0 = 1 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } + r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), uintptr(eaList), uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1)) + status = ntStatus(r0) + return +} + +func setFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen uint32) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtSetEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen)) + status = ntStatus(r0) + return +} + +// pathSupportsExtendedAttributes returns true if the path supports extended attributes. +func pathSupportsExtendedAttributes(path string) (supported bool, err error) { + var fileSystemFlags uint32 + utf16Path, err := windows.UTF16PtrFromString(path) + if err != nil { + return false, err + } + err = windows.GetVolumeInformation(utf16Path, nil, 0, nil, nil, &fileSystemFlags, nil, 0) + if err != nil { + return false, err + } + supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0 + return supported, nil +} diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go new file mode 100644 index 00000000000..00cbe97f816 --- /dev/null +++ b/internal/fs/ea_windows_test.go @@ -0,0 +1,279 @@ +//go:build windows +// +build windows + +package fs + +import ( + "crypto/rand" + "fmt" + "math/big" + "os" + "path/filepath" + "reflect" + "syscall" + "testing" + "unsafe" + + "golang.org/x/sys/windows" +) + +// The code below was adapted from github.com/Microsoft/go-winio under MIT license. + +// The MIT License (MIT) + +// Copyright (c) 2015 Microsoft + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go +// under MIT license. 
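+//
+// The testEasEncoded fixture below is a raw FILE_FULL_EA_INFORMATION buffer:
+// each entry consists of a 4-byte next-entry offset, a 1-byte flags field, a
+// 1-byte name length, a 2-byte value length, the NUL-terminated name, the raw
+// value, and padding to the next 4-byte boundary.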
+ +var ( + testEas = []extendedAttribute{ + {Name: "foo", Value: []byte("bar")}, + {Name: "fizz", Value: []byte("buzz")}, + } + + testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, + 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0} + testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3] + testEasTruncated = testEasEncoded[0:20] +) + +func TestRoundTripEas(t *testing.T) { + b, err := encodeExtendedAttributes(testEas) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEasEncoded, b) { + t.Fatalf("Encoded mismatch %v %v", testEasEncoded, b) + } + eas, err := decodeExtendedAttributes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func TestEasDontNeedPaddingAtEnd(t *testing.T) { + eas, err := decodeExtendedAttributes(testEasNotPadded) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func TestTruncatedEasFailCorrectly(t *testing.T) { + _, err := decodeExtendedAttributes(testEasTruncated) + if err == nil { + t.Fatal("expected error") + } +} + +func TestNilEasEncodeAndDecodeAsNil(t *testing.T) { + b, err := encodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(b) != 0 { + t.Fatal("expected empty") + } + eas, err := decodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(eas) != 0 { + t.Fatal("expected empty") + } +} + +// TestSetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile. +func TestSetFileEa(t *testing.T) { + f, err := os.CreateTemp("", "testea") + if err != nil { + t.Fatal(err) + } + defer func() { + err := os.Remove(f.Name()) + if err != nil { + t.Logf("Error removing file %s: %v\n", f.Name(), err) + } + err = f.Close() + if err != nil { + t.Logf("Error closing file %s: %v\n", f.Name(), err) + } + }() + ntdll := syscall.MustLoadDLL("ntdll.dll") + ntSetEaFile := ntdll.MustFindProc("NtSetEaFile") + var iosb [2]uintptr + r, _, _ := ntSetEaFile.Call(f.Fd(), + uintptr(unsafe.Pointer(&iosb[0])), + uintptr(unsafe.Pointer(&testEasEncoded[0])), + uintptr(len(testEasEncoded))) + if r != 0 { + t.Fatalf("NtSetEaFile failed with %08x", r) + } +} + +// The code below was refactored from github.com/Microsoft/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go +// under MIT license. +func TestSetGetFileEA(t *testing.T) { + testFilePath, testFile := setupTestFile(t) + testEAs := generateTestEAs(t, 3, testFilePath) + fileHandle := openFile(t, testFilePath, windows.FILE_ATTRIBUTE_NORMAL) + defer testCloseFileHandle(t, testFilePath, testFile, fileHandle) + + testSetGetEA(t, testFilePath, fileHandle, testEAs) +} + +// The code is new code and reuses code refactored from github.com/Microsoft/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go +// under MIT license. 
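+//
+// Note that the folder handle must be opened with FILE_FLAG_BACKUP_SEMANTICS
+// (see openFile below); without that flag CreateFile cannot open directory paths.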
+func TestSetGetFolderEA(t *testing.T) { + testFolderPath := setupTestFolder(t) + + testEAs := generateTestEAs(t, 3, testFolderPath) + fileHandle := openFile(t, testFolderPath, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS) + defer testCloseFileHandle(t, testFolderPath, nil, fileHandle) + + testSetGetEA(t, testFolderPath, fileHandle, testEAs) +} + +func setupTestFile(t *testing.T) (testFilePath string, testFile *os.File) { + tempDir := t.TempDir() + testFilePath = filepath.Join(tempDir, "testfile.txt") + var err error + if testFile, err = os.Create(testFilePath); err != nil { + t.Fatalf("failed to create temporary file: %s", err) + } + return testFilePath, testFile +} + +func setupTestFolder(t *testing.T) string { + tempDir := t.TempDir() + testfolderPath := filepath.Join(tempDir, "testfolder") + if err := os.Mkdir(testfolderPath, os.ModeDir); err != nil { + t.Fatalf("failed to create temporary folder: %s", err) + } + return testfolderPath +} + +func generateTestEAs(t *testing.T, nAttrs int, path string) []extendedAttribute { + testEAs := make([]extendedAttribute, nAttrs) + for i := 0; i < nAttrs; i++ { + testEAs[i].Name = fmt.Sprintf("TESTEA%d", i+1) + testEAs[i].Value = make([]byte, getRandomInt()) + if _, err := rand.Read(testEAs[i].Value); err != nil { + t.Logf("Error reading rand for path %s: %v\n", path, err) + } + } + return testEAs +} + +func getRandomInt() int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(27)) + if err != nil { + panic(err) + } + n := nBig.Int64() + if n == 0 { + n = getRandomInt() + } + return n +} + +func openFile(t *testing.T, path string, attributes uint32) windows.Handle { + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := uint32(0x8 | 0x10) + fileHandle, err := windows.CreateFile(utf16Path, fileAccessRightReadWriteEA, 0, nil, windows.OPEN_EXISTING, attributes, 0) + if err != nil { + t.Fatalf("open file failed with: %s", err) + } + return fileHandle +} + +func testCloseFileHandle(t *testing.T, testfilePath string, testFile *os.File, handle windows.Handle) { + if testFile != nil { + err := testFile.Close() + if err != nil { + t.Logf("Error closing file %s: %v\n", testFile.Name(), err) + } + } + if err := windows.Close(handle); err != nil { + t.Logf("Error closing file handle %s: %v\n", testfilePath, err) + } + cleanupTestFile(t, testfilePath) +} + +func cleanupTestFile(t *testing.T, path string) { + if err := os.Remove(path); err != nil { + t.Logf("Error removing file/folder %s: %v\n", path, err) + } +} + +func testSetGetEA(t *testing.T, path string, handle windows.Handle, testEAs []extendedAttribute) { + if err := fsetEA(handle, testEAs); err != nil { + t.Fatalf("set EA for path %s failed: %s", path, err) + } + + readEAs, err := fgetEA(handle) + if err != nil { + t.Fatalf("get EA for path %s failed: %s", path, err) + } + + if !reflect.DeepEqual(readEAs, testEAs) { + t.Logf("expected: %+v, found: %+v\n", testEAs, readEAs) + t.Fatalf("EAs read from path %s don't match", path) + } +} + +func TestPathSupportsExtendedAttributes(t *testing.T) { + testCases := []struct { + name string + path string + expected bool + }{ + { + name: "System drive", + path: os.Getenv("SystemDrive") + `\`, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + supported, err := pathSupportsExtendedAttributes(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if supported != tc.expected { + t.Errorf("Expected %v, got %v for path %s", tc.expected, supported, tc.path) + 
} + }) + } + + // Test with an invalid path + _, err := pathSupportsExtendedAttributes("Z:\\NonExistentPath-UAS664da5s4dyu56das45f5as") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} diff --git a/internal/fs/file.go b/internal/fs/file.go index f35901c06ef..57f1a996a07 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -1,17 +1,11 @@ package fs import ( + "fmt" "os" - "path/filepath" - "time" + "runtime" ) -// Mkdir creates a new directory with the specified name and permission bits. -// If there is an error, it will be of type *PathError. -func Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(fixpath(name), perm) -} - // MkdirAll creates a directory named path, along with any necessary parents, // and returns nil, or else returns an error. The permission bits perm are used // for all directories that MkdirAll creates. If path is already a directory, @@ -20,12 +14,6 @@ func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(fixpath(path), perm) } -// Readlink returns the destination of the named symbolic link. -// If there is an error, it will be of type *PathError. -func Readlink(name string) (string, error) { - return os.Readlink(fixpath(name)) -} - // Remove removes the named file or directory. // If there is an error, it will be of type *PathError. func Remove(name string) error { @@ -40,32 +28,12 @@ func RemoveAll(path string) error { return os.RemoveAll(fixpath(path)) } -// Rename renames (moves) oldpath to newpath. -// If newpath already exists, Rename replaces it. -// OS-specific restrictions may apply when oldpath and newpath are in different directories. -// If there is an error, it will be of type *LinkError. -func Rename(oldpath, newpath string) error { - return os.Rename(fixpath(oldpath), fixpath(newpath)) -} - -// Symlink creates newname as a symbolic link to oldname. -// If there is an error, it will be of type *LinkError. -func Symlink(oldname, newname string) error { - return os.Symlink(oldname, fixpath(newname)) -} - // Link creates newname as a hard link to oldname. // If there is an error, it will be of type *LinkError. func Link(oldname, newname string) error { return os.Link(fixpath(oldname), fixpath(newname)) } -// Stat returns a FileInfo structure describing the named file. -// If there is an error, it will be of type *PathError. -func Stat(name string) (os.FileInfo, error) { - return os.Stat(fixpath(name)) -} - // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. @@ -74,53 +42,50 @@ func Lstat(name string) (os.FileInfo, error) { return os.Lstat(fixpath(name)) } -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func Create(name string) (*os.File, error) { - return os.Create(fixpath(name)) -} - -// Open opens a file for reading. -func Open(name string) (File, error) { - return os.Open(fixpath(name)) -} - // OpenFile is the generalized open call; most users will use Open // or Create instead. It opens the named file with specified flag // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, // methods on the returned File can be used for I/O. // If there is an error, it will be of type *PathError. 
func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
+	if runtime.GOOS == "windows" {
+		flag &^= O_NOFOLLOW
+	}
 	return os.OpenFile(fixpath(name), flag, perm)
 }
 
-// Walk walks the file tree rooted at root, calling walkFn for each file or
-// directory in the tree, including root. All errors that arise visiting files
-// and directories are filtered by walkFn. The files are walked in lexical
-// order, which makes the output deterministic but means that for very
-// large directories Walk can be inefficient.
-// Walk does not follow symbolic links.
-func Walk(root string, walkFn filepath.WalkFunc) error {
-	return filepath.Walk(fixpath(root), walkFn)
+// IsAccessDenied checks if the error is due to a permission error.
+func IsAccessDenied(err error) bool {
+	return os.IsPermission(err)
 }
 
-// RemoveIfExists removes a file, returning no error if it does not exist.
-func RemoveIfExists(filename string) error {
-	err := os.Remove(filename)
-	if err != nil && os.IsNotExist(err) {
-		err = nil
+// ResetPermissions resets the permissions of the file at the specified path.
+func ResetPermissions(path string) error {
+	// Set the default file permissions
+	if err := os.Chmod(fixpath(path), 0600); err != nil {
+		return err
 	}
-	return err
+	return nil
 }
 
-// Chtimes changes the access and modification times of the named file,
-// similar to the Unix utime() or utimes() functions.
-//
-// The underlying filesystem may truncate or round the values to a less
-// precise time unit. If there is an error, it will be of type *PathError.
-func Chtimes(name string, atime time.Time, mtime time.Time) error {
-	return os.Chtimes(fixpath(name), atime, mtime)
+// Readdirnames returns a list of files in a directory. Flags are passed to fs.OpenFile.
+// O_RDONLY and O_DIRECTORY are implied.
+func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) {
+	f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags, false)
+	if err != nil {
+		return nil, fmt.Errorf("openfile for readdirnames failed: %w", err)
+	}
+
+	entries, err := f.Readdirnames(-1)
+	if err != nil {
+		_ = f.Close()
+		return nil, fmt.Errorf("readdirnames %v failed: %w", dir, err)
+	}
+
+	err = f.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	return entries, nil
 }
diff --git a/internal/fs/file_unix.go b/internal/fs/file_unix.go
index 65f10c844e3..4e7765c3035 100644
--- a/internal/fs/file_unix.go
+++ b/internal/fs/file_unix.go
@@ -29,7 +29,7 @@ func TempFile(dir, prefix string) (f *os.File, err error) {
 	return f, nil
 }
 
-// isNotSuported returns true if the error is caused by an unsupported file system feature.
+// isNotSupported returns true if the error is caused by an unsupported file system feature.
 func isNotSupported(err error) bool {
 	if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ENOTSUP {
 		return true
@@ -37,8 +37,8 @@ func isNotSupported(err error) bool {
 	return false
 }
 
-// Chmod changes the mode of the named file to mode.
-func Chmod(name string, mode os.FileMode) error {
+// chmod changes the mode of the named file to mode.
+func chmod(name string, mode os.FileMode) error {
 	err := os.Chmod(fixpath(name), mode)
 
 	// ignore the error if the FS does not support setting this mode (e.g. CIFS with gvfs on Linux)
diff --git a/internal/fs/file_unix_test.go b/internal/fs/file_unix_test.go
new file mode 100644
index 00000000000..00d68abb8e5
--- /dev/null
+++ b/internal/fs/file_unix_test.go
@@ -0,0 +1,22 @@
+//go:build unix
+
+package fs
+
+import (
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	"github.com/restic/restic/internal/errors"
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func TestReaddirnamesFifo(t *testing.T) {
+	// should not block when reading from a fifo instead of a directory
+	tempdir := t.TempDir()
+	fifoFn := filepath.Join(tempdir, "fifo")
+	rtest.OK(t, mkfifo(fifoFn, 0o600))
+
+	_, err := Readdirnames(&Local{}, fifoFn, 0)
+	rtest.Assert(t, errors.Is(err, syscall.ENOTDIR), "unexpected error %v", err)
+}
diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go
index d19a744e1be..d7aabf360c3 100644
--- a/internal/fs/file_windows.go
+++ b/internal/fs/file_windows.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/restic/restic/internal/restic"
 	"golang.org/x/sys/windows"
 )
 
@@ -17,19 +18,28 @@ func fixpath(name string) string {
 	abspath, err := filepath.Abs(name)
 	if err == nil {
 		// Check if \\?\UNC\ already exist
-		if strings.HasPrefix(abspath, `\\?\UNC\`) {
+		if strings.HasPrefix(abspath, uncPathPrefix) {
+			return abspath
+		}
+		// Check if \\?\GLOBALROOT exists, which marks volume shadow copy snapshots
+		if strings.HasPrefix(abspath, globalRootPrefix) {
+			if strings.Count(abspath, `\`) == 5 {
+				// Append slash if this is just a volume name, e.g. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX`
+				// Without the trailing slash any access to the volume itself will fail.
+				return abspath + string(filepath.Separator)
+			}
 			return abspath
 		}
 		// Check if \\?\ already exist
-		if strings.HasPrefix(abspath, `\\?\`) {
+		if strings.HasPrefix(abspath, extendedPathPrefix) {
 			return abspath
 		}
 		// Check if path starts with \\
 		if strings.HasPrefix(abspath, `\\`) {
-			return strings.Replace(abspath, `\\`, `\\?\UNC\`, 1)
+			return strings.Replace(abspath, `\\`, uncPathPrefix, 1)
 		}
 		// Normal path
-		return `\\?\` + abspath
+		return extendedPathPrefix + abspath
 	}
 	return name
 }
@@ -74,6 +84,53 @@ func TempFile(dir, prefix string) (f *os.File, err error) {
 }
 
 // Chmod changes the mode of the named file to mode.
-func Chmod(name string, mode os.FileMode) error {
+func chmod(name string, mode os.FileMode) error {
 	return os.Chmod(fixpath(name), mode)
 }
+
+// clearSystem removes the system attribute from the file.
+func clearSystem(path string) error {
+	return clearAttribute(path, windows.FILE_ATTRIBUTE_SYSTEM)
+}
+
+// clearAttribute removes the specified attribute from the file.
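+// The attribute is removed with a read-modify-write of the attribute mask:
+// GetFileAttributes, clear the bit, then SetFileAttributes.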
+func clearAttribute(path string, attribute uint32) error {
+	ptr, err := windows.UTF16PtrFromString(fixpath(path))
+	if err != nil {
+		return err
+	}
+	fileAttributes, err := windows.GetFileAttributes(ptr)
+	if err != nil {
+		return err
+	}
+	if fileAttributes&attribute != 0 {
+		// Clear the attribute
+		fileAttributes &= ^uint32(attribute)
+		err = windows.SetFileAttributes(ptr, fileAttributes)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// openHandleForEA returns a file handle for a file or dir for setting/getting EAs
+func openHandleForEA(nodeType restic.NodeType, path string, writeAccess bool) (handle windows.Handle, err error) {
+	path = fixpath(path)
+	fileAccess := windows.FILE_READ_EA
+	if writeAccess {
+		fileAccess = fileAccess | windows.FILE_WRITE_EA
+	}
+
+	switch nodeType {
+	case restic.NodeTypeFile:
+		utf16Path := windows.StringToUTF16Ptr(path)
+		handle, err = windows.CreateFile(utf16Path, uint32(fileAccess), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0)
+	case restic.NodeTypeDir:
+		utf16Path := windows.StringToUTF16Ptr(path)
+		handle, err = windows.CreateFile(utf16Path, uint32(fileAccess), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+	default:
+		return 0, nil
+	}
+	return handle, err
+}
diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go
index 48c40dc9098..fc6c69cf219 100644
--- a/internal/fs/fs_local.go
+++ b/internal/fs/fs_local.go
@@ -3,6 +3,8 @@ package fs
 import (
 	"os"
 	"path/filepath"
+
+	"github.com/restic/restic/internal/restic"
 )
 
 // Local is the local file system. Most methods are just passed on to the stdlib.
@@ -18,42 +20,28 @@ func (fs Local) VolumeName(path string) string {
 	return filepath.VolumeName(path)
 }
 
-// Open opens a file for reading.
-func (fs Local) Open(name string) (File, error) {
-	f, err := os.Open(fixpath(name))
-	if err != nil {
-		return nil, err
-	}
-	_ = setFlags(f)
-	return f, nil
-}
-
-// OpenFile is the generalized open call; most users will use Open
-// or Create instead. It opens the named file with specified flag
-// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
-// methods on the returned File can be used for I/O.
-// If there is an error, it will be of type *PathError.
-func (fs Local) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
-	f, err := os.OpenFile(fixpath(name), flag, perm)
-	if err != nil {
-		return nil, err
-	}
-	_ = setFlags(f)
-	return f, nil
-}
-
-// Stat returns a FileInfo describing the named file. If there is an error, it
-// will be of type *PathError.
-func (fs Local) Stat(name string) (os.FileInfo, error) {
-	return os.Stat(fixpath(name))
+// OpenFile opens a file or directory for reading.
+//
+// If metadataOnly is set, an implementation MUST return a File object for
+// arbitrary file types including symlinks. The implementation may internally use
+// the given file path or a file handle. In particular, an implementation may
+// delay actually accessing the underlying filesystem.
+//
+// Only the O_NOFOLLOW and O_DIRECTORY flags are supported.
+func (fs Local) OpenFile(name string, flag int, metadataOnly bool) (File, error) {
+	return newLocalFile(name, flag, metadataOnly)
 }
 
 // Lstat returns the FileInfo structure describing the named file.
 // If the file is a symbolic link, the returned FileInfo
 // describes the symbolic link. Lstat makes no attempt to follow the link.
 // If there is an error, it will be of type *PathError.
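+// Unlike OpenFile with metadataOnly set, Lstat always performs the underlying
+// stat call immediately and returns the resulting extended file info.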
-func (fs Local) Lstat(name string) (os.FileInfo, error) { - return os.Lstat(fixpath(name)) +func (fs Local) Lstat(name string) (*ExtendedFileInfo, error) { + fi, err := os.Lstat(fixpath(name)) + if err != nil { + return nil, err + } + return extendedStat(fi), nil } // Join joins any number of path elements into a single path, adding a @@ -96,3 +84,92 @@ func (fs Local) Base(path string) string { func (fs Local) Dir(path string) string { return filepath.Dir(path) } + +type localFile struct { + name string + flag int + f *os.File + fi *ExtendedFileInfo +} + +// See the File interface for a description of each method +var _ File = &localFile{} + +func newLocalFile(name string, flag int, metadataOnly bool) (*localFile, error) { + var f *os.File + if !metadataOnly { + var err error + f, err = os.OpenFile(fixpath(name), flag, 0) + if err != nil { + return nil, err + } + _ = setFlags(f) + } + return &localFile{ + name: name, + flag: flag, + f: f, + }, nil +} + +func (f *localFile) MakeReadable() error { + if f.f != nil { + panic("file is already readable") + } + + newF, err := newLocalFile(f.name, f.flag, false) + if err != nil { + return err + } + // replace state and also reset cached FileInfo + *f = *newF + return nil +} + +func (f *localFile) cacheFI() error { + if f.fi != nil { + return nil + } + var fi os.FileInfo + var err error + if f.f != nil { + fi, err = f.f.Stat() + } else if f.flag&O_NOFOLLOW != 0 { + fi, err = os.Lstat(f.name) + } else { + fi, err = os.Stat(f.name) + } + if err != nil { + return err + } + f.fi = extendedStat(fi) + return nil +} + +func (f *localFile) Stat() (*ExtendedFileInfo, error) { + err := f.cacheFI() + // the call to cacheFI MUST happen before reading from f.fi + return f.fi, err +} + +func (f *localFile) ToNode(ignoreXattrListError bool) (*restic.Node, error) { + if err := f.cacheFI(); err != nil { + return nil, err + } + return nodeFromFileInfo(f.name, f.fi, ignoreXattrListError) +} + +func (f *localFile) Read(p []byte) (n int, err error) { + return f.f.Read(p) +} + +func (f *localFile) Readdirnames(n int) ([]string, error) { + return f.f.Readdirnames(n) +} + +func (f *localFile) Close() error { + if f.f != nil { + return f.f.Close() + } + return nil +} diff --git a/internal/fs/fs_local_test.go b/internal/fs/fs_local_test.go new file mode 100644 index 00000000000..8fd8eb136e7 --- /dev/null +++ b/internal/fs/fs_local_test.go @@ -0,0 +1,221 @@ +package fs + +import ( + "io" + "os" + "path/filepath" + "slices" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type fsLocalMetadataTestcase struct { + name string + follow bool + setup func(t *testing.T, path string) + nodeType restic.NodeType +} + +func TestFSLocalMetadata(t *testing.T) { + for _, test := range []fsLocalMetadataTestcase{ + { + name: "file", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.WriteFile(path, []byte("example"), 0o600)) + }, + nodeType: restic.NodeTypeFile, + }, + { + name: "directory", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.Mkdir(path, 0o600)) + }, + nodeType: restic.NodeTypeDir, + }, + { + name: "symlink", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.Symlink(path+"old", path)) + }, + nodeType: restic.NodeTypeSymlink, + }, + { + name: "symlink file", + follow: true, + setup: func(t *testing.T, path string) { + rtest.OK(t, os.WriteFile(path+"file", []byte("example"), 0o600)) + rtest.OK(t, os.Symlink(path+"file", path)) + }, + nodeType: restic.NodeTypeFile, + }, + } { + 
runFSLocalTestcase(t, test) + } +} + +func runFSLocalTestcase(t *testing.T, test fsLocalMetadataTestcase) { + t.Run(test.name, func(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + test.setup(t, path) + + testFs := &Local{} + flags := 0 + if !test.follow { + flags |= O_NOFOLLOW + } + f, err := testFs.OpenFile(path, flags, true) + rtest.OK(t, err) + checkMetadata(t, f, path, test.follow, test.nodeType) + rtest.OK(t, f.Close()) + }) + +} + +func checkMetadata(t *testing.T, f File, path string, follow bool, nodeType restic.NodeType) { + fi, err := f.Stat() + rtest.OK(t, err) + var fi2 os.FileInfo + if follow { + fi2, err = os.Stat(path) + } else { + fi2, err = os.Lstat(path) + } + rtest.OK(t, err) + assertFIEqual(t, fi2, fi) + + node, err := f.ToNode(false) + rtest.OK(t, err) + + // ModTime is likely unique per file, thus it provides a good indication that it is from the correct file + rtest.Equals(t, fi.ModTime, node.ModTime, "node ModTime") + rtest.Equals(t, nodeType, node.Type, "node Type") +} + +func assertFIEqual(t *testing.T, want os.FileInfo, got *ExtendedFileInfo) { + t.Helper() + rtest.Equals(t, want.Name(), got.Name, "Name") + rtest.Equals(t, want.ModTime(), got.ModTime, "ModTime") + rtest.Equals(t, want.Mode(), got.Mode, "Mode") + rtest.Equals(t, want.Size(), got.Size, "Size") +} + +func TestFSLocalRead(t *testing.T) { + testFSLocalRead(t, false) + testFSLocalRead(t, true) +} + +func testFSLocalRead(t *testing.T, makeReadable bool) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + f := openReadable(t, path, makeReadable) + checkMetadata(t, f, path, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + + rtest.OK(t, f.Close()) +} + +func openReadable(t *testing.T, path string, useMakeReadable bool) File { + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, useMakeReadable) + rtest.OK(t, err) + if useMakeReadable { + // file was opened as metadataOnly. 
open for reading + rtest.OK(t, f.MakeReadable()) + } + return f +} + +func TestFSLocalReaddir(t *testing.T) { + testFSLocalReaddir(t, false) + testFSLocalReaddir(t, true) +} + +func testFSLocalReaddir(t *testing.T, makeReadable bool) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + rtest.OK(t, os.Mkdir(path, 0o700)) + entries := []string{"testfile"} + rtest.OK(t, os.WriteFile(filepath.Join(path, entries[0]), []byte("example"), 0o600)) + + f := openReadable(t, path, makeReadable) + checkMetadata(t, f, path, false, restic.NodeTypeDir) + + names, err := f.Readdirnames(-1) + rtest.OK(t, err) + slices.Sort(names) + rtest.Equals(t, entries, names, "directory content mismatch") + + rtest.OK(t, f.Close()) +} + +func TestFSLocalReadableRace(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + + pathNew := path + "new" + rtest.OK(t, os.Rename(path, pathNew)) + + err = f.MakeReadable() + if err == nil { + // a file handle based implementation should still work + checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + } + + rtest.OK(t, f.Close()) +} + +func TestFSLocalTypeChange(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + // cache metadata + _, err = f.Stat() + rtest.OK(t, err) + + pathNew := path + "new" + // rename instead of unlink to let the test also work on windows + rtest.OK(t, os.Rename(path, pathNew)) + + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, f.MakeReadable()) + + fi, err := f.Stat() + rtest.OK(t, err) + if !fi.Mode.IsDir() { + // a file handle based implementation should still reference the file + checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + } + // else: + // path-based implementation + // nothing to test here. 
stat returned the new file type
+
+	rtest.OK(t, f.Close())
+}
diff --git a/internal/fs/fs_local_unix_test.go b/internal/fs/fs_local_unix_test.go
new file mode 100644
index 00000000000..5bcb5efd032
--- /dev/null
+++ b/internal/fs/fs_local_unix_test.go
@@ -0,0 +1,40 @@
+//go:build unix
+
+package fs
+
+import (
+	"syscall"
+	"testing"
+
+	"github.com/restic/restic/internal/restic"
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func TestFSLocalMetadataUnix(t *testing.T) {
+	for _, test := range []fsLocalMetadataTestcase{
+		{
+			name: "socket",
+			setup: func(t *testing.T, path string) {
+				fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
+				rtest.OK(t, err)
+				defer func() {
+					_ = syscall.Close(fd)
+				}()
+
+				addr := &syscall.SockaddrUnix{Name: path}
+				rtest.OK(t, syscall.Bind(fd, addr))
+			},
+			nodeType: restic.NodeTypeSocket,
+		},
+		{
+			name: "fifo",
+			setup: func(t *testing.T, path string) {
+				rtest.OK(t, mkfifo(path, 0o600))
+			},
+			nodeType: restic.NodeTypeFifo,
+		},
+		// device files can only be created as root
+	} {
+		runFSLocalTestcase(t, test)
+	}
+}
diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go
index aa3522aea5b..dfee31779a3 100644
--- a/internal/fs/fs_local_vss.go
+++ b/internal/fs/fs_local_vss.go
@@ -1,43 +1,109 @@
 package fs
 
 import (
-	"os"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/options"
 )
 
-// ErrorHandler is used to report errors via callback
-type ErrorHandler func(item string, err error) error
+// VSSConfig holds the extended options of the Windows volume shadow copy service.
+type VSSConfig struct {
+	ExcludeAllMountPoints bool          `option:"exclude-all-mount-points" help:"exclude mountpoints from snapshotting on all volumes"`
+	ExcludeVolumes        string        `option:"exclude-volumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"`
+	Timeout               time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"`
+	Provider              string        `option:"provider" help:"VSS provider identifier which will be used for snapshotting"`
+}
+
+func init() {
+	if runtime.GOOS == "windows" {
+		options.Register("vss", VSSConfig{})
+	}
+}
+
+// NewVSSConfig returns a new VSSConfig with the default values filled in.
+func NewVSSConfig() VSSConfig {
+	return VSSConfig{
+		Timeout: time.Second * 120,
+	}
+}
+
+// ParseVSSConfig parses VSS extended options into a VSSConfig struct.
+func ParseVSSConfig(o options.Options) (VSSConfig, error) {
+	cfg := NewVSSConfig()
+	o = o.Extract("vss")
+	if err := o.Apply("vss", &cfg); err != nil {
+		return VSSConfig{}, err
+	}
+
+	return cfg, nil
+}
+
+// ErrorHandler is used to report errors via callback.
+type ErrorHandler func(item string, err error)
 
 // MessageHandler is used to report errors/messages via callbacks.
 type MessageHandler func(msg string, args ...interface{})
 
+// VolumeFilter is used to filter volumes by their mount point or GUID path.
+type VolumeFilter func(volume string) bool
+
 // LocalVss is a wrapper around the local file system which uses windows volume
 // shadow copy service (VSS) in a transparent way.
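+//
+// Hypothetical CLI usage sketch (the option names follow the VSSConfig struct
+// tags above; the exact flag set depends on the restic version):
+//
+//	restic backup --use-fs-snapshot -o vss.timeout=5m -o "vss.exclude-volumes=d:\;e:\" C:\data
+//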
type LocalVss struct {
 	FS
-	snapshots       map[string]VssSnapshot
-	failedSnapshots map[string]struct{}
-	mutex           sync.RWMutex
-	msgError        ErrorHandler
-	msgMessage      MessageHandler
+	snapshots             map[string]VssSnapshot
+	failedSnapshots       map[string]struct{}
+	mutex                 sync.RWMutex
+	msgError              ErrorHandler
+	msgMessage            MessageHandler
+	excludeAllMountPoints bool
+	excludeVolumes        map[string]struct{}
+	timeout               time.Duration
+	provider              string
 }
 
 // statically ensure that LocalVss implements FS.
 var _ FS = &LocalVss{}
 
+// parseMountPoints tries to convert a semicolon-separated list of mount points
+// into a map of lowercased volume GUID paths. Mount points already in volume
+// GUID path format will be validated and normalized.
+func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) {
+	if list == "" {
+		return
+	}
+	for _, s := range strings.Split(list, ";") {
+		if v, err := getVolumeNameForVolumeMountPoint(s); err != nil {
+			msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err))
+		} else {
+			if volumes == nil {
+				volumes = make(map[string]struct{})
+			}
+			volumes[strings.ToLower(v)] = struct{}{}
+		}
+	}
+
+	return
+}
+
 // NewLocalVss creates a new wrapper around the windows filesystem using volume
 // shadow copy service to access locked files.
-func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss {
+func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss {
 	return &LocalVss{
-		FS:              Local{},
-		snapshots:       make(map[string]VssSnapshot),
-		failedSnapshots: make(map[string]struct{}),
-		msgError:        msgError,
-		msgMessage:      msgMessage,
+		FS:                    Local{},
+		snapshots:             make(map[string]VssSnapshot),
+		failedSnapshots:       make(map[string]struct{}),
+		msgError:              msgError,
+		msgMessage:            msgMessage,
+		excludeAllMountPoints: cfg.ExcludeAllMountPoints,
+		excludeVolumes:        parseMountPoints(cfg.ExcludeVolumes, msgError),
+		timeout:               cfg.Timeout,
+		provider:              cfg.Provider,
 	}
 }
 
@@ -50,7 +116,7 @@ func (fs *LocalVss) DeleteSnapshots() {
 
 	for volumeName, snapshot := range fs.snapshots {
 		if err := snapshot.Delete(); err != nil {
-			_ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err))
+			fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err))
 			activeSnapshots[volumeName] = snapshot
 		}
 	}
@@ -58,24 +124,30 @@ func (fs *LocalVss) DeleteSnapshots() {
 	fs.snapshots = activeSnapshots
 }
 
-// Open wraps the Open method of the underlying file system.
-func (fs *LocalVss) Open(name string) (File, error) {
-	return os.Open(fs.snapshotPath(name))
+// OpenFile wraps the OpenFile method of the underlying file system.
+func (fs *LocalVss) OpenFile(name string, flag int, metadataOnly bool) (File, error) {
+	return fs.FS.OpenFile(fs.snapshotPath(name), flag, metadataOnly)
 }
 
-// OpenFile wraps the Open method of the underlying file system.
-func (fs *LocalVss) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
-	return os.OpenFile(fs.snapshotPath(name), flag, perm)
+// Lstat wraps the Lstat method of the underlying file system.
+func (fs *LocalVss) Lstat(name string) (*ExtendedFileInfo, error) {
+	return fs.FS.Lstat(fs.snapshotPath(name))
 }
 
-// Stat wraps the Open method of the underlying file system.
-func (fs *LocalVss) Stat(name string) (os.FileInfo, error) {
-	return os.Stat(fs.snapshotPath(name))
-}
+// isMountPointIncluded returns true if the given mount point is included by the user.
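+// Volumes are compared by their lowercased volume GUID path (resolved via
+// getVolumeNameForVolumeMountPoint), so different spellings of the same
+// mount point, e.g. `c:` and `C:\`, refer to the same exclude entry.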
+func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { + if fs.excludeVolumes == nil { + return true + } -// Lstat wraps the Open method of the underlying file system. -func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { - return os.Lstat(fs.snapshotPath(name)) + volume, err := getVolumeNameForVolumeMountPoint(mountPoint) + if err != nil { + fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) + return true + } + + _, ok := fs.excludeVolumes[strings.ToLower(volume)] + return !ok } // snapshotPath returns the path inside a VSS snapshots if it already exists. @@ -83,7 +155,6 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { // If creation of a snapshot fails the file's original path is returned as // a fallback. func (fs *LocalVss) snapshotPath(path string) string { - fixPath := fixpath(path) if strings.HasPrefix(fixPath, `\\?\UNC\`) { @@ -94,7 +165,7 @@ func (fs *LocalVss) snapshotPath(path string) string { return path } - fixPath = strings.TrimPrefix(fixpath(path), `\\?\`) + fixPath = strings.TrimPrefix(fixPath, `\\?\`) fixPathLower := strings.ToLower(fixPath) volumeName := filepath.VolumeName(fixPath) volumeNameLower := strings.ToLower(volumeName) @@ -114,23 +185,36 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil { - _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", - vssVolume, err)) + if !fs.isMountPointIncluded(vssVolume) { + fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { - fs.snapshots[volumeNameLower] = snapshot - fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) - if len(snapshot.mountPointInfo) > 0 { - fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) - for mp, mpInfo := range snapshot.mountPointInfo { - info := "" - if !mpInfo.IsSnapshotted() { - info = " (not snapshotted)" + fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) + + var includeVolume VolumeFilter + if !fs.excludeAllMountPoints { + includeVolume = func(volume string) bool { + return fs.isMountPointIncluded(volume) + } + } + + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, includeVolume, fs.msgError); err != nil { + fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", + vssVolume, err)) + fs.failedSnapshots[volumeNameLower] = struct{}{} + } else { + fs.snapshots[volumeNameLower] = snapshot + fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) + if len(snapshot.mountPointInfo) > 0 { + fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) + for mp, mpInfo := range snapshot.mountPointInfo { + info := "" + if !mpInfo.IsSnapshotted() { + info = " (not snapshotted)" + } + fs.msgMessage(" - %s%s\n", mp, info) } - fs.msgMessage(" - %s%s\n", mp, info) } } } @@ -173,9 +257,8 @@ func (fs *LocalVss) snapshotPath(path string) string { snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(), strings.TrimPrefix(fixPath, volumeName)) if snapshotPath == snapshot.GetSnapshotDeviceObject() { - snapshotPath = snapshotPath + string(filepath.Separator) + snapshotPath += string(filepath.Separator) } - } else { // no snapshot is available for the requested path: // -> try to 
backup without a snapshot diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go new file mode 100644 index 00000000000..b64897d1cbe --- /dev/null +++ b/internal/fs/fs_local_vss_test.go @@ -0,0 +1,341 @@ +//go:build windows +// +build windows + +package fs + +import ( + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + "time" + + ole "github.com/go-ole/go-ole" + "github.com/restic/restic/internal/options" + rtest "github.com/restic/restic/internal/test" +) + +func matchStrings(ptrs []string, strs []string) bool { + if len(ptrs) != len(strs) { + return false + } + + for i, p := range ptrs { + if p == "" { + return false + } + matched, err := regexp.MatchString(p, strs[i]) + if err != nil { + panic(err) + } + if !matched { + return false + } + } + + return true +} + +func matchMap(strs []string, m map[string]struct{}) bool { + if len(strs) != len(m) { + return false + } + + for _, s := range strs { + if _, ok := m[s]; !ok { + return false + } + } + + return true +} + +func TestVSSConfig(t *testing.T) { + type config struct { + excludeAllMountPoints bool + timeout time.Duration + provider string + } + setTests := []struct { + input options.Options + output config + }{ + { + options.Options{ + "vss.timeout": "6h38m42s", + "vss.provider": "Ms", + }, + config{ + timeout: 23922000000000, + provider: "Ms", + }, + }, + { + options.Options{ + "vss.exclude-all-mount-points": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", + }, + config{ + excludeAllMountPoints: true, + timeout: 120000000000, + provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}", + }, + }, + { + options.Options{ + "vss.exclude-all-mount-points": "0", + "vss.exclude-volumes": "", + "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", + }, + config{ + timeout: 120000000000, + provider: "Microsoft Software Shadow Copy provider 1.0", + }, + }, + } + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + errorHandler := func(item string, err error) { + t.Fatalf("unexpected error (%v)", err) + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || + dst.excludeVolumes != nil || dst.timeout != test.output.timeout || + dst.provider != test.output.provider { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) + } + }) + } +} + +func TestParseMountPoints(t *testing.T) { + volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`) + + // It's not a good idea to test functions based on getVolumeNameForVolumeMountPoint by calling + // getVolumeNameForVolumeMountPoint itself, but we have restricted test environment: + // cannot manage volumes and can only be sure that the mount point C:\ exists + sysVolume, err := getVolumeNameForVolumeMountPoint("C:") + if err != nil { + t.Fatal(err) + } + // We don't know a valid volume GUID path for c:\, but we'll at least check its format + if !volumeMatch.MatchString(sysVolume) { + t.Fatalf("invalid volume GUID path: %s", sysVolume) + } + // Changing the case and removing trailing backslash allows tests + // the equality of different ways of writing a volume name + sysVolumeMutated := 
strings.ToUpper(sysVolume[:len(sysVolume)-1]) + sysVolumeMatch := strings.ToLower(sysVolume) + + type check struct { + volume string + result bool + } + setTests := []struct { + input options.Options + output []string + checks []check + errors []string + }{ + { + options.Options{ + "vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\`, false}, + {`c:`, false}, + {sysVolume, false}, + {sysVolumeMutated, false}, + }, + []string{}, + }, + { + options.Options{ + "vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\windows\`, true}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true}, + {`c:`, false}, + {``, true}, + }, + []string{ + `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[c:\\windows\\\]:.*`, + `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[\]:.*`, + }, + }, + } + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + var log []string + errorHandler := func(item string, err error) { + log = append(log, strings.TrimSpace(err.Error())) + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if !matchMap(test.output, dst.excludeVolumes) { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", + test.output, dst.excludeVolumes) + } + + for _, c := range test.checks { + if dst.isMountPointIncluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result) + } + } + + if !matchStrings(test.errors, log) { + t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log) + } + }) + } +} + +func TestParseProvider(t *testing.T) { + msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}") + setTests := []struct { + provider string + id *ole.GUID + result string + }{ + { + "", + ole.IID_NULL, + "", + }, + { + "mS", + msProvider, + "", + }, + { + "{B5946137-7b9f-4925-Af80-51abD60b20d5}", + msProvider, + "", + }, + { + "Microsoft Software Shadow Copy provider 1.0", + msProvider, + "", + }, + { + "{04560982-3d7d-4bbc-84f7-0712f833a28f}", + nil, + `invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`, + }, + { + "non-existent provider", + nil, + `invalid VSS provider "non-existent provider"`, + }, + } + + _ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + id, err := getProviderID(test.provider) + + if err != nil && id != nil { + t.Fatalf("err!=nil but id=%v", id) + } + + if test.result != "" || err != nil { + var result string + if err != nil { + result = err.Error() + } + if test.result != result || test.result == "" { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) + } + } else if !ole.IsEqualGUID(id, test.id) { + t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String()) + } + }) + } +} + +func TestVSSFS(t *testing.T) { + if runtime.GOOS 
!= "windows" || HasSufficientPrivilegesForVSS() != nil { + t.Skip("vss fs test can only be run on windows with admin privileges") + } + + cfg, err := ParseVSSConfig(options.Options{}) + rtest.OK(t, err) + + errorHandler := func(item string, err error) { + t.Fatalf("unexpected error (%v)", err) + } + messageHandler := func(msg string, args ...interface{}) { + if strings.HasPrefix(msg, "creating VSS snapshot for") || strings.HasPrefix(msg, "successfully created snapshot") { + return + } + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + localVss := NewLocalVss(errorHandler, messageHandler, cfg) + defer localVss.DeleteSnapshots() + + tempdir := t.TempDir() + tempfile := filepath.Join(tempdir, "file") + rtest.OK(t, os.WriteFile(tempfile, []byte("example"), 0o600)) + + // trigger snapshot creation and + // capture FI while file still exists (should already be within the snapshot) + origFi, err := localVss.Lstat(tempfile) + rtest.OK(t, err) + + // remove original file + rtest.OK(t, os.Remove(tempfile)) + + lstatFi, err := localVss.Lstat(tempfile) + rtest.OK(t, err) + rtest.Equals(t, origFi.Mode, lstatFi.Mode) + + f, err := localVss.OpenFile(tempfile, os.O_RDONLY, false) + rtest.OK(t, err) + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, "example", string(data), "unexpected file content") + + node, err := f.ToNode(false) + rtest.OK(t, err) + rtest.Equals(t, node.Mode, lstatFi.Mode) + + rtest.OK(t, f.Close()) +} diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 47af74245be..bbe5c95abed 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -5,11 +5,13 @@ import ( "io" "os" "path" + "slices" "sync" "syscall" "time" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" ) // Reader is a file system which provides a directory with a single file. When @@ -39,8 +41,21 @@ func (fs *Reader) VolumeName(_ string) string { return "" } -// Open opens a file for reading. -func (fs *Reader) Open(name string) (f File, err error) { +func (fs *Reader) fi() *ExtendedFileInfo { + return &ExtendedFileInfo{ + Name: fs.Name, + Mode: fs.Mode, + ModTime: fs.ModTime, + Size: fs.Size, + } +} + +func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { + if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 { + return nil, pathError("open", name, + fmt.Errorf("invalid combination of flags 0x%x", flag)) + } + switch name { case fs.Name: fs.open.Do(func() { @@ -54,7 +69,7 @@ func (fs *Reader) Open(name string) (f File, err error) { return f, nil case "/", ".": f = fakeDir{ - entries: []os.FileInfo{fs.fi()}, + entries: []string{fs.fi().Name}, } return f, nil } @@ -62,56 +77,18 @@ func (fs *Reader) Open(name string) (f File, err error) { return nil, pathError("open", name, syscall.ENOENT) } -func (fs *Reader) fi() os.FileInfo { - return fakeFileInfo{ - name: fs.Name, - size: fs.Size, - mode: fs.Mode, - modtime: fs.ModTime, - } -} - -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *os.PathError. 
-func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err error) { - if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 { - return nil, pathError("open", name, - fmt.Errorf("invalid combination of flags 0x%x", flag)) - } - - fs.open.Do(func() { - f = newReaderFile(fs.ReadCloser, fs.fi(), fs.AllowEmptyFile) - }) - - if f == nil { - return nil, pathError("open", name, syscall.EIO) - } - - return f, nil -} - -// Stat returns a FileInfo describing the named file. If there is an error, it -// will be of type *os.PathError. -func (fs *Reader) Stat(name string) (os.FileInfo, error) { - return fs.Lstat(name) -} - // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. // If there is an error, it will be of type *os.PathError. -func (fs *Reader) Lstat(name string) (os.FileInfo, error) { - getDirInfo := func(name string) os.FileInfo { - fi := fakeFileInfo{ - name: fs.Base(name), - size: 0, - mode: os.ModeDir | 0755, - modtime: time.Now(), +func (fs *Reader) Lstat(name string) (*ExtendedFileInfo, error) { + getDirInfo := func(name string) *ExtendedFileInfo { + return &ExtendedFileInfo{ + Name: fs.Base(name), + Size: 0, + Mode: os.ModeDir | 0755, + ModTime: time.Now(), } - return fi } switch name { @@ -178,13 +155,13 @@ func (fs *Reader) Dir(p string) string { return path.Dir(p) } -func newReaderFile(rd io.ReadCloser, fi os.FileInfo, allowEmptyFile bool) *readerFile { +func newReaderFile(rd io.ReadCloser, fi *ExtendedFileInfo, allowEmptyFile bool) *readerFile { return &readerFile{ ReadCloser: rd, AllowEmptyFile: allowEmptyFile, fakeFile: fakeFile{ - FileInfo: fi, - name: fi.Name(), + fi: fi, + name: fi.Name, }, } } @@ -223,31 +200,23 @@ func (r *readerFile) Close() error { var _ File = &readerFile{} // fakeFile implements all File methods, but only returns errors for anything -// except Stat() and Name(). +// except Stat() type fakeFile struct { name string - os.FileInfo + fi *ExtendedFileInfo } // ensure that fakeFile implements File var _ File = fakeFile{} -func (f fakeFile) Fd() uintptr { - return 0 +func (f fakeFile) MakeReadable() error { + return nil } func (f fakeFile) Readdirnames(_ int) ([]string, error) { return nil, pathError("readdirnames", f.name, os.ErrInvalid) } -func (f fakeFile) Readdir(_ int) ([]os.FileInfo, error) { - return nil, pathError("readdir", f.name, os.ErrInvalid) -} - -func (f fakeFile) Seek(int64, int) (int64, error) { - return 0, pathError("seek", f.name, os.ErrInvalid) -} - func (f fakeFile) Read(_ []byte) (int, error) { return 0, pathError("read", f.name, os.ErrInvalid) } @@ -256,17 +225,24 @@ func (f fakeFile) Close() error { return nil } -func (f fakeFile) Stat() (os.FileInfo, error) { - return f.FileInfo, nil +func (f fakeFile) Stat() (*ExtendedFileInfo, error) { + return f.fi, nil } -func (f fakeFile) Name() string { - return f.name +func (f fakeFile) ToNode(_ bool) (*restic.Node, error) { + node := buildBasicNode(f.name, f.fi) + + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime + + return node, nil } // fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. 
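+// Only Readdirnames remains after this change; it supports only n <= 0 and
+// returns a copy of the entry list, so callers may modify the returned slice.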
type fakeDir struct { - entries []os.FileInfo + entries []string fakeFile } @@ -274,51 +250,7 @@ func (d fakeDir) Readdirnames(n int) ([]string, error) { if n > 0 { return nil, pathError("readdirnames", d.name, errors.New("not implemented")) } - names := make([]string, 0, len(d.entries)) - for _, entry := range d.entries { - names = append(names, entry.Name()) - } - - return names, nil -} - -func (d fakeDir) Readdir(n int) ([]os.FileInfo, error) { - if n > 0 { - return nil, pathError("readdir", d.name, errors.New("not implemented")) - } - return d.entries, nil -} - -// fakeFileInfo implements the bare minimum of os.FileInfo. -type fakeFileInfo struct { - name string - size int64 - mode os.FileMode - modtime time.Time -} - -func (fi fakeFileInfo) Name() string { - return fi.name -} - -func (fi fakeFileInfo) Size() int64 { - return fi.size -} - -func (fi fakeFileInfo) Mode() os.FileMode { - return fi.mode -} - -func (fi fakeFileInfo) ModTime() time.Time { - return fi.modtime -} - -func (fi fakeFileInfo) IsDir() bool { - return fi.mode&os.ModeDir > 0 -} - -func (fi fakeFileInfo) Sys() interface{} { - return nil + return slices.Clone(d.entries), nil } func pathError(op, name string, err error) *os.PathError { diff --git a/internal/fs/fs_reader_command.go b/internal/fs/fs_reader_command.go index 3830e5811c4..2fa4375dd2a 100644 --- a/internal/fs/fs_reader_command.go +++ b/internal/fs/fs_reader_command.go @@ -10,7 +10,7 @@ import ( "github.com/restic/restic/internal/errors" ) -// CommandReader wrap a command such that its standard output can be read using +// CommandReader wraps a command such that its standard output can be read using // a io.ReadCloser. Close() waits for the command to terminate, reporting // any error back to the caller. type CommandReader struct { @@ -29,6 +29,10 @@ type CommandReader struct { } func NewCommandReader(ctx context.Context, args []string, logOutput io.Writer) (*CommandReader, error) { + if len(args) == 0 { + return nil, fmt.Errorf("no command was specified as argument") + } + // Prepare command and stdout command := exec.CommandContext(ctx, args[0], args[1:]...) 
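+	// Note: exec.CommandContext kills the child process once ctx is canceled.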
stdout, err := command.StdoutPipe() diff --git a/internal/fs/fs_reader_command_test.go b/internal/fs/fs_reader_command_test.go index a9028544c4c..8f0d17b1ea5 100644 --- a/internal/fs/fs_reader_command_test.go +++ b/internal/fs/fs_reader_command_test.go @@ -34,6 +34,11 @@ func TestCommandReaderInvalid(t *testing.T) { test.Assert(t, err != nil, "missing error") } +func TestCommandReaderEmptyArgs(t *testing.T) { + _, err := fs.NewCommandReader(context.TODO(), []string{}, io.Discard) + test.Assert(t, err != nil, "missing error") +} + func TestCommandReaderOutput(t *testing.T) { reader, err := fs.NewCommandReader(context.TODO(), []string{"echo", "hello world"}, io.Discard) test.OK(t, err) diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index d3ef5608a2d..257bfbbac76 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -15,29 +15,8 @@ import ( "github.com/restic/restic/internal/test" ) -func verifyFileContentOpen(t testing.TB, fs FS, filename string, want []byte) { - f, err := fs.Open(filename) - if err != nil { - t.Fatal(err) - } - - buf, err := io.ReadAll(f) - if err != nil { - t.Fatal(err) - } - - err = f.Close() - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(want, buf) { - t.Error(cmp.Diff(want, buf)) - } -} - func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { - f, err := fs.OpenFile(filename, O_RDONLY, 0) + f, err := fs.OpenFile(filename, O_RDONLY, false) if err != nil { t.Fatal(err) } @@ -58,7 +37,7 @@ func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte } func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { - f, err := fs.Open(dir) + f, err := fs.OpenFile(dir, O_RDONLY, false) if err != nil { t.Fatal(err) } @@ -81,96 +60,25 @@ func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { } } -type fiSlice []os.FileInfo - -func (s fiSlice) Len() int { - return len(s) -} - -func (s fiSlice) Less(i, j int) bool { - return s[i].Name() < s[j].Name() -} - -func (s fiSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileInfo) { - f, err := fs.Open(dir) - if err != nil { - t.Fatal(err) - } - - entries, err := f.Readdir(-1) - if err != nil { - t.Fatal(err) - } - - err = f.Close() - if err != nil { - t.Fatal(err) - } - - sort.Sort(fiSlice(want)) - sort.Sort(fiSlice(entries)) - - if len(want) != len(entries) { - t.Errorf("wrong number of entries returned, want %d, got %d", len(want), len(entries)) - } - max := len(want) - if len(entries) < max { - max = len(entries) - } - - for i := 0; i < max; i++ { - fi1 := want[i] - fi2 := entries[i] - - if fi1.Name() != fi2.Name() { - t.Errorf("entry %d: wrong value for Name: want %q, got %q", i, fi1.Name(), fi2.Name()) - } - - if fi1.IsDir() != fi2.IsDir() { - t.Errorf("entry %d: wrong value for IsDir: want %v, got %v", i, fi1.IsDir(), fi2.IsDir()) - } - - if fi1.Mode() != fi2.Mode() { - t.Errorf("entry %d: wrong value for Mode: want %v, got %v", i, fi1.Mode(), fi2.Mode()) - } - - if fi1.ModTime() != fi2.ModTime() { - t.Errorf("entry %d: wrong value for ModTime: want %v, got %v", i, fi1.ModTime(), fi2.ModTime()) - } - - if fi1.Size() != fi2.Size() { - t.Errorf("entry %d: wrong value for Size: want %v, got %v", i, fi1.Size(), fi2.Size()) - } - - if fi1.Sys() != fi2.Sys() { - t.Errorf("entry %d: wrong value for Sys: want %v, got %v", i, fi1.Sys(), fi2.Sys()) - } - } -} - -func checkFileInfo(t testing.TB, fi os.FileInfo, 
filename string, modtime time.Time, mode os.FileMode, isdir bool) { - if fi.IsDir() != isdir { - t.Errorf("IsDir returned %t, want %t", fi.IsDir(), isdir) +func checkFileInfo(t testing.TB, fi *ExtendedFileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { + if fi.Mode.IsDir() != isdir { + t.Errorf("IsDir returned %t, want %t", fi.Mode.IsDir(), isdir) } - if fi.Mode() != mode { - t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode()) + if fi.Mode != mode { + t.Errorf("Mode has wrong value, want 0%o, got 0%o", mode, fi.Mode) } - if !modtime.Equal(time.Time{}) && !fi.ModTime().Equal(modtime) { - t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime()) + if !modtime.Equal(time.Time{}) && !fi.ModTime.Equal(modtime) { + t.Errorf("ModTime has wrong value, want %v, got %v", modtime, fi.ModTime) } - if path.Base(fi.Name()) != fi.Name() { - t.Errorf("Name() returned is not base, want %q, got %q", path.Base(fi.Name()), fi.Name()) + if path.Base(fi.Name) != fi.Name { + t.Errorf("Name is not base, want %q, got %q", path.Base(fi.Name), fi.Name) } - if fi.Name() != path.Base(filename) { - t.Errorf("Name() returned wrong value, want %q, got %q", path.Base(filename), fi.Name()) + if fi.Name != path.Base(filename) { + t.Errorf("Name has wrong value, want %q, got %q", path.Base(filename), fi.Name) } } @@ -195,36 +103,6 @@ func TestFSReader(t *testing.T) { verifyDirectoryContents(t, fs, ".", []string{filename}) }, }, - { - name: "Readdir-slash", - f: func(t *testing.T, fs FS) { - fi := fakeFileInfo{ - mode: 0644, - modtime: now, - name: filename, - size: int64(len(data)), - } - verifyDirectoryContentsFI(t, fs, "/", []os.FileInfo{fi}) - }, - }, - { - name: "Readdir-current", - f: func(t *testing.T, fs FS) { - fi := fakeFileInfo{ - mode: 0644, - modtime: now, - name: filename, - size: int64(len(data)), - } - verifyDirectoryContentsFI(t, fs, ".", []os.FileInfo{fi}) - }, - }, - { - name: "file/Open", - f: func(t *testing.T, fs FS) { - verifyFileContentOpen(t, fs, filename, data) - }, - }, { name: "file/OpenFile", f: func(t *testing.T, fs FS) { @@ -245,7 +123,7 @@ func TestFSReader(t *testing.T) { { name: "file/Stat", f: func(t *testing.T, fs FS) { - f, err := fs.Open(filename) + f, err := fs.OpenFile(filename, O_RDONLY, true) if err != nil { t.Fatal(err) } @@ -417,7 +295,7 @@ func TestFSReaderMinFileSize(t *testing.T) { AllowEmptyFile: test.allowEmpty, } - f, err := fs.Open("testfile") + f, err := fs.OpenFile("testfile", O_RDONLY, false) if err != nil { t.Fatal(err) } diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go index 319fbfaff8a..9ebdbb8c4a4 100644 --- a/internal/fs/fs_track.go +++ b/internal/fs/fs_track.go @@ -15,19 +15,9 @@ type Track struct { FS } -// Open wraps the Open method of the underlying file system. -func (fs Track) Open(name string) (File, error) { - f, err := fs.FS.Open(fixpath(name)) - if err != nil { - return nil, err - } - - return newTrackFile(debug.Stack(), name, f), nil -} - // OpenFile wraps the OpenFile method of the underlying file system. 
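+//
+// A minimal usage sketch (illustrative only, not part of this patch; the
+// wrapped Local filesystem and the path are placeholders):
+//
+//	tfs := Track{FS: Local{}}
+//	f, err := tfs.OpenFile("some/file", O_RDONLY, false)
+//	if err == nil {
+//		_ = f.Close() // a leaked handle would panic in the finalizer below
+//	}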
-func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
-	f, err := fs.FS.OpenFile(fixpath(name), flag, perm)
+func (fs Track) OpenFile(name string, flag int, metadataOnly bool) (File, error) {
+	f, err := fs.FS.OpenFile(name, flag, metadataOnly)
 	if err != nil {
 		return nil, err
 	}
@@ -41,7 +31,7 @@ type trackFile struct {
 
 func newTrackFile(stack []byte, filename string, file File) *trackFile {
 	f := &trackFile{file}
-	runtime.SetFinalizer(f, func(f *trackFile) {
+	runtime.SetFinalizer(f, func(_ any) {
 		fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrace:\n%s\n", filename, stack)
 		panic("file " + filename + " not closed")
 	})
diff --git a/internal/fs/helpers.go b/internal/fs/helpers.go
deleted file mode 100644
index 4dd1e0e7338..00000000000
--- a/internal/fs/helpers.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package fs
-
-import "os"
-
-// IsRegularFile returns true if fi belongs to a normal file. If fi is nil,
-// false is returned.
-func IsRegularFile(fi os.FileInfo) bool {
-	if fi == nil {
-		return false
-	}
-
-	return fi.Mode()&os.ModeType == 0
-}
diff --git a/internal/fs/interface.go b/internal/fs/interface.go
index b26c56944bc..d75b0a91d20 100644
--- a/internal/fs/interface.go
+++ b/internal/fs/interface.go
@@ -2,15 +2,22 @@ package fs
 
 import (
 	"io"
-	"os"
+
+	"github.com/restic/restic/internal/restic"
 )
 
 // FS bundles all methods needed for a file system.
 type FS interface {
-	Open(name string) (File, error)
-	OpenFile(name string, flag int, perm os.FileMode) (File, error)
-	Stat(name string) (os.FileInfo, error)
-	Lstat(name string) (os.FileInfo, error)
+	// OpenFile opens a file or directory for reading.
+	//
+	// If metadataOnly is set, an implementation MUST return a File object for
+	// arbitrary file types including symlinks. The implementation may internally use
+	// the given file path or a file handle. In particular, an implementation may
+	// delay actually accessing the underlying filesystem.
+	//
+	// Only the O_NOFOLLOW and O_DIRECTORY flags are supported.
+	OpenFile(name string, flag int, metadataOnly bool) (File, error)
+	Lstat(name string) (*ExtendedFileInfo, error)
 
 	Join(elem ...string) string
 	Separator() string
@@ -23,15 +30,23 @@
 	Base(path string) string
 }
 
-// File is an open file on a file system.
+// File is an open file on a file system. When opened as metadataOnly, an
+// implementation may opt to perform filesystem operations using the filepath
+// instead of actually opening the file.
 type File interface {
+	// MakeReadable reopens a File that was opened metadataOnly for reading.
+	// The method must not be called for files that are already open for reading.
+	// If possible, the underlying file should be reopened atomically.
+	// MakeReadable must work for files and directories.
+	MakeReadable() error
+
	io.Reader
 	io.Closer
 
-	Fd() uintptr
 	Readdirnames(n int) ([]string, error)
-	Readdir(int) ([]os.FileInfo, error)
-	Seek(int64, int) (int64, error)
-	Stat() (os.FileInfo, error)
-	Name() string
+	Stat() (*ExtendedFileInfo, error)
+	// ToNode returns a restic.Node for the File. The internally used os.FileInfo
+	// must be consistent with that returned by Stat(). In particular, the metadata
+	// returned by consecutive calls to Stat() and ToNode() must match.
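+	//
+	// A sketch of the intended call sequence (illustrative only; fsys is a
+	// placeholder for any FS implementation):
+	//
+	//	f, _ := fsys.OpenFile(path, O_NOFOLLOW, true) // metadata only
+	//	fi, _ := f.Stat()
+	//	if fi.Mode.IsRegular() {
+	//		_ = f.MakeReadable() // reopen to read the file content
+	//	}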
+ ToNode(ignoreXattrListError bool) (*restic.Node, error) } diff --git a/internal/fs/mknod_unix.go b/internal/fs/mknod_unix.go new file mode 100644 index 00000000000..024c4d502d9 --- /dev/null +++ b/internal/fs/mknod_unix.go @@ -0,0 +1,18 @@ +//go:build !freebsd && !windows +// +build !freebsd,!windows + +package fs + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func mknod(path string, mode uint32, dev uint64) error { + err := unix.Mknod(path, mode, int(dev)) + if err != nil { + err = &os.PathError{Op: "mknod", Path: path, Err: err} + } + return err +} diff --git a/internal/fs/node.go b/internal/fs/node.go new file mode 100644 index 00000000000..ab2aca957d0 --- /dev/null +++ b/internal/fs/node.go @@ -0,0 +1,299 @@ +package fs + +import ( + "fmt" + "os" + "os/user" + "strconv" + "sync" + "syscall" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// nodeFromFileInfo returns a new node from the given path and FileInfo. It +// returns the first error that is encountered, together with a node. +func nodeFromFileInfo(path string, fi *ExtendedFileInfo, ignoreXattrListError bool) (*restic.Node, error) { + node := buildBasicNode(path, fi) + + if err := nodeFillExtendedStat(node, path, fi); err != nil { + return node, err + } + + err := nodeFillGenericAttributes(node, path, fi) + err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) + return node, err +} + +func buildBasicNode(path string, fi *ExtendedFileInfo) *restic.Node { + mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky + node := &restic.Node{ + Path: path, + Name: fi.Name, + Mode: fi.Mode & mask, + ModTime: fi.ModTime, + } + + node.Type = nodeTypeFromFileInfo(fi.Mode) + if node.Type == restic.NodeTypeFile { + node.Size = uint64(fi.Size) + } + return node +} + +func nodeTypeFromFileInfo(mode os.FileMode) restic.NodeType { + switch mode & os.ModeType { + case 0: + return restic.NodeTypeFile + case os.ModeDir: + return restic.NodeTypeDir + case os.ModeSymlink: + return restic.NodeTypeSymlink + case os.ModeDevice | os.ModeCharDevice: + return restic.NodeTypeCharDev + case os.ModeDevice: + return restic.NodeTypeDev + case os.ModeNamedPipe: + return restic.NodeTypeFifo + case os.ModeSocket: + return restic.NodeTypeSocket + case os.ModeIrregular: + return restic.NodeTypeIrregular + } + + return restic.NodeTypeInvalid +} + +func nodeFillExtendedStat(node *restic.Node, path string, stat *ExtendedFileInfo) error { + node.Inode = stat.Inode + node.DeviceID = stat.DeviceID + node.ChangeTime = stat.ChangeTime + node.AccessTime = stat.AccessTime + + node.UID = stat.UID + node.GID = stat.GID + node.User = lookupUsername(stat.UID) + node.Group = lookupGroup(stat.GID) + + switch node.Type { + case restic.NodeTypeFile: + node.Size = uint64(stat.Size) + node.Links = stat.Links + case restic.NodeTypeDir: + case restic.NodeTypeSymlink: + var err error + node.LinkTarget, err = os.Readlink(fixpath(path)) + node.Links = stat.Links + if err != nil { + return errors.WithStack(err) + } + case restic.NodeTypeDev: + node.Device = stat.Device + node.Links = stat.Links + case restic.NodeTypeCharDev: + node.Device = stat.Device + node.Links = stat.Links + case restic.NodeTypeFifo: + case restic.NodeTypeSocket: + default: + return errors.Errorf("unsupported file type %q", node.Type) + } + return nil +} + +var ( + uidLookupCache = make(map[uint32]string) + uidLookupCacheMutex = sync.RWMutex{} +) + +// Cached user 
name lookup by uid. Returns "" when no name can be found. +func lookupUsername(uid uint32) string { + uidLookupCacheMutex.RLock() + username, ok := uidLookupCache[uid] + uidLookupCacheMutex.RUnlock() + + if ok { + return username + } + + u, err := user.LookupId(strconv.Itoa(int(uid))) + if err == nil { + username = u.Username + } + + uidLookupCacheMutex.Lock() + uidLookupCache[uid] = username + uidLookupCacheMutex.Unlock() + + return username +} + +var ( + gidLookupCache = make(map[uint32]string) + gidLookupCacheMutex = sync.RWMutex{} +) + +// Cached group name lookup by gid. Returns "" when no name can be found. +func lookupGroup(gid uint32) string { + gidLookupCacheMutex.RLock() + group, ok := gidLookupCache[gid] + gidLookupCacheMutex.RUnlock() + + if ok { + return group + } + + g, err := user.LookupGroupId(strconv.Itoa(int(gid))) + if err == nil { + group = g.Name + } + + gidLookupCacheMutex.Lock() + gidLookupCache[gid] = group + gidLookupCacheMutex.Unlock() + + return group +} + +// NodeCreateAt creates the node at the given path but does NOT restore node meta data. +func NodeCreateAt(node *restic.Node, path string) (err error) { + debug.Log("create node %v at %v", node.Name, path) + + switch node.Type { + case restic.NodeTypeDir: + err = nodeCreateDirAt(node, path) + case restic.NodeTypeFile: + err = nodeCreateFileAt(path) + case restic.NodeTypeSymlink: + err = nodeCreateSymlinkAt(node, path) + case restic.NodeTypeDev: + err = nodeCreateDevAt(node, path) + case restic.NodeTypeCharDev: + err = nodeCreateCharDevAt(node, path) + case restic.NodeTypeFifo: + err = nodeCreateFifoAt(path) + case restic.NodeTypeSocket: + err = nil + default: + err = errors.Errorf("filetype %q not implemented", node.Type) + } + + return err +} + +func nodeCreateDirAt(node *restic.Node, path string) error { + err := os.Mkdir(fixpath(path), node.Mode) + if err != nil && !os.IsExist(err) { + return errors.WithStack(err) + } + + return nil +} + +func nodeCreateFileAt(path string) error { + f, err := OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return errors.WithStack(err) + } + + if err := f.Close(); err != nil { + return errors.WithStack(err) + } + + return nil +} + +func nodeCreateSymlinkAt(node *restic.Node, path string) error { + if err := os.Symlink(node.LinkTarget, fixpath(path)); err != nil { + return errors.WithStack(err) + } + + return nil +} + +func nodeCreateDevAt(node *restic.Node, path string) error { + return mknod(path, syscall.S_IFBLK|0600, node.Device) +} + +func nodeCreateCharDevAt(node *restic.Node, path string) error { + return mknod(path, syscall.S_IFCHR|0600, node.Device) +} + +func nodeCreateFifoAt(path string) error { + return mkfifo(path, 0600) +} + +func mkfifo(path string, mode uint32) (err error) { + return mknod(path, mode|syscall.S_IFIFO, 0) +} + +// NodeRestoreMetadata restores node metadata +func NodeRestoreMetadata(node *restic.Node, path string, warn func(msg string), xattrSelectFilter func(xattrName string) bool) error { + err := nodeRestoreMetadata(node, path, warn, xattrSelectFilter) + if err != nil { + // It is common to have permission errors for folders like /home + // unless you're running as root, so ignore those. 
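+		// Note: os.Geteuid() returns -1 on Windows, so the check below only
+		// ever applies on Unix-like platforms.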
+		if os.Geteuid() > 0 && errors.Is(err, os.ErrPermission) {
+			debug.Log("not running as root, ignoring permission error for %v: %v",
+				path, err)
+			return nil
+		}
+		debug.Log("restoreMetadata(%s) error %v", path, err)
+	}
+
+	return err
+}
+
+func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string), xattrSelectFilter func(xattrName string) bool) error {
+	var firsterr error
+
+	if err := lchown(path, int(node.UID), int(node.GID)); err != nil {
+		firsterr = errors.WithStack(err)
+	}
+
+	if err := nodeRestoreExtendedAttributes(node, path, xattrSelectFilter); err != nil {
+		debug.Log("error restoring extended attributes for %v: %v", path, err)
+		if firsterr == nil {
+			firsterr = err
+		}
+	}
+
+	if err := nodeRestoreGenericAttributes(node, path, warn); err != nil {
+		debug.Log("error restoring generic attributes for %v: %v", path, err)
+		if firsterr == nil {
+			firsterr = err
+		}
+	}
+
+	if err := nodeRestoreTimestamps(node, path); err != nil {
+		debug.Log("error restoring timestamps for %v: %v", path, err)
+		if firsterr == nil {
+			firsterr = err
+		}
+	}
+
+	// The timestamps and extended attributes are restored before the Chmod call
+	// below, because on Windows a file that has been marked read-only no longer
+	// allows any modifications, so the calls above would fail.
+	if node.Type != restic.NodeTypeSymlink {
+		if err := chmod(path, node.Mode); err != nil {
+			if firsterr == nil {
+				firsterr = errors.WithStack(err)
+			}
+		}
+	}
+
+	return firsterr
+}
+
+func nodeRestoreTimestamps(node *restic.Node, path string) error {
+	atime := node.AccessTime.UnixNano()
+	mtime := node.ModTime.UnixNano()
+
+	if err := utimesNano(fixpath(path), atime, mtime, node.Type); err != nil {
+		return fmt.Errorf("failed to restore timestamp of %q: %w", path, err)
+	}
+	return nil
+}
diff --git a/internal/fs/node_freebsd.go b/internal/fs/node_freebsd.go
new file mode 100644
index 00000000000..0cbe876f124
--- /dev/null
+++ b/internal/fs/node_freebsd.go
@@ -0,0 +1,21 @@
+//go:build freebsd
+// +build freebsd
+
+package fs
+
+import (
+	"os"
+	"syscall"
+)
+
+func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error {
+	return nil
+}
+
+func mknod(path string, mode uint32, dev uint64) error {
+	err := syscall.Mknod(path, mode, dev)
+	if err != nil {
+		err = &os.PathError{Op: "mknod", Path: path, Err: err}
+	}
+	return err
+}
diff --git a/internal/fs/node_linux.go b/internal/fs/node_linux.go
new file mode 100644
index 00000000000..ee13e0a9ec0
--- /dev/null
+++ b/internal/fs/node_linux.go
@@ -0,0 +1,15 @@
+package fs
+
+import (
+	"github.com/restic/restic/internal/restic"
+	"golang.org/x/sys/unix"
+)
+
+// utimesNano is like syscall.UtimesNano, except that it does not follow symlinks.
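+// Passing unix.AT_SYMLINK_NOFOLLOW makes the kernel update the timestamps of
+// the symlink itself instead of those of its target.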
+func utimesNano(path string, atime, mtime int64, _ restic.NodeType) error { + times := []unix.Timespec{ + unix.NsecToTimespec(atime), + unix.NsecToTimespec(mtime), + } + return unix.UtimesNanoAt(unix.AT_FDCWD, path, times, unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/internal/fs/node_linux_test.go b/internal/fs/node_linux_test.go new file mode 100644 index 00000000000..e9f1cf86083 --- /dev/null +++ b/internal/fs/node_linux_test.go @@ -0,0 +1,19 @@ +package fs + +import ( + "io/fs" + "strings" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestRestoreSymlinkTimestampsError(t *testing.T) { + d := t.TempDir() + node := restic.Node{Type: restic.NodeTypeSymlink} + err := nodeRestoreTimestamps(&node, d+"/nosuchfile") + rtest.Assert(t, errors.Is(err, fs.ErrNotExist), "want ErrNotExist, got %q", err) + rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err) +} diff --git a/internal/fs/node_noxattr.go b/internal/fs/node_noxattr.go new file mode 100644 index 00000000000..2dbd72c9d2d --- /dev/null +++ b/internal/fs/node_noxattr.go @@ -0,0 +1,18 @@ +//go:build aix || dragonfly || openbsd +// +build aix dragonfly openbsd + +package fs + +import ( + "github.com/restic/restic/internal/restic" +) + +// nodeRestoreExtendedAttributes is a no-op +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string, _ func(xattrName string) bool) error { + return nil +} + +// nodeFillExtendedAttributes is a no-op +func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { + return nil +} diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go new file mode 100644 index 00000000000..490ab7e409e --- /dev/null +++ b/internal/fs/node_test.go @@ -0,0 +1,300 @@ +package fs + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" +) + +func BenchmarkNodeFromFileInfo(t *testing.B) { + tempfile, err := os.CreateTemp(t.TempDir(), "restic-test-temp-") + rtest.OK(t, err) + path := tempfile.Name() + rtest.OK(t, tempfile.Close()) + + fs := Local{} + f, err := fs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + _, err = f.Stat() + rtest.OK(t, err) + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + _, err := f.ToNode(false) + rtest.OK(t, err) + } + + rtest.OK(t, f.Close()) +} + +func parseTime(s string) time.Time { + t, err := time.Parse("2006-01-02 15:04:05.999", s) + if err != nil { + panic(err) + } + + return t.Local() +} + +var nodeTests = []restic.Node{ + { + Name: "testFile", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSuidFile", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSetuid, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSuidFile2", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), 
+ Mode: 0755 | os.ModeSetgid, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSticky", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSticky, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testDir", + Type: restic.NodeTypeDir, + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSymlink", + Type: restic.NodeTypeSymlink, + LinkTarget: "invalid", + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0777 | os.ModeSymlink, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + + // include "testFile" and "testDir" again with slightly different + // metadata, so we can test if CreateAt works with pre-existing files. + { + Name: "testFile", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + }, + { + Name: "testDir", + Type: restic.NodeTypeDir, + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + }, + { + Name: "testXattrFile", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {Name: "user.foo", Value: []byte("bar")}, + }, + }, + { + Name: "testXattrDir", + Type: restic.NodeTypeDir, + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {Name: "user.foo", Value: []byte("bar")}, + }, + }, + { + Name: "testXattrFileMacOSResourceFork", + Type: restic.NodeTypeFile, + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {Name: "com.apple.ResourceFork", Value: []byte("bar")}, + }, + }, +} + +func TestNodeRestoreAt(t *testing.T) { + tempdir := t.TempDir() + + for _, test := range nodeTests { + t.Run("", func(t *testing.T) { + var nodePath string + if test.ExtendedAttributes != nil { + if runtime.GOOS == "windows" { + // In windows extended attributes are case insensitive and windows returns + // the extended attributes in UPPER case. 
+				// Update the tests to use UPPER case xattr names for Windows.
+				extAttrArr := test.ExtendedAttributes
+				// Modify the entries in place.
+				for i := 0; i < len(extAttrArr); i++ {
+					extAttrArr[i].Name = strings.ToUpper(extAttrArr[i].Name)
+				}
+			}
+			for _, attr := range test.ExtendedAttributes {
+				if strings.HasPrefix(attr.Name, "com.apple.") && runtime.GOOS != "darwin" {
+					t.Skipf("attr %v only relevant on macOS", attr.Name)
+				}
+			}
+
+			// tempdir might be backed by a filesystem that does not support
+			// extended attributes
+			nodePath = test.Name
+			defer func() {
+				_ = os.Remove(nodePath)
+			}()
+		} else {
+			nodePath = filepath.Join(tempdir, test.Name)
+		}
+		rtest.OK(t, NodeCreateAt(&test, nodePath))
+		// Restore metadata, restoring all xattrs
+		rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) },
+			func(_ string) bool { return true }))
+
+		fs := &Local{}
+		meta, err := fs.OpenFile(nodePath, O_NOFOLLOW, true)
+		rtest.OK(t, err)
+		n2, err := meta.ToNode(false)
+		rtest.OK(t, err)
+		n3, err := meta.ToNode(true)
+		rtest.OK(t, err)
+		rtest.OK(t, meta.Close())
+		rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3))
+
+		rtest.Assert(t, test.Name == n2.Name,
+			"%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name)
+		rtest.Assert(t, test.Type == n2.Type,
+			"%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type)
+		rtest.Assert(t, test.Size == n2.Size,
+			"%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size)
+
+		if runtime.GOOS != "windows" {
+			rtest.Assert(t, test.UID == n2.UID,
+				"%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID)
+			rtest.Assert(t, test.GID == n2.GID,
+				"%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID)
+			if test.Type != restic.NodeTypeSymlink {
+				// On OpenBSD only root can set sticky bit (see sticky(8)).
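+				// The GOOS checks below also skip NetBSD and Solaris, where
+				// setting the sticky bit is likewise restricted to root.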
+ if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" { + rtest.Assert(t, test.Mode == n2.Mode, + "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) + } + } + } + + AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) + AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) + if len(n2.ExtendedAttributes) == 0 { + n2.ExtendedAttributes = nil + } + rtest.Assert(t, reflect.DeepEqual(test.ExtendedAttributes, n2.ExtendedAttributes), + "%v: xattrs don't match (%v != %v)", test.Name, test.ExtendedAttributes, n2.ExtendedAttributes) + }) + } +} + +func AssertFsTimeEqual(t *testing.T, label string, nodeType restic.NodeType, t1 time.Time, t2 time.Time) { + var equal bool + + // Go currently doesn't support setting timestamps of symbolic links on darwin and bsd + if nodeType == restic.NodeTypeSymlink { + switch runtime.GOOS { + case "darwin", "freebsd", "openbsd", "netbsd", "solaris": + return + } + } + + switch runtime.GOOS { + case "darwin": + // HFS+ timestamps don't support sub-second precision, + // see https://en.wikipedia.org/wiki/Comparison_of_file_systems + diff := int(t1.Sub(t2).Seconds()) + equal = diff == 0 + default: + equal = t1.Equal(t2) + } + + rtest.Assert(t, equal, "%s: %s doesn't match (%v != %v)", label, nodeType, t1, t2) +} + +func TestNodeRestoreMetadataError(t *testing.T) { + tempdir := t.TempDir() + + node := &nodeTests[0] + nodePath := filepath.Join(tempdir, node.Name) + + // This will fail because the target file does not exist + err := NodeRestoreMetadata(node, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }, + func(_ string) bool { return true }) + test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") +} diff --git a/internal/fs/node_unix.go b/internal/fs/node_unix.go new file mode 100644 index 00000000000..e88e5425104 --- /dev/null +++ b/internal/fs/node_unix.go @@ -0,0 +1,24 @@ +//go:build !windows +// +build !windows + +package fs + +import ( + "os" + + "github.com/restic/restic/internal/restic" +) + +func lchown(name string, uid, gid int) error { + return os.Lchown(name, uid, gid) +} + +// nodeRestoreGenericAttributes is no-op. +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +} + +// nodeFillGenericAttributes is a no-op. +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil +} diff --git a/internal/fs/node_unix_notlinux.go b/internal/fs/node_unix_notlinux.go new file mode 100644 index 00000000000..f8846638c32 --- /dev/null +++ b/internal/fs/node_unix_notlinux.go @@ -0,0 +1,21 @@ +//go:build !linux && unix + +package fs + +import ( + "syscall" + + "github.com/restic/restic/internal/restic" +) + +// utimesNano is like syscall.UtimesNano, except that it skips symlinks. 
+func utimesNano(path string, atime, mtime int64, typ restic.NodeType) error { + if typ == restic.NodeTypeSymlink { + return nil + } + + return syscall.UtimesNano(path, []syscall.Timespec{ + syscall.NsecToTimespec(atime), + syscall.NsecToTimespec(mtime), + }) +} diff --git a/internal/restic/node_unix_test.go b/internal/fs/node_unix_test.go similarity index 60% rename from internal/restic/node_unix_test.go rename to internal/fs/node_unix_test.go index 374326bf7b8..1eb1ee5061d 100644 --- a/internal/restic/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -1,16 +1,19 @@ //go:build !windows // +build !windows -package restic +package fs import ( + "io/fs" "os" "path/filepath" "runtime" + "strings" "syscall" "testing" - "time" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -27,8 +30,11 @@ func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) { return fi, true } -func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) { +func checkFile(t testing.TB, fi fs.FileInfo, node *restic.Node) { t.Helper() + + stat := fi.Sys().(*syscall.Stat_t) + if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) { t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode) } @@ -41,7 +47,7 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) { t.Errorf("Dev does not match, want %v, got %v", stat.Dev, node.DeviceID) } - if node.Size != uint64(stat.Size) && node.Type != "symlink" { + if node.Size != uint64(stat.Size) && node.Type != restic.NodeTypeSymlink { t.Errorf("Size does not match, want %v, got %v", stat.Size, node.Size) } @@ -58,29 +64,20 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) { } // use the os dependent function to compare the timestamps - s, ok := toStatT(stat) - if !ok { - return - } - - mtime := s.mtim() - if node.ModTime != time.Unix(mtime.Unix()) { - t.Errorf("ModTime does not match, want %v, got %v", time.Unix(mtime.Unix()), node.ModTime) + s := ExtendedStat(fi) + if node.ModTime != s.ModTime { + t.Errorf("ModTime does not match, want %v, got %v", s.ModTime, node.ModTime) } - - ctime := s.ctim() - if node.ChangeTime != time.Unix(ctime.Unix()) { - t.Errorf("ChangeTime does not match, want %v, got %v", time.Unix(ctime.Unix()), node.ChangeTime) + if node.ChangeTime != s.ChangeTime { + t.Errorf("ChangeTime does not match, want %v, got %v", s.ChangeTime, node.ChangeTime) } - - atime := s.atim() - if node.AccessTime != time.Unix(atime.Unix()) { - t.Errorf("AccessTime does not match, want %v, got %v", time.Unix(atime.Unix()), node.AccessTime) + if node.AccessTime != s.AccessTime { + t.Errorf("AccessTime does not match, want %v, got %v", s.AccessTime, node.AccessTime) } - } -func checkDevice(t testing.TB, stat *syscall.Stat_t, node *Node) { +func checkDevice(t testing.TB, fi fs.FileInfo, node *restic.Node) { + stat := fi.Sys().(*syscall.Stat_t) if node.Device != uint64(stat.Rdev) { t.Errorf("Rdev does not match, want %v, got %v", stat.Rdev, node.Device) } @@ -117,31 +114,33 @@ func TestNodeFromFileInfo(t *testing.T) { return } - if fi.Sys() == nil { - t.Skip("fi.Sys() is nil") - return - } + fs := &Local{} + meta, err := fs.OpenFile(test.filename, O_NOFOLLOW, true) + rtest.OK(t, err) + node, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - t.Skipf("fi type is %T, not stat_t", fi.Sys()) - return - } - - node, err := NodeFromFileInfo(test.filename, fi) - if err != nil 
{ - t.Fatal(err) - } + rtest.OK(t, err) switch node.Type { - case "file", "symlink": - checkFile(t, s, node) - case "dev", "chardev": - checkFile(t, s, node) - checkDevice(t, s, node) + case restic.NodeTypeFile, restic.NodeTypeSymlink: + checkFile(t, fi, node) + case restic.NodeTypeDev, restic.NodeTypeCharDev: + checkFile(t, fi, node) + checkDevice(t, fi, node) default: t.Fatalf("invalid node type %q", node.Type) } }) } } + +func TestMknodError(t *testing.T) { + d := t.TempDir() + // Call mkfifo, which calls mknod, as mknod may give + // "operation not permitted" on Mac. + err := mkfifo(d, 0) + rtest.Assert(t, errors.Is(err, os.ErrExist), "want ErrExist, got %q", err) + rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err) +} diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go new file mode 100644 index 00000000000..df0a7ea651f --- /dev/null +++ b/internal/fs/node_windows.go @@ -0,0 +1,497 @@ +package fs + +import ( + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + "golang.org/x/sys/windows" +) + +var ( + modAdvapi32 = syscall.NewLazyDLL("advapi32.dll") + procEncryptFile = modAdvapi32.NewProc("EncryptFileW") + procDecryptFile = modAdvapi32.NewProc("DecryptFileW") + + // eaSupportedVolumesMap is a map of volumes to boolean values indicating if they support extended attributes. + eaSupportedVolumesMap = sync.Map{} +) + +const ( + extendedPathPrefix = `\\?\` + uncPathPrefix = `\\?\UNC\` + globalRootPrefix = `\\?\GLOBALROOT\` + volumeGUIDPrefix = `\\?\Volume{` +) + +// mknod is not supported on Windows. +func mknod(_ string, _ uint32, _ uint64) (err error) { + return errors.New("device nodes cannot be created on windows") +} + +// Windows doesn't need lchown +func lchown(_ string, _ int, _ int) (err error) { + return nil +} + +// utimesNano is like syscall.UtimesNano, except that it sets FILE_FLAG_OPEN_REPARSE_POINT. 
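+// FILE_FLAG_OPEN_REPARSE_POINT opens the reparse point (e.g. a symlink)
+// itself rather than its target, so the timestamps of the link are updated.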
+func utimesNano(path string, atime, mtime int64, _ restic.NodeType) error { + // tweaked version of UtimesNano from go/src/syscall/syscall_windows.go + pathp, e := syscall.UTF16PtrFromString(fixpath(path)) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if e != nil { + return e + } + + defer func() { + err := syscall.Close(h) + if err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } + }() + + a := syscall.NsecToFiletime(atime) + w := syscall.NsecToFiletime(mtime) + return syscall.SetFileTime(h, nil, &a, &w) +} + +// restore extended attributes for windows +func nodeRestoreExtendedAttributes(node *restic.Node, path string, xattrSelectFilter func(xattrName string) bool) error { + count := len(node.ExtendedAttributes) + if count > 0 { + eas := []extendedAttribute{} + for _, attr := range node.ExtendedAttributes { + // Filter for xattrs we want to include/exclude + if xattrSelectFilter(attr.Name) { + eas = append(eas, extendedAttribute{Name: attr.Name, Value: attr.Value}) + } + } + if len(eas) > 0 { + if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { + return errExt + } + } + } + return nil +} + +// fill extended attributes in the node +// It also checks if the volume supports extended attributes and stores the result in a map +// so that it does not have to be checked again for subsequent calls for paths in the same volume. +func nodeFillExtendedAttributes(node *restic.Node, path string, _ bool) (err error) { + if strings.Contains(filepath.Base(path), ":") { + // Do not process for Alternate Data Streams in Windows + return nil + } + + // only capture xattrs for file/dir + if node.Type != restic.NodeTypeFile && node.Type != restic.NodeTypeDir { + return nil + } + + allowExtended, err := checkAndStoreEASupport(path) + if err != nil { + return err + } + if !allowExtended { + return nil + } + + var fileHandle windows.Handle + if fileHandle, err = openHandleForEA(node.Type, path, false); fileHandle == 0 { + return nil + } + if err != nil { + return errors.Errorf("get EA failed while opening file handle for path %v, with: %v", path, err) + } + defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call + //Get the windows Extended Attributes using the file handle + var extAtts []extendedAttribute + extAtts, err = fgetEA(fileHandle) + debug.Log("fillExtendedAttributes(%v) %v", path, extAtts) + if err != nil { + return errors.Errorf("get EA failed for path %v, with: %v", path, err) + } + if len(extAtts) == 0 { + return nil + } + + //Fill the ExtendedAttributes in the node using the name/value pairs in the windows EA + for _, attr := range extAtts { + extendedAttr := restic.ExtendedAttribute{ + Name: attr.Name, + Value: attr.Value, + } + + node.ExtendedAttributes = append(node.ExtendedAttributes, extendedAttr) + } + return nil +} + +// closeFileHandle safely closes a file handle and logs any errors. +func closeFileHandle(fileHandle windows.Handle, path string) { + err := windows.CloseHandle(fileHandle) + if err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } +} + +// restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. +// The Windows API requires setting of all the Extended Attributes in one call. 
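+// Attributes present on the file but missing from eas are cleared by setting
+// them to an empty value, since a zero-length value removes an EA on Windows.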
+func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []extendedAttribute) (err error) { + var fileHandle windows.Handle + if fileHandle, err = openHandleForEA(nodeType, path, true); fileHandle == 0 { + return nil + } + if err != nil { + return errors.Errorf("set EA failed while opening file handle for path %v, with: %v", path, err) + } + defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call + + // clear old unexpected xattrs by setting them to an empty value + oldEAs, err := fgetEA(fileHandle) + if err != nil { + return err + } + + for _, oldEA := range oldEAs { + found := false + for _, ea := range eas { + if strings.EqualFold(ea.Name, oldEA.Name) { + found = true + break + } + } + + if !found { + eas = append(eas, extendedAttribute{Name: oldEA.Name, Value: nil}) + } + } + + if err = fsetEA(fileHandle, eas); err != nil { + return errors.Errorf("set EA failed for path %v, with: %v", path, err) + } + return nil +} + +// restoreGenericAttributes restores generic attributes for Windows +func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg string)) (err error) { + if len(node.GenericAttributes) == 0 { + return nil + } + var errs []error + windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes) + if err != nil { + return fmt.Errorf("error parsing generic attribute for: %s : %v", path, err) + } + if windowsAttributes.CreationTime != nil { + if err := restoreCreationTime(path, windowsAttributes.CreationTime); err != nil { + errs = append(errs, fmt.Errorf("error restoring creation time for: %s : %v", path, err)) + } + } + if windowsAttributes.FileAttributes != nil { + if err := restoreFileAttributes(path, windowsAttributes.FileAttributes); err != nil { + errs = append(errs, fmt.Errorf("error restoring file attributes for: %s : %v", path, err)) + } + } + if windowsAttributes.SecurityDescriptor != nil { + if err := setSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil { + errs = append(errs, fmt.Errorf("error restoring security descriptor for: %s : %v", path, err)) + } + } + + restic.HandleUnknownGenericAttributesFound(unknownAttribs, warn) + return errors.Join(errs...) +} + +// genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert. +func genericAttributesToWindowsAttrs(attrs map[restic.GenericAttributeType]json.RawMessage) (windowsAttributes restic.WindowsAttributes, unknownAttribs []restic.GenericAttributeType, err error) { + waValue := reflect.ValueOf(&windowsAttributes).Elem() + unknownAttribs, err = restic.GenericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") + return windowsAttributes, unknownAttribs, err +} + +// restoreCreationTime gets the creation time from the data and sets it to the file/folder at +// the specified path. 
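+// The nil arguments to SetFileTime below leave the last-access and
+// last-write times unchanged.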
+func restoreCreationTime(path string, creationTime *syscall.Filetime) (err error) {
+	pathPointer, err := syscall.UTF16PtrFromString(fixpath(path))
+	if err != nil {
+		return err
+	}
+	handle, err := syscall.CreateFile(pathPointer,
+		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
+		syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := syscall.Close(handle); err != nil {
+			debug.Log("Error closing file handle for %s: %v\n", path, err)
+		}
+	}()
+	return syscall.SetFileTime(handle, creationTime, nil, nil)
+}
+
+// restoreFileAttributes gets the File Attributes from the data and sets them to the file/folder
+// at the specified path.
+func restoreFileAttributes(path string, fileAttributes *uint32) (err error) {
+	pathPointer, err := syscall.UTF16PtrFromString(fixpath(path))
+	if err != nil {
+		return err
+	}
+	err = fixEncryptionAttribute(path, fileAttributes, pathPointer)
+	if err != nil {
+		debug.Log("Could not change encryption attribute for path: %s: %v", path, err)
+	}
+	return syscall.SetFileAttributes(pathPointer, *fileAttributes)
+}
+
+// fixEncryptionAttribute sets FILE_ATTRIBUTE_ENCRYPTED if a file needs to be marked
+// encrypted but is not encrypted yet. Conversely, if the file needs to be marked
+// unencrypted and it is already marked encrypted, it removes the FILE_ATTRIBUTE_ENCRYPTED.
+func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (err error) {
+	if *attrs&windows.FILE_ATTRIBUTE_ENCRYPTED != 0 {
+		// File should be encrypted.
+		err = encryptFile(pathPointer)
+		if err != nil {
+			if IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) {
+				// If the existing file already has the readonly or system flag, the encrypt file call fails.
+				// The readonly and system flags will be set again at the end of this func if they are needed.
+				err = ResetPermissions(path)
+				if err != nil {
+					return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err)
+				}
+				err = clearSystem(path)
+				if err != nil {
+					return fmt.Errorf("failed to encrypt file: failed to clear system flag: %s : %v", path, err)
+				}
+				err = encryptFile(pathPointer)
+				if err != nil {
+					return fmt.Errorf("retry to encrypt file failed: %s : %v", path, err)
+				}
+			} else {
+				return fmt.Errorf("failed to encrypt file: %s : %v", path, err)
+			}
+		}
+	} else {
+		existingAttrs, err := windows.GetFileAttributes(pathPointer)
+		if err != nil {
+			return fmt.Errorf("failed to get file attributes for existing file: %s : %v", path, err)
+		}
+		if existingAttrs&windows.FILE_ATTRIBUTE_ENCRYPTED != 0 {
+			// File should not be encrypted, but it is already encrypted. Decrypt it.
+			err = decryptFile(pathPointer)
+			if err != nil {
+				if IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) {
+					// If the existing file already has the readonly or system flag, the decrypt file call fails.
+					// The readonly and system flags will be set again after this func if they are needed.
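+					// ResetPermissions and clearSystem strip the blocking
+					// flags so that the decryptFile retry below can succeed.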
+					err = ResetPermissions(path)
+					if err != nil {
+						return fmt.Errorf("failed to decrypt file: failed to reset permissions: %s : %v", path, err)
+					}
+					err = clearSystem(path)
+					if err != nil {
+						return fmt.Errorf("failed to decrypt file: failed to clear system flag: %s : %v", path, err)
+					}
+					err = decryptFile(pathPointer)
+					if err != nil {
+						return fmt.Errorf("retry to decrypt file failed: %s : %v", path, err)
+					}
+				} else {
+					return fmt.Errorf("failed to decrypt file: %s : %v", path, err)
+				}
+			}
+		}
+	}
+	return err
+}
+
+// encryptFile sets the encrypted flag on the file.
+func encryptFile(pathPointer *uint16) error {
+	// Call the EncryptFile function.
+	ret, _, err := procEncryptFile.Call(uintptr(unsafe.Pointer(pathPointer)))
+	if ret == 0 {
+		return err
+	}
+	return nil
+}
+
+// decryptFile removes the encrypted flag from the file.
+func decryptFile(pathPointer *uint16) error {
+	// Call the DecryptFile function.
+	ret, _, err := procDecryptFile.Call(uintptr(unsafe.Pointer(pathPointer)))
+	if ret == 0 {
+		return err
+	}
+	return nil
+}
+
+// nodeFillGenericAttributes fills in the generic attributes for Windows, like file attributes,
+// creation time and security descriptors.
+func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) error {
+	if strings.Contains(filepath.Base(path), ":") {
+		// Do not process for Alternate Data Streams in Windows
+		return nil
+	}
+
+	isVolume, err := isVolumePath(path)
+	if err != nil {
+		return err
+	}
+	if isVolume {
+		// Do not process file attributes, created time and sd for Windows root volume paths.
+		// Security descriptors are not supported for root volume paths.
+		// Though file attributes and created time are supported for root volume paths,
+		// we ignore them and we do not want to replace them during every restore.
+		return nil
+	}
+
+	var sd *[]byte
+	if node.Type == restic.NodeTypeFile || node.Type == restic.NodeTypeDir {
+		if sd, err = getSecurityDescriptor(path); err != nil {
+			return err
+		}
+	}
+
+	winFI := stat.sys.(*syscall.Win32FileAttributeData)
+
+	// Add Windows attributes
+	node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{
+		CreationTime:       &winFI.CreationTime,
+		FileAttributes:     &winFI.FileAttributes,
+		SecurityDescriptor: sd,
+	})
+	return err
+}
+
+// checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map.
+// If the result is already in the map, it returns the result from the map.
+func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) {
+	var volumeName string
+	volumeName, err = prepareVolumeName(path)
+	if err != nil {
+		return false, err
+	}
+
+	if volumeName != "" {
+		// First check if the manually prepared volume name is already in the map
+		eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName)
+		if exists {
+			// Cache hit, immediately return the cached value
+			return eaSupportedValue.(bool), nil
+		}
+		// If not found, check if EA is supported with manually prepared volume name
+		isEASupportedVolume, err = pathSupportsExtendedAttributes(volumeName + `\`)
+		// If the prepared volume name is not valid, we will fetch the actual volume name next.
+		if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) {
+			debug.Log("Error checking if extended attributes are supported for prepared volume name %s: %v", volumeName, err)
+			// There can be multiple errors like path does not exist, bad network path, etc.
+			// We just gracefully disallow extended attributes for such cases.
+			return false, nil
+		}
+	}
+	// If an entry is not found, get the actual volume name
+	volumeNameActual, err := getVolumePathName(path)
+	if err != nil {
+		debug.Log("Error getting actual volume name %s for path %s: %v", volumeName, path, err)
+		// There can be multiple errors like path does not exist, bad network path, etc.
+		// We just gracefully disallow extended attributes for such cases.
+		return false, nil
+	}
+	if volumeNameActual != volumeName {
+		// If the actual volume name is different, check cache for the actual volume name
+		eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeNameActual)
+		if exists {
+			// Cache hit, immediately return the cached value
+			return eaSupportedValue.(bool), nil
+		}
+		// If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name
+		isEASupportedVolume, err = pathSupportsExtendedAttributes(volumeNameActual + `\`)
+		// Debug log for cases where the actual volume name is not valid
+		if err != nil {
+			debug.Log("Error checking if extended attributes are supported for actual volume name %s: %v", volumeNameActual, err)
+			// There can be multiple errors like path does not exist, bad network path, etc.
+			// We just gracefully disallow extended attributes for such cases.
+			return false, nil
+		} else {
+			debug.Log("Checking extended attributes. Prepared volume name: %s, actual volume name: %s, isEASupportedVolume: %v, err: %v", volumeName, volumeNameActual, isEASupportedVolume, err)
+		}
+	}
+	if volumeNameActual != "" {
+		eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume)
+	}
+	return isEASupportedVolume, err
+}
+
+// getVolumePathName returns the volume path name for the given path.
+func getVolumePathName(path string) (volumeName string, err error) {
+	utf16Path, err := windows.UTF16PtrFromString(path)
+	if err != nil {
+		return "", err
+	}
+	// Get the volume path (e.g., "D:")
+	var volumePath [windows.MAX_PATH + 1]uint16
+	err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1)
+	if err != nil {
+		return "", err
+	}
+	// Trim any trailing backslashes
+	volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\")
+	return volumeName, nil
+}
+
+// isVolumePath returns whether a path refers to a volume
+func isVolumePath(path string) (bool, error) {
+	volName, err := prepareVolumeName(path)
+	if err != nil {
+		return false, err
+	}
+
+	cleanPath := filepath.Clean(path)
+	cleanVolume := filepath.Clean(volName + `\`)
+	return cleanPath == cleanVolume, nil
+}
+
+// prepareVolumeName prepares the volume name for different cases in Windows
+func prepareVolumeName(path string) (volumeName string, err error) {
+	// Check if it's an extended length path
+	if strings.HasPrefix(path, globalRootPrefix) {
+		// Extract the VSS snapshot volume name eg.
`\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX` + if parts := strings.SplitN(path, `\`, 7); len(parts) >= 6 { + volumeName = strings.Join(parts[:6], `\`) + } else { + volumeName = filepath.VolumeName(path) + } + } else { + if !strings.HasPrefix(path, volumeGUIDPrefix) { // Handle volume GUID path + if strings.HasPrefix(path, uncPathPrefix) { + // Convert \\?\UNC\ extended path to standard path to get the volume name correctly + path = `\\` + path[len(uncPathPrefix):] + } else if strings.HasPrefix(path, extendedPathPrefix) { + //Extended length path prefix needs to be trimmed to get the volume name correctly + path = path[len(extendedPathPrefix):] + } else { + // Use the absolute path + path, err = filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path: %w", err) + } + } + } + volumeName = filepath.VolumeName(path) + } + return volumeName, nil +} diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go new file mode 100644 index 00000000000..458a7bcb179 --- /dev/null +++ b/internal/fs/node_windows_test.go @@ -0,0 +1,578 @@ +//go:build windows +// +build windows + +package fs + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" + "golang.org/x/sys/windows" +) + +func TestRestoreSecurityDescriptors(t *testing.T) { + t.Parallel() + tempDir := t.TempDir() + for i, sd := range testFileSDs { + testRestoreSecurityDescriptor(t, sd, tempDir, restic.NodeTypeFile, fmt.Sprintf("testfile%d", i)) + } + for i, sd := range testDirSDs { + testRestoreSecurityDescriptor(t, sd, tempDir, restic.NodeTypeDir, fmt.Sprintf("testdir%d", i)) + } +} + +func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir string, fileType restic.NodeType, fileName string) { + // Decode the encoded string SD to get the security descriptor input in bytes. + sdInputBytes, err := base64.StdEncoding.DecodeString(sd) + test.OK(t, errors.Wrapf(err, "Error decoding SD for: %s", fileName)) + // Wrap the security descriptor bytes in windows attributes and convert to generic attributes. + genericAttributes, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{CreationTime: nil, FileAttributes: nil, SecurityDescriptor: &sdInputBytes}) + test.OK(t, errors.Wrapf(err, "Error constructing windows attributes for: %s", fileName)) + // Construct a Node with the generic attributes. + expectedNode := getNode(fileName, fileType, genericAttributes) + + // Restore the file/dir and restore the meta data including the security descriptors. + testPath, node := restoreAndGetNode(t, tempDir, &expectedNode, false) + // Get the security descriptor from the node constructed from the file info of the restored path. + sdByteFromRestoredNode := getWindowsAttr(t, testPath, node).SecurityDescriptor + + // Get the security descriptor for the test path after the restore. + sdBytesFromRestoredPath, err := getSecurityDescriptor(testPath) + test.OK(t, errors.Wrapf(err, "Error while getting the security descriptor for: %s", testPath)) + + // Compare the input SD and the SD got from the restored file. + compareSecurityDescriptors(t, testPath, sdInputBytes, *sdBytesFromRestoredPath) + // Compare the SD got from node constructed from the restored file info and the SD got directly from the restored file. 
+ compareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath) +} + +func getNode(name string, fileType restic.NodeType, genericAttributes map[restic.GenericAttributeType]json.RawMessage) restic.Node { + return restic.Node{ + Name: name, + Type: fileType, + Mode: 0644, + ModTime: parseTime("2024-02-21 6:30:01.111"), + AccessTime: parseTime("2024-02-22 7:31:02.222"), + ChangeTime: parseTime("2024-02-23 8:32:03.333"), + GenericAttributes: genericAttributes, + } +} + +func getWindowsAttr(t *testing.T, testPath string, node *restic.Node) restic.WindowsAttributes { + windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes) + test.OK(t, errors.Wrapf(err, "Error getting windows attr from generic attr: %s", testPath)) + test.Assert(t, len(unknownAttribs) == 0, "Unknown attribs found: %s for: %s", unknownAttribs, testPath) + return windowsAttributes +} + +func TestRestoreCreationTime(t *testing.T) { + t.Parallel() + path := t.TempDir() + fi, err := os.Lstat(path) + test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", path)) + attr := fi.Sys().(*syscall.Win32FileAttributeData) + creationTimeAttribute := attr.CreationTime + //Using the temp dir creation time as the test creation time for the test file and folder + runGenericAttributesTest(t, path, restic.TypeCreationTime, restic.WindowsAttributes{CreationTime: &creationTimeAttribute}, false) +} + +func TestRestoreFileAttributes(t *testing.T) { + t.Parallel() + genericAttributeName := restic.TypeFileAttributes + tempDir := t.TempDir() + normal := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + hidden := uint32(syscall.FILE_ATTRIBUTE_HIDDEN) + system := uint32(syscall.FILE_ATTRIBUTE_SYSTEM) + archive := uint32(syscall.FILE_ATTRIBUTE_ARCHIVE) + encrypted := uint32(windows.FILE_ATTRIBUTE_ENCRYPTED) + fileAttributes := []restic.WindowsAttributes{ + //normal + {FileAttributes: &normal}, + //hidden + {FileAttributes: &hidden}, + //system + {FileAttributes: &system}, + //archive + {FileAttributes: &archive}, + //encrypted + {FileAttributes: &encrypted}, + } + for i, fileAttr := range fileAttributes { + genericAttrs, err := restic.WindowsAttrsToGenericAttributes(fileAttr) + test.OK(t, err) + expectedNodes := []restic.Node{ + { + Name: fmt.Sprintf("testfile%d", i), + Type: restic.NodeTypeFile, + Mode: 0655, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttrs, + }, + } + runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, fileAttr, false) + } + normal = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY) + hidden = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | syscall.FILE_ATTRIBUTE_HIDDEN) + system = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_SYSTEM) + archive = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_ARCHIVE) + encrypted = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_ENCRYPTED) + folderAttributes := []restic.WindowsAttributes{ + //normal + {FileAttributes: &normal}, + //hidden + {FileAttributes: &hidden}, + //system + {FileAttributes: &system}, + //archive + {FileAttributes: &archive}, + //encrypted + {FileAttributes: &encrypted}, + } + for i, folderAttr := range folderAttributes { + genericAttrs, err := restic.WindowsAttrsToGenericAttributes(folderAttr) + test.OK(t, err) + expectedNodes := []restic.Node{ + { + Name: fmt.Sprintf("testdirectory%d", i), + Type: 
restic.NodeTypeDir, + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttrs, + }, + } + runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, folderAttr, false) + } +} + +func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName restic.GenericAttributeType, genericAttributeExpected restic.WindowsAttributes, warningExpected bool) { + genericAttributes, err := restic.WindowsAttrsToGenericAttributes(genericAttributeExpected) + test.OK(t, err) + expectedNodes := []restic.Node{ + { + Name: "testfile", + Type: restic.NodeTypeFile, + Mode: 0644, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttributes, + }, + { + Name: "testdirectory", + Type: restic.NodeTypeDir, + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttributes, + }, + } + runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, genericAttributeExpected, warningExpected) +} +func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []restic.Node, tempDir string, genericAttr restic.GenericAttributeType, genericAttributeExpected restic.WindowsAttributes, warningExpected bool) { + + for _, testNode := range expectedNodes { + testPath, node := restoreAndGetNode(t, tempDir, &testNode, warningExpected) + rawMessage := node.GenericAttributes[genericAttr] + genericAttrsExpected, err := restic.WindowsAttrsToGenericAttributes(genericAttributeExpected) + test.OK(t, err) + rawMessageExpected := genericAttrsExpected[genericAttr] + test.Equals(t, rawMessageExpected, rawMessage, "Generic attribute: %s got from NodeFromFileInfo not equal for path: %s", string(genericAttr), testPath) + } +} + +func restoreAndGetNode(t *testing.T, tempDir string, testNode *restic.Node, warningExpected bool) (string, *restic.Node) { + testPath := filepath.Join(tempDir, "001", testNode.Name) + err := os.MkdirAll(filepath.Dir(testPath), testNode.Mode) + test.OK(t, errors.Wrapf(err, "Failed to create parent directories for: %s", testPath)) + + if testNode.Type == restic.NodeTypeFile { + + testFile, err := os.Create(testPath) + test.OK(t, errors.Wrapf(err, "Failed to create test file: %s", testPath)) + testFile.Close() + } else if testNode.Type == restic.NodeTypeDir { + + err := os.Mkdir(testPath, testNode.Mode) + test.OK(t, errors.Wrapf(err, "Failed to create test directory: %s", testPath)) + } + + err = NodeRestoreMetadata(testNode, testPath, func(msg string) { + if warningExpected { + test.Assert(t, warningExpected, "Warning triggered as expected: %s", msg) + } else { + // If warning is not expected, this code should not get triggered. 
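+			// (In these tests, only unknown generic attributes are expected to
+			// trigger a warning; see TestNewGenericAttributeType, which calls
+			// restoreAndGetNode with warningExpected set to true.)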
+ test.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", testPath, msg)) + } + }, func(_ string) bool { return true }) + test.OK(t, errors.Wrapf(err, "Failed to restore metadata for: %s", testPath)) + + fs := &Local{} + meta, err := fs.OpenFile(testPath, O_NOFOLLOW, true) + test.OK(t, err) + nodeFromFileInfo, err := meta.ToNode(false) + test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) + test.OK(t, meta.Close()) + + return testPath, nodeFromFileInfo +} + +const TypeSomeNewAttribute restic.GenericAttributeType = "MockAttributes.SomeNewAttribute" + +func TestNewGenericAttributeType(t *testing.T) { + t.Parallel() + + newGenericAttribute := map[restic.GenericAttributeType]json.RawMessage{} + newGenericAttribute[TypeSomeNewAttribute] = []byte("any value") + + tempDir := t.TempDir() + expectedNodes := []restic.Node{ + { + Name: "testfile", + Type: restic.NodeTypeFile, + Mode: 0644, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: newGenericAttribute, + }, + { + Name: "testdirectory", + Type: restic.NodeTypeDir, + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: newGenericAttribute, + }, + } + for _, testNode := range expectedNodes { + testPath, node := restoreAndGetNode(t, tempDir, &testNode, true) + _, ua, err := genericAttributesToWindowsAttrs(node.GenericAttributes) + test.OK(t, err) + // Since this GenericAttribute is unknown to this version of the software, it will not get set on the file. + test.Assert(t, len(ua) == 0, "Unknown attributes: %s found for path: %s", ua, testPath) + } +} + +func TestRestoreExtendedAttributes(t *testing.T) { + t.Parallel() + tempDir := t.TempDir() + expectedNodes := []restic.Node{ + { + Name: "testfile", + Type: restic.NodeTypeFile, + Mode: 0644, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {"user.foo", []byte("bar")}, + }, + }, + { + Name: "testdirectory", + Type: restic.NodeTypeDir, + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {"user.foo", []byte("bar")}, + }, + }, + } + for _, testNode := range expectedNodes { + testPath, node := restoreAndGetNode(t, tempDir, &testNode, false) + + var handle windows.Handle + var err error + utf16Path := windows.StringToUTF16Ptr(testPath) + if node.Type == restic.NodeTypeFile { + handle, err = windows.CreateFile(utf16Path, windows.FILE_READ_EA, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + } else if node.Type == restic.NodeTypeDir { + handle, err = windows.CreateFile(utf16Path, windows.FILE_READ_EA, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + } + test.OK(t, errors.Wrapf(err, "Error opening file/directory for: %s", testPath)) + defer func() { + err := windows.Close(handle) + test.OK(t, errors.Wrapf(err, "Error closing file for: %s", testPath)) + }() + + extAttr, err := fgetEA(handle) + test.OK(t, errors.Wrapf(err, "Error getting extended attributes for: %s", testPath)) + test.Equals(t, len(node.ExtendedAttributes), 
len(extAttr)) + + for _, expectedExtAttr := range node.ExtendedAttributes { + var foundExtAttr *extendedAttribute + for _, ea := range extAttr { + if strings.EqualFold(ea.Name, expectedExtAttr.Name) { + foundExtAttr = &ea + break + + } + } + test.Assert(t, foundExtAttr != nil, "Expected extended attribute not found") + test.Equals(t, expectedExtAttr.Value, foundExtAttr.Value) + } + } +} + +func TestPrepareVolumeName(t *testing.T) { + currentVolume := filepath.VolumeName(func() string { + // Get the current working directory + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get current working directory: %v", err) + } + return pwd + }()) + // Create a temporary directory for the test + tempDir, err := os.MkdirTemp("", "restic_test_"+time.Now().Format("20060102150405")) + if err != nil { + t.Fatalf("Failed to create temp directory: %v", err) + } + defer os.RemoveAll(tempDir) + + // Create a long file name + longFileName := `\Very\Long\Path\That\Exceeds\260\Characters\` + strings.Repeat(`\VeryLongFolderName`, 20) + `\\LongFile.txt` + longFilePath := filepath.Join(tempDir, longFileName) + + tempDirVolume := filepath.VolumeName(tempDir) + // Create the file + content := []byte("This is a test file with a very long name.") + err = os.MkdirAll(filepath.Dir(longFilePath), 0755) + test.OK(t, err) + if err != nil { + t.Fatalf("Failed to create long folder: %v", err) + } + err = os.WriteFile(longFilePath, content, 0644) + test.OK(t, err) + if err != nil { + t.Fatalf("Failed to create long file: %v", err) + } + osVolumeGUIDPath := getOSVolumeGUIDPath(t) + osVolumeGUIDVolume := filepath.VolumeName(osVolumeGUIDPath) + + testCases := []struct { + name string + path string + expectedVolume string + expectError bool + expectedEASupported bool + isRealPath bool + }{ + { + name: "Network drive path", + path: `Z:\Shared\Documents`, + expectedVolume: `Z:`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Subst drive path", + path: `X:\Virtual\Folder`, + expectedVolume: `X:`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Windows reserved path", + path: `\\.\` + os.Getenv("SystemDrive") + `\System32\drivers\etc\hosts`, + expectedVolume: `\\.\` + os.Getenv("SystemDrive"), + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Long UNC path", + path: `\\?\UNC\LongServerName\VeryLongShareName\DeepPath\File.txt`, + expectedVolume: `\\LongServerName\VeryLongShareName`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Volume GUID path", + path: osVolumeGUIDPath, + expectedVolume: osVolumeGUIDVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Volume GUID path with subfolder", + path: osVolumeGUIDPath + `\Windows`, + expectedVolume: osVolumeGUIDVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Standard path", + path: os.Getenv("SystemDrive") + `\Users\`, + expectedVolume: os.Getenv("SystemDrive"), + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Extended length path", + path: longFilePath, + expectedVolume: tempDirVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "UNC path", + path: `\\server\share\folder`, + expectedVolume: `\\server\share`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Extended UNC path", + path: `\\?\UNC\server\share\folder`, + expectedVolume: `\\server\share`, + expectError: false, + 
expectedEASupported: false, + }, + { + name: "Volume Shadow Copy root", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Volume Shadow Copy path", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555\Users\test`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Relative path", + path: `folder\subfolder`, + + expectedVolume: currentVolume, // Get current volume + expectError: false, + expectedEASupported: true, + }, + { + name: "Empty path", + path: ``, + expectedVolume: currentVolume, + expectError: false, + expectedEASupported: true, + isRealPath: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + isEASupported, err := checkAndStoreEASupport(tc.path) + test.OK(t, err) + test.Equals(t, tc.expectedEASupported, isEASupported) + + volume, err := prepareVolumeName(tc.path) + + if tc.expectError { + test.Assert(t, err != nil, "Expected an error, but got none") + } else { + test.OK(t, err) + } + test.Equals(t, tc.expectedVolume, volume) + + if tc.isRealPath { + isEASupportedVolume, err := pathSupportsExtendedAttributes(volume + `\`) + // If the prepared volume name is not valid, we will next fetch the actual volume name. + test.OK(t, err) + + test.Equals(t, tc.expectedEASupported, isEASupportedVolume) + + actualVolume, err := getVolumePathName(tc.path) + test.OK(t, err) + test.Equals(t, tc.expectedVolume, actualVolume) + } + }) + } +} + +func getOSVolumeGUIDPath(t *testing.T) string { + // Get the path of the OS drive (usually C:\) + osDrive := os.Getenv("SystemDrive") + "\\" + + // Convert to a volume GUID path + volumeName, err := windows.UTF16PtrFromString(osDrive) + test.OK(t, err) + if err != nil { + return "" + } + + var volumeGUID [windows.MAX_PATH]uint16 + err = windows.GetVolumeNameForVolumeMountPoint(volumeName, &volumeGUID[0], windows.MAX_PATH) + test.OK(t, err) + if err != nil { + return "" + } + + return windows.UTF16ToString(volumeGUID[:]) +} + +func TestGetVolumePathName(t *testing.T) { + tempDirVolume := filepath.VolumeName(os.TempDir()) + testCases := []struct { + name string + path string + expectedPrefix string + }{ + { + name: "Root directory", + path: os.Getenv("SystemDrive") + `\`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Nested directory", + path: os.Getenv("SystemDrive") + `\Windows\System32`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Temp directory", + path: os.TempDir() + `\`, + expectedPrefix: tempDirVolume, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + volumeName, err := getVolumePathName(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !strings.HasPrefix(volumeName, tc.expectedPrefix) { + t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) + } + }) + } + + // Test with an invalid path + _, err := getVolumePathName("Z:\\NonExistentPath") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go new file mode 100644 index 00000000000..2a2b5c0fba4 --- /dev/null +++ b/internal/fs/node_xattr.go @@ -0,0 +1,127 @@ +//go:build darwin || freebsd || netbsd || linux || solaris +// +build darwin freebsd netbsd linux solaris + +package fs + +import 
(
+	"fmt"
+	"os"
+	"syscall"
+
+	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/restic"
+
+	"github.com/pkg/xattr"
+)
+
+// getxattr retrieves extended attribute data associated with path.
+func getxattr(path, name string) ([]byte, error) {
+	b, err := xattr.LGet(path, name)
+	return b, handleXattrErr(err)
+}
+
+// listxattr retrieves a list of names of extended attributes associated with the
+// given path in the file system.
+func listxattr(path string) ([]string, error) {
+	l, err := xattr.LList(path)
+	return l, handleXattrErr(err)
+}
+
+func isListxattrPermissionError(err error) bool {
+	var xerr *xattr.Error
+	if errors.As(err, &xerr) {
+		return xerr.Op == "xattr.list" && errors.Is(xerr.Err, os.ErrPermission)
+	}
+	return false
+}
+
+// setxattr associates name and data together as an attribute of path.
+func setxattr(path, name string, data []byte) error {
+	return handleXattrErr(xattr.LSet(path, name, data))
+}
+
+// removexattr removes the attribute name from path.
+func removexattr(path, name string) error {
+	return handleXattrErr(xattr.LRemove(path, name))
+}
+
+func handleXattrErr(err error) error {
+	switch e := err.(type) {
+	case nil:
+		return nil
+
+	case *xattr.Error:
+		// On Linux, xattr calls on files in an SMB/CIFS mount can return
+		// ENOATTR instead of ENOTSUP.
+		switch e.Err {
+		case syscall.ENOTSUP, xattr.ENOATTR:
+			return nil
+		}
+		return errors.WithStack(e)
+
+	default:
+		return errors.WithStack(e)
+	}
+}
+
+func nodeRestoreExtendedAttributes(node *restic.Node, path string, xattrSelectFilter func(xattrName string) bool) error {
+	expectedAttrs := map[string]struct{}{}
+	for _, attr := range node.ExtendedAttributes {
+		// Only restore xattrs that match the filter
+		if xattrSelectFilter(attr.Name) {
+			err := setxattr(path, attr.Name, attr.Value)
+			if err != nil {
+				return err
+			}
+			expectedAttrs[attr.Name] = struct{}{}
+		}
+	}
+
+	// remove unexpected xattrs
+	xattrs, err := listxattr(path)
+	if err != nil {
+		return err
+	}
+	for _, name := range xattrs {
+		if _, ok := expectedAttrs[name]; ok {
+			continue
+		}
+		// Only attempt to remove xattrs that match the filter
+		if xattrSelectFilter(name) {
+			if err := removexattr(path, name); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func nodeFillExtendedAttributes(node *restic.Node, path string, ignoreListError bool) error {
+	xattrs, err := listxattr(path)
+	debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err)
+	if err != nil {
+		if ignoreListError && isListxattrPermissionError(err) {
+			return nil
+		}
+		return err
+	}
+
+	node.ExtendedAttributes = make([]restic.ExtendedAttribute, 0, len(xattrs))
+	for _, attr := range xattrs {
+		attrVal, err := getxattr(path, attr)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "cannot obtain extended attribute %v for %v: %v\n", attr, path, err)
+			continue
+		}
+		attr := restic.ExtendedAttribute{
+			Name:  attr,
+			Value: attrVal,
+		}
+
+		node.ExtendedAttributes = append(node.ExtendedAttributes, attr)
+	}
+
+	return nil
+}
diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go
new file mode 100644
index 00000000000..6a9a2e4bf91
--- /dev/null
+++ b/internal/fs/node_xattr_all_test.go
@@ -0,0 +1,201 @@
+//go:build darwin || freebsd || netbsd || linux || solaris || windows
+// +build darwin freebsd netbsd linux solaris windows
+
+package fs
+
+import (
+	"bytes"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+
+	"github.com/restic/restic/internal/filter"
+	
"github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func setAndVerifyXattr(t *testing.T, file string, attrs []restic.ExtendedAttribute) { + if runtime.GOOS == "windows" { + // windows seems to convert the xattr name to upper case + for i := range attrs { + attrs[i].Name = strings.ToUpper(attrs[i].Name) + } + } + + node := &restic.Node{ + Type: restic.NodeTypeFile, + ExtendedAttributes: attrs, + } + /* restore all xattrs */ + rtest.OK(t, nodeRestoreExtendedAttributes(node, file, func(_ string) bool { return true })) + + nodeActual := &restic.Node{ + Type: restic.NodeTypeFile, + } + rtest.OK(t, nodeFillExtendedAttributes(nodeActual, file, false)) + + rtest.Assert(t, nodeActual.Equals(*node), "xattr mismatch got %v expected %v", nodeActual.ExtendedAttributes, node.ExtendedAttributes) +} + +func setAndVerifyXattrWithSelectFilter(t *testing.T, file string, testAttr []testXattrToRestore, xattrSelectFilter func(_ string) bool) { + attrs := make([]restic.ExtendedAttribute, len(testAttr)) + for i := range testAttr { + // windows seems to convert the xattr name to upper case + if runtime.GOOS == "windows" { + testAttr[i].xattr.Name = strings.ToUpper(testAttr[i].xattr.Name) + } + attrs[i] = testAttr[i].xattr + } + + node := &restic.Node{ + Type: restic.NodeTypeFile, + ExtendedAttributes: attrs, + } + + rtest.OK(t, nodeRestoreExtendedAttributes(node, file, xattrSelectFilter)) + + nodeActual := &restic.Node{ + Type: restic.NodeTypeFile, + } + rtest.OK(t, nodeFillExtendedAttributes(nodeActual, file, false)) + + // Check nodeActual to make sure only xattrs we expect are there + for _, testAttr := range testAttr { + xattrFound := false + xattrRestored := false + for _, restoredAttr := range nodeActual.ExtendedAttributes { + if restoredAttr.Name == testAttr.xattr.Name { + xattrFound = true + xattrRestored = bytes.Equal(restoredAttr.Value, testAttr.xattr.Value) + break + } + } + if testAttr.shouldRestore { + rtest.Assert(t, xattrFound, "xattr %s not restored", testAttr.xattr.Name) + rtest.Assert(t, xattrRestored, "xattr %v value not restored", testAttr.xattr) + } else { + rtest.Assert(t, !xattrFound, "xattr %v should not have been restored", testAttr.xattr) + } + } +} + +type testXattrToRestore struct { + xattr restic.ExtendedAttribute + shouldRestore bool +} + +func TestOverwriteXattr(t *testing.T) { + dir := t.TempDir() + file := filepath.Join(dir, "file") + rtest.OK(t, os.WriteFile(file, []byte("hello world"), 0o600)) + + setAndVerifyXattr(t, file, []restic.ExtendedAttribute{ + { + Name: "user.foo", + Value: []byte("bar"), + }, + }) + + setAndVerifyXattr(t, file, []restic.ExtendedAttribute{ + { + Name: "user.other", + Value: []byte("some"), + }, + }) +} + +func uppercaseOnWindows(patterns []string) []string { + // windows seems to convert the xattr name to upper case + if runtime.GOOS == "windows" { + out := []string{} + for _, pattern := range patterns { + out = append(out, strings.ToUpper(pattern)) + } + return out + } + return patterns +} + +func TestOverwriteXattrWithSelectFilter(t *testing.T) { + dir := t.TempDir() + file := filepath.Join(dir, "file2") + rtest.OK(t, os.WriteFile(file, []byte("hello world"), 0o600)) + + noopWarnf := func(_ string, _ ...interface{}) {} + + // Set a filter as if the user passed in --include-xattr user.* + xattrSelectFilter1 := func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern(uppercaseOnWindows([]string{"user.*"}), noopWarnf)(xattrName) + return shouldInclude + } + + 
+	setAndVerifyXattrWithSelectFilter(t, file, []testXattrToRestore{
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "user.foo",
+				Value: []byte("bar"),
+			},
+			shouldRestore: true,
+		},
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "user.test",
+				Value: []byte("testxattr"),
+			},
+			shouldRestore: true,
+		},
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "security.other",
+				Value: []byte("testing"),
+			},
+			shouldRestore: false,
+		},
+	}, xattrSelectFilter1)
+
+	// Set a filter as if the user passed in --include-xattr user.o* --include-xattr user.comm*
+	xattrSelectFilter2 := func(xattrName string) bool {
+		shouldInclude, _ := filter.IncludeByPattern(uppercaseOnWindows([]string{"user.o*", "user.comm*"}), noopWarnf)(xattrName)
+		return shouldInclude
+	}
+
+	setAndVerifyXattrWithSelectFilter(t, file, []testXattrToRestore{
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "user.other",
+				Value: []byte("some"),
+			},
+			shouldRestore: true,
+		},
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "security.other",
+				Value: []byte("testing"),
+			},
+			shouldRestore: false,
+		},
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "user.open",
+				Value: []byte("door"),
+			},
+			shouldRestore: true,
+		},
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "user.common",
+				Value: []byte("testing"),
+			},
+			shouldRestore: true,
+		},
+		{
+			xattr: restic.ExtendedAttribute{
+				Name:  "user.bad",
+				Value: []byte("dontincludeme"),
+			},
+			shouldRestore: false,
+		},
+	}, xattrSelectFilter2)
+}
diff --git a/internal/fs/node_xattr_test.go b/internal/fs/node_xattr_test.go
new file mode 100644
index 00000000000..7205e1fbec1
--- /dev/null
+++ b/internal/fs/node_xattr_test.go
@@ -0,0 +1,28 @@
+//go:build darwin || freebsd || netbsd || linux || solaris
+// +build darwin freebsd netbsd linux solaris
+
+package fs
+
+import (
+	"os"
+	"testing"
+
+	"github.com/pkg/xattr"
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func TestIsListxattrPermissionError(t *testing.T) {
+	xerr := &xattr.Error{
+		Op:   "xattr.list",
+		Name: "test",
+		Err:  os.ErrPermission,
+	}
+	err := handleXattrErr(xerr)
+	rtest.Assert(t, err != nil, "missing error")
+	rtest.Assert(t, isListxattrPermissionError(err), "expected IsListxattrPermissionError to return true for %v", err)
+
+	xerr.Err = os.ErrNotExist
+	err = handleXattrErr(xerr)
+	rtest.Assert(t, err != nil, "missing error")
+	rtest.Assert(t, !isListxattrPermissionError(err), "expected IsListxattrPermissionError to return false for %v", err)
+}
diff --git a/internal/fs/preallocate_linux.go b/internal/fs/preallocate_linux.go
index 30b9e46441c..7b044950796 100644
--- a/internal/fs/preallocate_linux.go
+++ b/internal/fs/preallocate_linux.go
@@ -2,6 +2,7 @@ package fs
 
 import (
 	"os"
+	"syscall"
 
 	"golang.org/x/sys/unix"
 )
@@ -12,5 +13,17 @@ func PreallocateFile(wr *os.File, size int64) error {
 	}
 	// int fallocate(int fd, int mode, off_t offset, off_t len)
 	// use mode = 0 to also change the file size
-	return unix.Fallocate(int(wr.Fd()), 0, 0, size)
+	return ignoringEINTR(func() error { return unix.Fallocate(int(wr.Fd()), 0, 0, size) })
+}
+
+// ignoringEINTR makes a function call and repeats it if it returns
+// an EINTR error.
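+// In practice the fallocate call can be interrupted, e.g. by the signals the
+// Go runtime uses for goroutine preemption; retrying on EINTR is safe here
+// because allocating the same range again is a no-op.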
+// copied from /usr/lib/go/src/internal/poll/fd_posix.go of go 1.23.1
+func ignoringEINTR(fn func() error) error {
+	for {
+		err := fn()
+		if err != syscall.EINTR {
+			return err
+		}
+	}
 }
diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go
new file mode 100644
index 00000000000..04623e8d3dd
--- /dev/null
+++ b/internal/fs/sd_windows.go
@@ -0,0 +1,216 @@
+package fs
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+
+	"github.com/Microsoft/go-winio"
+	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/errors"
+	"golang.org/x/sys/windows"
+)
+
+var (
+	onceBackup  sync.Once
+	onceRestore sync.Once
+
+	// seBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories.
+	seBackupPrivilege = "SeBackupPrivilege"
+	// seRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories.
+	seRestorePrivilege = "SeRestorePrivilege"
+	// seSecurityPrivilege allows read and write access to all SACLs.
+	seSecurityPrivilege = "SeSecurityPrivilege"
+	// seTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them.
+	seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
+
+	lowerPrivileges atomic.Bool
+)
+
+// Flags for backup and restore with admin permissions
+var highSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION | windows.UNPROTECTED_SACL_SECURITY_INFORMATION
+
+// Flags for backup without admin permissions. If there are no admin permissions, only the current user's owner, group and DACL will be backed up.
+var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION
+
+// Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user.
+var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION
+
+// getSecurityDescriptor takes the path of the file and returns the SecurityDescriptor for the file.
+// This needs admin permissions or SeBackupPrivilege for getting the full SD.
+// If there are no admin permissions, only the current user's owner, group and DACL will be backed up.
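+// A typical round trip looks roughly like this (hypothetical paths):
+//
+//	sd, err := getSecurityDescriptor(`C:\data\file.txt`)
+//	if err == nil && sd != nil {
+//		err = setSecurityDescriptor(`C:\restore\file.txt`, sd)
+//	}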
+func getSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err error) {
+	onceBackup.Do(enableBackupPrivilege)
+
+	var sd *windows.SECURITY_DESCRIPTOR
+
+	// store original value to avoid unrelated changes in the error check
+	useLowerPrivileges := lowerPrivileges.Load()
+	if useLowerPrivileges {
+		sd, err = getNamedSecurityInfoLow(filePath)
+	} else {
+		sd, err = getNamedSecurityInfoHigh(filePath)
+		// Fall back to the low privilege version when receiving an access denied error.
+		// For some reason the ERROR_PRIVILEGE_NOT_HELD error is not returned for removable media
+		// but instead an access denied error is returned. Work around that by just retrying with
+		// the low privilege version, but don't switch privileges as we cannot distinguish this
+		// case from actual access denied errors.
+		// see https://github.com/restic/restic/issues/5003#issuecomment-2452314191 for details
+		if err != nil && isAccessDeniedError(err) {
+			sd, err = getNamedSecurityInfoLow(filePath)
+		}
+	}
+	if err != nil {
+		if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) {
+			// If ERROR_PRIVILEGE_NOT_HELD is encountered, fall back to backups/restores using lower non-admin privileges.
+			lowerPrivileges.Store(true)
+			return getSecurityDescriptor(filePath)
+		} else if errors.Is(err, windows.ERROR_NOT_SUPPORTED) {
+			return nil, nil
+		} else {
+			return nil, fmt.Errorf("get named security info failed with: %w", err)
+		}
+	}
+
+	sdBytes, err := securityDescriptorStructToBytes(sd)
+	if err != nil {
+		return nil, fmt.Errorf("convert security descriptor to bytes failed: %w", err)
+	}
+	return &sdBytes, nil
+}
+
+// setSecurityDescriptor sets the SecurityDescriptor for the file at the specified path.
+// This needs admin permissions or SeRestorePrivilege, SeSecurityPrivilege and SeTakeOwnershipPrivilege
+// for setting the full SD.
+// If there are no admin permissions/required privileges, only the DACL from the SD can be set and
+// owner and group will be set based on the current user.
+func setSecurityDescriptor(filePath string, securityDescriptor *[]byte) error {
+	onceRestore.Do(enableRestorePrivilege)
+	// Set the security descriptor on the file
+	sd, err := securityDescriptorBytesToStruct(*securityDescriptor)
+	if err != nil {
+		return fmt.Errorf("error converting bytes to security descriptor: %w", err)
+	}
+
+	owner, _, err := sd.Owner()
+	if err != nil {
+		// Do not set partial values.
+		owner = nil
+	}
+	group, _, err := sd.Group()
+	if err != nil {
+		// Do not set partial values.
+		group = nil
+	}
+	dacl, _, err := sd.DACL()
+	if err != nil {
+		// Do not set partial values.
+		dacl = nil
+	}
+	sacl, _, err := sd.SACL()
+	if err != nil {
+		// Do not set partial values.
+		sacl = nil
+	}
+
+	// store original value to avoid unrelated changes in the error check
+	useLowerPrivileges := lowerPrivileges.Load()
+	if useLowerPrivileges {
+		err = setNamedSecurityInfoLow(filePath, dacl)
+	} else {
+		err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl)
+		// See corresponding fallback in getSecurityDescriptor for an explanation
+		if err != nil && isAccessDeniedError(err) {
+			err = setNamedSecurityInfoLow(filePath, dacl)
+		}
+	}
+
+	if err != nil {
+		if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) {
+			// If ERROR_PRIVILEGE_NOT_HELD is encountered, fall back to backups/restores using lower non-admin privileges.
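+			// Note that this recursion terminates after one retry: lowerPrivileges
+			// stays true from here on, so the retried call takes the low-privilege
+			// path directly.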
+ lowerPrivileges.Store(true) + return setSecurityDescriptor(filePath, securityDescriptor) + } else { + return fmt.Errorf("set named security info failed with: %w", err) + } + } + return nil +} + +// getNamedSecurityInfoHigh gets the higher level SecurityDescriptor which requires admin permissions. +func getNamedSecurityInfoHigh(filePath string) (*windows.SECURITY_DESCRIPTOR, error) { + return windows.GetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, highSecurityFlags) +} + +// getNamedSecurityInfoLow gets the lower level SecurityDescriptor which requires no admin permissions. +func getNamedSecurityInfoLow(filePath string) (*windows.SECURITY_DESCRIPTOR, error) { + return windows.GetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, lowBackupSecurityFlags) +} + +// setNamedSecurityInfoHigh sets the higher level SecurityDescriptor which requires admin permissions. +func setNamedSecurityInfoHigh(filePath string, owner *windows.SID, group *windows.SID, dacl *windows.ACL, sacl *windows.ACL) error { + return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, highSecurityFlags, owner, group, dacl, sacl) +} + +// setNamedSecurityInfoLow sets the lower level SecurityDescriptor which requires no admin permissions. +func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL) error { + return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, lowRestoreSecurityFlags, nil, nil, dacl, nil) +} + +func enableProcessPrivileges(privileges []string) error { + return winio.EnableProcessPrivileges(privileges) +} + +// enableBackupPrivilege enables privilege for backing up security descriptors +func enableBackupPrivilege() { + err := enableProcessPrivileges([]string{seBackupPrivilege}) + if err != nil { + debug.Log("error enabling backup privilege: %v", err) + } +} + +// enableRestorePrivilege enables privilege for restoring security descriptors +func enableRestorePrivilege() { + err := enableProcessPrivileges([]string{seRestorePrivilege, seSecurityPrivilege, seTakeOwnershipPrivilege}) + if err != nil { + debug.Log("error enabling restore/security privilege: %v", err) + } +} + +// isHandlePrivilegeNotHeldError checks if the error is ERROR_PRIVILEGE_NOT_HELD +func isHandlePrivilegeNotHeldError(err error) bool { + // Use a type assertion to check if the error is of type syscall.Errno + if errno, ok := err.(syscall.Errno); ok { + // Compare the error code to the expected value + return errno == windows.ERROR_PRIVILEGE_NOT_HELD + } + return false +} + +// isAccessDeniedError checks if the error is ERROR_ACCESS_DENIED +func isAccessDeniedError(err error) bool { + if errno, ok := err.(syscall.Errno); ok { + // Compare the error code to the expected value + return errno == windows.ERROR_ACCESS_DENIED + } + return false +} + +// securityDescriptorBytesToStruct converts the security descriptor bytes representation +// into a pointer to windows SECURITY_DESCRIPTOR. +func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return nil, fmt.Errorf("securityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) + } + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s, nil +} + +// securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR +// into a security descriptor bytes representation. 
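+// The returned slice aliases the descriptor's memory instead of copying it,
+// so it must not be used after the descriptor is released.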
+func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) {
+	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
+	return b, nil
+}
diff --git a/internal/fs/sd_windows_test.go b/internal/fs/sd_windows_test.go
new file mode 100644
index 00000000000..c31b19b8b23
--- /dev/null
+++ b/internal/fs/sd_windows_test.go
@@ -0,0 +1,60 @@
+//go:build windows
+// +build windows
+
+package fs
+
+import (
+	"encoding/base64"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/test"
+)
+
+func TestSetGetFileSecurityDescriptors(t *testing.T) {
+	tempDir := t.TempDir()
+	testfilePath := filepath.Join(tempDir, "testfile.txt")
+	// create temp file
+	testfile, err := os.Create(testfilePath)
+	if err != nil {
+		t.Fatalf("failed to create temporary file: %s", err)
+	}
+	defer func() {
+		err := testfile.Close()
+		if err != nil {
+			t.Logf("Error closing file %s: %v\n", testfilePath, err)
+		}
+	}()
+
+	testSecurityDescriptors(t, testFileSDs, testfilePath)
+}
+
+func TestSetGetFolderSecurityDescriptors(t *testing.T) {
+	tempDir := t.TempDir()
+	testfolderPath := filepath.Join(tempDir, "testfolder")
+	// create temp folder
+	err := os.Mkdir(testfolderPath, os.ModeDir)
+	if err != nil {
+		t.Fatalf("failed to create temporary folder: %s", err)
+	}
+
+	testSecurityDescriptors(t, testDirSDs, testfolderPath)
+}
+
+func testSecurityDescriptors(t *testing.T, testSDs []string, testPath string) {
+	for _, testSD := range testSDs {
+		sdInputBytes, err := base64.StdEncoding.DecodeString(testSD)
+		test.OK(t, errors.Wrapf(err, "Error decoding SD: %s", testPath))
+
+		err = setSecurityDescriptor(testPath, &sdInputBytes)
+		test.OK(t, errors.Wrapf(err, "Error setting file security descriptor for: %s", testPath))
+
+		var sdOutputBytes *[]byte
+		sdOutputBytes, err = getSecurityDescriptor(testPath)
+		test.OK(t, errors.Wrapf(err, "Error getting file security descriptor for: %s", testPath))
+
+		compareSecurityDescriptors(t, testPath, sdInputBytes, *sdOutputBytes)
+	}
+}
diff --git a/internal/fs/sd_windows_test_helpers.go b/internal/fs/sd_windows_test_helpers.go
new file mode 100644
index 00000000000..0e888884ae0
--- /dev/null
+++ b/internal/fs/sd_windows_test_helpers.go
@@ -0,0 +1,126 @@
+//go:build windows
+// +build windows
+
+package fs
+
+import (
+	"os/user"
+	"testing"
+
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/test"
+	"golang.org/x/sys/windows"
+)
+
+var (
+	testFileSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=",
+		"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAyAAHAAAAAAAUAKkAEgABAQAAAAAABQcAAAAAABQAiQASAAEBAAAAAAAFBwAAAAAAJACpABIAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar7QMAAAAAJAC/ARMAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar6gMAAAAAFAD/AR8AAQEAAAAAAAUSAAAAAAAYAP8BHwABAgAAAAAABSAAAAAgAgAAAAAkAP8BHwABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAA",
+		
"AQAUvBQAAAAwAAAA7AAAAEwAAAABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAoAAFAAAAAAAkAP8BHwABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAACAHQAAwAAAAKAJAC/AQIAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtgQAAALAJAC/AQMAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDPgkAAAJAJAD/AQ8AAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtQQAAA==", + } + testDirSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + "AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIA3AAIAAAAAAIUAKkAEgABAQAAAAAABQcAAAAAAxQAiQASAAEBAAAAAAAFBwAAAAAAJACpABIAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar7QMAAAAAJAC/ARMAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar6gMAAAALFAC/ARMAAQEAAAAAAAMAAAAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + "AQAUvBQAAAAwAAAA7AAAAEwAAAABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAoAAFAAAAAAAkAP8BHwABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAACAHQAAwAAAAKAJAC/AQIAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtgQAAALAJAC/AQMAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDPgkAAAJAJAD/AQ8AAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtQQAAA==", + } +) + +// isAdmin checks if current user is an administrator. +func isAdmin() (isAdmin bool, err error) { + var sid *windows.SID + err = windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, 2, windows.SECURITY_BUILTIN_DOMAIN_RID, windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, &sid) + if err != nil { + return false, errors.Errorf("sid error: %s", err) + } + windows.GetCurrentProcessToken() + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false, errors.Errorf("token membership error: %s", err) + } + return member, nil +} + +// compareSecurityDescriptors runs tests for comparing 2 security descriptors in []byte format. +func compareSecurityDescriptors(t *testing.T, testPath string, sdInputBytes, sdOutputBytes []byte) { + sdInput, err := securityDescriptorBytesToStruct(sdInputBytes) + test.OK(t, errors.Wrapf(err, "Error converting SD to struct for: %s", testPath)) + + sdOutput, err := securityDescriptorBytesToStruct(sdOutputBytes) + test.OK(t, errors.Wrapf(err, "Error converting SD to struct for: %s", testPath)) + + isAdmin, err := isAdmin() + test.OK(t, errors.Wrapf(err, "Error checking if user is admin: %s", testPath)) + + var ownerExpected *windows.SID + var defaultedOwnerExpected bool + var groupExpected *windows.SID + var defaultedGroupExpected bool + var daclExpected *windows.ACL + var defaultedDaclExpected bool + var saclExpected *windows.ACL + var defaultedSaclExpected bool + + // The Dacl is set correctly whether or not application is running as admin. + daclExpected, defaultedDaclExpected, err = sdInput.DACL() + test.OK(t, errors.Wrapf(err, "Error getting input dacl for: %s", testPath)) + + if isAdmin { + // If application is running as admin, all sd values including owner, group, dacl, sacl are set correctly during restore. 
+		// Hence we will use the input values for comparison with the output values.
+		ownerExpected, defaultedOwnerExpected, err = sdInput.Owner()
+		test.OK(t, errors.Wrapf(err, "Error getting input owner for: %s", testPath))
+		groupExpected, defaultedGroupExpected, err = sdInput.Group()
+		test.OK(t, errors.Wrapf(err, "Error getting input group for: %s", testPath))
+		saclExpected, defaultedSaclExpected, err = sdInput.SACL()
+		test.OK(t, errors.Wrapf(err, "Error getting input sacl for: %s", testPath))
+	} else {
+		// If application is not running as admin, owner and group are set as current user's SID/GID during restore and sacl is empty.
+		// Get the current user
+		user, err := user.Current()
+		test.OK(t, errors.Wrapf(err, "Could not get current user for: %s", testPath))
+		// Get current user's SID
+		currentUserSID, err := windows.StringToSid(user.Uid)
+		test.OK(t, errors.Wrapf(err, "Error getting current user's SID for: %s", testPath))
+		// Get current user's Group SID
+		currentGroupSID, err := windows.StringToSid(user.Gid)
+		test.OK(t, errors.Wrapf(err, "Error getting current user's group SID for: %s", testPath))
+
+		// Set owner and group as current user's SID and GID during restore.
+		ownerExpected = currentUserSID
+		defaultedOwnerExpected = false
+		groupExpected = currentGroupSID
+		defaultedGroupExpected = false
+
+		// If application is not running as admin, SACL is returned empty.
+		saclExpected = nil
+		defaultedSaclExpected = false
+	}
+	// Now do all the comparisons
+	// Get owner SID from output file
+	ownerOut, defaultedOwnerOut, err := sdOutput.Owner()
+	test.OK(t, errors.Wrapf(err, "Error getting output owner for: %s", testPath))
+	// Compare owner SIDs. We must use the Equals method for comparison as a syscall is made for comparing SIDs.
+	test.Assert(t, ownerExpected.Equals(ownerOut), "Owner from SDs read from test path don't match: %s, cur:%s, exp: %s", testPath, ownerExpected.String(), ownerOut.String())
+	test.Equals(t, defaultedOwnerExpected, defaultedOwnerOut, "Defaulted for owner from SDs read from test path don't match: %s", testPath)
+
+	// Get group SID from output file
+	groupOut, defaultedGroupOut, err := sdOutput.Group()
+	test.OK(t, errors.Wrapf(err, "Error getting output group for: %s", testPath))
+	// Compare group SIDs. We must use the Equals method for comparison as a syscall is made for comparing SIDs.
+	test.Assert(t, groupExpected.Equals(groupOut), "Group from SDs read from test path don't match: %s, cur:%s, exp: %s", testPath, groupExpected.String(), groupOut.String())
+	test.Equals(t, defaultedGroupExpected, defaultedGroupOut, "Defaulted for group from SDs read from test path don't match: %s", testPath)
+
+	// Get dacl from output file
+	daclOut, defaultedDaclOut, err := sdOutput.DACL()
+	test.OK(t, errors.Wrapf(err, "Error getting output dacl for: %s", testPath))
+	// Compare dacls
+	test.Equals(t, daclExpected, daclOut, "DACL from SDs read from test path don't match: %s", testPath)
+	test.Equals(t, defaultedDaclExpected, defaultedDaclOut, "Defaulted for DACL from SDs read from test path don't match: %s", testPath)
+
+	// Get sacl from output file
+	saclOut, defaultedSaclOut, err := sdOutput.SACL()
+	test.OK(t, errors.Wrapf(err, "Error getting output sacl for: %s", testPath))
+	// Compare sacls
+	test.Equals(t, saclExpected, saclOut, "SACL from SDs read from test path don't match: %s", testPath)
+	test.Equals(t, defaultedSaclExpected, defaultedSaclOut, "Defaulted for SACL from SDs read from test path don't match: %s", testPath)
+}
diff --git a/internal/fs/setflags_linux_test.go b/internal/fs/setflags_linux_test.go
index b561a1009d8..8fe14a5a61f 100644
--- a/internal/fs/setflags_linux_test.go
+++ b/internal/fs/setflags_linux_test.go
@@ -19,7 +19,7 @@ func TestNoatime(t *testing.T) {
 	defer func() {
 		_ = f.Close()
 
-		err = Remove(f.Name())
+		err = os.Remove(f.Name())
 		if err != nil {
 			t.Fatal(err)
 		}
diff --git a/internal/fs/stat.go b/internal/fs/stat.go
index e1006fd61b7..bd3993f415b 100644
--- a/internal/fs/stat.go
+++ b/internal/fs/stat.go
@@ -8,7 +8,8 @@ import (
 // ExtendedFileInfo is an extended stat_t, filled with attributes that are
 // supported by most operating systems. The original FileInfo is embedded.
 type ExtendedFileInfo struct {
-	os.FileInfo
+	Name string
+	Mode os.FileMode
 
 	DeviceID uint64 // ID of device containing the file
 	Inode    uint64 // Inode number
@@ -23,10 +24,13 @@ type ExtendedFileInfo struct {
 	AccessTime time.Time // last access time stamp
 	ModTime    time.Time // last (content) modification time stamp
 	ChangeTime time.Time // last status change time stamp
+
+	// nolint:unused // only used on Windows
+	sys any // Value returned by os.FileInfo.Sys()
 }
 
 // ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo.
-func ExtendedStat(fi os.FileInfo) ExtendedFileInfo {
+func ExtendedStat(fi os.FileInfo) *ExtendedFileInfo {
 	if fi == nil {
 		panic("os.FileInfo is nil")
 	}
diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go
index 11e075b5098..95238be777c 100644
--- a/internal/fs/stat_bsd.go
+++ b/internal/fs/stat_bsd.go
@@ -10,11 +10,13 @@ import (
 )
 
 // extendedStat extracts info into an ExtendedFileInfo for unix based operating systems.
-func extendedStat(fi os.FileInfo) ExtendedFileInfo {
+func extendedStat(fi os.FileInfo) *ExtendedFileInfo {
 	s := fi.Sys().(*syscall.Stat_t)
 
-	extFI := ExtendedFileInfo{
-		FileInfo: fi,
+	return &ExtendedFileInfo{
+		Name: fi.Name(),
+		Mode: fi.Mode(),
+
 		DeviceID: uint64(s.Dev),
 		Inode:    uint64(s.Ino),
 		Links:    uint64(s.Nlink),
@@ -29,6 +31,9 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo {
 		ModTime:    time.Unix(s.Mtimespec.Unix()),
 		ChangeTime: time.Unix(s.Ctimespec.Unix()),
 	}
+}
 
-	return extFI
+// RecallOnDataAccess checks windows-specific attributes to determine if a file is a cloud-only placeholder.
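+// On non-Windows platforms cloud placeholders do not exist, so this stub
+// always reports false.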
+func (*ExtendedFileInfo) RecallOnDataAccess() (bool, error) { + return false, nil } diff --git a/internal/fs/stat_test.go b/internal/fs/stat_test.go index a5ec77c7a17..d52415c1d54 100644 --- a/internal/fs/stat_test.go +++ b/internal/fs/stat_test.go @@ -5,11 +5,11 @@ import ( "path/filepath" "testing" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestExtendedStat(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") err := os.WriteFile(filename, []byte("foobar"), 0640) if err != nil { diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go index c555710312a..70124658f44 100644 --- a/internal/fs/stat_unix.go +++ b/internal/fs/stat_unix.go @@ -10,11 +10,13 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. -func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) - extFI := ExtendedFileInfo{ - FileInfo: fi, + return &ExtendedFileInfo{ + Name: fi.Name(), + Mode: fi.Mode(), + DeviceID: uint64(s.Dev), Inode: s.Ino, Links: uint64(s.Nlink), @@ -29,6 +31,9 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { ModTime: time.Unix(s.Mtim.Unix()), ChangeTime: time.Unix(s.Ctim.Unix()), } +} - return extFI +// RecallOnDataAccess checks windows-specific attributes to determine if a file is a cloud-only placeholder. +func (*ExtendedFileInfo) RecallOnDataAccess() (bool, error) { + return false, nil } diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go index ee678d92ac6..a62ddf87fd5 100644 --- a/internal/fs/stat_windows.go +++ b/internal/fs/stat_windows.go @@ -8,18 +8,23 @@ import ( "os" "syscall" "time" + + "golang.org/x/sys/windows" ) // extendedStat extracts info into an ExtendedFileInfo for Windows. -func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s, ok := fi.Sys().(*syscall.Win32FileAttributeData) if !ok { panic(fmt.Sprintf("conversion to syscall.Win32FileAttributeData failed, type is %T", fi.Sys())) } extFI := ExtendedFileInfo{ - FileInfo: fi, - Size: int64(s.FileSizeLow) + int64(s.FileSizeHigh)<<32, + Name: fi.Name(), + Mode: fi.Mode(), + + Size: int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32), + sys: fi.Sys(), } atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) @@ -28,7 +33,25 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { mtime := syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) extFI.ModTime = time.Unix(mtime.Unix()) + // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. extFI.ChangeTime = extFI.ModTime - return extFI + return &extFI +} + +// RecallOnDataAccess checks if a file is available locally on the disk or if the file is +// just a placeholder which must be downloaded from a remote server. This is typically used +// in cloud syncing services (e.g. OneDrive) to prevent downloading files from cloud storage +// until they are accessed. 
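+// A caller might use it roughly like this (sketch):
+//
+//	if recall, err := extFI.RecallOnDataAccess(); err == nil && recall {
+//		// reading this file would first download it from the cloud provider
+//	}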
+func (fi *ExtendedFileInfo) RecallOnDataAccess() (bool, error) { + attrs, ok := fi.sys.(*syscall.Win32FileAttributeData) + if !ok { + return false, fmt.Errorf("could not determine file attributes: %s", fi.Name) + } + + if attrs.FileAttributes&windows.FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS > 0 { + return true, nil + } + + return false, nil } diff --git a/internal/fs/stat_windows_test.go b/internal/fs/stat_windows_test.go new file mode 100644 index 00000000000..4f258d836c1 --- /dev/null +++ b/internal/fs/stat_windows_test.go @@ -0,0 +1,80 @@ +package fs_test + +import ( + iofs "io/fs" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/fs" + rtest "github.com/restic/restic/internal/test" + "golang.org/x/sys/windows" +) + +func TestRecallOnDataAccessRealFile(t *testing.T) { + // create a temp file for testing + tempdir := rtest.TempDir(t) + filename := filepath.Join(tempdir, "regular-file") + err := os.WriteFile(filename, []byte("foobar"), 0640) + rtest.OK(t, err) + + fi, err := os.Stat(filename) + rtest.OK(t, err) + + xs := fs.ExtendedStat(fi) + + // ensure we can check attrs without error + recall, err := xs.RecallOnDataAccess() + rtest.Assert(t, err == nil, "err should be nil", err) + rtest.Assert(t, recall == false, "RecallOnDataAccess should be false") +} + +// mockFileInfo implements os.FileInfo for mocking file attributes +type mockFileInfo struct { + FileAttributes uint32 +} + +func (m mockFileInfo) IsDir() bool { + return false +} +func (m mockFileInfo) ModTime() time.Time { + return time.Now() +} +func (m mockFileInfo) Mode() iofs.FileMode { + return 0 +} +func (m mockFileInfo) Name() string { + return "test" +} +func (m mockFileInfo) Size() int64 { + return 0 +} +func (m mockFileInfo) Sys() any { + return &syscall.Win32FileAttributeData{ + FileAttributes: m.FileAttributes, + } +} + +func TestRecallOnDataAccessMockCloudFile(t *testing.T) { + fi := mockFileInfo{ + FileAttributes: windows.FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS, + } + xs := fs.ExtendedStat(fi) + + recall, err := xs.RecallOnDataAccess() + rtest.Assert(t, err == nil, "err should be nil", err) + rtest.Assert(t, recall, "RecallOnDataAccess should be true") +} + +func TestRecallOnDataAccessMockRegularFile(t *testing.T) { + fi := mockFileInfo{ + FileAttributes: windows.FILE_ATTRIBUTE_ARCHIVE, + } + xs := fs.ExtendedStat(fi) + + recall, err := xs.RecallOnDataAccess() + rtest.Assert(t, err == nil, "err should be nil", err) + rtest.Assert(t, recall == false, "RecallOnDataAccess should be false") +} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 5f0ea36d995..3215c9aa3cd 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -4,6 +4,8 @@ package fs import ( + "time" + "github.com/restic/restic/internal/errors" ) @@ -31,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } +// getVolumeNameForVolumeMountPoint add trailing backslash to input parameter +// and calls the equivalent windows api. +func getVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + return mountPoint, nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. 
-func NewVssSnapshot( - _ string, _ uint, _ ErrorHandler) (VssSnapshot, error) { +func NewVssSnapshot(_ string, + _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 8c9b8942b47..840e971078d 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -5,10 +5,12 @@ package fs import ( "fmt" + "math" "path/filepath" "runtime" "strings" "syscall" + "time" "unsafe" ole "github.com/go-ole/go-ole" @@ -20,8 +22,11 @@ import ( type HRESULT uint // HRESULT constant values necessary for using VSS api. +// +//nolint:golint const ( S_OK HRESULT = 0x00000000 + S_FALSE HRESULT = 0x00000001 E_ACCESSDENIED HRESULT = 0x80070005 E_OUTOFMEMORY HRESULT = 0x8007000E E_INVALIDARG HRESULT = 0x80070057 @@ -166,6 +171,11 @@ func (h HRESULT) Str() string { return "UNKNOWN" } +// Error implements the error interface +func (h HRESULT) Error() string { + return h.Str() +} + // VssError encapsulates errors returned from calling VSS api. type vssError struct { text string @@ -190,7 +200,12 @@ func (e *vssError) Error() string { return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult) } -// VssError encapsulates errors returned from calling VSS api. +// Unwrap returns the underlying HRESULT error +func (e *vssError) Unwrap() error { + return e.hresult +} + +// vssTextError encapsulates errors returned from calling VSS api. type vssTextError struct { text string } @@ -255,6 +270,7 @@ type IVssBackupComponents struct { } // IVssBackupComponentsVTable is the vtable for IVssBackupComponents. +// nolint:structcheck type IVssBackupComponentsVTable struct { ole.IUnknownVtbl getWriterComponentsCount uintptr @@ -364,7 +380,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync( } // IsVolumeSupported calls the equivalent VSS api. -func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, error) { +func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -374,7 +390,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7, uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3], @@ -382,7 +398,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(ole.IID_NULL)), + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0, 0) } @@ -408,24 +424,24 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) { } // AddToSnapshotSet calls the equivalent VSS api. 
-func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot *ole.GUID) error { +func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) } - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), id[0], id[1], - id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), + id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4, uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), - uintptr(unsafe.Pointer(ole.IID_NULL)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result)) @@ -478,9 +494,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) { // DeleteSnapshots calls the equivalent VSS api. func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) { - var deletedSnapshots int32 = 0 + var deletedSnapshots int32 var nondeletedSnapshotID ole.GUID - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -504,7 +520,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol // GetSnapshotProperties calls the equivalent VSS api. func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID, properties *VssSnapshotProperties) error { - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -527,8 +543,8 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { if err != nil { return err } - - proc.Call(uintptr(unsafe.Pointer(properties))) + // this function always succeeds and returns no value + _, _, _ = proc.Call(uintptr(unsafe.Pointer(properties))) return nil } @@ -543,6 +559,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { } // VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api. +// nolint:structcheck type VssSnapshotProperties struct { snapshotID ole.GUID snapshotSetID ole.GUID @@ -559,6 +576,24 @@ type VssSnapshotProperties struct { status uint } +// VssProviderProperties defines the properties of a VSS provider as part of the VSS api. +// nolint:structcheck +type VssProviderProperties struct { + providerID ole.GUID + providerName *uint16 + providerType uint32 + providerVersion *uint16 + providerVersionID ole.GUID + classID ole.GUID +} + +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerVersion = nil +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. 
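
// Hedged sketch of the ownership rule encoded by vssFreeProviderProperties
// above: the provider name/version strings are allocated by the VSS service,
// so each record fetched via IVssEnumObject.Next must be freed after use.
var props VssProviderProperties
// ... props filled in by a QueryProviders/Next loop (see getProviderID below) ...
name := ole.UTF16PtrToString(props.providerName)
vssFreeProviderProperties(&props) // releases providerName and providerVersion
_ = name
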
func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -617,8 +652,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. -func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { - hresult := vssAsync.Wait(millis) +func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { + const maxTimeout = math.MaxInt32 * time.Millisecond + if timeout > maxTimeout { + timeout = maxTimeout + } + + hresult := vssAsync.Wait(uint32(timeout.Milliseconds())) err := newVssErrorIfResultNotOK("Wait() failed", hresult) if err != nil { vssAsync.Cancel() @@ -651,6 +691,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { return nil } +// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin. +var ( + UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}") + CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}") +) + +// IVSSAdmin VSS api interface. +type IVSSAdmin struct { + ole.IUnknown +} + +// IVSSAdminVTable is the vtable for IVSSAdmin. +// nolint:structcheck +type IVSSAdminVTable struct { + ole.IUnknownVtbl + registerProvider uintptr + unregisterProvider uintptr + queryProviders uintptr + abortAllSnapshotsInProgress uintptr +} + +// getVTable returns the vtable for IVSSAdmin. +func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable { + return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable)) +} + +// QueryProviders calls the equivalent VSS api. +func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) { + var enum *IVssEnumObject + + result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2, + uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0) + + return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result)) +} + +// IVssEnumObject VSS api interface. +type IVssEnumObject struct { + ole.IUnknown +} + +// IVssEnumObjectVTable is the vtable for IVssEnumObject. +// nolint:structcheck +type IVssEnumObjectVTable struct { + ole.IUnknownVtbl + next uintptr + skip uintptr + reset uintptr + clone uintptr +} + +// getVTable returns the vtable for IVssEnumObject. +func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable { + return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable)) +} + +// Next calls the equivalent VSS api. +func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) { + var fetched uint32 + result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, + uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), + uintptr(unsafe.Pointer(&fetched)), 0, 0) + if HRESULT(result) == S_FALSE { + return uint(fetched), nil + } + + return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result)) +} + // MountPoint wraps all information of a snapshot of a mountpoint on a volume. 
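
// Worked example of the clamp in WaitUntilAsyncFinished above (sketch):
// Wait() takes a uint32 millisecond count, so an oversized time.Duration
// must be capped before conversion to avoid overflow.
timeout := 100 * 24 * time.Hour                     // 8,640,000,000 ms, exceeds MaxInt32
const maxTimeout = math.MaxInt32 * time.Millisecond // about 24.8 days
if timeout > maxTimeout {
	timeout = maxTimeout // uint32(timeout.Milliseconds()) is now safe
}
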
type MountPoint struct { isSnapshotted bool @@ -677,7 +786,7 @@ type VssSnapshot struct { snapshotProperties VssSnapshotProperties snapshotDeviceObject string mountPointInfo map[string]MountPoint - timeoutInMillis uint32 + timeout time.Duration } // GetSnapshotDeviceObject returns root path to access the snapshot files @@ -686,7 +795,7 @@ func (p *VssSnapshot) GetSnapshotDeviceObject() string { return p.snapshotDeviceObject } -// initializeCOMInterface initialize an instance of the VSS COM api +// initializeVssCOMInterface initializes an instance of the VSS COM api func initializeVssCOMInterface() (*ole.IUnknown, error) { vssInstance, err := loadIVssBackupComponentsConstructor() if err != nil { @@ -694,7 +803,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { } // ensure COM is initialized before use - ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { + // CoInitializeEx returns S_FALSE if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE { + return nil, err + } + } var oleIUnknown *ole.IUnknown result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown))) @@ -727,12 +841,34 @@ func HasSufficientPrivilegesForVSS() error { return err } +// getVolumeNameForVolumeMountPoint adds a trailing backslash to the input +// parameter and calls the equivalent windows api. +func getVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { + mountPoint += string(filepath.Separator) + } + + mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint) + if err != nil { + return mountPoint, err + } + + // A reasonable size for the buffer to accommodate the largest possible + // volume GUID path is 50 characters. + volumeNameBuffer := make([]uint16, 50) + if err := windows.GetVolumeNameForVolumeMountPoint( + mountPointPointer, &volumeNameBuffer[0], 50); err != nil { + return mountPoint, err + } + + return syscall.UTF16ToString(volumeNameBuffer), nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned.
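
// Why the 50-element buffer above suffices (sketch): a volume GUID path has
// the fixed shape `\\?\Volume{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}\`,
// i.e. 49 UTF-16 code units, leaving one slot for the terminating NUL.
name, err := getVolumeNameForVolumeMountPoint(`C:`) // trailing backslash is appended internally
if err == nil {
	fmt.Println(name) // prints the volume GUID path for drive C:
}
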
-func NewVssSnapshot( - volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) { +func NewVssSnapshot(provider string, + volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() - if err != nil { return VssSnapshot{}, newVssTextError(fmt.Sprintf( "Failed to detect windows architecture: %s", err.Error())) @@ -744,7 +880,7 @@ func NewVssSnapshot( runtime.GOARCH)) } - timeoutInMillis := uint32(timeoutInSeconds * 1000) + deadline := time.Now().Add(timeout) oleIUnknown, err := initializeVssCOMInterface() if oleIUnknown != nil { @@ -778,6 +914,12 @@ func NewVssSnapshot( iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface)) + providerID, err := getProviderID(provider) + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } + if err := iVssBackupComponents.InitializeForBackup(); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -796,13 +938,13 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata, - "GatherWriterMetadata", timeoutInMillis) + "GatherWriterMetadata", deadline) if err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } - if isSupported, err := iVssBackupComponents.IsVolumeSupported(volume); err != nil { + if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } else if !isSupported { @@ -811,50 +953,72 @@ func NewVssSnapshot( "%s", volume)) } - snapshotSetID, err := iVssBackupComponents.StartSnapshotSet() - if err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, err - } + const retryStartSnapshotSetSleep = 5 * time.Second + var snapshotSetID ole.GUID + for { + var err error + snapshotSetID, err = iVssBackupComponents.StartSnapshotSet() + if errors.Is(err, VSS_E_SNAPSHOT_SET_IN_PROGRESS) && time.Now().Add(-retryStartSnapshotSetSleep).Before(deadline) { + // retry snapshot set creation while deadline is not reached + time.Sleep(retryStartSnapshotSetSleep) + continue + } - if err := iVssBackupComponents.AddToSnapshotSet(volume, &snapshotSetID); err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, err + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } else { + break + } } - mountPoints, err := enumerateMountedFolders(volume) - if err != nil { + if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil { iVssBackupComponents.Release() - return VssSnapshot{}, newVssTextError(fmt.Sprintf( - "failed to enumerate mount points for volume %s: %s", volume, err)) + return VssSnapshot{}, err } mountPointInfo := make(map[string]MountPoint) - for _, mountPoint := range mountPoints { - // ensure every mountpoint is available even without a valid - // snapshot because we need to consider this when backing up files - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} - - if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { - continue - } else if !isSupported { - continue - } - - var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + // if filter==nil just don't process mount points for this volume at all + if filter != nil { + mountPoints, err := enumerateMountedFolders(volume) if err != nil { iVssBackupComponents.Release() - return 
VssSnapshot{}, err + + return VssSnapshot{}, newVssTextError(fmt.Sprintf( + "failed to enumerate mount points for volume %s: %s", volume, err)) } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + for _, mountPoint := range mountPoints { + // ensure every mountpoint is available even without a valid + // snapshot because we need to consider this when backing up files + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} + + if !filter(mountPoint) { + continue + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil { + continue + } else if !isSupported { + continue + } + + var mountPointSnapshotSetID ole.GUID + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID) + if err != nil { + iVssBackupComponents.Release() + + return VssSnapshot{}, err + } + + mountPointInfo[mountPoint] = MountPoint{ + isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID, + } + } } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", - timeoutInMillis) + deadline) if err != nil { // After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS // instance for proper cleanup. @@ -865,9 +1029,9 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", - timeoutInMillis) + deadline) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -875,13 +1039,12 @@ func NewVssSnapshot( var snapshotProperties VssSnapshotProperties err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } for mountPoint, info := range mountPointInfo { - if !info.isSnapshotted { continue } @@ -900,8 +1063,10 @@ func NewVssSnapshot( mountPointInfo[mountPoint] = info } - return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, timeoutInMillis}, nil + return VssSnapshot{ + iVssBackupComponents, snapshotSetID, snapshotProperties, + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline), + }, nil } // Delete deletes the created snapshot. 
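
// Hedged sketch of the VolumeFilter contract used in the mount-point loop
// above: passing nil skips mount-point processing for the volume entirely,
// while a callback selects which mounted folders join the snapshot set.
var onlySnapshotData fs.VolumeFilter = func(mountPoint string) bool {
	// hypothetical policy: exclude a scratch directory, include the rest
	return !strings.EqualFold(mountPoint, `C:\scratch\`)
}
_ = onlySnapshotData
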
@@ -922,15 +1087,17 @@ func (p *VssSnapshot) Delete() error { if p.iVssBackupComponents != nil { defer p.iVssBackupComponents.Release() + deadline := time.Now().Add(p.timeout) + err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete", - p.timeoutInMillis) + deadline) if err != nil { return err } if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil { err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error())) - p.iVssBackupComponents.AbortBackup() + _ = p.iVssBackupComponents.AbortBackup() if err != nil { return err } @@ -940,12 +1107,61 @@ func (p *VssSnapshot) Delete() error { return nil } +func getProviderID(provider string) (*ole.GUID, error) { + providerLower := strings.ToLower(provider) + switch providerLower { + case "": + return ole.IID_NULL, nil + case "ms": + return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil + } + + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + + enum, err := vssAdmin.QueryProviders() + if err != nil { + return nil, err + } + defer enum.Release() + + id := ole.NewGUID(provider) + + var props struct { + objectType uint32 + provider VssProviderProperties + } + for { + count, err := enum.Next(1, unsafe.Pointer(&props)) + if err != nil { + return nil, err + } + + if count < 1 { + return nil, errors.Errorf(`invalid VSS provider "%s"`, provider) + } + + name := ole.UTF16PtrToString(props.provider.providerName) + vssFreeProviderProperties(&props.provider) + + if id != nil && *id == props.provider.providerID || + id == nil && providerLower == strings.ToLower(name) { + return &props.provider.providerID, nil + } + } +} + // asyncCallFunc is the callback type for callAsyncFunctionAndWait. type asyncCallFunc func() (*IVSSAsync, error) // callAsyncFunctionAndWait calls an async functions and waits for it to either // finish or timeout. 
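
// Resolution order implemented by getProviderID above (sketch): an empty
// string selects the default provider (IID_NULL), "ms" the fixed GUID of
// the Microsoft software provider, and anything else is matched against
// the registered providers, by GUID if it parses, by name otherwise.
id, err := getProviderID("{b5946137-7b9f-4925-af80-51abd60b20d5}") // resolves to the same provider as "ms"
if err == nil {
	_ = id
}
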
-func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMillis uint32) error { +func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error { iVssAsync, err := function() if err != nil { return err @@ -955,7 +1171,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill return newVssTextError(fmt.Sprintf("%s() returned nil", name)) } - err = iVssAsync.WaitUntilAsyncFinished(timeoutInMillis) + timeout := time.Until(deadline) + if timeout <= 0 { + return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name)) + } + + err = iVssAsync.WaitUntilAsyncFinished(timeout) iVssAsync.Release() return err } @@ -1036,6 +1257,7 @@ func enumerateMountedFolders(volume string) ([]string, error) { return mountedFolders, nil } + // nolint:errcheck defer windows.FindVolumeMountPointClose(handle) volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index 763a9640ce3..a0317a757dc 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -20,29 +20,36 @@ import ( // Statically ensure that *dir implement those interface var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeForgetter(&dir{}) +var _ = fs.NodeGetxattrer(&dir{}) +var _ = fs.NodeListxattrer(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { root *Root + forget forgetFn items map[string]*restic.Node inode uint64 parentInode uint64 node *restic.Node m sync.Mutex + cache treeCache } func cleanupNodeName(name string) string { return filepath.Base(name) } -func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, error) { +func newDir(root *Root, forget forgetFn, inode, parentInode uint64, node *restic.Node) (*dir, error) { debug.Log("new dir for %v (%v)", node.Name, node.Subtree) return &dir{ root: root, + forget: forget, node: node, inode: inode, parentInode: parentInode, + cache: *newTreeCache(), }, nil } @@ -59,7 +66,7 @@ func unwrapCtxCanceled(err error) error { // replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // Otherwise, the node is returned. 
func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *restic.Node) ([]*restic.Node, error) { - if node.Type != "dir" || node.Subtree == nil { + if node.Type != restic.NodeTypeDir || node.Subtree == nil { return []*restic.Node{node}, nil } @@ -75,10 +82,11 @@ func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *rest return tree.Nodes, nil } -func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) { +func newDirFromSnapshot(root *Root, forget forgetFn, inode uint64, snapshot *restic.Snapshot) (*dir, error) { debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree) return &dir{ - root: root, + root: root, + forget: forget, node: &restic.Node{ AccessTime: snapshot.Time, ModTime: snapshot.Time, @@ -87,6 +95,7 @@ func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*d Subtree: snapshot.Tree, }, inode: inode, + cache: *newTreeCache(), }, nil } @@ -107,6 +116,10 @@ func (d *dir) open(ctx context.Context) error { } items := make(map[string]*restic.Node) for _, n := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + nodes, err := replaceSpecialNodes(ctx, d.root.repo, n) if err != nil { debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err) @@ -143,7 +156,7 @@ func (d *dir) calcNumberOfLinks() uint32 { // of directories contained by d count := uint32(2) for _, node := range d.items { - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { count++ } } @@ -171,14 +184,18 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { }) for _, node := range d.items { + if ctx.Err() != nil { + return nil, ctx.Err() + } + name := cleanupNodeName(node.Name) var typ fuse.DirentType switch node.Type { - case "dir": + case restic.NodeTypeDir: typ = fuse.DT_Dir - case "file": + case restic.NodeTypeFile: typ = fuse.DT_File - case "symlink": + case restic.NodeTypeSymlink: typ = fuse.DT_Link } @@ -200,25 +217,27 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, err } - node, ok := d.items[name] - if !ok { - debug.Log(" Lookup(%v) -> not found", name) - return nil, syscall.ENOENT - } - inode := inodeFromNode(d.inode, node) - switch node.Type { - case "dir": - return newDir(d.root, inode, d.inode, node) - case "file": - return newFile(d.root, inode, node) - case "symlink": - return newLink(d.root, inode, node) - case "dev", "chardev", "fifo", "socket": - return newOther(d.root, inode, node) - default: - debug.Log(" node %v has unknown type %v", name, node.Type) - return nil, syscall.ENOENT - } + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { + node, ok := d.items[name] + if !ok { + debug.Log(" Lookup(%v) -> not found", name) + return nil, syscall.ENOENT + } + inode := inodeFromNode(d.inode, node) + switch node.Type { + case restic.NodeTypeDir: + return newDir(d.root, forget, inode, d.inode, node) + case restic.NodeTypeFile: + return newFile(d.root, forget, inode, node) + case restic.NodeTypeSymlink: + return newLink(d.root, forget, inode, node) + case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: + return newOther(d.root, forget, inode, node) + default: + debug.Log(" node %v has unknown type %v", name, node.Type) + return nil, syscall.ENOENT + } + }) } func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { @@ -229,3 +248,7 @@ func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, 
resp *fus func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(d.node, req, resp) } + +func (d *dir) Forget() { + d.forget() +} diff --git a/internal/fuse/file.go b/internal/fuse/file.go index 6152c912271..a69471f8372 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -20,14 +20,16 @@ const blockSize = 512 // Statically ensure that *file and *openFile implement the given interfaces var _ = fs.HandleReader(&openFile{}) -var _ = fs.NodeListxattrer(&file{}) +var _ = fs.NodeForgetter(&file{}) var _ = fs.NodeGetxattrer(&file{}) +var _ = fs.NodeListxattrer(&file{}) var _ = fs.NodeOpener(&file{}) type file struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } type openFile struct { @@ -36,12 +38,13 @@ type openFile struct { cumsize []uint64 } -func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) { +func newFile(root *Root, forget forgetFn, inode uint64, node *restic.Node) (fusefile *file, err error) { debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content)) return &file{ - inode: inode, - root: root, - node: node, + inode: inode, + forget: forget, + root: root, + node: node, }, nil } @@ -66,13 +69,17 @@ func (f *file) Attr(_ context.Context, a *fuse.Attr) error { } -func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) { +func (f *file) Open(ctx context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) { debug.Log("open file %v with %d blobs", f.node.Name, len(f.node.Content)) var bytes uint64 cumsize := make([]uint64, 1+len(f.node.Content)) for i, id := range f.node.Content { - size, found := f.root.repo.LookupBlobSize(id, restic.DataBlob) + if ctx.Err() != nil { + return nil, ctx.Err() + } + + size, found := f.root.repo.LookupBlobSize(restic.DataBlob, id) if !found { return nil, errors.Errorf("id %v not found in repository", id) } @@ -96,20 +103,14 @@ func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse } func (f *openFile) getBlobAt(ctx context.Context, i int) (blob []byte, err error) { - - blob, ok := f.root.blobCache.Get(f.node.Content[i]) - if ok { - return blob, nil - } - - blob, err = f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], nil) + blob, err = f.root.blobCache.GetOrCompute(f.node.Content[i], func() ([]byte, error) { + return f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], nil) + }) if err != nil { debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, unwrapCtxCanceled(err) } - f.root.blobCache.Add(f.node.Content[i], blob) - return blob, nil } @@ -174,3 +175,7 @@ func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(f.node, req, resp) } + +func (f *file) Forget() { + f.forget() +} diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index 1053d49a434..3c0648bc6c9 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -89,7 +89,7 @@ func TestFuseFile(t *testing.T) { memfile []byte ) for _, id := range content { - size, found := repo.LookupBlobSize(id, restic.DataBlob) + size, found := repo.LookupBlobSize(restic.DataBlob, id) rtest.Assert(t, found, "Expected to find blob id %v", id) filesize += uint64(size) @@ -119,7 +119,7 @@ 
func TestFuseFile(t *testing.T) { root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)} inode := inodeFromNode(1, node) - f, err := newFile(root, inode, node) + f, err := newFile(root, func() {}, inode, node) rtest.OK(t, err) of, err := f.Open(context.TODO(), nil, nil) rtest.OK(t, err) @@ -162,7 +162,7 @@ func TestFuseDir(t *testing.T) { } parentInode := inodeFromName(0, "parent") inode := inodeFromName(1, "foo") - d, err := newDir(root, inode, parentInode, node) + d, err := newDir(root, func() {}, inode, parentInode, node) rtest.OK(t, err) // don't open the directory as that would require setting up a proper tree blob @@ -217,6 +217,34 @@ func testTopUIDGID(t *testing.T, cfg Config, repo restic.Repository, uid, gid ui rtest.Equals(t, uint32(0), attr.Gid) } +// The Lookup method must return the same Node object unless it was forgotten in the meantime +func testStableLookup(t *testing.T, node fs.Node, path string) fs.Node { + t.Helper() + result, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + result2, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result == result2, "%v are not the same object", path) + + result2.(fs.NodeForgetter).Forget() + result2, err = node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result != result2, "object for %v should change after forget", path) + return result +} + +func TestStableNodeObjects(t *testing.T) { + repo := repository.TestRepository(t) + restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 2) + root := NewRoot(repo, Config{}) + + idsdir := testStableLookup(t, root, "ids") + snapID := loadFirstSnapshot(t, repo).ID().Str() + snapshotdir := testStableLookup(t, idsdir, snapID) + dir := testStableLookup(t, snapshotdir, "dir-0") + testStableLookup(t, dir, "file-2") +} + // Test reporting of fuse.Attr.Blocks in multiples of 512. func TestBlocks(t *testing.T) { root := &Root{} @@ -249,7 +277,7 @@ func TestBlocks(t *testing.T) { } func TestInodeFromNode(t *testing.T) { - node := &restic.Node{Name: "foo.txt", Type: "chardev", Links: 2} + node := &restic.Node{Name: "foo.txt", Type: restic.NodeTypeCharDev, Links: 2} ino1 := inodeFromNode(1, node) ino2 := inodeFromNode(2, node) rtest.Assert(t, ino1 == ino2, "inodes %d, %d of hard links differ", ino1, ino2) @@ -261,9 +289,9 @@ func TestInodeFromNode(t *testing.T) { // Regression test: in a path a/b/b, the grandchild should not get the // same inode as the grandparent. 
- a := &restic.Node{Name: "a", Type: "dir", Links: 2} - ab := &restic.Node{Name: "b", Type: "dir", Links: 2} - abb := &restic.Node{Name: "b", Type: "dir", Links: 2} + a := &restic.Node{Name: "a", Type: restic.NodeTypeDir, Links: 2} + ab := &restic.Node{Name: "b", Type: restic.NodeTypeDir, Links: 2} + abb := &restic.Node{Name: "b", Type: restic.NodeTypeDir, Links: 2} inoA := inodeFromNode(1, a) inoAb := inodeFromNode(inoA, ab) inoAbb := inodeFromNode(inoAb, abb) @@ -272,11 +300,11 @@ func TestInodeFromNode(t *testing.T) { } func TestLink(t *testing.T) { - node := &restic.Node{Name: "foo.txt", Type: "symlink", Links: 1, LinkTarget: "dst", ExtendedAttributes: []restic.ExtendedAttribute{ + node := &restic.Node{Name: "foo.txt", Type: restic.NodeTypeSymlink, Links: 1, LinkTarget: "dst", ExtendedAttributes: []restic.ExtendedAttribute{ {Name: "foo", Value: []byte("bar")}, }} - lnk, err := newLink(&Root{}, 42, node) + lnk, err := newLink(&Root{}, func() {}, 42, node) rtest.OK(t, err) target, err := lnk.Readlink(context.TODO(), nil) rtest.OK(t, err) @@ -305,11 +333,11 @@ func BenchmarkInode(b *testing.B) { }{ { name: "no_hard_links", - node: restic.Node{Name: "a somewhat long-ish filename.svg.bz2", Type: "fifo"}, + node: restic.Node{Name: "a somewhat long-ish filename.svg.bz2", Type: restic.NodeTypeFifo}, }, { name: "hard_link", - node: restic.Node{Name: "some other filename", Type: "file", Links: 2}, + node: restic.Node{Name: "some other filename", Type: restic.NodeTypeFile, Links: 2}, }, } { b.Run(sub.name, func(b *testing.B) { diff --git a/internal/fuse/inode.go b/internal/fuse/inode.go index 5e2ece4ac99..88d5b8bb812 100644 --- a/internal/fuse/inode.go +++ b/internal/fuse/inode.go @@ -25,7 +25,7 @@ func inodeFromName(parent uint64, name string) uint64 { // inodeFromNode generates an inode number for a file within a snapshot. func inodeFromNode(parent uint64, node *restic.Node) (inode uint64) { - if node.Links > 1 && node.Type != "dir" { + if node.Links > 1 && node.Type != restic.NodeTypeDir { // If node has hard links, give them all the same inode, // irrespective of the parent. 
var buf [16]byte diff --git a/internal/fuse/link.go b/internal/fuse/link.go index 3aea8b06e91..f8bf8d3ee11 100644 --- a/internal/fuse/link.go +++ b/internal/fuse/link.go @@ -12,16 +12,20 @@ import ( ) // Statically ensure that *link implements the given interface +var _ = fs.NodeForgetter(&link{}) +var _ = fs.NodeGetxattrer(&link{}) +var _ = fs.NodeListxattrer(&link{}) var _ = fs.NodeReadlinker(&link{}) type link struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) { - return &link{root: root, inode: inode, node: node}, nil +func newLink(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*link, error) { + return &link{root: root, forget: forget, inode: inode, node: node}, nil } func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -55,3 +59,7 @@ func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(l.node, req, resp) } + +func (l *link) Forget() { + l.forget() +} diff --git a/internal/fuse/other.go b/internal/fuse/other.go index f536de5c1be..cbd9667ccf1 100644 --- a/internal/fuse/other.go +++ b/internal/fuse/other.go @@ -7,17 +7,23 @@ import ( "context" "github.com/anacrolix/fuse" + "github.com/anacrolix/fuse/fs" "github.com/restic/restic/internal/restic" ) +// Statically ensure that *other implements the given interface +var _ = fs.NodeForgetter(&other{}) +var _ = fs.NodeReadlinker(&other{}) + type other struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) { - return &other{root: root, inode: inode, node: node}, nil +func newOther(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*other, error) { + return &other{root: root, forget: forget, inode: inode, node: node}, nil } func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -40,3 +46,7 @@ func (l *other) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *other) Forget() { + l.forget() +} diff --git a/internal/fuse/root.go b/internal/fuse/root.go index ab6116f0dcc..72a0634fca5 100644 --- a/internal/fuse/root.go +++ b/internal/fuse/root.go @@ -66,7 +66,7 @@ func NewRoot(repo restic.Repository, cfg Config) *Root { } } - root.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") + root.SnapshotsDir = NewSnapshotsDir(root, func() {}, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") return root } diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index 7369ea17ad5..bcab160849e 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -19,25 +19,30 @@ import ( // It uses the saved prefix to select the corresponding MetaDirData. 
type SnapshotsDir struct { root *Root + forget forgetFn inode uint64 parentInode uint64 dirStruct *SnapshotsDirStructure prefix string + cache treeCache } // ensure that *SnapshotsDir implements these interfaces var _ = fs.HandleReadDirAller(&SnapshotsDir{}) +var _ = fs.NodeForgetter(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{}) // NewSnapshotsDir returns a new directory structure containing snapshots and "latest" links -func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { +func NewSnapshotsDir(root *Root, forget forgetFn, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { debug.Log("create snapshots dir, inode %d", inode) return &SnapshotsDir{ root: root, + forget: forget, inode: inode, parentInode: parentInode, dirStruct: dirStruct, prefix: prefix, + cache: *newTreeCache(), } } @@ -78,6 +83,10 @@ func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { } for name, entry := range meta.names { + if ctx.Err() != nil { + return nil, ctx.Err() + } + d := fuse.Dirent{ Inode: inodeFromName(d.inode, name), Name: name, @@ -103,33 +112,41 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return nil, syscall.ENOENT } - entry := meta.names[name] - if entry != nil { + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { + entry := meta.names[name] + if entry == nil { + return nil, syscall.ENOENT + } + inode := inodeFromName(d.inode, name) if entry.linkTarget != "" { - return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot) + return newSnapshotLink(d.root, forget, inode, entry.linkTarget, entry.snapshot) } else if entry.snapshot != nil { - return newDirFromSnapshot(d.root, inode, entry.snapshot) + return newDirFromSnapshot(d.root, forget, inode, entry.snapshot) } - return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil - } + return NewSnapshotsDir(d.root, forget, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil + }) +} - return nil, syscall.ENOENT +func (d *SnapshotsDir) Forget() { + d.forget() } // SnapshotLink type snapshotLink struct { root *Root + forget forgetFn inode uint64 target string snapshot *restic.Snapshot } +var _ = fs.NodeForgetter(&snapshotLink{}) var _ = fs.NodeReadlinker(&snapshotLink{}) // newSnapshotLink -func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { - return &snapshotLink{root: root, inode: inode, target: target, snapshot: snapshot}, nil +func newSnapshotLink(root *Root, forget forgetFn, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { + return &snapshotLink{root: root, forget: forget, inode: inode, target: target, snapshot: snapshot}, nil } // Readlink @@ -153,3 +170,7 @@ func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *snapshotLink) Forget() { + l.forget() +} diff --git a/internal/fuse/snapshots_dirstruct.go b/internal/fuse/snapshots_dirstruct.go index d40ae629809..03ff5319301 100644 --- a/internal/fuse/snapshots_dirstruct.go +++ b/internal/fuse/snapshots_dirstruct.go @@ -6,6 +6,7 @@ package fuse import ( "bytes" "context" + "crypto/sha256" "fmt" "path" "sort" @@ -15,8 +16,6 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" - - "github.com/minio/sha256-simd" ) type MetaDirData struct { @@ -295,7 +294,7 @@ func (d 
*SnapshotsDirStructure) updateSnapshots(ctx context.Context) error { } var snapshots restic.Snapshots - err := d.root.cfg.Filter.FindAll(ctx, d.root.repo, d.root.repo, nil, func(id string, sn *restic.Snapshot, err error) error { + err := d.root.cfg.Filter.FindAll(ctx, d.root.repo, d.root.repo, nil, func(_ string, sn *restic.Snapshot, _ error) error { if sn != nil { snapshots = append(snapshots, sn) } diff --git a/internal/fuse/tree_cache.go b/internal/fuse/tree_cache.go new file mode 100644 index 00000000000..d913f9b81b3 --- /dev/null +++ b/internal/fuse/tree_cache.go @@ -0,0 +1,45 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package fuse + +import ( + "sync" + + "github.com/anacrolix/fuse/fs" +) + +type treeCache struct { + nodes map[string]fs.Node + m sync.Mutex +} + +type forgetFn func() + +func newTreeCache() *treeCache { + return &treeCache{ + nodes: map[string]fs.Node{}, + } +} + +func (t *treeCache) lookupOrCreate(name string, create func(forget forgetFn) (fs.Node, error)) (fs.Node, error) { + t.m.Lock() + defer t.m.Unlock() + + if node, ok := t.nodes[name]; ok { + return node, nil + } + + node, err := create(func() { + t.m.Lock() + defer t.m.Unlock() + + delete(t.nodes, name) + }) + if err != nil { + return nil, err + } + + t.nodes[name] = node + return node, nil +} diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go deleted file mode 100644 index 6b40013eec5..00000000000 --- a/internal/migrations/s3_layout.go +++ /dev/null @@ -1,122 +0,0 @@ -package migrations - -import ( - "context" - "fmt" - "os" - "path" - - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/layout" - "github.com/restic/restic/internal/backend/s3" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/restic" -) - -func init() { - register(&S3Layout{}) -} - -// S3Layout migrates a repository on an S3 backend from the "s3legacy" to the -// "default" layout. -type S3Layout struct{} - -// Check tests whether the migration can be applied. -func (m *S3Layout) Check(_ context.Context, repo restic.Repository) (bool, string, error) { - be := backend.AsBackend[*s3.Backend](repo.Backend()) - if be == nil { - debug.Log("backend is not s3") - return false, "backend is not s3", nil - } - - if be.Layout.Name() != "s3legacy" { - debug.Log("layout is not s3legacy") - return false, "not using the legacy s3 layout", nil - } - - return true, "", nil -} - -func (m *S3Layout) RepoCheck() bool { - return false -} - -func retry(max int, fail func(err error), f func() error) error { - var err error - for i := 0; i < max; i++ { - err = f() - if err == nil { - return nil - } - if fail != nil { - fail(err) - } - } - return err -} - -// maxErrors for retrying renames on s3. -const maxErrors = 20 - -func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l layout.Layout, t restic.FileType) error { - printErr := func(err error) { - fmt.Fprintf(os.Stderr, "renaming file returned error: %v\n", err) - } - - return be.List(ctx, t, func(fi backend.FileInfo) error { - h := backend.Handle{Type: t, Name: fi.Name} - debug.Log("move %v", h) - - return retry(maxErrors, printErr, func() error { - return be.Rename(ctx, h, l) - }) - }) -} - -// Apply runs the migration. 
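
// Hedged usage sketch for the treeCache added above: the create callback
// receives a forget function that evicts exactly this entry, which the
// fuse node later exposes to the kernel via its Forget method. The
// root/inode/resticNode variables are assumed to exist in the caller.
cache := newTreeCache()
node, err := cache.lookupOrCreate("file-2", func(forget forgetFn) (fs.Node, error) {
	return newFile(root, forget, inode, resticNode)
})
// a second lookupOrCreate("file-2", ...) returns the same node until the
// kernel calls Forget, which removes the entry and allows a fresh object
_ = node
_ = err
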
-func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error { - be := backend.AsBackend[*s3.Backend](repo.Backend()) - if be == nil { - debug.Log("backend is not s3") - return errors.New("backend is not s3") - } - - oldLayout := &layout.S3LegacyLayout{ - Path: be.Path(), - Join: path.Join, - } - - newLayout := &layout.DefaultLayout{ - Path: be.Path(), - Join: path.Join, - } - - be.Layout = oldLayout - - for _, t := range []restic.FileType{ - restic.SnapshotFile, - restic.PackFile, - restic.KeyFile, - restic.LockFile, - } { - err := m.moveFiles(ctx, be, newLayout, t) - if err != nil { - return err - } - } - - be.Layout = newLayout - - return nil -} - -// Name returns the name for this migration. -func (m *S3Layout) Name() string { - return "s3_layout" -} - -// Desc returns a short description what the migration does. -func (m *S3Layout) Desc() string { - return "move files from 's3legacy' to the 'default' repository layout" -} diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index 585d9e8c745..23a7f1ff0aa 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -3,11 +3,8 @@ package migrations import ( "context" "fmt" - "io" - "os" - "path/filepath" - "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -15,26 +12,6 @@ func init() { register(&UpgradeRepoV2{}) } -type UpgradeRepoV2Error struct { - UploadNewConfigError error - ReuploadOldConfigError error - - BackupFilePath string -} - -func (err *UpgradeRepoV2Error) Error() string { - if err.ReuploadOldConfigError != nil { - return fmt.Sprintf("error uploading config (%v), re-uploading old config filed failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath) - } - - return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath) -} - -func (err *UpgradeRepoV2Error) Unwrap() error { - // consider the original upload error as the primary cause - return err.UploadNewConfigError -} - type UpgradeRepoV2 struct{} func (*UpgradeRepoV2) Name() string { @@ -57,74 +34,7 @@ func (*UpgradeRepoV2) Check(_ context.Context, repo restic.Repository) (bool, st func (*UpgradeRepoV2) RepoCheck() bool { return true } -func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error { - h := backend.Handle{Type: backend.ConfigFile} - - if !repo.Backend().HasAtomicReplace() { - // remove the original file for backends which do not support atomic overwriting - err := repo.Backend().Remove(ctx, h) - if err != nil { - return fmt.Errorf("remove config failed: %w", err) - } - } - - // upgrade config - cfg := repo.Config() - cfg.Version = 2 - - err := restic.SaveConfig(ctx, repo, cfg) - if err != nil { - return fmt.Errorf("save new config file failed: %w", err) - } - - return nil -} func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { - tempdir, err := os.MkdirTemp("", "restic-migrate-upgrade-repo-v2-") - if err != nil { - return fmt.Errorf("create temp dir failed: %w", err) - } - - h := backend.Handle{Type: restic.ConfigFile} - - // read raw config file and save it to a temp dir, just in case - var rawConfigFile []byte - err = repo.Backend().Load(ctx, h, 0, 0, func(rd io.Reader) (err error) { - rawConfigFile, err = io.ReadAll(rd) - 
return err - }) - if err != nil { - return fmt.Errorf("load config file failed: %w", err) - } - - backupFileName := filepath.Join(tempdir, "config") - err = os.WriteFile(backupFileName, rawConfigFile, 0600) - if err != nil { - return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) - } - - // run the upgrade - err = m.upgrade(ctx, repo) - if err != nil { - - // build an error we can return to the caller - repoError := &UpgradeRepoV2Error{ - UploadNewConfigError: err, - BackupFilePath: backupFileName, - } - - // try contingency methods, reupload the original file - _ = repo.Backend().Remove(ctx, h) - err = repo.Backend().Save(ctx, h, backend.NewByteReader(rawConfigFile, nil)) - if err != nil { - repoError.ReuploadOldConfigError = err - } - - return repoError - } - - _ = os.Remove(backupFileName) - _ = os.Remove(tempdir) - return nil + return repository.UpgradeRepo(ctx, repo.(*repository.Repository)) } diff --git a/internal/migrations/upgrade_repo_v2_test.go b/internal/migrations/upgrade_repo_v2_test.go index 845d20e92ba..1f4cba4e57a 100644 --- a/internal/migrations/upgrade_repo_v2_test.go +++ b/internal/migrations/upgrade_repo_v2_test.go @@ -2,19 +2,13 @@ package migrations import ( "context" - "os" - "path/filepath" - "sync" "testing" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/test" ) func TestUpgradeRepoV2(t *testing.T) { - repo := repository.TestRepositoryWithVersion(t, 1) + repo, _, _ := repository.TestRepositoryWithVersion(t, 1) if repo.Config().Version != 1 { t.Fatal("test repo has wrong version") } @@ -35,73 +29,3 @@ func TestUpgradeRepoV2(t *testing.T) { t.Fatal(err) } } - -type failBackend struct { - backend.Backend - - mu sync.Mutex - ConfigFileSavesUntilError uint -} - -func (be *failBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { - if h.Type != backend.ConfigFile { - return be.Backend.Save(ctx, h, rd) - } - - be.mu.Lock() - if be.ConfigFileSavesUntilError == 0 { - be.mu.Unlock() - return errors.New("failure induced for testing") - } - - be.ConfigFileSavesUntilError-- - be.mu.Unlock() - - return be.Backend.Save(ctx, h, rd) -} - -func TestUpgradeRepoV2Failure(t *testing.T) { - be := repository.TestBackend(t) - - // wrap backend so that it fails upgrading the config after the initial write - be = &failBackend{ - ConfigFileSavesUntilError: 1, - Backend: be, - } - - repo := repository.TestRepositoryWithBackend(t, be, 1, repository.Options{}) - if repo.Config().Version != 1 { - t.Fatal("test repo has wrong version") - } - - m := &UpgradeRepoV2{} - - ok, _, err := m.Check(context.Background(), repo) - if err != nil { - t.Fatal(err) - } - - if !ok { - t.Fatal("migration check returned false") - } - - err = m.Apply(context.Background(), repo) - if err == nil { - t.Fatal("expected error returned from Apply(), got nil") - } - - upgradeErr := err.(*UpgradeRepoV2Error) - if upgradeErr.UploadNewConfigError == nil { - t.Fatal("expected upload error, got nil") - } - - if upgradeErr.ReuploadOldConfigError == nil { - t.Fatal("expected reupload error, got nil") - } - - if upgradeErr.BackupFilePath == "" { - t.Fatal("no backup file path found") - } - test.OK(t, os.Remove(upgradeErr.BackupFilePath)) - test.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath))) -} diff --git a/internal/repository/check.go b/internal/repository/check.go new file mode 100644 index 00000000000..2bf2ac8f36d --- /dev/null +++ 
b/internal/repository/check.go @@ -0,0 +1,213 @@ +package repository + +import ( + "bufio" + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "sort" + + "github.com/klauspost/compress/zstd" + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository/hashing" + "github.com/restic/restic/internal/repository/pack" + "github.com/restic/restic/internal/restic" +) + +// ErrPackData is returned if errors are discovered while verifying a packfile +type ErrPackData struct { + PackID restic.ID + errs []error +} + +func (e *ErrPackData) Error() string { + return fmt.Sprintf("pack %v contains %v errors: %v", e.PackID, len(e.errs), e.errs) +} + +type partialReadError struct { + err error +} + +func (e *partialReadError) Error() string { + return e.err.Error() +} + +// CheckPack reads a pack and checks the integrity of all blobs. +func CheckPack(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { + err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) + if err != nil { + if r.cache != nil { + // ignore error as there's not much we can do here + _ = r.cache.Forget(backend.Handle{Type: restic.PackFile, Name: id.String()}) + } + + // retry pack verification to detect transient errors + err2 := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) + if err2 != nil { + err = err2 + } else { + err = fmt.Errorf("check successful on second attempt, original error %w", err) + } + } + return err +} + +func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { + + debug.Log("checking pack %v", id.String()) + + if len(blobs) == 0 { + return &ErrPackData{PackID: id, errs: []error{errors.New("pack is empty or not indexed")}} + } + + // sanity check blobs in index + sort.Slice(blobs, func(i, j int) bool { + return blobs[i].Offset < blobs[j].Offset + }) + idxHdrSize := pack.CalculateHeaderSize(blobs) + lastBlobEnd := 0 + nonContinuousPack := false + for _, blob := range blobs { + if lastBlobEnd != int(blob.Offset) { + nonContinuousPack = true + } + lastBlobEnd = int(blob.Offset + blob.Length) + } + // size was calculated by masterindex.PackSize, thus there's no need to recalculate it here + + var errs []error + if nonContinuousPack { + debug.Log("Index for pack contains gaps / overlaps, blobs: %v", blobs) + errs = append(errs, errors.New("index for pack contains gaps / overlapping blobs")) + } + + // calculate hash on-the-fly while reading the pack and capture pack header + var hash restic.ID + var hdrBuf []byte + h := backend.Handle{Type: backend.PackFile, Name: id.String()} + err := r.be.Load(ctx, h, int(size), 0, func(rd io.Reader) error { + hrd := hashing.NewReader(rd, sha256.New()) + bufRd.Reset(hrd) + + it := newPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) + for { + if ctx.Err() != nil { + return ctx.Err() + } + + val, err := it.Next() + if err == errPackEOF { + break + } else if err != nil { + return &partialReadError{err} + } + debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle) + if val.Err != nil { + debug.Log(" error verifying blob %v: %v", val.Handle.ID, val.Err) + errs = append(errs, errors.Errorf("blob %v: %v", val.Handle.ID, val.Err)) + } + } + + // skip enough bytes until we reach the possible header start + curPos := lastBlobEnd + minHdrStart := int(size) - pack.MaxHeaderSize + 
if minHdrStart > curPos { + _, err := bufRd.Discard(minHdrStart - curPos) + if err != nil { + return &partialReadError{err} + } + curPos += minHdrStart - curPos + } + + // read remainder, which should be the pack header + var err error + hdrBuf = make([]byte, int(size-int64(curPos))) + _, err = io.ReadFull(bufRd, hdrBuf) + if err != nil { + return &partialReadError{err} + } + + hash = restic.IDFromHash(hrd.Sum(nil)) + return nil + }) + if err != nil { + var e *partialReadError + isPartialReadError := errors.As(err, &e) + // failed to load the pack file, return as further checks cannot succeed anyways + debug.Log(" error streaming pack (partial %v): %v", isPartialReadError, err) + if isPartialReadError { + return &ErrPackData{PackID: id, errs: append(errs, fmt.Errorf("partial download error: %w", err))} + } + + // The check command suggests to repair files for which a `ErrPackData` is returned. However, this file + // completely failed to download such that there's no point in repairing anything. + return fmt.Errorf("download error: %w", err) + } + if !hash.Equal(id) { + debug.Log("pack ID does not match, want %v, got %v", id, hash) + return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("unexpected pack id %v", hash))} + } + + blobs, hdrSize, err := pack.List(r.Key(), bytes.NewReader(hdrBuf), int64(len(hdrBuf))) + if err != nil { + return &ErrPackData{PackID: id, errs: append(errs, err)} + } + + if uint32(idxHdrSize) != hdrSize { + debug.Log("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize) + errs = append(errs, errors.Errorf("pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) + } + + for _, blob := range blobs { + // Check if blob is contained in index and position is correct + idxHas := false + for _, pb := range r.LookupBlob(blob.BlobHandle.Type, blob.BlobHandle.ID) { + if pb.PackID == id && pb.Blob == blob { + idxHas = true + break + } + } + if !idxHas { + errs = append(errs, errors.Errorf("blob %v is not contained in index or position is incorrect", blob.ID)) + continue + } + } + + if len(errs) > 0 { + return &ErrPackData{PackID: id, errs: errs} + } + + return nil +} + +type bufReader struct { + rd *bufio.Reader + buf []byte +} + +func newBufReader(rd *bufio.Reader) *bufReader { + return &bufReader{ + rd: rd, + } +} + +func (b *bufReader) Discard(n int) (discarded int, err error) { + return b.rd.Discard(n) +} + +func (b *bufReader) ReadFull(n int) (buf []byte, err error) { + if cap(b.buf) < n { + b.buf = make([]byte, n) + } + b.buf = b.buf[:n] + + _, err = io.ReadFull(b.rd, b.buf) + if err != nil { + return nil, err + } + return b.buf, nil +} diff --git a/internal/repository/fuzz_test.go b/internal/repository/fuzz_test.go index 80372f8e099..c20f9a710ad 100644 --- a/internal/repository/fuzz_test.go +++ b/internal/repository/fuzz_test.go @@ -18,7 +18,7 @@ func FuzzSaveLoadBlob(f *testing.F) { } id := restic.Hash(blob) - repo := TestRepositoryWithVersion(t, 2) + repo, _, _ := TestRepositoryWithVersion(t, 2) var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) diff --git a/internal/hashing/reader.go b/internal/repository/hashing/reader.go similarity index 100% rename from internal/hashing/reader.go rename to internal/repository/hashing/reader.go diff --git a/internal/hashing/reader_test.go b/internal/repository/hashing/reader_test.go similarity index 100% rename from internal/hashing/reader_test.go rename to internal/repository/hashing/reader_test.go diff --git a/internal/hashing/writer.go 
b/internal/repository/hashing/writer.go similarity index 100% rename from internal/hashing/writer.go rename to internal/repository/hashing/writer.go diff --git a/internal/hashing/writer_test.go b/internal/repository/hashing/writer_test.go similarity index 100% rename from internal/hashing/writer_test.go rename to internal/repository/hashing/writer_test.go diff --git a/internal/repository/index/associated_data.go b/internal/repository/index/associated_data.go new file mode 100644 index 00000000000..ee58957e0fe --- /dev/null +++ b/internal/repository/index/associated_data.go @@ -0,0 +1,156 @@ +package index + +import ( + "context" + "sort" + + "github.com/restic/restic/internal/restic" +) + +type associatedSetSub[T any] struct { + value []T + isSet []bool +} + +// AssociatedSet is a memory efficient implementation of a BlobSet that can +// store a small data item for each BlobHandle. It relies on a special property +// of our MasterIndex implementation. A BlobHandle can be permanently identified +// using an offset that never changes as MasterIndex entries cannot be modified (only added). +// +// The AssociatedSet thus can use an array with the size of the MasterIndex to store +// its data. Access to an individual entry is possible by looking up the BlobHandle's +// offset from the MasterIndex. +// +// BlobHandles that are not part of the MasterIndex can be stored by placing them in +// an overflow set that is expected to be empty in the normal case. +type AssociatedSet[T any] struct { + byType [restic.NumBlobTypes]associatedSetSub[T] + overflow map[restic.BlobHandle]T + idx *MasterIndex +} + +func NewAssociatedSet[T any](mi *MasterIndex) *AssociatedSet[T] { + a := AssociatedSet[T]{ + overflow: make(map[restic.BlobHandle]T), + idx: mi, + } + + for typ := range a.byType { + if typ == 0 { + continue + } + // index starts counting at 1 + count := mi.stableLen(restic.BlobType(typ)) + 1 + a.byType[typ].value = make([]T, count) + a.byType[typ].isSet = make([]bool, count) + } + + return &a +} + +func (a *AssociatedSet[T]) Get(bh restic.BlobHandle) (T, bool) { + if val, ok := a.overflow[bh]; ok { + return val, true + } + + idx := a.idx.blobIndex(bh) + bt := &a.byType[bh.Type] + if idx >= len(bt.value) || idx == -1 { + var zero T + return zero, false + } + + has := bt.isSet[idx] + if has { + return bt.value[idx], has + } + var zero T + return zero, false +} + +func (a *AssociatedSet[T]) Has(bh restic.BlobHandle) bool { + _, ok := a.Get(bh) + return ok +} + +func (a *AssociatedSet[T]) Set(bh restic.BlobHandle, val T) { + if _, ok := a.overflow[bh]; ok { + a.overflow[bh] = val + return + } + + idx := a.idx.blobIndex(bh) + bt := &a.byType[bh.Type] + if idx >= len(bt.value) || idx == -1 { + a.overflow[bh] = val + } else { + bt.value[idx] = val + bt.isSet[idx] = true + } +} + +func (a *AssociatedSet[T]) Insert(bh restic.BlobHandle) { + var zero T + a.Set(bh, zero) +} + +func (a *AssociatedSet[T]) Delete(bh restic.BlobHandle) { + if _, ok := a.overflow[bh]; ok { + delete(a.overflow, bh) + return + } + + idx := a.idx.blobIndex(bh) + bt := &a.byType[bh.Type] + if idx < len(bt.value) && idx != -1 { + bt.isSet[idx] = false + } +} + +func (a *AssociatedSet[T]) Len() int { + count := 0 + a.For(func(_ restic.BlobHandle, _ T) { + count++ + }) + return count +} + +func (a *AssociatedSet[T]) For(cb func(bh restic.BlobHandle, val T)) { + for k, v := range a.overflow { + cb(k, v) + } + + _ = a.idx.Each(context.Background(), func(pb restic.PackedBlob) { + if _, ok := a.overflow[pb.BlobHandle]; ok { + // already 
reported via overflow set + return + } + + val, known := a.Get(pb.BlobHandle) + if known { + cb(pb.BlobHandle, val) + } + }) +} + +// List returns a sorted slice of all BlobHandle in the set. +func (a *AssociatedSet[T]) List() restic.BlobHandles { + list := make(restic.BlobHandles, 0) + a.For(func(bh restic.BlobHandle, _ T) { + list = append(list, bh) + }) + + return list +} + +func (a *AssociatedSet[T]) String() string { + list := a.List() + sort.Sort(list) + + str := list.String() + if len(str) < 2 { + return "{}" + } + + return "{" + str[1:len(str)-1] + "}" +} diff --git a/internal/repository/index/associated_data_test.go b/internal/repository/index/associated_data_test.go new file mode 100644 index 00000000000..82dd9908d58 --- /dev/null +++ b/internal/repository/index/associated_data_test.go @@ -0,0 +1,154 @@ +package index + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +type noopSaver struct{} + +func (n *noopSaver) Connections() uint { + return 2 +} +func (n *noopSaver) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (restic.ID, error) { + return restic.Hash(buf), nil +} + +func makeFakePackedBlob() (restic.BlobHandle, restic.PackedBlob) { + bh := restic.NewRandomBlobHandle() + blob := restic.PackedBlob{ + PackID: restic.NewRandomID(), + Blob: restic.Blob{ + BlobHandle: bh, + Length: uint(crypto.CiphertextLength(10)), + Offset: 0, + }, + } + return bh, blob +} + +func TestAssociatedSet(t *testing.T) { + bh, blob := makeFakePackedBlob() + + mi := NewMasterIndex() + mi.StorePack(blob.PackID, []restic.Blob{blob.Blob}) + test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) + + bs := NewAssociatedSet[uint8](mi) + test.Equals(t, bs.Len(), 0) + test.Equals(t, bs.List(), restic.BlobHandles{}) + + // check non existent + test.Equals(t, bs.Has(bh), false) + _, ok := bs.Get(bh) + test.Equals(t, false, ok) + + // test insert + bs.Insert(bh) + test.Equals(t, bs.Has(bh), true) + test.Equals(t, bs.Len(), 1) + test.Equals(t, bs.List(), restic.BlobHandles{bh}) + test.Equals(t, 0, len(bs.overflow)) + + // test set + bs.Set(bh, 42) + test.Equals(t, bs.Has(bh), true) + test.Equals(t, bs.Len(), 1) + val, ok := bs.Get(bh) + test.Equals(t, true, ok) + test.Equals(t, uint8(42), val) + + s := bs.String() + test.Assert(t, len(s) > 10, "invalid string: %v", s) + + // test remove + bs.Delete(bh) + test.Equals(t, bs.Len(), 0) + test.Equals(t, bs.Has(bh), false) + test.Equals(t, bs.List(), restic.BlobHandles{}) + + test.Equals(t, "{}", bs.String()) + + // test set + bs.Set(bh, 43) + test.Equals(t, bs.Has(bh), true) + test.Equals(t, bs.Len(), 1) + val, ok = bs.Get(bh) + test.Equals(t, true, ok) + test.Equals(t, uint8(43), val) + test.Equals(t, 0, len(bs.overflow)) + // test update + bs.Set(bh, 44) + val, ok = bs.Get(bh) + test.Equals(t, true, ok) + test.Equals(t, uint8(44), val) + test.Equals(t, 0, len(bs.overflow)) + + // test overflow blob + of := restic.NewRandomBlobHandle() + test.Equals(t, false, bs.Has(of)) + // set + bs.Set(of, 7) + test.Equals(t, 1, len(bs.overflow)) + test.Equals(t, bs.Len(), 2) + // get + val, ok = bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(7), val) + test.Equals(t, bs.List(), restic.BlobHandles{of, bh}) + // update + bs.Set(of, 8) + val, ok = bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(8), val) + test.Equals(t, 1, len(bs.overflow)) + // delete + bs.Delete(of) + test.Equals(t, bs.Len(), 1) + test.Equals(t, 
bs.Has(of), false) + test.Equals(t, bs.List(), restic.BlobHandles{bh}) + test.Equals(t, 0, len(bs.overflow)) +} + +func TestAssociatedSetWithExtendedIndex(t *testing.T) { + _, blob := makeFakePackedBlob() + + mi := NewMasterIndex() + mi.StorePack(blob.PackID, []restic.Blob{blob.Blob}) + test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) + + bs := NewAssociatedSet[uint8](mi) + + // add new blobs to index after building the set + of, blob2 := makeFakePackedBlob() + mi.StorePack(blob2.PackID, []restic.Blob{blob2.Blob}) + test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) + + // non-existent + test.Equals(t, false, bs.Has(of)) + // set + bs.Set(of, 5) + test.Equals(t, 1, len(bs.overflow)) + test.Equals(t, bs.Len(), 1) + // get + val, ok := bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(5), val) + test.Equals(t, bs.List(), restic.BlobHandles{of}) + // update + bs.Set(of, 8) + val, ok = bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(8), val) + test.Equals(t, 1, len(bs.overflow)) + // delete + bs.Delete(of) + test.Equals(t, bs.Len(), 0) + test.Equals(t, bs.Has(of), false) + test.Equals(t, bs.List(), restic.BlobHandles{}) + test.Equals(t, 0, len(bs.overflow)) +} diff --git a/internal/index/index.go b/internal/repository/index/index.go similarity index 79% rename from internal/index/index.go rename to internal/repository/index/index.go index ecd4815944f..c62c1c462df 100644 --- a/internal/index/index.go +++ b/internal/repository/index/index.go @@ -1,9 +1,11 @@ package index import ( + "bytes" "context" "encoding/json" "io" + "math" "sync" "time" @@ -43,14 +45,13 @@ import ( // Index holds lookup tables for id -> pack. type Index struct { - m sync.Mutex + m sync.RWMutex byType [restic.NumBlobTypes]indexMap packs restic.IDs - final bool // set to true for all indexes read from the backend ("finalized") - ids restic.IDs // set to the IDs of the contained finalized indexes - supersedes restic.IDs - created time.Time + final bool // set to true for all indexes read from the backend ("finalized") + ids restic.IDs // set to the IDs of the contained finalized indexes + created time.Time } // NewIndex returns a new index. @@ -67,11 +68,9 @@ func (idx *Index) addToPacks(id restic.ID) int { return len(idx.packs) - 1 } -const maxuint32 = 1<<32 - 1 - func (idx *Index) store(packIndex int, blob restic.Blob) { // assert that offset and length fit into uint32! - if blob.Offset > maxuint32 || blob.Length > maxuint32 || blob.UncompressedLength > maxuint32 { + if blob.Offset > math.MaxUint32 || blob.Length > math.MaxUint32 || blob.UncompressedLength > math.MaxUint32 { panic("offset or length does not fit in uint32. You have packs > 4GB!") } @@ -82,22 +81,21 @@ func (idx *Index) store(packIndex int, blob restic.Blob) { // Final returns true iff the index is already written to the repository, it is // finalized. func (idx *Index) Final() bool { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() return idx.final } const ( - indexMaxBlobs = 50000 - indexMaxBlobsCompressed = 3 * indexMaxBlobs - indexMaxAge = 10 * time.Minute + indexMaxBlobs = 50000 + indexMaxAge = 10 * time.Minute ) // IndexFull returns true iff the index is "full enough" to be saved as a preliminary index. 
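Stepping back to the AssociatedSet introduced above before the IndexFull changes: a minimal sketch of how it might be consumed. `trackBlobState` is an illustrative name, not code from this PR; `mi` is assumed to be a populated *index.MasterIndex whose final indexes have been merged, and `bh` a handle stored in it.

```go
package example

import (
	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// trackBlobState keeps one byte of state per blob without allocating a
// map keyed by BlobHandle: entries known to mi land in a flat per-type
// array indexed by the blob's stable MasterIndex position.
func trackBlobState(mi *index.MasterIndex, bh restic.BlobHandle) uint8 {
	states := index.NewAssociatedSet[uint8](mi)

	states.Set(bh, 1) // flat array, not the overflow map
	v, _ := states.Get(bh)
	// handles added to mi after NewAssociatedSet fall back to the overflow
	// map instead, as TestAssociatedSetWithExtendedIndex above verifies
	return v
}
```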
-var IndexFull = func(idx *Index, compress bool) bool { - idx.m.Lock() - defer idx.m.Unlock() +var IndexFull = func(idx *Index) bool { + idx.m.RLock() + defer idx.m.RUnlock() debug.Log("checking whether index %p is full", idx) @@ -106,18 +104,12 @@ var IndexFull = func(idx *Index, compress bool) bool { blobs += idx.byType[typ].len() } age := time.Since(idx.created) - var maxBlobs uint - if compress { - maxBlobs = indexMaxBlobsCompressed - } else { - maxBlobs = indexMaxBlobs - } switch { case age >= indexMaxAge: debug.Log("index %p is old enough", idx, age) return true - case blobs >= maxBlobs: + case blobs >= indexMaxBlobs: debug.Log("index %p has %d blobs", idx, blobs) return true } @@ -162,8 +154,8 @@ func (idx *Index) toPackedBlob(e *indexEntry, t restic.BlobType) restic.PackedBl // Lookup queries the index for the blob ID and returns all entries including // duplicates. Adds found entries to blobs and returns the result. func (idx *Index) Lookup(bh restic.BlobHandle, pbs []restic.PackedBlob) []restic.PackedBlob { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() idx.byType[bh.Type].foreachWithID(bh.ID, func(e *indexEntry) { pbs = append(pbs, idx.toPackedBlob(e, bh.Type)) @@ -174,8 +166,8 @@ func (idx *Index) Lookup(bh restic.BlobHandle, pbs []restic.PackedBlob) []restic // Has returns true iff the id is listed in the index. func (idx *Index) Has(bh restic.BlobHandle) bool { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() return idx.byType[bh.Type].get(bh.ID) != nil } @@ -183,8 +175,8 @@ func (idx *Index) Has(bh restic.BlobHandle) bool { // LookupSize returns the length of the plaintext content of the blob with the // given id. func (idx *Index) LookupSize(bh restic.BlobHandle) (plaintextLength uint, found bool) { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() e := idx.byType[bh.Type].get(bh.ID) if e == nil { @@ -196,30 +188,11 @@ func (idx *Index) LookupSize(bh restic.BlobHandle) (plaintextLength uint, found return uint(crypto.PlaintextLength(int(e.length))), true } -// Supersedes returns the list of indexes this index supersedes, if any. -func (idx *Index) Supersedes() restic.IDs { - return idx.supersedes -} - -// AddToSupersedes adds the ids to the list of indexes superseded by this -// index. If the index has already been finalized, an error is returned. -func (idx *Index) AddToSupersedes(ids ...restic.ID) error { - idx.m.Lock() - defer idx.m.Unlock() - - if idx.final { - return errors.New("index already finalized") - } - - idx.supersedes = append(idx.supersedes, ids...) - return nil -} - // Each passes all blobs known to the index to the callback fn. This blocks any // modification of the index. -func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) { - idx.m.Lock() - defer idx.m.Unlock() +func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error { + idx.m.RLock() + defer idx.m.RUnlock() for typ := range idx.byType { m := &idx.byType[typ] @@ -231,6 +204,7 @@ func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) { return true }) } + return ctx.Err() } type EachByPackResult struct { @@ -246,12 +220,12 @@ type EachByPackResult struct { // When the context is cancelled, the background goroutine // terminates. This blocks any modification of the index. 
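A note on the Each change above: Index.Each now surfaces ctx.Err() instead of stopping silently (MasterIndex.Each follows suit later in this diff), so callers are expected to check the result. A hedged sketch of the new convention; `countBlobs` is illustrative:

```go
package example

import (
	"context"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// countBlobs illustrates the error-returning Each: err is non-nil exactly
// when ctx was cancelled mid-iteration, so a partial count is never
// mistaken for a complete one.
func countBlobs(ctx context.Context, idx *index.Index) (int, error) {
	n := 0
	err := idx.Each(ctx, func(pb restic.PackedBlob) {
		n++
	})
	return n, err
}
```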
func (idx *Index) EachByPack(ctx context.Context, packBlacklist restic.IDSet) <-chan EachByPackResult { - idx.m.Lock() + idx.m.RLock() ch := make(chan EachByPackResult) go func() { - defer idx.m.Unlock() + defer idx.m.RUnlock() defer close(ch) byPack := make(map[restic.ID][restic.NumBlobTypes][]*indexEntry) @@ -292,8 +266,8 @@ func (idx *Index) EachByPack(ctx context.Context, packBlacklist restic.IDSet) <- // Packs returns all packs in this index func (idx *Index) Packs() restic.IDSet { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() packs := restic.NewIDSet() for _, packID := range idx.packs { @@ -354,15 +328,15 @@ func (idx *Index) generatePackList() ([]packJSON, error) { } type jsonIndex struct { - Supersedes restic.IDs `json:"supersedes,omitempty"` - Packs []packJSON `json:"packs"` + // removed: Supersedes restic.IDs `json:"supersedes,omitempty"` + Packs []packJSON `json:"packs"` } // Encode writes the JSON serialization of the index to the writer w. func (idx *Index) Encode(w io.Writer) error { debug.Log("encoding index") - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() list, err := idx.generatePackList() if err != nil { @@ -371,12 +345,29 @@ func (idx *Index) Encode(w io.Writer) error { enc := json.NewEncoder(w) idxJSON := jsonIndex{ - Supersedes: idx.supersedes, - Packs: list, + Packs: list, } return enc.Encode(idxJSON) } +// SaveIndex saves an index in the repository. +func (idx *Index) SaveIndex(ctx context.Context, repo restic.SaverUnpacked[restic.FileType]) (restic.ID, error) { + buf := bytes.NewBuffer(nil) + + err := idx.Encode(buf) + if err != nil { + return restic.ID{}, err + } + + id, err := repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes()) + ierr := idx.SetID(id) + if ierr != nil { + // logic bug + panic(ierr) + } + return id, err +} + // Finalize sets the index to final. func (idx *Index) Finalize() { debug.Log("finalizing index") @@ -389,8 +380,8 @@ func (idx *Index) Finalize() { // IDs returns the IDs of the index, if available. If the index is not yet // finalized, an error is returned. func (idx *Index) IDs() (restic.IDs, error) { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() if !idx.final { return nil, errors.New("index not finalized") @@ -422,8 +413,8 @@ func (idx *Index) SetID(id restic.ID) error { // Dump writes the pretty-printed JSON representation of the index to w. func (idx *Index) Dump(w io.Writer) error { debug.Log("dumping index") - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() list, err := idx.generatePackList() if err != nil { @@ -431,8 +422,7 @@ func (idx *Index) Dump(w io.Writer) error { } outer := jsonIndex{ - Supersedes: idx.Supersedes(), - Packs: list, + Packs: list, } buf, err := json.MarshalIndent(outer, "", " ") @@ -493,34 +483,19 @@ func (idx *Index) merge(idx2 *Index) error { } idx.ids = append(idx.ids, idx2.ids...) - idx.supersedes = append(idx.supersedes, idx2.supersedes...) return nil } -// isErrOldIndex returns true if the error may be caused by an old index -// format. -func isErrOldIndex(err error) bool { - e, ok := err.(*json.UnmarshalTypeError) - return ok && e.Value == "array" -} - // DecodeIndex unserializes an index from buf. 
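Before DecodeIndex, a quick sketch of the save path now that SaveIndex lives on Index itself. `persistIndex` is a hypothetical caller; `repo` can be anything satisfying restic.SaverUnpacked[restic.FileType]:

```go
package example

import (
	"context"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// persistIndex finalizes an index and stores it as an unpacked index file.
// SaveIndex records the resulting ID via SetID, which requires the index
// to be finalized first.
func persistIndex(ctx context.Context, idx *index.Index, repo restic.SaverUnpacked[restic.FileType]) (restic.ID, error) {
	idx.Finalize()
	return idx.SaveIndex(ctx, repo)
}
```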
-func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err error) { +func DecodeIndex(buf []byte, id restic.ID) (idx *Index, err error) { debug.Log("Start decoding index") idxJSON := &jsonIndex{} err = json.Unmarshal(buf, idxJSON) if err != nil { debug.Log("Error %v", err) - - if isErrOldIndex(err) { - debug.Log("index is probably old format, trying that") - idx, err = decodeOldIndex(buf) - return idx, err == nil, err - } - - return nil, false, errors.Wrap(err, "DecodeIndex") + return nil, errors.Wrap(err, "DecodeIndex") } idx = NewIndex() @@ -538,42 +513,23 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro }) } } - idx.supersedes = idxJSON.Supersedes idx.ids = append(idx.ids, id) idx.final = true debug.Log("done") - return idx, false, nil + return idx, nil } -// DecodeOldIndex loads and unserializes an index in the old format from rd. -func decodeOldIndex(buf []byte) (idx *Index, err error) { - debug.Log("Start decoding old index") - list := []*packJSON{} - - err = json.Unmarshal(buf, &list) - if err != nil { - debug.Log("Error %#v", err) - return nil, errors.Wrap(err, "Decode") - } +func (idx *Index) BlobIndex(bh restic.BlobHandle) int { + idx.m.RLock() + defer idx.m.RUnlock() - idx = NewIndex() - for _, pack := range list { - packID := idx.addToPacks(pack.ID) + return idx.byType[bh.Type].firstIndex(bh.ID) +} - for _, blob := range pack.Blobs { - idx.store(packID, restic.Blob{ - BlobHandle: restic.BlobHandle{ - Type: blob.Type, - ID: blob.ID}, - Offset: blob.Offset, - Length: blob.Length, - // no compressed length in the old index format - }) - } - } - idx.final = true +func (idx *Index) Len(t restic.BlobType) uint { + idx.m.RLock() + defer idx.m.RUnlock() - debug.Log("done") - return idx, nil + return idx.byType[t].len() } diff --git a/internal/index/index_parallel.go b/internal/repository/index/index_parallel.go similarity index 75% rename from internal/index/index_parallel.go rename to internal/repository/index/index_parallel.go index 3c16d049bd0..fda5123d396 100644 --- a/internal/index/index_parallel.go +++ b/internal/repository/index/index_parallel.go @@ -11,26 +11,25 @@ import ( // ForAllIndexes loads all index files in parallel and calls the given callback. // It is guaranteed that the function is not run concurrently. If the callback // returns an error, this function is cancelled and also returns that error. 
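With the oldFormat flag gone, the ForAllIndexes callback shrinks to the ID, the decoded index, and a decode error. A sketch of a caller under the new signature; `loadAllIndexes` is illustrative:

```go
package example

import (
	"context"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// loadAllIndexes aborts on the first decode error; returning nil from the
// callback instead would skip unreadable index files, which is the strategy
// the check command uses.
func loadAllIndexes(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked) error {
	return index.ForAllIndexes(ctx, lister, repo, func(id restic.ID, idx *index.Index, err error) error {
		if err != nil {
			return err
		}
		_ = idx // inspect the decoded index here
		return nil
	})
}
```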
-func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.ListerLoaderUnpacked, - fn func(id restic.ID, index *Index, oldFormat bool, err error) error) error { +func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked, + fn func(id restic.ID, index *Index, err error) error) error { // decoding an index can take quite some time such that this can be both CPU- or IO-bound // as the whole index is kept in memory anyway, a few extra workers don't matter workerCount := repo.Connections() + uint(runtime.GOMAXPROCS(0)) var m sync.Mutex - return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, size int64) error { + return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, _ int64) error { var err error var idx *Index - oldFormat := false buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id) if err == nil { - idx, oldFormat, err = DecodeIndex(buf, id) + idx, err = DecodeIndex(buf, id) } m.Lock() defer m.Unlock() - return fn(id, idx, oldFormat, err) + return fn(id, idx, err) }) } diff --git a/internal/index/index_parallel_test.go b/internal/repository/index/index_parallel_test.go similarity index 74% rename from internal/index/index_parallel_test.go rename to internal/repository/index/index_parallel_test.go index db4853e1909..96f1c2a6a35 100644 --- a/internal/index/index_parallel_test.go +++ b/internal/repository/index/index_parallel_test.go @@ -6,20 +6,18 @@ import ( "testing" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) -var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz") +var repoFixture = filepath.Join("..", "testdata", "test-repo.tar.gz") func TestRepositoryForAllIndexes(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, _, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - expectedIndexIDs := restic.NewIDSet() rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { expectedIndexIDs.Insert(id) @@ -29,7 +27,7 @@ func TestRepositoryForAllIndexes(t *testing.T) { // check that all expected indexes are loaded without errors indexIDs := restic.NewIDSet() var indexErr error - rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error { + rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error { if err != nil { indexErr = err } @@ -42,7 +40,7 @@ func TestRepositoryForAllIndexes(t *testing.T) { // must fail with the returned error iterErr := errors.New("error to pass upwards") - err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error { + err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error { return iterErr }) diff --git a/internal/index/index_test.go b/internal/repository/index/index_test.go similarity index 86% rename from internal/index/index_test.go rename to internal/repository/index/index_test.go index 4f0dbd2a0e5..93803603ded 100644 --- a/internal/index/index_test.go +++
b/internal/repository/index/index_test.go @@ -8,7 +8,7 @@ import ( "sync" "testing" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -52,11 +52,9 @@ func TestIndexSerialize(t *testing.T) { rtest.OK(t, err) idx2ID := restic.NewRandomID() - idx2, oldFormat, err := index.DecodeIndex(wr.Bytes(), idx2ID) + idx2, err := index.DecodeIndex(wr.Bytes(), idx2ID) rtest.OK(t, err) - rtest.Assert(t, idx2 != nil, - "nil returned for decoded index") - rtest.Assert(t, !oldFormat, "new index format recognized as old format") + rtest.Assert(t, idx2 != nil, "nil returned for decoded index") indexID, err := idx2.IDs() rtest.OK(t, err) rtest.Equals(t, indexID, restic.IDs{idx2ID}) @@ -122,13 +120,10 @@ func TestIndexSerialize(t *testing.T) { rtest.OK(t, err) rtest.Equals(t, restic.IDs{id}, ids) - idx3, oldFormat, err := index.DecodeIndex(wr3.Bytes(), id) + idx3, err := index.DecodeIndex(wr3.Bytes(), id) rtest.OK(t, err) - rtest.Assert(t, idx3 != nil, - "nil returned for decoded index") - rtest.Assert(t, idx3.Final(), - "decoded index is not final") - rtest.Assert(t, !oldFormat, "new index format recognized as old format") + rtest.Assert(t, idx3 != nil, "nil returned for decoded index") + rtest.Assert(t, idx3.Final(), "decoded index is not final") // all new blobs must be in the index for _, testBlob := range newtests { @@ -171,6 +166,9 @@ func TestIndexSize(t *testing.T) { err := idx.Encode(wr) rtest.OK(t, err) + rtest.Equals(t, uint(packs*blobCount), idx.Len(restic.DataBlob)) + rtest.Equals(t, uint(0), idx.Len(restic.TreeBlob)) + t.Logf("Index file size for %d blobs in %d packs is %d", blobCount*packs, packs, wr.Len()) } @@ -242,31 +240,6 @@ var docExampleV2 = []byte(` } `) -var docOldExample = []byte(` -[ { - "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", - "blobs": [ - { - "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", - "type": "data", - "offset": 0, - "length": 38 - },{ - "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", - "type": "tree", - "offset": 38, - "length": 112 - }, - { - "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", - "type": "data", - "offset": 150, - "length": 123 - } - ] -} ] -`) - var exampleTests = []struct { id, packID restic.ID tpe restic.BlobType @@ -308,11 +281,8 @@ func TestIndexUnserialize(t *testing.T) { {docExampleV1, 1}, {docExampleV2, 2}, } { - oldIdx := restic.IDs{restic.TestParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} - - idx, oldFormat, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID()) + idx, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID()) rtest.OK(t, err) - rtest.Assert(t, !oldFormat, "new index format recognized as old format") for _, test := range exampleTests { list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil) @@ -336,9 +306,7 @@ func TestIndexUnserialize(t *testing.T) { } } - rtest.Equals(t, oldIdx, idx.Supersedes()) - - blobs := listPack(idx, exampleLookupTest.packID) + blobs := listPack(t, idx, exampleLookupTest.packID) if len(blobs) != len(exampleLookupTest.blobs) { t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs)) } @@ -355,12 +323,12 @@ func TestIndexUnserialize(t *testing.T) { } } -func listPack(idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) { - idx.Each(context.TODO(), func(pb restic.PackedBlob) 
{ +func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if pb.PackID.Equal(id) { pbs = append(pbs, pb) } - }) + })) return pbs } @@ -387,7 +355,7 @@ func BenchmarkDecodeIndex(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, _, err := index.DecodeIndex(benchmarkIndexJSON, id) + _, err := index.DecodeIndex(benchmarkIndexJSON, id) rtest.OK(b, err) } } @@ -400,7 +368,7 @@ func BenchmarkDecodeIndexParallel(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, _, err := index.DecodeIndex(benchmarkIndexJSON, id) + _, err := index.DecodeIndex(benchmarkIndexJSON, id) rtest.OK(b, err) } }) @@ -426,27 +394,6 @@ func BenchmarkEncodeIndex(b *testing.B) { } } -func TestIndexUnserializeOld(t *testing.T) { - idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID()) - rtest.OK(t, err) - rtest.Assert(t, oldFormat, "old index format recognized as new format") - - for _, test := range exampleTests { - list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil) - if len(list) != 1 { - t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list) - } - blob := list[0] - - rtest.Equals(t, test.packID, blob.PackID) - rtest.Equals(t, test.tpe, blob.Type) - rtest.Equals(t, test.offset, blob.Offset) - rtest.Equals(t, test.length, blob.Length) - } - - rtest.Equals(t, 0, len(idx.Supersedes())) -} - func TestIndexPacks(t *testing.T) { idx := index.NewIndex() packs := restic.NewIDSet() diff --git a/internal/index/indexmap.go b/internal/repository/index/indexmap.go similarity index 88% rename from internal/index/indexmap.go rename to internal/repository/index/indexmap.go index 2386e01b625..6db523633b8 100644 --- a/internal/index/indexmap.go +++ b/internal/repository/index/indexmap.go @@ -99,6 +99,32 @@ func (m *indexMap) get(id restic.ID) *indexEntry { return nil } +// firstIndex returns the index of the first entry for ID id. +// This index is guaranteed to never change. +func (m *indexMap) firstIndex(id restic.ID) int { + if len(m.buckets) == 0 { + return -1 + } + + idx := -1 + h := m.hash(id) + ei := m.buckets[h] + for ei != 0 { + e := m.resolve(ei) + cur := ei + ei = e.next + if e.id != id { + continue + } + if int(cur) < idx || idx == -1 { + // casting from uint to int is unproblematic as we'd run out of memory + // before this can result in an overflow. + idx = int(cur) + } + } + return idx +} + func (m *indexMap) grow() { m.buckets = make([]uint, growthFactor*len(m.buckets)) @@ -118,9 +144,10 @@ func (m *indexMap) hash(id restic.ID) uint { // While SHA-256 should be collision-resistant, for hash table indices // we use only a few bits of it and finding collisions for those is // much easier than breaking the whole algorithm. 
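An aside on the firstIndex method added above (the hash() change continues below): a package-internal sketch of the guarantee it provides. indexMap is unexported, so this only compiles inside package index; the tests further down verify the same behaviour:

```go
// Entries are counted from 1, and duplicates keep resolving to the
// position of the earliest add; that position never changes afterwards,
// which is what makes it usable as a permanent AssociatedSet offset.
var m indexMap
var id restic.ID

m.add(id, 0, 0, 0, 0) // stored at position 1
m.add(id, 0, 0, 0, 0) // duplicate, stored at position 2
_ = m.firstIndex(id)  // 1, now and after any further adds
```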
- m.mh.Reset() - _, _ = m.mh.Write(id[:]) - h := uint(m.mh.Sum64()) + mh := maphash.Hash{} + mh.SetSeed(m.mh.Seed()) + _, _ = mh.Write(id[:]) + h := uint(mh.Sum64()) return h & uint(len(m.buckets)-1) } @@ -204,7 +231,7 @@ func (h *hashedArrayTree) Size() uint { func (h *hashedArrayTree) grow() { idx, subIdx := h.index(h.size) if int(idx) == len(h.blockList) { - // blockList is too small -> double list and block size + // blockList is too short -> double list and block size h.blockSize *= 2 h.mask = h.mask*2 + 1 h.maskShift++ diff --git a/internal/index/indexmap_test.go b/internal/repository/index/indexmap_test.go similarity index 77% rename from internal/index/indexmap_test.go rename to internal/repository/index/indexmap_test.go index a16670c7db7..f34e6a1d37d 100644 --- a/internal/index/indexmap_test.go +++ b/internal/repository/index/indexmap_test.go @@ -143,3 +143,45 @@ func BenchmarkIndexMapHash(b *testing.B) { } } } + +func TestIndexMapFirstIndex(t *testing.T) { + t.Parallel() + + var ( + id restic.ID + m indexMap + r = rand.New(rand.NewSource(98765)) + fi = make(map[restic.ID]int) + ) + + for i := 1; i <= 400; i++ { + r.Read(id[:]) + rtest.Equals(t, -1, m.firstIndex(id), "wrong firstIndex for nonexistent id") + + m.add(id, 0, 0, 0, 0) + idx := m.firstIndex(id) + rtest.Equals(t, i, idx, "unexpected index for id") + fi[id] = idx + } + // iterate over blobs, as this is a hashmap the order is effectively random + for id, idx := range fi { + rtest.Equals(t, idx, m.firstIndex(id), "wrong index returned") + } +} + +func TestIndexMapFirstIndexDuplicates(t *testing.T) { + t.Parallel() + + var ( + id restic.ID + m indexMap + r = rand.New(rand.NewSource(98765)) + ) + + r.Read(id[:]) + for i := 1; i <= 10; i++ { + m.add(id, 0, 0, 0, 0) + } + idx := m.firstIndex(id) + rtest.Equals(t, 1, idx, "unexpected index for id") +} diff --git a/internal/index/master_index.go b/internal/repository/index/master_index.go similarity index 56% rename from internal/index/master_index.go rename to internal/repository/index/master_index.go index 4c114b955d8..16923090b9d 100644 --- a/internal/index/master_index.go +++ b/internal/repository/index/master_index.go @@ -1,7 +1,6 @@ package index import ( - "bytes" "context" "fmt" "runtime" @@ -9,6 +8,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" ) @@ -17,21 +17,19 @@ type MasterIndex struct { idx []*Index pendingBlobs restic.BlobSet idxMutex sync.RWMutex - compress bool } // NewMasterIndex creates a new master index. func NewMasterIndex() *MasterIndex { - // Always add an empty final index, such that MergeFinalIndexes can merge into this. - // Note that removing this index could lead to a race condition in the rare - // situation that only two indexes exist which are saved and merged concurrently. - idx := []*Index{NewIndex()} - idx[0].Finalize() - return &MasterIndex{idx: idx, pendingBlobs: restic.NewBlobSet()} + mi := &MasterIndex{pendingBlobs: restic.NewBlobSet()} + mi.clear() + return mi } -func (mi *MasterIndex) MarkCompressed() { - mi.compress = true +func (mi *MasterIndex) clear() { + // Always add an empty final index, such that MergeFinalIndexes can merge into this. + mi.idx = []*Index{NewIndex()} + mi.idx[0].Finalize() } // Lookup queries all known Indexes for the ID and returns all matches. 
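The hash() rewrite above swaps the long-lived maphash.Hash for a throwaway instance per call: only the immutable seed is shared, so readers holding the new RLock never mutate hasher state. A sketch of the pattern, with `hashWithSeed` as a hypothetical stand-in:

```go
package example

import "hash/maphash"

// hashWithSeed mimics indexMap.hash: constructing a fresh maphash.Hash is
// cheap, and seeding it identically keeps results stable across goroutines
// without any locking around the hasher itself.
func hashWithSeed(seed maphash.Seed, key []byte) uint64 {
	var mh maphash.Hash
	mh.SetSeed(seed)
	_, _ = mh.Write(key)
	return mh.Sum64()
}
```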
@@ -208,7 +206,7 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index { continue } - if IndexFull(idx, mi.compress) { + if IndexFull(idx) { debug.Log("index %p is full", idx) idx.Finalize() list = append(list, idx) @@ -223,13 +221,16 @@ // Each runs fn on all blobs known to the index. When the context is cancelled, // the index iteration returns immediately. This blocks any modification of the index. -func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) { +func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() for _, idx := range mi.idx { - idx.Each(ctx, fn) + if err := idx.Each(ctx, fn); err != nil { + return err + } } + return nil } // MergeFinalIndexes merges all final indexes together. @@ -264,72 +265,190 @@ func (mi *MasterIndex) MergeFinalIndexes() error { return nil } -// Save saves all known indexes to index files, leaving out any -// packs whose ID is contained in packBlacklist from finalized indexes. -// It also removes the old index files and those listed in extraObsolete. -func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { - p := opts.SaveProgress - p.SetMax(uint64(len(mi.Packs(excludePacks)))) +func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, err error) error) error { + indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile) + if err != nil { + return err + } - mi.idxMutex.Lock() - defer mi.idxMutex.Unlock() + if p != nil { + var numIndexFiles uint64 + err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error { + numIndexFiles++ + return nil + }) + if err != nil { + return err + } + p.SetMax(numIndexFiles) + defer p.Done() + } - debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks) + err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, err error) error { + if p != nil { + p.Add(1) + } + if cb != nil { + err = cb(id, idx, err) + } + if err != nil { + return err + } + // special case to allow check to ignore index loading errors + if idx == nil { + return nil + } + mi.Insert(idx) + return nil + }) - newIndex := NewIndex() - obsolete := restic.NewIDSet() + if err != nil { + return err + } + + return mi.MergeFinalIndexes() +} - // track spawned goroutines using wg, create a new context which is - // cancelled as soon as an error occurs. +type MasterIndexRewriteOpts struct { + SaveProgress *progress.Counter + DeleteProgress func() *progress.Counter + DeleteReport func(id restic.ID, err error) +} + +// Rewrite removes packs whose ID is in excludePacks from all known indexes. +// It also removes the rewritten index files and those listed in extraObsolete. +// If oldIndexes is not nil, then only the indexes in this set are processed. +// This is used by repair index to only rewrite and delete the old indexes. +// +// Must not be called concurrently with any other MasterIndex operation.
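A hedged sketch of how a caller such as repair index might drive the Rewrite method defined next; `repairIndexes` is illustrative, not code from this PR:

```go
package example

import (
	"context"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// repairIndexes drops all entries for excludePacks and rewrites only the
// index files listed in oldIndexes; passing nil for oldIndexes would
// process every known index instead.
func repairIndexes(ctx context.Context, mi *index.MasterIndex, repo restic.Unpacked[restic.FileType], excludePacks, oldIndexes restic.IDSet) error {
	return mi.Rewrite(ctx, repo, excludePacks, oldIndexes, nil, index.MasterIndexRewriteOpts{})
}
```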
+func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked[restic.FileType], excludePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, opts MasterIndexRewriteOpts) error { + for _, idx := range mi.idx { + if !idx.Final() { + panic("internal error - index must be saved before calling MasterIndex.Rewrite") + } + } + + var indexes restic.IDSet + if oldIndexes != nil { + // repair index adds new index entries for already existing pack files + // only remove the old (possibly broken) entries by only processing old indexes + indexes = oldIndexes + } else { + indexes = mi.IDs() + } + + p := opts.SaveProgress + p.SetMax(uint64(len(indexes))) + + // reset state which is not necessary for Rewrite and just consumes a lot of memory + // the index state would be invalid after Rewrite completes anyway + mi.clear() + runtime.GC() + + // copy excludePacks to prevent unintended side effects + excludePacks = excludePacks.Clone() + if excludePacks == nil { + excludePacks = restic.NewIDSet() + } + debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(indexes), excludePacks) wg, wgCtx := errgroup.WithContext(ctx) - ch := make(chan *Index) + idxCh := make(chan restic.ID) + wg.Go(func() error { + defer close(idxCh) + for id := range indexes { + select { + case idxCh <- id: + case <-wgCtx.Done(): + return wgCtx.Err() + } + } + return nil + }) + var rewriteWg sync.WaitGroup + type rewriteTask struct { + idx *Index + } + rewriteCh := make(chan rewriteTask) + loader := func() error { + defer rewriteWg.Done() + for id := range idxCh { + buf, err := repo.LoadUnpacked(wgCtx, restic.IndexFile, id) + if err != nil { + return fmt.Errorf("LoadUnpacked(%v): %w", id.Str(), err) + } + idx, err := DecodeIndex(buf, id) + if err != nil { + return err + } + + select { + case rewriteCh <- rewriteTask{idx}: + case <-wgCtx.Done(): + return wgCtx.Err() + } + + } + return nil + } + // loading an index can take quite some time such that this is probably CPU-bound + // the index files are probably already cached at this point + loaderCount := runtime.GOMAXPROCS(0) + // run workers on ch + for i := 0; i < loaderCount; i++ { + rewriteWg.Add(1) + wg.Go(loader) + } wg.Go(func() error { - defer close(ch) - for i, idx := range mi.idx { - if idx.Final() { - ids, err := idx.IDs() - if err != nil { - debug.Log("index %d does not have an ID: %v", err) - return err - } + rewriteWg.Wait() + close(rewriteCh) + return nil + }) - debug.Log("adding index ids %v to supersedes field", ids) + obsolete := restic.NewIDSet(extraObsolete...) + saveCh := make(chan *Index) - err = newIndex.AddToSupersedes(ids...)
- if err != nil { - return err - } - obsolete.Merge(restic.NewIDSet(ids...)) - } else { - debug.Log("index %d isn't final, don't add to supersedes field", i) + wg.Go(func() error { + defer close(saveCh) + newIndex := NewIndex() + for task := range rewriteCh { + // always rewrite indexes that include a pack that must be removed or that are not full + if len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx) { + // make sure that each pack is only stored exactly once in the index + excludePacks.Merge(task.idx.Packs()) + // index is already up to date + p.Add(1) + continue } - debug.Log("adding index %d", i) + ids, err := task.idx.IDs() + if err != nil || len(ids) != 1 { + panic("internal error, index has no ID") + } + obsolete.Merge(restic.NewIDSet(ids...)) - for pbs := range idx.EachByPack(wgCtx, excludePacks) { + for pbs := range task.idx.EachByPack(wgCtx, excludePacks) { newIndex.StorePack(pbs.PackID, pbs.Blobs) - p.Add(1) - if IndexFull(newIndex, mi.compress) { + if IndexFull(newIndex) { select { - case ch <- newIndex: + case saveCh <- newIndex: case <-wgCtx.Done(): return wgCtx.Err() } newIndex = NewIndex() } } + if wgCtx.Err() != nil { + return wgCtx.Err() + } + // make sure that each pack is only stored exactly once in the index + excludePacks.Merge(task.idx.Packs()) + p.Add(1) } - err := newIndex.AddToSupersedes(extraObsolete...) - if err != nil { - return err - } - obsolete.Merge(restic.NewIDSet(extraObsolete...)) - select { - case ch <- newIndex: + case saveCh <- newIndex: case <-wgCtx.Done(): } return nil @@ -337,17 +456,21 @@ // a worker receives an index from ch, and saves the index worker := func() error { - for idx := range ch { + for idx := range saveCh { idx.Finalize() - if _, err := SaveIndex(wgCtx, repo, idx); err != nil { + if len(idx.packs) == 0 { + continue + } + if _, err := idx.SaveIndex(wgCtx, repo); err != nil { return err } } return nil } - // encoding an index can take quite some time such that this can be both CPU- or IO-bound - workerCount := int(repo.Connections()) + runtime.GOMAXPROCS(0) + // encoding an index can take quite some time such that this can be CPU- or IO-bound + // do not add repo.Connections() here as there are already the loader goroutines. + workerCount := runtime.GOMAXPROCS(0) // run workers on ch for i := 0; i < workerCount; i++ { wg.Go(worker) @@ -355,11 +478,7 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude err := wg.Wait() p.Done() if err != nil { - return err - } - - if opts.SkipDeletion { - return nil + return fmt.Errorf("failed to rewrite indexes: %w", err) } p = nil @@ -375,30 +494,91 @@ }, p) } -// SaveIndex saves an index in the repository. -func SaveIndex(ctx context.Context, repo restic.SaverUnpacked, index *Index) (restic.ID, error) { - buf := bytes.NewBuffer(nil) +// SaveFallback saves all known indexes to index files, leaving out any +// packs whose ID is contained in excludePacks from finalized indexes. +// It is only intended for use by prune with the UnsafeRecovery option. +// +// Must not be called concurrently with any other MasterIndex operation.
+func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemoverUnpacked[restic.FileType], excludePacks restic.IDSet, p *progress.Counter) error { + p.SetMax(uint64(len(mi.Packs(excludePacks)))) - err := index.Encode(buf) - if err != nil { - return restic.ID{}, err + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks) + + obsolete := restic.NewIDSet() + wg, wgCtx := errgroup.WithContext(ctx) + + ch := make(chan *Index) + wg.Go(func() error { + defer close(ch) + newIndex := NewIndex() + for _, idx := range mi.idx { + if idx.Final() { + ids, err := idx.IDs() + if err != nil { + panic("internal error - finalized index without ID") + } + debug.Log("adding index ids %v to supersedes field", ids) + obsolete.Merge(restic.NewIDSet(ids...)) + } + + for pbs := range idx.EachByPack(wgCtx, excludePacks) { + newIndex.StorePack(pbs.PackID, pbs.Blobs) + p.Add(1) + if IndexFull(newIndex) { + select { + case ch <- newIndex: + case <-wgCtx.Done(): + return wgCtx.Err() + } + newIndex = NewIndex() + } + } + if wgCtx.Err() != nil { + return wgCtx.Err() + } + } + + select { + case ch <- newIndex: + case <-wgCtx.Done(): + } + return nil + }) + + // a worker receives an index from ch, and saves the index + worker := func() error { + for idx := range ch { + idx.Finalize() + if _, err := idx.SaveIndex(wgCtx, repo); err != nil { + return err + } + } + return nil } - id, err := repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes()) - ierr := index.SetID(id) - if ierr != nil { - // logic bug - panic(ierr) + // keep concurrency bounded as we're on a fallback path + workerCount := int(repo.Connections()) + // run workers on ch + for i := 0; i < workerCount; i++ { + wg.Go(worker) } - return id, err + err := wg.Wait() + p.Done() + // the index no longer matches the stored state + mi.clear() + + return err } // saveIndex saves all indexes in the backend. -func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked, indexes ...*Index) error { +func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType], indexes ...*Index) error { for i, idx := range indexes { debug.Log("Saving index %d", i) - sid, err := SaveIndex(ctx, r, idx) + sid, err := idx.SaveIndex(ctx, r) if err != nil { return err } @@ -410,12 +590,12 @@ func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked, in } // SaveIndex saves all new indexes in the backend. -func (mi *MasterIndex) SaveIndex(ctx context.Context, r restic.SaverUnpacked) error { +func (mi *MasterIndex) SaveIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error { return mi.saveIndex(ctx, r, mi.finalizeNotFinalIndexes()...) } // SaveFullIndex saves all full indexes in the backend. -func (mi *MasterIndex) SaveFullIndex(ctx context.Context, r restic.SaverUnpacked) error { +func (mi *MasterIndex) SaveFullIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error { return mi.saveIndex(ctx, r, mi.finalizeFullIndexes()...)
} @@ -426,10 +606,6 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan defer close(out) // only resort a part of the index to keep the memory overhead bounded for i := byte(0); i < 16; i++ { - if ctx.Err() != nil { - return - } - packBlob := make(map[restic.ID][]restic.Blob) for pack := range packs { if pack[0]&0xf == i { @@ -439,11 +615,14 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan if len(packBlob) == 0 { continue } - mi.Each(ctx, func(pb restic.PackedBlob) { + err := mi.Each(ctx, func(pb restic.PackedBlob) { if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i { packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob) } }) + if err != nil { + return + } // pass on packs for packID, pbs := range packBlob { @@ -459,3 +638,21 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan }() return out } + +// Only for use by AssociatedSet +func (mi *MasterIndex) blobIndex(h restic.BlobHandle) int { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + // other indexes are ignored as their ids can change when merged into the main index + return mi.idx[0].BlobIndex(h) +} + +// Only for use by AssociatedSet +func (mi *MasterIndex) stableLen(t restic.BlobType) uint { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + // other indexes are ignored as their ids can change when merged into the main index + return mi.idx[0].Len(t) +} diff --git a/internal/index/master_index_test.go b/internal/repository/index/master_index_test.go similarity index 67% rename from internal/index/master_index_test.go rename to internal/repository/index/master_index_test.go index dcf6a94f6e9..516ef045c28 100644 --- a/internal/index/master_index_test.go +++ b/internal/repository/index/master_index_test.go @@ -10,8 +10,8 @@ import ( "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/crypto" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -161,14 +161,17 @@ func TestMasterMergeFinalIndexes(t *testing.T) { mIdx.Insert(idx1) mIdx.Insert(idx2) - finalIndexes, idxCount := index.TestMergeIndex(t, mIdx) + rtest.Equals(t, restic.NewIDSet(), mIdx.IDs()) + + finalIndexes, idxCount, ids := index.TestMergeIndex(t, mIdx) rtest.Equals(t, []*index.Index{idx1, idx2}, finalIndexes) rtest.Equals(t, 1, idxCount) + rtest.Equals(t, ids, mIdx.IDs()) blobCount := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) blobs := mIdx.Lookup(bhInIdx1) @@ -186,9 +189,11 @@ func TestMasterMergeFinalIndexes(t *testing.T) { idx3.StorePack(blob2.PackID, []restic.Blob{blob2.Blob}) mIdx.Insert(idx3) - finalIndexes, idxCount = index.TestMergeIndex(t, mIdx) + finalIndexes, idxCount, newIDs := index.TestMergeIndex(t, mIdx) rtest.Equals(t, []*index.Index{idx3}, finalIndexes) rtest.Equals(t, 1, idxCount) + ids.Merge(newIDs) + rtest.Equals(t, ids, mIdx.IDs()) // Index should have same entries as before! 
blobs = mIdx.Lookup(bhInIdx1) @@ -198,9 +203,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, []restic.PackedBlob{blob2}, blobs) blobCount = 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) } @@ -319,9 +324,9 @@ func BenchmarkMasterIndexEach(b *testing.B) { for i := 0; i < b.N; i++ { entries := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(b, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { entries++ - }) + })) } } @@ -341,13 +346,13 @@ var ( depth = 3 ) -func createFilledRepo(t testing.TB, snapshots int, version uint) restic.Repository { - repo := repository.TestRepositoryWithVersion(t, version) +func createFilledRepo(t testing.TB, snapshots int, version uint) (restic.Repository, restic.Unpacked[restic.FileType]) { + repo, unpacked, _ := repository.TestRepositoryWithVersion(t, version) for i := 0; i < snapshots; i++ { restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth) } - return repo + return repo, unpacked } func TestIndexSave(t *testing.T) { @@ -355,46 +360,102 @@ } func testIndexSave(t *testing.T, version uint) { - repo := createFilledRepo(t, 3, version) + for _, test := range []struct { + name string + saver func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error + }{ + {"rewrite no-op", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error { + return idx.Rewrite(context.TODO(), repo, nil, nil, nil, index.MasterIndexRewriteOpts{}) + }}, + {"rewrite skip-all", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error { + return idx.Rewrite(context.TODO(), repo, nil, restic.NewIDSet(), nil, index.MasterIndexRewriteOpts{}) + }}, + {"SaveFallback", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error { + err := restic.ParallelRemove(context.TODO(), repo, idx.IDs(), restic.IndexFile, nil, nil) + if err != nil { + return err + } + return idx.SaveFallback(context.TODO(), repo, restic.NewIDSet(), nil) + }}, + } { + t.Run(test.name, func(t *testing.T) { + repo, unpacked := createFilledRepo(t, 3, version) + + idx := index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + blobs := make(map[restic.PackedBlob]struct{}) + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + blobs[pb] = struct{}{} + })) + + rtest.OK(t, test.saver(idx, unpacked)) + idx = index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + if _, ok := blobs[pb]; ok { + delete(blobs, pb) + } else { + t.Fatalf("unexpected blobs %v", pb) + } + })) + rtest.Equals(t, 0, len(blobs), "saved index is missing blobs") - err := repo.LoadIndex(context.TODO(), nil) - if err != nil { - t.Fatal(err) + checker.TestCheckRepo(t, repo, false) + }) } +} - err = repo.Index().Save(context.TODO(), repo, nil, nil, restic.MasterIndexSaveOpts{}) - if err != nil { - t.Fatalf("unable to save new index: %v", err) - } +func TestIndexSavePartial(t *testing.T) { + repository.TestAllVersions(t, testIndexSavePartial) +} - checker := checker.New(repo, false) - err = checker.LoadSnapshots(context.TODO()) - if err != nil { - t.Error(err) - } +func testIndexSavePartial(t *testing.T, version uint) { + repo, unpacked := createFilledRepo(t, 3, version) + + // capture blob list before adding fourth snapshot + idx
:= index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + blobs := make(map[restic.PackedBlob]struct{}) + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + blobs[pb] = struct{}{} + })) + + // add+remove new snapshot and track its pack files + packsBefore := listPacks(t, repo) + sn := restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(4)*time.Second), depth) + rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.WriteableSnapshotFile, *sn.ID())) + packsAfter := listPacks(t, repo) + newPacks := packsAfter.Sub(packsBefore) + + // rewrite index and remove pack files of new snapshot + idx = index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + rtest.OK(t, idx.Rewrite(context.TODO(), unpacked, newPacks, nil, nil, index.MasterIndexRewriteOpts{})) + + // check blobs + idx = index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + if _, ok := blobs[pb]; ok { + delete(blobs, pb) + } else { + t.Fatalf("unexpected blobs %v", pb) + } + })) + rtest.Equals(t, 0, len(blobs), "saved index is missing blobs") - hints, errs := checker.LoadIndex(context.TODO(), nil) - for _, h := range hints { - t.Logf("hint: %v\n", h) - } + // remove pack files to make check happy + rtest.OK(t, restic.ParallelRemove(context.TODO(), unpacked, newPacks, restic.PackFile, nil, nil)) - for _, err := range errs { - t.Errorf("checker found error: %v", err) - } + checker.TestCheckRepo(t, repo, false) +} - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - errCh := make(chan error) - go checker.Structure(ctx, nil, errCh) - i := 0 - for err := range errCh { - t.Errorf("checker returned error: %v", err) - i++ - if i == 10 { - t.Errorf("more than 10 errors returned, skipping the rest") - cancel() - break - } - } +func listPacks(t testing.TB, repo restic.Lister) restic.IDSet { + s := restic.NewIDSet() + rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, _ int64) error { + s.Insert(id) + return nil + })) + return s } diff --git a/internal/index/testing.go b/internal/repository/index/testing.go similarity index 66% rename from internal/index/testing.go rename to internal/repository/index/testing.go index 7c05ac6511d..0b5084bb02b 100644 --- a/internal/index/testing.go +++ b/internal/repository/index/testing.go @@ -7,12 +7,15 @@ import ( "github.com/restic/restic/internal/test" ) -func TestMergeIndex(t testing.TB, mi *MasterIndex) ([]*Index, int) { +func TestMergeIndex(t testing.TB, mi *MasterIndex) ([]*Index, int, restic.IDSet) { finalIndexes := mi.finalizeNotFinalIndexes() + ids := restic.NewIDSet() for _, idx := range finalIndexes { - test.OK(t, idx.SetID(restic.NewRandomID())) + id := restic.NewRandomID() + ids.Insert(id) + test.OK(t, idx.SetID(id)) } test.OK(t, mi.MergeFinalIndexes()) - return finalIndexes, len(mi.idx) + return finalIndexes, len(mi.idx), ids } diff --git a/internal/repository/key.go b/internal/repository/key.go index 5f7a932970d..08f997544bd 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -43,11 +43,11 @@ type Key struct { id restic.ID } -// Params tracks the parameters used for the KDF. If not set, it will be +// params tracks the parameters used for the KDF. If not set, it will be // calibrated on the first run of AddKey(). -var Params *crypto.Params +var params *crypto.Params -var ( +const ( // KDFTimeout specifies the maximum runtime for the KDF. 
KDFTimeout = 500 * time.Millisecond @@ -136,7 +136,7 @@ func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int, defer cancel() // try at most maxKeys keys in repo - err = s.List(listCtx, restic.KeyFile, func(id restic.ID, size int64) error { + err = s.List(listCtx, restic.KeyFile, func(id restic.ID, _ int64) error { checked++ if maxKeys > 0 && checked > maxKeys { return ErrMaxKeysReached @@ -178,8 +178,7 @@ func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int, // LoadKey loads a key from the backend. func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err error) { - h := backend.Handle{Type: restic.KeyFile, Name: id.String()} - data, err := backend.LoadAll(ctx, nil, s.be, h) + data, err := s.LoadRaw(ctx, restic.KeyFile, id) if err != nil { return nil, err } @@ -196,13 +195,13 @@ func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err erro // AddKey adds a new key to an already existing repository. func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) { // make sure we have valid KDF parameters - if Params == nil { + if params == nil { p, err := crypto.Calibrate(KDFTimeout, KDFMemory) if err != nil { return nil, errors.Wrap(err, "Calibrate") } - Params = &p + params = &p debug.Log("calibrated KDF parameters are %v", p) } @@ -213,9 +212,9 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str Hostname: hostname, KDF: "scrypt", - N: Params.N, - R: Params.R, - P: Params.P, + N: params.N, + R: params.R, + P: params.P, } if newkey.Hostname == "" { @@ -237,7 +236,7 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str } // call KDF to derive user key - newkey.user, err = crypto.KDF(*Params, newkey.Salt, password) + newkey.user, err = crypto.KDF(*params, newkey.Salt, password) if err != nil { return nil, err } diff --git a/internal/repository/lock.go b/internal/repository/lock.go new file mode 100644 index 00000000000..a5019523343 --- /dev/null +++ b/internal/repository/lock.go @@ -0,0 +1,307 @@ +package repository + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +type lockContext struct { + lock *restic.Lock + cancel context.CancelFunc + refreshWG sync.WaitGroup +} + +type locker struct { + retrySleepStart time.Duration + retrySleepMax time.Duration + refreshInterval time.Duration + refreshabilityTimeout time.Duration +} + +const defaultRefreshInterval = 5 * time.Minute + +var lockerInst = &locker{ + retrySleepStart: 5 * time.Second, + retrySleepMax: 60 * time.Second, + refreshInterval: defaultRefreshInterval, + // consider a lock refresh failed a bit before the lock actually becomes stale + // the difference allows to compensate for a small time drift between clients. 
+ refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2, +} + +func Lock(ctx context.Context, repo *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { + return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger) +} + +// Lock wraps the ctx such that it is cancelled when the repository is unlocked +// cancelling the original context also stops the lock refresh +func (l *locker) Lock(ctx context.Context, r *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { + var lock *restic.Lock + var err error + + retrySleep := minDuration(l.retrySleepStart, retryLock) + retryMessagePrinted := false + retryTimeout := time.After(retryLock) + + repo := &internalRepository{r} + +retryLoop: + for { + lock, err = restic.NewLock(ctx, repo, exclusive) + if err != nil && restic.IsAlreadyLocked(err) { + + if !retryMessagePrinted { + printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock)) + retryMessagePrinted = true + } + + debug.Log("repo already locked, retrying in %v", retrySleep) + retrySleepCh := time.After(retrySleep) + + select { + case <-ctx.Done(): + return nil, ctx, ctx.Err() + case <-retryTimeout: + debug.Log("repo already locked, timeout expired") + // Last lock attempt + lock, err = restic.NewLock(ctx, repo, exclusive) + break retryLoop + case <-retrySleepCh: + retrySleep = minDuration(retrySleep*2, l.retrySleepMax) + } + } else { + // anything else, either a successful lock or another error + break retryLoop + } + } + if restic.IsInvalidLock(err) { + return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. 
Make sure that no other restic process is accessing the repository when running the command", err) + } + if err != nil { + return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) + } + debug.Log("create lock %p (exclusive %v)", lock, exclusive) + + ctx, cancel := context.WithCancel(ctx) + lockInfo := &lockContext{ + lock: lock, + cancel: cancel, + } + lockInfo.refreshWG.Add(2) + refreshChan := make(chan struct{}) + forceRefreshChan := make(chan refreshLockRequest) + + go l.refreshLocks(ctx, repo.be, lockInfo, refreshChan, forceRefreshChan, logger) + go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + + return &Unlocker{lockInfo}, ctx, nil +} + +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} + +type refreshLockRequest struct { + result chan bool +} + +func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { + debug.Log("start") + lock := lockInfo.lock + ticker := time.NewTicker(l.refreshInterval) + lastRefresh := lock.Time + + defer func() { + ticker.Stop() + // ensure that the context was cancelled before removing the lock + lockInfo.cancel() + + // remove the lock from the repo + debug.Log("unlocking repository with lock %v", lock) + if err := lock.Unlock(ctx); err != nil { + debug.Log("error while unlocking: %v", err) + logger("error while unlocking: %v", err) + } + + lockInfo.refreshWG.Done() + }() + + for { + select { + case <-ctx.Done(): + debug.Log("terminate") + return + + case req := <-forceRefresh: + debug.Log("trying to refresh stale lock") + // keep on going if our current lock still exists + success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger) + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case req.result <- success: + } + + if success { + // update lock refresh time + lastRefresh = lock.Time + } + + case <-ticker.C: + if time.Since(lastRefresh) > l.refreshabilityTimeout { + // the lock is too old, wait until the expiry monitor cancels the context + continue + } + + debug.Log("refreshing locks") + err := lock.Refresh(context.TODO()) + if err != nil { + logger("unable to refresh lock: %v\n", err) + } else { + lastRefresh = lock.Time + // inform monitor goroutine about successful refresh + select { + case <-ctx.Done(): + case refreshed <- struct{}{}: + } + } + } + } +} + +func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { + // time.Now() might use a monotonic timer which is paused during standby + // convert to unix time to ensure we compare real time values + lastRefresh := time.Now().UnixNano() + pollDuration := 1 * time.Second + if l.refreshInterval < pollDuration { + // required for TestLockFailedRefresh + pollDuration = l.refreshInterval / 5 + } + // timers are paused during standby, which is a problem as the refresh timeout + // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks + // https://github.com/golang/go/issues/35012 + ticker := time.NewTicker(pollDuration) + defer func() { + ticker.Stop() + lockInfo.cancel() + lockInfo.refreshWG.Done() + }() + + var refreshStaleLockResult chan bool + + for { + select { + case <-ctx.Done(): + debug.Log("terminate expiry monitoring") + return + case <-refreshed: + if refreshStaleLockResult != nil { + // ignore delayed refresh notifications while the stale lock is refreshed + continue + } + lastRefresh = time.Now().UnixNano() + case <-ticker.C: + if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + continue + } + + debug.Log("trying to refreshStaleLock") + // keep on going if our current lock still exists + refreshReq := refreshLockRequest{ + result: make(chan bool), + } + refreshStaleLockResult = refreshReq.result + + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case forceRefresh <- refreshReq: + } + case success := <-refreshStaleLockResult: + if success { + lastRefresh = time.Now().UnixNano() + refreshStaleLockResult = nil + continue + } + + logger("Fatal: failed to refresh lock in time\n") + return + } + } +} + +func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool { + freeze := backend.AsBackend[backend.FreezeBackend](be) + if freeze != nil { + debug.Log("freezing backend") + freeze.Freeze() + defer freeze.Unfreeze() + } + + err := lock.RefreshStaleLock(ctx) + if err != nil { + logger("failed to refresh stale lock: %v\n", err) + // cancel context while the backend is still frozen to prevent accidental modifications + cancel() + return false + } + + return true +} + +type Unlocker struct { + info *lockContext +} + +func (l *Unlocker) Unlock() { + l.info.cancel() + l.info.refreshWG.Wait() +} + +// RemoveStaleLocks deletes all locks detected as stale from the repository. +func RemoveStaleLocks(ctx context.Context, repo *Repository) (uint, error) { + var processed uint + err := restic.ForAllLocks(ctx, repo, nil, func(id restic.ID, lock *restic.Lock, err error) error { + if err != nil { + // ignore locks that cannot be loaded + debug.Log("ignore lock %v: %v", id, err) + return nil + } + + if lock.Stale() { + err = (&internalRepository{repo}).RemoveUnpacked(ctx, restic.LockFile, id) + if err == nil { + processed++ + } + return err + } + + return nil + }) + return processed, err +} + +// RemoveAllLocks removes all locks forcefully. 
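// [Editor's note: the sketch below is illustrative and not part of this patch;
// the diff resumes with RemoveAllLocks right after it.]
// The new locking API returns an Unlocker together with a context derived from
// the caller's context that is cancelled once the lock can no longer be
// refreshed (for example after a long standby, which monitorLockRefresh above
// detects via wall-clock time, see golang/go#35012). A hypothetical caller
// would run all repository work under the wrapped context:
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/restic/restic/internal/repository"
)

// withLockedRepo is a hypothetical helper, not part of restic.
func withLockedRepo(ctx context.Context, repo *repository.Repository, fn func(ctx context.Context) error) error {
	// wait up to one minute for a concurrent lock holder to go away
	unlocker, wrappedCtx, err := repository.Lock(ctx, repo, false, time.Minute,
		func(msg string) { fmt.Print(msg) },                                      // printRetry
		func(format string, args ...interface{}) { fmt.Printf(format, args...) }, // logger
	)
	if err != nil {
		return err
	}
	defer unlocker.Unlock()

	// wrappedCtx is cancelled if the lock is lost, aborting fn early
	return fn(wrappedCtx)
}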
+func RemoveAllLocks(ctx context.Context, repo *Repository) (uint, error) { + var processed uint32 + err := restic.ParallelList(ctx, repo, restic.LockFile, repo.Connections(), func(ctx context.Context, id restic.ID, _ int64) error { + err := (&internalRepository{repo}).RemoveUnpacked(ctx, restic.LockFile, id) + if err == nil { + atomic.AddUint32(&processed, 1) + } + return err + }) + return uint(processed), err +} diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go new file mode 100644 index 00000000000..c31221e429a --- /dev/null +++ b/internal/repository/lock_test.go @@ -0,0 +1,384 @@ +package repository + +import ( + "context" + "fmt" + "os" + "runtime" + "strings" + "sync" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type backendWrapper func(r backend.Backend) (backend.Backend, error) + +func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*Repository, backend.Backend) { + be := backend.Backend(mem.New()) + // initialize repo + TestRepositoryWithBackend(t, be, 0, Options{}) + + // reopen repository to allow injecting a backend wrapper + if wrapper != nil { + var err error + be, err = wrapper(be) + rtest.OK(t, err) + } + + return TestOpenBackend(t, be), be +} + +func checkedLockRepo(ctx context.Context, t *testing.T, repo *Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { + lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + rtest.OK(t, err) + rtest.OK(t, wrappedCtx.Err()) + if lock.info.lock.Stale() { + t.Fatal("lock returned stale lock") + } + return lock, wrappedCtx +} + +func TestLock(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, nil) + + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0) + lock.Unlock() + if wrappedCtx.Err() == nil { + t.Fatal("unlock did not cancel context") + } +} + +func TestLockCancel(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0) + cancel() + if wrappedCtx.Err() == nil { + t.Fatal("canceled parent context did not cancel context") + } + + // Unlock should not crash + lock.Unlock() +} + +func TestLockConflict(t *testing.T) { + t.Parallel() + repo, be := openLockTestRepo(t, nil) + repo2 := TestOpenBackend(t, be) + + lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) + rtest.OK(t, err) + defer lock.Unlock() + _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) + if err == nil { + t.Fatal("second lock should have failed") + } + rtest.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err) +} + +type writeOnceBackend struct { + backend.Backend + written bool +} + +func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { + if b.written { + return fmt.Errorf("fail after first write") + } + b.written = true + return b.Backend.Save(ctx, h, rd) +} + +func TestLockFailedRefresh(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + 
return &writeOnceBackend{Backend: r}, nil + }) + + // reduce locking intervals to be suitable for testing + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 20 * time.Millisecond, + refreshabilityTimeout: 100 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) + + select { + case <-wrappedCtx.Done(): + // expected lock refresh failure + case <-time.After(time.Second): + t.Fatal("failed lock refresh did not cause context cancellation") + } + // Unlock should not crash + lock.Unlock() +} + +type loggingBackend struct { + backend.Backend + t *testing.T +} + +func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { + b.t.Logf("save %v @ %v", h, time.Now()) + err := b.Backend.Save(ctx, h, rd) + b.t.Logf("save finished %v @ %v", h, time.Now()) + return err +} + +func TestLockSuccessfulRefresh(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + return &loggingBackend{ + Backend: r, + t: t, + }, nil + }) + + t.Logf("test for successful lock refresh %v", time.Now()) + // reduce locking intervals to be suitable for testing + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 60 * time.Millisecond, + refreshabilityTimeout: 500 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) + + select { + case <-wrappedCtx.Done(): + // don't call t.Fatal to allow the lock to be properly cleaned up + t.Error("lock refresh failed", time.Now()) + + // Dump full stacktrace + buf := make([]byte, 1024*1024) + n := runtime.Stack(buf, true) + buf = buf[:n] + t.Log(string(buf)) + + case <-time.After(2 * li.refreshabilityTimeout): + // expected lock refresh to work + } + // Unlock should not crash + lock.Unlock() +} + +type slowBackend struct { + backend.Backend + m sync.Mutex + sleep time.Duration +} + +func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { + b.m.Lock() + sleep := b.sleep + b.m.Unlock() + time.Sleep(sleep) + return b.Backend.Save(ctx, h, rd) +} + +func TestLockSuccessfulStaleRefresh(t *testing.T) { + t.Parallel() + var sb *slowBackend + repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + sb = &slowBackend{Backend: r} + return sb, nil + }) + + t.Logf("test for successful lock refresh %v", time.Now()) + // reduce locking intervals to be suitable for testing + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 10 * time.Millisecond, + refreshabilityTimeout: 50 * time.Millisecond, + } + + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) + // delay lock refreshing long enough that the lock would expire + sb.m.Lock() + sb.sleep = li.refreshabilityTimeout + li.refreshInterval + sb.m.Unlock() + + select { + case <-wrappedCtx.Done(): + // don't call t.Fatal to allow the lock to be properly cleaned up + t.Error("lock refresh failed", time.Now()) + + case <-time.After(li.refreshabilityTimeout): + } + // reset slow backend + sb.m.Lock() + sb.sleep = 0 + sb.m.Unlock() + debug.Log("normal lock period has expired") + + select { + case <-wrappedCtx.Done(): + // don't call t.Fatal to allow the lock to be properly cleaned up + t.Error("lock refresh failed", time.Now()) + + case <-time.After(3 * 
li.refreshabilityTimeout): + // expected lock refresh to work + } + + // Unlock should not crash + lock.Unlock() +} + +func TestLockWaitTimeout(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, nil) + + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) + rtest.OK(t, err) + defer elock.Unlock() + + retryLock := 200 * time.Millisecond + + start := time.Now() + _, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + duration := time.Since(start) + + rtest.Assert(t, err != nil, + "create normal lock with exclusively locked repo didn't return an error") + rtest.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"), + "create normal lock with exclusively locked repo didn't return the correct error") + rtest.Assert(t, retryLock <= duration && duration < retryLock*3/2, + "create normal lock with exclusively locked repo didn't wait for the specified timeout") +} + +func TestLockWaitCancel(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, nil) + + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) + rtest.OK(t, err) + defer elock.Unlock() + + retryLock := 200 * time.Millisecond + cancelAfter := 40 * time.Millisecond + + start := time.Now() + ctx, cancel := context.WithCancel(context.TODO()) + time.AfterFunc(cancelAfter, cancel) + + _, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + duration := time.Since(start) + + rtest.Assert(t, err != nil, + "create normal lock with exclusively locked repo didn't return an error") + rtest.Assert(t, strings.Contains(err.Error(), "context canceled"), + "create normal lock with exclusively locked repo didn't return the correct error") + rtest.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, + "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) +} + +func TestLockWaitSuccess(t *testing.T) { + t.Parallel() + repo, _ := openLockTestRepo(t, nil) + + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) + rtest.OK(t, err) + + retryLock := 200 * time.Millisecond + unlockAfter := 40 * time.Millisecond + + time.AfterFunc(unlockAfter, func() { + elock.Unlock() + }) + + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + rtest.OK(t, err) + lock.Unlock() +} + +func createFakeLock(repo *Repository, t time.Time, pid int) (restic.ID, error) { + hostname, err := os.Hostname() + if err != nil { + return restic.ID{}, err + } + + newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} + return restic.SaveJSONUnpacked(context.TODO(), &internalRepository{repo}, restic.LockFile, &newLock) +} + +func lockExists(repo restic.Lister, t testing.TB, lockID restic.ID) bool { + var exists bool + rtest.OK(t, repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { + if id == lockID { + exists = true + } + return nil + })) + + return exists +} + +func removeLock(repo *Repository, id restic.ID) error { + return (&internalRepository{repo}).RemoveUnpacked(context.TODO(), restic.LockFile, id) +} + +func TestLockWithStaleLock(t *testing.T) { + repo := TestRepository(t) + + id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), 
os.Getpid()) + rtest.OK(t, err) + + id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) + rtest.OK(t, err) + + id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) + rtest.OK(t, err) + + processed, err := RemoveStaleLocks(context.TODO(), repo) + rtest.OK(t, err) + + rtest.Assert(t, lockExists(repo, t, id1) == false, + "stale lock still exists after RemoveStaleLocks was called") + rtest.Assert(t, lockExists(repo, t, id2) == true, + "non-stale lock was removed by RemoveStaleLocks") + rtest.Assert(t, lockExists(repo, t, id3) == false, + "stale lock still exists after RemoveStaleLocks was called") + rtest.Assert(t, processed == 2, + "number of locks removed does not match: expected %d, got %d", + 2, processed) + + rtest.OK(t, removeLock(repo, id2)) +} + +func TestRemoveAllLocks(t *testing.T) { + repo := TestRepository(t) + + id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) + rtest.OK(t, err) + + id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) + rtest.OK(t, err) + + id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) + rtest.OK(t, err) + + processed, err := RemoveAllLocks(context.TODO(), repo) + rtest.OK(t, err) + + rtest.Assert(t, lockExists(repo, t, id1) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, lockExists(repo, t, id2) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, lockExists(repo, t, id3) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, processed == 3, + "number of locks removed does not match: expected %d, got %d", + 3, processed) +} diff --git a/internal/pack/doc.go b/internal/repository/pack/doc.go similarity index 100% rename from internal/pack/doc.go rename to internal/repository/pack/doc.go diff --git a/internal/pack/pack.go b/internal/repository/pack/pack.go similarity index 96% rename from internal/pack/pack.go rename to internal/repository/pack/pack.go index cd118ab032a..57957ce91a4 100644 --- a/internal/pack/pack.go +++ b/internal/repository/pack/pack.go @@ -239,7 +239,7 @@ func readRecords(rd io.ReaderAt, size int64, bufsize int) ([]byte, int, error) { case hlen == 0: err = InvalidFileError{Message: "header length is zero"} case hlen < crypto.Extension: - err = InvalidFileError{Message: "header length is too small"} + err = InvalidFileError{Message: "header length is too short"} case int64(hlen) > size-int64(headerLengthSize): err = InvalidFileError{Message: "header is larger than file"} case int64(hlen) > MaxHeaderSize-int64(headerLengthSize): @@ -263,7 +263,7 @@ func readRecords(rd io.ReaderAt, size int64, bufsize int) ([]byte, int, error) { func readHeader(rd io.ReaderAt, size int64) ([]byte, error) { debug.Log("size: %v", size) if size < int64(minFileSize) { - err := InvalidFileError{Message: "file is too small"} + err := InvalidFileError{Message: "file is too short"} return nil, errors.Wrap(err, "readHeader") } @@ -305,7 +305,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr } if len(buf) < crypto.CiphertextLength(0) { - return nil, 0, errors.New("invalid header, too small") + return nil, 0, errors.New("invalid header, too short") } hdrSize = headerLengthSize + uint32(len(buf)) @@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int { // If onlyHdr is set to true, only the size of the header is returned // Note that this function only gives correct sizes, if there are no // 
duplicates in the index. -func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.ID]int64 { +func Size(ctx context.Context, mi restic.ListBlobser, onlyHdr bool) (map[restic.ID]int64, error) { packSize := make(map[restic.ID]int64) - mi.Each(ctx, func(blob restic.PackedBlob) { + err := mi.ListBlobs(ctx, func(blob restic.PackedBlob) { size, ok := packSize[blob.PackID] if !ok { size = headerSize @@ -403,5 +403,5 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob)) }) - return packSize + return packSize, err } diff --git a/internal/pack/pack_internal_test.go b/internal/repository/pack/pack_internal_test.go similarity index 100% rename from internal/pack/pack_internal_test.go rename to internal/repository/pack/pack_internal_test.go diff --git a/internal/pack/pack_test.go b/internal/repository/pack/pack_test.go similarity index 98% rename from internal/pack/pack_test.go rename to internal/repository/pack/pack_test.go index 76ff5c12733..5ac14634834 100644 --- a/internal/pack/pack_test.go +++ b/internal/repository/pack/pack_test.go @@ -12,7 +12,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/crypto" - "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 22eca0c2ef7..9d53c911b24 100644 --- a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -3,26 +3,24 @@ package repository import ( "bufio" "context" + "crypto/sha256" "io" "os" - "runtime" "sync" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/hashing" + "github.com/restic/restic/internal/repository/hashing" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/pack" - - "github.com/minio/sha256-simd" + "github.com/restic/restic/internal/repository/pack" ) -// Packer holds a pack.Packer together with a hash writer. -type Packer struct { +// packer holds a pack.packer together with a hash writer. +type packer struct { *pack.Packer tmpfile *os.File bufWr *bufio.Writer @@ -32,16 +30,16 @@ type Packer struct { type packerManager struct { tpe restic.BlobType key *crypto.Key - queueFn func(ctx context.Context, t restic.BlobType, p *Packer) error + queueFn func(ctx context.Context, t restic.BlobType, p *packer) error pm sync.Mutex - packer *Packer + packer *packer packSize uint } // newPackerManager returns a new packer manager which writes temporary files // to a temporary directory -func newPackerManager(key *crypto.Key, tpe restic.BlobType, packSize uint, queueFn func(ctx context.Context, t restic.BlobType, p *Packer) error) *packerManager { +func newPackerManager(key *crypto.Key, tpe restic.BlobType, packSize uint, queueFn func(ctx context.Context, t restic.BlobType, p *packer) error) *packerManager { return &packerManager{ tpe: tpe, key: key, @@ -114,7 +112,7 @@ func (r *packerManager) SaveBlob(ctx context.Context, t restic.BlobType, id rest // findPacker returns a packer for a new blob of size bytes. 
Either a new one is // created or one is returned that already has some blobs. -func (r *packerManager) newPacker() (packer *Packer, err error) { +func (r *packerManager) newPacker() (pck *packer, err error) { debug.Log("create new pack") tmpfile, err := fs.TempFile("", "restic-temp-pack-") if err != nil { @@ -123,17 +121,17 @@ func (r *packerManager) newPacker() (packer *Packer, err error) { bufWr := bufio.NewWriter(tmpfile) p := pack.NewPacker(r.key, bufWr) - packer = &Packer{ + pck = &packer{ Packer: p, tmpfile: tmpfile, bufWr: bufWr, } - return packer, nil + return pck, nil } // savePacker stores p in the backend. -func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error { +func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packer) error { debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size()) err := p.Packer.Finalize() if err != nil { @@ -187,21 +185,10 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe return errors.Wrap(err, "close tempfile") } - // on windows the tempfile is automatically deleted on close - if runtime.GOOS != "windows" { - err = fs.RemoveIfExists(p.tmpfile.Name()) - if err != nil { - return errors.WithStack(err) - } - } - // update blobs in the index debug.Log(" updating blobs %v to pack %v", p.Packer.Blobs(), id) r.idx.StorePack(id, p.Packer.Blobs()) // Save index if full - if r.noAutoIndexUpdate { - return nil - } - return r.idx.SaveFullIndex(ctx, r) + return r.idx.SaveFullIndex(ctx, &internalRepository{r}) } diff --git a/internal/repository/packer_manager_test.go b/internal/repository/packer_manager_test.go index 8984073dae0..0f3aea05f4b 100644 --- a/internal/repository/packer_manager_test.go +++ b/internal/repository/packer_manager_test.go @@ -70,7 +70,7 @@ func testPackerManager(t testing.TB) int64 { rnd := rand.New(rand.NewSource(randomSeed)) savedBytes := int(0) - pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, tp restic.BlobType, p *Packer) error { + pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, tp restic.BlobType, p *packer) error { err := p.Finalize() if err != nil { return err @@ -92,7 +92,7 @@ func testPackerManager(t testing.TB) int64 { func TestPackerManagerWithOversizeBlob(t *testing.T) { packFiles := int(0) sizeLimit := uint(512 * 1024) - pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, func(ctx context.Context, tp restic.BlobType, p *Packer) error { + pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, func(ctx context.Context, tp restic.BlobType, p *packer) error { packFiles++ return nil }) @@ -122,7 +122,7 @@ func BenchmarkPackerManager(t *testing.B) { for i := 0; i < t.N; i++ { rnd.Seed(randomSeed) - pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, t restic.BlobType, p *Packer) error { + pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, t restic.BlobType, p *packer) error { return nil }) fillPacks(t, rnd, pm, blobBuf) diff --git a/internal/repository/packer_uploader.go b/internal/repository/packer_uploader.go index 30c8f77afc6..936e7ea1d89 100644 --- a/internal/repository/packer_uploader.go +++ b/internal/repository/packer_uploader.go @@ -7,13 +7,13 @@ import ( "golang.org/x/sync/errgroup" ) -// SavePacker implements saving a pack in the 
repository. -type SavePacker interface { - savePacker(ctx context.Context, t restic.BlobType, p *Packer) error +// savePacker implements saving a pack in the repository. +type savePacker interface { + savePacker(ctx context.Context, t restic.BlobType, p *packer) error } type uploadTask struct { - packer *Packer + packer *packer tpe restic.BlobType } @@ -21,7 +21,7 @@ type packerUploader struct { uploadQueue chan uploadTask } -func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo SavePacker, connections uint) *packerUploader { +func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo savePacker, connections uint) *packerUploader { pu := &packerUploader{ uploadQueue: make(chan uploadTask), } @@ -48,7 +48,7 @@ func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo SavePacker, return pu } -func (pu *packerUploader) QueuePacker(ctx context.Context, t restic.BlobType, p *Packer) (err error) { +func (pu *packerUploader) QueuePacker(ctx context.Context, t restic.BlobType, p *packer) (err error) { select { case <-ctx.Done(): return ctx.Err() diff --git a/internal/repository/prune.go b/internal/repository/prune.go new file mode 100644 index 00000000000..3803b6f337a --- /dev/null +++ b/internal/repository/prune.go @@ -0,0 +1,640 @@ +package repository + +import ( + "context" + "fmt" + "math" + "sort" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +var ErrIndexIncomplete = errors.Fatal("index is not complete") +var ErrPacksMissing = errors.Fatal("packs from index missing in repo") +var ErrSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") + +// PruneOptions collects all options for the cleanup command. +type PruneOptions struct { + DryRun bool + UnsafeRecovery bool + + MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused + MaxRepackBytes uint64 + + RepackCacheableOnly bool + RepackSmall bool + RepackUncompressed bool +} + +type PruneStats struct { + Blobs struct { + Used uint + Duplicate uint + Unused uint + Remove uint + Repack uint + Repackrm uint + } + Size struct { + Used uint64 + Duplicate uint64 + Unused uint64 + Remove uint64 + Repack uint64 + Repackrm uint64 + Unref uint64 + Uncompressed uint64 + } + Packs struct { + Used uint + Unused uint + PartlyUsed uint + Unref uint + Keep uint + Repack uint + Remove uint + } +} + +type PrunePlan struct { + removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) + repackPacks restic.IDSet // packs to repack + keepBlobs *index.AssociatedSet[uint8] // blobs to keep during repacking + removePacks restic.IDSet // packs to remove + ignorePacks restic.IDSet // packs to ignore when rebuilding the index + + repo *Repository + stats PruneStats + opts PruneOptions +} + +type packInfo struct { + usedBlobs uint + unusedBlobs uint + duplicateBlobs uint + usedSize uint64 + unusedSize uint64 + + tpe restic.BlobType + uncompressed bool +} + +type packInfoWithID struct { + ID restic.ID + packInfo + mustCompress bool +} + +// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. +// Also some summary statistics are returned. 
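// [Editor's note: illustrative sketch, not part of this patch; the diff
// resumes with PlanPrune right after it.]
// PlanPrune and PrunePlan.Execute split prune into a planning phase and an
// execution phase, mirroring how the tests further below drive them. A
// hypothetical caller that already knows the set of used blobs and tolerates
// 5% unused data might look like this:
package example

import (
	"context"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"
)

// pruneRepo is a hypothetical helper, not part of restic.
func pruneRepo(ctx context.Context, repo *repository.Repository, used restic.BlobSet) error {
	opts := repository.PruneOptions{
		// keep at most 5% unused data after repacking (hypothetical policy)
		MaxUnusedBytes: func(usedBytes uint64) uint64 { return usedBytes / 20 },
		MaxRepackBytes: 1 << 30, // repack at most 1 GiB per run
	}
	printer := &progress.NoopPrinter{}

	plan, err := repository.PlanPrune(ctx, opts, repo,
		func(ctx context.Context, _ restic.Repository, usedBlobs restic.FindBlobSet) error {
			for bh := range used {
				usedBlobs.Insert(bh) // normally filled by walking all snapshots
			}
			return nil
		}, printer)
	if err != nil {
		return err
	}
	_ = plan.Stats() // summary statistics are available before executing
	return plan.Execute(ctx, printer)
}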
+func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error, printer progress.Printer) (*PrunePlan, error) { + var stats PruneStats + + if opts.UnsafeRecovery { + // prevent repacking data to make sure users cannot get stuck. + opts.MaxRepackBytes = 0 + } + if repo.Connections() < 2 { + return nil, fmt.Errorf("prune requires a backend connection limit of at least two") + } + if repo.Config().Version < 2 && opts.RepackUncompressed { + return nil, fmt.Errorf("compression requires at least repository format version 2") + } + + usedBlobs := index.NewAssociatedSet[uint8](repo.idx) + err := getUsedBlobs(ctx, repo, usedBlobs) + if err != nil { + return nil, err + } + + printer.P("searching used packs...\n") + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo, usedBlobs, &stats, printer) + if err != nil { + return nil, err + } + + printer.P("collecting packs for deletion and repacking\n") + plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) + if err != nil { + return nil, err + } + + if len(plan.repackPacks) != 0 { + // when repacking, we do not want to keep blobs which are + // already contained in kept packs, so delete them from keepBlobs + err := repo.ListBlobs(ctx, func(blob restic.PackedBlob) { + if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { + return + } + keepBlobs.Delete(blob.BlobHandle) + }) + if err != nil { + return nil, err + } + } else { + // keepBlobs is only needed if packs are repacked + keepBlobs = nil + } + plan.keepBlobs = keepBlobs + + plan.repo = repo + plan.stats = stats + plan.opts = opts + + return &plan, nil +} + +func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs *index.AssociatedSet[uint8], stats *PruneStats, printer progress.Printer) (*index.AssociatedSet[uint8], map[restic.ID]packInfo, error) { + // iterate over all blobs in index to find out which blobs are duplicates + // The counter in usedBlobs describes how many instances of the blob exist in the repository index + // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist + err := idx.ListBlobs(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs.Get(bh) + if ok { + if count < math.MaxUint8 { + // don't overflow, but saturate count at 255 + // this can lead to a non-optimal pack selection, but won't cause + // problems otherwise + count++ + } + + usedBlobs.Set(bh, count) + } + }) + if err != nil { + return nil, nil, err + } + + // Check if all used blobs have been found in index + missingBlobs := restic.NewBlobSet() + usedBlobs.For(func(bh restic.BlobHandle, count uint8) { + if count == 0 { + // blob does not exist in any pack files + missingBlobs.Insert(bh) + } + }) + + if len(missingBlobs) != 0 { + printer.E("%v not found in the index\n\n"+ + "Integrity check failed: Data seems to be missing.\n"+ + "Will not start prune to prevent (additional) data loss!\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) + return nil, nil, ErrIndexIncomplete + } + + indexPack := make(map[restic.ID]packInfo) + + // save computed pack header size + sz, err := pack.Size(ctx, idx, true) + if err != nil { + return nil, nil, err + } + for pid, hdrSize := range sz { + // initialize tpe with NumBlobTypes to indicate it's not set + indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, 
usedSize: uint64(hdrSize)} + } + + hasDuplicates := false + // iterate over all blobs in index to generate packInfo + err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) { + ip := indexPack[blob.PackID] + + // Set blob type if not yet set + if ip.tpe == restic.NumBlobTypes { + ip.tpe = blob.Type + } + + // mark mixed packs with "Invalid blob type" + if ip.tpe != blob.Type { + ip.tpe = restic.InvalidBlob + } + + bh := blob.BlobHandle + size := uint64(blob.Length) + dupCount, _ := usedBlobs.Get(bh) + switch { + case dupCount >= 2: + hasDuplicates = true + // mark as unused for now, we will later on select one copy + ip.unusedSize += size + ip.unusedBlobs++ + ip.duplicateBlobs++ + + // count as duplicate, will later on change one copy to be counted as used + stats.Size.Duplicate += size + stats.Blobs.Duplicate++ + case dupCount == 1: // used blob, not duplicate + ip.usedSize += size + ip.usedBlobs++ + + stats.Size.Used += size + stats.Blobs.Used++ + default: // unused blob + ip.unusedSize += size + ip.unusedBlobs++ + + stats.Size.Unused += size + stats.Blobs.Unused++ + } + if !blob.IsCompressed() { + ip.uncompressed = true + } + // update indexPack + indexPack[blob.PackID] = ip + }) + if err != nil { + return nil, nil, err + } + + // if duplicate blobs exist, those will be set to either "used" or "unused": + // - mark only one occurrence of duplicate blobs as used + // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" + // - if a pack only consists of duplicates (which by definition are used blobs), mark it as "used". This + // ensures that already rewritten packs are kept. + // - if there are no used blobs in a pack, possibly mark duplicates as "unused" + if hasDuplicates { + // iterate again over all blobs in index (this is pretty cheap, all in-mem) + err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs.Get(bh) + // skip non-duplicate, aka. normal blobs + // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining + if !ok || count == 1 { + return + } + + ip := indexPack[blob.PackID] + size := uint64(blob.Length) + switch { + case ip.usedBlobs > 0, (ip.duplicateBlobs == ip.unusedBlobs), count == 0: + // other used blobs in pack, only duplicate blobs or "last" occurrence -> transition to used + // a pack file created by an interrupted prune run will consist of only duplicate blobs + // thus select such already repacked pack files + ip.usedSize += size + ip.usedBlobs++ + ip.unusedSize -= size + ip.unusedBlobs-- + // same for the global statistics + stats.Size.Used += size + stats.Blobs.Used++ + stats.Size.Duplicate -= size + stats.Blobs.Duplicate-- + // let other occurrences remain marked as unused + usedBlobs.Set(bh, 1) + default: + // remain unused and decrease counter + count-- + if count == 1 { + // setting count to 1 would lead to forgetting that this blob had duplicates + // thus use the special value zero. This will select the last instance of the blob for keeping. + count = 0 + } + usedBlobs.Set(bh, count) + } + // update indexPack + indexPack[blob.PackID] = ip + }) + if err != nil { + return nil, nil, err + } + } + + // Sanity check. If no duplicates exist, all blobs have value 1. After handling + // duplicates, this also applies to duplicates. 
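// [Editor's note: illustrative sketch, not part of this patch; the diff
// resumes with the sanity-check loop right after it.]
// The duplicate handling above encodes per-blob state in a uint8: 0 means
// "missing" (or, later, "last remaining copy of a former duplicate"), 1 means
// "exactly one occurrence", and >= 2 means duplicates exist. Increments
// saturate at math.MaxUint8 rather than wrapping around, which can only make
// pack selection slightly less optimal, never incorrect. A minimal model of
// that counting step:
package main

import (
	"fmt"
	"math"
)

func saturatingInc(count uint8) uint8 {
	if count < math.MaxUint8 {
		count++ // saturate at 255 instead of overflowing back to 0
	}
	return count
}

func main() {
	c := uint8(254)
	for i := 0; i < 3; i++ {
		c = saturatingInc(c)
		fmt.Println(c) // prints 255 three times
	}
}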
+ usedBlobs.For(func(_ restic.BlobHandle, count uint8) { + if count != 1 { + panic("internal error during blob selection") + } + }) + + return usedBlobs, indexPack, nil +} + +func decidePackAction(ctx context.Context, opts PruneOptions, repo *Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { + removePacksFirst := restic.NewIDSet() + removePacks := restic.NewIDSet() + repackPacks := restic.NewIDSet() + + var repackCandidates []packInfoWithID + var repackSmallCandidates []packInfoWithID + repoVersion := repo.Config().Version + // only repack very small files by default + targetPackSize := repo.packSize() / 25 + if opts.RepackSmall { + // consider files with at least 80% of the target size as large enough + targetPackSize = repo.packSize() / 5 * 4 + } + + // loop over all packs and decide what to do + bar := printer.NewCounter("packs processed") + bar.SetMax(uint64(len(indexPack))) + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + p, ok := indexPack[id] + if !ok { + // Pack was not referenced in index and is not used => immediately remove! + printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) + removePacksFirst.Insert(id) + stats.Size.Unref += uint64(packSize) + return nil + } + + if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { + // Pack size does not fit and pack is needed => error + // If the pack is not needed, this is no error, the pack can + // and will be simply removed, see below. + printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", + id.Str(), p.unusedSize+p.usedSize, packSize) + return ErrSizeNotMatching + } + + // statistics + switch { + case p.usedBlobs == 0: + stats.Packs.Unused++ + case p.unusedBlobs == 0: + stats.Packs.Used++ + default: + stats.Packs.PartlyUsed++ + } + + if p.uncompressed { + stats.Size.Uncompressed += p.unusedSize + p.usedSize + } + mustCompress := false + if repoVersion >= 2 { + // repo v2: always repack tree blobs if uncompressed + // compress data blobs if requested + mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed + } + + // decide what to do + switch { + case p.usedBlobs == 0: + // All blobs in pack are no longer used => remove pack! + removePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + + case opts.RepackCacheableOnly && p.tpe == restic.DataBlob: + // if this is a data pack and --repack-cacheable-only is set => keep pack! + stats.Packs.Keep++ + + case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: + if packSize >= int64(targetPackSize) { + // All blobs in pack are used and not mixed => keep pack! + stats.Packs.Keep++ + } else { + repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + default: + // all other packs are candidates for repacking + repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + delete(indexPack, id) + bar.Add(1) + return nil + }) + bar.Done() + if err != nil { + return PrunePlan{}, err + } + + // At this point indexPacks contains only missing packs! 
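// [Editor's note: illustrative arithmetic, not part of this patch; the diff
// resumes right after it.]
// decidePackAction above derives the repack threshold from the pack size: by
// default only packs below 1/25 of the target pack size count as "very
// small", while RepackSmall raises the bar to 80% of the target. Assuming a
// 16 MiB target pack size (an assumption based on restic's default, not
// stated in this diff), the two thresholds work out as follows:
package main

import "fmt"

func main() {
	const packSize = 16 * 1024 * 1024 // assumed default target pack size

	fmt.Println(packSize / 25)    // 671088 bytes ≈ 0.64 MiB: default repack threshold
	fmt.Println(packSize / 5 * 4) // 13421772 bytes ≈ 12.8 MiB: threshold with RepackSmall
}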
+ + // missing packs that are not needed can be ignored + ignorePacks := restic.NewIDSet() + for id, p := range indexPack { + if p.usedBlobs == 0 { + ignorePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + delete(indexPack, id) + } + } + + if len(indexPack) != 0 { + printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) + for id := range indexPack { + printer.E(" %v\n", id) + } + return PrunePlan{}, ErrPacksMissing + } + if len(ignorePacks) != 0 { + printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") + for id := range ignorePacks { + printer.E("will forget missing pack file %v\n", id) + } + } + + if len(repackSmallCandidates) < 10 { + // too few small files to be worth the trouble, this also prevents endlessly repacking + // if there is just a single pack file below the target size + stats.Packs.Keep += uint(len(repackSmallCandidates)) + } else { + repackCandidates = append(repackCandidates, repackSmallCandidates...) + } + + // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. + // This is equivalent to sorting by unused / total space. + // Instead of unused[i] / used[i] > unused[j] / used[j] we use + // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 + // Moreover packs containing trees and too short packs are sorted to the beginning + sort.Slice(repackCandidates, func(i, j int) bool { + pi := repackCandidates[i].packInfo + pj := repackCandidates[j].packInfo + switch { + case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: + return true + case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: + return false + case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): + return true + case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): + return false + } + return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize + }) + + repack := func(id restic.ID, p packInfo) { + repackPacks.Insert(id) + stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs + stats.Size.Repack += p.unusedSize + p.usedSize + stats.Blobs.Repackrm += p.unusedBlobs + stats.Size.Repackrm += p.unusedSize + if p.uncompressed { + stats.Size.Uncompressed -= p.unusedSize + p.usedSize + } + } + + // calculate limit for number of unused bytes in the repo after repacking + maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used) + + for _, p := range repackCandidates { + reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes + packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) + + switch { + case reachedRepackSize: + stats.Packs.Keep++ + + case p.tpe != restic.DataBlob, p.mustCompress: + // repacking non-data packs / uncompressed-trees is only limited by repackSize + repack(p.ID, p.packInfo) + + case reachedUnusedSizeAfter && packIsLargeEnough: + // for all other packs stop repacking if tolerated unused size is reached. 
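// [Editor's note: illustrative sketch, not part of this patch; the diff
// resumes with the corresponding stats.Packs.Keep++ branch right after it.]
// The sort above orders repack candidates by their unused/used ratio without
// division: unused[i]/used[i] > unused[j]/used[j] is rewritten as
// unused[i]*used[j] > unused[j]*used[i], which is exact for integers, safe
// when used == 0, and cannot overflow as long as both factors fit into
// uint32. A minimal standalone version of that comparator:
package main

import (
	"fmt"
	"sort"
)

type packUsage struct{ unused, used uint64 }

func main() {
	packs := []packUsage{{unused: 10, used: 90}, {unused: 50, used: 50}, {unused: 0, used: 100}}
	sort.Slice(packs, func(i, j int) bool {
		// highest unused/used ratio first, no floating point, no division by zero
		return packs[i].unused*packs[j].used > packs[j].unused*packs[i].used
	})
	fmt.Println(packs) // [{50 50} {10 90} {0 100}]
}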
+ stats.Packs.Keep++ + + default: + repack(p.ID, p.packInfo) + } + } + + stats.Packs.Unref = uint(len(removePacksFirst)) + stats.Packs.Repack = uint(len(repackPacks)) + stats.Packs.Remove = uint(len(removePacks)) + + if repo.Config().Version < 2 { + // compression not supported for repository format version 1 + stats.Size.Uncompressed = 0 + } + + return PrunePlan{removePacksFirst: removePacksFirst, + removePacks: removePacks, + repackPacks: repackPacks, + ignorePacks: ignorePacks, + }, nil +} + +func (plan *PrunePlan) Stats() PruneStats { + return plan.stats +} + +// Execute does the actual pruning: +// - remove unreferenced packs first +// - repack given pack files while keeping the given blobs +// - rebuild the index while ignoring all files that will be deleted +// - delete the files +// plan.removePacks and plan.ignorePacks are modified in this function. +func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) error { + if plan.opts.DryRun { + printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") + if len(plan.removePacksFirst) > 0 { + printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) + } + printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) + printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) + // Always quit here if DryRun was set! + return nil + } + + repo := plan.repo + // make sure the plan can only be used once + plan.repo = nil + + // unreferenced packs can be safely deleted first + if len(plan.removePacksFirst) != 0 { + printer.P("deleting unreferenced packs\n") + _ = deleteFiles(ctx, true, &internalRepository{repo}, plan.removePacksFirst, restic.PackFile, printer) + // forget unused data + plan.removePacksFirst = nil + } + if ctx.Err() != nil { + return ctx.Err() + } + + if len(plan.repackPacks) != 0 { + printer.P("repacking packs\n") + bar := printer.NewCounter("packs repacked") + bar.SetMax(uint64(len(plan.repackPacks))) + _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) + bar.Done() + if err != nil { + return errors.Fatal(err.Error()) + } + + // Also remove repacked packs + plan.removePacks.Merge(plan.repackPacks) + // forget unused data + plan.repackPacks = nil + + if plan.keepBlobs.Len() != 0 { + printer.E("%v was not repacked\n\n"+ + "Integrity check failed.\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) + return errors.Fatal("internal error: blobs were not repacked") + } + + // allow GC of the blob set + plan.keepBlobs = nil + } + + if len(plan.ignorePacks) == 0 { + plan.ignorePacks = plan.removePacks + } else { + plan.ignorePacks.Merge(plan.removePacks) + } + + if plan.opts.UnsafeRecovery { + printer.P("deleting index files\n") + indexFiles := repo.idx.IDs() + err := deleteFiles(ctx, false, &internalRepository{repo}, indexFiles, restic.IndexFile, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } else if len(plan.ignorePacks) != 0 { + err := rewriteIndexFiles(ctx, repo, plan.ignorePacks, nil, nil, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } + + if len(plan.removePacks) != 0 { + printer.P("removing %d old packs\n", len(plan.removePacks)) + _ = deleteFiles(ctx, true, &internalRepository{repo}, plan.removePacks, restic.PackFile, printer) + } + if ctx.Err() != nil { + 
return ctx.Err() + } + + if plan.opts.UnsafeRecovery { + err := repo.idx.SaveFallback(ctx, &internalRepository{repo}, plan.ignorePacks, printer.NewCounter("packs processed")) + if err != nil { + return errors.Fatalf("%s", err) + } + } + + // drop outdated in-memory index + repo.clearIndex() + + printer.P("done\n") + return nil +} + +// deleteFiles deletes the given fileList of fileType in parallel +// if ignoreError=true, it will print a warning if there was an error, else it will abort. +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.RemoverUnpacked[restic.FileType], fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + bar := printer.NewCounter("files deleted") + defer bar.Done() + + return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { + if err != nil { + printer.E("unable to remove %v/%v from the repository\n", fileType, id) + if !ignoreError { + return err + } + } + printer.VV("removed %v/%v\n", fileType, id) + return nil + }, bar) +} diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go new file mode 100644 index 00000000000..cc569aa4333 --- /dev/null +++ b/internal/repository/prune_test.go @@ -0,0 +1,114 @@ +package repository_test + +import ( + "context" + "math" + "math/rand" + "testing" + "time" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" + "golang.org/x/sync/errgroup" +) + +func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { + seed := time.Now().UnixNano() + random := rand.New(rand.NewSource(seed)) + t.Logf("rand initialized with seed %d", seed) + + repo, _, be := repository.TestRepositoryWithVersion(t, 0) + createRandomBlobs(t, random, repo, 4, 0.5, true) + createRandomBlobs(t, random, repo, 5, 0.5, true) + keep, _ := selectBlobs(t, random, repo, 0.5) + + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + // duplicate a few blobs to exercise those code paths + for blob := range keep { + buf, err := repo.LoadBlob(context.TODO(), blob.Type, blob.ID, nil) + rtest.OK(t, err) + _, _, _, err = repo.SaveBlob(context.TODO(), blob.Type, buf, blob.ID, true) + rtest.OK(t, err) + } + rtest.OK(t, repo.Flush(context.TODO())) + + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error { + for blob := range keep { + usedBlobs.Insert(blob) + } + return nil + }, &progress.NoopPrinter{}) + rtest.OK(t, err) + + rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) + + repo = repository.TestOpenBackend(t, be) + checker.TestCheckRepo(t, repo, true) + + if errOnUnused { + existing := listBlobs(repo) + rtest.Assert(t, existing.Equals(keep), "unexpected blobs, wanted %v got %v", keep, existing) + } +} + +func TestPrune(t *testing.T) { + for _, test := range []struct { + name string + opts repository.PruneOptions + errOnUnused bool + }{ + { + name: "0", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return 0 }, + }, + errOnUnused: true, + }, + { + name: "50", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 2 }, + }, + }, + { + name: "unlimited", + opts: repository.PruneOptions{ + 
MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + }, + }, + { + name: "cachableonly", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 }, + RepackCacheableOnly: true, + }, + }, + { + name: "small", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + RepackSmall: true, + }, + errOnUnused: true, + }, + } { + t.Run(test.name, func(t *testing.T) { + testPrune(t, test.opts, test.errOnUnused) + }) + t.Run(test.name+"-recovery", func(t *testing.T) { + opts := test.opts + opts.UnsafeRecovery = true + // unsafeNoSpaceRecovery does not repack partially used pack files + testPrune(t, opts, false) + }) + } +} diff --git a/internal/repository/raw.go b/internal/repository/raw.go new file mode 100644 index 00000000000..c5a4a72b78c --- /dev/null +++ b/internal/repository/raw.go @@ -0,0 +1,56 @@ +package repository + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/restic" +) + +// LoadRaw reads all data stored in the backend for the file with id and filetype t. +// If the backend returns data that does not match the id, then the buffer is returned +// along with an error that is a restic.ErrInvalidData error. +func (r *Repository) LoadRaw(ctx context.Context, t restic.FileType, id restic.ID) (buf []byte, err error) { + h := backend.Handle{Type: t, Name: id.String()} + + buf, err = loadRaw(ctx, r.be, h) + + // retry loading damaged data only once. If a file fails to download correctly + // the second time, then it is likely corrupted at the backend. + if h.Type != backend.ConfigFile && id != restic.Hash(buf) { + if r.cache != nil { + // Cleanup cache to make sure it's not the cached copy that is broken. + // Ignore error as there's not much we can do in that case. + _ = r.cache.Forget(h) + } + + buf, err = loadRaw(ctx, r.be, h) + + if err == nil && id != restic.Hash(buf) { + // Return corrupted data to the caller if it is still broken the second time to + // let the caller decide what to do with the data. 
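// [Editor's note: illustrative sketch, not part of this patch; the diff
// resumes with the corresponding return statement right after it.]
// Because LoadRaw hands the corrupted bytes back together with a wrapped
// restic.ErrInvalidData, callers such as repair tooling can distinguish
// "unreadable" from "readable but damaged". A hypothetical caller:
package example

import (
	"context"
	"errors"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
)

// loadPossiblyDamaged is a hypothetical helper; the bool reports whether the
// returned bytes failed the integrity check.
func loadPossiblyDamaged(ctx context.Context, repo *repository.Repository, t restic.FileType, id restic.ID) ([]byte, bool, error) {
	buf, err := repo.LoadRaw(ctx, t, id)
	if errors.Is(err, restic.ErrInvalidData) {
		return buf, true, nil // damaged, but still available for inspection
	}
	if err != nil {
		return nil, false, err
	}
	return buf, false, nil
}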
+ return buf, fmt.Errorf("LoadRaw(%v): %w", h, restic.ErrInvalidData) + } + } + + if err != nil { + return nil, err + } + return buf, nil +} + +func loadRaw(ctx context.Context, be backend.Backend, h backend.Handle) (buf []byte, err error) { + err = be.Load(ctx, h, 0, 0, func(rd io.Reader) error { + wr := new(bytes.Buffer) + _, cerr := io.Copy(wr, rd) + if cerr != nil { + return cerr + } + buf = wr.Bytes() + return cerr + }) + return buf, err +} diff --git a/internal/repository/raw_test.go b/internal/repository/raw_test.go new file mode 100644 index 00000000000..ac65a8dc8f1 --- /dev/null +++ b/internal/repository/raw_test.go @@ -0,0 +1,108 @@ +package repository_test + +import ( + "bytes" + "context" + "io" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/cache" + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/backend/mock" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +const KiB = 1 << 10 +const MiB = 1 << 20 + +func TestLoadRaw(t *testing.T) { + b := mem.New() + repo, err := repository.New(b, repository.Options{}) + rtest.OK(t, err) + + for i := 0; i < 5; i++ { + data := rtest.Random(23+i, 500*KiB) + + id := restic.Hash(data) + h := backend.Handle{Name: id.String(), Type: backend.PackFile} + err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) + rtest.OK(t, err) + + buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id) + rtest.OK(t, err) + + if len(buf) != len(data) { + t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) + continue + } + + if !bytes.Equal(buf, data) { + t.Errorf("wrong data returned") + continue + } + } +} + +func TestLoadRawBroken(t *testing.T) { + b := mock.NewBackend() + repo, err := repository.New(b, repository.Options{}) + rtest.OK(t, err) + + data := rtest.Random(23, 10*KiB) + id := restic.Hash(data) + // damage buffer + data[0] ^= 0xff + + b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(data)), nil + } + + // must detect but still return corrupt data + buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id) + rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned") + rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "missing expected ErrInvalidData error, got %v", err) + + // cause the first access to fail, but repair the data for the second access + data[0] ^= 0xff + loadCtr := 0 + b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + data[0] ^= 0xff + loadCtr++ + return io.NopCloser(bytes.NewReader(data)), nil + } + + // must retry load of corrupted data + buf, err = repo.LoadRaw(context.TODO(), backend.PackFile, id) + rtest.OK(t, err) + rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned") + rtest.Equals(t, 2, loadCtr, "missing retry on broken data") +} + +func TestLoadRawBrokenWithCache(t *testing.T) { + b := mock.NewBackend() + c := cache.TestNewCache(t) + repo, err := repository.New(b, repository.Options{}) + rtest.OK(t, err) + repo.UseCache(c) + + data := rtest.Random(23, 10*KiB) + id := restic.Hash(data) + + loadCtr := 0 + // cause the first access to fail, but repair the data for the second access + b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length 
int, offset int64) (io.ReadCloser, error) { + data[0] ^= 0xff + loadCtr++ + return io.NopCloser(bytes.NewReader(data)), nil + } + + // must retry load of corrupted data + buf, err := repo.LoadRaw(context.TODO(), backend.SnapshotFile, id) + rtest.OK(t, err) + rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned") + rtest.Equals(t, 2, loadCtr, "missing retry on broken data") +} diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 5588984f6f9..8c9ca28bb58 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -54,7 +54,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito downloadQueue := make(chan restic.PackBlobs) wg.Go(func() error { defer close(downloadQueue) - for pbs := range repo.Index().ListPacks(wgCtx, packs) { + for pbs := range repo.ListPacksFromIndex(wgCtx, packs) { var packBlobs []restic.Blob keepMutex.Lock() // filter out unnecessary blobs @@ -72,20 +72,15 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito return wgCtx.Err() } } - return nil + return wgCtx.Err() }) worker := func() error { for t := range downloadQueue { err := repo.LoadBlobsFromPack(wgCtx, t.PackID, t.Blobs, func(blob restic.BlobHandle, buf []byte, err error) error { if err != nil { - var ierr error - // check whether we can get a valid copy somewhere else - buf, ierr = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil) - if ierr != nil { - // no luck, return the original error - return err - } + // a required blob couldn't be retrieved + return err } keepMutex.Lock() diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e5e46ac2af8..0691cdbbb04 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -7,18 +7,18 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" ) -func randomSize(min, max int) int { - return rand.Intn(max-min) + min +func randomSize(random *rand.Rand, min, max int) int { + return random.Intn(max-min) + min } -func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, random *rand.Rand, repo restic.Repository, blobs int, pData float32, smallBlobs bool) { var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -28,16 +28,20 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl length int ) - if rand.Float32() < pData { + if random.Float32() < pData { tpe = restic.DataBlob - length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + if smallBlobs { + length = randomSize(random, 1*1024, 20*1024) // 1KiB to 20KiB of data + } else { + length = randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data + } } else { tpe = restic.TreeBlob - length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB + length = randomSize(random, 1*1024, 20*1024) // 1KiB to 20KiB } buf := make([]byte, length) - rand.Read(buf) + random.Read(buf) id, exists, _, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false) if err != nil { @@ -62,10 +66,10 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl } } -func createRandomWrongBlob(t testing.TB, repo restic.Repository) restic.BlobHandle { - length := 
randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
+func createRandomWrongBlob(t testing.TB, random *rand.Rand, repo restic.Repository) restic.BlobHandle {
+	length := randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data
 	buf := make([]byte, length)
-	rand.Read(buf)
+	random.Read(buf)
 	id := restic.Hash(buf)
 	// invert first data byte
 	buf[0] ^= 0xff
@@ -85,7 +89,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) restic.BlobHand
 
 // selectBlobs splits the list of all blobs randomly into two lists. A blob
 // will be contained in the first one with probability p.
-func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) {
+func selectBlobs(t *testing.T, random *rand.Rand, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) {
 	list1 = restic.NewBlobSet()
 	list2 = restic.NewBlobSet()
 
@@ -105,7 +109,7 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2
 		}
 
 		blobs.Insert(h)
-		if rand.Float32() <= p {
+		if random.Float32() <= p {
 			list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
 		} else {
 			list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
@@ -121,8 +125,12 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2
 }
 
 func listPacks(t *testing.T, repo restic.Lister) restic.IDSet {
+	return listFiles(t, repo, restic.PackFile)
+}
+
+func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.IDSet {
 	list := restic.NewIDSet()
-	err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
+	err := repo.List(context.TODO(), tpe, func(id restic.ID, size int64) error {
 		list.Insert(id)
 		return nil
 	})
@@ -137,9 +145,8 @@ func listPacks(t *testing.T, repo restic.Lister) restic.IDSet {
 
 func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet {
 	packs := restic.NewIDSet()
-	idx := repo.Index()
 	for h := range blobs {
-		list := idx.Lookup(h)
+		list := repo.LookupBlob(h.Type, h.ID)
 		if len(list) == 0 {
 			t.Fatal("Failed to find blob", h.ID.Str(), "with type", h.Type)
 		}
@@ -152,60 +159,26 @@ func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSe
 	return packs
 }
 
-func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs restic.BlobSet) {
+func repack(t *testing.T, repo restic.Repository, be backend.Backend, packs restic.IDSet, blobs restic.BlobSet) {
 	repackedBlobs, err := repository.Repack(context.TODO(), repo, repo, packs, blobs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	for id := range repackedBlobs {
-		err = repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})
+		err = be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})
 		if err != nil {
 			t.Fatal(err)
 		}
 	}
 }
 
-func flush(t *testing.T, repo restic.Repository) {
-	if err := repo.Flush(context.TODO()); err != nil {
-		t.Fatalf("repo.SaveIndex() %v", err)
-	}
-}
-
-func rebuildIndex(t *testing.T, repo restic.Repository) {
-	err := repo.SetIndex(index.NewMasterIndex())
-	rtest.OK(t, err)
-
-	packs := make(map[restic.ID]int64)
-	err = repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
-		packs[id] = size
-		return nil
-	})
-	rtest.OK(t, err)
-
-	_, err = repo.(*repository.Repository).CreateIndexFromPacks(context.TODO(), packs, nil)
-	rtest.OK(t, err)
-
-	var obsoleteIndexes restic.IDs
-	err = repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
-		obsoleteIndexes =
append(obsoleteIndexes, id) - return nil - }) - rtest.OK(t, err) - - err = repo.Index().Save(context.TODO(), repo, restic.NewIDSet(), obsoleteIndexes, restic.MasterIndexSaveOpts{}) - rtest.OK(t, err) -} - -func reloadIndex(t *testing.T, repo restic.Repository) { - err := repo.SetIndex(index.NewMasterIndex()) - if err != nil { - t.Fatal(err) - } +func rebuildAndReloadIndex(t *testing.T, repo *repository.Repository) { + rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ + ReadAllPacks: true, + }, &progress.NoopPrinter{})) - if err := repo.LoadIndex(context.TODO(), nil); err != nil { - t.Fatalf("error loading new index: %v", err) - } + rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } func TestRepack(t *testing.T) { @@ -213,18 +186,20 @@ func TestRepack(t *testing.T) { } func testRepack(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, be := repository.TestRepositoryWithVersion(t, version) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, random, repo, 10, 0.7, false) + createRandomBlobs(t, random, repo, 10, 0.7, false) packsBefore := listPacks(t, repo) // Running repack on empty ID sets should not do anything at all. - repack(t, repo, nil, nil) + repack(t, repo, be, nil, nil) packsAfter := listPacks(t, repo) @@ -233,15 +208,12 @@ func testRepack(t *testing.T, version uint) { packsBefore, packsAfter) } - flush(t, repo) - - removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) + removeBlobs, keepBlobs := selectBlobs(t, random, repo, 0.2) removePacks := findPacksForBlobs(t, repo, removeBlobs) - repack(t, repo, removePacks, keepBlobs) - rebuildIndex(t, repo) - reloadIndex(t, repo) + repack(t, repo, be, removePacks, keepBlobs) + rebuildAndReloadIndex(t, repo) packsAfter = listPacks(t, repo) for id := range removePacks { @@ -250,10 +222,8 @@ func testRepack(t *testing.T, version uint) { } } - idx := repo.Index() - for h := range keepBlobs { - list := idx.Lookup(h) + list := repo.LookupBlob(h.Type, h.ID) if len(list) == 0 { t.Errorf("unable to find blob %v in repo", h.ID.Str()) continue @@ -272,7 +242,7 @@ func testRepack(t *testing.T, version uint) { } for h := range removeBlobs { - if _, found := repo.LookupBlobSize(h.ID, h.Type); found { + if _, found := repo.LookupBlobSize(h.Type, h.ID); found { t.Errorf("blob %v still contained in the repo", h) } } @@ -291,34 +261,32 @@ func (r oneConnectionRepo) Connections() uint { } func testRepackCopy(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) - dstRepo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) + dstRepo, _, _ := repository.TestRepositoryWithVersion(t, version) // test with minimal possible connection count repoWrapped := &oneConnectionRepo{repo} dstRepoWrapped := &oneConnectionRepo{dstRepo} seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) - flush(t, repo) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, random, repo, 10, 0.7, false) + createRandomBlobs(t, random, repo, 10, 0.7, false) - _, keepBlobs := selectBlobs(t, repo, 0.2) + _, keepBlobs := selectBlobs(t, random, repo, 0.2) copyPacks := 
findPacksForBlobs(t, repo, keepBlobs) _, err := repository.Repack(context.TODO(), repoWrapped, dstRepoWrapped, copyPacks, keepBlobs, nil) if err != nil { t.Fatal(err) } - rebuildIndex(t, dstRepo) - reloadIndex(t, dstRepo) - - idx := dstRepo.Index() + rebuildAndReloadIndex(t, dstRepo) for h := range keepBlobs { - list := idx.Lookup(h) + list := dstRepo.LookupBlob(h.Type, h.ID) if len(list) == 0 { t.Errorf("unable to find blob %v in repo", h.ID.Str()) continue @@ -337,17 +305,17 @@ func TestRepackWrongBlob(t *testing.T) { func testRepackWrongBlob(t *testing.T, version uint) { // disable verification to allow adding corrupted blobs to the repository - repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) + repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) - createRandomWrongBlob(t, repo) + createRandomBlobs(t, random, repo, 5, 0.7, false) + createRandomWrongBlob(t, random, repo) // just keep all blobs, but also rewrite every pack - _, keepBlobs := selectBlobs(t, repo, 0) + _, keepBlobs := selectBlobs(t, random, repo, 0) rewritePacks := findPacksForBlobs(t, repo, keepBlobs) _, err := repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil) @@ -363,15 +331,15 @@ func TestRepackBlobFallback(t *testing.T) { func testRepackBlobFallback(t *testing.T, version uint) { // disable verification to allow adding corrupted blobs to the repository - repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) + repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - length := randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + length := randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data buf := make([]byte, length) - rand.Read(buf) + random.Read(buf) id := restic.Hash(buf) // corrupted copy diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go new file mode 100644 index 00000000000..cc08206d523 --- /dev/null +++ b/internal/repository/repair_index.go @@ -0,0 +1,139 @@ +package repository + +import ( + "context" + + "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +type RepairIndexOptions struct { + ReadAllPacks bool +} + +func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer progress.Printer) error { + var obsoleteIndexes restic.IDs + packSizeFromList := make(map[restic.ID]int64) + packSizeFromIndex := make(map[restic.ID]int64) + removePacks := restic.NewIDSet() + + if opts.ReadAllPacks { + // get list of old index files but start with empty index + err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + }) + if err != nil { + return err + } + repo.clearIndex() + + } else { + printer.P("loading indexes...\n") + mi := index.NewMasterIndex() + err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error { + if err != nil { + printer.E("removing invalid index %v: 
%v\n", id, err) + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + } + + mi.Insert(idx) + return nil + }) + if err != nil { + return err + } + + err = mi.MergeFinalIndexes() + if err != nil { + return err + } + + err = repo.SetIndex(mi) + if err != nil { + return err + } + packSizeFromIndex, err = pack.Size(ctx, repo, false) + if err != nil { + return err + } + } + + oldIndexes := repo.idx.IDs() + + printer.P("getting pack files to read...\n") + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + size, ok := packSizeFromIndex[id] + if !ok || size != packSize { + // Pack was not referenced in index or size does not match + packSizeFromList[id] = packSize + removePacks.Insert(id) + } + if !ok { + printer.E("adding pack file to index %v\n", id) + } else if size != packSize { + printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) + } + delete(packSizeFromIndex, id) + return nil + }) + if err != nil { + return err + } + for id := range packSizeFromIndex { + // forget pack files that are referenced in the index but do not exist + // when rebuilding the index + removePacks.Insert(id) + printer.E("removing not found pack file %v\n", id) + } + + if len(packSizeFromList) > 0 { + printer.P("reading pack files\n") + bar := printer.NewCounter("packs") + bar.SetMax(uint64(len(packSizeFromList))) + invalidFiles, err := repo.createIndexFromPacks(ctx, packSizeFromList, bar) + bar.Done() + if err != nil { + return err + } + + for _, id := range invalidFiles { + printer.V("skipped incomplete pack file: %v\n", id) + } + } + + if err := repo.Flush(ctx); err != nil { + return err + } + + err = rewriteIndexFiles(ctx, repo, removePacks, oldIndexes, obsoleteIndexes, printer) + if err != nil { + return err + } + + // drop outdated in-memory index + repo.clearIndex() + return nil +} + +func rewriteIndexFiles(ctx context.Context, repo *Repository, removePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, printer progress.Printer) error { + printer.P("rebuilding index\n") + + bar := printer.NewCounter("indexes processed") + return repo.idx.Rewrite(ctx, &internalRepository{repo}, removePacks, oldIndexes, extraObsolete, index.MasterIndexRewriteOpts{ + SaveProgress: bar, + DeleteProgress: func() *progress.Counter { + return printer.NewCounter("old indexes deleted") + }, + DeleteReport: func(id restic.ID, err error) { + if err != nil { + printer.VV("failed to remove index %v: %v\n", id.String(), err) + } else { + printer.VV("removed index %v\n", id.String()) + } + }, + }) +} diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go new file mode 100644 index 00000000000..0fc89c79a65 --- /dev/null +++ b/internal/repository/repair_index_test.go @@ -0,0 +1,81 @@ +package repository_test + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" +) + +func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.IndexFile) +} + +func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository, be backend.Backend)) { + seed := time.Now().UnixNano() + random := rand.New(rand.NewSource(seed)) + t.Logf("rand initialized with 
seed %d", seed) + + repo, _, be := repository.TestRepositoryWithVersion(t, 0) + createRandomBlobs(t, random, repo, 4, 0.5, true) + createRandomBlobs(t, random, repo, 5, 0.5, true) + indexes := listIndex(t, repo) + t.Logf("old indexes %v", indexes) + + damage(t, repo, be) + + repo = repository.TestOpenBackend(t, be) + rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ + ReadAllPacks: readAllPacks, + }, &progress.NoopPrinter{})) + + checker.TestCheckRepo(t, repo, true) +} + +func TestRebuildIndex(t *testing.T) { + for _, test := range []struct { + name string + damage func(t *testing.T, repo *repository.Repository, be backend.Backend) + }{ + { + "valid index", + func(t *testing.T, repo *repository.Repository, be backend.Backend) {}, + }, + { + "damaged index", + func(t *testing.T, repo *repository.Repository, be backend.Backend) { + index := listIndex(t, repo).List()[0] + replaceFile(t, be, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte { + b[0] ^= 0xff + return b + }) + }, + }, + { + "missing index", + func(t *testing.T, repo *repository.Repository, be backend.Backend) { + index := listIndex(t, repo).List()[0] + rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()})) + }, + }, + { + "missing pack", + func(t *testing.T, repo *repository.Repository, be backend.Backend) { + pack := listPacks(t, repo).List()[0] + rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()})) + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + testRebuildIndex(t, false, test.damage) + testRebuildIndex(t, true, test.damage) + }) + } +} diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 64279e7277a..a9f8413e477 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sync/errgroup" ) -func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, printer progress.Printer) error { +func RepairPacks(ctx context.Context, repo *Repository, ids restic.IDSet, printer progress.Printer) error { wg, wgCtx := errgroup.WithContext(ctx) repo.StartPackUploader(wgCtx, wg) @@ -21,7 +21,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, wg.Go(func() error { // examine all data the indexes have for the pack file - for b := range repo.Index().ListPacks(wgCtx, ids) { + for b := range repo.ListPacksFromIndex(wgCtx, ids) { blobs := b.Blobs if len(blobs) == 0 { printer.E("no blobs found for pack %v", b.PackID) @@ -31,12 +31,8 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, err := repo.LoadBlobsFromPack(wgCtx, b.PackID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error { if err != nil { - // Fallback path - buf, err = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil) - if err != nil { - printer.E("failed to load blob %v: %v", blob.ID, err) - return nil - } + printer.E("failed to load blob %v: %v", blob.ID, err) + return nil } id, _, _, err := repo.SaveBlob(wgCtx, blob.Type, buf, restic.ID{}, true) if !id.Equal(blob.ID) { @@ -60,19 +56,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, } // remove salvaged packs from index - printer.P("rebuilding index") - - bar = printer.NewCounter("packs processed") - err = repo.Index().Save(ctx, repo, ids, nil, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { 
- return printer.NewCounter("old indexes deleted") - }, - DeleteReport: func(id restic.ID, err error) { - printer.VV("removed index %v", id.String()) - }, - }) - + err = rewriteIndexFiles(ctx, repo, ids, nil, nil, printer) if err != nil { return err } @@ -81,7 +65,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, printer.P("removing salvaged pack files") // if we fail to delete the damaged pack files, then prune will remove them later on bar = printer.NewCounter("files deleted") - _ = restic.ParallelRemove(ctx, repo, ids, restic.PackFile, nil, bar) + _ = restic.ParallelRemove(ctx, &internalRepository{repo}, ids, restic.PackFile, nil, bar) bar.Done() return nil diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index b950245aae0..5f02e7d6184 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -7,28 +7,27 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/index" + backendtest "github.com/restic/restic/internal/backend/test" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/progress" ) func listBlobs(repo restic.Repository) restic.BlobSet { blobs := restic.NewBlobSet() - repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + _ = repo.ListBlobs(context.TODO(), func(pb restic.PackedBlob) { blobs.Insert(pb.BlobHandle) }) return blobs } -func replaceFile(t *testing.T, repo restic.Repository, h backend.Handle, damage func([]byte) []byte) { - buf, err := backend.LoadAll(context.TODO(), nil, repo.Backend(), h) - test.OK(t, err) +func replaceFile(t *testing.T, be backend.Backend, h backend.Handle, damage func([]byte) []byte) { + buf, err := backendtest.LoadAll(context.TODO(), be, h) + rtest.OK(t, err) buf = damage(buf) - test.OK(t, repo.Backend().Remove(context.TODO(), h)) - test.OK(t, repo.Backend().Save(context.TODO(), h, backend.NewByteReader(buf, repo.Backend().Hasher()))) + rtest.OK(t, be.Remove(context.TODO(), h)) + rtest.OK(t, be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher()))) } func TestRepairBrokenPack(t *testing.T) { @@ -38,28 +37,28 @@ func TestRepairBrokenPack(t *testing.T) { func testRepairBrokenPack(t *testing.T, version uint) { tests := []struct { name string - damage func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) + damage func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) }{ { "valid pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { return packsBefore, restic.NewBlobSet() }, }, { "broken pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { - wrongBlob := createRandomWrongBlob(t, repo) + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + wrongBlob := createRandomWrongBlob(t, random, repo) damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob)) return damagedPacks, restic.NewBlobSet(wrongBlob) }, }, { 
"partially broken pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] - replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, + replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, func(buf []byte) []byte { buf[0] ^= 0xff return buf @@ -67,7 +66,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { // find blob that starts at offset 0 var damagedBlob restic.BlobHandle - for blobs := range repo.Index().ListPacks(context.TODO(), restic.NewIDSet(damagedID)) { + for blobs := range repo.ListPacksFromIndex(context.TODO(), restic.NewIDSet(damagedID)) { for _, blob := range blobs.Blobs { if blob.Offset == 0 { damagedBlob = blob.BlobHandle @@ -79,10 +78,10 @@ func testRepairBrokenPack(t *testing.T, version uint) { }, }, { "truncated pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] - replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, + replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, func(buf []byte) []byte { buf = buf[0:10] return buf @@ -90,7 +89,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { // all blobs in the file are broken damagedBlobs := restic.NewBlobSet() - for blobs := range repo.Index().ListPacks(context.TODO(), restic.NewIDSet(damagedID)) { + for blobs := range repo.ListPacksFromIndex(context.TODO(), restic.NewIDSet(damagedID)) { for _, blob := range blobs.Blobs { damagedBlobs.Insert(blob.BlobHandle) } @@ -103,21 +102,20 @@ func testRepairBrokenPack(t *testing.T, version uint) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // disable verification to allow adding corrupted blobs to the repository - repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) + repo, be := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, random, repo, 5, 0.7, true) packsBefore := listPacks(t, repo) blobsBefore := listBlobs(repo) - toRepair, damagedBlobs := test.damage(t, repo, packsBefore) + toRepair, damagedBlobs := test.damage(t, random, repo, be, packsBefore) rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{})) // reload index - rtest.OK(t, repo.SetIndex(index.NewMasterIndex())) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) packsAfter := listPacks(t, repo) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index a20f71ab1ae..aee0db103bd 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -1,35 +1,32 @@ package repository import ( - "bufio" "bytes" "context" "fmt" "io" + "math" "os" "runtime" "sort" "sync" - "github.com/cenkalti/backoff/v4" "github.com/klauspost/compress/zstd" "github.com/restic/chunker" 
"github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/dryrun" - "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" ) -const MaxStreamBufferSize = 4 * 1024 * 1024 - const MinPackSize = 4 * 1024 * 1024 const DefaultPackSize = 16 * 1024 * 1024 const MaxPackSize = 128 * 1024 * 1024 @@ -41,12 +38,10 @@ type Repository struct { key *crypto.Key keyID restic.ID idx *index.MasterIndex - Cache *cache.Cache + cache *cache.Cache opts Options - noAutoIndexUpdate bool - packerWg *errgroup.Group uploader *packerUploader treePM *packerManager @@ -58,6 +53,11 @@ type Repository struct { dec *zstd.Decoder } +// internalRepository allows using SaveUnpacked and RemoveUnpacked with all FileTypes +type internalRepository struct { + *Repository +} + type Options struct { Compression CompressionMode PackSize uint @@ -133,18 +133,9 @@ func New(be backend.Backend, opts Options) (*Repository, error) { return repo, nil } -// DisableAutoIndexUpdate deactives the automatic finalization and upload of new -// indexes once these are full -func (r *Repository) DisableAutoIndexUpdate() { - r.noAutoIndexUpdate = true -} - // setConfig assigns the given config and updates the repository parameters accordingly func (r *Repository) setConfig(cfg restic.Config) { r.cfg = cfg - if r.cfg.Version >= 2 { - r.idx.MarkCompressed() - } } // Config returns the repository configuration. @@ -152,8 +143,8 @@ func (r *Repository) Config() restic.Config { return r.cfg } -// PackSize return the target size of a pack file when uploading -func (r *Repository) PackSize() uint { +// packSize return the target size of a pack file when uploading +func (r *Repository) packSize() uint { return r.opts.PackSize } @@ -163,10 +154,14 @@ func (r *Repository) UseCache(c *cache.Cache) { return } debug.Log("using cache") - r.Cache = c + r.cache = c r.be = c.Wrap(r.be) } +func (r *Repository) Cache() *cache.Cache { + return r.cache +} + // SetDryRun sets the repo backend into dry-run mode. func (r *Repository) SetDryRun() { r.be = dryrun.New(r.be) @@ -180,46 +175,11 @@ func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id res id = restic.ID{} } - ctx, cancel := context.WithCancel(ctx) - - h := backend.Handle{Type: t, Name: id.String()} - retriedInvalidData := false - var dataErr error - wr := new(bytes.Buffer) - - err := r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error { - // make sure this call is idempotent, in case an error occurs - wr.Reset() - _, cerr := io.Copy(wr, rd) - if cerr != nil { - return cerr - } - - buf := wr.Bytes() - if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) { - debug.Log("retry loading broken blob %v", h) - if !retriedInvalidData { - retriedInvalidData = true - } else { - // with a canceled context there is not guarantee which error will - // be returned by `be.Load`. 
- dataErr = fmt.Errorf("load(%v): %w", h, restic.ErrInvalidData) - cancel() - } - return restic.ErrInvalidData - - } - return nil - }) - - if dataErr != nil { - return nil, dataErr - } + buf, err := r.LoadRaw(ctx, t, id) if err != nil { return nil, err } - buf := wr.Bytes() nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():] plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) if err != nil { @@ -274,18 +234,29 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. } // try cached pack files first - sortCachedPacksFirst(r.Cache, blobs) + sortCachedPacksFirst(r.cache, blobs) - var lastError error - for _, blob := range blobs { - debug.Log("blob %v/%v found: %v", t, id, blob) - - if blob.Type != t { - debug.Log("blob %v has wrong block type, want %v", blob, t) + buf, err := r.loadBlob(ctx, blobs, buf) + if err != nil { + if r.cache != nil { + for _, blob := range blobs { + h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()} + // ignore errors as there's not much we can do here + _ = r.cache.Forget(h) + } } + buf, err = r.loadBlob(ctx, blobs, buf) + } + return buf, err +} + +func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, buf []byte) ([]byte, error) { + var lastError error + for _, blob := range blobs { + debug.Log("blob %v found: %v", blob.BlobHandle, blob) // load blob from pack - h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: t.IsMetadata()} + h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()} switch { case cap(buf) < int(blob.Length): @@ -294,42 +265,26 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. buf = buf[:blob.Length] } - n, err := backend.ReadAt(ctx, r.be, h, int64(blob.Offset), buf) + _, err := backend.ReadAt(ctx, r.be, h, int64(blob.Offset), buf) if err != nil { debug.Log("error loading blob %v: %v", blob, err) lastError = err continue } - if uint(n) != blob.Length { - lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", - id.Str(), blob.Length, uint(n)) - debug.Log("lastError: %v", lastError) - continue - } + it := newPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder()) + pbv, err := it.Next() - // decrypt - nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():] - plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) - if err != nil { - lastError = errors.Errorf("decrypting blob %v failed: %v", id, err) - continue - } - - if blob.IsCompressed() { - plaintext, err = r.getZstdDecoder().DecodeAll(plaintext, make([]byte, 0, blob.DataLength())) - if err != nil { - lastError = errors.Errorf("decompressing blob %v failed: %v", id, err) - continue - } + if err == nil { + err = pbv.Err } - - // check hash - if !restic.Hash(plaintext).Equal(id) { - lastError = errors.Errorf("blob %v returned invalid hash", id) + if err != nil { + debug.Log("error decoding blob %v: %v", blob, err) + lastError = err continue } + plaintext := pbv.Plaintext if len(plaintext) > cap(buf) { return plaintext, nil } @@ -343,12 +298,7 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. return nil, lastError } - return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) -} - -// LookupBlobSize returns the size of blob id. 
-func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) { - return r.idx.LookupSize(restic.BlobHandle{ID: id, Type: tpe}) + return nil, errors.Errorf("loading %v from %v packs failed", blobs[0].BlobHandle, len(blobs)) } func (r *Repository) getZstdEncoder() *zstd.Encoder { @@ -505,7 +455,15 @@ func (r *Repository) decompressUnpacked(p []byte) ([]byte, error) { // SaveUnpacked encrypts data and stores it in the backend. Returned is the // storage hash. -func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) { +func (r *Repository) SaveUnpacked(ctx context.Context, t restic.WriteableFileType, buf []byte) (id restic.ID, err error) { + return r.saveUnpacked(ctx, t.ToFileType(), buf) +} + +func (r *internalRepository) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) { + return r.Repository.saveUnpacked(ctx, t, buf) +} + +func (r *Repository) saveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) { p := buf if t != restic.ConfigFile { p, err = r.compressUnpacked(p) @@ -566,17 +524,25 @@ func (r *Repository) verifyUnpacked(buf []byte, t restic.FileType, expected []by return nil } +func (r *Repository) RemoveUnpacked(ctx context.Context, t restic.WriteableFileType, id restic.ID) error { + return r.removeUnpacked(ctx, t.ToFileType(), id) +} + +func (r *internalRepository) RemoveUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error { + return r.Repository.removeUnpacked(ctx, t, id) +} + +func (r *Repository) removeUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error { + return r.be.Remove(ctx, backend.Handle{Type: t, Name: id.String()}) +} + // Flush saves all remaining packs and the index func (r *Repository) Flush(ctx context.Context) error { if err := r.flushPacks(ctx); err != nil { return err } - // Save index after flushing only if noAutoIndexUpdate is not set - if r.noAutoIndexUpdate { - return nil - } - return r.idx.SaveIndex(ctx, r) + return r.idx.SaveIndex(ctx, &internalRepository{r}) } func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) { @@ -587,8 +553,8 @@ func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) innerWg, ctx := errgroup.WithContext(ctx) r.packerWg = innerWg r.uploader = newPackerUploader(ctx, innerWg, r, r.be.Connections()) - r.treePM = newPackerManager(r.key, restic.TreeBlob, r.PackSize(), r.uploader.QueuePacker) - r.dataPM = newPackerManager(r.key, restic.DataBlob, r.PackSize(), r.uploader.QueuePacker) + r.treePM = newPackerManager(r.key, restic.TreeBlob, r.packSize(), r.uploader.QueuePacker) + r.dataPM = newPackerManager(r.key, restic.DataBlob, r.packSize(), r.uploader.QueuePacker) wg.Go(func() error { return innerWg.Wait() @@ -620,18 +586,27 @@ func (r *Repository) flushPacks(ctx context.Context) error { return err } -// Backend returns the backend for the repository. -func (r *Repository) Backend() backend.Backend { - return r.be -} - func (r *Repository) Connections() uint { return r.be.Connections() } -// Index returns the currently used MasterIndex. -func (r *Repository) Index() restic.MasterIndex { - return r.idx +func (r *Repository) LookupBlob(tpe restic.BlobType, id restic.ID) []restic.PackedBlob { + return r.idx.Lookup(restic.BlobHandle{Type: tpe, ID: id}) +} + +// LookupBlobSize returns the size of blob id. 
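+// The argument order is (type, id), matching LookupBlob above. A sketch of an
+// updated call site, mirroring the tests in this patch (illustrative only):
+//
+//	if _, found := repo.LookupBlobSize(h.Type, h.ID); found {
+//		// the index still references this blob
+//	}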
+func (r *Repository) LookupBlobSize(tpe restic.BlobType, id restic.ID) (uint, bool) {
+	return r.idx.LookupSize(restic.BlobHandle{Type: tpe, ID: id})
+}
+
+// ListBlobs runs fn on all blobs known to the index. When the context is cancelled,
+// the index iteration returns immediately with ctx.Err(). While the iteration is
+// running, any modification of the index is blocked.
+func (r *Repository) ListBlobs(ctx context.Context, fn func(restic.PackedBlob)) error {
+	return r.idx.Each(ctx, fn)
+}
+
+func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) <-chan restic.PackBlobs {
+	return r.idx.ListPacks(ctx, packs)
+}
 
 // SetIndex instructs the repository to use the given index.
@@ -640,44 +615,18 @@ func (r *Repository) SetIndex(i restic.MasterIndex) error {
 	return r.prepareCache()
 }
 
+func (r *Repository) clearIndex() {
+	r.idx = index.NewMasterIndex()
+}
+
 // LoadIndex loads all index files from the backend in parallel and stores them
 func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
 	debug.Log("Loading index")
 
-	indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile)
-	if err != nil {
-		return err
-	}
-
-	if p != nil {
-		var numIndexFiles uint64
-		err := indexList.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {
-			numIndexFiles++
-			return nil
-		})
-		if err != nil {
-			return err
-		}
-		p.SetMax(numIndexFiles)
-		defer p.Done()
-	}
-
-	err = index.ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
-		if err != nil {
-			return err
-		}
-		r.idx.Insert(idx)
-		if p != nil {
-			p.Add(1)
-		}
-		return nil
-	})
-
-	if err != nil {
-		return err
-	}
+	// reset in-memory index before loading it from the repository
+	r.clearIndex()
 
-	err = r.idx.MergeFinalIndexes()
+	err := r.idx.Load(ctx, r, p, nil)
 	if err != nil {
 		return err
 	}
@@ -691,24 +640,30 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
 		defer cancel()
 
 		invalidIndex := false
-		r.idx.Each(ctx, func(blob restic.PackedBlob) {
+		err := r.idx.Each(ctx, func(blob restic.PackedBlob) {
 			if blob.IsCompressed() {
 				invalidIndex = true
 			}
 		})
+		if err != nil {
+			return err
+		}
 		if invalidIndex {
 			return errors.New("index uses feature not supported by repository version 1")
 		}
 	}
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
 
 	// drop cached pack files which are no longer referenced by the index
 	return r.prepareCache()
 }
 
-// CreateIndexFromPacks creates a new index by reading all given pack files (with sizes).
+// createIndexFromPacks creates a new index by reading all given pack files (with sizes).
 // The index is added to the MasterIndex but not marked as finalized.
 // Returned is the list of pack files which could not be read.
-func (r *Repository) CreateIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) {
+func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) {
 	var m sync.Mutex
 
 	debug.Log("Loading index from pack files")
@@ -771,23 +726,14 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest
 
 // prepareCache initializes the local cache and evicts cached pack files that
 // are no longer referenced by the in-memory index.
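 //
 // A hypothetical caller goes through SetIndex above, which finishes by calling
 // prepareCache (sketch for orientation, not part of this patch):
 //
 //	if err := repo.SetIndex(newMasterIndex); err != nil {
 //		return err
 //	}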
func (r *Repository) prepareCache() error { - if r.Cache == nil { + if r.cache == nil { return nil } - indexIDs := r.idx.IDs() - debug.Log("prepare cache with %d index files", len(indexIDs)) - - // clear old index files - err := r.Cache.Clear(restic.IndexFile, indexIDs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) - } - packs := r.idx.Packs(restic.NewIDSet()) // clear old packs - err = r.Cache.Clear(restic.PackFile, packs) + err := r.cache.Clear(restic.PackFile, packs) if err != nil { fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) } @@ -841,6 +787,23 @@ func (r *Repository) Init(ctx context.Context, version uint, password string, ch if err == nil { return errors.New("repository master key and config already initialized") } + // double check to make sure that a repository is not accidentally reinitialized + // if the backend somehow fails to stat the config file. An initialized repository + // must always contain at least one key file. + if err := r.List(ctx, restic.KeyFile, func(_ restic.ID, _ int64) error { + return errors.New("repository already contains keys") + }); err != nil { + return err + } + // Also check for snapshots to detect repositories with a misconfigured retention + // policy that deletes files older than x days. For such repositories usually the + // config and key files are removed first and therefore the check would not detect + // the old repository. + if err := r.List(ctx, restic.SnapshotFile, func(_ restic.ID, _ int64) error { + return errors.New("repository already contains snapshots") + }); err != nil { + return err + } cfg, err := restic.CreateConfig(version) if err != nil { @@ -864,7 +827,7 @@ func (r *Repository) init(ctx context.Context, password string, cfg restic.Confi r.key = key.master r.keyID = key.ID() r.setConfig(cfg) - return restic.SaveConfig(ctx, r, cfg) + return restic.SaveConfig(ctx, &internalRepository{r}, cfg) } // Key returns the current master key. @@ -894,7 +857,17 @@ func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, uint32, error) { h := backend.Handle{Type: restic.PackFile, Name: id.String()} - return pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size) + entries, hdrSize, err := pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size) + if err != nil { + if r.cache != nil { + // ignore error as there is not much we can do here + _ = r.cache.Forget(h) + } + + // retry on error + entries, hdrSize, err = pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size) + } + return entries, hdrSize, err } // Delete calls backend.Delete() if implemented, and returns an error @@ -917,6 +890,10 @@ func (r *Repository) Close() error { // occupies in the repo (compressed or not, including encryption overhead). func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) { + if int64(len(buf)) > math.MaxUint32 { + return restic.ID{}, false, 0, fmt.Errorf("blob is larger than 4GB") + } + // compute plaintext hash if not already set if id.IsNull() { // Special case the hash calculation for all zero chunks. 
This is especially @@ -943,19 +920,21 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte } type backendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error +type loadBlobFn func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) -// Skip sections with more than 4MB unused blobs -const maxUnusedRange = 4 * 1024 * 1024 +// Skip sections with more than 1MB unused blobs +const maxUnusedRange = 1 * 1024 * 1024 // LoadBlobsFromPack loads the listed blobs from the specified pack file. The plaintext blob is passed to // the handleBlobFn callback or an error if decryption failed or the blob hash does not match. // handleBlobFn is called at most once for each blob. If the callback returns an error, -// then LoadBlobsFromPack will abort and not retry it. +// then LoadBlobsFromPack will abort and not retry it. The buf passed to the callback is only valid within +// this specific call. The callback must not keep a reference to buf. func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { - return streamPack(ctx, r.Backend().Load, r.key, packID, blobs, handleBlobFn) + return streamPack(ctx, r.be.Load, r.LoadBlob, r.getZstdDecoder(), r.key, packID, blobs, handleBlobFn) } -func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { +func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { if len(blobs) == 0 { // nothing to do return nil @@ -967,14 +946,29 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, pack lowerIdx := 0 lastPos := blobs[0].Offset + const maxChunkSize = 2 * DefaultPackSize + for i := 0; i < len(blobs); i++ { if blobs[i].Offset < lastPos { // don't wait for streamPackPart to fail return errors.Errorf("overlapping blobs in pack %v", packID) } + + chunkSizeAfter := (blobs[i].Offset + blobs[i].Length) - blobs[lowerIdx].Offset + split := false + // split if the chunk would become larger than maxChunkSize. 
Oversized chunks are + // handled by the requirement that the chunk contains at least one blob (i > lowerIdx) + if i > lowerIdx && chunkSizeAfter >= maxChunkSize { + split = true + } + // skip too large gaps as a new request is typically much cheaper than data transfers if blobs[i].Offset-lastPos > maxUnusedRange { + split = true + } + + if split { // load everything up to the skipped file section - err := streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:i], handleBlobFn) + err := streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:i], handleBlobFn) if err != nil { return err } @@ -983,82 +977,137 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, pack lastPos = blobs[i].Offset + blobs[i].Length } // load remainder - return streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:], handleBlobFn) + return streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:], handleBlobFn) } -func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { - h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: false} +func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { + h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: blobs[0].Type.IsMetadata()} dataStart := blobs[0].Offset dataEnd := blobs[len(blobs)-1].Offset + blobs[len(blobs)-1].Length debug.Log("streaming pack %v (%d to %d bytes), blobs: %v", packID, dataStart, dataEnd, len(blobs)) - dec, err := zstd.NewReader(nil) + data := make([]byte, int(dataEnd-dataStart)) + err := beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error { + _, cerr := io.ReadFull(rd, data) + return cerr + }) + // prevent callbacks after cancellation + if ctx.Err() != nil { + return ctx.Err() + } if err != nil { - panic(dec) + // the context is only still valid if handleBlobFn never returned an error + if loadBlobFn != nil { + // check whether we can get the remaining blobs somewhere else + for _, entry := range blobs { + buf, ierr := loadBlobFn(ctx, entry.Type, entry.ID, nil) + err = handleBlobFn(entry.BlobHandle, buf, ierr) + if err != nil { + break + } + } + } + return errors.Wrap(err, "StreamPack") } - defer dec.Close() - ctx, cancel := context.WithCancel(ctx) - // stream blobs in pack - err = beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error { - // prevent callbacks after cancellation + it := newPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec) + + for { if ctx.Err() != nil { return ctx.Err() } - bufferSize := int(dataEnd - dataStart) - if bufferSize > MaxStreamBufferSize { - bufferSize = MaxStreamBufferSize + + val, err := it.Next() + if err == errPackEOF { + break + } else if err != nil { + return err } - bufRd := bufio.NewReaderSize(rd, bufferSize) - it := NewPackBlobIterator(packID, bufRd, dataStart, blobs, key, dec) - - for { - val, err := it.Next() - if err == ErrPackEOF { - break - } else if err != nil { - return err - } - err = handleBlobFn(val.Handle, val.Plaintext, val.Err) - if err != nil { - cancel() - return backoff.Permanent(err) + if val.Err != nil && loadBlobFn != nil { + var ierr error + // check whether we can get a valid copy 
somewhere else
+			buf, ierr := loadBlobFn(ctx, val.Handle.Type, val.Handle.ID, nil)
+			if ierr == nil {
+				// success
+				val.Plaintext = buf
+				val.Err = nil
 			}
-			// ensure that each blob is only passed once to handleBlobFn
-			blobs = blobs[1:]
 		}
-		return nil
-	})
+
+		err = handleBlobFn(val.Handle, val.Plaintext, val.Err)
+		if err != nil {
+			return err
+		}
+		// ensure that each blob is only passed once to handleBlobFn
+		blobs = blobs[1:]
+	}
 
 	return errors.Wrap(err, "StreamPack")
 }
 
-type PackBlobIterator struct {
+// discardReader allows the packBlobIterator to perform zero copy
+// reads if the underlying data source is a byte slice.
+type discardReader interface {
+	Discard(n int) (discarded int, err error)
+	// ReadFull reads the next n bytes into a byte slice. The caller must not
+	// retain a reference to the byte slice. Modifications are only allowed within
+	// the boundaries of the returned slice.
+	ReadFull(n int) (buf []byte, err error)
+}
+
+type byteReader struct {
+	buf []byte
+}
+
+func newByteReader(buf []byte) *byteReader {
+	return &byteReader{
+		buf: buf,
+	}
+}
+
+func (b *byteReader) Discard(n int) (discarded int, err error) {
+	if len(b.buf) < n {
+		return 0, io.ErrUnexpectedEOF
+	}
+	b.buf = b.buf[n:]
+	return n, nil
+}
+
+func (b *byteReader) ReadFull(n int) (buf []byte, err error) {
+	if len(b.buf) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	buf = b.buf[:n]
+	b.buf = b.buf[n:]
+	return buf, nil
+}
+
+type packBlobIterator struct {
 	packID        restic.ID
-	rd            *bufio.Reader
+	rd            discardReader
 	currentOffset uint
 
 	blobs []restic.Blob
 	key   *crypto.Key
 	dec   *zstd.Decoder
 
-	buf    []byte
 	decode []byte
 }
 
-type PackBlobValue struct {
+type packBlobValue struct {
 	Handle    restic.BlobHandle
 	Plaintext []byte
 	Err       error
 }
 
-var ErrPackEOF = errors.New("reached EOF of pack file")
+var errPackEOF = errors.New("reached EOF of pack file")
 
-func NewPackBlobIterator(packID restic.ID, rd *bufio.Reader, currentOffset uint,
-	blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *PackBlobIterator {
-	return &PackBlobIterator{
+func newPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint,
+	blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *packBlobIterator {
+	return &packBlobIterator{
 		packID:        packID,
 		rd:            rd,
 		currentOffset: currentOffset,
@@ -1069,9 +1118,9 @@ func NewPackBlobIterator(packID restic.ID, rd *bufio.Reader, currentOffset uint,
 }
 
 // Next returns the next blob, an error or ErrPackEOF if all blobs were read
-func (b *PackBlobIterator) Next() (PackBlobValue, error) {
+func (b *packBlobIterator) Next() (packBlobValue, error) {
 	if len(b.blobs) == 0 {
-		return PackBlobValue{}, ErrPackEOF
+		return packBlobValue{}, errPackEOF
 	}
 
 	entry := b.blobs[0]
@@ -1079,50 +1128,44 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) {
 	skipBytes := int(entry.Offset - b.currentOffset)
 	if skipBytes < 0 {
-		return PackBlobValue{}, errors.Errorf("overlapping blobs in pack %v", b.packID)
+		return packBlobValue{}, fmt.Errorf("overlapping blobs in pack %v", b.packID)
 	}
 
 	_, err := b.rd.Discard(skipBytes)
 	if err != nil {
-		return PackBlobValue{}, err
+		return packBlobValue{}, err
 	}
 	b.currentOffset = entry.Offset
 
 	h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
 	debug.Log("  process blob %v, skipped %d, %v", h, skipBytes, entry)
 
-	if uint(cap(b.buf)) < entry.Length {
-		b.buf = make([]byte, entry.Length)
-	}
-	b.buf = b.buf[:entry.Length]
-
-	n, err := io.ReadFull(b.rd, b.buf)
+	buf, err := b.rd.ReadFull(int(entry.Length))
 	if err != nil {
 		debug.Log("  read error %v", err)
-		return
PackBlobValue{}, errors.Wrap(err, "ReadFull") + return packBlobValue{}, fmt.Errorf("readFull: %w", err) } - if n != len(b.buf) { - return PackBlobValue{}, errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v", - h, b.packID.Str(), len(b.buf), n) - } b.currentOffset = entry.Offset + entry.Length if int(entry.Length) <= b.key.NonceSize() { debug.Log("%v", b.blobs) - return PackBlobValue{}, errors.Errorf("invalid blob length %v", entry) + return packBlobValue{}, fmt.Errorf("invalid blob length %v", entry) } // decryption errors are likely permanent, give the caller a chance to skip them - nonce, ciphertext := b.buf[:b.key.NonceSize()], b.buf[b.key.NonceSize():] + nonce, ciphertext := buf[:b.key.NonceSize()], buf[b.key.NonceSize():] plaintext, err := b.key.Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + err = fmt.Errorf("decrypting blob %v from %v failed: %w", h, b.packID.Str(), err) + } if err == nil && entry.IsCompressed() { // DecodeAll will allocate a slice if it is not large enough since it // knows the decompressed size (because we're using EncodeAll) b.decode, err = b.dec.DecodeAll(plaintext, b.decode[:0]) plaintext = b.decode if err != nil { - err = errors.Errorf("decompressing blob %v failed: %v", h, err) + err = fmt.Errorf("decompressing blob %v from %v failed: %w", h, b.packID.Str(), err) } } if err == nil { @@ -1130,12 +1173,12 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { if !id.Equal(entry.ID) { debug.Log("read blob %v/%v from %v: wrong data returned, hash is %v", h.Type, h.ID, b.packID.Str(), id) - err = errors.Errorf("read blob %v from %v: wrong data returned, hash is %v", + err = fmt.Errorf("read blob %v from %v: wrong data returned, hash is %v", h, b.packID.Str(), id) } } - return PackBlobValue{entry.BlobHandle, plaintext, err}, nil + return packBlobValue{entry.BlobHandle, plaintext, err}, nil } var zeroChunkOnce sync.Once diff --git a/internal/repository/repository_internal_test.go b/internal/repository/repository_internal_test.go index 0c7115bc96d..edec4aa482e 100644 --- a/internal/repository/repository_internal_test.go +++ b/internal/repository/repository_internal_test.go @@ -16,6 +16,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -84,6 +85,53 @@ func BenchmarkSortCachedPacksFirst(b *testing.B) { } } +func BenchmarkLoadIndex(b *testing.B) { + BenchmarkAllVersions(b, benchmarkLoadIndex) +} + +func benchmarkLoadIndex(b *testing.B, version uint) { + TestUseLowSecurityKDFParameters(b) + + repo, _, be := TestRepositoryWithVersion(b, version) + idx := index.NewIndex() + + for i := 0; i < 5000; i++ { + idx.StorePack(restic.NewRandomID(), []restic.Blob{ + { + BlobHandle: restic.NewRandomBlobHandle(), + Length: 1234, + Offset: 1235, + }, + }) + } + idx.Finalize() + + id, err := idx.SaveIndex(context.TODO(), &internalRepository{repo}) + rtest.OK(b, err) + + b.Logf("index saved as %v", id.Str()) + fi, err := be.Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()}) + rtest.OK(b, err) + b.Logf("filesize is %v", fi.Size) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := loadIndex(context.TODO(), repo, id) + rtest.OK(b, err) + } +} + +// loadIndex loads the index id from backend and returns it. 
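+// It simply chains LoadUnpacked and DecodeIndex; the benchmark above calls it
+// as (for reference):
+//
+//	_, err := loadIndex(context.TODO(), repo, id)
+//	rtest.OK(b, err)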
+func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*index.Index, error) {
+	buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
+	if err != nil {
+		return nil, err
+	}
+
+	return index.DecodeIndex(buf, id)
+}
+
 // buildPackfileWithoutHeader returns a manually built pack file without a header.
 func buildPackfileWithoutHeader(blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
 	opts := []zstd.EOption{
@@ -146,14 +194,14 @@ func TestStreamPack(t *testing.T) {
 }
 
 func testStreamPack(t *testing.T, version uint) {
-	// always use the same key for deterministic output
-	const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}`
-
-	var key crypto.Key
-	err := json.Unmarshal([]byte(jsonKey), &key)
+	dec, err := zstd.NewReader(nil)
 	if err != nil {
-		t.Fatal(err)
+		panic(err)
 	}
+	defer dec.Close()
+
+	// always use the same key for deterministic output
+	key := testKey(t)
 
 	blobSizes := []int{
 		5522811,
@@ -276,7 +324,7 @@ func testStreamPack(t *testing.T, version uint) {
 				loadCalls = 0
 				shortFirstLoad = test.shortFirstLoad
 
-				err = streamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
+				err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob)
 				if err != nil {
 					t.Fatal(err)
 				}
@@ -339,7 +387,7 @@ func testStreamPack(t *testing.T, version uint) {
 					return err
 				}
 
-				err = streamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
+				err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob)
 				if err == nil {
 					t.Fatalf("wanted error %v, got nil", test.err)
 				}
@@ -353,7 +401,7 @@ func testStreamPack(t *testing.T, version uint) {
 }
 
 func TestBlobVerification(t *testing.T) {
-	repo := TestRepository(t).(*Repository)
+	repo := TestRepository(t)
 
 	type DamageType string
 	const (
@@ -402,7 +450,7 @@ func TestBlobVerification(t *testing.T) {
 }
 
 func TestUnpackedVerification(t *testing.T) {
-	repo := TestRepository(t).(*Repository)
+	repo := TestRepository(t)
 
 	type DamageType string
 	const (
@@ -449,3 +497,83 @@ func TestUnpackedVerification(t *testing.T) {
 		}
 	}
 }
+
+func testKey(t *testing.T) crypto.Key {
+	const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}`
+
+	var key crypto.Key
+	err := json.Unmarshal([]byte(jsonKey), &key)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return key
+}
+
+func TestStreamPackFallback(t *testing.T) {
+	dec, err := zstd.NewReader(nil)
+	if err != nil {
+		panic(err)
+	}
+	defer dec.Close()
+
+	test := func(t *testing.T, failLoad bool) {
+		key := testKey(t)
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		plaintext := rtest.Random(800, 42)
+		blobID := restic.Hash(plaintext)
+		blobs := []restic.Blob{
+			{
+				Length: uint(crypto.CiphertextLength(len(plaintext))),
+				Offset: 0,
+				BlobHandle: restic.BlobHandle{
+					ID:   blobID,
+					Type: restic.DataBlob,
+				},
+			},
+		}
+
+		var loadPack backendLoadFn
+		if failLoad {
+			loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+				return errors.New("load error")
+			}
+		} else {
+			loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+				// just return an empty array to provoke an error
+				data := make([]byte, length)
+				return fn(bytes.NewReader(data))
+			}
+		}
+
+		loadBlob := func(ctx context.Context, t restic.BlobType, id
restic.ID, buf []byte) ([]byte, error) { + if id == blobID { + return plaintext, nil + } + return nil, errors.New("unknown blob") + } + + blobOK := false + handleBlob := func(blob restic.BlobHandle, buf []byte, err error) error { + rtest.OK(t, err) + rtest.Equals(t, blobID, blob.ID) + rtest.Equals(t, plaintext, buf) + blobOK = true + return err + } + + err := streamPack(ctx, loadPack, loadBlob, dec, &key, restic.ID{}, blobs, handleBlob) + rtest.OK(t, err) + rtest.Assert(t, blobOK, "blob failed to load") + } + + t.Run("corrupted blob", func(t *testing.T) { + test(t, false) + }) + + // test fallback for failed pack loading + t.Run("failed load", func(t *testing.T) { + test(t, true) + }) +} diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 0fa8e4d4acb..1b0d47c8f46 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -4,20 +4,22 @@ import ( "bytes" "context" "crypto/sha256" - "fmt" "io" "math/rand" - "os" "path/filepath" "strings" + "sync" "testing" "time" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/crypto" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" @@ -41,7 +43,7 @@ func testSaveCalculateID(t *testing.T, version uint) { } func testSave(t *testing.T, version uint, calculateID bool) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) for _, size := range testSizes { data := make([]byte, size) @@ -84,7 +86,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { } func benchmarkSaveAndEncrypt(t *testing.B, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) size := 4 << 20 // 4MiB data := make([]byte, size) @@ -110,7 +112,7 @@ func TestLoadBlob(t *testing.T) { } func testLoadBlob(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -139,12 +141,34 @@ func testLoadBlob(t *testing.T, version uint) { } } +func TestLoadBlobBroken(t *testing.T) { + be := mem.New() + repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) + buf := rtest.Random(42, 1000) + + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + id, _, _, err := repo.SaveBlob(context.TODO(), restic.TreeBlob, buf, restic.ID{}, false) + rtest.OK(t, err) + rtest.OK(t, repo.Flush(context.Background())) + + // setup cache after saving the blob to make sure that the damageOnceBackend damages the cached data + c := cache.TestNewCache(t) + repo.UseCache(c) + + data, err := repo.LoadBlob(context.TODO(), restic.TreeBlob, id, nil) + rtest.OK(t, err) + rtest.Assert(t, bytes.Equal(buf, data), "data mismatch") + pack := repo.LookupBlob(restic.TreeBlob, id)[0].PackID + rtest.Assert(t, c.Has(backend.Handle{Type: restic.PackFile, Name: pack.String()}), "expected tree pack to be 
cached") +} + func BenchmarkLoadBlob(b *testing.B) { repository.BenchmarkAllVersions(b, benchmarkLoadBlob) } func benchmarkLoadBlob(b *testing.B, version uint) { - repo := repository.TestRepositoryWithVersion(b, version) + repo, _, _ := repository.TestRepositoryWithVersion(b, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -185,7 +209,7 @@ func BenchmarkLoadUnpacked(b *testing.B) { } func benchmarkLoadUnpacked(b *testing.B, version uint) { - repo := repository.TestRepositoryWithVersion(b, version) + repo, _, _ := repository.TestRepositoryWithVersion(b, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -193,7 +217,7 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { dataID := restic.Hash(buf) - storageID, err := repo.SaveUnpacked(context.TODO(), restic.PackFile, buf) + storageID, err := repo.SaveUnpacked(context.TODO(), restic.WriteableSnapshotFile, buf) rtest.OK(b, err) // rtest.OK(b, repo.Flush()) @@ -201,7 +225,7 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { b.SetBytes(int64(length)) for i := 0; i < b.N; i++ { - data, err := repo.LoadUnpacked(context.TODO(), restic.PackFile, storageID) + data, err := repo.LoadUnpacked(context.TODO(), restic.SnapshotFile, storageID) rtest.OK(b, err) // See comment in BenchmarkLoadBlob. @@ -221,10 +245,9 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, _, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } @@ -235,16 +258,11 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* return nil, err } - idx, oldFormat, err := index.DecodeIndex(buf, id) - if oldFormat { - fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str()) - } - return idx, err + return index.DecodeIndex(buf, id) } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) - defer cleanup() + repo, _, be := repository.TestRepositoryWithVersion(t, 0) data := rtest.Random(23, 12345) id := restic.Hash(data) @@ -252,21 +270,17 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { // damage buffer data[0] ^= 0xff - repo := repository.TestOpenLocal(t, repodir) // store broken file - err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) + err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher())) rtest.OK(t, err) - // without a retry backend this will just return an error that the file is broken _, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id) - if err == nil { - t.Fatal("missing expected error") - } - rtest.Assert(t, strings.Contains(err.Error(), "invalid data returned"), "unexpected error: %v", err) + rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "unexpected error: %v", err) } type damageOnceBackend struct { backend.Backend + m sync.Map } func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { @@ -274,13 +288,14 @@ func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length if h.Type == restic.ConfigFile { return be.Backend.Load(ctx, h, length, offset, fn) } - // return broken data on the first try - err := be.Backend.Load(ctx, h, length+1, 
offset, fn) - if err != nil { - // retry - err = be.Backend.Load(ctx, h, length, offset, fn) + + h.IsMetadata = false + _, isRetry := be.m.LoadOrStore(h, true) + if !isRetry { + // return broken data on the first try + offset++ } - return err + return be.Backend.Load(ctx, h, length, offset, fn) } func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { @@ -289,51 +304,11 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2}) rtest.OK(t, err) - repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{}) - rtest.OK(t, err) - err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "") - rtest.OK(t, err) + repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be}) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } -func BenchmarkLoadIndex(b *testing.B) { - repository.BenchmarkAllVersions(b, benchmarkLoadIndex) -} - -func benchmarkLoadIndex(b *testing.B, version uint) { - repository.TestUseLowSecurityKDFParameters(b) - - repo := repository.TestRepositoryWithVersion(b, version) - idx := index.NewIndex() - - for i := 0; i < 5000; i++ { - idx.StorePack(restic.NewRandomID(), []restic.Blob{ - { - BlobHandle: restic.NewRandomBlobHandle(), - Length: 1234, - Offset: 1235, - }, - }) - } - idx.Finalize() - - id, err := index.SaveIndex(context.TODO(), repo, idx) - rtest.OK(b, err) - - b.Logf("index saved as %v", id.Str()) - fi, err := repo.Backend().Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()}) - rtest.OK(b, err) - b.Logf("filesize is %v", fi.Size) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, err := loadIndex(context.TODO(), repo, id) - rtest.OK(b, err) - } -} - // saveRandomDataBlobs generates random data blobs and saves them to the repository. 
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) { var wg errgroup.Group @@ -356,9 +331,9 @@ func TestRepositoryIncrementalIndex(t *testing.T) { } func testRepositoryIncrementalIndex(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version).(*repository.Repository) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) - index.IndexFull = func(*index.Index, bool) bool { return true } + index.IndexFull = func(*index.Index) bool { return true } // add a few rounds of packs for j := 0; j < 5; j++ { @@ -376,13 +351,13 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) { idx, err := loadIndex(context.TODO(), repo, id) rtest.OK(t, err) - idx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if _, ok := packEntries[pb.PackID]; !ok { packEntries[pb.PackID] = make(map[restic.ID]struct{}) } packEntries[pb.PackID][id] = struct{}{} - }) + })) return nil }) if err != nil { @@ -404,3 +379,64 @@ func TestInvalidCompression(t *testing.T) { _, err = repository.New(nil, repository.Options{Compression: comp}) rtest.Assert(t, err != nil, "missing error") } + +func TestListPack(t *testing.T) { + be := mem.New() + repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) + buf := rtest.Random(42, 1000) + + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + id, _, _, err := repo.SaveBlob(context.TODO(), restic.TreeBlob, buf, restic.ID{}, false) + rtest.OK(t, err) + rtest.OK(t, repo.Flush(context.Background())) + + // setup cache after saving the blob to make sure that the damageOnceBackend damages the cached data + c := cache.TestNewCache(t) + repo.UseCache(c) + + // Forcibly cache pack file + packID := repo.LookupBlob(restic.TreeBlob, id)[0].PackID + rtest.OK(t, be.Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil })) + + // Get size to list pack + var size int64 + rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, sz int64) error { + if id == packID { + size = sz + } + return nil + })) + + blobs, _, err := repo.ListPack(context.TODO(), packID, size) + rtest.OK(t, err) + rtest.Assert(t, len(blobs) == 1 && blobs[0].ID == id, "unexpected blobs in pack: %v", blobs) + + rtest.Assert(t, !c.Has(backend.Handle{Type: restic.PackFile, Name: packID.String()}), "tree pack should no longer be cached as ListPack does not set IsMetadata in the backend.Handle") +} + +func TestNoDoubleInit(t *testing.T) { + r, _, be := repository.TestRepositoryWithVersion(t, restic.StableRepoVersion) + + repo, err := repository.New(be, repository.Options{}) + rtest.OK(t, err) + + pol := r.Config().ChunkerPolynomial + err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol) + rtest.Assert(t, strings.Contains(err.Error(), "repository master key and config already initialized"), "expected config exist error, got %q", err) + + // must also prevent init if only keys exist + rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile})) + err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol) + rtest.Assert(t, strings.Contains(err.Error(), "repository already contains keys"), "expected already contains keys error, got %q", err) + + // must also prevent init if a snapshot exists and keys were deleted + var data [32]byte + 
hash := restic.Hash(data[:]) + rtest.OK(t, be.Save(context.TODO(), backend.Handle{Type: backend.SnapshotFile, Name: hash.String()}, backend.NewByteReader(data[:], be.Hasher()))) + rtest.OK(t, be.List(context.TODO(), restic.KeyFile, func(fi backend.FileInfo) error { + return be.Remove(context.TODO(), backend.Handle{Type: restic.KeyFile, Name: fi.Name}) + })) + err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol) + rtest.Assert(t, strings.Contains(err.Error(), "repository already contains snapshots"), "expected already contains snapshots error, got %q", err) +} diff --git a/internal/repository/testing.go b/internal/repository/testing.go index dbbdbeb07b1..a8321faadff 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "sync" "testing" "github.com/restic/restic/internal/backend" @@ -17,21 +18,22 @@ import ( "github.com/restic/chunker" ) -// testKDFParams are the parameters for the KDF to be used during testing. -var testKDFParams = crypto.Params{ - N: 128, - R: 1, - P: 1, -} - type logger interface { Logf(format string, args ...interface{}) } +var paramsOnce sync.Once + // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &testKDFParams + paramsOnce.Do(func() { + params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } + }) } // TestBackend returns a fully configured in-memory backend. @@ -39,12 +41,12 @@ func TestBackend(_ testing.TB) backend.Backend { return mem.New() } -const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173) +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. -func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) restic.Repository { +func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) (*Repository, backend.Backend) { t.Helper() TestUseLowSecurityKDFParameters(t) restic.TestDisableCheckPolynomial(t) @@ -58,48 +60,60 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, TestChunkerPol, version) - err = repo.init(context.TODO(), test.TestPassword, cfg) + if version == 0 { + version = restic.StableRepoVersion + } + pol := testChunkerPol + err = repo.Init(context.TODO(), version, test.TestPassword, &pol) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } - return repo + return repo, be } // TestRepository returns a repository initialized with a test password on an // in-memory backend. When the environment variable RESTIC_TEST_REPO is set to // a non-existing directory, a local backend is created there and this is used // instead. The directory is not removed, but left there for inspection. 
-func TestRepository(t testing.TB) restic.Repository {
+func TestRepository(t testing.TB) *Repository {
 	t.Helper()
-	return TestRepositoryWithVersion(t, 0)
+	repo, _, _ := TestRepositoryWithVersion(t, 0)
+	return repo
 }
 
-func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository {
+func TestRepositoryWithVersion(t testing.TB, version uint) (*Repository, restic.Unpacked[restic.FileType], backend.Backend) {
 	t.Helper()
 	dir := os.Getenv("RESTIC_TEST_REPO")
 	opts := Options{}
+	var repo *Repository
+	var be backend.Backend
 	if dir != "" {
 		_, err := os.Stat(dir)
 		if err != nil {
-			be, err := local.Create(context.TODO(), local.Config{Path: dir})
+			lbe, err := local.Create(context.TODO(), local.Config{Path: dir})
 			if err != nil {
 				t.Fatalf("error creating local backend at %v: %v", dir, err)
 			}
-			return TestRepositoryWithBackend(t, be, version, opts)
-		}
-
-		if err == nil {
+			repo, be = TestRepositoryWithBackend(t, lbe, version, opts)
+		} else {
 			t.Logf("directory at %v already exists, using mem backend", dir)
 		}
+	} else {
+		repo, be = TestRepositoryWithBackend(t, nil, version, opts)
 	}
+	return repo, &internalRepository{repo}, be
+}
+
+func TestFromFixture(t testing.TB, repoFixture string) (*Repository, backend.Backend, func()) {
+	repodir, cleanup := test.Env(t, repoFixture)
+	repo, be := TestOpenLocal(t, repodir)
 
-	return TestRepositoryWithBackend(t, nil, version, opts)
+	return repo, be, cleanup
 }
 
 // TestOpenLocal opens a local repository.
-func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) {
+func TestOpenLocal(t testing.TB, dir string) (*Repository, backend.Backend) {
 	var be backend.Backend
 	be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2})
 	if err != nil {
@@ -108,6 +122,10 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) {
 
 	be = retry.New(be, 3, nil, nil)
 
+	return TestOpenBackend(t, be), be
+}
+
+func TestOpenBackend(t testing.TB, be backend.Backend) *Repository {
 	repo, err := New(be, Options{})
 	if err != nil {
 		t.Fatal(err)
@@ -139,3 +157,8 @@ func BenchmarkAllVersions(b *testing.B, bench VersionedBenchmark) {
 		})
 	}
 }
+
+func TestNewLock(t *testing.T, repo *Repository, exclusive bool) (*restic.Lock, error) {
+	// TODO get rid of this test helper
+	return restic.NewLock(context.TODO(), &internalRepository{repo}, exclusive)
+}
diff --git a/internal/repository/upgrade_repo.go b/internal/repository/upgrade_repo.go
new file mode 100644
index 00000000000..0a91b109320
--- /dev/null
+++ b/internal/repository/upgrade_repo.go
@@ -0,0 +1,103 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/restic/restic/internal/backend"
+	"github.com/restic/restic/internal/restic"
+)
+
+type upgradeRepoV2Error struct {
+	UploadNewConfigError   error
+	ReuploadOldConfigError error
+
+	BackupFilePath string
+}
+
+func (err *upgradeRepoV2Error) Error() string {
+	if err.ReuploadOldConfigError != nil {
+		return fmt.Sprintf("error uploading config (%v), re-uploading the old config failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath)
+	}
+
+	return fmt.Sprintf("error uploading config (%v), re-uploading the old config succeeded, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath)
+}
+
+func (err *upgradeRepoV2Error) Unwrap() error {
+	// consider the original upload error as the primary cause
+	return err.UploadNewConfigError
+}
+
+func upgradeRepository(ctx context.Context, repo
*Repository) error { + h := backend.Handle{Type: backend.ConfigFile} + + if !repo.be.HasAtomicReplace() { + // remove the original file for backends which do not support atomic overwriting + err := repo.be.Remove(ctx, h) + if err != nil { + return fmt.Errorf("remove config failed: %w", err) + } + } + + // upgrade config + cfg := repo.Config() + cfg.Version = 2 + + err := restic.SaveConfig(ctx, &internalRepository{repo}, cfg) + if err != nil { + return fmt.Errorf("save new config file failed: %w", err) + } + + return nil +} + +func UpgradeRepo(ctx context.Context, repo *Repository) error { + if repo.Config().Version != 1 { + return fmt.Errorf("repository has version %v, only upgrades from version 1 are supported", repo.Config().Version) + } + + tempdir, err := os.MkdirTemp("", "restic-migrate-upgrade-repo-v2-") + if err != nil { + return fmt.Errorf("create temp dir failed: %w", err) + } + + h := backend.Handle{Type: restic.ConfigFile} + + // read raw config file and save it to a temp dir, just in case + rawConfigFile, err := repo.LoadRaw(ctx, restic.ConfigFile, restic.ID{}) + if err != nil { + return fmt.Errorf("load config file failed: %w", err) + } + + backupFileName := filepath.Join(tempdir, "config") + err = os.WriteFile(backupFileName, rawConfigFile, 0600) + if err != nil { + return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) + } + + // run the upgrade + err = upgradeRepository(ctx, repo) + if err != nil { + + // build an error we can return to the caller + repoError := &upgradeRepoV2Error{ + UploadNewConfigError: err, + BackupFilePath: backupFileName, + } + + // try contingency methods, reupload the original file + _ = repo.be.Remove(ctx, h) + err = repo.be.Save(ctx, h, backend.NewByteReader(rawConfigFile, nil)) + if err != nil { + repoError.ReuploadOldConfigError = err + } + + return repoError + } + + _ = os.Remove(backupFileName) + _ = os.Remove(tempdir) + return nil +} diff --git a/internal/repository/upgrade_repo_test.go b/internal/repository/upgrade_repo_test.go new file mode 100644 index 00000000000..c6bc574cf4e --- /dev/null +++ b/internal/repository/upgrade_repo_test.go @@ -0,0 +1,82 @@ +package repository + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/errors" + rtest "github.com/restic/restic/internal/test" +) + +func TestUpgradeRepoV2(t *testing.T) { + repo, _, _ := TestRepositoryWithVersion(t, 1) + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + err := UpgradeRepo(context.Background(), repo) + rtest.OK(t, err) +} + +type failBackend struct { + backend.Backend + + mu sync.Mutex + ConfigFileSavesUntilError uint +} + +func (be *failBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { + if h.Type != backend.ConfigFile { + return be.Backend.Save(ctx, h, rd) + } + + be.mu.Lock() + if be.ConfigFileSavesUntilError == 0 { + be.mu.Unlock() + return errors.New("failure induced for testing") + } + + be.ConfigFileSavesUntilError-- + be.mu.Unlock() + + return be.Backend.Save(ctx, h, rd) +} + +func TestUpgradeRepoV2Failure(t *testing.T) { + be := TestBackend(t) + + // wrap backend so that it fails upgrading the config after the initial write + be = &failBackend{ + ConfigFileSavesUntilError: 1, + Backend: be, + } + + repo, _ := TestRepositoryWithBackend(t, be, 1, Options{}) + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + err := 
UpgradeRepo(context.Background(), repo)
+	if err == nil {
+		t.Fatal("expected error returned from UpgradeRepo(), got nil")
+	}
+
+	upgradeErr := err.(*upgradeRepoV2Error)
+	if upgradeErr.UploadNewConfigError == nil {
+		t.Fatal("expected upload error, got nil")
+	}
+
+	if upgradeErr.ReuploadOldConfigError == nil {
+		t.Fatal("expected reupload error, got nil")
+	}
+
+	if upgradeErr.BackupFilePath == "" {
+		t.Fatal("no backup file path found")
+	}
+	rtest.OK(t, os.Remove(upgradeErr.BackupFilePath))
+	rtest.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath)))
+}
diff --git a/internal/restic/backend_find.go b/internal/restic/backend_find.go
index a6eacabd0a9..2f00595c49e 100644
--- a/internal/restic/backend_find.go
+++ b/internal/restic/backend_find.go
@@ -30,7 +30,7 @@ func Find(ctx context.Context, be Lister, t FileType, prefix string) (ID, error)
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	err := be.List(ctx, t, func(id ID, size int64) error {
+	err := be.List(ctx, t, func(id ID, _ int64) error {
 		name := id.String()
 		if len(name) >= len(prefix) && prefix == name[:len(prefix)] {
 			if match.IsNull() {
diff --git a/internal/restic/blob_set_test.go b/internal/restic/blob_set_test.go
index e26b48fe964..4e0961aa506 100644
--- a/internal/restic/blob_set_test.go
+++ b/internal/restic/blob_set_test.go
@@ -9,6 +9,8 @@ import (
 )
 
 func TestBlobSetString(t *testing.T) {
+	random := rand.New(rand.NewSource(42))
+
 	s := NewBlobSet()
 	rtest.Equals(t, "{}", s.String())
 
@@ -21,7 +23,7 @@ func TestBlobSetString(t *testing.T) {
 	var h BlobHandle
 	for i := 0; i < 100; i++ {
 		h.Type = DataBlob
-		_, _ = rand.Read(h.ID[:])
+		_, _ = random.Read(h.ID[:])
 		s.Insert(h)
 	}
 
diff --git a/internal/restic/config.go b/internal/restic/config.go
index 67ee190bc6c..264792e11eb 100644
--- a/internal/restic/config.go
+++ b/internal/restic/config.go
@@ -2,6 +2,7 @@ package restic
 
 import (
 	"context"
+	"sync"
 	"testing"
 
 	"github.com/restic/restic/internal/errors"
@@ -50,29 +51,16 @@ func CreateConfig(version uint) (Config, error) {
 	return cfg, nil
 }
 
-// TestCreateConfig creates a config for use within tests.
-func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) {
-	cfg.ChunkerPolynomial = pol
-
-	cfg.ID = NewRandomID().String()
-	if version == 0 {
-		version = StableRepoVersion
-	}
-	if version < MinRepoVersion || version > MaxRepoVersion {
-		t.Fatalf("version %d is out of range", version)
-	}
-	cfg.Version = version
-
-	return cfg
-}
-
 var checkPolynomial = true
+var checkPolynomialOnce sync.Once
 
 // TestDisableCheckPolynomial disables the check that the polynomial used for
 // the chunker.
 func TestDisableCheckPolynomial(t testing.TB) {
 	t.Logf("disabling check of the chunker polynomial")
-	checkPolynomial = false
+	checkPolynomialOnce.Do(func() {
+		checkPolynomial = false
+	})
 }
 
 // LoadConfig returns loads, checks and returns the config for a repository.
@@ -99,7 +87,7 @@ func LoadConfig(ctx context.Context, r LoaderUnpacked) (Config, error) {
 	return cfg, nil
 }
 
-func SaveConfig(ctx context.Context, r SaverUnpacked, cfg Config) error {
+func SaveConfig(ctx context.Context, r SaverUnpacked[FileType], cfg Config) error {
 	_, err := SaveJSONUnpacked(ctx, r, ConfigFile, cfg)
 	return err
 }
diff --git a/internal/restic/counted_blob_set.go b/internal/restic/counted_blob_set.go
deleted file mode 100644
index f965d3129f0..00000000000
--- a/internal/restic/counted_blob_set.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package restic
-
-import "sort"
-
-// CountedBlobSet is a set of blobs.
For each blob it also stores a uint8 value -// which can be used to track some information. The CountedBlobSet does not use -// that value in any way. New entries are created with value 0. -type CountedBlobSet map[BlobHandle]uint8 - -// NewCountedBlobSet returns a new CountedBlobSet, populated with ids. -func NewCountedBlobSet(handles ...BlobHandle) CountedBlobSet { - m := make(CountedBlobSet) - for _, h := range handles { - m[h] = 0 - } - - return m -} - -// Has returns true iff id is contained in the set. -func (s CountedBlobSet) Has(h BlobHandle) bool { - _, ok := s[h] - return ok -} - -// Insert adds id to the set. -func (s CountedBlobSet) Insert(h BlobHandle) { - s[h] = 0 -} - -// Delete removes id from the set. -func (s CountedBlobSet) Delete(h BlobHandle) { - delete(s, h) -} - -func (s CountedBlobSet) Len() int { - return len(s) -} - -// List returns a sorted slice of all BlobHandle in the set. -func (s CountedBlobSet) List() BlobHandles { - list := make(BlobHandles, 0, len(s)) - for h := range s { - list = append(list, h) - } - - sort.Sort(list) - - return list -} - -func (s CountedBlobSet) String() string { - str := s.List().String() - if len(str) < 2 { - return "{}" - } - - return "{" + str[1:len(str)-1] + "}" -} - -// Copy returns a copy of the CountedBlobSet. -func (s CountedBlobSet) Copy() CountedBlobSet { - cp := make(CountedBlobSet, len(s)) - for k, v := range s { - cp[k] = v - } - return cp -} diff --git a/internal/restic/counted_blob_set_test.go b/internal/restic/counted_blob_set_test.go deleted file mode 100644 index edd39e65b9b..00000000000 --- a/internal/restic/counted_blob_set_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package restic_test - -import ( - "testing" - - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" -) - -func TestCountedBlobSet(t *testing.T) { - bs := restic.NewCountedBlobSet() - test.Equals(t, bs.Len(), 0) - test.Equals(t, bs.List(), restic.BlobHandles{}) - - bh := restic.NewRandomBlobHandle() - // check non existent - test.Equals(t, bs.Has(bh), false) - - // test insert - bs.Insert(bh) - test.Equals(t, bs.Has(bh), true) - test.Equals(t, bs.Len(), 1) - test.Equals(t, bs.List(), restic.BlobHandles{bh}) - - // test remove - bs.Delete(bh) - test.Equals(t, bs.Len(), 0) - test.Equals(t, bs.Has(bh), false) - test.Equals(t, bs.List(), restic.BlobHandles{}) - - bs = restic.NewCountedBlobSet(bh) - test.Equals(t, bs.Len(), 1) - test.Equals(t, bs.List(), restic.BlobHandles{bh}) - - s := bs.String() - test.Assert(t, len(s) > 10, "invalid string: %v", s) -} - -func TestCountedBlobSetCopy(t *testing.T) { - bs := restic.NewCountedBlobSet(restic.NewRandomBlobHandle(), restic.NewRandomBlobHandle(), restic.NewRandomBlobHandle()) - test.Equals(t, bs.Len(), 3) - cp := bs.Copy() - test.Equals(t, cp.Len(), 3) - test.Equals(t, bs.List(), cp.List()) -} diff --git a/internal/restic/find.go b/internal/restic/find.go index 08670a49f48..f9b4e8bdfda 100644 --- a/internal/restic/find.go +++ b/internal/restic/find.go @@ -11,18 +11,18 @@ import ( // Loader loads a blob from a repository. type Loader interface { LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error) - LookupBlobSize(id ID, tpe BlobType) (uint, bool) + LookupBlobSize(tpe BlobType, id ID) (uint, bool) Connections() uint } -type findBlobSet interface { +type FindBlobSet interface { Has(bh BlobHandle) bool Insert(bh BlobHandle) } // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // blobs) to the set blobs. 
Already seen tree blobs will not be visited again. -func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs findBlobSet, p *progress.Counter) error { +func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs FindBlobSet, p *progress.Counter) error { var lock sync.Mutex wg, ctx := errgroup.WithContext(ctx) @@ -46,7 +46,7 @@ func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs findBlob lock.Lock() for _, node := range tree.Nodes { switch node.Type { - case "file": + case NodeTypeFile: for _, blob := range node.Content { blobs.Insert(BlobHandle{ID: blob, Type: DataBlob}) } diff --git a/internal/restic/find_test.go b/internal/restic/find_test.go index 1ae30ded980..9b8315ad4f1 100644 --- a/internal/restic/find_test.go +++ b/internal/restic/find_test.go @@ -166,7 +166,7 @@ func (r ForbiddenRepo) LoadBlob(context.Context, restic.BlobType, restic.ID, []b return nil, errors.New("should not be called") } -func (r ForbiddenRepo) LookupBlobSize(_ restic.ID, _ restic.BlobType) (uint, bool) { +func (r ForbiddenRepo) LookupBlobSize(_ restic.BlobType, _ restic.ID) (uint, bool) { return 0, false } diff --git a/internal/restic/id.go b/internal/restic/id.go index e71c6d71b33..0742cd6f1d0 100644 --- a/internal/restic/id.go +++ b/internal/restic/id.go @@ -2,11 +2,10 @@ package restic import ( "crypto/rand" + "crypto/sha256" "encoding/hex" "fmt" "io" - - "github.com/minio/sha256-simd" ) // Hash returns the ID for data. diff --git a/internal/restic/idset.go b/internal/restic/idset.go index 1b12a6398fc..7d98b487cf6 100644 --- a/internal/restic/idset.go +++ b/internal/restic/idset.go @@ -1,6 +1,9 @@ package restic -import "sort" +import ( + "maps" + "sort" +) // IDSet is a set of IDs. type IDSet map[ID]struct{} @@ -44,28 +47,10 @@ func (s IDSet) List() IDs { } // Equals returns true iff s equals other. -func (s IDSet) Equals(other IDSet) bool { - if len(s) != len(other) { - return false - } - - for id := range s { - if _, ok := other[id]; !ok { - return false - } - } - - // length + one-way comparison is sufficient implication of equality - - return true -} +func (s IDSet) Equals(other IDSet) bool { return maps.Equal(s, other) } // Merge adds the blobs in other to the current set. -func (s IDSet) Merge(other IDSet) { - for id := range other { - s.Insert(id) - } -} +func (s IDSet) Merge(other IDSet) { maps.Copy(s, other) } // Intersect returns a new set containing the IDs that are present in both sets. func (s IDSet) Intersect(other IDSet) (result IDSet) { @@ -105,3 +90,5 @@ func (s IDSet) String() string { str := s.List().String() return "{" + str[1:len(str)-1] + "}" } + +func (s IDSet) Clone() IDSet { return maps.Clone(s) } diff --git a/internal/restic/idset_test.go b/internal/restic/idset_test.go index 734b3123775..14c88b3143e 100644 --- a/internal/restic/idset_test.go +++ b/internal/restic/idset_test.go @@ -35,4 +35,7 @@ func TestIDSet(t *testing.T) { } rtest.Equals(t, "{1285b303 7bb086db f658198b}", set.String()) + + copied := set.Clone() + rtest.Equals(t, "{1285b303 7bb086db f658198b}", copied.String()) } diff --git a/internal/restic/json.go b/internal/restic/json.go index 05d049b5949..ec64ff15344 100644 --- a/internal/restic/json.go +++ b/internal/restic/json.go @@ -21,7 +21,7 @@ func LoadJSONUnpacked(ctx context.Context, repo LoaderUnpacked, t FileType, id I // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // backend as type t, without a pack. It returns the storage hash. 
-func SaveJSONUnpacked(ctx context.Context, repo SaverUnpacked, t FileType, item interface{}) (ID, error) {
+func SaveJSONUnpacked[FT FileTypes](ctx context.Context, repo SaverUnpacked[FT], t FT, item interface{}) (ID, error) {
 	debug.Log("save new blob %v", t)
 	plaintext, err := json.Marshal(item)
 	if err != nil {
diff --git a/internal/restic/lock.go b/internal/restic/lock.go
index d98f8ec94f4..20fa1e20e8c 100644
--- a/internal/restic/lock.go
+++ b/internal/restic/lock.go
@@ -7,17 +7,19 @@ import (
 	"os/signal"
 	"os/user"
 	"sync"
-	"sync/atomic"
 	"syscall"
 	"testing"
 	"time"
 
-	"github.com/restic/restic/internal/backend"
 	"github.com/restic/restic/internal/errors"
 
 	"github.com/restic/restic/internal/debug"
 )
 
+// UnlockCancelDelay bounds how long lock cleanup operations will wait
+// if the passed-in context was canceled.
+const UnlockCancelDelay time.Duration = 1 * time.Minute
+
 // Lock represents a process locking the repository for an operation.
 //
 // There are two types of locks: exclusive and non-exclusive. There may be many
@@ -36,7 +38,7 @@ type Lock struct {
 	UID       uint32    `json:"uid,omitempty"`
 	GID       uint32    `json:"gid,omitempty"`
 
-	repo   Repository
+	repo   Unpacked[FileType]
 	lockID *ID
 }
 
@@ -84,33 +86,27 @@ func IsInvalidLock(err error) bool {
 
 var ErrRemovedLock = errors.New("lock file was removed in the meantime")
 
-// NewLock returns a new, non-exclusive lock for the repository. If an
-// exclusive lock is already held by another process, it returns an error
-// that satisfies IsAlreadyLocked.
-func NewLock(ctx context.Context, repo Repository) (*Lock, error) {
-	return newLock(ctx, repo, false)
-}
-
-// NewExclusiveLock returns a new, exclusive lock for the repository. If
-// another lock (normal and exclusive) is already held by another process,
-// it returns an error that satisfies IsAlreadyLocked.
-func NewExclusiveLock(ctx context.Context, repo Repository) (*Lock, error) {
-	return newLock(ctx, repo, true)
-}
-
 var waitBeforeLockCheck = 200 * time.Millisecond
 
+// delay increases by a factor of 2 on each retry
+var initialWaitBetweenLockRetries = 5 * time.Second
+
 // TestSetLockTimeout can be used to reduce the lock wait timeout for tests.
 func TestSetLockTimeout(t testing.TB, d time.Duration) {
 	t.Logf("setting lock timeout to %v", d)
 	waitBeforeLockCheck = d
+	initialWaitBetweenLockRetries = d
 }
 
-func newLock(ctx context.Context, repo Repository, excl bool) (*Lock, error) {
+// NewLock returns a new lock for the repository. If an
+// exclusive lock is already held by another process, it returns an error
+// that satisfies IsAlreadyLocked. If the new lock is exclusive, then other
+// non-exclusive locks also result in an IsAlreadyLocked error.
+func NewLock(ctx context.Context, repo Unpacked[FileType], exclusive bool) (*Lock, error) { lock := &Lock{ Time: time.Now(), PID: os.Getpid(), - Exclusive: excl, + Exclusive: exclusive, repo: repo, } @@ -137,7 +133,7 @@ func newLock(ctx context.Context, repo Repository, excl bool) (*Lock, error) { time.Sleep(waitBeforeLockCheck) if err = lock.checkForOtherLocks(ctx); err != nil { - _ = lock.Unlock() + _ = lock.Unlock(ctx) return nil, err } @@ -167,11 +163,20 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { if l.lockID != nil { checkedIDs.Insert(*l.lockID) } + delay := initialWaitBetweenLockRetries // retry locking a few times - for i := 0; i < 3; i++ { + for i := 0; i < 4; i++ { + if i != 0 { + // sleep between retries to give backend some time to settle + if err := cancelableDelay(ctx, delay); err != nil { + return err + } + delay *= 2 + } + // Store updates in new IDSet to prevent data races var m sync.Mutex - newCheckedIDs := NewIDSet(checkedIDs.List()...) + newCheckedIDs := checkedIDs.Clone() err = ForAllLocks(ctx, l.repo, checkedIDs, func(id ID, lock *Lock, err error) error { if err != nil { // if we cannot load a lock then it is unclear whether it can be ignored @@ -210,6 +215,18 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { return err } +func cancelableDelay(ctx context.Context, delay time.Duration) error { + // delay next try a bit + timer := time.NewTimer(delay) + select { + case <-ctx.Done(): + timer.Stop() + return ctx.Err() + case <-timer.C: + } + return nil +} + // createLock acquires the lock by creating a file in the repository. func (l *Lock) createLock(ctx context.Context) (ID, error) { id, err := SaveJSONUnpacked(ctx, l.repo, LockFile, l) @@ -221,12 +238,15 @@ func (l *Lock) createLock(ctx context.Context) (ID, error) { } // Unlock removes the lock from the repository. -func (l *Lock) Unlock() error { +func (l *Lock) Unlock(ctx context.Context) error { if l == nil || l.lockID == nil { return nil } - return l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: l.lockID.String()}) + ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay) + defer cancel() + + return l.repo.RemoveUnpacked(ctx, LockFile, *l.lockID) } var StaleLockTimeout = 30 * time.Minute @@ -267,6 +287,23 @@ func (l *Lock) Stale() bool { return false } +func delayedCancelContext(parentCtx context.Context, delay time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + select { + case <-parentCtx.Done(): + case <-ctx.Done(): + return + } + + time.Sleep(delay) + cancel() + }() + + return ctx, cancel +} + // Refresh refreshes the lock by creating a new file in the backend with a new // timestamp. Afterwards the old lock is removed. func (l *Lock) Refresh(ctx context.Context) error { @@ -286,7 +323,10 @@ func (l *Lock) Refresh(ctx context.Context) error { oldLockID := l.lockID l.lockID = &id - return l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: oldLockID.String()}) + ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay) + defer cancel() + + return l.repo.RemoveUnpacked(ctx, LockFile, *oldLockID) } // RefreshStaleLock is an extended variant of Refresh that can also refresh stale lock files. 
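The two helpers added above split shutdown handling in two: cancelableDelay makes the retry back-off interruptible, and delayedCancelContext keeps a cleanup context alive for a bounded grace period after the caller's context is canceled, so an interrupted process can still remove its lock file. A minimal standalone sketch of that delayed-cancel pattern follows; delayedCancel and the surrounding main are illustrative names, not part of the patch:

package main

import (
	"context"
	"fmt"
	"time"
)

// delayedCancel returns a context that stays usable for an extra grace
// period after parent is done, mirroring delayedCancelContext above.
func delayedCancel(parent context.Context, delay time.Duration) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		select {
		case <-parent.Done():
			// parent canceled: give cleanup a little more time
		case <-ctx.Done():
			return
		}
		time.Sleep(delay)
		cancel()
	}()

	return ctx, cancel
}

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	cleanupCtx, cancel := delayedCancel(parent, 100*time.Millisecond)
	defer cancel()

	cancelParent() // simulate an interrupt during unlock
	<-cleanupCtx.Done()
	fmt.Println("cleanup window closed after the grace period")
}

In the patch itself, Unlock and Refresh wrap their lock-removal calls this way with UnlockCancelDelay, so cleanup gets up to one minute even after cancellation.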
@@ -313,15 +353,19 @@ func (l *Lock) RefreshStaleLock(ctx context.Context) error { time.Sleep(waitBeforeLockCheck) exists, err = l.checkExistence(ctx) + + ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay) + defer cancel() + if err != nil { // cleanup replacement lock - _ = l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: id.String()}) + _ = l.repo.RemoveUnpacked(ctx, LockFile, id) return err } if !exists { // cleanup replacement lock - _ = l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: id.String()}) + _ = l.repo.RemoveUnpacked(ctx, LockFile, id) return ErrRemovedLock } @@ -332,7 +376,7 @@ func (l *Lock) RefreshStaleLock(ctx context.Context) error { oldLockID := l.lockID l.lockID = &id - return l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: oldLockID.String()}) + return l.repo.RemoveUnpacked(ctx, LockFile, *oldLockID) } func (l *Lock) checkExistence(ctx context.Context) (bool, error) { @@ -341,7 +385,7 @@ func (l *Lock) checkExistence(ctx context.Context) (bool, error) { exists := false - err := l.repo.List(ctx, LockFile, func(id ID, size int64) error { + err := l.repo.List(ctx, LockFile, func(id ID, _ int64) error { if id.Equal(*l.lockID) { exists = true } @@ -389,42 +433,6 @@ func LoadLock(ctx context.Context, repo LoaderUnpacked, id ID) (*Lock, error) { return lock, nil } -// RemoveStaleLocks deletes all locks detected as stale from the repository. -func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error) { - var processed uint - err := ForAllLocks(ctx, repo, nil, func(id ID, lock *Lock, err error) error { - if err != nil { - // ignore locks that cannot be loaded - debug.Log("ignore lock %v: %v", id, err) - return nil - } - - if lock.Stale() { - err = repo.Backend().Remove(ctx, backend.Handle{Type: LockFile, Name: id.String()}) - if err == nil { - processed++ - } - return err - } - - return nil - }) - return processed, err -} - -// RemoveAllLocks removes all locks forcefully. -func RemoveAllLocks(ctx context.Context, repo Repository) (uint, error) { - var processed uint32 - err := ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, size int64) error { - err := repo.Backend().Remove(ctx, backend.Handle{Type: LockFile, Name: id.String()}) - if err == nil { - atomic.AddUint32(&processed, 1) - } - return err - }) - return uint(processed), err -} - // ForAllLocks reads all locks in parallel and calls the given callback. // It is guaranteed that the function is not run concurrently. If the // callback returns an error, this function is cancelled and also returns that error. 
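Before the test changes below, a usage sketch of the reworked locking API may help review. It assumes a repo value satisfying restic.Unpacked[restic.FileType] and a hypothetical caller named lockAndWork; since these are internal packages, the snippet only compiles inside the restic module:

package example

import (
	"context"

	"github.com/restic/restic/internal/restic"
)

// lockAndWork acquires a repository lock, does some work, and releases the
// lock. The third NewLock argument selects exclusive (true) or shared (false).
func lockAndWork(ctx context.Context, repo restic.Unpacked[restic.FileType]) error {
	lock, err := restic.NewLock(ctx, repo, false)
	if err != nil {
		// restic.IsAlreadyLocked(err) reports a conflicting lock
		return err
	}
	// Unlock is now context-aware; per the patch, cleanup may still run for
	// up to restic.UnlockCancelDelay after the passed context is canceled.
	defer func() { _ = lock.Unlock(context.Background()) }()

	// ... perform the locked operation ...
	return nil
}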
diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index 0d282aaf7f1..67d2b9a465c 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -19,22 +19,22 @@ func TestLock(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - rtest.OK(t, lock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) } func TestDoubleUnlock(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - rtest.OK(t, lock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) - err = lock.Unlock() + err = lock.Unlock(context.TODO()) rtest.Assert(t, err != nil, "double unlock didn't return an error, got %v", err) } @@ -43,14 +43,14 @@ func TestMultipleLock(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock1, err := restic.NewLock(context.TODO(), repo) + lock1, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - lock2, err := restic.NewLock(context.TODO(), repo) + lock2, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - rtest.OK(t, lock1.Unlock()) - rtest.OK(t, lock2.Unlock()) + rtest.OK(t, lock1.Unlock(context.TODO())) + rtest.OK(t, lock2.Unlock(context.TODO())) } type failLockLoadingBackend struct { @@ -66,73 +66,58 @@ func (be *failLockLoadingBackend) Load(ctx context.Context, h backend.Handle, le func TestMultipleLockFailure(t *testing.T) { be := &failLockLoadingBackend{Backend: mem.New()} - repo := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{}) + repo, _ := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{}) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock1, err := restic.NewLock(context.TODO(), repo) + lock1, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - _, err = restic.NewLock(context.TODO(), repo) + _, err = repository.TestNewLock(t, repo, false) rtest.Assert(t, err != nil, "unreadable lock file did not result in an error") - rtest.OK(t, lock1.Unlock()) + rtest.OK(t, lock1.Unlock(context.TODO())) } func TestLockExclusive(t *testing.T) { repo := repository.TestRepository(t) - elock, err := restic.NewExclusiveLock(context.TODO(), repo) + elock, err := repository.TestNewLock(t, repo, true) rtest.OK(t, err) - rtest.OK(t, elock.Unlock()) + rtest.OK(t, elock.Unlock(context.TODO())) } func TestLockOnExclusiveLockedRepo(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - elock, err := restic.NewExclusiveLock(context.TODO(), repo) + elock, err := repository.TestNewLock(t, repo, true) rtest.OK(t, err) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.Assert(t, err != nil, "create normal lock with exclusively locked repo didn't return an error") rtest.Assert(t, restic.IsAlreadyLocked(err), "create normal lock with exclusively locked repo didn't return the correct error") - rtest.OK(t, lock.Unlock()) - rtest.OK(t, elock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) + rtest.OK(t, elock.Unlock(context.TODO())) } func TestExclusiveLockOnLockedRepo(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - elock, err := 
restic.NewLock(context.TODO(), repo) + elock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - lock, err := restic.NewExclusiveLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, true) rtest.Assert(t, err != nil, "create normal lock with exclusively locked repo didn't return an error") rtest.Assert(t, restic.IsAlreadyLocked(err), "create normal lock with exclusively locked repo didn't return the correct error") - rtest.OK(t, lock.Unlock()) - rtest.OK(t, elock.Unlock()) -} - -func createFakeLock(repo restic.SaverUnpacked, t time.Time, pid int) (restic.ID, error) { - hostname, err := os.Hostname() - if err != nil { - return restic.ID{}, err - } - - newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} - return restic.SaveJSONUnpacked(context.TODO(), repo, restic.LockFile, &newLock) -} - -func removeLock(repo restic.Repository, id restic.ID) error { - h := backend.Handle{Type: restic.LockFile, Name: id.String()} - return repo.Backend().Remove(context.TODO(), h) + rtest.OK(t, lock.Unlock(context.TODO())) + rtest.OK(t, elock.Unlock(context.TODO())) } var staleLockTests = []struct { @@ -191,69 +176,6 @@ func TestLockStale(t *testing.T) { } } -func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { - h := backend.Handle{Type: restic.LockFile, Name: id.String()} - _, err := repo.Backend().Stat(context.TODO(), h) - if err != nil && !repo.Backend().IsNotExist(err) { - t.Fatal(err) - } - return err == nil -} - -func TestLockWithStaleLock(t *testing.T) { - repo := repository.TestRepository(t) - - id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) - rtest.OK(t, err) - - id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) - rtest.OK(t, err) - - id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) - rtest.OK(t, err) - - processed, err := restic.RemoveStaleLocks(context.TODO(), repo) - rtest.OK(t, err) - - rtest.Assert(t, lockExists(repo, t, id1) == false, - "stale lock still exists after RemoveStaleLocks was called") - rtest.Assert(t, lockExists(repo, t, id2) == true, - "non-stale lock was removed by RemoveStaleLocks") - rtest.Assert(t, lockExists(repo, t, id3) == false, - "stale lock still exists after RemoveStaleLocks was called") - rtest.Assert(t, processed == 2, - "number of locks removed does not match: expected %d, got %d", - 2, processed) - - rtest.OK(t, removeLock(repo, id2)) -} - -func TestRemoveAllLocks(t *testing.T) { - repo := repository.TestRepository(t) - - id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) - rtest.OK(t, err) - - id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) - rtest.OK(t, err) - - id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) - rtest.OK(t, err) - - processed, err := restic.RemoveAllLocks(context.TODO(), repo) - rtest.OK(t, err) - - rtest.Assert(t, lockExists(repo, t, id1) == false, - "lock still exists after RemoveAllLocks was called") - rtest.Assert(t, lockExists(repo, t, id2) == false, - "lock still exists after RemoveAllLocks was called") - rtest.Assert(t, lockExists(repo, t, id3) == false, - "lock still exists after RemoveAllLocks was called") - rtest.Assert(t, processed == 3, - "number of locks removed does not match: expected %d, got %d", - 3, processed) -} - func checkSingleLock(t *testing.T, repo restic.Lister) restic.ID { t.Helper() var lockID *restic.ID @@ -277,7 +199,7 @@ func testLockRefresh(t *testing.T, refresh func(lock 
*restic.Lock) error) {
 	repo := repository.TestRepository(t)
 	restic.TestSetLockTimeout(t, 5*time.Millisecond)
 
-	lock, err := restic.NewLock(context.TODO(), repo)
+	lock, err := repository.TestNewLock(t, repo, false)
 	rtest.OK(t, err)
 	time0 := lock.Time
 
@@ -294,7 +216,7 @@ func testLockRefresh(t *testing.T, refresh func(lock *restic.Lock) error) {
 	rtest.OK(t, err)
 	rtest.Assert(t, lock2.Time.After(time0), "expected a later timestamp after lock refresh")
-	rtest.OK(t, lock.Unlock())
+	rtest.OK(t, lock.Unlock(context.TODO()))
 }
 
 func TestLockRefresh(t *testing.T) {
@@ -310,15 +232,15 @@ func TestLockRefreshStale(t *testing.T) {
 }
 
 func TestLockRefreshStaleMissing(t *testing.T) {
-	repo := repository.TestRepository(t)
+	repo, _, be := repository.TestRepositoryWithVersion(t, 0)
 	restic.TestSetLockTimeout(t, 5*time.Millisecond)
 
-	lock, err := restic.NewLock(context.TODO(), repo)
+	lock, err := repository.TestNewLock(t, repo, false)
 	rtest.OK(t, err)
 	lockID := checkSingleLock(t, repo)
 
 	// refresh must fail if lock was removed
-	rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.LockFile, Name: lockID.String()}))
+	rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.LockFile, Name: lockID.String()}))
 	time.Sleep(time.Millisecond)
 	err = lock.RefreshStaleLock(context.TODO())
 	rtest.Assert(t, err == restic.ErrRemovedLock, "unexpected error, expected %v, got %v", restic.ErrRemovedLock, err)
diff --git a/internal/restic/mknod_unix.go b/internal/restic/mknod_unix.go
deleted file mode 100644
index 7dd6c60d0b9..00000000000
--- a/internal/restic/mknod_unix.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build !freebsd && !windows
-// +build !freebsd,!windows
-
-package restic
-
-import "golang.org/x/sys/unix"
-
-func mknod(path string, mode uint32, dev uint64) (err error) {
-	return unix.Mknod(path, mode, int(dev))
-}
diff --git a/internal/restic/node.go b/internal/restic/node.go
index 1d5bb51af43..c572996a5fe 100644
--- a/internal/restic/node.go
+++ b/internal/restic/node.go
@@ -1,14 +1,13 @@
 package restic
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"os"
-	"os/user"
+	"reflect"
 	"strconv"
+	"strings"
 	"sync"
-	"syscall"
 	"time"
 	"unicode/utf8"
 
@@ -17,19 +16,75 @@ import (
 	"bytes"
 
 	"github.com/restic/restic/internal/debug"
-	"github.com/restic/restic/internal/fs"
 )
 
-// ExtendedAttribute is a tuple storing the xattr name and value.
+// ExtendedAttribute is a tuple storing the xattr name and value for various filesystems.
 type ExtendedAttribute struct {
 	Name  string `json:"name"`
 	Value []byte `json:"value"`
 }
 
+// GenericAttributeType can be used for OS specific functionalities by defining specific types
+// in node.go to be used by the specific node_xx files.
+// OS specific attribute types should follow the convention <OS>Attributes.
+// GenericAttributeTypes should follow the convention <OS specific attribute>.<generic attribute key>.
+// The attributes in OS specific attribute types must be pointers as we want to distinguish nil values
+// and not create GenericAttributes for them.
+type GenericAttributeType string
+
+// OSType is the type created to represent each specific OS
+type OSType string
+
+const (
+	// When new GenericAttributeTypes are defined, they must be added in the init function as well.
+
+	// Below are windows specific attributes.
+
+	// TypeCreationTime is the GenericAttributeType used for storing creation time for windows files within the generic attributes map.
+	TypeCreationTime GenericAttributeType = "windows.creation_time"
+	// TypeFileAttributes is the GenericAttributeType used for storing file attributes for windows files within the generic attributes map.
+	TypeFileAttributes GenericAttributeType = "windows.file_attributes"
+	// TypeSecurityDescriptor is the GenericAttributeType used for storing security descriptors (including owner, group, discretionary access control list (DACL), and system access control list (SACL)) for windows files within the generic attributes map.
+	TypeSecurityDescriptor GenericAttributeType = "windows.security_descriptor"
+
+	// Generic Attributes for other OS types should be defined here.
+)
+
+// init is called when the package is initialized. Any new GenericAttributeTypes being created must be added here as well.
+func init() {
+	storeGenericAttributeType(TypeCreationTime, TypeFileAttributes, TypeSecurityDescriptor)
+}
+
+// genericAttributesForOS maintains a map of known GenericAttributeTypes to the OSType
+var genericAttributesForOS = map[GenericAttributeType]OSType{}
+
+// storeGenericAttributeType adds an entry to the genericAttributesForOS map
+func storeGenericAttributeType(attributeTypes ...GenericAttributeType) {
+	for _, attributeType := range attributeTypes {
+		// Get the OS attribute type from the GenericAttributeType
+		osAttributeName := strings.Split(string(attributeType), ".")[0]
+		genericAttributesForOS[attributeType] = OSType(osAttributeName)
+	}
+}
+
+type NodeType string
+
+var (
+	NodeTypeFile      = NodeType("file")
+	NodeTypeDir       = NodeType("dir")
+	NodeTypeSymlink   = NodeType("symlink")
+	NodeTypeDev       = NodeType("dev")
+	NodeTypeCharDev   = NodeType("chardev")
+	NodeTypeFifo      = NodeType("fifo")
+	NodeTypeSocket    = NodeType("socket")
+	NodeTypeIrregular = NodeType("irregular")
+	NodeTypeInvalid   = NodeType("")
+)
+
 // Node is a file, directory or other item in a backup.
 type Node struct {
 	Name       string      `json:"name"`
-	Type       string      `json:"type"`
+	Type       NodeType    `json:"type"`
 	Mode       os.FileMode `json:"mode,omitempty"`
 	ModTime    time.Time   `json:"mtime,omitempty"`
 	AccessTime time.Time   `json:"atime,omitempty"`
@@ -39,7 +94,7 @@ type Node struct {
 	User       string      `json:"user,omitempty"`
 	Group      string      `json:"group,omitempty"`
 	Inode      uint64      `json:"inode,omitempty"`
-	DeviceID   uint64      `json:"device_id,omitempty"` // device id of the file, stat.st_dev
+	DeviceID   uint64      `json:"device_id,omitempty"` // device id of the file, stat.st_dev, only stored for hardlinks
 	Size       uint64      `json:"size,omitempty"`
 	Links      uint64      `json:"links,omitempty"`
 	LinkTarget string      `json:"linktarget,omitempty"`
 	// This allows storing arbitrary byte-sequences, which are possible as symlink targets on unix systems,
 	// as LinkTarget without breaking backwards-compatibility.
 	// Must only be set of the linktarget cannot be encoded as valid utf8.
- LinkTargetRaw []byte `json:"linktarget_raw,omitempty"` - ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"` - Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev - Content IDs `json:"content"` - Subtree *ID `json:"subtree,omitempty"` + LinkTargetRaw []byte `json:"linktarget_raw,omitempty"` + ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"` + GenericAttributes map[GenericAttributeType]json.RawMessage `json:"generic_attributes,omitempty"` + Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev + Content IDs `json:"content"` + Subtree *ID `json:"subtree,omitempty"` Error string `json:"error,omitempty"` @@ -68,19 +124,19 @@ func (n Nodes) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (node Node) String() string { var mode os.FileMode switch node.Type { - case "file": + case NodeTypeFile: mode = 0 - case "dir": + case NodeTypeDir: mode = os.ModeDir - case "symlink": + case NodeTypeSymlink: mode = os.ModeSymlink - case "dev": + case NodeTypeDev: mode = os.ModeDevice - case "chardev": + case NodeTypeCharDev: mode = os.ModeDevice | os.ModeCharDevice - case "fifo": + case NodeTypeFifo: mode = os.ModeNamedPipe - case "socket": + case NodeTypeSocket: mode = os.ModeSocket } @@ -88,49 +144,6 @@ func (node Node) String() string { mode|node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name) } -// NodeFromFileInfo returns a new node from the given path and FileInfo. It -// returns the first error that is encountered, together with a node. -func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { - mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky - node := &Node{ - Path: path, - Name: fi.Name(), - Mode: fi.Mode() & mask, - ModTime: fi.ModTime(), - } - - node.Type = nodeTypeFromFileInfo(fi) - if node.Type == "file" { - node.Size = uint64(fi.Size()) - } - - err := node.fillExtra(path, fi) - return node, err -} - -func nodeTypeFromFileInfo(fi os.FileInfo) string { - switch fi.Mode() & os.ModeType { - case 0: - return "file" - case os.ModeDir: - return "dir" - case os.ModeSymlink: - return "symlink" - case os.ModeDevice | os.ModeCharDevice: - return "chardev" - case os.ModeDevice: - return "dev" - case os.ModeNamedPipe: - return "fifo" - case os.ModeSocket: - return "socket" - case os.ModeIrregular: - return "irregular" - } - - return "" -} - // GetExtendedAttribute gets the extended attribute. func (node Node) GetExtendedAttribute(a string) []byte { for _, attr := range node.ExtendedAttributes { @@ -141,191 +154,6 @@ func (node Node) GetExtendedAttribute(a string) []byte { return nil } -// CreateAt creates the node at the given path but does NOT restore node meta data. 
-func (node *Node) CreateAt(ctx context.Context, path string, repo BlobLoader) error { - debug.Log("create node %v at %v", node.Name, path) - - switch node.Type { - case "dir": - if err := node.createDirAt(path); err != nil { - return err - } - case "file": - if err := node.createFileAt(ctx, path, repo); err != nil { - return err - } - case "symlink": - if err := node.createSymlinkAt(path); err != nil { - return err - } - case "dev": - if err := node.createDevAt(path); err != nil { - return err - } - case "chardev": - if err := node.createCharDevAt(path); err != nil { - return err - } - case "fifo": - if err := node.createFifoAt(path); err != nil { - return err - } - case "socket": - return nil - default: - return errors.Errorf("filetype %q not implemented", node.Type) - } - - return nil -} - -// RestoreMetadata restores node metadata -func (node Node) RestoreMetadata(path string) error { - err := node.restoreMetadata(path) - if err != nil { - debug.Log("restoreMetadata(%s) error %v", path, err) - } - - return err -} - -func (node Node) restoreMetadata(path string) error { - var firsterr error - - if err := lchown(path, int(node.UID), int(node.GID)); err != nil { - // Like "cp -a" and "rsync -a" do, we only report lchown permission errors - // if we run as root. - if os.Geteuid() > 0 && os.IsPermission(err) { - debug.Log("not running as root, ignoring lchown permission error for %v: %v", - path, err) - } else { - firsterr = errors.WithStack(err) - } - } - - if node.Type != "symlink" { - if err := fs.Chmod(path, node.Mode); err != nil { - if firsterr != nil { - firsterr = errors.WithStack(err) - } - } - } - - if err := node.RestoreTimestamps(path); err != nil { - debug.Log("error restoring timestamps for dir %v: %v", path, err) - if firsterr != nil { - firsterr = err - } - } - - if err := node.restoreExtendedAttributes(path); err != nil { - debug.Log("error restoring extended attributes for %v: %v", path, err) - if firsterr != nil { - firsterr = err - } - } - - return firsterr -} - -func (node Node) restoreExtendedAttributes(path string) error { - for _, attr := range node.ExtendedAttributes { - err := Setxattr(path, attr.Name, attr.Value) - if err != nil { - return err - } - } - return nil -} - -func (node Node) RestoreTimestamps(path string) error { - var utimes = [...]syscall.Timespec{ - syscall.NsecToTimespec(node.AccessTime.UnixNano()), - syscall.NsecToTimespec(node.ModTime.UnixNano()), - } - - if node.Type == "symlink" { - return node.restoreSymlinkTimestamps(path, utimes) - } - - if err := syscall.UtimesNano(path, utimes[:]); err != nil { - return errors.Wrap(err, "UtimesNano") - } - - return nil -} - -func (node Node) createDirAt(path string) error { - err := fs.Mkdir(path, node.Mode) - if err != nil && !os.IsExist(err) { - return errors.WithStack(err) - } - - return nil -} - -func (node Node) createFileAt(ctx context.Context, path string, repo BlobLoader) error { - f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) - if err != nil { - return errors.WithStack(err) - } - - err = node.writeNodeContent(ctx, repo, f) - closeErr := f.Close() - - if err != nil { - return err - } - - if closeErr != nil { - return errors.WithStack(closeErr) - } - - return nil -} - -func (node Node) writeNodeContent(ctx context.Context, repo BlobLoader, f *os.File) error { - var buf []byte - for _, id := range node.Content { - buf, err := repo.LoadBlob(ctx, DataBlob, id, buf) - if err != nil { - return err - } - - _, err = f.Write(buf) - if err != nil { - return errors.WithStack(err) - } - 
}
-
-	return nil
-}
-
-func (node Node) createSymlinkAt(path string) error {
-
-	if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
-		return errors.Wrap(err, "Symlink")
-	}
-
-	if err := fs.Symlink(node.LinkTarget, path); err != nil {
-		return errors.WithStack(err)
-	}
-
-	return nil
-}
-
-func (node *Node) createDevAt(path string) error {
-	return mknod(path, syscall.S_IFBLK|0600, node.Device)
-}
-
-func (node *Node) createCharDevAt(path string) error {
-	return mknod(path, syscall.S_IFCHR|0600, node.Device)
-}
-
-func (node *Node) createFifoAt(path string) error {
-	return mkfifo(path, 0600)
-}
-
 // FixTime returns a time.Time which can safely be used to marshal as JSON. If
 // the timestamp is earlier than year zero, the year is set to zero. In the same
 // way, if the year is larger than 9999, the year is set to 9999. Other than
@@ -438,6 +266,9 @@ func (node Node) Equals(other Node) bool {
 	if !node.sameExtendedAttributes(other) {
 		return false
 	}
+	if !node.sameGenericAttributes(other) {
+		return false
+	}
 	if node.Subtree != nil {
 		if other.Subtree == nil {
 			return false
@@ -480,8 +311,13 @@ func (node Node) sameContent(other Node) bool {
 }
 
 func (node Node) sameExtendedAttributes(other Node) bool {
-	if len(node.ExtendedAttributes) != len(other.ExtendedAttributes) {
+	ln := len(node.ExtendedAttributes)
+	lo := len(other.ExtendedAttributes)
+	if ln != lo {
 		return false
+	} else if ln == 0 {
+		// This means lo is also of length 0
+		return true
 	}
 
 	// build a set of all attributes that node has
@@ -525,143 +361,141 @@ func (node Node) sameExtendedAttributes(other Node) bool {
 	return true
 }
 
-func (node *Node) fillUser(stat *statT) {
-	uid, gid := stat.uid(), stat.gid()
-	node.UID, node.GID = uid, gid
-	node.User = lookupUsername(uid)
-	node.Group = lookupGroup(gid)
+func (node Node) sameGenericAttributes(other Node) bool {
+	return deepEqual(node.GenericAttributes, other.GenericAttributes)
 }
 
-var (
-	uidLookupCache      = make(map[uint32]string)
-	uidLookupCacheMutex = sync.RWMutex{}
-)
-
-// Cached user name lookup by uid. Returns "" when no name can be found.
-func lookupUsername(uid uint32) string {
-	uidLookupCacheMutex.RLock()
-	username, ok := uidLookupCache[uid]
-	uidLookupCacheMutex.RUnlock()
-
-	if ok {
-		return username
+func deepEqual(map1, map2 map[GenericAttributeType]json.RawMessage) bool {
+	// Check if the maps have the same number of keys
+	if len(map1) != len(map2) {
+		return false
 	}
 
-	u, err := user.LookupId(strconv.Itoa(int(uid)))
-	if err == nil {
-		username = u.Username
-	}
+	// Iterate over each key-value pair in map1
+	for key, value1 := range map1 {
+		// Check if the key exists in map2
+		value2, ok := map2[key]
+		if !ok {
+			return false
+		}
 
-	uidLookupCacheMutex.Lock()
-	uidLookupCache[uid] = username
-	uidLookupCacheMutex.Unlock()
+		// Check if the json.RawMessage values are equal byte by byte
+		if !bytes.Equal(value1, value2) {
+			return false
+		}
+	}
 
-	return username
+	return true
 }
 
-var (
-	gidLookupCache      = make(map[uint32]string)
-	gidLookupCacheMutex = sync.RWMutex{}
-)
+// HandleUnknownGenericAttributesFound is used for handling and distinguishing between scenarios related to future versions and cross-OS repositories
+func HandleUnknownGenericAttributesFound(unknownAttribs []GenericAttributeType, warn func(msg string)) {
+	for _, unknownAttrib := range unknownAttribs {
+		handleUnknownGenericAttributeFound(unknownAttrib, warn)
+	}
+}
 
-// Cached group name lookup by gid. Returns "" when no name can be found.
-func lookupGroup(gid uint32) string {
-	gidLookupCacheMutex.RLock()
-	group, ok := gidLookupCache[gid]
-	gidLookupCacheMutex.RUnlock()
+// handleUnknownGenericAttributeFound is used for handling and distinguishing between scenarios related to future versions and cross-OS repositories
+func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeType, warn func(msg string)) {
+	if checkGenericAttributeNameNotHandledAndPut(genericAttributeType) {
+		// Print the unique warning only once for a given execution
+		os, exists := genericAttributesForOS[genericAttributeType]
 
-	if ok {
-		return group
+		if exists {
+			// If genericAttributesForOS contains an entry but we still got here, it means the specific node_xx.go for the current OS did not handle it, and the repository may have been originally created on a different OS.
+			// The fact that node.go knows about the attribute means it is not a new attribute. This may be a common situation if a repo is used across OSs.
+			debug.Log("Ignoring a generic attribute found in the repository: %s which may not be compatible with your OS. Compatible OS: %s", genericAttributeType, os)
+		} else {
+			// If genericAttributesForOS in node.go does not know about this attribute, then the repository may have been created by a newer version which has a newer GenericAttributeType.
+			warn(fmt.Sprintf("Found an unrecognized generic attribute in the repository: %s. You may need to upgrade to the latest version of restic.", genericAttributeType))
+		}
 	}
+}
 
-	g, err := user.LookupGroupId(strconv.Itoa(int(gid)))
-	if err == nil {
-		group = g.Name
+// HandleAllUnknownGenericAttributesFound performs validations for all generic attributes of a node.
+// This is currently not used on Windows because Windows has its own handling for generic attributes.
+// nolint:unused
+func HandleAllUnknownGenericAttributesFound(attributes map[GenericAttributeType]json.RawMessage, warn func(msg string)) error {
+	for name := range attributes {
+		handleUnknownGenericAttributeFound(name, warn)
 	}
+	return nil
+}
 
-	gidLookupCacheMutex.Lock()
-	gidLookupCache[gid] = group
-	gidLookupCacheMutex.Unlock()
+var unknownGenericAttributesHandlingHistory sync.Map
 
-	return group
+// checkGenericAttributeNameNotHandledAndPut checks if the GenericAttributeType name entry
+// already exists and puts it in the map if not.
+func checkGenericAttributeNameNotHandledAndPut(value GenericAttributeType) bool {
+	// If the key doesn't exist, put the value and return true because it is not already handled
+	_, exists := unknownGenericAttributesHandlingHistory.LoadOrStore(value, "")
+	// If the key exists, it is already handled, so return false
+	return !exists
 }
 
-func (node *Node) fillExtra(path string, fi os.FileInfo) error {
-	stat, ok := toStatT(fi.Sys())
-	if !ok {
-		// fill minimal info with current values for uid, gid
-		node.UID = uint32(os.Getuid())
-		node.GID = uint32(os.Getgid())
-		node.ChangeTime = node.ModTime
-		return nil
+// The functions below are common helper functions which can be used for generic attributes support
+// across different operating systems.
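A minimal usage sketch of the reflection helpers defined just below (the exampleAttributes type, its Flavor field, and the "example" key prefix are hypothetical; real callers such as WindowsAttrsToGenericAttributes later in this diff pass an OS-specific struct and runtime.GOOS as the prefix). Inside package restic, a round trip would look like this:

// exampleAttributes is a hypothetical OS attribute struct. Fields must be
// pointers (or other nilable types) so unset attributes can be skipped, and
// the `generic` tag supplies the key suffix after the OS prefix.
type exampleAttributes struct {
	Flavor *string `generic:"flavor"`
}

func exampleRoundTrip() error {
	flavor := "vanilla"
	in := exampleAttributes{Flavor: &flavor}

	// Struct -> map: yields the key "example.flavor" holding the raw JSON "vanilla".
	inValue := reflect.ValueOf(in)
	attrs, err := OSAttrsToGenericAttributes(reflect.TypeOf(in), &inValue, "example")
	if err != nil {
		return err
	}

	// Map -> struct: the target must be addressable so json.Unmarshal can
	// write into its fields; unknown collects keys without a matching field.
	var out exampleAttributes
	outValue := reflect.ValueOf(&out).Elem()
	unknown, err := GenericAttributesToOSAttrs(attrs, reflect.TypeOf(out), &outValue, "example")
	if err != nil {
		return err
	}
	_ = unknown
	return nil
}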
+
+
+// GenericAttributesToOSAttrs converts the generic attributes map to the OS-specific attributes struct using reflection
+func GenericAttributesToOSAttrs(attrs map[GenericAttributeType]json.RawMessage, attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (unknownAttribs []GenericAttributeType, err error) {
+	attributeValue := *attributeValuePtr
+
+	for key, rawMsg := range attrs {
+		found := false
+		for i := 0; i < attributeType.NumField(); i++ {
+			if getFQKeyByIndex(attributeType, i, keyPrefix) == key {
+				found = true
+				fieldValue := attributeValue.Field(i)
+				// For directly supported types, unmarshal the raw JSON straight into the field
+				if err := json.Unmarshal(rawMsg, fieldValue.Addr().Interface()); err != nil {
+					return unknownAttribs, errors.Wrap(err, "Unmarshal")
+				}
+				break
+			}
+		}
+		if !found {
+			unknownAttribs = append(unknownAttribs, key)
+		}
	}
+	return unknownAttribs, nil
+}
 
-	node.Inode = uint64(stat.ino())
-	node.DeviceID = uint64(stat.dev())
-
-	node.fillTimes(stat)
+// getFQKey gets the fully qualified key for the field
+func getFQKey(field reflect.StructField, keyPrefix string) GenericAttributeType {
+	return GenericAttributeType(fmt.Sprintf("%s.%s", keyPrefix, field.Tag.Get("generic")))
+}
 
-	node.fillUser(stat)
+// getFQKeyByIndex gets the fully qualified key for the field at the given index
+func getFQKeyByIndex(attributeType reflect.Type, index int, keyPrefix string) GenericAttributeType {
+	return getFQKey(attributeType.Field(index), keyPrefix)
+}
 
-	switch node.Type {
-	case "file":
-		node.Size = uint64(stat.size())
-		node.Links = uint64(stat.nlink())
-	case "dir":
-	case "symlink":
-		var err error
-		node.LinkTarget, err = fs.Readlink(path)
-		node.Links = uint64(stat.nlink())
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	case "dev":
-		node.Device = uint64(stat.rdev())
-		node.Links = uint64(stat.nlink())
-	case "chardev":
-		node.Device = uint64(stat.rdev())
-		node.Links = uint64(stat.nlink())
-	case "fifo":
-	case "socket":
-	default:
-		return errors.Errorf("unsupported file type %q", node.Type)
-	}
+// OSAttrsToGenericAttributes converts the OS-specific attributes struct to a generic attributes map using reflection
+func OSAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (attrs map[GenericAttributeType]json.RawMessage, err error) {
+	attributeValue := *attributeValuePtr
+	attrs = make(map[GenericAttributeType]json.RawMessage)
 
-	return node.fillExtendedAttributes(path)
-}
+	// Iterate over the fields of the struct
+	for i := 0; i < attributeType.NumField(); i++ {
+		field := attributeType.Field(i)
 
-func (node *Node) fillExtendedAttributes(path string) error {
-	xattrs, err := Listxattr(path)
-	debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err)
-	if err != nil {
-		return err
-	}
+		// Get the field value using reflection
+		fieldValue := attributeValue.FieldByName(field.Name)
 
-	node.ExtendedAttributes = make([]ExtendedAttribute, 0, len(xattrs))
-	for _, attr := range xattrs {
-		attrVal, err := Getxattr(path, attr)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "can not obtain extended attribute %v for %v:\n", attr, path)
+		// Check if the field is nil
+		if fieldValue.IsNil() {
+			// If it's nil, skip this field
 			continue
 		}
-		attr := ExtendedAttribute{
-			Name:  attr,
-			Value: attrVal,
+
+		// Marshal the field value into a json.RawMessage
+		var fieldBytes []byte
+		if fieldBytes, err = json.Marshal(fieldValue.Interface()); err != nil {
+			return attrs, errors.Wrap(err, "Marshal")
 		}
-		node.ExtendedAttributes =
append(node.ExtendedAttributes, attr) + // Insert the field into the map + attrs[getFQKey(field, keyPrefix)] = json.RawMessage(fieldBytes) } - - return nil -} - -func mkfifo(path string, mode uint32) (err error) { - return mknod(path, mode|syscall.S_IFIFO, 0) -} - -func (node *Node) fillTimes(stat *statT) { - ctim := stat.ctim() - atim := stat.atim() - node.ChangeTime = time.Unix(ctim.Unix()) - node.AccessTime = time.Unix(atim.Unix()) + return attrs, nil } diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go deleted file mode 100644 index 572e33a6508..00000000000 --- a/internal/restic/node_aix.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build aix -// +build aix - -package restic - -import "syscall" - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} - -// AIX has a funny timespec type in syscall, with 32-bit nanoseconds. -// golang.org/x/sys/unix handles this cleanly, but we're stuck with syscall -// because os.Stat returns a syscall type in its os.FileInfo.Sys(). -func toTimespec(t syscall.StTimespec_t) syscall.Timespec { - return syscall.Timespec{Sec: t.Sec, Nsec: int64(t.Nsec)} -} - -func (s statT) atim() syscall.Timespec { return toTimespec(s.Atim) } -func (s statT) mtim() syscall.Timespec { return toTimespec(s.Mtim) } -func (s statT) ctim() syscall.Timespec { return toTimespec(s.Ctim) } - -// Getxattr is a no-op on AIX. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil -} - -// Listxattr is a no-op on AIX. -func Listxattr(path string) ([]string, error) { - return nil, nil -} - -// Setxattr is a no-op on AIX. -func Setxattr(path, name string, data []byte) error { - return nil -} diff --git a/internal/restic/node_darwin.go b/internal/restic/node_darwin.go deleted file mode 100644 index 803aa68e572..00000000000 --- a/internal/restic/node_darwin.go +++ /dev/null @@ -1,11 +0,0 @@ -package restic - -import "syscall" - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} - -func (s statT) atim() syscall.Timespec { return s.Atimespec } -func (s statT) mtim() syscall.Timespec { return s.Mtimespec } -func (s statT) ctim() syscall.Timespec { return s.Ctimespec } diff --git a/internal/restic/node_freebsd.go b/internal/restic/node_freebsd.go deleted file mode 100644 index 34d5b272c01..00000000000 --- a/internal/restic/node_freebsd.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build freebsd -// +build freebsd - -package restic - -import "syscall" - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} - -func mknod(path string, mode uint32, dev uint64) (err error) { - return syscall.Mknod(path, mode, dev) -} - -func (s statT) atim() syscall.Timespec { return s.Atimespec } -func (s statT) mtim() syscall.Timespec { return s.Mtimespec } -func (s statT) ctim() syscall.Timespec { return s.Ctimespec } diff --git a/internal/restic/node_linux.go b/internal/restic/node_linux.go deleted file mode 100644 index 85a3638306d..00000000000 --- a/internal/restic/node_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -package restic - -import ( - "path/filepath" - "syscall" - - "golang.org/x/sys/unix" - - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" -) - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - dir, err := fs.Open(filepath.Dir(path)) - if err != nil { - return errors.WithStack(err) - } - - times := []unix.Timespec{ - {Sec: utimes[0].Sec, Nsec: 
utimes[0].Nsec}, - {Sec: utimes[1].Sec, Nsec: utimes[1].Nsec}, - } - - err = unix.UtimesNanoAt(int(dir.Fd()), filepath.Base(path), times, unix.AT_SYMLINK_NOFOLLOW) - - if err != nil { - // ignore subsequent errors - _ = dir.Close() - return errors.Wrap(err, "UtimesNanoAt") - } - - return dir.Close() -} - -func (s statT) atim() syscall.Timespec { return s.Atim } -func (s statT) mtim() syscall.Timespec { return s.Mtim } -func (s statT) ctim() syscall.Timespec { return s.Ctim } diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go deleted file mode 100644 index 0eade2f37f9..00000000000 --- a/internal/restic/node_netbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package restic - -import "syscall" - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} - -func (s statT) atim() syscall.Timespec { return s.Atimespec } -func (s statT) mtim() syscall.Timespec { return s.Mtimespec } -func (s statT) ctim() syscall.Timespec { return s.Ctimespec } - -// Getxattr retrieves extended attribute data associated with path. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil -} - -// Listxattr retrieves a list of names of extended attributes associated with the -// given path in the file system. -func Listxattr(path string) ([]string, error) { - return nil, nil -} - -// Setxattr associates name and data together as an attribute of path. -func Setxattr(path, name string, data []byte) error { - return nil -} diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go deleted file mode 100644 index a4ccc72113b..00000000000 --- a/internal/restic/node_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package restic - -import "syscall" - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} - -func (s statT) atim() syscall.Timespec { return s.Atim } -func (s statT) mtim() syscall.Timespec { return s.Mtim } -func (s statT) ctim() syscall.Timespec { return s.Ctim } - -// Getxattr retrieves extended attribute data associated with path. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil -} - -// Listxattr retrieves a list of names of extended attributes associated with the -// given path in the file system. -func Listxattr(path string) ([]string, error) { - return nil, nil -} - -// Setxattr associates name and data together as an attribute of path. 
-func Setxattr(path, name string, data []byte) error { - return nil -} diff --git a/internal/restic/node_solaris.go b/internal/restic/node_solaris.go deleted file mode 100644 index c9d03f9c263..00000000000 --- a/internal/restic/node_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package restic - -import "syscall" - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} - -func (s statT) atim() syscall.Timespec { return s.Atim } -func (s statT) mtim() syscall.Timespec { return s.Mtim } -func (s statT) ctim() syscall.Timespec { return s.Ctim } diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index aae010421b8..38a17cb0906 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -1,292 +1,14 @@ -package restic_test +package restic import ( - "context" "encoding/json" "fmt" - "os" - "path/filepath" - "reflect" - "runtime" "testing" "time" - "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" - rtest "github.com/restic/restic/internal/test" ) -func BenchmarkNodeFillUser(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - - path := tempfile.Name() - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, err := restic.NodeFromFileInfo(path, fi) - rtest.OK(t, err) - } - - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) -} - -func BenchmarkNodeFromFileInfo(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - - path := tempfile.Name() - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, err := restic.NodeFromFileInfo(path, fi) - if err != nil { - t.Fatal(err) - } - } - - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) -} - -func parseTime(s string) time.Time { - t, err := time.Parse("2006-01-02 15:04:05.999", s) - if err != nil { - panic(err) - } - - return t.Local() -} - -var nodeTests = []restic.Node{ - { - Name: "testFile", - Type: "file", - Content: restic.IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSuidFile", - Type: "file", - Content: restic.IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0755 | os.ModeSetuid, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSuidFile2", - Type: "file", - Content: restic.IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0755 | os.ModeSetgid, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSticky", - Type: "file", - Content: restic.IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0755 | os.ModeSticky, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testDir", - Type: "dir", - Subtree: nil, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0750 | os.ModeDir, - ModTime: parseTime("2015-05-14 
21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSymlink", - Type: "symlink", - LinkTarget: "invalid", - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0777 | os.ModeSymlink, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - - // include "testFile" and "testDir" again with slightly different - // metadata, so we can test if CreateAt works with pre-existing files. - { - Name: "testFile", - Type: "file", - Content: restic.IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - }, - { - Name: "testDir", - Type: "dir", - Subtree: nil, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0750 | os.ModeDir, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - }, - { - Name: "testXattrFile", - Type: "file", - Content: restic.IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []restic.ExtendedAttribute{ - {"user.foo", []byte("bar")}, - }, - }, - { - Name: "testXattrDir", - Type: "dir", - Subtree: nil, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0750 | os.ModeDir, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []restic.ExtendedAttribute{ - {"user.foo", []byte("bar")}, - }, - }, -} - -func TestNodeRestoreAt(t *testing.T) { - tempdir := t.TempDir() - - for _, test := range nodeTests { - t.Run("", func(t *testing.T) { - var nodePath string - if test.ExtendedAttributes != nil { - if runtime.GOOS == "windows" { - // restic does not support xattrs on windows - return - } - - // tempdir might be backed by a filesystem that does not support - // extended attributes - nodePath = test.Name - defer func() { - _ = os.Remove(nodePath) - }() - } else { - nodePath = filepath.Join(tempdir, test.Name) - } - rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil)) - rtest.OK(t, test.RestoreMetadata(nodePath)) - - if test.Type == "dir" { - rtest.OK(t, test.RestoreTimestamps(nodePath)) - } - - fi, err := os.Lstat(nodePath) - rtest.OK(t, err) - - n2, err := restic.NodeFromFileInfo(nodePath, fi) - rtest.OK(t, err) - - rtest.Assert(t, test.Name == n2.Name, - "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) - rtest.Assert(t, test.Type == n2.Type, - "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type) - rtest.Assert(t, test.Size == n2.Size, - "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size) - - if runtime.GOOS != "windows" { - rtest.Assert(t, test.UID == n2.UID, - "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) - rtest.Assert(t, test.GID == n2.GID, - "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) - if test.Type != "symlink" { - // On OpenBSD only root can set sticky bit (see sticky(8)). 
- if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" { - rtest.Assert(t, test.Mode == n2.Mode, - "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) - } - } - } - - AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) - AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) - if len(n2.ExtendedAttributes) == 0 { - n2.ExtendedAttributes = nil - } - rtest.Assert(t, reflect.DeepEqual(test.ExtendedAttributes, n2.ExtendedAttributes), - "%v: xattrs don't match (%v != %v)", test.Name, test.ExtendedAttributes, n2.ExtendedAttributes) - }) - } -} - -func AssertFsTimeEqual(t *testing.T, label string, nodeType string, t1 time.Time, t2 time.Time) { - var equal bool - - // Go currently doesn't support setting timestamps of symbolic links on darwin and bsd - if nodeType == "symlink" { - switch runtime.GOOS { - case "darwin", "freebsd", "openbsd", "netbsd", "solaris": - return - } - } - - switch runtime.GOOS { - case "darwin": - // HFS+ timestamps don't support sub-second precision, - // see https://en.wikipedia.org/wiki/Comparison_of_file_systems - diff := int(t1.Sub(t2).Seconds()) - equal = diff == 0 - default: - equal = t1.Equal(t2) - } - - rtest.Assert(t, equal, "%s: %s doesn't match (%v != %v)", label, nodeType, t1, t2) -} - func parseTimeNano(t testing.TB, s string) time.Time { // 2006-01-02T15:04:05.999999999Z07:00 ts, err := time.Parse(time.RFC3339Nano, s) @@ -330,7 +52,7 @@ func TestFixTime(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - res := restic.FixTime(test.src) + res := FixTime(test.src) if !res.Equal(test.want) { t.Fatalf("wrong result for %v, want:\n %v\ngot:\n %v", test.src, test.want, res) } @@ -343,12 +65,12 @@ func TestSymlinkSerialization(t *testing.T) { "válîd \t Üñi¢òde \n śẗŕinǵ", string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc}), } { - n := restic.Node{ + n := Node{ LinkTarget: link, } ser, err := json.Marshal(n) test.OK(t, err) - var n2 restic.Node + var n2 Node err = json.Unmarshal(ser, &n2) test.OK(t, err) fmt.Println(string(ser)) @@ -365,7 +87,7 @@ func TestSymlinkSerializationFormat(t *testing.T) { {`{"linktarget":"test"}`, "test"}, {`{"linktarget":"\u0000\u0001\u0002\ufffd\ufffd\ufffd","linktarget_raw":"AAEC+vv8"}`, string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc})}, } { - var n2 restic.Node + var n2 Node err := json.Unmarshal([]byte(d.ser), &n2) test.OK(t, err) test.Equals(t, d.linkTarget, n2.LinkTarget) diff --git a/internal/restic/node_unix.go b/internal/restic/node_unix.go deleted file mode 100644 index 976cd7b0366..00000000000 --- a/internal/restic/node_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !windows -// +build !windows - -package restic - -import ( - "os" - "syscall" -) - -func lchown(name string, uid, gid int) error { - return os.Lchown(name, uid, gid) -} - -type statT syscall.Stat_t - -func toStatT(i interface{}) (*statT, bool) { - s, ok := i.(*syscall.Stat_t) - if ok && s != nil { - return (*statT)(s), true - } - return nil, false -} - -func (s statT) dev() uint64 { return uint64(s.Dev) } -func (s statT) ino() uint64 { return uint64(s.Ino) } -func (s statT) nlink() uint64 { return uint64(s.Nlink) } -func (s statT) uid() uint32 { return uint32(s.Uid) } -func (s statT) gid() uint32 { return uint32(s.Gid) } -func (s statT) rdev() uint64 { return uint64(s.Rdev) } -func (s statT) size() int64 { return int64(s.Size) } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 
fc6439b4045..7df4266656b 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -1,85 +1,26 @@ package restic import ( + "encoding/json" + "reflect" + "runtime" "syscall" - - "github.com/restic/restic/internal/errors" ) -// mknod is not supported on Windows. -func mknod(path string, mode uint32, dev uint64) (err error) { - return errors.New("device nodes cannot be created on windows") -} - -// Windows doesn't need lchown -func lchown(path string, uid int, gid int) (err error) { - return nil -} - -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - // tweaked version of UtimesNano from go/src/syscall/syscall_windows.go - pathp, e := syscall.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) - if e != nil { - return e - } - defer syscall.Close(h) - a := syscall.NsecToFiletime(syscall.TimespecToNsec(utimes[0])) - w := syscall.NsecToFiletime(syscall.TimespecToNsec(utimes[1])) - return syscall.SetFileTime(h, nil, &a, &w) -} - -// Getxattr retrieves extended attribute data associated with path. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil -} - -// Listxattr retrieves a list of names of extended attributes associated with the -// given path in the file system. -func Listxattr(path string) ([]string, error) { - return nil, nil -} - -// Setxattr associates name and data together as an attribute of path. -func Setxattr(path, name string, data []byte) error { - return nil -} - -type statT syscall.Win32FileAttributeData - -func toStatT(i interface{}) (*statT, bool) { - s, ok := i.(*syscall.Win32FileAttributeData) - if ok && s != nil { - return (*statT)(s), true - } - return nil, false -} - -func (s statT) dev() uint64 { return 0 } -func (s statT) ino() uint64 { return 0 } -func (s statT) nlink() uint64 { return 0 } -func (s statT) uid() uint32 { return 0 } -func (s statT) gid() uint32 { return 0 } -func (s statT) rdev() uint64 { return 0 } - -func (s statT) size() int64 { - return int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32) -} - -func (s statT) atim() syscall.Timespec { - return syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) -} - -func (s statT) mtim() syscall.Timespec { - return syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) -} - -func (s statT) ctim() syscall.Timespec { - // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. - return syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) +// WindowsAttributes are the genericAttributes for Windows OS +type WindowsAttributes struct { + // CreationTime is used for storing creation time for windows files. + CreationTime *syscall.Filetime `generic:"creation_time"` + // FileAttributes is used for storing file attributes for windows files. 
+	FileAttributes *uint32 `generic:"file_attributes"`
+	// SecurityDescriptor is used for storing security descriptors which include the
+	// owner, group, discretionary access control list (DACL) and system access control list (SACL)
+	SecurityDescriptor *[]byte `generic:"security_descriptor"`
+}
+
+// WindowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection
+func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) {
+	// Get the value of the WindowsAttributes
+	windowsAttributesValue := reflect.ValueOf(windowsAttributes)
+	return OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS)
 }
diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go
deleted file mode 100644
index ea9eafe94b2..00000000000
--- a/internal/restic/node_xattr.go
+++ /dev/null
@@ -1,49 +0,0 @@
-//go:build darwin || freebsd || linux || solaris
-// +build darwin freebsd linux solaris
-
-package restic
-
-import (
-	"syscall"
-
-	"github.com/restic/restic/internal/errors"
-
-	"github.com/pkg/xattr"
-)
-
-// Getxattr retrieves extended attribute data associated with path.
-func Getxattr(path, name string) ([]byte, error) {
-	b, err := xattr.LGet(path, name)
-	return b, handleXattrErr(err)
-}
-
-// Listxattr retrieves a list of names of extended attributes associated with the
-// given path in the file system.
-func Listxattr(path string) ([]string, error) {
-	l, err := xattr.LList(path)
-	return l, handleXattrErr(err)
-}
-
-// Setxattr associates name and data together as an attribute of path.
-func Setxattr(path, name string, data []byte) error {
-	return handleXattrErr(xattr.LSet(path, name, data))
-}
-
-func handleXattrErr(err error) error {
-	switch e := err.(type) {
-	case nil:
-		return nil
-
-	case *xattr.Error:
-		// On Linux, xattr calls on files in an SMB/CIFS mount can return
-		// ENOATTR instead of ENOTSUP.
-		switch e.Err {
-		case syscall.ENOTSUP, xattr.ENOATTR:
-			return nil
-		}
-		return errors.WithStack(e)
-
-	default:
-		return errors.WithStack(e)
-	}
-}
diff --git a/internal/restic/parallel.go b/internal/restic/parallel.go
index cefbf03589c..1c56f684861 100644
--- a/internal/restic/parallel.go
+++ b/internal/restic/parallel.go
@@ -3,7 +3,6 @@ package restic
 import (
 	"context"
 
-	"github.com/restic/restic/internal/backend"
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/ui/progress"
 	"golang.org/x/sync/errgroup"
@@ -55,7 +54,7 @@ func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, f
 // ParallelRemove deletes the given fileList of fileType in parallel.
 // If the callback returns an error, then it will abort.
-func ParallelRemove(ctx context.Context, repo Repository, fileList IDSet, fileType FileType, report func(id ID, err error) error, bar *progress.Counter) error { +func ParallelRemove[FT FileTypes](ctx context.Context, repo RemoverUnpacked[FT], fileList IDSet, fileType FT, report func(id ID, err error) error, bar *progress.Counter) error { fileChan := make(chan ID) wg, ctx := errgroup.WithContext(ctx) wg.Go(func() error { @@ -77,8 +76,7 @@ func ParallelRemove(ctx context.Context, repo Repository, fileList IDSet, fileTy for i := 0; i < int(workerCount); i++ { wg.Go(func() error { for id := range fileChan { - h := backend.Handle{Type: fileType, Name: id.String()} - err := repo.Backend().Remove(ctx, h) + err := repo.RemoveUnpacked(ctx, fileType, id) if report != nil { err = report(id, err) } diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 66cc22ea95b..07ef9cbc085 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -16,51 +16,57 @@ var ErrInvalidData = errors.New("invalid data returned") // Repository stores data in a backend. It provides high-level functions and // transparently encrypts/decrypts data. type Repository interface { - - // Backend returns the backend used by the repository - Backend() backend.Backend // Connections returns the maximum number of concurrent backend operations Connections() uint - + Config() Config Key() *crypto.Key - Index() MasterIndex - LoadIndex(context.Context, *progress.Counter) error - SetIndex(MasterIndex) error - LookupBlobSize(ID, BlobType) (uint, bool) + LoadIndex(ctx context.Context, p *progress.Counter) error + SetIndex(mi MasterIndex) error - Config() Config - PackSize() uint - - // List calls the function fn for each file of type t in the repository. - // When an error is returned by fn, processing stops and List() returns the - // error. - // - // The function fn is called in the same Goroutine List() was called from. - List(ctx context.Context, t FileType, fn func(ID, int64) error) error + LookupBlob(t BlobType, id ID) []PackedBlob + LookupBlobSize(t BlobType, id ID) (size uint, exists bool) + // ListBlobs runs fn on all blobs known to the index. When the context is cancelled, + // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. + ListBlobs(ctx context.Context, fn func(PackedBlob)) error + ListPacksFromIndex(ctx context.Context, packs IDSet) <-chan PackBlobs // ListPack returns the list of blobs saved in the pack id and the length of // the pack header. - ListPack(context.Context, ID, int64) ([]Blob, uint32, error) + ListPack(ctx context.Context, id ID, packSize int64) (entries []Blob, hdrSize uint32, err error) - LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error) + LoadBlob(ctx context.Context, t BlobType, id ID, buf []byte) ([]byte, error) LoadBlobsFromPack(ctx context.Context, packID ID, blobs []Blob, handleBlobFn func(blob BlobHandle, buf []byte, err error) error) error - SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error) // StartPackUploader start goroutines to upload new pack files. The errgroup // is used to immediately notify about an upload error. Flush() will also return // that error. 
 	StartPackUploader(ctx context.Context, wg *errgroup.Group)
-	Flush(context.Context) error
+	SaveBlob(ctx context.Context, t BlobType, buf []byte, id ID, storeDuplicate bool) (newID ID, known bool, size int, err error)
+	Flush(ctx context.Context) error
 
+	// List calls the function fn for each file of type t in the repository.
+	// When an error is returned by fn, processing stops and List() returns the
+	// error.
+	//
+	// The function fn is called in the same Goroutine List() was called from.
+	List(ctx context.Context, t FileType, fn func(ID, int64) error) error
+	// LoadRaw reads all data stored in the backend for the file with id and filetype t.
+	// If the backend returns data that does not match the id, then the buffer is returned
+	// along with an error that is a restic.ErrInvalidData error.
+	LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error)
 	// LoadUnpacked loads and decrypts the file with the given type and ID.
 	LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
-	SaveUnpacked(context.Context, FileType, []byte) (ID, error)
+	SaveUnpacked(ctx context.Context, t WriteableFileType, buf []byte) (ID, error)
+	// RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots.
+	RemoveUnpacked(ctx context.Context, t WriteableFileType, id ID) error
 }
 
 type FileType = backend.FileType
 
-// These are the different data types a backend can store.
+// These are the different data types a backend can store. Only file types contained
+// in the `WriteableFileType` subset can be modified via the Repository interface.
+// All other file types are considered internal data structures of the Repository.
 const (
 	PackFile FileType = backend.PackFile
 	KeyFile  FileType = backend.KeyFile
@@ -70,6 +76,26 @@ const (
 	ConfigFile FileType = backend.ConfigFile
 )
 
+type WriteableFileType backend.FileType
+
+// These are the different data types that can be modified via SaveUnpacked or RemoveUnpacked.
+const (
+	WriteableSnapshotFile WriteableFileType = WriteableFileType(SnapshotFile)
+)
+
+func (w *WriteableFileType) ToFileType() FileType {
+	switch *w {
+	case WriteableSnapshotFile:
+		return SnapshotFile
+	default:
+		panic("invalid WriteableFileType")
+	}
+}
+
+type FileTypes interface {
+	FileType | WriteableFileType
+}
+
 // LoaderUnpacked allows loading a blob not stored in a pack file
 type LoaderUnpacked interface {
 	// Connections returns the maximum number of concurrent backend operations
@@ -78,10 +104,22 @@ type LoaderUnpacked interface {
 }
 
 // SaverUnpacked allows saving a blob not stored in a pack file
-type SaverUnpacked interface {
+type SaverUnpacked[FT FileTypes] interface {
+	// Connections returns the maximum number of concurrent backend operations
+	Connections() uint
+	SaveUnpacked(ctx context.Context, t FT, buf []byte) (ID, error)
+}
+
+// RemoverUnpacked allows removing an unpacked blob
+type RemoverUnpacked[FT FileTypes] interface {
 	// Connections returns the maximum number of concurrent backend operations
 	Connections() uint
-	SaveUnpacked(context.Context, FileType, []byte) (ID, error)
+	RemoveUnpacked(ctx context.Context, t FT, id ID) error
+}
+
+type SaverRemoverUnpacked[FT FileTypes] interface {
+	SaverUnpacked[FT]
+	RemoverUnpacked[FT]
 }
 
 type PackBlobs struct {
@@ -89,24 +127,15 @@ type PackBlobs struct {
 	Blobs []Blob
 }
 
-type MasterIndexSaveOpts struct {
-	SaveProgress   *progress.Counter
-	DeleteProgress func() *progress.Counter
-	DeleteReport   func(id ID, err error)
-	SkipDeletion   bool
-}
-
 // MasterIndex keeps track of which blobs are stored within files.
 type MasterIndex interface {
-	Has(BlobHandle) bool
-	Lookup(BlobHandle) []PackedBlob
+	Has(bh BlobHandle) bool
+	Lookup(bh BlobHandle) []PackedBlob
 
 	// Each runs fn on all blobs known to the index. When the context is cancelled,
-	// the index iteration return immediately. This blocks any modification of the index.
-	Each(ctx context.Context, fn func(PackedBlob))
+	// the index iteration returns immediately with ctx.Err(). This blocks any modification of the index.
+	Each(ctx context.Context, fn func(PackedBlob)) error
 	ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs
-
-	Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error
 }
 
 // Lister allows listing files in a backend.
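A minimal sketch of what the FileTypes constraint buys (the forgetSnapshot helper is hypothetical): write-side helpers accept only the WriteableFileType subset, so the compiler rejects attempts to save or remove anything but a snapshot through them, while read-side helpers stay generic over all file types. SaveSnapshot later in this diff applies the same pattern via SaverUnpacked[WriteableFileType].

// forgetSnapshot is a hypothetical helper. Because the repo parameter is a
// RemoverUnpacked[WriteableFileType], only file types from the writeable
// subset are accepted; today that is just WriteableSnapshotFile.
func forgetSnapshot(ctx context.Context, repo RemoverUnpacked[WriteableFileType], id ID) error {
	// repo.RemoveUnpacked(ctx, PackFile, id) would not compile here, since
	// PackFile is a plain FileType outside the writeable subset.
	return repo.RemoveUnpacked(ctx, WriteableSnapshotFile, id)
}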
@@ -118,3 +147,13 @@ type ListerLoaderUnpacked interface { Lister LoaderUnpacked } + +type Unpacked[FT FileTypes] interface { + ListerLoaderUnpacked + SaverUnpacked[FT] + RemoverUnpacked[FT] +} + +type ListBlobser interface { + ListBlobs(ctx context.Context, fn func(PackedBlob)) error +} diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 88171a646ad..f9cdf4daf21 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -25,11 +25,31 @@ type Snapshot struct { Tags []string `json:"tags,omitempty"` Original *ID `json:"original,omitempty"` - ProgramVersion string `json:"program_version,omitempty"` + ProgramVersion string `json:"program_version,omitempty"` + Summary *SnapshotSummary `json:"summary,omitempty"` id *ID // plaintext ID, used during restore } +type SnapshotSummary struct { + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + + // statistics from the backup json output + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedPacked uint64 `json:"data_added_packed"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` +} + // NewSnapshot returns an initialized snapshot struct for the current user and // time. func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) { @@ -70,8 +90,8 @@ func LoadSnapshot(ctx context.Context, loader LoaderUnpacked, id ID) (*Snapshot, } // SaveSnapshot saves the snapshot sn and returns its ID. -func SaveSnapshot(ctx context.Context, repo SaverUnpacked, sn *Snapshot) (ID, error) { - return SaveJSONUnpacked(ctx, repo, SnapshotFile, sn) +func SaveSnapshot(ctx context.Context, repo SaverUnpacked[WriteableFileType], sn *Snapshot) (ID, error) { + return SaveJSONUnpacked(ctx, repo, WriteableSnapshotFile, sn) } // ForAllSnapshots reads all snapshots in parallel and calls the @@ -83,7 +103,7 @@ func ForAllSnapshots(ctx context.Context, be Lister, loader LoaderUnpacked, excl var m sync.Mutex // For most snapshots decoding is nearly for free, thus just assume were only limited by IO - return ParallelList(ctx, be, SnapshotFile, loader.Connections(), func(ctx context.Context, id ID, size int64) error { + return ParallelList(ctx, be, SnapshotFile, loader.Connections(), func(ctx context.Context, id ID, _ int64) error { if excludeIDs.Has(id) { return nil } diff --git a/internal/restic/snapshot_find.go b/internal/restic/snapshot_find.go index cb761aee35b..6eb51b23707 100644 --- a/internal/restic/snapshot_find.go +++ b/internal/restic/snapshot_find.go @@ -24,7 +24,7 @@ type SnapshotFilter struct { TimestampLimit time.Time } -func (f *SnapshotFilter) empty() bool { +func (f *SnapshotFilter) Empty() bool { return len(f.Hosts)+len(f.Tags)+len(f.Paths) == 0 } @@ -134,6 +134,10 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn ids := NewIDSet() // Process all snapshot IDs given as arguments. 
for _, s := range snapshotIDs { + if ctx.Err() != nil { + return ctx.Err() + } + var sn *Snapshot if s == "latest" { if usedFilter { @@ -173,7 +177,7 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn } // Give the user some indication their filters are not used. - if !usedFilter && !f.empty() { + if !usedFilter && !f.Empty() { return fn("filters", nil, errors.Errorf("explicit snapshot ids are given")) } return nil diff --git a/internal/restic/snapshot_group.go b/internal/restic/snapshot_group.go index 964a230b3af..f4e1ed3843d 100644 --- a/internal/restic/snapshot_group.go +++ b/internal/restic/snapshot_group.go @@ -66,6 +66,20 @@ type SnapshotGroupKey struct { Tags []string `json:"tags"` } +func (s *SnapshotGroupKey) String() string { + var parts []string + if s.Hostname != "" { + parts = append(parts, fmt.Sprintf("host %v", s.Hostname)) + } + if len(s.Paths) != 0 { + parts = append(parts, fmt.Sprintf("path %v", s.Paths)) + } + if len(s.Tags) != 0 { + parts = append(parts, fmt.Sprintf("tags %v", s.Tags)) + } + return strings.Join(parts, ", ") +} + // GroupSnapshots takes a list of snapshots and a grouping criteria and creates // a grouped list of snapshots. func GroupSnapshots(snapshots Snapshots, groupBy SnapshotGroupByOptions) (map[string]Snapshots, bool, error) { diff --git a/internal/restic/snapshot_policy.go b/internal/restic/snapshot_policy.go index 0ff0c5ec829..28f871f4a7e 100644 --- a/internal/restic/snapshot_policy.go +++ b/internal/restic/snapshot_policy.go @@ -94,7 +94,11 @@ func (e ExpirePolicy) String() (s string) { s += fmt.Sprintf("all snapshots within %s of the newest", e.Within) } - s = "keep " + s + if s == "" { + s = "remove" + } else { + s = "keep " + s + } return s } @@ -186,16 +190,6 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason // sort newest snapshots first sort.Stable(list) - if p.Empty() { - for _, sn := range list { - reasons = append(reasons, KeepReason{ - Snapshot: sn, - Matches: []string{"policy is empty"}, - }) - } - return list, remove, reasons - } - if len(list) == 0 { return list, nil, nil } @@ -262,6 +256,9 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason if val != b.Last || nr == len(list)-1 { debug.Log("keep %v %v, bucker %v, val %v\n", cur.Time, cur.id.Str(), i, val) keepSnap = true + if val == b.Last && nr == len(list)-1 { + b.reason = fmt.Sprintf("oldest %v", b.reason) + } buckets[i].Last = val if buckets[i].Count > 0 { buckets[i].Count-- @@ -281,6 +278,9 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason if val != b.Last || nr == len(list)-1 { debug.Log("keep %v, time %v, ID %v, bucker %v, val %v %v\n", b.reason, cur.Time, cur.id.Str(), i, val, b.Last) keepSnap = true + if val == b.Last && nr == len(list)-1 { + b.reason = fmt.Sprintf("oldest %v", b.reason) + } bucketsWithin[i].Last = val keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("%v %v", b.reason, b.Within)) } diff --git a/internal/restic/snapshot_test.go b/internal/restic/snapshot_test.go index b32c771d4bf..68016287a58 100644 --- a/internal/restic/snapshot_test.go +++ b/internal/restic/snapshot_test.go @@ -32,7 +32,7 @@ func TestLoadJSONUnpacked(t *testing.T) { } func testLoadJSONUnpacked(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) // archive a snapshot sn := restic.Snapshot{} diff --git a/internal/restic/testdata/policy_keep_snapshots_0 
b/internal/restic/testdata/policy_keep_snapshots_0 index 11ca587c8af..96cc25cc7a5 100644 --- a/internal/restic/testdata/policy_keep_snapshots_0 +++ b/internal/restic/testdata/policy_keep_snapshots_0 @@ -1,1782 +1,3 @@ { - "keep": [ - { - "time": "2016-01-18T12:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-12T21:08:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-12T21:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-09T21:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-08T20:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-07T10:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-06T08:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-05T09:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T16:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:30:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:28:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:24:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T11:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T10:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-03T07:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T07:08:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T01:03:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T01:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-15T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-13T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-12T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": [ - "path1", - "path2" - ], - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2015-10-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-11T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-09T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-06T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-05T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-02T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-01T10:20:30Z", - 
"tree": null, - "paths": null - }, - { - "time": "2015-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-15T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2014-11-13T10:20:30.1Z", - "tree": null, - "paths": null, - "tags": [ - "bar" - ] - }, - { - "time": "2014-11-13T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-11-12T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-11-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-11-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-20T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-11T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-09T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-06T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-05T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-02T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-01T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": 
"2014-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-08T10:20:30Z", - "tree": null, - "paths": null - } - ], - "reasons": [ - { - "snapshot": { - "time": "2016-01-18T12:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-12T21:08:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-12T21:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-09T21:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-08T20:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-07T10:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-06T08:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-05T09:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T16:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:30:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:28:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:24:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T11:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - 
"counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T10:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-03T07:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-01T07:08:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-01T01:03:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-01T01:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-15T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-13T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-12T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": [ - "path1", - "path2" - ], - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-11T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": 
"2015-10-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-09T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-06T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-05T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-02T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-01T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-13T10:20:30Z", - "tree": 
null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-15T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-13T10:20:30.1Z", - "tree": null, - "paths": null, - "tags": [ - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-13T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-12T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-20T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-11T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-09T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-06T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-05T10:20:30Z", - "tree": null, 
- "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-02T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-01T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - } - ] + "keep": null } \ No 
newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_16 b/internal/restic/testdata/policy_keep_snapshots_16 index da6f43a1c7c..07444102b5f 100644 --- a/internal/restic/testdata/policy_keep_snapshots_16 +++ b/internal/restic/testdata/policy_keep_snapshots_16 @@ -68,7 +68,7 @@ "paths": null }, "matches": [ - "yearly snapshot" + "oldest yearly snapshot" ], "counters": { "yearly": 6 diff --git a/internal/restic/testdata/policy_keep_snapshots_17 b/internal/restic/testdata/policy_keep_snapshots_17 index ee728d4e09b..de489d445ce 100644 --- a/internal/restic/testdata/policy_keep_snapshots_17 +++ b/internal/restic/testdata/policy_keep_snapshots_17 @@ -214,7 +214,7 @@ "paths": null }, "matches": [ - "yearly snapshot" + "oldest yearly snapshot" ], "counters": { "yearly": 6 diff --git a/internal/restic/testdata/policy_keep_snapshots_35 b/internal/restic/testdata/policy_keep_snapshots_35 index ece4ddbd226..afc2017ddf6 100644 --- a/internal/restic/testdata/policy_keep_snapshots_35 +++ b/internal/restic/testdata/policy_keep_snapshots_35 @@ -165,7 +165,7 @@ "paths": null }, "matches": [ - "yearly within 9999y" + "oldest yearly within 9999y" ], "counters": {} } diff --git a/internal/restic/testdata/policy_keep_snapshots_36 b/internal/restic/testdata/policy_keep_snapshots_36 index 75a3a5b4645..cce4cf537fb 100644 --- a/internal/restic/testdata/policy_keep_snapshots_36 +++ b/internal/restic/testdata/policy_keep_snapshots_36 @@ -590,7 +590,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -601,7 +603,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -612,7 +616,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -623,7 +629,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -634,7 +642,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -645,7 +655,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -656,7 +668,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -667,7 +681,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -678,7 +694,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -689,7 +707,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -700,7 +720,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -711,7 +733,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -722,7 +746,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -733,7 +759,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -744,7 +772,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -755,7 +785,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { 
@@ -766,7 +798,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -777,7 +811,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -788,7 +824,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -799,7 +837,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -810,7 +850,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -821,7 +863,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -832,7 +876,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -843,7 +889,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -854,7 +902,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -865,7 +915,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -876,7 +928,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -887,7 +941,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -898,7 +954,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -909,7 +967,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -920,7 +980,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -935,7 +997,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -950,7 +1014,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -968,7 +1034,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -979,7 +1047,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -990,7 +1060,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1001,7 +1073,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1012,7 +1086,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1023,7 +1099,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1034,7 +1112,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1045,7 +1125,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1056,7 +1138,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1067,7 +1151,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { 
@@ -1078,7 +1164,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1089,7 +1177,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1100,7 +1190,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1111,7 +1203,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1122,7 +1216,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1133,7 +1229,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1144,7 +1242,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1155,7 +1255,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1166,7 +1268,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1177,7 +1281,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1188,7 +1294,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1199,7 +1307,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1210,7 +1320,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1221,7 +1333,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1232,7 +1346,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1243,7 +1359,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1254,7 +1372,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1265,7 +1385,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1276,7 +1398,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1287,7 +1411,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1298,7 +1424,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1309,7 +1437,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1320,7 +1450,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1331,7 +1463,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1346,7 +1480,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1360,7 +1496,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1374,7 +1512,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { 
+ "last": -1 + } }, { "snapshot": { @@ -1388,7 +1528,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1402,7 +1544,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1416,7 +1560,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1430,7 +1576,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1444,7 +1592,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1458,7 +1608,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1472,7 +1624,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1486,7 +1640,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1500,7 +1656,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1514,7 +1672,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1528,7 +1688,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1542,7 +1704,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1556,7 +1720,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1567,7 +1733,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1578,7 +1746,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1589,7 +1759,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1600,7 +1772,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1611,7 +1785,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1622,7 +1798,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1633,7 +1811,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1644,7 +1824,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1655,7 +1837,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1666,7 +1850,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1677,7 +1863,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1688,7 +1876,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1699,7 +1889,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1710,7 +1902,9 @@ "matches": [ "last snapshot" ], - 
"counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1721,7 +1915,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1732,7 +1928,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1743,7 +1941,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1754,7 +1954,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1765,7 +1967,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1776,7 +1980,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } } ] } \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_37 b/internal/restic/testdata/policy_keep_snapshots_37 index f6ffa40ea06..9856a83d69b 100644 --- a/internal/restic/testdata/policy_keep_snapshots_37 +++ b/internal/restic/testdata/policy_keep_snapshots_37 @@ -591,7 +591,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -603,7 +606,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -614,7 +620,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -626,7 +635,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -638,7 +650,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -650,7 +665,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -662,7 +680,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -674,7 +695,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -686,7 +710,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -698,7 +725,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -709,7 +739,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -720,7 +753,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -731,7 +767,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -743,7 +782,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -755,7 +797,10 @@ "last snapshot", "hourly snapshot" ], - 
"counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -767,7 +812,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -779,7 +827,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -791,7 +842,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -802,7 +856,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -814,7 +871,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -826,7 +886,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -838,7 +901,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -850,7 +916,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -862,7 +931,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -874,7 +946,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -885,7 +960,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -897,7 +975,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -909,7 +990,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -921,7 +1005,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -933,7 +1020,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -944,7 +1034,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -959,7 +1052,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -974,7 +1070,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -992,7 +1091,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1004,7 +1106,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1016,7 +1121,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": 
{ + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1028,7 +1136,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1040,7 +1151,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1052,7 +1166,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1064,7 +1181,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1076,7 +1196,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1088,7 +1211,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1100,7 +1226,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1112,7 +1241,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1124,7 +1256,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1136,7 +1271,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1148,7 +1286,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1160,7 +1301,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1172,7 +1316,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1184,7 +1331,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1196,7 +1346,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1208,7 +1361,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1220,7 +1376,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1232,7 +1391,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1244,7 +1406,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1256,7 +1421,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1268,7 +1436,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": 
-1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1280,7 +1451,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1292,7 +1466,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1303,7 +1480,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1315,7 +1495,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1327,7 +1510,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1339,7 +1525,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1351,7 +1540,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1363,7 +1555,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1375,7 +1570,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1387,7 +1585,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1403,7 +1604,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1418,7 +1622,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1432,7 +1639,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1447,7 +1657,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1462,7 +1675,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1477,7 +1693,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1492,7 +1711,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1507,7 +1729,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1522,7 +1747,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1537,7 +1765,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1552,7 +1783,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, 
"Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1567,7 +1801,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1582,7 +1819,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1597,7 +1837,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1612,7 +1855,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1627,7 +1873,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1639,7 +1888,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1651,7 +1903,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1663,7 +1918,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1675,7 +1933,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1687,7 +1948,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1699,7 +1963,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1711,7 +1978,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1723,7 +1993,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1735,7 +2008,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1747,7 +2023,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1759,7 +2038,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1771,7 +2053,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1783,7 +2068,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1795,7 +2083,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1807,7 +2098,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1819,7 +2113,10 @@ "last snapshot", "hourly snapshot" ], - 
"counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1830,7 +2127,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1842,7 +2142,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1854,7 +2157,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1866,7 +2172,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } } ] } \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_38 b/internal/restic/testdata/policy_keep_snapshots_38 index 6bfdd57f181..f5d7136d421 100644 --- a/internal/restic/testdata/policy_keep_snapshots_38 +++ b/internal/restic/testdata/policy_keep_snapshots_38 @@ -507,7 +507,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -518,7 +520,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -529,7 +533,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -540,7 +546,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -551,7 +559,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -562,7 +572,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -573,7 +585,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -584,7 +598,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -595,7 +611,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -606,7 +624,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -617,7 +637,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -628,7 +650,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -639,7 +663,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -650,7 +676,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -661,7 +689,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -672,7 +702,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -683,7 +715,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -694,7 +728,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -705,7 +741,9 @@ 
"matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -716,7 +754,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -727,7 +767,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -738,7 +780,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -749,7 +793,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -760,7 +806,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -771,7 +819,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -782,7 +832,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -793,7 +845,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -804,7 +858,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -815,7 +871,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -826,7 +884,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -837,7 +897,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -848,7 +910,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -859,7 +923,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -870,7 +936,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -881,7 +949,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -892,7 +962,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -903,7 +975,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -914,7 +988,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -925,7 +1001,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -936,7 +1014,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -947,7 +1027,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -958,7 +1040,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -969,7 +1053,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -980,7 +1066,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ 
-991,7 +1079,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1002,7 +1092,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1013,7 +1105,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1024,7 +1118,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1035,7 +1131,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1046,7 +1144,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1057,7 +1157,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1068,7 +1170,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1079,7 +1183,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1090,7 +1196,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1101,7 +1209,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1112,7 +1222,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1127,7 +1239,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1141,7 +1255,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1155,7 +1271,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1169,7 +1287,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1183,7 +1303,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1197,7 +1319,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1211,7 +1335,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1225,7 +1351,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1239,7 +1367,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1253,7 +1383,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1267,7 +1399,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1281,7 +1415,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1295,7 +1431,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1309,7 +1447,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": 
-1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1323,7 +1463,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1334,7 +1476,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1345,7 +1489,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1356,7 +1502,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1367,7 +1515,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1378,7 +1528,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1389,7 +1541,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1400,7 +1554,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1411,7 +1567,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1422,7 +1580,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1433,7 +1593,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1444,7 +1606,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1455,7 +1619,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1466,7 +1632,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1477,7 +1645,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1488,7 +1658,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1499,7 +1671,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1510,7 +1684,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1521,7 +1697,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1532,7 +1710,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } } ] -} +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_39 b/internal/restic/testdata/policy_keep_snapshots_39 index 4b111503bc1..fc06d899456 100644 --- a/internal/restic/testdata/policy_keep_snapshots_39 +++ b/internal/restic/testdata/policy_keep_snapshots_39 @@ -74,10 +74,15 @@ "matches": [ "daily snapshot", "weekly snapshot", - "monthly snapshot", - "yearly snapshot" + "monthly snapshot", + "yearly snapshot" ], - "counters": {"Daily": 2, "Weekly": 1, "Monthly": -1, "Yearly": -1} + "counters": { + "daily": 2, + "weekly": 1, + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -89,7 +94,11 @@ "daily snapshot", "weekly snapshot" ], - "counters": {"Daily": 1, "Monthly": -1, 
"Yearly": -1} + "counters": { + "daily": 1, + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -100,7 +109,10 @@ "matches": [ "daily snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -112,7 +124,10 @@ "monthly snapshot", "yearly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -123,7 +138,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -134,7 +152,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -145,7 +166,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -157,7 +181,10 @@ "monthly snapshot", "yearly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -171,7 +198,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -182,7 +212,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -193,7 +226,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -202,10 +238,13 @@ "paths": null }, "matches": [ - "monthly snapshot", - "yearly snapshot" + "oldest monthly snapshot", + "oldest yearly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } } ] } \ No newline at end of file diff --git a/internal/restic/testing.go b/internal/restic/testing.go index d2acd3ee9ab..3e056343035 100644 --- a/internal/restic/testing.go +++ b/internal/restic/testing.go @@ -81,7 +81,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I node := &Node{ Name: fmt.Sprintf("dir-%v", treeSeed), - Type: "dir", + Type: NodeTypeDir, Mode: 0755, Subtree: &id, } @@ -95,7 +95,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I node := &Node{ Name: fmt.Sprintf("file-%v", fileSeed), - Type: "file", + Type: NodeTypeFile, Mode: 0644, Size: uint64(fileSize), } @@ -190,7 +190,7 @@ func ParseDurationOrPanic(s string) Duration { // TestLoadAllSnapshots returns a list of all snapshots in the repo. // If a snapshot ID is in excludeIDs, it will not be included in the result. 
-func TestLoadAllSnapshots(ctx context.Context, repo Repository, excludeIDs IDSet) (snapshots Snapshots, err error) { +func TestLoadAllSnapshots(ctx context.Context, repo ListerLoaderUnpacked, excludeIDs IDSet) (snapshots Snapshots, err error) { err = ForAllSnapshots(ctx, repo, repo, excludeIDs, func(id ID, sn *Snapshot, err error) error { if err != nil { return err diff --git a/internal/restic/testing_test.go b/internal/restic/testing_test.go index ae8f8dd3483..0a0c43892e2 100644 --- a/internal/restic/testing_test.go +++ b/internal/restic/testing_test.go @@ -45,7 +45,7 @@ func TestCreateSnapshot(t *testing.T) { t.Fatalf("snapshot has zero tree ID") } - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func BenchmarkTestCreateSnapshot(t *testing.B) { diff --git a/internal/restic/tree.go b/internal/restic/tree.go index 3c3e3ab5660..f406b489f8c 100644 --- a/internal/restic/tree.go +++ b/internal/restic/tree.go @@ -96,7 +96,7 @@ func (t *Tree) Sort() { // Subtrees returns a slice of all subtree IDs of the tree. func (t *Tree) Subtrees() (trees IDs) { for _, node := range t.Nodes { - if node.Type == "dir" && node.Subtree != nil { + if node.Type == NodeTypeDir && node.Subtree != nil { trees = append(trees, *node.Subtree) } } @@ -162,7 +162,7 @@ func NewTreeJSONBuilder() *TreeJSONBuilder { func (builder *TreeJSONBuilder) AddNode(node *Node) error { if node.Name <= builder.lastName { - return fmt.Errorf("node %q, last%q: %w", node.Name, builder.lastName, ErrTreeNotOrdered) + return fmt.Errorf("node %q, last %q: %w", node.Name, builder.lastName, ErrTreeNotOrdered) } if builder.lastName != "" { _ = builder.buf.WriteByte(',') @@ -208,7 +208,7 @@ func FindTreeDirectory(ctx context.Context, repo BlobLoader, id *ID, dir string) if node == nil { return nil, fmt.Errorf("path %s: not found", subfolder) } - if node.Type != "dir" || node.Subtree == nil { + if node.Type != NodeTypeDir || node.Subtree == nil { return nil, fmt.Errorf("path %s: not a directory", subfolder) } id = node.Subtree diff --git a/internal/restic/tree_stream.go b/internal/restic/tree_stream.go index 4110a5e8d83..123295533e3 100644 --- a/internal/restic/tree_stream.go +++ b/internal/restic/tree_stream.go @@ -77,7 +77,7 @@ func filterTrees(ctx context.Context, repo Loader, trees IDs, loaderChan chan<- continue } - treeSize, found := repo.LookupBlobSize(nextTreeID.ID, TreeBlob) + treeSize, found := repo.LookupBlobSize(TreeBlob, nextTreeID.ID) if found && treeSize > 50*1024*1024 { loadCh = hugeTreeLoaderChan } else { diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index da674eb1c0c..5c9c0739c1e 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -82,12 +83,17 @@ func TestNodeMarshal(t *testing.T) { } } -func TestNodeComparison(t *testing.T) { - fi, err := os.Lstat("tree_test.go") +func nodeForFile(t *testing.T, name string) *restic.Node { + f, err := (&fs.Local{}).OpenFile(name, fs.O_NOFOLLOW, true) rtest.OK(t, err) - - node, err := restic.NodeFromFileInfo("tree_test.go", fi) + node, err := f.ToNode(false) rtest.OK(t, err) + rtest.OK(t, f.Close()) + return node +} + +func TestNodeComparison(t *testing.T) { + node := nodeForFile(t, "tree_test.go") n2 := *node rtest.Assert(t, node.Equals(n2), "nodes aren't 
equal") @@ -125,10 +131,7 @@ func TestTreeEqualSerialization(t *testing.T) { builder := restic.NewTreeJSONBuilder() for _, fn := range files[:i] { - fi, err := os.Lstat(fn) - rtest.OK(t, err) - node, err := restic.NodeFromFileInfo(fn, fi) - rtest.OK(t, err) + node := nodeForFile(t, fn) rtest.OK(t, tree.Insert(node)) rtest.OK(t, builder.AddNode(node)) @@ -181,7 +184,7 @@ func testLoadTree(t *testing.T, version uint) { } // archive a few files - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) rtest.OK(t, repo.Flush(context.Background())) @@ -199,7 +202,7 @@ func benchmarkLoadTree(t *testing.B, version uint) { } // archive a few files - repo := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) rtest.OK(t, repo.Flush(context.Background())) diff --git a/internal/restorer/doc.go b/internal/restorer/doc.go index 8d68d716155..e230f23f0b3 100644 --- a/internal/restorer/doc.go +++ b/internal/restorer/doc.go @@ -18,7 +18,7 @@ // // Implementation does not guarantee order in which blobs are written to the // target files and, for example, the last blob of a file can be written to the -// file before any of the preceeding file blobs. It is therefore possible to +// file before any of the preceding file blobs. It is therefore possible to // have gaps in the data written to the target files if restore fails or // interrupted by the user. package restorer diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index f2c134ea926..31234b96098 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -14,11 +14,6 @@ import ( "github.com/restic/restic/internal/ui/restore" ) -// TODO if a blob is corrupt, there may be good blob copies in other packs -// TODO evaluate if it makes sense to split download and processing workers -// pro: can (slowly) read network and decrypt/write files concurrently -// con: each worker needs to keep one pack in memory - const ( largeFileBlobCount = 25 ) @@ -31,6 +26,7 @@ type fileInfo struct { size int64 location string // file on local filesystem relative to restorer basedir blobs interface{} // blobs of the file + state *fileState } type fileBlobInfo struct { @@ -48,7 +44,7 @@ type blobsLoaderFn func(ctx context.Context, packID restic.ID, blobs []restic.Bl // fileRestorer restores set of files type fileRestorer struct { - idx func(restic.BlobHandle) []restic.PackedBlob + idx func(restic.BlobType, restic.ID) []restic.PackedBlob blobsLoader blobsLoaderFn workerCount int @@ -57,6 +53,8 @@ type fileRestorer struct { sparse bool progress *restore.Progress + allowRecursiveDelete bool + dst string files []*fileInfo Error func(string, error) error @@ -64,46 +62,51 @@ type fileRestorer struct { func newFileRestorer(dst string, blobsLoader blobsLoaderFn, - idx func(restic.BlobHandle) []restic.PackedBlob, + idx func(restic.BlobType, restic.ID) []restic.PackedBlob, connections uint, sparse bool, + allowRecursiveDelete bool, progress *restore.Progress) *fileRestorer { // as packs are streamed the concurrency is limited by IO workerCount := int(connections) return &fileRestorer{ - idx: idx, - blobsLoader: blobsLoader, - filesWriter: newFilesWriter(workerCount), - zeroChunk: repository.ZeroChunk(), - sparse: sparse, - progress: progress, - workerCount: 
workerCount, - dst: dst, - Error: restorerAbortOnAllErrors, + idx: idx, + blobsLoader: blobsLoader, + filesWriter: newFilesWriter(workerCount, allowRecursiveDelete), + zeroChunk: repository.ZeroChunk(), + sparse: sparse, + progress: progress, + allowRecursiveDelete: allowRecursiveDelete, + workerCount: workerCount, + dst: dst, + Error: restorerAbortOnAllErrors, } } -func (r *fileRestorer) addFile(location string, content restic.IDs, size int64) { - r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size}) +func (r *fileRestorer) addFile(location string, content restic.IDs, size int64, state *fileState) { + r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size, state: state}) } func (r *fileRestorer) targetPath(location string) string { return filepath.Join(r.dst, location) } -func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob)) error { +func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob, idx int, fileOffset int64)) error { if len(blobIDs) == 0 { return nil } - for _, blobID := range blobIDs { - packs := r.idx(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) + fileOffset := int64(0) + for i, blobID := range blobIDs { + packs := r.idx(restic.DataBlob, blobID) if len(packs) == 0 { return errors.Errorf("Unknown blob %s", blobID.String()) } - fn(packs[0].PackID, packs[0].Blob) + pb := packs[0] + fn(pb.PackID, pb.Blob, i, fileOffset) + fileOffset += int64(pb.DataLength()) } return nil @@ -119,17 +122,28 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // create packInfo from fileInfo for _, file := range r.files { + if ctx.Err() != nil { + return ctx.Err() + } + fileBlobs := file.blobs.(restic.IDs) largeFile := len(fileBlobs) > largeFileBlobCount var packsMap map[restic.ID][]fileBlobInfo if largeFile { packsMap = make(map[restic.ID][]fileBlobInfo) + file.blobs = packsMap } - fileOffset := int64(0) - err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) { - if largeFile { - packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) - fileOffset += int64(blob.DataLength()) + restoredBlobs := false + err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int, fileOffset int64) { + if !file.state.HasMatchingBlob(idx) { + if largeFile { + packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) + } + restoredBlobs = true + } else { + r.reportBlobProgress(file, uint64(blob.DataLength())) + // completely ignore blob + return } pack, ok := packs[packID] if !ok { @@ -145,20 +159,38 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { file.sparse = r.sparse } }) + if err != nil { + // repository index is messed up, can't do anything + return err + } + if len(fileBlobs) == 1 { // no need to preallocate files with a single block, thus we can always consider them to be sparse // in addition, a short chunk will never match r.zeroChunk which would prevent sparseness for short files file.sparse = r.sparse } - - if err != nil { - // repository index is messed up, can't do anything - return err + if file.state != nil { + // The restorer currently cannot punch new holes into an existing file. + // Thus sections that contained data but should be sparse after restoring + // the snapshot would still contain the old data, resulting in a corrupt restore.
+ file.sparse = false } - if largeFile { - file.blobs = packsMap + + // empty file or one with already up-to-date content. Make sure that the file size is correct + if !restoredBlobs { + err := r.truncateFileToSize(file.location, file.size) + if errFile := r.sanitizeError(file, err); errFile != nil { + return errFile + } + + // the progress events were already sent for non-zero size files + if file.size == 0 { + r.reportBlobProgress(file, 0) + } } } + // drop no longer necessary file list + r.files = nil wg, ctx := errgroup.WithContext(ctx) downloadCh := make(chan *packInfo) @@ -177,6 +209,7 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // the main restore loop wg.Go(func() error { + defer close(downloadCh) for _, id := range packOrder { pack := packs[id] // allow garbage collection of packInfo @@ -188,13 +221,20 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { debug.Log("Scheduled download pack %s", pack.id.Str()) } } - close(downloadCh) return nil }) return wg.Wait() } +func (r *fileRestorer) truncateFileToSize(location string, size int64) error { + f, err := createFile(r.targetPath(location), size, false, r.allowRecursiveDelete) + if err != nil { + return err + } + return f.Close() +} + type blobToFileOffsetsMapping map[restic.ID]struct { files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file blob restic.Blob @@ -214,12 +254,10 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { blobInfo.files[file] = append(blobInfo.files[file], fileOffset) } if fileBlobs, ok := file.blobs.(restic.IDs); ok { - fileOffset := int64(0) - err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) { - if packID.Equal(pack.id) { + err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int, fileOffset int64) { + if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) { addBlob(blob, fileOffset) } - fileOffset += int64(blob.DataLength()) }) if err != nil { // restoreFiles should have caught this error before @@ -227,7 +265,7 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { } } else if packsMap, ok := file.blobs.(map[restic.ID][]fileBlobInfo); ok { for _, blob := range packsMap[pack.id] { - idxPacks := r.idx(restic.BlobHandle{ID: blob.id, Type: restic.DataBlob}) + idxPacks := r.idx(restic.DataBlob, blob.id) for _, idxPack := range idxPacks { if idxPack.PackID.Equal(pack.id) { addBlob(idxPack.Blob, blob.offset) @@ -240,41 +278,18 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { // track already processed blobs for precise error reporting processedBlobs := restic.NewBlobSet() - for _, entry := range blobs { - occurrences := 0 - for _, offsets := range entry.files { - occurrences += len(offsets) - } - // With a maximum blob size of 8MB, the normal blob streaming has to write - // at most 800MB for a single blob. This should be short enough to avoid - // network connection timeouts. Based on a quick test, a limit of 100 only - // selects a very small number of blobs (the number of references per blob - // - aka.
`count` - seem to follow a expontential distribution) - if occurrences > 100 { - // process frequently referenced blobs first as these can take a long time to write - // which can cause backend connections to time out - delete(blobs, entry.blob.ID) - partialBlobs := blobToFileOffsetsMapping{entry.blob.ID: entry} - err := r.downloadBlobs(ctx, pack.id, partialBlobs, processedBlobs) - if err := r.reportError(blobs, processedBlobs, err); err != nil { - return err - } - } - } - - if len(blobs) == 0 { - return nil - } - err := r.downloadBlobs(ctx, pack.id, blobs, processedBlobs) return r.reportError(blobs, processedBlobs, err) } func (r *fileRestorer) sanitizeError(file *fileInfo, err error) error { - if err != nil { - err = r.Error(file.location, err) + switch err { + case nil, context.Canceled, context.DeadlineExceeded: + // Context errors are permanent. + return err + default: + return r.Error(file.location, err) } - return err } func (r *fileRestorer) reportError(blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet, err error) error { @@ -322,6 +337,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, } for file, offsets := range blob.files { for _, offset := range offsets { + // avoid long cancelation delays for frequently used blobs + if ctx.Err() != nil { + return ctx.Err() + } + writeToFile := func() error { // this looks overly complicated and needs explanation // two competing requirements: @@ -339,11 +359,7 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, createSize = file.size } writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse) - - if r.progress != nil { - r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size)) - } - + r.reportBlobProgress(file, uint64(len(blobData))) return writeErr } err := r.sanitizeError(file, writeToFile()) @@ -355,3 +371,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, return nil }) } + +func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) { + action := restore.ActionFileUpdated + if file.state == nil { + action = restore.ActionFileRestored + } + r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size)) +} diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go index befeb5d2c0c..f594760e4af 100644 --- a/internal/restorer/filerestorer_test.go +++ b/internal/restorer/filerestorer_test.go @@ -35,8 +35,8 @@ type TestRepo struct { loader blobsLoaderFn } -func (i *TestRepo) Lookup(bh restic.BlobHandle) []restic.PackedBlob { - packs := i.blobs[bh.ID] +func (i *TestRepo) Lookup(tpe restic.BlobType, id restic.ID) []restic.PackedBlob { + packs := i.blobs[id] return packs } @@ -144,7 +144,7 @@ func restoreAndVerify(t *testing.T, tempdir string, content []TestFile, files ma t.Helper() repo := newTestRepo(content) - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, false, nil) if files == nil { r.files = repo.files @@ -206,6 +206,10 @@ func TestFileRestorerBasic(t *testing.T) { {"data3-1", "pack3-1"}, }, }, + { + name: "empty", + blobs: []TestBlob{}, + }, }, nil, sparse) } } @@ -281,7 +285,7 @@ func TestErrorRestoreFiles(t *testing.T) { return loadError } - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, nil) r.files = 
repo.files err := r.restoreFiles(context.TODO()) @@ -322,7 +326,7 @@ func TestFatalDownloadError(t *testing.T) { }) } - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, nil) r.files = repo.files var errors []string diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 589aa502aa8..d6f78f2d7cd 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -1,11 +1,14 @@ package restorer import ( + "fmt" "os" "sync" + "syscall" "github.com/cespare/xxhash/v2" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" ) @@ -15,7 +18,8 @@ import ( // TODO I am not 100% convinced this is necessary, i.e. it may be okay // to use multiple os.File to write to the same target file type filesWriter struct { - buckets []filesWriterBucket + buckets []filesWriterBucket + allowRecursiveDelete bool } type filesWriterBucket struct { @@ -29,16 +33,135 @@ type partialFile struct { sparse bool } -func newFilesWriter(count int) *filesWriter { +func newFilesWriter(count int, allowRecursiveDelete bool) *filesWriter { buckets := make([]filesWriterBucket, count) for b := 0; b < count; b++ { buckets[b].files = make(map[string]*partialFile) } return &filesWriter{ - buckets: buckets, + buckets: buckets, + allowRecursiveDelete: allowRecursiveDelete, } } +func openFile(path string) (*os.File, error) { + f, err := fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600) + if err != nil { + return nil, err + } + fi, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + if !fi.Mode().IsRegular() { + _ = f.Close() + return nil, fmt.Errorf("unexpected file type %v at %q", fi.Mode().Type(), path) + } + return f, nil +} + +func createFile(path string, createSize int64, sparse bool, allowRecursiveDelete bool) (*os.File, error) { + f, err := fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600) + if err != nil && fs.IsAccessDenied(err) { + // If file is readonly, clear the readonly flag by resetting the + // permissions of the file and try again, + // as the metadata will be set again in the second pass and the + // readonly flag will be applied again if needed.
+ if err = fs.ResetPermissions(path); err != nil { + return nil, err + } + if f, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil { + return nil, err + } + } else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) { + // symlink or directory, try to remove it later on + f = nil + } else if err != nil { + return nil, err + } + + var fi os.FileInfo + if f != nil { + // stat to check that we've opened a regular file + fi, err = f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + } + + mustReplace := f == nil || !fi.Mode().IsRegular() + if !mustReplace { + ex := fs.ExtendedStat(fi) + if ex.Links > 1 { + // there is no efficient way to find out which other files might be linked to this file + // thus nuke the existing file and start with a fresh one + mustReplace = true + } + } + + if mustReplace { + // close handle if we still have it + if f != nil { + if err := f.Close(); err != nil { + return nil, err + } + } + + // not what we expected, try to get rid of it + if allowRecursiveDelete { + if err := fs.RemoveAll(path); err != nil { + return nil, err + } + } else { + if err := fs.Remove(path); err != nil { + return nil, err + } + } + // create a new file, pass O_EXCL to make sure there are no surprises + f, err = fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_EXCL|fs.O_NOFOLLOW, 0600) + if err != nil { + return nil, err + } + fi, err = f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + } + + return ensureSize(f, fi, createSize, sparse) +} + +func ensureSize(f *os.File, fi os.FileInfo, createSize int64, sparse bool) (*os.File, error) { + if sparse { + err := truncateSparse(f, createSize) + if err != nil { + _ = f.Close() + return nil, err + } + } else if fi.Size() > createSize { + // file is too long, must shorten it + err := f.Truncate(createSize) + if err != nil { + _ = f.Close() + return nil, err + } + } else if createSize > 0 { + err := fs.PreallocateFile(f, createSize) + if err != nil { + // Just log the preallocate error but don't let it cause the restore process to fail. + // Preallocate might return an error if the filesystem (implementation) does not + // support preallocation or our parameter combination for the preallocate call. + // This should yield a syscall.ENOTSUP error, but some other errors might also + // show up. + debug.Log("Failed to preallocate %v with size %v: %v", f.Name(), createSize, err) + } + } + return f, nil +} + func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, createSize int64, sparse bool) error { bucket := &w.buckets[uint(xxhash.Sum64String(path))%uint(len(w.buckets))] @@ -50,41 +173,20 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create bucket.files[path].users++ return wr, nil } - - var flags int + var f *os.File + var err error if createSize >= 0 { - flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY - } else { - flags = os.O_WRONLY - } - - f, err := os.OpenFile(path, flags, 0600) - if err != nil { + f, err = createFile(path, createSize, sparse, w.allowRecursiveDelete) + if err != nil { + return nil, err + } + } else if f, err = openFile(path); err != nil { return nil, err } wr := &partialFile{File: f, users: 1, sparse: sparse} bucket.files[path] = wr - if createSize >= 0 { - if sparse { - err = truncateSparse(f, createSize) - if err != nil { - return nil, err - } - } else { - err := fs.PreallocateFile(wr.File, createSize) - if err != nil { - // Just log the preallocate error but don't let it cause the restore process to fail.
- // Preallocate might return an error if the filesystem (implementation) does not - // support preallocation or our parameters combination to the preallocate call - // This should yield a syscall.ENOTSUP error, but some other errors might also - // show up. - debug.Log("Failed to preallocate %v with size %v: %v", path, createSize, err) - } - } - } - return wr, nil } diff --git a/internal/restorer/fileswriter_other_test.go b/internal/restorer/fileswriter_other_test.go new file mode 100644 index 00000000000..530a190e524 --- /dev/null +++ b/internal/restorer/fileswriter_other_test.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package restorer + +import "syscall" + +func notEmptyDirError() error { + return syscall.ENOTEMPTY +} diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index 7beb9a2dc1b..9ea8767b871 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -1,15 +1,19 @@ package restorer import ( + "fmt" "os" + "path/filepath" + "runtime" "testing" + "github.com/restic/restic/internal/errors" rtest "github.com/restic/restic/internal/test" ) func TestFilesWriterBasic(t *testing.T) { dir := rtest.TempDir(t) - w := newFilesWriter(1) + w := newFilesWriter(1, false) f1 := dir + "/f1" f2 := dir + "/f2" @@ -34,3 +38,133 @@ func TestFilesWriterBasic(t *testing.T) { rtest.OK(t, err) rtest.Equals(t, []byte{2, 2}, buf) } + +func TestFilesWriterRecursiveOverwrite(t *testing.T) { + path := filepath.Join(t.TempDir(), "test") + + // create filled directory + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) + + // must error if recursive delete is not allowed + w := newFilesWriter(1, false) + err := w.writeToFile(path, []byte{1}, 0, 2, false) + rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexpected error got %v", err) + rtest.Equals(t, 0, len(w.buckets[0].files)) + + // must replace directory + w = newFilesWriter(1, true) + rtest.OK(t, w.writeToFile(path, []byte{1, 1}, 0, 2, false)) + rtest.Equals(t, 0, len(w.buckets[0].files)) + + buf, err := os.ReadFile(path) + rtest.OK(t, err) + rtest.Equals(t, []byte{1, 1}, buf) +} + +func TestCreateFile(t *testing.T) { + basepath := filepath.Join(t.TempDir(), "test") + + scenarios := []struct { + name string + create func(t testing.TB, path string) + check func(t testing.TB, path string) + err error + }{ + { + name: "file", + create: func(t testing.TB, path string) { + rtest.OK(t, os.WriteFile(path, []byte("test-test-test-data"), 0o400)) + }, + }, + { + name: "empty dir", + create: func(t testing.TB, path string) { + rtest.OK(t, os.Mkdir(path, 0o400)) + }, + }, + { + name: "symlink", + create: func(t testing.TB, path string) { + rtest.OK(t, os.Symlink("./something", path)) + }, + }, + { + name: "filled dir", + create: func(t testing.TB, path string) { + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) + }, + err: notEmptyDirError(), + }, + { + name: "hardlinks", + create: func(t testing.TB, path string) { + rtest.OK(t, os.WriteFile(path, []byte("test-test-test-data"), 0o400)) + rtest.OK(t, os.Link(path, path+"h")) + }, + check: func(t testing.TB, path string) { + if runtime.GOOS == "windows" { + // hardlinks are not supported on windows + return + } + + data, err := os.ReadFile(path + "h") + rtest.OK(t, err) + rtest.Equals(t, "test-test-test-data", string(data), "unexpected content change") + }, + }, + } + + tests := []struct { 
+ size int64 + isSparse bool + }{ + {5, false}, + {21, false}, + {100, false}, + {5, true}, + {21, true}, + {100, true}, + } + + for i, sc := range scenarios { + t.Run(sc.name, func(t *testing.T) { + for j, test := range tests { + path := basepath + fmt.Sprintf("%v%v", i, j) + sc.create(t, path) + f, err := createFile(path, test.size, test.isSparse, false) + if sc.err == nil { + rtest.OK(t, err) + fi, err := f.Stat() + rtest.OK(t, err) + rtest.Assert(t, fi.Mode().IsRegular(), "wrong filetype %v", fi.Mode()) + rtest.Assert(t, fi.Size() <= test.size, "unexpected file size expected %v, got %v", test.size, fi.Size()) + rtest.OK(t, f.Close()) + if sc.check != nil { + sc.check(t, path) + } + } else { + rtest.Assert(t, errors.Is(err, sc.err), "unexpected error got %v expected %v", err, sc.err) + } + rtest.OK(t, os.RemoveAll(path)) + } + }) + } +} + +func TestCreateFileRecursiveDelete(t *testing.T) { + path := filepath.Join(t.TempDir(), "test") + + // create filled directory + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) + + // replace it + f, err := createFile(path, 42, false, true) + rtest.OK(t, err) + fi, err := f.Stat() + rtest.OK(t, err) + rtest.Assert(t, fi.Mode().IsRegular(), "wrong filetype %v", fi.Mode()) + rtest.OK(t, f.Close()) +} diff --git a/internal/restorer/fileswriter_windows_test.go b/internal/restorer/fileswriter_windows_test.go new file mode 100644 index 00000000000..ec2b062f0b3 --- /dev/null +++ b/internal/restorer/fileswriter_windows_test.go @@ -0,0 +1,7 @@ +package restorer + +import "syscall" + +func notEmptyDirError() error { + return syscall.ERROR_DIR_NOT_EMPTY +} diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 2ce1ee98e77..cce175ebc1a 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -2,6 +2,8 @@ package restorer import ( "context" + "fmt" + "io" "os" "path/filepath" "sync/atomic" @@ -10,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" @@ -17,28 +20,92 @@ import ( // Restorer is used to restore a snapshot to a directory. type Restorer struct { - repo restic.Repository - sn *restic.Snapshot - sparse bool + repo restic.Repository + sn *restic.Snapshot + opts Options - progress *restoreui.Progress + fileList map[string]bool - Error func(location string, err error) error - SelectFilter func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) + Error func(location string, err error) error + Warn func(message string) + // SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected. + // selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir. 
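// A hedged example of one possible filter (illustrative only; this is not the
// include/exclude wiring restic's commands use): restore everything below
// "/home" while still descending through its parent directories.
//
//	res.SelectFilter = func(item string, isDir bool) (bool, bool) {
//		selected := fs.HasPathPrefix("/home", item)
//		return selected, selected || fs.HasPathPrefix(item, "/home")
//	}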
+ SelectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) + + XattrSelectFilter func(xattrName string) (xattrSelectedForRestore bool) +} + +var restorerAbortOnAllErrors = func(_ string, err error) error { return err } + +type Options struct { + DryRun bool + Sparse bool + Progress *restoreui.Progress + Overwrite OverwriteBehavior + Delete bool +} + +type OverwriteBehavior int + +// Constants for different overwrite behavior +const ( + OverwriteAlways OverwriteBehavior = iota + // OverwriteIfChanged is like OverwriteAlways except that it skips restoring the content + // of files with matching size&mtime. Metadata is always restored. + OverwriteIfChanged + OverwriteIfNewer + OverwriteNever + OverwriteInvalid +) + +// Set implements the method needed for pflag command flag parsing. +func (c *OverwriteBehavior) Set(s string) error { + switch s { + case "always": + *c = OverwriteAlways + case "if-changed": + *c = OverwriteIfChanged + case "if-newer": + *c = OverwriteIfNewer + case "never": + *c = OverwriteNever + default: + *c = OverwriteInvalid + return fmt.Errorf("invalid overwrite behavior %q, must be one of (always|if-changed|if-newer|never)", s) + } + + return nil } -var restorerAbortOnAllErrors = func(location string, err error) error { return err } +func (c *OverwriteBehavior) String() string { + switch *c { + case OverwriteAlways: + return "always" + case OverwriteIfChanged: + return "if-changed" + case OverwriteIfNewer: + return "if-newer" + case OverwriteNever: + return "never" + default: + return "invalid" + } +} + +func (c *OverwriteBehavior) Type() string { + return "behavior" +} // NewRestorer creates a restorer preloaded with the content from the snapshot id. -func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool, - progress *restoreui.Progress) *Restorer { +func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Restorer { r := &Restorer{ - repo: repo, - sparse: sparse, - Error: restorerAbortOnAllErrors, - SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, - progress: progress, - sn: sn, + repo: repo, + opts: opts, + fileList: make(map[string]bool), + Error: restorerAbortOnAllErrors, + SelectFilter: func(string, bool) (bool, bool) { return true, true }, + XattrSelectFilter: func(string) bool { return true }, + sn: sn, } return r @@ -47,30 +114,78 @@ func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool, type treeVisitor struct { enterDir func(node *restic.Node, target, location string) error visitNode func(node *restic.Node, target, location string) error - leaveDir func(node *restic.Node, target, location string) error + // 'entries' contains all files the snapshot contains for this node. This also includes files + // ignored by the SelectFilter. + leaveDir func(node *restic.Node, target, location string, entries []string) error +} + +func (res *Restorer) sanitizeError(location string, err error) error { + switch err { + case nil, context.Canceled, context.DeadlineExceeded: + // Context errors are permanent. + return err + default: + return res.Error(location, err) + } } // traverseTree traverses a tree from the repo and calls treeVisitor. // target is the path in the file system, location within the snapshot.
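// A hedged sketch of a minimal visitor for the traversal below, using only the
// treeVisitor fields declared above; it lists locations without touching disk.
//
//	err := res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{
//		visitNode: func(node *restic.Node, target, location string) error {
//			fmt.Println("would visit", location, "->", target)
//			return nil
//		},
//	})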
-func (res *Restorer) traverseTree(ctx context.Context, target, location string, treeID restic.ID, visitor treeVisitor) (hasRestored bool, err error) { +func (res *Restorer) traverseTree(ctx context.Context, target string, treeID restic.ID, visitor treeVisitor) error { + location := string(filepath.Separator) + + if visitor.enterDir != nil { + err := res.sanitizeError(location, visitor.enterDir(nil, target, location)) + if err != nil { + return err + } + } + childFilenames, hasRestored, err := res.traverseTreeInner(ctx, target, location, treeID, visitor) + if err != nil { + return err + } + if hasRestored && visitor.leaveDir != nil { + err = res.sanitizeError(location, visitor.leaveDir(nil, target, location, childFilenames)) + } + + return err +} + +func (res *Restorer) traverseTreeInner(ctx context.Context, target, location string, treeID restic.ID, visitor treeVisitor) (filenames []string, hasRestored bool, err error) { debug.Log("%v %v %v", target, location, treeID) tree, err := restic.LoadTree(ctx, res.repo, treeID) if err != nil { debug.Log("error loading tree %v: %v", treeID, err) - return hasRestored, res.Error(location, err) + return nil, hasRestored, res.sanitizeError(location, err) } - for _, node := range tree.Nodes { + if res.opts.Delete { + filenames = make([]string, 0, len(tree.Nodes)) + } + for i, node := range tree.Nodes { + if ctx.Err() != nil { + return nil, hasRestored, ctx.Err() + } + + // allow GC of tree node + tree.Nodes[i] = nil + if res.opts.Delete { + // just track all files included in the tree node to simplify the control flow. + // tracking too many files does not matter except for a slightly elevated memory usage + filenames = append(filenames, node.Name) + } // ensure that the node name does not contain anything that refers to a // top-level directory. nodeName := filepath.Base(filepath.Join(string(filepath.Separator), node.Name)) if nodeName != node.Name { debug.Log("node %q has invalid name %q", node.Name, nodeName) - err := res.Error(location, errors.Errorf("invalid child node name %s", node.Name)) + err := res.sanitizeError(location, errors.Errorf("invalid child node name %s", node.Name)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } + // force disable deletion to prevent unexpected behavior + res.opts.Delete = false continue } @@ -80,56 +195,49 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) { debug.Log("target: %v %v", target, nodeTarget) debug.Log("node %q has invalid target path %q", node.Name, nodeTarget) - err := res.Error(nodeLocation, errors.New("node has invalid path")) + err := res.sanitizeError(nodeLocation, errors.New("node has invalid path")) if err != nil { - return hasRestored, err + return nil, hasRestored, err } + // force disable deletion to prevent unexpected behavior + res.opts.Delete = false continue } // sockets cannot be restored - if node.Type == "socket" { + if node.Type == restic.NodeTypeSocket { continue } - selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, nodeTarget, node) + selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, node.Type == restic.NodeTypeDir) debug.Log("SelectFilter returned %v %v for %q", selectedForRestore, childMayBeSelected, nodeLocation) if selectedForRestore { hasRestored = true } - sanitizeError := func(err error) error { - switch err { - case nil, context.Canceled, context.DeadlineExceeded: - // Context errors are permanent. 
- return err - default: - return res.Error(nodeLocation, err) - } - } - - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { if node.Subtree == nil { - return hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str()) + return nil, hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str()) } if selectedForRestore && visitor.enterDir != nil { - err = sanitizeError(visitor.enterDir(node, nodeTarget, nodeLocation)) + err = res.sanitizeError(nodeLocation, visitor.enterDir(node, nodeTarget, nodeLocation)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } } // keep track of restored child status // so metadata of the current directory are restored on leaveDir childHasRestored := false + var childFilenames []string if childMayBeSelected { - childHasRestored, err = res.traverseTree(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor) - err = sanitizeError(err) + childFilenames, childHasRestored, err = res.traverseTreeInner(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor) + err = res.sanitizeError(nodeLocation, err) if err != nil { - return hasRestored, err + return nil, hasRestored, err } // inform the parent directory to restore parent metadata on leaveDir if needed if childHasRestored { @@ -140,9 +248,9 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, // metadata need to be restore when leaving the directory in both cases // selected for restore or any child of any subtree have been restored if (selectedForRestore || childHasRestored) && visitor.leaveDir != nil { - err = sanitizeError(visitor.leaveDir(node, nodeTarget, nodeLocation)) + err = res.sanitizeError(nodeLocation, visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } } @@ -150,35 +258,40 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, } if selectedForRestore { - err = sanitizeError(visitor.visitNode(node, nodeTarget, nodeLocation)) + err = res.sanitizeError(nodeLocation, visitor.visitNode(node, nodeTarget, nodeLocation)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } } } - return hasRestored, nil + return filenames, hasRestored, nil } -func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, target, location string) error { - debug.Log("restoreNode %v %v %v", node.Name, target, location) - - err := node.CreateAt(ctx, target, res.repo) - if err != nil { - debug.Log("node.CreateAt(%s) error %v", target, err) - return err - } +func (res *Restorer) restoreNodeTo(node *restic.Node, target, location string) error { + if !res.opts.DryRun { + debug.Log("restoreNode %v %v %v", node.Name, target, location) + if err := fs.Remove(target); err != nil && !errors.Is(err, os.ErrNotExist) { + return errors.Wrap(err, "RemoveNode") + } - if res.progress != nil { - res.progress.AddProgress(location, 0, 0) + err := fs.NodeCreateAt(node, target) + if err != nil { + debug.Log("node.CreateAt(%s) error %v", target, err) + return err + } } + res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0) return res.restoreNodeMetadataTo(node, target, location) } func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location string) error { + if res.opts.DryRun { + return nil + } debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location) - err := node.RestoreMetadata(target) + err := fs.NodeRestoreMetadata(node, target, res.Warn, 
res.XattrSelectFilter) if err != nil { debug.Log("node.RestoreMetadata(%s) error %v", target, err) } @@ -186,154 +299,306 @@ func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location s } func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location string) error { - if err := fs.Remove(path); !os.IsNotExist(err) { - return errors.Wrap(err, "RemoveCreateHardlink") - } - err := fs.Link(target, path) - if err != nil { - return errors.WithStack(err) - } - - if res.progress != nil { - res.progress.AddProgress(location, 0, 0) + if !res.opts.DryRun { + if err := fs.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { + return errors.Wrap(err, "RemoveCreateHardlink") + } + err := fs.Link(target, path) + if err != nil { + return errors.WithStack(err) + } } + res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0) // TODO investigate if hardlinks have separate metadata on any supported system return res.restoreNodeMetadataTo(node, path, location) } -func (res *Restorer) restoreEmptyFileAt(node *restic.Node, target, location string) error { - wr, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) - if err != nil { - return err - } - err = wr.Close() - if err != nil { - return err +func (res *Restorer) ensureDir(target string) error { + if res.opts.DryRun { + return nil } - if res.progress != nil { - res.progress.AddProgress(location, 0, 0) + fi, err := fs.Lstat(target) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to check for directory: %w", err) + } + if err == nil && !fi.IsDir() { + // try to cleanup unexpected file + if err := fs.Remove(target); err != nil { + return fmt.Errorf("failed to remove stale item: %w", err) + } } - return res.restoreNodeMetadataTo(node, target, location) + // create parent dir with default permissions + // second pass #leaveDir restores dir metadata after visiting/restoring all children + return fs.MkdirAll(target, 0700) } // RestoreTo creates the directories and files in the snapshot below dst. // Before an item is created, res.Filter is called. 
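// A hedged end-to-end sketch for the reworked API (it assumes an open repo and
// snapshot; leaving the Progress field zero-valued relies on the nil-safe
// progress calls implied by the removed nil checks elsewhere in this diff):
//
//	res := NewRestorer(repo, sn, Options{
//		Sparse:    true,
//		Overwrite: OverwriteIfChanged,
//	})
//	restored, err := res.RestoreTo(ctx, "/restore/target")
//	if err == nil {
//		fmt.Println("restored", restored, "files")
//	}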
-func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { +func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) { + restoredFileCount := uint64(0) var err error if !filepath.IsAbs(dst) { dst, err = filepath.Abs(dst) if err != nil { - return errors.Wrap(err, "Abs") + return restoredFileCount, errors.Wrap(err, "Abs") + } + } + + if !res.opts.DryRun { + // ensure that the target directory exists and is actually a directory + // Using ensureDir is too aggressive here as it also removes unexpected files + if err := fs.MkdirAll(dst, 0700); err != nil { + return restoredFileCount, fmt.Errorf("cannot create target directory: %w", err) } } idx := NewHardlinkIndex[string]() - filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.Index().Lookup, - res.repo.Connections(), res.sparse, res.progress) + filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob, + res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.opts.Progress) filerestorer.Error = res.Error debug.Log("first pass for %q", dst) + var buf []byte + // first tree pass: create directories and collect all files to restore - _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ - enterDir: func(node *restic.Node, target, location string) error { + err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ + enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location) - if res.progress != nil { - res.progress.AddFile(0) + if location != string(filepath.Separator) { + res.opts.Progress.AddFile(0) } - // create dir with default permissions - // #leaveDir restores dir metadata after visiting all children - return fs.MkdirAll(target, 0700) + return res.ensureDir(target) }, visitNode: func(node *restic.Node, target, location string) error { debug.Log("first pass, visitNode: mkdir %q, leaveDir on second pass should restore metadata", location) - // create parent dir with default permissions - // second pass #leaveDir restores dir metadata after visiting/restoring all children - err := fs.MkdirAll(filepath.Dir(target), 0700) - if err != nil { + if err := res.ensureDir(filepath.Dir(target)); err != nil { return err } - if node.Type != "file" { - if res.progress != nil { - res.progress.AddFile(0) - } + if node.Type != restic.NodeTypeFile { + res.opts.Progress.AddFile(0) return nil } - if node.Size == 0 { - if res.progress != nil { - res.progress.AddFile(node.Size) - } - return nil // deal with empty files later - } - if node.Links > 1 { if idx.Has(node.Inode, node.DeviceID) { - if res.progress != nil { - // a hardlinked file does not increase the restore size - res.progress.AddFile(0) - } + // a hardlinked file does not increase the restore size + res.opts.Progress.AddFile(0) return nil } idx.Add(node.Inode, node.DeviceID, location) } - if res.progress != nil { - res.progress.AddFile(node.Size) - } - - filerestorer.addFile(location, node.Content, int64(node.Size)) - - return nil + buf, err = res.withOverwriteCheck(ctx, node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error { + if updateMetadataOnly { + res.opts.Progress.AddSkippedFile(location, node.Size) + } else { + res.opts.Progress.AddFile(node.Size) + if !res.opts.DryRun { + filerestorer.addFile(location, node.Content, int64(node.Size), matches) + } else { + action := restoreui.ActionFileUpdated + if matches == nil { + action = 
restoreui.ActionFileRestored + } + // immediately mark as completed + res.opts.Progress.AddProgress(location, action, node.Size, node.Size) + } + } + res.trackFile(location, updateMetadataOnly) + if !updateMetadataOnly { + restoredFileCount++ + } + return nil + }) + return err }, }) if err != nil { - return err + return 0, err } - err = filerestorer.restoreFiles(ctx) - if err != nil { - return err + if !res.opts.DryRun { + err = filerestorer.restoreFiles(ctx) + if err != nil { + return 0, err + } } debug.Log("second pass for %q", dst) // second tree pass: restore special files and filesystem metadata - _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) - if node.Type != "file" { - return res.restoreNodeTo(ctx, node, target, location) + if node.Type != restic.NodeTypeFile { + _, err := res.withOverwriteCheck(ctx, node, target, location, false, nil, func(_ bool, _ *fileState) error { + return res.restoreNodeTo(node, target, location) + }) + return err + } + + if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { + _, err := res.withOverwriteCheck(ctx, node, target, location, true, nil, func(_ bool, _ *fileState) error { + return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) + }) + return err } - // create empty files, but not hardlinks to empty files - if node.Size == 0 && (node.Links < 2 || !idx.Has(node.Inode, node.DeviceID)) { - if node.Links > 1 { - idx.Add(node.Inode, node.DeviceID, location) + if _, ok := res.hasRestoredFile(location); ok { + return res.restoreNodeMetadataTo(node, target, location) + } + // don't touch skipped files + return nil + }, + leaveDir: func(node *restic.Node, target, location string, expectedFilenames []string) error { + if res.opts.Delete { + if err := res.removeUnexpectedFiles(ctx, target, location, expectedFilenames); err != nil { + return err } - return res.restoreEmptyFileAt(node, target, location) } - if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { - return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) + if node == nil { + return nil } - return res.restoreNodeMetadataTo(node, target, location) - }, - leaveDir: func(node *restic.Node, target, location string) error { err := res.restoreNodeMetadataTo(node, target, location) - if err == nil && res.progress != nil { - res.progress.AddProgress(location, 0, 0) + if err == nil { + res.opts.Progress.AddProgress(location, restoreui.ActionDirRestored, 0, 0) } return err }, }) - return err + return restoredFileCount, err +} + +func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location string, expectedFilenames []string) error { + if !res.opts.Delete { + panic("internal error") + } + + entries, err := fs.Readdirnames(fs.Local{}, target, fs.O_NOFOLLOW) + if errors.Is(err, os.ErrNotExist) { + return nil + } else if err != nil { + return err + } + + keep := map[string]struct{}{} + for _, name := range expectedFilenames { + keep[toComparableFilename(name)] = struct{}{} + } + + for _, entry := range entries { + if ctx.Err() != nil { + return ctx.Err() + } + + if _, ok := keep[toComparableFilename(entry)]; ok { + continue + } + + nodeTarget := filepath.Join(target, 
entry) + nodeLocation := filepath.Join(location, entry) + + if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) { + return fmt.Errorf("skipping deletion due to invalid filename: %v", entry) + } + + // TODO pass a proper value to the isDir parameter once this becomes relevant for the filters + selectedForRestore, _ := res.SelectFilter(nodeLocation, false) + // only delete files that were selected for restore + if selectedForRestore { + // First collect all files that will be deleted + var filesToDelete []string + err := filepath.Walk(nodeTarget, func(path string, _ os.FileInfo, err error) error { + if err != nil { + return err + } + filesToDelete = append(filesToDelete, path) + return nil + }) + if err != nil { + return err + } + + if !res.opts.DryRun { + // Perform the deletion + if err := fs.RemoveAll(nodeTarget); err != nil { + return err + } + } + + // Report paths as deleted only after successful removal + for i := len(filesToDelete) - 1; i >= 0; i-- { + res.opts.Progress.ReportDeletion(filesToDelete[i]) + } + } + } + + return nil +} + +func (res *Restorer) trackFile(location string, metadataOnly bool) { + res.fileList[location] = metadataOnly +} + +func (res *Restorer) hasRestoredFile(location string) (metadataOnly bool, ok bool) { + metadataOnly, ok = res.fileList[location] + return metadataOnly, ok +} + +func (res *Restorer) withOverwriteCheck(ctx context.Context, node *restic.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) { + overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target) + if err != nil { + return buf, err + } else if !overwrite { + size := node.Size + if isHardlink { + size = 0 + } + res.opts.Progress.AddSkippedFile(location, size) + return buf, nil + } + + var matches *fileState + updateMetadataOnly := false + if node.Type == restic.NodeTypeFile && !isHardlink { + // if a file fails to verify, then matches is nil which results in restoring from scratch + matches, buf, _ = res.verifyFile(ctx, target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf) + // skip files that are already correct completely + updateMetadataOnly = !matches.NeedsRestore() + } + + return buf, cb(updateMetadataOnly, matches) +} + +func shouldOverwrite(overwrite OverwriteBehavior, node *restic.Node, destination string) (bool, error) { + if overwrite == OverwriteAlways || overwrite == OverwriteIfChanged { + return true, nil + } + + fi, err := fs.Lstat(destination) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return true, nil + } + return false, err + } + + if overwrite == OverwriteIfNewer { + // return if node is newer + return node.ModTime.After(fi.ModTime()), nil + } else if overwrite == OverwriteNever { + // file exists + return false, nil + } + panic("unknown overwrite behavior") } // Snapshot returns the snapshot this restorer is configured to use. @@ -348,7 +613,7 @@ const nVerifyWorkers = 8 // have been successfully written to dst. It stops when it encounters an // error. It returns that error and the number of files it has successfully // verified. 
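// A hedged usage sketch for the extended signature below: countRestoredFiles
// is the first return value of RestoreTo, and passing a nil *progress.Counter
// is assumed to be acceptable since p is nil-checked before use.
//
//	restored, err := res.RestoreTo(ctx, dst)
//	if err == nil {
//		nchecked, verr := res.VerifyFiles(ctx, dst, restored, nil)
//		fmt.Println("verified", nchecked, "of", restored, "files:", verr)
//	}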
-func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { +func (res *Restorer) VerifyFiles(ctx context.Context, dst string, countRestoredFiles uint64, p *progress.Counter) (int, error) { type mustCheck struct { node *restic.Node path string @@ -359,15 +624,23 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { work = make(chan mustCheck, 2*nVerifyWorkers) ) + if p != nil { + p.SetMax(countRestoredFiles) + defer p.Done() + } + g, ctx := errgroup.WithContext(ctx) // Traverse tree and send jobs to work. g.Go(func() error { defer close(work) - _, err := res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + err := res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { + return nil + } + if metadataOnly, ok := res.hasRestoredFile(location); !ok || metadataOnly { return nil } select { @@ -385,13 +658,12 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { g.Go(func() (err error) { var buf []byte for job := range work { - buf, err = res.verifyFile(job.path, job.node, buf) - if err != nil { - err = res.Error(job.path, err) - } + _, buf, err = res.verifyFile(ctx, job.path, job.node, true, false, buf) + err = res.sanitizeError(job.path, err) if err != nil || ctx.Err() != nil { break } + p.Add(1) atomic.AddUint64(&nchecked, 1) } return err @@ -401,34 +673,75 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { return int(nchecked), g.Wait() } +type fileState struct { + blobMatches []bool + sizeMatches bool +} + +func (s *fileState) NeedsRestore() bool { + if s == nil { + return true + } + if !s.sizeMatches { + return true + } + for _, match := range s.blobMatches { + if !match { + return true + } + } + return false +} + +func (s *fileState) HasMatchingBlob(i int) bool { + if s == nil || s.blobMatches == nil { + return false + } + return i < len(s.blobMatches) && s.blobMatches[i] +} + // Verify that the file target has the contents of node. // // buf and the first return value are scratch space, passed around for reuse. // Reusing buffers prevents the verifier goroutines allocating all of RAM and // flushing the filesystem cache (at least on Linux). 
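The fileState type above records one match flag per blob plus a size check, and NeedsRestore reports true as soon as anything mismatches. A small standalone sketch of how such a per-blob bitmap translates into the byte ranges that still have to be rewritten; the blob sizes and offsets here are invented for illustration, not restic's actual layout:

package main

import "fmt"

func main() {
	// Hypothetical blob layout and verification results for one file.
	blobLens := []int{4096, 4096, 2048}
	matches := []bool{true, false, true} // per-blob outcome, as in fileState.blobMatches

	offset := 0
	for i, l := range blobLens {
		if !matches[i] {
			fmt.Printf("blob %d: rewrite bytes [%d, %d)\n", i, offset, offset+l)
		}
		offset += l
	}
}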
-func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([]byte, error) { - f, err := os.Open(target) +func (res *Restorer) verifyFile(ctx context.Context, target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) { + f, err := fs.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { - return buf, err + return nil, buf, err } defer func() { _ = f.Close() }() fi, err := f.Stat() + sizeMatches := true switch { case err != nil: - return buf, err + return nil, buf, err + case !fi.Mode().IsRegular(): + return nil, buf, errors.Errorf("Expected %s to be a regular file", target) case int64(node.Size) != fi.Size(): - return buf, errors.Errorf("Invalid file size for %s: expected %d, got %d", - target, node.Size, fi.Size()) + if failFast { + return nil, buf, errors.Errorf("Invalid file size for %s: expected %d, got %d", + target, node.Size, fi.Size()) + } + sizeMatches = false + } + + if trustMtime && fi.ModTime().Equal(node.ModTime) && sizeMatches { + return &fileState{nil, sizeMatches}, buf, nil } + matches := make([]bool, len(node.Content)) var offset int64 - for _, blobID := range node.Content { - length, found := res.repo.LookupBlobSize(blobID, restic.DataBlob) + for i, blobID := range node.Content { + if ctx.Err() != nil { + return nil, buf, ctx.Err() + } + length, found := res.repo.LookupBlobSize(restic.DataBlob, blobID) if !found { - return buf, errors.Errorf("Unable to fetch blob %s", blobID) + return nil, buf, errors.Errorf("Unable to fetch blob %s", blobID) } if length > uint(cap(buf)) { @@ -437,16 +750,21 @@ func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([ buf = buf[:length] _, err = f.ReadAt(buf, offset) + if err == io.EOF && !failFast { + sizeMatches = false + break + } if err != nil { - return buf, err + return nil, buf, err } - if !blobID.Equal(restic.Hash(buf)) { - return buf, errors.Errorf( + matches[i] = blobID.Equal(restic.Hash(buf)) + if failFast && !matches[i] { + return nil, buf, errors.Errorf( "Unexpected content in %s, starting at offset %d", target, offset) } offset += int64(length) } - return buf, nil + return &fileState{matches, sizeMatches}, buf, nil } diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index c33214bc3cd..e0306ce01cc 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -3,20 +3,27 @@ package restorer import ( "bytes" "context" + "encoding/json" + "fmt" "io" "math" "os" "path/filepath" + "reflect" "runtime" "strings" + "syscall" "testing" "time" "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" + restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" ) @@ -27,24 +34,40 @@ type Snapshot struct { } type File struct { - Data string - Links uint64 - Inode uint64 - Mode os.FileMode + Data string + DataParts []string + Links uint64 + Inode uint64 + Mode os.FileMode + ModTime time.Time + attributes *FileAttributes +} + +type Symlink struct { + Target string ModTime time.Time } type Dir struct { - Nodes map[string]Node - Mode os.FileMode - ModTime time.Time + Nodes map[string]Node + Mode os.FileMode + ModTime time.Time + attributes *FileAttributes +} + +type FileAttributes struct { + ReadOnly 
bool + Hidden bool + System bool + Archive bool + Encrypted bool } -func saveFile(t testing.TB, repo restic.BlobSaver, node File) restic.ID { +func saveFile(t testing.TB, repo restic.BlobSaver, data string) restic.ID { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false) + id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(data), restic.ID{}, false) if err != nil { t.Fatal(err) } @@ -52,7 +75,7 @@ func saveFile(t testing.TB, repo restic.BlobSaver, node File) restic.ID { return id } -func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode uint64) restic.ID { +func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode uint64, getGenericAttributes func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage)) restic.ID { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -61,37 +84,58 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u inode++ switch node := n.(type) { case File: - fi := n.(File).Inode + fi := node.Inode if fi == 0 { fi = inode } - lc := n.(File).Links + lc := node.Links if lc == 0 { lc = 1 } fc := []restic.ID{} - if len(n.(File).Data) > 0 { - fc = append(fc, saveFile(t, repo, node)) + size := 0 + if len(node.Data) > 0 { + size = len(node.Data) + fc = append(fc, saveFile(t, repo, node.Data)) + } else if len(node.DataParts) > 0 { + for _, part := range node.DataParts { + fc = append(fc, saveFile(t, repo, part)) + size += len(part) + } } mode := node.Mode if mode == 0 { mode = 0644 } err := tree.Insert(&restic.Node{ - Type: "file", - Mode: mode, - ModTime: node.ModTime, - Name: name, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Content: fc, - Size: uint64(len(n.(File).Data)), - Inode: fi, - Links: lc, + Type: restic.NodeTypeFile, + Mode: mode, + ModTime: node.ModTime, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Content: fc, + Size: uint64(size), + Inode: fi, + Links: lc, + GenericAttributes: getGenericAttributes(node.attributes, false), + }) + rtest.OK(t, err) + case Symlink: + err := tree.Insert(&restic.Node{ + Type: restic.NodeTypeSymlink, + Mode: os.ModeSymlink | 0o777, + ModTime: node.ModTime, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + LinkTarget: node.Target, + Inode: inode, + Links: 1, }) rtest.OK(t, err) case Dir: - id := saveDir(t, repo, node.Nodes, inode) + id := saveDir(t, repo, node.Nodes, inode, getGenericAttributes) mode := node.Mode if mode == 0 { @@ -99,13 +143,14 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u } err := tree.Insert(&restic.Node{ - Type: "dir", - Mode: mode, - ModTime: node.ModTime, - Name: name, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Subtree: &id, + Type: restic.NodeTypeDir, + Mode: mode, + ModTime: node.ModTime, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Subtree: &id, + GenericAttributes: getGenericAttributes(node.attributes, false), }) rtest.OK(t, err) default: @@ -121,13 +166,13 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u return id } -func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot) (*restic.Snapshot, restic.ID) { +func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot, getGenericAttributes func(attr *FileAttributes, isDir bool) (genericAttributes 
map[restic.GenericAttributeType]json.RawMessage)) (*restic.Snapshot, restic.ID) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg, wgCtx := errgroup.WithContext(ctx) repo.StartPackUploader(wgCtx, wg) - treeID := saveDir(t, repo, snapshot.Nodes, 1000) + treeID := saveDir(t, repo, snapshot.Nodes, 1000, getGenericAttributes) err := repo.Flush(ctx) if err != nil { t.Fatal(err) @@ -147,13 +192,18 @@ func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot) (*res return sn, id } +var noopGetGenericAttributes = func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage) { + // No-op + return nil +} + func TestRestorer(t *testing.T) { var tests = []struct { Snapshot Files map[string]string ErrorsMust map[string]map[string]struct{} ErrorsMay map[string]map[string]struct{} - Select func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) + Select func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) }{ // valid test cases { @@ -245,7 +295,7 @@ func TestRestorer(t *testing.T) { Files: map[string]string{ "dir/file": "content: file\n", }, - Select: func(item, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { switch item { case filepath.FromSlash("/dir"): childMayBeSelected = true @@ -322,25 +372,19 @@ func TestRestorer(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { repo := repository.TestRepository(t) - sn, id := saveSnapshot(t, repo, test.Snapshot) + sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) t.Logf("snapshot saved as %v", id.Str()) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) // make sure we're creating a new subdir of the tempdir tempdir = filepath.Join(tempdir, "target") - res.SelectFilter = func(item, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - t.Logf("restore %v to %v", item, dstpath) - if !fs.HasPathPrefix(tempdir, dstpath) { - t.Errorf("would restore %v to %v, which is not within the target dir %v", - item, dstpath, tempdir) - return false, false - } - + res.SelectFilter = func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + t.Logf("restore %v", item) if test.Select != nil { - return test.Select(item, dstpath, node) + return test.Select(item, isDir) } return true, true @@ -360,13 +404,13 @@ func TestRestorer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) if err != nil { t.Fatal(err) } if len(test.ErrorsMust)+len(test.ErrorsMay) == 0 { - _, err = res.VerifyFiles(ctx, tempdir) + _, err = res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) } @@ -439,10 +483,10 @@ func TestRestorerRelative(t *testing.T) { t.Run("", func(t *testing.T) { repo := repository.TestRepository(t) - sn, id := saveSnapshot(t, repo, test.Snapshot) + sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) t.Logf("snapshot saved as %v", id.Str()) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) cleanup := rtest.Chdir(t, tempdir) @@ -458,13 +502,18 @@ func TestRestorerRelative(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, "restore") + countRestoredFiles, err := res.RestoreTo(ctx, "restore") if err != nil { t.Fatal(err) } - nverified, err := res.VerifyFiles(ctx, "restore") + p := progress.NewCounter(time.Second, countRestoredFiles, func(value uint64, total uint64, runtime time.Duration, final bool) {}) + defer p.Done() + nverified, err := res.VerifyFiles(ctx, "restore", countRestoredFiles, p) rtest.OK(t, err) rtest.Equals(t, len(test.Files), nverified) + counterValue, maxValue := p.Get() + rtest.Equals(t, counterValue, uint64(2)) + rtest.Equals(t, maxValue, uint64(2)) for filename, err := range errors { t.Errorf("unexpected error for %v found: %v", filename, err) @@ -481,6 +530,13 @@ func TestRestorerRelative(t *testing.T) { t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) } } + + // verify that restoring the same snapshot again results in countRestoredFiles == 0 + countRestoredFiles, err = res.RestoreTo(ctx, "restore") + if err != nil { + t.Fatal(err) + } + rtest.Equals(t, uint64(0), countRestoredFiles) }) } } @@ -488,16 +544,17 @@ func TestRestorerRelative(t *testing.T) { type TraverseTreeCheck func(testing.TB) treeVisitor type TreeVisit struct { - funcName string // name of the function - location string // location passed to the function + funcName string // name of the function + location string // location passed to the function + files []string // file list passed to the function } func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { var pos int return func(t testing.TB) treeVisitor { - check := func(funcName string) func(*restic.Node, string, string) error { - return func(node *restic.Node, target, location string) error { + check := func(funcName string) func(*restic.Node, string, string, []string) error { + return func(node *restic.Node, target, location string, expectedFilenames []string) error { if pos >= len(list) { t.Errorf("step %v, %v(%v): expected no more than %d function calls", pos, funcName, location, len(list)) pos++ @@ -515,14 +572,24 @@ func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { t.Errorf("step %v: want location %v, got %v", pos, list[pos].location, location) } + if !reflect.DeepEqual(expectedFilenames, v.files) { + t.Errorf("step %v: want files %v, got %v", pos, list[pos].files, expectedFilenames) + } + pos++ return nil } } + checkNoFilename := func(funcName string) func(*restic.Node, string, string) error { + f := check(funcName) + return func(node *restic.Node, target, location string) error { + return f(node, target, location, nil) + } + } return treeVisitor{ - enterDir: check("enterDir"), - visitNode: check("visitNode"), + enterDir: checkNoFilename("enterDir"), + visitNode: checkNoFilename("visitNode"), leaveDir: check("leaveDir"), } } @@ -531,7 +598,7 @@ func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { func TestRestorerTraverseTree(t *testing.T) { var tests = []struct { Snapshot - Select func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) + Select func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) Visitor TraverseTreeCheck }{ { @@ -547,17 +614,19 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { return true, true }, 
Visitor: checkVisitOrder([]TreeVisit{ - {"enterDir", "/dir"}, - {"visitNode", "/dir/otherfile"}, - {"enterDir", "/dir/subdir"}, - {"visitNode", "/dir/subdir/file"}, - {"leaveDir", "/dir/subdir"}, - {"leaveDir", "/dir"}, - {"visitNode", "/foo"}, + {"enterDir", "/", nil}, + {"enterDir", "/dir", nil}, + {"visitNode", "/dir/otherfile", nil}, + {"enterDir", "/dir/subdir", nil}, + {"visitNode", "/dir/subdir/file", nil}, + {"leaveDir", "/dir/subdir", []string{"file"}}, + {"leaveDir", "/dir", []string{"otherfile", "subdir"}}, + {"visitNode", "/foo", nil}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, @@ -574,14 +643,16 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if item == "/foo" { return true, false } return false, false }, Visitor: checkVisitOrder([]TreeVisit{ - {"visitNode", "/foo"}, + {"enterDir", "/", nil}, + {"visitNode", "/foo", nil}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, { @@ -596,14 +667,16 @@ func TestRestorerTraverseTree(t *testing.T) { }}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if item == "/aaa" { return true, false } return false, false }, Visitor: checkVisitOrder([]TreeVisit{ - {"visitNode", "/aaa"}, + {"enterDir", "/", nil}, + {"visitNode", "/aaa", nil}, + {"leaveDir", "/", []string{"aaa", "dir"}}, }), }, @@ -620,19 +693,21 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if strings.HasPrefix(item, "/dir") { return true, true } return false, false }, Visitor: checkVisitOrder([]TreeVisit{ - {"enterDir", "/dir"}, - {"visitNode", "/dir/otherfile"}, - {"enterDir", "/dir/subdir"}, - {"visitNode", "/dir/subdir/file"}, - {"leaveDir", "/dir/subdir"}, - {"leaveDir", "/dir"}, + {"enterDir", "/", nil}, + {"enterDir", "/dir", nil}, + {"visitNode", "/dir/otherfile", nil}, + {"enterDir", "/dir/subdir", nil}, + {"visitNode", "/dir/subdir/file", nil}, + {"leaveDir", "/dir/subdir", []string{"file"}}, + {"leaveDir", "/dir", []string{"otherfile", "subdir"}}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, @@ -649,7 +724,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { switch item { case "/dir": return false, true @@ -660,8 +735,10 @@ func TestRestorerTraverseTree(t *testing.T) { } }, Visitor: checkVisitOrder([]TreeVisit{ - {"visitNode", "/dir/otherfile"}, - {"leaveDir", "/dir"}, + {"enterDir", "/", nil}, + {"visitNode", "/dir/otherfile", nil}, + {"leaveDir", "/dir", []string{"otherfile", "subdir"}}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, } @@ -669,9 +746,10 @@ func TestRestorerTraverseTree(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { repo := repository.TestRepository(t) - sn, _ := saveSnapshot(t, repo, test.Snapshot) + sn, _ := 
saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + // set Delete option to enable tracking filenames in a directory + res := NewRestorer(repo, sn, Options{Delete: true}) res.SelectFilter = test.Select @@ -682,7 +760,7 @@ func TestRestorerTraverseTree(t *testing.T) { // make sure we're creating a new subdir of the tempdir target := filepath.Join(tempdir, "target") - _, err := res.traverseTree(ctx, target, string(filepath.Separator), *sn.Tree, test.Visitor(t)) + err := res.traverseTree(ctx, target, *sn.Tree, test.Visitor(t)) if err != nil { t.Fatal(err) } @@ -745,11 +823,11 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { }, }, }, - }) + }, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) - res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + res.SelectFilter = func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { switch filepath.ToSlash(item) { case "/dir": childMayBeSelected = true @@ -770,7 +848,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) var testPatterns = []struct { @@ -800,16 +878,16 @@ func TestVerifyCancel(t *testing.T) { } repo := repository.TestRepository(t) - sn, _ := saveSnapshot(t, repo, snapshot) + sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - err := os.WriteFile(filepath.Join(tempdir, "foo"), []byte("bar"), 0644) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + err = os.WriteFile(filepath.Join(tempdir, "foo"), []byte("bar"), 0644) rtest.OK(t, err) var errs []error @@ -818,7 +896,7 @@ func TestVerifyCancel(t *testing.T) { return err } - nverified, err := res.VerifyFiles(ctx, tempdir) + nverified, err := res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.Equals(t, 0, nverified) rtest.Assert(t, err != nil, "nil error from VerifyFiles") rtest.Equals(t, 1, len(errs)) @@ -840,17 +918,17 @@ func TestRestorerSparseFiles(t *testing.T) { rtest.OK(t, err) arch := archiver.New(repo, target, archiver.Options{}) - sn, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, + sn, _, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, archiver.SnapshotOptions{}) rtest.OK(t, err) - res := NewRestorer(repo, sn, true, nil) + res := NewRestorer(repo, sn, Options{Sparse: true}) tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err = res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) filename := filepath.Join(tempdir, "zeros") @@ -875,3 +953,613 @@ func TestRestorerSparseFiles(t *testing.T) { t.Logf("wrote %d zeros as %d blocks, %.1f%% sparse", len(zeros), blocks, 100*sparsity) } + +func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSnapshot Snapshot, baseOptions, overwriteOptions Options) string { + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + 
defer cancel() + + // base snapshot + sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes) + t.Logf("base snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, baseOptions) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + // overwrite snapshot + sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) + t.Logf("overwrite snapshot saved as %v", id.Str()) + res = NewRestorer(repo, sn, overwriteOptions) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + _, err = res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) + rtest.OK(t, err) + + return tempdir +} + +func TestRestorerSparseOverwrite(t *testing.T) { + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: new\n"}, + }, + } + var zero [14]byte + sparseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: string(zero[:])}, + }, + } + + opts := Options{Sparse: true, Overwrite: OverwriteAlways} + saveSnapshotsAndOverwrite(t, baseSnapshot, sparseSnapshot, opts, opts) +} + +type printerMock struct { + s restoreui.State +} + +func (p *printerMock) Update(_ restoreui.State, _ time.Duration) { +} +func (p *printerMock) Error(item string, err error) error { + return nil +} +func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) { +} +func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { + p.s = s +} + +func TestRestorerOverwriteBehavior(t *testing.T) { + baseTime := time.Now() + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", ModTime: baseTime}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file\n", ModTime: baseTime}, + "foo": File{Data: "content: foobar", ModTime: baseTime}, + }, + ModTime: baseTime, + }, + }, + } + overwriteSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: new\n", ModTime: baseTime.Add(time.Second)}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file2\n", ModTime: baseTime.Add(-time.Second)}, + "foo": File{Data: "content: foo", ModTime: baseTime}, + }, + }, + }, + } + + var tests = []struct { + Overwrite OverwriteBehavior + Files map[string]string + Progress restoreui.State + }{ + { + Overwrite: OverwriteAlways, + Files: map[string]string{ + "foo": "content: new\n", + "dirtest/file": "content: file2\n", + "dirtest/foo": "content: foo", + }, + Progress: restoreui.State{ + FilesFinished: 4, + FilesTotal: 4, + FilesSkipped: 0, + AllBytesWritten: 40, + AllBytesTotal: 40, + AllBytesSkipped: 0, + }, + }, + { + Overwrite: OverwriteIfChanged, + Files: map[string]string{ + "foo": "content: new\n", + "dirtest/file": "content: file2\n", + "dirtest/foo": "content: foo", + }, + Progress: restoreui.State{ + FilesFinished: 4, + FilesTotal: 4, + FilesSkipped: 0, + AllBytesWritten: 40, + AllBytesTotal: 40, + AllBytesSkipped: 0, + }, + }, + { + Overwrite: OverwriteIfNewer, + Files: map[string]string{ + "foo": "content: new\n", + "dirtest/file": "content: file\n", + "dirtest/foo": "content: foobar", + }, + Progress: restoreui.State{ + FilesFinished: 2, + FilesTotal: 2, + FilesSkipped: 2, + AllBytesWritten: 13, + AllBytesTotal: 13, + AllBytesSkipped: 27, + }, + }, + { + Overwrite: OverwriteNever, + Files: map[string]string{ + "foo": "content: foo\n", + "dirtest/file": "content: file\n", + "dirtest/foo": "content: foobar", + }, + Progress: restoreui.State{ + FilesFinished: 1, + FilesTotal: 1, + FilesSkipped: 3, + AllBytesWritten: 0, + 
AllBytesTotal: 0, + AllBytesSkipped: 40, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + mock := &printerMock{} + progress := restoreui.NewProgress(mock, 0) + tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: test.Overwrite, Progress: progress}) + + for filename, content := range test.Files { + data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) + if err != nil { + t.Errorf("unable to read file %v: %v", filename, err) + continue + } + + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) + } + } + + progress.Finish() + rtest.Equals(t, test.Progress, mock.s) + }) + } +} + +func TestRestorerOverwritePartial(t *testing.T) { + parts := make([]string, 100) + size := 0 + for i := 0; i < len(parts); i++ { + parts[i] = fmt.Sprint(i) + size += len(parts[i]) + if i < 8 { + // small file + size += len(parts[i]) + } + } + + // the data of both snapshots is stored in different pack files, + // thus both small and foo in the overwriteSnapshot contain blobs from + // two different pack files. This tests basic handling of blobs from + // different pack files. + baseTime := time.Now() + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{DataParts: parts[0:5], ModTime: baseTime}, + "small": File{DataParts: parts[0:5], ModTime: baseTime}, + }, + } + overwriteSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{DataParts: parts, ModTime: baseTime}, + "small": File{DataParts: parts[0:8], ModTime: baseTime}, + }, + } + + mock := &printerMock{} + progress := restoreui.NewProgress(mock, 0) + saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: OverwriteAlways, Progress: progress}) + progress.Finish() + rtest.Equals(t, restoreui.State{ + FilesFinished: 2, + FilesTotal: 2, + FilesSkipped: 0, + AllBytesWritten: uint64(size), + AllBytesTotal: uint64(size), + AllBytesSkipped: 0, + }, mock.s) +} + +func TestRestorerOverwriteSpecial(t *testing.T) { + baseTime := time.Now() + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "dirtest": Dir{ModTime: baseTime}, + "link": Symlink{Target: "foo", ModTime: baseTime}, + "file": File{Data: "content: file\n", Inode: 42, Links: 2, ModTime: baseTime}, + "hardlink": File{Data: "content: file\n", Inode: 42, Links: 2, ModTime: baseTime}, + "newdir": File{Data: "content: dir\n", ModTime: baseTime}, + }, + } + overwriteSnapshot := Snapshot{ + Nodes: map[string]Node{ + "dirtest": Symlink{Target: "foo", ModTime: baseTime}, + "link": File{Data: "content: link\n", Inode: 42, Links: 2, ModTime: baseTime.Add(time.Second)}, + "file": Symlink{Target: "foo2", ModTime: baseTime}, + "hardlink": File{Data: "content: link\n", Inode: 42, Links: 2, ModTime: baseTime.Add(time.Second)}, + "newdir": Dir{ModTime: baseTime}, + }, + } + + files := map[string]string{ + "link": "content: link\n", + "hardlink": "content: link\n", + } + links := map[string]string{ + "dirtest": "foo", + "file": "foo2", + } + + opts := Options{Overwrite: OverwriteAlways} + tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, opts, opts) + + for filename, content := range files { + data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) + if err != nil { + t.Errorf("unable to read file %v: %v", filename, err) + continue + } + + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", filename, content,
data) + } + } + for filename, target := range links { + link, err := os.Readlink(filepath.Join(tempdir, filepath.FromSlash(filename))) + rtest.OK(t, err) + rtest.Equals(t, link, target, "wrong symlink target") + } +} + +func TestRestoreModified(t *testing.T) { + // overwrite files between snapshots and also change their filesize + snapshots := []Snapshot{ + { + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", ModTime: time.Now()}, + "bar": File{Data: "content: a\n", ModTime: time.Now()}, + }, + }, + { + Nodes: map[string]Node{ + "foo": File{Data: "content: a\n", ModTime: time.Now()}, + "bar": File{Data: "content: bar\n", ModTime: time.Now()}, + }, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, snapshot := range snapshots { + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{Overwrite: OverwriteIfChanged}) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + n, err := res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) + rtest.OK(t, err) + rtest.Equals(t, 2, n, "unexpected number of verified files") + } +} + +func TestRestoreIfChanged(t *testing.T) { + origData := "content: foo\n" + modData := "content: bar\n" + rtest.Equals(t, len(modData), len(origData), "broken testcase") + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: origData, ModTime: time.Now()}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{}) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + // modify file but maintain size and timestamp + path := filepath.Join(tempdir, "foo") + f, err := os.OpenFile(path, os.O_RDWR, 0) + rtest.OK(t, err) + fi, err := f.Stat() + rtest.OK(t, err) + _, err = f.Write([]byte(modData)) + rtest.OK(t, err) + rtest.OK(t, f.Close()) + var utimes = [...]syscall.Timespec{ + syscall.NsecToTimespec(fi.ModTime().UnixNano()), + syscall.NsecToTimespec(fi.ModTime().UnixNano()), + } + rtest.OK(t, syscall.UtimesNano(path, utimes[:])) + + for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { + res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + data, err := os.ReadFile(path) + rtest.OK(t, err) + if overwrite == OverwriteAlways { + // restore should notice the changed file content + rtest.Equals(t, origData, string(data), "expected original file content") + } else { + // restore should not have noticed the changed file content + rtest.Equals(t, modData, string(data), "expected modified file content") + } + } +} + +func TestRestoreDryRun(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", Links: 2, Inode: 42}, + "foo2": File{Data: "content: foo\n", Links: 2, Inode: 42}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }, + }, + "link": Symlink{Target: "foo"}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer 
cancel() + + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{DryRun: true}) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + _, err = os.Stat(tempdir) + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) +} + +func TestRestoreDryRunDelete(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n"}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + tempfile := filepath.Join(tempdir, "existing") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rtest.OK(t, os.Mkdir(tempdir, 0o755)) + f, err := os.Create(tempfile) + rtest.OK(t, err) + rtest.OK(t, f.Close()) + + sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + res := NewRestorer(repo, sn, Options{DryRun: true, Delete: true}) + _, err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + _, err = os.Stat(tempfile) + rtest.Assert(t, err == nil, "expected file to still exist, got error %v", err) +} + +func TestRestoreOverwriteDirectory(t *testing.T) { + saveSnapshotsAndOverwrite(t, + Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "anotherfile": File{Data: "content: file\n"}, + }, + }, + }, + }, + Snapshot{ + Nodes: map[string]Node{ + "dir": File{Data: "content: file\n"}, + }, + }, + Options{}, + Options{Delete: true}, + ) +} + +func TestRestoreDelete(t *testing.T) { + repo := repository.TestRepository(t) + tempdir := rtest.TempDir(t) + + sn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "file1": File{Data: "content: file\n"}, + "anotherfile": File{Data: "content: file\n"}, + }, + }, + "dir2": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "anotherfile": File{Data: "content: file\n"}, + }, + }, + "anotherfile": File{Data: "content: file\n"}, + }, + }, noopGetGenericAttributes) + + // should delete files that no longer exist in the snapshot + deleteSn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "file1": File{Data: "content: file\n"}, + }, + }, + }, + }, noopGetGenericAttributes) + + tests := []struct { + selectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) + fileState map[string]bool + }{ + { + selectFilter: nil, + fileState: map[string]bool{ + "dir": true, + filepath.Join("dir", "anotherfile"): false, + filepath.Join("dir", "file1"): true, + "dir2": false, + filepath.Join("dir2", "anotherfile"): false, + "anotherfile": false, + }, + }, + { + selectFilter: func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + return false, false + }, + fileState: map[string]bool{ + "dir": true, + filepath.Join("dir", "anotherfile"): true, + filepath.Join("dir", "file1"): true, + "dir2": true, + filepath.Join("dir2", "anotherfile"): true, + "anotherfile": true, + }, + }, + { + selectFilter: func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + switch item { + case filepath.FromSlash("/dir"): + selectedForRestore = true + case filepath.FromSlash("/dir2"): + selectedForRestore = true + } + return + }, + fileState: map[string]bool{ + "dir": true, 
+ filepath.Join("dir", "anotherfile"): true, + filepath.Join("dir", "file1"): true, + "dir2": false, + filepath.Join("dir2", "anotherfile"): false, + "anotherfile": true, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + res := NewRestorer(repo, sn, Options{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + res = NewRestorer(repo, deleteSn, Options{Delete: true}) + if test.selectFilter != nil { + res.SelectFilter = test.selectFilter + } + _, err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + for fn, shouldExist := range test.fileState { + _, err := os.Stat(filepath.Join(tempdir, fn)) + if shouldExist { + rtest.OK(t, err) + } else { + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "file %v: unexpected error got %v, expected ErrNotExist", fn, err) + } + } + }) + } +} + +func TestRestoreToFile(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n"}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + + // create a file in the place of the target directory + rtest.OK(t, os.WriteFile(tempdir, []byte{}, 0o700)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + res := NewRestorer(repo, sn, Options{}) + _, err := res.RestoreTo(ctx, tempdir) + rtest.Assert(t, strings.Contains(err.Error(), "cannot create target directory"), "unexpected error %v", err) +} + +func TestRestorerLongPath(t *testing.T) { + tmp := t.TempDir() + + longPath := tmp + for i := 0; i < 20; i++ { + longPath = filepath.Join(longPath, "aaaaaaaaaaaaaaaaaaaa") + } + + rtest.OK(t, os.MkdirAll(longPath, 0o700)) + f, err := fs.OpenFile(filepath.Join(longPath, "file"), fs.O_CREATE|fs.O_RDWR, 0o600) + rtest.OK(t, err) + _, err = f.WriteString("Hello, World!") + rtest.OK(t, err) + rtest.OK(t, f.Close()) + + repo := repository.TestRepository(t) + + local := &fs.Local{} + sc := archiver.NewScanner(local) + rtest.OK(t, sc.Scan(context.TODO(), []string{tmp})) + arch := archiver.New(repo, local, archiver.Options{}) + sn, _, _, err := arch.Snapshot(context.Background(), []string{tmp}, archiver.SnapshotOptions{}) + rtest.OK(t, err) + + res := NewRestorer(repo, sn, Options{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + countRestoredFiles, err := res.RestoreTo(ctx, tmp) + rtest.OK(t, err) + _, err = res.VerifyFiles(ctx, tmp, countRestoredFiles, nil) + rtest.OK(t, err) +} diff --git a/internal/restorer/restorer_unix.go b/internal/restorer/restorer_unix.go new file mode 100644 index 00000000000..7316f7b5dd1 --- /dev/null +++ b/internal/restorer/restorer_unix.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package restorer + +// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the +// uppercase version of the string. On all other systems, it returns the unmodified filename. 
+func toComparableFilename(path string) string { + return path +} diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 2c30a6b6446..c4e8149b247 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -5,6 +5,7 @@ package restorer import ( "context" + "io/fs" "os" "path/filepath" "syscall" @@ -12,12 +13,11 @@ import ( "time" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" restoreui "github.com/restic/restic/internal/ui/restore" ) -func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) { +func TestRestorerRestoreEmptyHardlinkedFields(t *testing.T) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, Snapshot{ @@ -29,19 +29,15 @@ func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) { }, }, }, - }) + }, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) - - res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - return true, true - } + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) f1, err := os.Stat(filepath.Join(tempdir, "dirtest/file1")) @@ -69,20 +65,15 @@ func getBlockCount(t *testing.T, filename string) int64 { return st.Blocks } -type printerMock struct { - filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64 +func TestRestorerProgressBar(t *testing.T) { + testRestorerProgressBar(t, false) } -func (p *printerMock) Update(_, _, _, _ uint64, _ time.Duration) { -} -func (p *printerMock) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) { - p.filesFinished = filesFinished - p.filesTotal = filesTotal - p.allBytesWritten = allBytesWritten - p.allBytesTotal = allBytesTotal +func TestRestorerProgressBarDryRun(t *testing.T) { + testRestorerProgressBar(t, true) } -func TestRestorerProgressBar(t *testing.T) { +func testRestorerProgressBar(t *testing.T, dryRun bool) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, Snapshot{ @@ -95,29 +86,59 @@ func TestRestorerProgressBar(t *testing.T) { }, "file2": File{Links: 1, Inode: 2, Data: "example"}, }, - }) + }, noopGetGenericAttributes) mock := &printerMock{} progress := restoreui.NewProgress(mock, 0) - res := NewRestorer(repo, sn, false, progress) - res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - return true, true - } + res := NewRestorer(repo, sn, Options{Progress: progress, DryRun: dryRun}) tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) progress.Finish() - const filesFinished = 4 - const filesTotal = filesFinished - const allBytesWritten = 10 - const allBytesTotal = allBytesWritten - rtest.Assert(t, mock.filesFinished == filesFinished, "filesFinished: expected %v, got %v", filesFinished, mock.filesFinished) - rtest.Assert(t, mock.filesTotal == filesTotal, "filesTotal: expected %v, got %v", filesTotal, mock.filesTotal) - rtest.Assert(t, mock.allBytesWritten == allBytesWritten, "allBytesWritten: expected %v, got %v", allBytesWritten, mock.allBytesWritten) - rtest.Assert(t, 
mock.allBytesTotal == allBytesTotal, "allBytesTotal: expected %v, got %v", allBytesTotal, mock.allBytesTotal) + rtest.Equals(t, restoreui.State{ + FilesFinished: 4, + FilesTotal: 4, + FilesSkipped: 0, + AllBytesWritten: 10, + AllBytesTotal: 10, + AllBytesSkipped: 0, + }, mock.s) +} + +func TestRestorePermissions(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", Mode: 0o600, ModTime: time.Now()}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{}) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { + // tamper with permissions + path := filepath.Join(tempdir, "foo") + rtest.OK(t, os.Chmod(path, 0o700)) + + res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + fi, err := os.Stat(path) + rtest.OK(t, err) + rtest.Equals(t, fs.FileMode(0o600), fi.Mode().Perm(), "unexpected permissions") + } } diff --git a/internal/restorer/restorer_windows.go b/internal/restorer/restorer_windows.go new file mode 100644 index 00000000000..9ddc0a932e1 --- /dev/null +++ b/internal/restorer/restorer_windows.go @@ -0,0 +1,13 @@ +//go:build windows +// +build windows + +package restorer + +import "strings" + +// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the +// uppercase version of the string. On all other systems, it returns the unmodified filename. +func toComparableFilename(path string) string { + // apparently NTFS internally uppercases filenames for comparison + return strings.ToUpper(path) +} diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 3ec4b1f11e0..4764bed2d4c 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -4,11 +4,21 @@ package restorer import ( + "context" + "encoding/json" "math" + "os" + "path" + "path/filepath" "syscall" "testing" + "time" "unsafe" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" "golang.org/x/sys/windows" ) @@ -33,3 +43,533 @@ func getBlockCount(t *testing.T, filename string) int64 { return int64(math.Ceil(float64(result) / 512)) } + +type DataStreamInfo struct { + name string + data string +} + +type NodeInfo struct { + DataStreamInfo + parentDir string + attributes FileAttributes + Exists bool + IsDirectory bool +} + +func TestFileAttributeCombination(t *testing.T) { + testFileAttributeCombination(t, false) +} + +func TestEmptyFileAttributeCombination(t *testing.T) { + testFileAttributeCombination(t, true) +} + +func testFileAttributeCombination(t *testing.T, isEmpty bool) { + t.Parallel() + //Generate combination of 5 attributes. 
+ attributeCombinations := generateCombinations(5, []bool{}) + + fileName := "TestFile.txt" + // Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + //Set up the required file information + fileInfo := NodeInfo{ + DataStreamInfo: getDataStreamInfo(isEmpty, fileName), + parentDir: "dir", + attributes: getFileAttributes(attr1), + Exists: false, + } + + //Get the current test name + testName := getCombinationTestName(fileInfo, fileName, fileInfo.attributes) + + //Run test + t.Run(testName, func(t *testing.T) { + mainFilePath := runAttributeTests(t, fileInfo, fileInfo.attributes) + + verifyFileRestores(isEmpty, mainFilePath, t, fileInfo) + }) + } +} + +func generateCombinations(n int, prefix []bool) [][]bool { + if n == 0 { + // Return a slice containing the current permutation + return [][]bool{append([]bool{}, prefix...)} + } + + // Generate combinations with True + prefixTrue := append(prefix, true) + permsTrue := generateCombinations(n-1, prefixTrue) + + // Generate combinations with False + prefixFalse := append(prefix, false) + permsFalse := generateCombinations(n-1, prefixFalse) + + // Combine combinations with True and False + return append(permsTrue, permsFalse...) +} + +func getDataStreamInfo(isEmpty bool, fileName string) DataStreamInfo { + var dataStreamInfo DataStreamInfo + if isEmpty { + dataStreamInfo = DataStreamInfo{ + name: fileName, + } + } else { + dataStreamInfo = DataStreamInfo{ + name: fileName, + data: "Main file data stream.", + } + } + return dataStreamInfo +} + +func getFileAttributes(values []bool) FileAttributes { + return FileAttributes{ + ReadOnly: values[0], + Hidden: values[1], + System: values[2], + Archive: values[3], + Encrypted: values[4], + } +} + +func getCombinationTestName(fi NodeInfo, fileName string, overwriteAttr FileAttributes) string { + if fi.attributes.ReadOnly { + fileName += "-ReadOnly" + } + if fi.attributes.Hidden { + fileName += "-Hidden" + } + if fi.attributes.System { + fileName += "-System" + } + if fi.attributes.Archive { + fileName += "-Archive" + } + if fi.attributes.Encrypted { + fileName += "-Encrypted" + } + if fi.Exists { + fileName += "-Overwrite" + if overwriteAttr.ReadOnly { + fileName += "-R" + } + if overwriteAttr.Hidden { + fileName += "-H" + } + if overwriteAttr.System { + fileName += "-S" + } + if overwriteAttr.Archive { + fileName += "-A" + } + if overwriteAttr.Encrypted { + fileName += "-E" + } + } + return fileName +} + +func runAttributeTests(t *testing.T, fileInfo NodeInfo, existingFileAttr FileAttributes) string { + testDir := t.TempDir() + res, _ := setupWithFileAttributes(t, fileInfo, testDir, existingFileAttr) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err := res.RestoreTo(ctx, testDir) + rtest.OK(t, err) + + mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name) + //Verify restore + verifyFileAttributes(t, mainFilePath, fileInfo.attributes) + return mainFilePath +} + +func setupWithFileAttributes(t *testing.T, nodeInfo NodeInfo, testDir string, existingFileAttr FileAttributes) (*Restorer, []int) { + t.Helper() + if nodeInfo.Exists { + if !nodeInfo.IsDirectory { + err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir), os.ModeDir) + rtest.OK(t, err) + filepath := path.Join(testDir, nodeInfo.parentDir, nodeInfo.name) + if existingFileAttr.Encrypted { + err := createEncryptedFileWriteData(filepath, nodeInfo) + rtest.OK(t, err) + } else { + // Write the data to the file + file, err := os.OpenFile(path.Clean(filepath), 
os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + rtest.OK(t, err) + _, err = file.Write([]byte(nodeInfo.data)) + rtest.OK(t, err) + + err = file.Close() + rtest.OK(t, err) + } + } else { + err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name), os.ModeDir) + rtest.OK(t, err) + } + + pathPointer, err := syscall.UTF16PtrFromString(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name)) + rtest.OK(t, err) + syscall.SetFileAttributes(pathPointer, getAttributeValue(&existingFileAttr)) + } + + index := 0 + + order := []int{} + streams := []DataStreamInfo{} + if !nodeInfo.IsDirectory { + order = append(order, index) + index++ + streams = append(streams, nodeInfo.DataStreamInfo) + } + return setup(t, getNodes(nodeInfo.parentDir, nodeInfo.name, order, streams, nodeInfo.IsDirectory, &nodeInfo.attributes)), order +} + +func createEncryptedFileWriteData(filepath string, fileInfo NodeInfo) (err error) { + var ptr *uint16 + if ptr, err = windows.UTF16PtrFromString(filepath); err != nil { + return err + } + var handle windows.Handle + //Create the file with encrypted flag + if handle, err = windows.CreateFile(ptr, uint32(windows.GENERIC_READ|windows.GENERIC_WRITE), uint32(windows.FILE_SHARE_READ), nil, uint32(windows.CREATE_ALWAYS), windows.FILE_ATTRIBUTE_ENCRYPTED, 0); err != nil { + return err + } + //Write data to file + if _, err = windows.Write(handle, []byte(fileInfo.data)); err != nil { + return err + } + //Close handle + return windows.CloseHandle(handle) +} + +func setup(t *testing.T, nodesMap map[string]Node) *Restorer { + repo := repository.TestRepository(t) + getFileAttributes := func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage) { + if attr == nil { + return + } + + fileattr := getAttributeValue(attr) + + if isDir { + //If the node is a directory add FILE_ATTRIBUTE_DIRECTORY to attributes + fileattr |= windows.FILE_ATTRIBUTE_DIRECTORY + } + attrs, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{FileAttributes: &fileattr}) + test.OK(t, err) + return attrs + } + sn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: nodesMap, + }, getFileAttributes) + res := NewRestorer(repo, sn, Options{}) + return res +} + +func getAttributeValue(attr *FileAttributes) uint32 { + var fileattr uint32 + if attr.ReadOnly { + fileattr |= windows.FILE_ATTRIBUTE_READONLY + } + if attr.Hidden { + fileattr |= windows.FILE_ATTRIBUTE_HIDDEN + } + if attr.Encrypted { + fileattr |= windows.FILE_ATTRIBUTE_ENCRYPTED + } + if attr.Archive { + fileattr |= windows.FILE_ATTRIBUTE_ARCHIVE + } + if attr.System { + fileattr |= windows.FILE_ATTRIBUTE_SYSTEM + } + return fileattr +} + +func getNodes(dir string, mainNodeName string, order []int, streams []DataStreamInfo, isDirectory bool, attributes *FileAttributes) map[string]Node { + var mode os.FileMode + if isDirectory { + mode = os.FileMode(2147484159) + } else { + if attributes != nil && attributes.ReadOnly { + mode = os.FileMode(0o444) + } else { + mode = os.FileMode(0o666) + } + } + + getFileNodes := func() map[string]Node { + nodes := map[string]Node{} + if isDirectory { + //Add a directory node at the same level as the other streams + nodes[mainNodeName] = Dir{ + ModTime: time.Now(), + attributes: attributes, + Mode: mode, + } + } + + if len(streams) > 0 { + for _, index := range order { + stream := streams[index] + + var attr *FileAttributes = nil + if mainNodeName == stream.name { + attr = attributes + } else if attributes != nil && attributes.Encrypted { + //Set encrypted attribute + 
attr = &FileAttributes{Encrypted: true} + } + + nodes[stream.name] = File{ + ModTime: time.Now(), + Data: stream.data, + Mode: mode, + attributes: attr, + } + } + } + return nodes + } + + return map[string]Node{ + dir: Dir{ + Mode: normalizeFileMode(0750 | mode), + ModTime: time.Now(), + Nodes: getFileNodes(), + }, + } +} + +func verifyFileAttributes(t *testing.T, mainFilePath string, attr FileAttributes) { + ptr, err := windows.UTF16PtrFromString(mainFilePath) + rtest.OK(t, err) + //Get file attributes using syscall + fileAttributes, err := syscall.GetFileAttributes(ptr) + rtest.OK(t, err) + //Test positive and negative scenarios + if attr.ReadOnly { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY != 0, "Expected read only attribute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY == 0, "Unexpected read only attribute.") + } + if attr.Hidden { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0, "Expected hidden attribute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN == 0, "Unexpected hidden attribute.") + } + if attr.System { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM != 0, "Expected system attribute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM == 0, "Unexpected system attribute.") + } + if attr.Archive { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE != 0, "Expected archive attribute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE == 0, "Unexpected archive attribute.") + } + if attr.Encrypted { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED != 0, "Expected encrypted attribute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED == 0, "Unexpected encrypted attribute.") + } +} + +func verifyFileRestores(isEmpty bool, mainFilePath string, t *testing.T, fileInfo NodeInfo) { + if isEmpty { + _, err1 := os.Stat(mainFilePath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist") + } else { + + verifyMainFileRestore(t, mainFilePath, fileInfo) + } +} + +func verifyMainFileRestore(t *testing.T, mainFilePath string, fileInfo NodeInfo) { + fi, err1 := os.Stat(mainFilePath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist") + + size := fi.Size() + rtest.Assert(t, size > 0, "The file "+fileInfo.name+" exists but is empty") + + content, err := os.ReadFile(mainFilePath) + rtest.OK(t, err) + rtest.Assert(t, string(content) == fileInfo.data, "The file "+fileInfo.name+" exists but the content is not overwritten") +} + +func TestDirAttributeCombination(t *testing.T) { + t.Parallel() + attributeCombinations := generateCombinations(4, []bool{}) + + dirName := "TestDir" + // Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + //Set up the required directory information + dirInfo := NodeInfo{ + DataStreamInfo: DataStreamInfo{ + name: dirName, + }, + parentDir: "dir", + attributes: getDirFileAttributes(attr1), + Exists: false, + IsDirectory: true, + } + + //Get the current test name + testName := getCombinationTestName(dirInfo, dirName, dirInfo.attributes) + + //Run test + t.Run(testName, func(t *testing.T) { + mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes) + + //Check directory exists + _, err1 := os.Stat(mainDirPath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist") + }) + } +} 
+ +func getDirFileAttributes(values []bool) FileAttributes { + return FileAttributes{ + // readonly not valid for directories + Hidden: values[0], + System: values[1], + Archive: values[2], + Encrypted: values[3], + } +} + +func TestFileAttributeCombinationsOverwrite(t *testing.T) { + testFileAttributeCombinationsOverwrite(t, false) +} + +func TestEmptyFileAttributeCombinationsOverwrite(t *testing.T) { + testFileAttributeCombinationsOverwrite(t, true) +} + +func testFileAttributeCombinationsOverwrite(t *testing.T, isEmpty bool) { + t.Parallel() + //Get attribute combinations + attributeCombinations := generateCombinations(5, []bool{}) + //Get overwrite file attribute combinations + overwriteCombinations := generateCombinations(5, []bool{}) + + fileName := "TestOverwriteFile" + + //Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + fileInfo := NodeInfo{ + DataStreamInfo: getDataStreamInfo(isEmpty, fileName), + parentDir: "dir", + attributes: getFileAttributes(attr1), + Exists: true, + } + + overwriteFileAttributes := []FileAttributes{} + + for _, overwrite := range overwriteCombinations { + overwriteFileAttributes = append(overwriteFileAttributes, getFileAttributes(overwrite)) + } + + //Iterate through each overwrite attribute combination + for _, overwriteFileAttr := range overwriteFileAttributes { + //Get the test name + testName := getCombinationTestName(fileInfo, fileName, overwriteFileAttr) + + //Run test + t.Run(testName, func(t *testing.T) { + mainFilePath := runAttributeTests(t, fileInfo, overwriteFileAttr) + + verifyFileRestores(isEmpty, mainFilePath, t, fileInfo) + }) + } + } +} + +func TestDirAttributeCombinationsOverwrite(t *testing.T) { + t.Parallel() + //Get attribute combinations + attributeCombinations := generateCombinations(4, []bool{}) + //Get overwrite dir attribute combinations + overwriteCombinations := generateCombinations(4, []bool{}) + + dirName := "TestOverwriteDir" + + //Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + dirInfo := NodeInfo{ + DataStreamInfo: DataStreamInfo{ + name: dirName, + }, + parentDir: "dir", + attributes: getDirFileAttributes(attr1), + Exists: true, + IsDirectory: true, + } + + overwriteDirFileAttributes := []FileAttributes{} + + for _, overwrite := range overwriteCombinations { + overwriteDirFileAttributes = append(overwriteDirFileAttributes, getDirFileAttributes(overwrite)) + } + + //Iterate through each overwrite attribute combinations + for _, overwriteDirAttr := range overwriteDirFileAttributes { + //Get the test name + testName := getCombinationTestName(dirInfo, dirName, overwriteDirAttr) + + //Run test + t.Run(testName, func(t *testing.T) { + mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes) + + //Check directory exists + _, err1 := os.Stat(mainDirPath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist") + }) + } + } +} + +func TestRestoreDeleteCaseInsensitive(t *testing.T) { + repo := repository.TestRepository(t) + tempdir := rtest.TempDir(t) + + sn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "anotherfile": File{Data: "content: file\n"}, + }, + }, noopGetGenericAttributes) + + // should delete files that no longer exist in the snapshot + deleteSn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "AnotherfilE": File{Data: "content: file\n"}, + }, + }, noopGetGenericAttributes) + + res := NewRestorer(repo, sn, Options{}) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + res = NewRestorer(repo, deleteSn, Options{Delete: true}) + _, err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + // anotherfile must still exist + _, err = os.Stat(filepath.Join(tempdir, "anotherfile")) + rtest.OK(t, err) +} diff --git a/internal/restorer/sparsewrite.go b/internal/restorer/sparsewrite.go index 2c1f234defb..ae354f64f17 100644 --- a/internal/restorer/sparsewrite.go +++ b/internal/restorer/sparsewrite.go @@ -1,6 +1,3 @@ -//go:build !windows -// +build !windows - package restorer import ( diff --git a/internal/test/helpers.go b/internal/test/helpers.go index 65e3e36ec6c..3387d36df67 100644 --- a/internal/test/helpers.go +++ b/internal/test/helpers.go @@ -3,7 +3,9 @@ package test import ( "compress/bzip2" "compress/gzip" + "fmt" "io" + "math/rand" "os" "os/exec" "path/filepath" @@ -11,8 +13,6 @@ import ( "testing" "github.com/restic/restic/internal/errors" - - mrand "math/rand" ) // Assert fails the test if the condition is false. @@ -47,10 +47,22 @@ func OKs(tb testing.TB, errs []error) { } // Equals fails the test if exp is not equal to act. -func Equals(tb testing.TB, exp, act interface{}) { +// msg is optional message to be printed, first param being format string and rest being arguments. +func Equals(tb testing.TB, exp, act interface{}, msgs ...string) { tb.Helper() if !reflect.DeepEqual(exp, act) { - tb.Fatalf("\033[31m\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act) + var msgString string + length := len(msgs) + if length == 1 { + msgString = msgs[0] + } else if length > 1 { + args := make([]interface{}, length-1) + for i, msg := range msgs[1:] { + args[i] = msg + } + msgString = fmt.Sprintf(msgs[0], args...) + } + tb.Fatalf("\033[31m\n\n\t"+msgString+"\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act) } } @@ -58,7 +70,7 @@ func Equals(tb testing.TB, exp, act interface{}) { func Random(seed, count int) []byte { p := make([]byte, count) - rnd := mrand.New(mrand.NewSource(int64(seed))) + rnd := rand.New(rand.NewSource(int64(seed))) for i := 0; i < len(p); i += 8 { val := rnd.Int63() diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 10f0e91fad8..79da353eb82 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -7,14 +7,13 @@ import ( "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" - "github.com/restic/restic/internal/ui/termstatus" ) // JSONProgress reports progress for the `backup` command in JSON. type JSONProgress struct { *ui.Message - term *termstatus.Terminal + term ui.Terminal v uint } @@ -22,7 +21,7 @@ type JSONProgress struct { var _ ProgressPrinter = &JSONProgress{} // NewJSONProgress returns a new backup progress reporter. 
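An aside on the test helper change above: Equals now accepts an optional printf-style message as trailing string arguments, where the first string is the format and the rest are its arguments. A minimal usage sketch with hypothetical values:

	got := fetchValue() // hypothetical helper
	test.Equals(t, "expected", got, "unexpected value for key %q", "foo")

Because the variadic parameter is ...string, only string arguments can be interpolated; anything else must be pre-formatted by the caller.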
-func NewJSONProgress(term *termstatus.Terminal, verbosity uint) *JSONProgress { +func NewJSONProgress(term ui.Terminal, verbosity uint) *JSONProgress { return &JSONProgress{ Message: ui.NewMessage(term, verbosity), term: term, @@ -68,7 +67,7 @@ func (b *JSONProgress) Update(total, processed Counter, errors uint, currentFile func (b *JSONProgress) ScannerError(item string, err error) error { b.error(errorUpdate{ MessageType: "error", - Error: err, + Error: errorObject{err.Error()}, During: "scan", Item: item, }) @@ -79,7 +78,7 @@ func (b *JSONProgress) ScannerError(item string, err error) error { func (b *JSONProgress) Error(item string, err error) error { b.error(errorUpdate{ MessageType: "error", - Error: err, + Error: errorObject{err.Error()}, During: "archival", Item: item, }) @@ -163,7 +162,12 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) { +func (b *JSONProgress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) { + id := "" + // empty if snapshot creation was skipped + if !snapshotID.IsNull() { + id = snapshotID.String() + } b.print(summaryOutput{ MessageType: "summary", FilesNew: summary.Files.New, @@ -175,10 +179,13 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *Su DataBlobs: summary.ItemStats.DataBlobs, TreeBlobs: summary.ItemStats.TreeBlobs, DataAdded: summary.ItemStats.DataSize + summary.ItemStats.TreeSize, + DataAddedPacked: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, - TotalDuration: time.Since(start).Seconds(), - SnapshotID: snapshotID.String(), + BackupStart: summary.BackupStart, + BackupEnd: summary.BackupEnd, + TotalDuration: summary.BackupEnd.Sub(summary.BackupStart).Seconds(), + SnapshotID: id, DryRun: dryRun, }) } @@ -200,11 +207,15 @@ type statusUpdate struct { CurrentFiles []string `json:"current_files,omitempty"` } +type errorObject struct { + Message string `json:"message"` +} + type errorUpdate struct { - MessageType string `json:"message_type"` // "error" - Error error `json:"error"` - During string `json:"during"` - Item string `json:"item"` + MessageType string `json:"message_type"` // "error" + Error errorObject `json:"error"` + During string `json:"during"` + Item string `json:"item"` } type verboseUpdate struct { @@ -220,19 +231,22 @@ type verboseUpdate struct { } type summaryOutput struct { - MessageType string `json:"message_type"` // "summary" - FilesNew uint `json:"files_new"` - FilesChanged uint `json:"files_changed"` - FilesUnmodified uint `json:"files_unmodified"` - DirsNew uint `json:"dirs_new"` - DirsChanged uint `json:"dirs_changed"` - DirsUnmodified uint `json:"dirs_unmodified"` - DataBlobs int `json:"data_blobs"` - TreeBlobs int `json:"tree_blobs"` - DataAdded uint64 `json:"data_added"` - TotalFilesProcessed uint `json:"total_files_processed"` - TotalBytesProcessed uint64 `json:"total_bytes_processed"` - TotalDuration float64 `json:"total_duration"` // in seconds - SnapshotID string `json:"snapshot_id"` - DryRun bool `json:"dry_run,omitempty"` + MessageType string `json:"message_type"` // "summary" + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + 
DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedPacked uint64 `json:"data_added_packed"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` + TotalDuration float64 `json:"total_duration"` // in seconds + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + SnapshotID string `json:"snapshot_id,omitempty"` + DryRun bool `json:"dry_run,omitempty"` } diff --git a/internal/ui/backup/json_test.go b/internal/ui/backup/json_test.go new file mode 100644 index 00000000000..b4872efd51f --- /dev/null +++ b/internal/ui/backup/json_test.go @@ -0,0 +1,27 @@ +package backup + +import ( + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" +) + +func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} + printer := NewJSONProgress(term, 3) + return term, printer +} + +func TestJSONError(t *testing.T) { + term, printer := createJSONProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"archival\",\"item\":\"/path\"}\n"}, term.Errors) +} + +func TestJSONScannerError(t *testing.T) { + term, printer := createJSONProgress() + test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"scan\",\"item\":\"/path\"}\n"}, term.Errors) +} diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index 4362a8c83e2..318d304357f 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -17,7 +17,7 @@ type ProgressPrinter interface { ScannerError(item string, err error) error CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration) ReportTotal(start time.Time, s archiver.ScanStats) - Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) + Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) Reset() P(msg string, args ...interface{}) @@ -28,16 +28,6 @@ type Counter struct { Files, Dirs, Bytes uint64 } -type Summary struct { - Files, Dirs struct { - New uint - Changed uint - Unchanged uint - } - ProcessedBytes uint64 - archiver.ItemStats -} - // Progress reports progress for the `backup` command. type Progress struct { progress.Updater @@ -52,7 +42,6 @@ type Progress struct { processed, total Counter errors uint - summary Summary printer ProgressPrinter } @@ -63,7 +52,7 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress { printer: printer, estimator: *newRateEstimator(time.Now()), } - p.Updater = *progress.NewUpdater(interval, func(runtime time.Duration, final bool) { + p.Updater = *progress.NewUpdater(interval, func(_ time.Duration, final bool) { if final { p.printer.Reset() } else { @@ -126,16 +115,6 @@ func (p *Progress) CompleteBlob(bytes uint64) { // CompleteItem is the status callback function for the archiver when a // file/dir has been saved successfully. 
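For reference, a complete summary message under the reworked schema would look roughly like this (field order follows the struct; values are illustrative, and dry_run is omitted here because of omitempty):

	{"message_type":"summary","files_new":3,"files_changed":1,"files_unmodified":10,"dirs_new":0,"dirs_changed":2,"dirs_unmodified":5,"data_blobs":4,"tree_blobs":2,"data_added":1048576,"data_added_packed":524288,"total_files_processed":14,"total_bytes_processed":4194304,"total_duration":1.5,"backup_start":"2024-01-01T12:00:00Z","backup_end":"2024-01-01T12:00:01.5Z","snapshot_id":"0fcbbd09"}

Note that snapshot_id is now omitempty and stays empty when snapshot creation was skipped, which JSON consumers should be prepared to handle.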
func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) { - p.mu.Lock() - p.summary.ItemStats.Add(s) - - // for the last item "/", current is nil - if current != nil { - p.summary.ProcessedBytes += current.Size - } - - p.mu.Unlock() - if current == nil { // error occurred, tell the status display to remove the line p.mu.Lock() @@ -145,7 +124,7 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a } switch current.Type { - case "dir": + case restic.NodeTypeDir: p.mu.Lock() p.addProcessed(Counter{Dirs: 1}) p.mu.Unlock() @@ -153,24 +132,13 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a switch { case previous == nil: p.printer.CompleteItem("dir new", item, s, d) - p.mu.Lock() - p.summary.Dirs.New++ - p.mu.Unlock() - case previous.Equals(*current): p.printer.CompleteItem("dir unchanged", item, s, d) - p.mu.Lock() - p.summary.Dirs.Unchanged++ - p.mu.Unlock() - default: p.printer.CompleteItem("dir modified", item, s, d) - p.mu.Lock() - p.summary.Dirs.Changed++ - p.mu.Unlock() } - case "file": + case restic.NodeTypeFile: p.mu.Lock() p.addProcessed(Counter{Files: 1}) delete(p.currentFiles, item) @@ -179,21 +147,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a switch { case previous == nil: p.printer.CompleteItem("file new", item, s, d) - p.mu.Lock() - p.summary.Files.New++ - p.mu.Unlock() - case previous.Equals(*current): p.printer.CompleteItem("file unchanged", item, s, d) - p.mu.Lock() - p.summary.Files.Unchanged++ - p.mu.Unlock() - default: p.printer.CompleteItem("file modified", item, s, d) - p.mu.Lock() - p.summary.Files.Changed++ - p.mu.Unlock() } } } @@ -213,8 +170,8 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (p *Progress) Finish(snapshotID restic.ID, dryrun bool) { +func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) { // wait for the status update goroutine to shut down p.Updater.Done() - p.printer.Finish(snapshotID, p.start, &p.summary, dryrun) + p.printer.Finish(snapshotID, summary, dryrun) } diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go index 79a56c91ee7..60e754b4a4d 100644 --- a/internal/ui/backup/progress_test.go +++ b/internal/ui/backup/progress_test.go @@ -33,11 +33,10 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item } func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {} -func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, _ bool) { +func (p *mockPrinter) Finish(id restic.ID, _ *archiver.Summary, _ bool) { p.Lock() defer p.Unlock() - _ = *summary // Should not be nil. 
p.id = id } @@ -56,15 +55,15 @@ func TestProgress(t *testing.T) { prog.CompleteBlob(1024) // "dir unchanged" - node := restic.Node{Type: "dir"} + node := restic.Node{Type: restic.NodeTypeDir} prog.CompleteItem("foo", &node, &node, archiver.ItemStats{}, 0) // "file new" - node.Type = "file" + node.Type = restic.NodeTypeFile prog.CompleteItem("foo", nil, &node, archiver.ItemStats{}, 0) time.Sleep(10 * time.Millisecond) id := restic.NewRandomID() - prog.Finish(id, false) + prog.Finish(id, nil, false) if !prnt.dirUnchanged { t.Error(`"dir unchanged" event not seen`) diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 215982cd484..efd7ffdfee6 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -15,17 +15,19 @@ import ( type TextProgress struct { *ui.Message - term *termstatus.Terminal + term ui.Terminal + verbosity uint } // assert that Backup implements the ProgressPrinter interface var _ ProgressPrinter = &TextProgress{} // NewTextProgress returns a new backup progress reporter. -func NewTextProgress(term *termstatus.Terminal, verbosity uint) *TextProgress { +func NewTextProgress(term ui.Terminal, verbosity uint) *TextProgress { return &TextProgress{ - Message: ui.NewMessage(term, verbosity), - term: term, + Message: ui.NewMessage(term, verbosity), + term: term, + verbosity: verbosity, } } @@ -73,7 +75,9 @@ func (b *TextProgress) Update(total, processed Counter, errors uint, currentFile // ScannerError is the error callback function for the scanner, it prints the // error in verbose mode and returns nil. func (b *TextProgress) ScannerError(_ string, err error) error { - b.V("scan: %v\n", err) + if b.verbosity >= 2 { + b.E("scan: %v\n", err) + } return nil } @@ -121,12 +125,12 @@ func (b *TextProgress) ReportTotal(start time.Time, s archiver.ScanStats) { // Reset status func (b *TextProgress) Reset() { if b.term.CanUpdateStatus() { - b.term.SetStatus([]string{""}) + b.term.SetStatus(nil) } } // Finish prints the finishing messages. 
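With Summary gone from this package, the totals now flow from the archiver to the printers instead of being accumulated in the UI. A hedged sketch of the resulting call site (the archiver-side signature is an assumption, not shown in this diff):

	// The archiver owns the counters and returns them once the snapshot is done.
	_, id, summary, err := arch.Snapshot(ctx, targets, snapshotOpts)
	if err != nil {
		return err
	}
	progressReporter.Finish(id, summary, opts.DryRun) // printers only render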
-func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dryRun bool) { +func (b *TextProgress) Finish(id restic.ID, summary *archiver.Summary, dryRun bool) { b.P("\n") b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged) b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged) @@ -143,6 +147,14 @@ func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dr b.P("processed %v files, %v in %s", summary.Files.New+summary.Files.Changed+summary.Files.Unchanged, ui.FormatBytes(summary.ProcessedBytes), - ui.FormatDuration(time.Since(start)), + ui.FormatDuration(summary.BackupEnd.Sub(summary.BackupStart)), ) + + if !dryRun { + if id.IsNull() { + b.P("skipped creating snapshot\n") + } else { + b.P("snapshot %s saved\n", id.Str()) + } + } } diff --git a/internal/ui/backup/text_test.go b/internal/ui/backup/text_test.go new file mode 100644 index 00000000000..39338a50cf7 --- /dev/null +++ b/internal/ui/backup/text_test.go @@ -0,0 +1,27 @@ +package backup + +import ( + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" +) + +func createTextProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} + printer := NewTextProgress(term, 3) + return term, printer +} + +func TestError(t *testing.T) { + term, printer := createTextProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"error: error \"message\"\n"}, term.Errors) +} + +func TestScannerError(t *testing.T) { + term, printer := createTextProgress() + test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"scan: error \"message\"\n"}, term.Errors) +} diff --git a/internal/ui/format.go b/internal/ui/format.go index d2e0a4d2b43..de650607d5b 100644 --- a/internal/ui/format.go +++ b/internal/ui/format.go @@ -8,6 +8,8 @@ import ( "math/bits" "strconv" "time" + + "golang.org/x/text/width" ) func FormatBytes(c uint64) string { @@ -105,3 +107,24 @@ func ToJSONString(status interface{}) string { } return buf.String() } + +// TerminalDisplayWidth returns the number of terminal cells needed to display s +func TerminalDisplayWidth(s string) int { + width := 0 + for _, r := range s { + width += terminalDisplayRuneWidth(r) + } + + return width +} + +func terminalDisplayRuneWidth(r rune) int { + switch width.LookupRune(r).Kind() { + case width.EastAsianWide, width.EastAsianFullwidth: + return 2 + case width.EastAsianNarrow, width.EastAsianHalfwidth, width.EastAsianAmbiguous, width.Neutral: + return 1 + default: + return 0 + } +} diff --git a/internal/ui/format_test.go b/internal/ui/format_test.go index 4223d4e20bc..d595026c407 100644 --- a/internal/ui/format_test.go +++ b/internal/ui/format_test.go @@ -84,3 +84,21 @@ func TestParseBytesInvalid(t *testing.T) { test.Equals(t, int64(0), v) } } + +func TestTerminalDisplayWidth(t *testing.T) { + for _, c := range []struct { + input string + want int + }{ + {"foo", 3}, + {"aéb", 3}, + {"ab", 3}, + {"a’b", 3}, + {"aあb", 4}, + } { + if got := TerminalDisplayWidth(c.input); got != c.want { + t.Errorf("wrong display width for '%s', want %d, got %d", c.input, c.want, got) + } + } + +} diff --git a/internal/ui/message.go b/internal/ui/message.go index 75e54b01920..6ad5a439e67 100644 --- a/internal/ui/message.go +++ b/internal/ui/message.go 
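A quick sketch of why the TerminalDisplayWidth helper added to format.go above matters: len() counts bytes, not terminal cells (values taken from the test table above; ui is restic's internal/ui, utf8 is unicode/utf8):

	s := "aあb"
	len(s)                     // 5 — byte length; あ encodes as 3 bytes in UTF-8
	utf8.RuneCountInString(s)  // 3 — rune count
	ui.TerminalDisplayWidth(s) // 4 — terminal cells; あ is East Asian wide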
@@ -1,16 +1,18 @@ package ui -import "github.com/restic/restic/internal/ui/termstatus" +import ( + "fmt" +) // Message reports progress with messages of different verbosity. type Message struct { - term *termstatus.Terminal + term Terminal v uint } // NewMessage returns a message progress reporter with underlying terminal // term. -func NewMessage(term *termstatus.Terminal, verbosity uint) *Message { +func NewMessage(term Terminal, verbosity uint) *Message { return &Message{ term: term, v: verbosity, @@ -19,27 +21,27 @@ func NewMessage(term *termstatus.Terminal, verbosity uint) *Message { // E reports an error func (m *Message) E(msg string, args ...interface{}) { - m.term.Errorf(msg, args...) + m.term.Error(fmt.Sprintf(msg, args...)) } // P prints a message if verbosity >= 1, this is used for normal messages which // are not errors. func (m *Message) P(msg string, args ...interface{}) { if m.v >= 1 { - m.term.Printf(msg, args...) + m.term.Print(fmt.Sprintf(msg, args...)) } } // V prints a message if verbosity >= 2, this is used for verbose messages. func (m *Message) V(msg string, args ...interface{}) { if m.v >= 2 { - m.term.Printf(msg, args...) + m.term.Print(fmt.Sprintf(msg, args...)) } } // VV prints a message if verbosity >= 3, this is used for debug messages. func (m *Message) VV(msg string, args ...interface{}) { if m.v >= 3 { - m.term.Printf(msg, args...) + m.term.Print(fmt.Sprintf(msg, args...)) } } diff --git a/internal/ui/mock.go b/internal/ui/mock.go new file mode 100644 index 00000000000..5a4debb024d --- /dev/null +++ b/internal/ui/mock.go @@ -0,0 +1,22 @@ +package ui + +type MockTerminal struct { + Output []string + Errors []string +} + +func (m *MockTerminal) Print(line string) { + m.Output = append(m.Output, line) +} + +func (m *MockTerminal) Error(line string) { + m.Errors = append(m.Errors, line) +} + +func (m *MockTerminal) SetStatus(lines []string) { + m.Output = append([]string{}, lines...) +} + +func (m *MockTerminal) CanUpdateStatus() bool { + return true +} diff --git a/internal/ui/progress/printer.go b/internal/ui/progress/printer.go index a671621e98b..a2bc4c4b547 100644 --- a/internal/ui/progress/printer.go +++ b/internal/ui/progress/printer.go @@ -1,5 +1,7 @@ package progress +import "testing" + // A Printer can can return a new counter or print messages // at different log levels. // It must be safe to call its methods from concurrent goroutines. @@ -28,3 +30,36 @@ func (*NoopPrinter) P(_ string, _ ...interface{}) {} func (*NoopPrinter) V(_ string, _ ...interface{}) {} func (*NoopPrinter) VV(_ string, _ ...interface{}) {} + +// TestPrinter prints messages during testing +type TestPrinter struct { + t testing.TB +} + +func NewTestPrinter(t testing.TB) *TestPrinter { + return &TestPrinter{ + t: t, + } +} + +var _ Printer = (*TestPrinter)(nil) + +func (p *TestPrinter) NewCounter(_ string) *Counter { + return nil +} + +func (p *TestPrinter) E(msg string, args ...interface{}) { + p.t.Logf("error: "+msg, args...) +} + +func (p *TestPrinter) P(msg string, args ...interface{}) { + p.t.Logf("print: "+msg, args...) +} + +func (p *TestPrinter) V(msg string, args ...interface{}) { + p.t.Logf("verbose: "+msg, args...) +} + +func (p *TestPrinter) VV(msg string, args ...interface{}) { + p.t.Logf("verbose2: "+msg, args...) 
+} diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index c1b95b00bf9..f7f7bdd1f52 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -7,12 +7,14 @@ import ( ) type jsonPrinter struct { - terminal term + terminal ui.Terminal + verbosity uint } -func NewJSONProgress(terminal term) ProgressPrinter { +func NewJSONProgress(terminal ui.Terminal, verbosity uint) ProgressPrinter { return &jsonPrinter{ - terminal: terminal, + terminal: terminal, + verbosity: verbosity, } } @@ -20,31 +22,83 @@ func (t *jsonPrinter) print(status interface{}) { t.terminal.Print(ui.ToJSONString(status)) } -func (t *jsonPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *jsonPrinter) error(status interface{}) { + t.terminal.Error(ui.ToJSONString(status)) +} + +func (t *jsonPrinter) Update(p State, duration time.Duration) { status := statusUpdate{ MessageType: "status", SecondsElapsed: uint64(duration / time.Second), - TotalFiles: filesTotal, - FilesRestored: filesFinished, - TotalBytes: allBytesTotal, - BytesRestored: allBytesWritten, + TotalFiles: p.FilesTotal, + FilesRestored: p.FilesFinished, + FilesSkipped: p.FilesSkipped, + FilesDeleted: p.FilesDeleted, + TotalBytes: p.AllBytesTotal, + BytesRestored: p.AllBytesWritten, + BytesSkipped: p.AllBytesSkipped, + } + + if p.AllBytesTotal > 0 { + status.PercentDone = float64(p.AllBytesWritten) / float64(p.AllBytesTotal) + } + + t.print(status) +} + +func (t *jsonPrinter) Error(item string, err error) error { + t.error(errorUpdate{ + MessageType: "error", + Error: errorObject{err.Error()}, + During: "restore", + Item: item, + }) + return nil +} + +func (t *jsonPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { + if t.verbosity < 3 { + return } - if allBytesTotal > 0 { - status.PercentDone = float64(allBytesWritten) / float64(allBytesTotal) + var action string + switch messageType { + case ActionDirRestored: + action = "restored" + case ActionFileRestored: + action = "restored" + case ActionOtherRestored: + action = "restored" + case ActionFileUpdated: + action = "updated" + case ActionFileUnchanged: + action = "unchanged" + case ActionDeleted: + action = "deleted" + default: + panic("unknown message type") } + status := verboseUpdate{ + MessageType: "verbose_status", + Action: action, + Item: item, + Size: size, + } t.print(status) } -func (t *jsonPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *jsonPrinter) Finish(p State, duration time.Duration) { status := summaryOutput{ MessageType: "summary", SecondsElapsed: uint64(duration / time.Second), - TotalFiles: filesTotal, - FilesRestored: filesFinished, - TotalBytes: allBytesTotal, - BytesRestored: allBytesWritten, + TotalFiles: p.FilesTotal, + FilesRestored: p.FilesFinished, + FilesSkipped: p.FilesSkipped, + FilesDeleted: p.FilesDeleted, + TotalBytes: p.AllBytesTotal, + BytesRestored: p.AllBytesWritten, + BytesSkipped: p.AllBytesSkipped, } t.print(status) } @@ -55,8 +109,29 @@ type statusUpdate struct { PercentDone float64 `json:"percent_done"` TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` + FilesSkipped uint64 `json:"files_skipped,omitempty"` + FilesDeleted uint64 `json:"files_deleted,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 `json:"bytes_restored,omitempty"` + BytesSkipped uint64 `json:"bytes_skipped,omitempty"` +} + +type 
errorObject struct { + Message string `json:"message"` +} + +type errorUpdate struct { + MessageType string `json:"message_type"` // "error" + Error errorObject `json:"error"` + During string `json:"during"` + Item string `json:"item"` +} + +type verboseUpdate struct { + MessageType string `json:"message_type"` // "verbose_status" + Action string `json:"action"` + Item string `json:"item"` + Size uint64 `json:"size"` } type summaryOutput struct { @@ -64,6 +139,9 @@ type summaryOutput struct { SecondsElapsed uint64 `json:"seconds_elapsed,omitempty"` TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` + FilesSkipped uint64 `json:"files_skipped,omitempty"` + FilesDeleted uint64 `json:"files_deleted,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 `json:"bytes_restored,omitempty"` + BytesSkipped uint64 `json:"bytes_skipped,omitempty"` } diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 7bcabb4d78b..c7096c24658 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -4,26 +4,67 @@ import ( "testing" "time" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" ) +func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} + printer := NewJSONProgress(term, 3) + return term, printer +} + func TestJSONPrintUpdate(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) - printer.Update(3, 11, 29, 47, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) + term, printer := createJSONProgress() + printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) + test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) +} + +func TestJSONPrintUpdateWithSkipped(t *testing.T) { + term, printer := createJSONProgress() + printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second) + test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccess(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) - printer.Finish(11, 11, 47, 47, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.output) + term, printer := createJSONProgress() + printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second) + test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.Output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) - printer.Finish(3, 11, 29, 47, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) + term, printer := createJSONProgress() + 
printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) + test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) +} + +func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) { + term, printer := createJSONProgress() + printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second) + test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.Output) +} + +func TestJSONPrintCompleteItem(t *testing.T) { + for _, data := range []struct { + action ItemAction + size uint64 + expected string + }{ + {ActionDirRestored, 0, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":0}\n"}, + {ActionFileRestored, 123, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":123}\n"}, + {ActionFileUpdated, 123, "{\"message_type\":\"verbose_status\",\"action\":\"updated\",\"item\":\"test\",\"size\":123}\n"}, + {ActionFileUnchanged, 123, "{\"message_type\":\"verbose_status\",\"action\":\"unchanged\",\"item\":\"test\",\"size\":123}\n"}, + {ActionDeleted, 0, "{\"message_type\":\"verbose_status\",\"action\":\"deleted\",\"item\":\"test\",\"size\":0}\n"}, + } { + term, printer := createJSONProgress() + printer.CompleteItem(data.action, "test", data.size) + test.Equals(t, []string{data.expected}, term.Output) + } +} + +func TestJSONError(t *testing.T) { + term, printer := createJSONProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"restore\",\"item\":\"/path\"}\n"}, term.Errors) } diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index f2bd5d38b91..41367f34651 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -7,15 +7,22 @@ import ( "github.com/restic/restic/internal/ui/progress" ) +type State struct { + FilesFinished uint64 + FilesTotal uint64 + FilesSkipped uint64 + FilesDeleted uint64 + AllBytesWritten uint64 + AllBytesTotal uint64 + AllBytesSkipped uint64 +} + type Progress struct { updater progress.Updater m sync.Mutex progressInfoMap map[string]progressInfoEntry - filesFinished uint64 - filesTotal uint64 - allBytesWritten uint64 - allBytesTotal uint64 + s State started time.Time printer ProgressPrinter @@ -26,16 +33,25 @@ type progressInfoEntry struct { bytesTotal uint64 } -type term interface { - Print(line string) - SetStatus(lines []string) -} - type ProgressPrinter interface { - Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) - Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) + Update(progress State, duration time.Duration) + Error(item string, err error) error + CompleteItem(action ItemAction, item string, size uint64) + Finish(progress State, duration time.Duration) } +type ItemAction string + +// Constants for the different CompleteItem actions. 
+const ( + ActionDirRestored ItemAction = "dir restored" + ActionFileRestored ItemAction = "file restored" + ActionFileUpdated ItemAction = "file updated" + ActionFileUnchanged ItemAction = "file unchanged" + ActionOtherRestored ItemAction = "other restored" + ActionDeleted ItemAction = "deleted" +) + func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress { p := &Progress{ progressInfoMap: make(map[string]progressInfoEntry), @@ -51,23 +67,31 @@ func (p *Progress) update(runtime time.Duration, final bool) { defer p.m.Unlock() if !final { - p.printer.Update(p.filesFinished, p.filesTotal, p.allBytesWritten, p.allBytesTotal, runtime) + p.printer.Update(p.s, runtime) } else { - p.printer.Finish(p.filesFinished, p.filesTotal, p.allBytesWritten, p.allBytesTotal, runtime) + p.printer.Finish(p.s, runtime) } } // AddFile starts tracking a new file with the given size func (p *Progress) AddFile(size uint64) { + if p == nil { + return + } + p.m.Lock() defer p.m.Unlock() - p.filesTotal++ - p.allBytesTotal += size + p.s.FilesTotal++ + p.s.AllBytesTotal += size } // AddProgress accumulates the number of bytes written for a file -func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTotal uint64) { +func (p *Progress) AddProgress(name string, action ItemAction, bytesWrittenPortion uint64, bytesTotal uint64) { + if p == nil { + return + } + p.m.Lock() defer p.m.Unlock() @@ -78,13 +102,53 @@ func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTot entry.bytesWritten += bytesWrittenPortion p.progressInfoMap[name] = entry - p.allBytesWritten += bytesWrittenPortion + p.s.AllBytesWritten += bytesWrittenPortion if entry.bytesWritten == entry.bytesTotal { delete(p.progressInfoMap, name) - p.filesFinished++ + p.s.FilesFinished++ + + p.printer.CompleteItem(action, name, bytesTotal) } } +func (p *Progress) AddSkippedFile(name string, size uint64) { + if p == nil { + return + } + + p.m.Lock() + defer p.m.Unlock() + + p.s.FilesSkipped++ + p.s.AllBytesSkipped += size + + p.printer.CompleteItem(ActionFileUnchanged, name, size) +} + +func (p *Progress) ReportDeletion(name string) { + if p == nil { + return + } + + p.s.FilesDeleted++ + + p.m.Lock() + defer p.m.Unlock() + + p.printer.CompleteItem(ActionDeleted, name, 0) +} + +func (p *Progress) Error(item string, err error) error { + if p == nil { + return nil + } + + p.m.Lock() + defer p.m.Unlock() + + return p.printer.Error(item, err) +} + func (p *Progress) Finish() { p.updater.Done() } diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index 9e625aa20b7..b6f72726cb4 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -4,11 +4,12 @@ import ( "testing" "time" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" ) type printerTraceEntry struct { - filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64 + progress State duration time.Duration isFinished bool @@ -16,122 +17,205 @@ type printerTraceEntry struct { type printerTrace []printerTraceEntry +type itemTraceEntry struct { + action ItemAction + item string + size uint64 +} + +type itemTrace []itemTraceEntry + +type errorTraceEntry struct { + item string + err error +} + +type errorTrace []errorTraceEntry + type mockPrinter struct { - trace printerTrace + trace printerTrace + items itemTrace + errors errorTrace } const mockFinishDuration = 42 * time.Second -func (p *mockPrinter) Update(filesFinished, filesTotal, allBytesWritten, 
allBytesTotal uint64, duration time.Duration) { - p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, duration, false}) +func (p *mockPrinter) Update(progress State, duration time.Duration) { + p.trace = append(p.trace, printerTraceEntry{progress, duration, false}) +} +func (p *mockPrinter) Error(item string, err error) error { + p.errors = append(p.errors, errorTraceEntry{item, err}) + return nil } -func (p *mockPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) { - p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, mockFinishDuration, true}) +func (p *mockPrinter) CompleteItem(action ItemAction, item string, size uint64) { + p.items = append(p.items, itemTraceEntry{action, item, size}) +} +func (p *mockPrinter) Finish(progress State, _ time.Duration) { + p.trace = append(p.trace, printerTraceEntry{progress, mockFinishDuration, true}) } -func testProgress(fn func(progress *Progress) bool) printerTrace { +func testProgress(fn func(progress *Progress) bool) (printerTrace, itemTrace, errorTrace) { printer := &mockPrinter{} progress := NewProgress(printer, 0) final := fn(progress) progress.update(0, final) trace := append(printerTrace{}, printer.trace...) + items := append(itemTrace{}, printer.items...) + errors := append(errorTrace{}, printer.errors...) // cleanup to avoid goroutine leak, but copy trace first progress.Finish() - return trace + return trace, items, errors } func TestNew(t *testing.T) { - result := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{0, 0, 0, 0, 0, false}, + printerTraceEntry{State{0, 0, 0, 0, 0, 0, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{}, items) } func TestAddFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) return false }) test.Equals(t, printerTrace{ - printerTraceEntry{0, 1, 0, fileSize, 0, false}, + printerTraceEntry{State{0, 1, 0, 0, 0, fileSize, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{}, items) } func TestFirstProgressOnAFile(t *testing.T) { expectedBytesWritten := uint64(5) expectedBytesTotal := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(expectedBytesTotal) - progress.AddProgress("test", expectedBytesWritten, expectedBytesTotal) + progress.AddProgress("test", ActionFileUpdated, expectedBytesWritten, expectedBytesTotal) return false }) test.Equals(t, printerTrace{ - printerTraceEntry{0, 1, expectedBytesWritten, expectedBytesTotal, 0, false}, + printerTraceEntry{State{0, 1, 0, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{}, items) } func TestLastProgressOnAFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) - progress.AddProgress("test", 30, fileSize) - progress.AddProgress("test", 35, fileSize) - progress.AddProgress("test", 35, fileSize) + progress.AddProgress("test", ActionFileUpdated, 30, fileSize) + progress.AddProgress("test", ActionFileUpdated, 35, fileSize) + progress.AddProgress("test", 
ActionFileUpdated, 35, fileSize) return false }) test.Equals(t, printerTrace{ - printerTraceEntry{1, 1, fileSize, fileSize, 0, false}, + printerTraceEntry{State{1, 1, 0, 0, fileSize, fileSize, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{ + itemTraceEntry{action: ActionFileUpdated, item: "test", size: fileSize}, + }, items) } func TestLastProgressOnLastFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", 50, 50) - progress.AddProgress("test2", 50, fileSize) - progress.AddProgress("test2", 50, fileSize) + progress.AddProgress("test1", ActionFileUpdated, 50, 50) + progress.AddProgress("test2", ActionFileUpdated, 50, fileSize) + progress.AddProgress("test2", ActionFileUpdated, 50, fileSize) return false }) test.Equals(t, printerTrace{ - printerTraceEntry{2, 2, 50 + fileSize, 50 + fileSize, 0, false}, + printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{ + itemTraceEntry{action: ActionFileUpdated, item: "test1", size: 50}, + itemTraceEntry{action: ActionFileUpdated, item: "test2", size: fileSize}, + }, items) } func TestSummaryOnSuccess(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, _, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", 50, 50) - progress.AddProgress("test2", fileSize, fileSize) + progress.AddProgress("test1", ActionFileUpdated, 50, 50) + progress.AddProgress("test2", ActionFileUpdated, fileSize, fileSize) return true }) test.Equals(t, printerTrace{ - printerTraceEntry{2, 2, 50 + fileSize, 50 + fileSize, mockFinishDuration, true}, + printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true}, }, result) } func TestSummaryOnErrors(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, _, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", 50, 50) - progress.AddProgress("test2", fileSize/2, fileSize) + progress.AddProgress("test1", ActionFileUpdated, 50, 50) + progress.AddProgress("test2", ActionFileUpdated, fileSize/2, fileSize) return true }) test.Equals(t, printerTrace{ - printerTraceEntry{1, 2, 50 + fileSize/2, 50 + fileSize, mockFinishDuration, true}, + printerTraceEntry{State{1, 2, 0, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true}, }, result) } + +func TestSkipFile(t *testing.T) { + fileSize := uint64(100) + + result, items, _ := testProgress(func(progress *Progress) bool { + progress.AddSkippedFile("test", fileSize) + return true + }) + test.Equals(t, printerTrace{ + printerTraceEntry{State{0, 0, 1, 0, 0, 0, fileSize}, mockFinishDuration, true}, + }, result) + test.Equals(t, itemTrace{ + itemTraceEntry{ActionFileUnchanged, "test", fileSize}, + }, items) +} + +func TestProgressTypes(t *testing.T) { + fileSize := uint64(100) + + _, items, _ := testProgress(func(progress *Progress) bool { + progress.AddFile(fileSize) + progress.AddFile(0) + progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize) + progress.AddProgress("new", ActionFileRestored, 0, 0) + progress.ReportDeletion("del") + return true + }) + test.Equals(t, itemTrace{ + itemTraceEntry{ActionDirRestored, "dir", fileSize}, 
+ itemTraceEntry{ActionFileRestored, "new", 0}, + itemTraceEntry{ActionDeleted, "del", 0}, + }, items) +} + +func TestProgressError(t *testing.T) { + err1 := errors.New("err1") + err2 := errors.New("err2") + _, _, errors := testProgress(func(progress *Progress) bool { + test.Equals(t, progress.Error("first", err1), nil) + test.Equals(t, progress.Error("second", err2), nil) + return true + }) + test.Equals(t, errorTrace{ + errorTraceEntry{"first", err1}, + errorTraceEntry{"second", err2}, + }, errors) +} diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 2647bb28b1b..35c9db029f7 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -8,39 +8,85 @@ import ( ) type textPrinter struct { - terminal term + *ui.Message + + terminal ui.Terminal } -func NewTextProgress(terminal term) ProgressPrinter { +func NewTextProgress(terminal ui.Terminal, verbosity uint) ProgressPrinter { return &textPrinter{ + Message: ui.NewMessage(terminal, verbosity), terminal: terminal, } } -func (t *textPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *textPrinter) Update(p State, duration time.Duration) { timeLeft := ui.FormatDuration(duration) - formattedAllBytesWritten := ui.FormatBytes(allBytesWritten) - formattedAllBytesTotal := ui.FormatBytes(allBytesTotal) - allPercent := ui.FormatPercent(allBytesWritten, allBytesTotal) + formattedAllBytesWritten := ui.FormatBytes(p.AllBytesWritten) + formattedAllBytesTotal := ui.FormatBytes(p.AllBytesTotal) + allPercent := ui.FormatPercent(p.AllBytesWritten, p.AllBytesTotal) progress := fmt.Sprintf("[%s] %s %v files/dirs %s, total %v files/dirs %v", - timeLeft, allPercent, filesFinished, formattedAllBytesWritten, filesTotal, formattedAllBytesTotal) + timeLeft, allPercent, p.FilesFinished, formattedAllBytesWritten, p.FilesTotal, formattedAllBytesTotal) + if p.FilesSkipped > 0 { + progress += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped)) + } + if p.FilesDeleted > 0 { + progress += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted) + } t.terminal.SetStatus([]string{progress}) } -func (t *textPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { - t.terminal.SetStatus([]string{}) +func (t *textPrinter) Error(item string, err error) error { + t.E("ignoring error for %s: %s\n", item, err) + return nil +} + +func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { + var action string + switch messageType { + case ActionDirRestored: + action = "restored" + case ActionFileRestored: + action = "restored" + case ActionOtherRestored: + action = "restored" + case ActionFileUpdated: + action = "updated" + case ActionFileUnchanged: + action = "unchanged" + case ActionDeleted: + action = "deleted" + default: + panic("unknown message type") + } + + if messageType == ActionDirRestored || messageType == ActionOtherRestored || messageType == ActionDeleted { + t.VV("%-9v %v", action, item) + } else { + t.VV("%-9v %v with size %v", action, item, ui.FormatBytes(size)) + } +} + +func (t *textPrinter) Finish(p State, duration time.Duration) { + t.terminal.SetStatus(nil) timeLeft := ui.FormatDuration(duration) - formattedAllBytesTotal := ui.FormatBytes(allBytesTotal) + formattedAllBytesTotal := ui.FormatBytes(p.AllBytesTotal) var summary string - if filesFinished == filesTotal && allBytesWritten == allBytesTotal { - summary = fmt.Sprintf("Summary: Restored 
%d files/dirs (%s) in %s", filesTotal, formattedAllBytesTotal, timeLeft) + if p.FilesFinished == p.FilesTotal && p.AllBytesWritten == p.AllBytesTotal { + summary = fmt.Sprintf("Summary: Restored %d files/dirs (%s) in %s", p.FilesTotal, formattedAllBytesTotal, timeLeft) } else { - formattedAllBytesWritten := ui.FormatBytes(allBytesWritten) + formattedAllBytesWritten := ui.FormatBytes(p.AllBytesWritten) summary = fmt.Sprintf("Summary: Restored %d / %d files/dirs (%s / %s) in %s", - filesFinished, filesTotal, formattedAllBytesWritten, formattedAllBytesTotal, timeLeft) + p.FilesFinished, p.FilesTotal, formattedAllBytesWritten, formattedAllBytesTotal, timeLeft) + } + if p.FilesSkipped > 0 { + summary += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped)) + } + if p.FilesDeleted > 0 { + summary += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted) } t.terminal.Print(summary) diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index fc03904ff08..746700cd897 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -4,38 +4,68 @@ import ( "testing" "time" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" ) -type mockTerm struct { - output []string +func createTextProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} + printer := NewTextProgress(term, 3) + return term, printer } -func (m *mockTerm) Print(line string) { - m.output = append(m.output, line) +func TestPrintUpdate(t *testing.T) { + term, printer := createTextProgress() + printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) + test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.Output) } -func (m *mockTerm) SetStatus(lines []string) { - m.output = append([]string{}, lines...) 
-} - -func TestPrintUpdate(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) - printer.Update(3, 11, 29, 47, 5*time.Second) - test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.output) +func TestPrintUpdateWithSkipped(t *testing.T) { + term, printer := createTextProgress() + printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second) + test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.Output) } func TestPrintSummaryOnSuccess(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) - printer.Finish(11, 11, 47, 47, 5*time.Second) - test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.output) + term, printer := createTextProgress() + printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second) + test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.Output) } func TestPrintSummaryOnErrors(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) - printer.Finish(3, 11, 29, 47, 5*time.Second) - test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.output) + term, printer := createTextProgress() + printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) + test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.Output) +} + +func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) { + term, printer := createTextProgress() + printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second) + test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.Output) +} + +func TestPrintCompleteItem(t *testing.T) { + for _, data := range []struct { + action ItemAction + size uint64 + expected string + }{ + {ActionDirRestored, 0, "restored test"}, + {ActionFileRestored, 123, "restored test with size 123 B"}, + {ActionOtherRestored, 0, "restored test"}, + {ActionFileUpdated, 123, "updated test with size 123 B"}, + {ActionFileUnchanged, 123, "unchanged test with size 123 B"}, + {ActionDeleted, 0, "deleted test"}, + } { + term, printer := createTextProgress() + printer.CompleteItem(data.action, "test", data.size) + test.Equals(t, []string{data.expected}, term.Output) + } +} + +func TestError(t *testing.T) { + term, printer := createTextProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"ignoring error for /path: error \"message\"\n"}, term.Errors) } diff --git a/internal/ui/stdio_wrapper.go b/internal/ui/stdio_wrapper.go deleted file mode 100644 index 42f4cc54523..00000000000 --- a/internal/ui/stdio_wrapper.go +++ /dev/null @@ -1,72 +0,0 @@ -package ui - -import ( - "bytes" - "io" - - "github.com/restic/restic/internal/ui/termstatus" -) - -// StdioWrapper provides stdout and stderr integration with termstatus. -type StdioWrapper struct { - stdout *lineWriter - stderr *lineWriter -} - -// NewStdioWrapper initializes a new stdio wrapper that can be used in place of -// os.Stdout or os.Stderr. -func NewStdioWrapper(term *termstatus.Terminal) *StdioWrapper { - return &StdioWrapper{ - stdout: newLineWriter(term.Print), - stderr: newLineWriter(term.Error), - } -} - -// Stdout returns a writer that is line buffered and can be used in place of -// os.Stdout. On Close(), the remaining bytes are written, followed by a line -// break. 
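Note that this line-buffered writer is moved rather than deleted: it reappears below as internal/ui/termstatus/stdio_wrapper.go, with the StdioWrapper struct collapsed into a single WrapStdio helper. Call sites shrink accordingly (sketch, assuming a *termstatus.Terminal named term):

	// before: w := ui.NewStdioWrapper(term); stdout, stderr := w.Stdout(), w.Stderr()
	stdout, stderr := termstatus.WrapStdio(term)
	defer func() { _ = stdout.Close(); _ = stderr.Close() }() // flush any partial line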
-func (w *StdioWrapper) Stdout() io.WriteCloser { - return w.stdout -} - -// Stderr returns a writer that is line buffered and can be used in place of -// os.Stderr. On Close(), the remaining bytes are written, followed by a line -// break. -func (w *StdioWrapper) Stderr() io.WriteCloser { - return w.stderr -} - -type lineWriter struct { - buf *bytes.Buffer - print func(string) -} - -var _ io.WriteCloser = &lineWriter{} - -func newLineWriter(print func(string)) *lineWriter { - return &lineWriter{buf: bytes.NewBuffer(nil), print: print} -} - -func (w *lineWriter) Write(data []byte) (n int, err error) { - n, err = w.buf.Write(data) - if err != nil { - return n, err - } - - // look for line breaks - buf := w.buf.Bytes() - i := bytes.LastIndexByte(buf, '\n') - if i != -1 { - w.print(string(buf[:i+1])) - w.buf.Next(i + 1) - } - - return n, err -} - -func (w *lineWriter) Close() error { - if w.buf.Len() > 0 { - w.print(string(append(w.buf.Bytes(), '\n'))) - } - return nil -} diff --git a/internal/ui/table/table.go b/internal/ui/table/table.go index c3ae47f5462..1c535cadbec 100644 --- a/internal/ui/table/table.go +++ b/internal/ui/table/table.go @@ -6,6 +6,8 @@ import ( "strings" "text/template" + + "github.com/restic/restic/internal/ui" ) // Table contains data for a table to be printed. @@ -89,7 +91,7 @@ func printLine(w io.Writer, print func(io.Writer, string) error, sep string, dat } // apply padding - pad := widths[fieldNum] - len(v) + pad := widths[fieldNum] - ui.TerminalDisplayWidth(v) if pad > 0 { v += strings.Repeat(" ", pad) } @@ -139,16 +141,18 @@ func (t *Table) Write(w io.Writer) error { columnWidths := make([]int, columns) for i, desc := range t.columns { for _, line := range strings.Split(desc, "\n") { - if columnWidths[i] < len(line) { - columnWidths[i] = len(desc) + width := ui.TerminalDisplayWidth(line) + if columnWidths[i] < width { + columnWidths[i] = width } } } for _, line := range lines { for i, content := range line { for _, l := range strings.Split(content, "\n") { - if columnWidths[i] < len(l) { - columnWidths[i] = len(l) + width := ui.TerminalDisplayWidth(l) + if columnWidths[i] < width { + columnWidths[i] = width } } } @@ -159,7 +163,7 @@ func (t *Table) Write(w io.Writer) error { for _, width := range columnWidths { totalWidth += width } - totalWidth += (columns - 1) * len(t.CellSeparator) + totalWidth += (columns - 1) * ui.TerminalDisplayWidth(t.CellSeparator) // write header if len(t.columns) > 0 { diff --git a/internal/ui/table/table_test.go b/internal/ui/table/table_test.go index db116bbc512..2902860b90e 100644 --- a/internal/ui/table/table_test.go +++ b/internal/ui/table/table_test.go @@ -29,6 +29,21 @@ first column ---------------------- data: first data field ---------------------- +`, + }, + { + func(t testing.TB) *Table { + table := New() + table.AddColumn("first\ncolumn", "{{.First}}") + table.AddRow(struct{ First string }{"data"}) + return table + }, + ` +first +column +------ +data +------ `, }, { @@ -126,7 +141,7 @@ foo 2018-08-19 22:22:22 xxx other /home/user/other Time string Tags, Dirs []string } - table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work", "go"}, []string{"/home/user/work", "/home/user/go"}}) + table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work", "go’s"}, []string{"/home/user/work", "/home/user/go"}}) table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}}) table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other", "bar"}, []string{"/home/user/other"}}) return table @@ 
diff --git a/internal/ui/terminal.go b/internal/ui/terminal.go
new file mode 100644
index 00000000000..2d9418a61f0
--- /dev/null
+++ b/internal/ui/terminal.go
@@ -0,0 +1,10 @@
+package ui
+
+// Terminal is used to write messages and display status lines which can be
+// updated. See termstatus.Terminal for a concrete implementation.
+type Terminal interface {
+	Print(line string)
+	Error(line string)
+	SetStatus(lines []string)
+	CanUpdateStatus() bool
+}
diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go
index fc731b02339..e6533095871 100644
--- a/internal/ui/termstatus/status.go
+++ b/internal/ui/termstatus/status.go
@@ -212,7 +212,7 @@ func (t *Terminal) runWithoutStatus(ctx context.Context) {
 			}
 
 			if _, err := io.WriteString(dst, msg.line); err != nil {
-				fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
+				_, _ = fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
 			}
 
 			if flush == nil {
@@ -220,16 +220,18 @@
 			if err := flush(); err != nil {
-				fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
+				_, _ = fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
 			}
 
 		case stat := <-t.status:
 			for _, line := range stat.lines {
 				// Ensure that each message ends with exactly one newline.
-				fmt.Fprintln(t.wr, strings.TrimRight(line, "\n"))
+				if _, err := fmt.Fprintln(t.wr, strings.TrimRight(line, "\n")); err != nil {
+					_, _ = fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
+				}
 			}
 
 			if err := t.wr.Flush(); err != nil {
-				fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
+				_, _ = fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
 			}
 		}
 	}
@@ -252,23 +254,11 @@ func (t *Terminal) Print(line string) {
 	t.print(line, false)
 }
 
-// Printf uses fmt.Sprintf to write a line to the terminal.
-func (t *Terminal) Printf(msg string, args ...interface{}) {
-	s := fmt.Sprintf(msg, args...)
-	t.Print(s)
-}
-
 // Error writes an error to the terminal.
 func (t *Terminal) Error(line string) {
 	t.print(line, true)
 }
 
-// Errorf uses fmt.Sprintf to write an error line to the terminal.
-func (t *Terminal) Errorf(msg string, args ...interface{}) {
-	s := fmt.Sprintf(msg, args...)
-	t.Error(s)
-}
-
 // Truncate s to fit in width (number of terminal cells) w.
 // If w is negative, returns the empty string.
 func Truncate(s string, w int) string {
@@ -327,11 +317,8 @@ func sanitizeLines(lines []string, width int) []string {
 
 // SetStatus updates the status lines.
 // The lines should not contain newlines; this method adds them.
+// Pass nil or an empty array to remove the status lines.
 func (t *Terminal) SetStatus(lines []string) {
-	if len(lines) == 0 {
-		return
-	}
-
 	// only truncate interactive status output
 	var width int
 	if t.canUpdateStatus {
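The new ui.Terminal interface above decouples progress printers from the concrete termstatus implementation. A sketch of an in-memory fake that satisfies it, handy in tests (the type and field names here are illustrative, not part of the patch):

    // memTerm collects output instead of writing to a TTY.
    type memTerm struct {
    	out, errs, status []string
    }

    func (t *memTerm) Print(line string) { t.out = append(t.out, line) }
    func (t *memTerm) Error(line string) { t.errs = append(t.errs, line) }
    func (t *memTerm) SetStatus(lines []string) {
    	t.status = append(t.status[:0], lines...)
    }
    func (t *memTerm) CanUpdateStatus() bool { return false }

    // compile-time check that memTerm implements ui.Terminal
    var _ ui.Terminal = (*memTerm)(nil)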
diff --git a/internal/ui/termstatus/status_test.go b/internal/ui/termstatus/status_test.go
index 997a2d7b1cb..2a17a905a42 100644
--- a/internal/ui/termstatus/status_test.go
+++ b/internal/ui/termstatus/status_test.go
@@ -32,6 +32,15 @@ func TestSetStatus(t *testing.T) {
 	term.SetStatus([]string{"first"})
 	exp := home + clear + "first" + home
 
+	term.SetStatus([]string{""})
+	exp += home + clear + "" + home
+
+	term.SetStatus([]string{})
+	exp += home + clear + "" + home
+
+	// already empty status
+	term.SetStatus([]string{})
+
 	term.SetStatus([]string{"foo", "bar", "baz"})
 	exp += home + clear + "foo\n" + home + clear + "bar\n" + home + clear + "baz" + home + up + up
diff --git a/internal/ui/termstatus/stdio_wrapper.go b/internal/ui/termstatus/stdio_wrapper.go
new file mode 100644
index 00000000000..233610ba397
--- /dev/null
+++ b/internal/ui/termstatus/stdio_wrapper.go
@@ -0,0 +1,47 @@
+package termstatus
+
+import (
+	"bytes"
+	"io"
+)
+
+// WrapStdio returns line-buffering replacements for os.Stdout and os.Stderr.
+// On Close, the remaining bytes are written, followed by a line break.
+func WrapStdio(term *Terminal) (stdout, stderr io.WriteCloser) {
+	return newLineWriter(term.Print), newLineWriter(term.Error)
+}
+
+type lineWriter struct {
+	buf   bytes.Buffer
+	print func(string)
+}
+
+var _ io.WriteCloser = &lineWriter{}
+
+func newLineWriter(print func(string)) *lineWriter {
+	return &lineWriter{print: print}
+}
+
+func (w *lineWriter) Write(data []byte) (n int, err error) {
+	n, err = w.buf.Write(data)
+	if err != nil {
+		return n, err
+	}
+
+	// look for line breaks
+	buf := w.buf.Bytes()
+	i := bytes.LastIndexByte(buf, '\n')
+	if i != -1 {
+		w.print(string(buf[:i+1]))
+		w.buf.Next(i + 1)
+	}
+
+	return n, err
+}
+
+func (w *lineWriter) Close() error {
+	if w.buf.Len() > 0 {
+		w.print(string(append(w.buf.Bytes(), '\n')))
+	}
+	return nil
+}
diff --git a/internal/ui/stdio_wrapper_test.go b/internal/ui/termstatus/stdio_wrapper_test.go
similarity index 98%
rename from internal/ui/stdio_wrapper_test.go
rename to internal/ui/termstatus/stdio_wrapper_test.go
index b95d9180da0..1e214f1f4f4 100644
--- a/internal/ui/stdio_wrapper_test.go
+++ b/internal/ui/termstatus/stdio_wrapper_test.go
@@ -1,4 +1,4 @@
-package ui
+package termstatus
 
 import (
 	"strings"
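WrapStdio, moved above into termstatus, buffers output until a newline so that partial writes do not tear the status lines. A usage sketch, assuming term is a *termstatus.Terminal that is already running:

    stdout, stderr := termstatus.WrapStdio(term)
    defer func() {
    	_ = stdout.Close() // flushes a trailing unterminated line, adding "\n"
    	_ = stderr.Close()
    }()

    fmt.Fprint(stdout, "hello ")      // buffered: no newline seen yet
    fmt.Fprint(stdout, "world\nnext") // "hello world\n" reaches term.Print; "next" stays buffered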
diff --git a/internal/walker/rewriter.go b/internal/walker/rewriter.go
index 64985703231..968ef44f352 100644
--- a/internal/walker/rewriter.go
+++ b/internal/walker/rewriter.go
@@ -11,6 +11,12 @@ import (
 type NodeRewriteFunc func(node *restic.Node, path string) *restic.Node
 type FailedTreeRewriteFunc func(nodeID restic.ID, path string, err error) (restic.ID, error)
+type QueryRewrittenSizeFunc func() SnapshotSize
+
+type SnapshotSize struct {
+	FileCount uint
+	FileSize  uint64
+}
 
 type RewriteOpts struct {
 	// return nil to remove the node
@@ -39,19 +45,42 @@ func NewTreeRewriter(opts RewriteOpts) *TreeRewriter {
 	}
 	// setup default implementations
 	if rw.opts.RewriteNode == nil {
-		rw.opts.RewriteNode = func(node *restic.Node, path string) *restic.Node {
+		rw.opts.RewriteNode = func(node *restic.Node, _ string) *restic.Node {
 			return node
 		}
 	}
 	if rw.opts.RewriteFailedTree == nil {
 		// fail with error by default
-		rw.opts.RewriteFailedTree = func(nodeID restic.ID, path string, err error) (restic.ID, error) {
+		rw.opts.RewriteFailedTree = func(_ restic.ID, _ string, err error) (restic.ID, error) {
 			return restic.ID{}, err
 		}
 	}
 	return rw
 }
 
+func NewSnapshotSizeRewriter(rewriteNode NodeRewriteFunc) (*TreeRewriter, QueryRewrittenSizeFunc) {
+	var count uint
+	var size uint64
+
+	t := NewTreeRewriter(RewriteOpts{
+		RewriteNode: func(node *restic.Node, path string) *restic.Node {
+			node = rewriteNode(node, path)
+			if node != nil && node.Type == restic.NodeTypeFile {
+				count++
+				size += node.Size
+			}
+			return node
+		},
+		DisableNodeCache: true,
+	})
+
+	ss := func() SnapshotSize {
+		return SnapshotSize{count, size}
+	}
+
+	return t, ss
+}
+
 type BlobLoadSaver interface {
 	restic.BlobSaver
 	restic.BlobLoader
@@ -87,13 +116,17 @@ func (t *TreeRewriter) RewriteTree(ctx context.Context, repo BlobLoadSaver, node
 	tb := restic.NewTreeJSONBuilder()
 	for _, node := range curTree.Nodes {
+		if ctx.Err() != nil {
+			return restic.ID{}, ctx.Err()
+		}
+
 		path := path.Join(nodepath, node.Name)
 		node = t.opts.RewriteNode(node, path)
 		if node == nil {
 			continue
 		}
 
-		if node.Type != "dir" {
+		if node.Type != restic.NodeTypeDir {
 			err = tb.AddNode(node)
 			if err != nil {
 				return restic.ID{}, err
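NewSnapshotSizeRewriter above wraps a node filter and tallies the files that survive it; the returned closure only has a meaningful result after RewriteTree has run. A sketch under the signatures in this patch (ctx, repo, and root are assumed to be in scope):

    rewriter, querySize := walker.NewSnapshotSizeRewriter(func(node *restic.Node, path string) *restic.Node {
    	if path == "/bar" { // hypothetical exclude
    		return nil // returning nil drops the node from the rewritten tree
    	}
    	return node
    })

    newRoot, err := rewriter.RewriteTree(ctx, repo, "/", root)
    if err != nil {
    	return err
    }

    ss := querySize() // counts only file nodes that were kept
    fmt.Printf("rewritten tree %v: %d files, %d bytes\n", newRoot, ss.FileCount, ss.FileSize)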
diff --git a/internal/walker/rewriter_test.go b/internal/walker/rewriter_test.go
index e5fcb9915cb..58dd25cd017 100644
--- a/internal/walker/rewriter_test.go
+++ b/internal/walker/rewriter_test.go
@@ -110,7 +110,7 @@ func checkIncreaseNodeSize(increase uint64) checkRewriteFunc {
 	return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) {
 		rewriter = NewTreeRewriter(RewriteOpts{
 			RewriteNode: func(node *restic.Node, path string) *restic.Node {
-				if node.Type == "file" {
+				if node.Type == restic.NodeTypeFile {
 					node.Size += increase
 				}
 				return node
@@ -303,6 +303,60 @@ func TestRewriter(t *testing.T) {
 	}
 }
 
+func TestSnapshotSizeQuery(t *testing.T) {
+	tree := TestTree{
+		"foo": TestFile{Size: 21},
+		"bar": TestFile{Size: 21},
+		"subdir": TestTree{
+			"subfile": TestFile{Size: 21},
+		},
+	}
+	newTree := TestTree{
+		"foo": TestFile{Size: 42},
+		"subdir": TestTree{
+			"subfile": TestFile{Size: 42},
+		},
+	}
+	t.Run("", func(t *testing.T) {
+		repo, root := BuildTreeMap(tree)
+		expRepo, expRoot := BuildTreeMap(newTree)
+		modrepo := WritableTreeMap{repo}
+
+		ctx, cancel := context.WithCancel(context.TODO())
+		defer cancel()
+
+		rewriteNode := func(node *restic.Node, path string) *restic.Node {
+			if path == "/bar" {
+				return nil
+			}
+			if node.Type == restic.NodeTypeFile {
+				node.Size += 21
+			}
+			return node
+		}
+		rewriter, querySize := NewSnapshotSizeRewriter(rewriteNode)
+		newRoot, err := rewriter.RewriteTree(ctx, modrepo, "/", root)
+		if err != nil {
+			t.Error(err)
+		}
+
+		ss := querySize()
+
+		test.Equals(t, uint(2), ss.FileCount, "snapshot file count mismatch")
+		test.Equals(t, uint64(84), ss.FileSize, "snapshot size mismatch")
+
+		// verifying against the expected tree root also implicitly checks the structural integrity
+		if newRoot != expRoot {
+			t.Error("hash mismatch")
+			fmt.Println("Got")
+			modrepo.Dump()
+			fmt.Println("Expected")
+			WritableTreeMap{expRepo}.Dump()
+		}
+	})
+
+}
+
 func TestRewriterFailOnUnknownFields(t *testing.T) {
 	tm := WritableTreeMap{TreeMap{}}
 	node := []byte(`{"nodes":[{"name":"subfile","type":"file","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","uid":0,"gid":0,"content":null,"unknown_field":42}]}`)
diff --git a/internal/walker/walker.go b/internal/walker/walker.go
index 091b0548988..252bc3530e0 100644
--- a/internal/walker/walker.go
+++ b/internal/walker/walker.go
@@ -28,7 +28,7 @@ type WalkVisitor struct {
 	// was returned. This function is mandatory
 	ProcessNode WalkFunc
 	// Optional callback
-	LeaveDir func(path string)
+	LeaveDir func(path string) error
 }
 
 // Walk calls walkFn recursively for each node in root. If walkFn returns an
@@ -57,13 +57,17 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree
 	})
 
 	for _, node := range tree.Nodes {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
 		p := path.Join(prefix, node.Name)
 
-		if node.Type == "" {
+		if node.Type == restic.NodeTypeInvalid {
 			return errors.Errorf("node type is empty for node %q", node.Name)
 		}
 
-		if node.Type != "dir" {
+		if node.Type != restic.NodeTypeDir {
 			err := visitor.ProcessNode(parentTreeID, p, node, nil)
 			if err != nil {
 				if err == ErrSkipNode {
@@ -96,7 +100,7 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree
 	}
 
 	if visitor.LeaveDir != nil {
-		visitor.LeaveDir(prefix)
+		return visitor.LeaveDir(prefix)
 	}
 
 	return nil
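With LeaveDir now returning an error, a visitor can abort the walk when post-directory work fails. A sketch, assuming an exported Walk(ctx, repo, root, visitor) wrapper around the walk function shown above:

    err := walker.Walk(ctx, repo, root, walker.WalkVisitor{
    	ProcessNode: func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) error {
    		if err != nil {
    			return err // propagate load errors
    		}
    		fmt.Println("enter:", nodepath)
    		return nil
    	},
    	LeaveDir: func(nodepath string) error {
    		fmt.Println("leave:", nodepath)
    		return nil // a non-nil error now cancels the whole walk
    	},
    })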
diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go
index 0f0009107c5..3614a2397e4 100644
--- a/internal/walker/walker_test.go
+++ b/internal/walker/walker_test.go
@@ -13,7 +13,7 @@ import (
 // TestTree is used to construct a list of trees for testing the walker.
 type TestTree map[string]interface{}
 
-// TestNode is used to test the walker.
+// TestFile is used to test the walker.
 type TestFile struct {
 	Size uint64
 }
@@ -38,7 +38,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
 		case TestFile:
 			err := tb.AddNode(&restic.Node{
 				Name: name,
-				Type: "file",
+				Type: restic.NodeTypeFile,
 				Size: elem.Size,
 			})
 			if err != nil {
@@ -49,7 +49,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
 			err := tb.AddNode(&restic.Node{
 				Name:    name,
 				Subtree: &id,
-				Type:    "dir",
+				Type:    restic.NodeTypeDir,
 			})
 			if err != nil {
 				panic(err)
@@ -93,12 +93,12 @@ func (t TreeMap) Connections() uint {
 
 // checkFunc returns a function suitable for walking the tree to check
 // something, and a function which will check the final result.
-type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB))
+type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB))
 
 // checkItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'.
 func checkItemOrder(want []string) checkFunc {
 	pos := 0
-	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) {
+	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) {
 		walker = func(treeID restic.ID, path string, node *restic.Node, err error) error {
 			if err != nil {
 				t.Errorf("error walking %v: %v", path, err)
@@ -117,8 +117,8 @@ func checkItemOrder(want []string) checkFunc {
 			return nil
 		}
 
-		leaveDir = func(path string) {
-			_ = walker(restic.ID{}, "leave: "+path, nil, nil)
+		leaveDir = func(path string) error {
+			return walker(restic.ID{}, "leave: "+path, nil, nil)
 		}
 
 		final = func(t testing.TB) {
@@ -134,7 +134,7 @@
 // checkParentTreeOrder ensures that the order of the 'parentID' arguments is the one passed in as 'want'.
 func checkParentTreeOrder(want []string) checkFunc {
 	pos := 0
-	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) {
+	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) {
 		walker = func(treeID restic.ID, path string, node *restic.Node, err error) error {
 			if err != nil {
 				t.Errorf("error walking %v: %v", path, err)
@@ -168,7 +168,7 @@ func checkParentTreeOrder(want []string) checkFunc {
 
 func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 	var pos int
-	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) {
+	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) {
 		walker = func(treeID restic.ID, path string, node *restic.Node, err error) error {
 			if err != nil {
 				t.Errorf("error walking %v: %v", path, err)
@@ -192,8 +192,8 @@ func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 			return nil
 		}
 
-		leaveDir = func(path string) {
-			_ = walker(restic.ID{}, "leave: "+path, nil, nil)
+		leaveDir = func(path string) error {
+			return walker(restic.ID{}, "leave: "+path, nil, nil)
 		}
 
 		final = func(t testing.TB) {