Merge develop
Fix conflicting schema version
11
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
|
|
@ -6,6 +6,15 @@ body:
|
|||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report!
|
||||
- type: checkboxes
|
||||
id: confirm-troubleshooting
|
||||
attributes:
|
||||
label: Have you enabled troubleshooting mode?
|
||||
description: |
|
||||
To ensure the bug is not caused by custom modifications or plugins make sure to enable troubleshooting mode before filing the report. In Stash go to Settings and click **Troubleshooting mode** and then retest to see if the bug still occurs.
|
||||
options:
|
||||
- label: I confirm that the troubleshooting mode is enabled.
|
||||
required: true
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
|
|
@ -61,4 +70,4 @@ body:
|
|||
attributes:
|
||||
label: Relevant log output
|
||||
description: Please copy and paste any relevant log output from Settings > Logs. This will be automatically formatted into code, so no need for backticks.
|
||||
render: shell
|
||||
render: shell
|
||||
|
|
|
|||
28
.github/workflows/build-compiler.yml
vendored
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
name: Compiler Build
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
COMPILER_IMAGE: ghcr.io/stashapp/compiler:13
|
||||
|
||||
jobs:
|
||||
build-compiler:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
- uses: docker/build-push-action@v6
|
||||
with:
|
||||
push: true
|
||||
context: "{{defaultContext}}:docker/compiler"
|
||||
tags: |
|
||||
${{ env.COMPILER_IMAGE }}
|
||||
ghcr.io/stashapp/compiler:latest
|
||||
cache-from: type=gha,scope=all,mode=max
|
||||
cache-to: type=gha,scope=all,mode=max
|
||||
270
.github/workflows/build.yml
vendored
|
|
@ -2,7 +2,7 @@ name: Build
|
|||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
branches:
|
||||
- develop
|
||||
- master
|
||||
- 'releases/**'
|
||||
|
|
@ -15,50 +15,163 @@ concurrency:
|
|||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
COMPILER_IMAGE: stashapp/compiler:12
|
||||
COMPILER_IMAGE: ghcr.io/stashapp/compiler:13
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
# Job 1: Generate code and build UI
|
||||
# Runs natively (no Docker) — go generate/gqlgen and node don't need cross-compilers.
|
||||
# Produces artifacts (generated Go files + UI build) consumed by test and build jobs.
|
||||
generate:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v6
|
||||
|
||||
- name: Checkout
|
||||
run: git fetch --prune --unshallow --tags
|
||||
# pnpm version is read from the packageManager field in package.json
|
||||
# very broken (4.3, 4.4)
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061
|
||||
with:
|
||||
package_json_file: ui/v2.5/package.json
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'pnpm'
|
||||
cache-dependency-path: ui/v2.5/pnpm-lock.yaml
|
||||
|
||||
- name: Install UI dependencies
|
||||
run: cd ui/v2.5 && pnpm install --frozen-lockfile
|
||||
|
||||
- name: Generate
|
||||
run: make generate
|
||||
|
||||
- name: Cache UI build
|
||||
uses: actions/cache@v5
|
||||
id: cache-ui
|
||||
with:
|
||||
path: ui/v2.5/build
|
||||
key: ${{ runner.os }}-ui-build-${{ hashFiles('ui/v2.5/pnpm-lock.yaml', 'ui/v2.5/public/**', 'ui/v2.5/src/**', 'graphql/**/*.graphql') }}
|
||||
|
||||
- name: Validate UI
|
||||
# skip UI validation for pull requests if UI is unchanged
|
||||
if: ${{ github.event_name != 'pull_request' || steps.cache-ui.outputs.cache-hit != 'true' }}
|
||||
run: make validate-ui
|
||||
|
||||
- name: Build UI
|
||||
# skip UI build for pull requests if UI is unchanged (UI was cached)
|
||||
if: ${{ github.event_name != 'pull_request' || steps.cache-ui.outputs.cache-hit != 'true' }}
|
||||
run: make ui
|
||||
|
||||
# Bundle generated Go files + UI build for downstream jobs (test + build)
|
||||
- name: Upload generated artifacts
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: generated
|
||||
retention-days: 1
|
||||
path: |
|
||||
internal/api/generated_exec.go
|
||||
internal/api/generated_models.go
|
||||
ui/v2.5/build/
|
||||
ui/login/locales/
|
||||
|
||||
# Job 2: Integration tests
|
||||
# Runs natively (no Docker) — only needs Go + GCC (for CGO/SQLite), both on ubuntu-24.04.
|
||||
# Runs in parallel with the build matrix jobs.
|
||||
test:
|
||||
needs: generate
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
|
||||
- name: Pull compiler image
|
||||
run: docker pull $COMPILER_IMAGE
|
||||
|
||||
- name: Cache node modules
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-node_modules
|
||||
# Places generated Go files + UI build into the working tree so the build compiles
|
||||
- name: Download generated artifacts
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
path: ui/v2.5/node_modules
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('ui/v2.5/pnpm-lock.yaml') }}
|
||||
name: generated
|
||||
|
||||
- name: Cache UI build
|
||||
uses: actions/cache@v3
|
||||
id: cache-ui
|
||||
env:
|
||||
cache-name: cache-ui
|
||||
- name: Test Backend
|
||||
run: make it
|
||||
|
||||
# Job 3: Cross-compile for all platforms
|
||||
# Each platform gets its own runner and Docker container (ghcr.io/stashapp/compiler:13).
|
||||
# Each build-cc-* make target is self-contained (sets its own GOOS/GOARCH/CC),
|
||||
# so running them in separate containers is functionally identical to one container.
|
||||
# Runs in parallel with the test job.
|
||||
build:
|
||||
needs: generate
|
||||
runs-on: ubuntu-24.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- platform: windows
|
||||
make-target: build-cc-windows
|
||||
artifact-paths: |
|
||||
dist/stash-win.exe
|
||||
tag: win
|
||||
- platform: macos
|
||||
make-target: build-cc-macos
|
||||
artifact-paths: |
|
||||
dist/stash-macos
|
||||
dist/Stash.app.zip
|
||||
tag: osx
|
||||
- platform: linux
|
||||
make-target: build-cc-linux
|
||||
artifact-paths: |
|
||||
dist/stash-linux
|
||||
tag: linux
|
||||
- platform: linux-arm64v8
|
||||
make-target: build-cc-linux-arm64v8
|
||||
artifact-paths: |
|
||||
dist/stash-linux-arm64v8
|
||||
tag: arm
|
||||
- platform: linux-arm32v7
|
||||
make-target: build-cc-linux-arm32v7
|
||||
artifact-paths: |
|
||||
dist/stash-linux-arm32v7
|
||||
tag: arm
|
||||
- platform: linux-arm32v6
|
||||
make-target: build-cc-linux-arm32v6
|
||||
artifact-paths: |
|
||||
dist/stash-linux-arm32v6
|
||||
tag: arm
|
||||
- platform: freebsd
|
||||
make-target: build-cc-freebsd
|
||||
artifact-paths: |
|
||||
dist/stash-freebsd
|
||||
tag: freebsd
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
path: ui/v2.5/build
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('ui/v2.5/pnpm-lock.yaml', 'ui/v2.5/public/**', 'ui/v2.5/src/**', 'graphql/**/*.graphql') }}
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- name: Cache go build
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
# increment the number suffix to bump the cache
|
||||
cache-name: cache-go-cache-1
|
||||
- name: Download generated artifacts
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
name: generated
|
||||
|
||||
- name: Cache Go build
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: .go-cache
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('go.mod', '**/go.sum') }}
|
||||
key: ${{ runner.os }}-go-cache-${{ matrix.platform }}-${{ hashFiles('go.mod', '**/go.sum') }}
|
||||
|
||||
# kept separate to test timings
|
||||
- name: pull compiler image
|
||||
run: docker pull $COMPILER_IMAGE
|
||||
|
||||
- name: Start build container
|
||||
env:
|
||||
|
|
@ -67,45 +180,50 @@ jobs:
|
|||
mkdir -p .go-cache
|
||||
docker run -d --name build --mount type=bind,source="$(pwd)",target=/stash,consistency=delegated --mount type=bind,source="$(pwd)/.go-cache",target=/root/.cache/go-build,consistency=delegated --env OFFICIAL_BUILD=${{ env.official-build }} -w /stash $COMPILER_IMAGE tail -f /dev/null
|
||||
|
||||
- name: Pre-install
|
||||
run: docker exec -t build /bin/bash -c "make CI=1 pre-ui"
|
||||
|
||||
- name: Generate
|
||||
run: docker exec -t build /bin/bash -c "make generate"
|
||||
|
||||
- name: Validate UI
|
||||
# skip UI validation for pull requests if UI is unchanged
|
||||
if: ${{ github.event_name != 'pull_request' || steps.cache-ui.outputs.cache-hit != 'true' }}
|
||||
run: docker exec -t build /bin/bash -c "make validate-ui"
|
||||
|
||||
# Static validation happens in the linter workflow in parallel to this workflow
|
||||
# Run Dynamic validation here, to make sure we pass all the projects integration tests
|
||||
- name: Test Backend
|
||||
run: docker exec -t build /bin/bash -c "make it"
|
||||
|
||||
- name: Build UI
|
||||
# skip UI build for pull requests if UI is unchanged (UI was cached)
|
||||
# this means that the build version/time may be incorrect if the UI is
|
||||
# not changed in a pull request
|
||||
if: ${{ github.event_name != 'pull_request' || steps.cache-ui.outputs.cache-hit != 'true' }}
|
||||
run: docker exec -t build /bin/bash -c "make ui"
|
||||
|
||||
- name: Compile for all supported platforms
|
||||
run: |
|
||||
docker exec -t build /bin/bash -c "make build-cc-windows"
|
||||
docker exec -t build /bin/bash -c "make build-cc-macos"
|
||||
docker exec -t build /bin/bash -c "make build-cc-linux"
|
||||
docker exec -t build /bin/bash -c "make build-cc-linux-arm64v8"
|
||||
docker exec -t build /bin/bash -c "make build-cc-linux-arm32v7"
|
||||
docker exec -t build /bin/bash -c "make build-cc-linux-arm32v6"
|
||||
docker exec -t build /bin/bash -c "make build-cc-freebsd"
|
||||
|
||||
- name: Zip UI
|
||||
run: docker exec -t build /bin/bash -c "make zip-ui"
|
||||
- name: Build (${{ matrix.platform }})
|
||||
run: docker exec -t build /bin/bash -c "make ${{ matrix.make-target }}"
|
||||
|
||||
- name: Cleanup build container
|
||||
run: docker rm -f -v build
|
||||
|
||||
- name: Upload build artifact
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: build-${{ matrix.platform }}
|
||||
retention-days: 1
|
||||
path: ${{ matrix.artifact-paths }}
|
||||
|
||||
# Job 4: Release
|
||||
# Waits for both test and build to pass, then collects all platform artifacts
|
||||
# into dist/ for checksums, GitHub releases, and multi-arch Docker push.
|
||||
release:
|
||||
needs: [test, build]
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
# Downloads all artifacts (generated + 7 platform builds) into artifacts/ subdirectories
|
||||
- name: Download all build artifacts
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
# Reassemble platform binaries from matrix job artifacts into a single dist/ directory
|
||||
# make sure that artifacts have executable bit set
|
||||
# upload-artifact@v4 strips the common path prefix (dist/), so files are at the artifact root
|
||||
- name: Collect binaries
|
||||
run: |
|
||||
mkdir -p dist
|
||||
cp artifacts/build-*/* dist/
|
||||
chmod +x dist/*
|
||||
|
||||
- name: Zip UI
|
||||
run: |
|
||||
cd artifacts/generated/ui/v2.5/build && zip -r ../../../../../dist/stash-ui.zip .
|
||||
|
||||
- name: Generate checksums
|
||||
run: |
|
||||
git describe --tags --exclude latest_develop | tee CHECKSUMS_SHA1
|
||||
|
|
@ -116,7 +234,7 @@ jobs:
|
|||
- name: Upload Windows binary
|
||||
# only upload binaries for pull requests
|
||||
if: ${{ github.event_name == 'pull_request' && github.base_ref != 'refs/heads/develop' && github.base_ref != 'refs/heads/master'}}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: stash-win.exe
|
||||
path: dist/stash-win.exe
|
||||
|
|
@ -124,15 +242,23 @@ jobs:
|
|||
- name: Upload macOS binary
|
||||
# only upload binaries for pull requests
|
||||
if: ${{ github.event_name == 'pull_request' && github.base_ref != 'refs/heads/develop' && github.base_ref != 'refs/heads/master'}}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: stash-macos
|
||||
path: dist/stash-macos
|
||||
|
||||
- name: Upload macOS bundle
|
||||
# only upload binaries for pull requests
|
||||
if: ${{ github.event_name == 'pull_request' && github.base_ref != 'refs/heads/develop' && github.base_ref != 'refs/heads/master'}}
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: Stash.app.zip
|
||||
path: dist/Stash.app.zip
|
||||
|
||||
- name: Upload Linux binary
|
||||
# only upload binaries for pull requests
|
||||
if: ${{ github.event_name == 'pull_request' && github.base_ref != 'refs/heads/develop' && github.base_ref != 'refs/heads/master'}}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: stash-linux
|
||||
path: dist/stash-linux
|
||||
|
|
@ -140,14 +266,14 @@ jobs:
|
|||
- name: Upload UI
|
||||
# only upload for pull requests
|
||||
if: ${{ github.event_name == 'pull_request' && github.base_ref != 'refs/heads/develop' && github.base_ref != 'refs/heads/master'}}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: stash-ui.zip
|
||||
path: dist/stash-ui.zip
|
||||
|
||||
- name: Update latest_develop tag
|
||||
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' }}
|
||||
run : git tag -f latest_develop; git push -f --tags
|
||||
run: git tag -f latest_develop; git push -f --tags
|
||||
|
||||
- name: Development Release
|
||||
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' }}
|
||||
|
|
@ -197,7 +323,7 @@ jobs:
|
|||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
run: |
|
||||
docker run --rm --privileged docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64
|
||||
docker run --rm --privileged tonistiigi/binfmt
|
||||
docker info
|
||||
docker buildx create --name builder --use
|
||||
docker buildx inspect --bootstrap
|
||||
|
|
@ -213,7 +339,7 @@ jobs:
|
|||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
run: |
|
||||
docker run --rm --privileged docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64
|
||||
docker run --rm --privileged tonistiigi/binfmt
|
||||
docker info
|
||||
docker buildx create --name builder --use
|
||||
docker buildx inspect --bootstrap
|
||||
|
|
|
|||
61
.github/workflows/golangci-lint.yml
vendored
|
|
@ -9,65 +9,20 @@ on:
|
|||
- 'releases/**'
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
COMPILER_IMAGE: stashapp/compiler:12
|
||||
|
||||
jobs:
|
||||
golangci:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Checkout
|
||||
run: git fetch --prune --unshallow --tags
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
|
||||
- name: Pull compiler image
|
||||
run: docker pull $COMPILER_IMAGE
|
||||
|
||||
- name: Start build container
|
||||
run: |
|
||||
mkdir -p .go-cache
|
||||
docker run -d --name build --mount type=bind,source="$(pwd)",target=/stash,consistency=delegated --mount type=bind,source="$(pwd)/.go-cache",target=/root/.cache/go-build,consistency=delegated -w /stash $COMPILER_IMAGE tail -f /dev/null
|
||||
# no tags or depth needed for lint
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-go@v6
|
||||
|
||||
# generate-backend runs natively (just go generate + touch-ui) — no Docker needed
|
||||
- name: Generate Backend
|
||||
run: docker exec -t build /bin/bash -c "make generate-backend"
|
||||
run: make generate-backend
|
||||
|
||||
## WARN
|
||||
## using v1, update in a later PR
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
with:
|
||||
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
|
||||
version: latest
|
||||
|
||||
# Optional: working directory, useful for monorepos
|
||||
# working-directory: somedir
|
||||
|
||||
# Optional: golangci-lint command line arguments.
|
||||
#
|
||||
# Note: By default, the `.golangci.yml` file should be at the root of the repository.
|
||||
# The location of the configuration file can be changed by using `--config=`
|
||||
args: --timeout=5m
|
||||
|
||||
# Optional: show only new issues if it's a pull request. The default value is `false`.
|
||||
# only-new-issues: true
|
||||
|
||||
# Optional: if set to true, then all caching functionality will be completely disabled,
|
||||
# takes precedence over all other caching options.
|
||||
# skip-cache: true
|
||||
|
||||
# Optional: if set to true, then the action won't cache or restore ~/go/pkg.
|
||||
# skip-pkg-cache: true
|
||||
|
||||
# Optional: if set to true, then the action won't cache or restore ~/.cache/go-build.
|
||||
# skip-build-cache: true
|
||||
|
||||
# Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'.
|
||||
# install-mode: "goinstall"
|
||||
|
||||
- name: Cleanup build container
|
||||
run: docker rm -f -v build
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
16
Makefile
|
|
@ -50,7 +50,7 @@ export CGO_ENABLED := 1
|
|||
|
||||
# define COMPILER_IMAGE for cross-compilation docker container
|
||||
ifndef COMPILER_IMAGE
|
||||
COMPILER_IMAGE := stashapp/compiler:latest
|
||||
COMPILER_IMAGE := ghcr.io/stashapp/compiler:latest
|
||||
endif
|
||||
|
||||
.PHONY: release
|
||||
|
|
@ -129,7 +129,7 @@ phasher: build-flags
|
|||
|
||||
# builds dynamically-linked debug binaries
|
||||
.PHONY: build
|
||||
build: stash phasher
|
||||
build: stash
|
||||
|
||||
# builds dynamically-linked PIE release binaries
|
||||
.PHONY: build-release
|
||||
|
|
@ -187,8 +187,6 @@ build-cc-macos:
|
|||
# Combine into universal binaries
|
||||
lipo -create -output dist/stash-macos dist/stash-macos-intel dist/stash-macos-arm
|
||||
rm dist/stash-macos-intel dist/stash-macos-arm
|
||||
lipo -create -output dist/phasher-macos dist/phasher-macos-intel dist/phasher-macos-arm
|
||||
rm dist/phasher-macos-intel dist/phasher-macos-arm
|
||||
|
||||
# Place into bundle and zip up
|
||||
rm -rf dist/Stash.app
|
||||
|
|
@ -198,6 +196,16 @@ build-cc-macos:
|
|||
cd dist && rm -f Stash.app.zip && zip -r Stash.app.zip Stash.app
|
||||
rm -rf dist/Stash.app
|
||||
|
||||
.PHONY: build-cc-macos-phasher
|
||||
build-cc-macos-phasher:
|
||||
make build-cc-macos-arm
|
||||
make build-cc-macos-intel
|
||||
|
||||
# Combine into universal binaries
|
||||
lipo -create -output dist/phasher-macos dist/phasher-macos-intel dist/phasher-macos-arm
|
||||
rm dist/phasher-macos-intel dist/phasher-macos-arm
|
||||
# do not bundle phasher
|
||||
|
||||
.PHONY: build-cc-freebsd
|
||||
build-cc-freebsd: export GOOS := freebsd
|
||||
build-cc-freebsd: export GOARCH := amd64
|
||||
|
|
|
|||
73
README.md
|
|
@ -9,14 +9,14 @@
|
|||
[](https://github.com/stashapp/stash/releases/latest)
|
||||
[](https://github.com/stashapp/stash/labels/bounty)
|
||||
|
||||
### **Stash is a self-hosted webapp written in Go which organizes and serves your diverse content collection, catering to both your SFW and NSFW needs.**
|
||||
<h3>Stash is a self-hosted webapp written in Go which organizes and serves your diverse content collection, catering to both your SFW and NSFW needs.</h3>
|
||||
|
||||

|
||||
|
||||
* Stash gathers information about videos in your collection from the internet, and is extensible through the use of community-built plugins for a large number of content producers and sites.
|
||||
* Stash supports a wide variety of both video and image formats.
|
||||
* You can tag videos and find them later.
|
||||
* Stash provides statistics about performers, tags, studios and more.
|
||||
- Stash gathers information about videos in your collection from the internet, and is extensible through the use of community-built plugins for a large number of content producers and sites.
|
||||
- Stash supports a wide variety of both video and image formats.
|
||||
- You can tag videos and find them later.
|
||||
- Stash provides statistics about performers, tags, studios and more.
|
||||
|
||||
You can [watch a SFW demo video](https://vimeo.com/545323354) to see it in action.
|
||||
|
||||
|
|
@ -24,17 +24,19 @@ For further information you can consult the [documentation](https://docs.stashap
|
|||
|
||||
# Installing Stash
|
||||
|
||||
> [!tip]
|
||||
Step-by-step instructions are available at [docs.stashapp.cc/installation](https://docs.stashapp.cc/installation/).
|
||||
|
||||
#### Windows Users:
|
||||
|
||||
As of version 0.27.0, Stash no longer supports _Windows 7, 8, Server 2008 and Server 2012._
|
||||
At least Windows 10 or Server 2016 is required.
|
||||
|
||||
#### Mac Users:
|
||||
|
||||
As of version 0.29.0, Stash requires _macOS 11 Big Sur_ or later.
|
||||
Stash can still be run through docker on older versions of macOS.
|
||||
> [!important]
|
||||
> **Windows Users**
|
||||
>
|
||||
> As of version 0.27.0, Stash no longer supports _Windows 7, 8, Server 2008 and Server 2012._
|
||||
> At least Windows 10 or Server 2016 is required.
|
||||
>
|
||||
> **macOS Users**
|
||||
>
|
||||
> As of version 0.29.0, Stash requires _macOS 11 Big Sur_ or later.
|
||||
> Stash can still be run through docker on older versions of macOS.
|
||||
|
||||
<img src="docs/readme_assets/windows_logo.svg" width="100%" height="75"> Windows | <img src="docs/readme_assets/mac_logo.svg" width="100%" height="75"> macOS | <img src="docs/readme_assets/linux_logo.svg" width="100%" height="75"> Linux | <img src="docs/readme_assets/docker_logo.svg" width="100%" height="75"> Docker
|
||||
:---:|:---:|:---:|:---:
|
||||
|
|
@ -85,23 +87,36 @@ The badge below shows the current translation status of Stash across all support
|
|||
|
||||
Need help or want to get involved? Start with the documentation, then reach out to the community if you need further assistance.
|
||||
|
||||
- Documentation
|
||||
- Official docs: https://docs.stashapp.cc - official guides and troubleshooting.
|
||||
- In-app manual: press <kbd>Shift</kbd> + <kbd>?</kbd> in the app or view the manual online: https://docs.stashapp.cc/in-app-manual.
|
||||
- FAQ: https://discourse.stashapp.cc/c/support/faq/28 - common questions and answers.
|
||||
- Community wiki: https://discourse.stashapp.cc/tags/c/community-wiki/22/stash - guides, how-to’s and tips.
|
||||
### Documentation
|
||||
- [Official documentation](https://docs.stashapp.cc) - official guides and troubleshooting.
|
||||
- [In-app manual](https://docs.stashapp.cc/in-app-manual) press <kbd>Shift</kbd> + <kbd>?</kbd> in the app or view the manual online.
|
||||
- [FAQ](https://discourse.stashapp.cc/c/support/faq/28) - common questions and answers.
|
||||
- [Community wiki](https://discourse.stashapp.cc/tags/c/community-wiki/22/stash) - guides, how-to’s and tips.
|
||||
|
||||
- Community & discussion
|
||||
- Community forum: https://discourse.stashapp.cc - community support, feature requests and discussions.
|
||||
- Discord: https://discord.gg/2TsNFKt - real-time chat and community support.
|
||||
- GitHub discussions: https://github.com/stashapp/stash/discussions - community support and feature discussions.
|
||||
- Lemmy community: https://discuss.online/c/stashapp - Reddit-style community space.
|
||||
### Community & discussion
|
||||
- [Community forum](https://discourse.stashapp.cc) - community support, feature requests and discussions.
|
||||
- [Discord](https://discord.gg/2TsNFKt) - real-time chat and community support.
|
||||
- [GitHub discussions](https://github.com/stashapp/stash/discussions) - community support and feature discussions.
|
||||
- [Lemmy community](https://discuss.online/c/stashapp) - board-style community space.
|
||||
|
||||
- Community scrapers & plugins
|
||||
- Metadata sources: https://docs.stashapp.cc/metadata-sources/
|
||||
- Plugins: https://docs.stashapp.cc/plugins/
|
||||
- Themes: https://docs.stashapp.cc/themes/
|
||||
- Other projects: https://docs.stashapp.cc/other-projects/
|
||||
### Community scrapers & plugins
|
||||
- [Metadata sources](https://docs.stashapp.cc/metadata-sources/)
|
||||
- [Plugins](https://docs.stashapp.cc/plugins/)
|
||||
- [Themes](https://docs.stashapp.cc/themes/)
|
||||
- [Other projects](https://docs.stashapp.cc/other-projects/)
|
||||
|
||||
# Architecture
|
||||
|
||||
## Backend
|
||||
|
||||
- Go
|
||||
- GraphQL API
|
||||
- SQLite
|
||||
|
||||
## Frontend
|
||||
|
||||
- React
|
||||
- TypeScript
|
||||
|
||||
# For Developers
|
||||
|
||||
|
|
|
|||
|
|
@ -27,11 +27,11 @@ func printPhash(ff *ffmpeg.FFMpeg, ffp *ffmpeg.FFProbe, inputfile string, quiet
|
|||
|
||||
// Common image extensions
|
||||
imageExts := map[string]bool{
|
||||
"jpg": true, "jpeg": true, "png": true, "gif": true, "webp": true, "bmp": true,
|
||||
"jpg": true, "jpeg": true, "png": true, "gif": true, "webp": true, "bmp": true, "avif": true,
|
||||
}
|
||||
|
||||
if imageExts[ext] {
|
||||
return printImagePhash(inputfile, quiet)
|
||||
return printImagePhash(ff, inputfile, quiet)
|
||||
}
|
||||
|
||||
return printVideoPhash(ff, ffp, inputfile, quiet)
|
||||
|
|
@ -65,12 +65,12 @@ func printVideoPhash(ff *ffmpeg.FFMpeg, ffp *ffmpeg.FFProbe, inputfile string, q
|
|||
return nil
|
||||
}
|
||||
|
||||
func printImagePhash(inputfile string, quiet *bool) error {
|
||||
func printImagePhash(ff *ffmpeg.FFMpeg, inputfile string, quiet *bool) error {
|
||||
imgFile := &models.ImageFile{
|
||||
BaseFile: &models.BaseFile{Path: inputfile},
|
||||
}
|
||||
|
||||
phash, err := imagephash.Generate(imgFile)
|
||||
phash, err := imagephash.Generate(ff, imgFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ RUN if [ "$TARGETPLATFORM" = "linux/arm/v6" ]; then BIN=stash-linux-arm32v6; \
|
|||
FROM --platform=$TARGETPLATFORM alpine:latest AS app
|
||||
COPY --from=binary /stash /usr/bin/
|
||||
|
||||
RUN apk add --no-cache ca-certificates python3 py3-requests py3-requests-toolbelt py3-lxml py3-pip ffmpeg tzdata vips vips-tools \
|
||||
RUN apk add --no-cache ca-certificates python3 py3-requests py3-requests-toolbelt py3-lxml py3-pip ffmpeg tzdata vips vips-tools vips-heif \
|
||||
&& pip install --break-system-packages mechanicalsoup cloudscraper stashapp-tools
|
||||
ENV STASH_CONFIG_FILE=/root/.stash/config.yml
|
||||
|
||||
|
|
|
|||
1
docker/compiler/.gitignore
vendored
|
|
@ -1 +0,0 @@
|
|||
*.sdk.tar.*
|
||||
|
|
@ -1,82 +1,86 @@
|
|||
FROM golang:1.24.3
|
||||
### OSXCROSS
|
||||
FROM debian:bookworm AS osxcross
|
||||
# add osxcross
|
||||
WORKDIR /tmp/osxcross
|
||||
ARG OSXCROSS_REVISION=5e1b71fcceb23952f3229995edca1b6231525b5b
|
||||
ADD --checksum=sha256:d3f771bbc20612fea577b18a71be3af2eb5ad2dd44624196cf55de866d008647 https://codeload.github.com/tpoechtrager/osxcross/tar.gz/${OSXCROSS_REVISION} /tmp/osxcross.tar.gz
|
||||
|
||||
LABEL maintainer="https://discord.gg/2TsNFKt"
|
||||
ARG OSX_SDK_VERSION=11.3
|
||||
ARG OSX_SDK_DOWNLOAD_FILE=MacOSX${OSX_SDK_VERSION}.sdk.tar.xz
|
||||
ARG OSX_SDK_DOWNLOAD_URL=https://github.com/phracker/MacOSX-SDKs/releases/download/${OSX_SDK_VERSION}/${OSX_SDK_DOWNLOAD_FILE}
|
||||
ADD --checksum=sha256:cd4f08a75577145b8f05245a2975f7c81401d75e9535dcffbb879ee1deefcbf4 ${OSX_SDK_DOWNLOAD_URL} /tmp/osxcross/tarballs/${OSX_SDK_DOWNLOAD_FILE}
|
||||
|
||||
RUN apt-get update && apt-get install -y apt-transport-https ca-certificates gnupg
|
||||
ENV UNATTENDED=yes \
|
||||
SDK_VERSION=${OSX_SDK_VERSION} \
|
||||
OSX_VERSION_MIN=10.10
|
||||
RUN apt update && \
|
||||
apt install -y --no-install-recommends \
|
||||
bash ca-certificates clang cmake git patch libssl-dev bzip2 cpio libbz2-dev libxml2-dev make python3 xz-utils zlib1g-dev
|
||||
# lzma-dev libxml2-dev xz
|
||||
RUN tar --strip=1 -C /tmp/osxcross -xf /tmp/osxcross.tar.gz
|
||||
RUN ./build.sh
|
||||
|
||||
RUN mkdir -p /etc/apt/keyrings
|
||||
### FREEBSD cross-compilation stage
|
||||
# use alpine for cacheable image since apt is notorious for not caching
|
||||
FROM alpine:3 AS freebsd
|
||||
# match golang latest
|
||||
# https://go.dev/wiki/FreeBSD
|
||||
ARG FREEBSD_VERSION=12.4
|
||||
ADD --checksum=sha256:581c7edacfd2fca2bdf5791f667402d22fccd8a5e184635e0cac075564d57aa8 \
|
||||
http://ftp-archive.freebsd.org/mirror/FreeBSD-Archive/old-releases/amd64/${FREEBSD_VERSION}-RELEASE/base.txz \
|
||||
/tmp/base.txz
|
||||
|
||||
ADD https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key nodesource.gpg.key
|
||||
RUN cat nodesource.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && rm nodesource.gpg.key
|
||||
RUN echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_24.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list
|
||||
WORKDIR /opt/cross-freebsd
|
||||
RUN apk add --no-cache tar xz
|
||||
RUN tar -xf /tmp/base.txz --strip-components=1 ./usr/lib ./usr/include ./lib
|
||||
RUN cd /opt/cross-freebsd/usr/lib && \
|
||||
find . -type l -exec sh -c ' \
|
||||
for link; do \
|
||||
target=$(readlink "$link"); \
|
||||
case "$target" in \
|
||||
/lib/*) ln -sf "/opt/cross-freebsd$target" "$link";; \
|
||||
esac; \
|
||||
done \
|
||||
' sh {} + && \
|
||||
ln -s libc++.a libstdc++.a && \
|
||||
ln -s libc++.so libstdc++.so
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
git make tar bash nodejs zip \
|
||||
clang llvm-dev cmake patch libxml2-dev uuid-dev libssl-dev xz-utils \
|
||||
bzip2 gzip sed cpio libbz2-dev zlib1g-dev \
|
||||
gcc-mingw-w64 \
|
||||
gcc-arm-linux-gnueabi libc-dev-armel-cross linux-libc-dev-armel-cross \
|
||||
gcc-aarch64-linux-gnu libc-dev-arm64-cross && \
|
||||
rm -rf /var/lib/apt/lists/*;
|
||||
### BUILDER
|
||||
FROM golang:1.24.3 AS builder
|
||||
ENV PATH=/opt/osx-ndk-x86/bin:$PATH
|
||||
|
||||
# copy in nodejs instead of using nodesource :thumbsup:
|
||||
COPY --from=docker.io/library/node:24-bookworm /usr/local /usr/local
|
||||
# copy in osxcross
|
||||
COPY --from=osxcross /tmp/osxcross/target/lib /usr/lib
|
||||
COPY --from=osxcross /tmp/osxcross/target /opt/osx-ndk-x86
|
||||
# copy in cross-freebsd
|
||||
COPY --from=freebsd /opt/cross-freebsd /opt/cross-freebsd
|
||||
|
||||
# pnpm install with npm
|
||||
RUN npm install -g pnpm
|
||||
|
||||
# FreeBSD cross-compilation setup
|
||||
# https://github.com/smartmontools/docker-build/blob/6b8c92560d17d325310ba02d9f5a4b250cb0764a/Dockerfile#L66
|
||||
ENV FREEBSD_VERSION 13.4
|
||||
ENV FREEBSD_DOWNLOAD_URL http://ftp.plusline.de/FreeBSD/releases/amd64/${FREEBSD_VERSION}-RELEASE/base.txz
|
||||
ENV FREEBSD_SHA 8e13b0a93daba349b8d28ad246d7beb327659b2ef4fe44d89f447392daec5a7c
|
||||
# git for getting hash
|
||||
# make and bash for building
|
||||
|
||||
RUN cd /tmp && \
|
||||
curl -o base.txz $FREEBSD_DOWNLOAD_URL && \
|
||||
echo "$FREEBSD_SHA base.txz" | sha256sum -c - && \
|
||||
mkdir -p /opt/cross-freebsd && \
|
||||
cd /opt/cross-freebsd && \
|
||||
tar -xf /tmp/base.txz ./lib/ ./usr/lib/ ./usr/include/ && \
|
||||
rm -f /tmp/base.txz && \
|
||||
cd /opt/cross-freebsd/usr/lib && \
|
||||
find . -xtype l | xargs ls -l | grep ' /lib/' | awk '{print "ln -sf /opt/cross-freebsd"$11 " " $9}' | /bin/sh && \
|
||||
ln -s libc++.a libstdc++.a && \
|
||||
ln -s libc++.so libstdc++.so
|
||||
|
||||
# macOS cross-compilation setup
|
||||
ENV OSX_SDK_VERSION 11.3
|
||||
ENV OSX_SDK_DOWNLOAD_FILE MacOSX${OSX_SDK_VERSION}.sdk.tar.xz
|
||||
ENV OSX_SDK_DOWNLOAD_URL https://github.com/phracker/MacOSX-SDKs/releases/download/${OSX_SDK_VERSION}/${OSX_SDK_DOWNLOAD_FILE}
|
||||
ENV OSX_SDK_SHA cd4f08a75577145b8f05245a2975f7c81401d75e9535dcffbb879ee1deefcbf4
|
||||
ENV OSXCROSS_REVISION 5e1b71fcceb23952f3229995edca1b6231525b5b
|
||||
ENV OSXCROSS_DOWNLOAD_URL https://codeload.github.com/tpoechtrager/osxcross/tar.gz/${OSXCROSS_REVISION}
|
||||
ENV OSXCROSS_SHA d3f771bbc20612fea577b18a71be3af2eb5ad2dd44624196cf55de866d008647
|
||||
|
||||
RUN cd /tmp && \
|
||||
curl -o osxcross.tar.gz $OSXCROSS_DOWNLOAD_URL && \
|
||||
echo "$OSXCROSS_SHA osxcross.tar.gz" | sha256sum -c - && \
|
||||
mkdir osxcross && \
|
||||
tar --strip=1 -C osxcross -xf osxcross.tar.gz && \
|
||||
rm -f osxcross.tar.gz && \
|
||||
curl -Lo $OSX_SDK_DOWNLOAD_FILE $OSX_SDK_DOWNLOAD_URL && \
|
||||
echo "$OSX_SDK_SHA $OSX_SDK_DOWNLOAD_FILE" | sha256sum -c - && \
|
||||
mv $OSX_SDK_DOWNLOAD_FILE osxcross/tarballs/ && \
|
||||
UNATTENDED=yes SDK_VERSION=$OSX_SDK_VERSION OSX_VERSION_MIN=10.10 osxcross/build.sh && \
|
||||
cp osxcross/target/lib/* /usr/lib/ && \
|
||||
mv osxcross/target /opt/osx-ndk-x86 && \
|
||||
rm -rf /tmp/osxcross
|
||||
|
||||
ENV PATH /opt/osx-ndk-x86/bin:$PATH
|
||||
|
||||
RUN mkdir -p /root/.ssh && \
|
||||
chmod 0700 /root/.ssh && \
|
||||
ssh-keyscan github.com > /root/.ssh/known_hosts
|
||||
|
||||
# ignore "dubious ownership" errors
|
||||
# clang for macos
|
||||
# zip for stashapp.zip
|
||||
# gcc-extensions for cross-arch build
|
||||
# we still target arm soft float?
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
git make bash \
|
||||
clang zip \
|
||||
gcc-mingw-w64 \
|
||||
gcc-arm-linux-gnueabi \
|
||||
libc-dev-armel-cross linux-libc-dev-armel-cross \
|
||||
gcc-aarch64-linux-gnu libc-dev-arm64-cross && \
|
||||
rm -rf /var/lib/apt/lists/*;
|
||||
RUN git config --global safe.directory '*'
|
||||
|
||||
# To test locally:
|
||||
# make generate
|
||||
# make ui
|
||||
# cd docker/compiler
|
||||
# make build
|
||||
# docker run --rm -v /PATH_TO_STASH:/stash -w /stash -i -t stashapp/compiler:latest make build-cc-all
|
||||
# # binaries will show up in /dist
|
||||
# docker build . -t ghcr.io/stashapp/compiler:latest
|
||||
# docker run --rm -v /PATH_TO_STASH:/stash -w /stash -i -t ghcr.io/stashapp/compiler:latest make build-cc-all
|
||||
# # binaries will show up in /dist
|
||||
|
|
@ -1,16 +1,22 @@
|
|||
host=ghcr.io
|
||||
user=stashapp
|
||||
repo=compiler
|
||||
version=12
|
||||
version=13
|
||||
|
||||
VERSION_IMAGE = ${host}/${user}/${repo}:${version}
|
||||
LATEST_IMAGE = ${host}/${user}/${repo}:latest
|
||||
|
||||
latest:
|
||||
docker build -t ${user}/${repo}:latest .
|
||||
docker build -t ${LATEST_IMAGE} .
|
||||
|
||||
build:
|
||||
docker build -t ${user}/${repo}:${version} -t ${user}/${repo}:latest .
|
||||
docker build -t ${VERSION_IMAGE} -t ${LATEST_IMAGE} .
|
||||
|
||||
build-no-cache:
|
||||
docker build --no-cache -t ${user}/${repo}:${version} -t ${user}/${repo}:latest .
|
||||
docker build --no-cache -t ${VERSION_IMAGE} -t ${LATEST_IMAGE} .
|
||||
|
||||
install: build
|
||||
docker push ${user}/${repo}:${version}
|
||||
docker push ${user}/${repo}:latest
|
||||
# requires docker login ghcr.io
|
||||
# echo $CR_PAT | docker login ghcr.io -u USERNAME --password-stdin
|
||||
push:
|
||||
docker push ${VERSION_IMAGE}
|
||||
docker push ${LATEST_IMAGE}
|
||||
|
|
@ -1,3 +1,3 @@
|
|||
Modified from https://github.com/bep/dockerfiles/tree/master/ci-goreleaser
|
||||
|
||||
When the Dockerfile is changed, the version number should be incremented in the Makefile and the new version tag should be pushed to Docker Hub. The GitHub workflow files also need to be updated to pull the correct image tag.
|
||||
When the Dockerfile is changed, the version number should be incremented in [.github/workflows/build-compiler.yml](../../.github/workflows/build-compiler.yml) and the workflow [manually ran](). `env: COMPILER_IMAGE` in [.github/workflows/build.yml](../../.github/workflows/build.yml) also needs to be updated to pull the correct image tag.
|
||||
|
|
@ -118,8 +118,8 @@ This project uses a modification of the [CI-GoReleaser](https://github.com/bep/d
|
|||
To cross-compile the app yourself:
|
||||
|
||||
1. Run `make pre-ui`, `make generate` and `make ui` outside the container, to generate files and build the UI.
|
||||
2. Pull the latest compiler image from Docker Hub: `docker pull stashapp/compiler`
|
||||
3. Run `docker run --rm --mount type=bind,source="$(pwd)",target=/stash -w /stash -it stashapp/compiler /bin/bash` to open a shell inside the container.
|
||||
2. Pull the latest compiler image from GHCR: `docker pull ghcr.io/stashapp/compiler`
|
||||
3. Run `docker run --rm --mount type=bind,source="$(pwd)",target=/stash -w /stash -it ghcr.io/stashapp/compiler /bin/bash` to open a shell inside the container.
|
||||
4. From inside the container, run `make build-cc-all` to build for all platforms, or run `make build-cc-{platform}` to build for a specific platform (have a look at the `Makefile` for the list of targets).
|
||||
5. You will find the compiled binaries in `dist/`.
|
||||
|
||||
|
|
|
|||
3
go.mod
|
|
@ -15,6 +15,7 @@ require (
|
|||
github.com/disintegration/imaging v1.6.2
|
||||
github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d
|
||||
github.com/doug-martin/goqu/v9 v9.18.0
|
||||
github.com/feederbox826/gosx-notifier v0.2.2
|
||||
github.com/go-chi/chi/v5 v5.2.2
|
||||
github.com/go-chi/cors v1.2.1
|
||||
github.com/go-chi/httplog v0.3.1
|
||||
|
|
@ -30,7 +31,6 @@ require (
|
|||
github.com/jinzhu/copier v0.4.0
|
||||
github.com/jmoiron/sqlx v1.4.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/kermieisinthehouse/gosx-notifier v0.1.2
|
||||
github.com/kermieisinthehouse/systray v1.2.4
|
||||
github.com/knadh/koanf/parsers/yaml v1.1.0
|
||||
github.com/knadh/koanf/providers/env v1.1.0
|
||||
|
|
@ -44,6 +44,7 @@ require (
|
|||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
|
||||
github.com/remeh/sizedwaitgroup v1.0.0
|
||||
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cast v1.6.0
|
||||
github.com/spf13/pflag v1.0.6
|
||||
|
|
|
|||
8
go.sum
|
|
@ -187,6 +187,10 @@ github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E
|
|||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/feederbox826/gosx-notifier v0.2.1 h1:47FdsdfVQUOkAlHdp9qevviVaGnNT152uMcgY91MiGs=
|
||||
github.com/feederbox826/gosx-notifier v0.2.1/go.mod h1:R6rqw7VuwuiCuvsr7EOONmWq++CRA5Ijmkmx75/C3Fs=
|
||||
github.com/feederbox826/gosx-notifier v0.2.2 h1:26NkaJZ8Wzptx82R46c9pkVAcFwGSU7kxWrOKmRWlC0=
|
||||
github.com/feederbox826/gosx-notifier v0.2.2/go.mod h1:R6rqw7VuwuiCuvsr7EOONmWq++CRA5Ijmkmx75/C3Fs=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||
|
|
@ -389,8 +393,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
|||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kermieisinthehouse/gosx-notifier v0.1.2 h1:KV0KBeKK2B24kIHY7iK0jgS64Q05f4oB+hUZmsPodxQ=
|
||||
github.com/kermieisinthehouse/gosx-notifier v0.1.2/go.mod h1:xyWT07azFtUOcHl96qMVvKhvKzsMcS7rKTHQyv8WTho=
|
||||
github.com/kermieisinthehouse/systray v1.2.4 h1:pdH5vnl+KKjRrVCRU4g/2W1/0HVzuuJ6WXHlPPHYY6s=
|
||||
github.com/kermieisinthehouse/systray v1.2.4/go.mod h1:axh6C/jNuSyC0QGtidZJURc9h+h41HNoMySoLVrhVR4=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
|
|
@ -537,6 +539,8 @@ github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK
|
|||
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
|
||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
|
||||
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
|
|
|
|||
|
|
@ -140,4 +140,8 @@ models:
|
|||
fields:
|
||||
plugins:
|
||||
resolver: true
|
||||
Performer:
|
||||
fields:
|
||||
career_length:
|
||||
resolver: true
|
||||
|
||||
|
|
|
|||
|
|
@ -426,6 +426,10 @@ type Mutation {
|
|||
destroyFiles(ids: [ID!]!): Boolean!
|
||||
|
||||
fileSetFingerprints(input: FileSetFingerprintsInput!): Boolean!
|
||||
"Reveal the file in the system file manager"
|
||||
revealFileInFileManager(id: ID!): Boolean!
|
||||
"Reveal the folder in the system file manager"
|
||||
revealFolderInFileManager(id: ID!): Boolean!
|
||||
|
||||
# Saved filters
|
||||
saveFilter(input: SaveFilterInput!): SavedFilter!
|
||||
|
|
@ -579,6 +583,8 @@ type Mutation {
|
|||
stashBoxBatchPerformerTag(input: StashBoxBatchTagInput!): String!
|
||||
"Run batch studio tag task. Returns the job ID."
|
||||
stashBoxBatchStudioTag(input: StashBoxBatchTagInput!): String!
|
||||
"Run batch tag tag task. Returns the job ID."
|
||||
stashBoxBatchTagTag(input: StashBoxBatchTagInput!): String!
|
||||
|
||||
"Enables DLNA for an optional duration. Has no effect if DLNA is enabled by default"
|
||||
enableDLNA(input: EnableDLNAInput!): Boolean!
|
||||
|
|
|
|||
|
|
@ -184,6 +184,18 @@ input ConfigGeneralInput {
|
|||
scraperPackageSources: [PackageSourceInput!]
|
||||
"Source of plugin packages"
|
||||
pluginPackageSources: [PackageSourceInput!]
|
||||
|
||||
"Size of the longest dimension for each sprite in pixels"
|
||||
spriteScreenshotSize: Int
|
||||
|
||||
"True if sprite generation should use the sprite interval and min/max sprites settings instead of the default"
|
||||
useCustomSpriteInterval: Boolean
|
||||
"Time between two different scrubber sprites in seconds - only used if useCustomSpriteInterval is true"
|
||||
spriteInterval: Float
|
||||
"Minimum number of sprites to be generated - only used if useCustomSpriteInterval is true"
|
||||
minimumSprites: Int
|
||||
"Minimum number of sprites to be generated - only used if useCustomSpriteInterval is true"
|
||||
maximumSprites: Int
|
||||
}
|
||||
|
||||
type ConfigGeneralResult {
|
||||
|
|
@ -287,6 +299,16 @@ type ConfigGeneralResult {
|
|||
logAccess: Boolean!
|
||||
"Maximum log size"
|
||||
logFileMaxSize: Int!
|
||||
"True if sprite generation should use the sprite interval and min/max sprites settings instead of the default"
|
||||
useCustomSpriteInterval: Boolean!
|
||||
"Time between two different scrubber sprites in seconds - only used if useCustomSpriteInterval is true"
|
||||
spriteInterval: Float!
|
||||
"Minimum number of sprites to be generated - only used if useCustomSpriteInterval is true"
|
||||
minimumSprites: Int!
|
||||
"Maximum number of sprites to be generated - only used if useCustomSpriteInterval is true"
|
||||
maximumSprites: Int!
|
||||
"Size of the longest dimension for each sprite in pixels"
|
||||
spriteScreenshotSize: Int!
|
||||
"Array of video file extensions"
|
||||
videoExtensions: [String!]!
|
||||
"Array of image file extensions"
|
||||
|
|
|
|||
|
|
@ -6,13 +6,19 @@ type Fingerprint {
|
|||
type Folder {
|
||||
id: ID!
|
||||
path: String!
|
||||
basename: String!
|
||||
|
||||
parent_folder_id: ID @deprecated(reason: "Use parent_folder instead")
|
||||
zip_file_id: ID @deprecated(reason: "Use zip_file instead")
|
||||
|
||||
parent_folder: Folder
|
||||
"Returns all parent folders in order from immediate parent to top-level"
|
||||
parent_folders: [Folder!]!
|
||||
zip_file: BasicFile
|
||||
|
||||
"Returns direct sub-folders"
|
||||
sub_folders: [Folder!]!
|
||||
|
||||
mod_time: Time!
|
||||
|
||||
created_at: Time!
|
||||
|
|
@ -153,7 +159,7 @@ input MoveFilesInput {
|
|||
|
||||
input SetFingerprintsInput {
|
||||
type: String!
|
||||
"an null value will remove the fingerprint"
|
||||
"a null value will remove the fingerprint"
|
||||
value: String
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -75,10 +75,26 @@ input OrientationCriterionInput {
|
|||
value: [OrientationEnum!]!
|
||||
}
|
||||
|
||||
input PHashDuplicationCriterionInput {
|
||||
duplicated: Boolean
|
||||
"Currently unimplemented"
|
||||
input DuplicationCriterionInput {
|
||||
duplicated: Boolean @deprecated(reason: "Use phash field instead")
|
||||
"Currently unimplemented. Intended for phash distance matching."
|
||||
distance: Int
|
||||
"Filter by phash duplication"
|
||||
phash: Boolean
|
||||
"Filter by URL duplication"
|
||||
url: Boolean
|
||||
"Filter by Stash ID duplication"
|
||||
stash_id: Boolean
|
||||
"Filter by title duplication"
|
||||
title: Boolean
|
||||
}
|
||||
|
||||
input FileDuplicationCriterionInput {
|
||||
duplicated: Boolean @deprecated(reason: "Use phash field instead")
|
||||
"Currently unimplemented. Intended for phash distance matching."
|
||||
distance: Int
|
||||
"Filter by phash duplication"
|
||||
phash: Boolean
|
||||
}
|
||||
|
||||
input StashIDCriterionInput {
|
||||
|
|
@ -136,10 +152,15 @@ input PerformerFilterType {
|
|||
fake_tits: StringCriterionInput
|
||||
"Filter by penis length value"
|
||||
penis_length: FloatCriterionInput
|
||||
"Filter by ciricumcision"
|
||||
"Filter by circumcision"
|
||||
circumcised: CircumcisionCriterionInput
|
||||
"Filter by career length"
|
||||
"Deprecated: use career_start and career_end. This filter is non-functional."
|
||||
career_length: StringCriterionInput
|
||||
@deprecated(reason: "Use career_start and career_end")
|
||||
"Filter by career start"
|
||||
career_start: DateCriterionInput
|
||||
"Filter by career end"
|
||||
career_end: DateCriterionInput
|
||||
"Filter by tattoos"
|
||||
tattoos: StringCriterionInput
|
||||
"Filter by piercings"
|
||||
|
|
@ -156,6 +177,8 @@ input PerformerFilterType {
|
|||
tag_count: IntCriterionInput
|
||||
"Filter by scene count"
|
||||
scene_count: IntCriterionInput
|
||||
"Filter by marker count (via scene)"
|
||||
marker_count: IntCriterionInput
|
||||
"Filter by image count"
|
||||
image_count: IntCriterionInput
|
||||
"Filter by gallery count"
|
||||
|
|
@ -199,6 +222,8 @@ input PerformerFilterType {
|
|||
galleries_filter: GalleryFilterType
|
||||
"Filter by related tags that meet this criteria"
|
||||
tags_filter: TagFilterType
|
||||
"Filter by related scene markers (via scene) that meet this criteria"
|
||||
markers_filter: SceneMarkerFilterType
|
||||
"Filter by creation time"
|
||||
created_at: TimestampCriterionInput
|
||||
"Filter by last update time"
|
||||
|
|
@ -224,9 +249,9 @@ input SceneMarkerFilterType {
|
|||
updated_at: TimestampCriterionInput
|
||||
"Filter by scene date"
|
||||
scene_date: DateCriterionInput
|
||||
"Filter by cscene reation time"
|
||||
"Filter by scene creation time"
|
||||
scene_created_at: TimestampCriterionInput
|
||||
"Filter by lscene ast update time"
|
||||
"Filter by scene last update time"
|
||||
scene_updated_at: TimestampCriterionInput
|
||||
"Filter by related scenes that meet this criteria"
|
||||
scene_filter: SceneFilterType
|
||||
|
|
@ -261,8 +286,8 @@ input SceneFilterType {
|
|||
organized: Boolean
|
||||
"Filter by o-counter"
|
||||
o_counter: IntCriterionInput
|
||||
"Filter Scenes that have an exact phash match available"
|
||||
duplicated: PHashDuplicationCriterionInput
|
||||
"Filter Scenes by duplication criteria"
|
||||
duplicated: DuplicationCriterionInput
|
||||
"Filter by resolution"
|
||||
resolution: ResolutionCriterionInput
|
||||
"Filter by orientation"
|
||||
|
|
@ -352,6 +377,8 @@ input SceneFilterType {
|
|||
markers_filter: SceneMarkerFilterType
|
||||
"Filter by related files that meet this criteria"
|
||||
files_filter: FileFilterType
|
||||
|
||||
custom_fields: [CustomFieldCriterionInput!]
|
||||
}
|
||||
|
||||
input MovieFilterType {
|
||||
|
|
@ -434,11 +461,16 @@ input GroupFilterType {
|
|||
containing_group_count: IntCriterionInput
|
||||
"Filter by number of sub-groups the group has"
|
||||
sub_group_count: IntCriterionInput
|
||||
"Filter by number of scenes the group has"
|
||||
scene_count: IntCriterionInput
|
||||
|
||||
"Filter by related scenes that meet this criteria"
|
||||
scenes_filter: SceneFilterType
|
||||
"Filter by related studios that meet this criteria"
|
||||
studios_filter: StudioFilterType
|
||||
|
||||
"Filter by custom fields"
|
||||
custom_fields: [CustomFieldCriterionInput!]
|
||||
}
|
||||
|
||||
input StudioFilterType {
|
||||
|
|
@ -469,6 +501,8 @@ input StudioFilterType {
|
|||
image_count: IntCriterionInput
|
||||
"Filter by gallery count"
|
||||
gallery_count: IntCriterionInput
|
||||
"Filter by group count"
|
||||
group_count: IntCriterionInput
|
||||
"Filter by tag count"
|
||||
tag_count: IntCriterionInput
|
||||
"Filter by url"
|
||||
|
|
@ -479,12 +513,16 @@ input StudioFilterType {
|
|||
child_count: IntCriterionInput
|
||||
"Filter by autotag ignore value"
|
||||
ignore_auto_tag: Boolean
|
||||
"Filter by organized"
|
||||
organized: Boolean
|
||||
"Filter by related scenes that meet this criteria"
|
||||
scenes_filter: SceneFilterType
|
||||
"Filter by related images that meet this criteria"
|
||||
images_filter: ImageFilterType
|
||||
"Filter by related galleries that meet this criteria"
|
||||
galleries_filter: GalleryFilterType
|
||||
"Filter by related groups that meet this criteria"
|
||||
groups_filter: GroupFilterType
|
||||
"Filter by creation time"
|
||||
created_at: TimestampCriterionInput
|
||||
"Filter by last update time"
|
||||
|
|
@ -567,6 +605,10 @@ input GalleryFilterType {
|
|||
files_filter: FileFilterType
|
||||
"Filter by related folders that meet this criteria"
|
||||
folders_filter: FolderFilterType
|
||||
"Filter by parent folder of the zip or folder the gallery is in"
|
||||
parent_folder: HierarchicalMultiCriterionInput
|
||||
|
||||
custom_fields: [CustomFieldCriterionInput!]
|
||||
}
|
||||
|
||||
input TagFilterType {
|
||||
|
|
@ -625,7 +667,7 @@ input TagFilterType {
|
|||
"Filter by number of parent tags the tag has"
|
||||
parent_count: IntCriterionInput
|
||||
|
||||
"Filter by number f child tags the tag has"
|
||||
"Filter by number of child tags the tag has"
|
||||
child_count: IntCriterionInput
|
||||
|
||||
"Filter by autotag ignore value"
|
||||
|
|
@ -644,10 +686,14 @@ input TagFilterType {
|
|||
images_filter: ImageFilterType
|
||||
"Filter by related galleries that meet this criteria"
|
||||
galleries_filter: GalleryFilterType
|
||||
"Filter by related groups that meet this criteria"
|
||||
groups_filter: GroupFilterType
|
||||
"Filter by related performers that meet this criteria"
|
||||
performers_filter: PerformerFilterType
|
||||
"Filter by related studios that meet this criteria"
|
||||
studios_filter: StudioFilterType
|
||||
"Filter by related scene markers that meet this criteria"
|
||||
markers_filter: SceneMarkerFilterType
|
||||
|
||||
"Filter by creation time"
|
||||
created_at: TimestampCriterionInput
|
||||
|
|
@ -729,6 +775,8 @@ input ImageFilterType {
|
|||
tags_filter: TagFilterType
|
||||
"Filter by related files that meet this criteria"
|
||||
files_filter: FileFilterType
|
||||
"Filter by custom fields"
|
||||
custom_fields: [CustomFieldCriterionInput!]
|
||||
}
|
||||
|
||||
input FileFilterType {
|
||||
|
|
@ -746,8 +794,8 @@ input FileFilterType {
|
|||
"Filter by modification time"
|
||||
mod_time: TimestampCriterionInput
|
||||
|
||||
"Filter files that have an exact match available"
|
||||
duplicated: PHashDuplicationCriterionInput
|
||||
"Filter files by duplication criteria (only phash applies to files)"
|
||||
duplicated: FileDuplicationCriterionInput
|
||||
|
||||
"find files based on hash"
|
||||
hashes: [FingerprintFilterInput!]
|
||||
|
|
@ -778,6 +826,7 @@ input FolderFilterType {
|
|||
NOT: FolderFilterType
|
||||
|
||||
path: StringCriterionInput
|
||||
basename: StringCriterionInput
|
||||
|
||||
parent_folder: HierarchicalMultiCriterionInput
|
||||
zip_file: MultiCriterionInput
|
||||
|
|
@ -886,7 +935,7 @@ input GenderCriterionInput {
|
|||
}
|
||||
|
||||
input CircumcisionCriterionInput {
|
||||
value: [CircumisedEnum!]
|
||||
value: [CircumcisedEnum!]
|
||||
modifier: CriterionModifier!
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -32,6 +32,7 @@ type Gallery {
|
|||
cover: Image
|
||||
|
||||
paths: GalleryPathsType! # Resolver
|
||||
custom_fields: Map!
|
||||
image(index: Int!): Image!
|
||||
}
|
||||
|
||||
|
|
@ -50,6 +51,8 @@ input GalleryCreateInput {
|
|||
studio_id: ID
|
||||
tag_ids: [ID!]
|
||||
performer_ids: [ID!]
|
||||
|
||||
custom_fields: Map
|
||||
}
|
||||
|
||||
input GalleryUpdateInput {
|
||||
|
|
@ -71,6 +74,8 @@ input GalleryUpdateInput {
|
|||
performer_ids: [ID!]
|
||||
|
||||
primary_file_id: ID
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input BulkGalleryUpdateInput {
|
||||
|
|
@ -89,6 +94,8 @@ input BulkGalleryUpdateInput {
|
|||
studio_id: ID
|
||||
tag_ids: BulkUpdateIds
|
||||
performer_ids: BulkUpdateIds
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input GalleryDestroyInput {
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@ type Group {
|
|||
sub_group_count(depth: Int): Int! # Resolver
|
||||
scenes: [Scene!]!
|
||||
o_counter: Int # Resolver
|
||||
custom_fields: Map!
|
||||
}
|
||||
|
||||
input GroupDescriptionInput {
|
||||
|
|
@ -59,6 +60,8 @@ input GroupCreateInput {
|
|||
front_image: String
|
||||
"This should be a URL or a base64 encoded data URL"
|
||||
back_image: String
|
||||
|
||||
custom_fields: Map
|
||||
}
|
||||
|
||||
input GroupUpdateInput {
|
||||
|
|
@ -82,6 +85,8 @@ input GroupUpdateInput {
|
|||
front_image: String
|
||||
"This should be a URL or a base64 encoded data URL"
|
||||
back_image: String
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input BulkUpdateGroupDescriptionsInput {
|
||||
|
|
@ -94,6 +99,8 @@ input BulkGroupUpdateInput {
|
|||
ids: [ID!]
|
||||
# rating expressed as 1-100
|
||||
rating100: Int
|
||||
date: String
|
||||
synopsis: String
|
||||
studio_id: ID
|
||||
director: String
|
||||
urls: BulkUpdateStrings
|
||||
|
|
@ -101,6 +108,8 @@ input BulkGroupUpdateInput {
|
|||
|
||||
containing_groups: BulkUpdateGroupDescriptionsInput
|
||||
sub_groups: BulkUpdateGroupDescriptionsInput
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input GroupDestroyInput {
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ type Image {
|
|||
studio: Studio
|
||||
tags: [Tag!]!
|
||||
performers: [Performer!]!
|
||||
custom_fields: Map!
|
||||
}
|
||||
|
||||
type ImageFileType {
|
||||
|
|
@ -56,6 +57,7 @@ input ImageUpdateInput {
|
|||
gallery_ids: [ID!]
|
||||
|
||||
primary_file_id: ID
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input BulkImageUpdateInput {
|
||||
|
|
@ -76,6 +78,7 @@ input BulkImageUpdateInput {
|
|||
performer_ids: BulkUpdateIds
|
||||
tag_ids: BulkUpdateIds
|
||||
gallery_ids: BulkUpdateIds
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input ImageDestroyInput {
|
||||
|
|
|
|||
|
|
@ -26,6 +26,8 @@ input GenerateMetadataInput {
|
|||
imageIDs: [ID!]
|
||||
"gallery ids to generate for"
|
||||
galleryIDs: [ID!]
|
||||
"paths to run generate on, in addition to the other ID lists"
|
||||
paths: [String!]
|
||||
|
||||
"overwrite existing media"
|
||||
overwrite: Boolean
|
||||
|
|
@ -129,6 +131,14 @@ type ScanMetadataOptions {
|
|||
input CleanMetadataInput {
|
||||
paths: [String!]
|
||||
|
||||
"""
|
||||
Don't check zip file contents when determining whether to clean a file.
|
||||
This can significantly speed up the clean process, but will potentially miss removed files within zip files.
|
||||
Where users do not modify zip files contents directly, this should be safe to use.
|
||||
Defaults to false.
|
||||
"""
|
||||
ignoreZipFileContents: Boolean
|
||||
|
||||
"Do a dry run. Don't delete any files"
|
||||
dryRun: Boolean!
|
||||
}
|
||||
|
|
@ -215,7 +225,9 @@ input IdentifyMetadataOptionsInput {
|
|||
setCoverImage: Boolean
|
||||
setOrganized: Boolean
|
||||
"defaults to true if not provided"
|
||||
includeMalePerformers: Boolean
|
||||
includeMalePerformers: Boolean @deprecated(reason: "Use performerGenders")
|
||||
"Filter to only include performers with these genders. If not provided, all genders are included."
|
||||
performerGenders: [GenderEnum!]
|
||||
"defaults to true if not provided"
|
||||
skipMultipleMatches: Boolean
|
||||
"tag to tag skipped multiple matches with"
|
||||
|
|
@ -260,7 +272,9 @@ type IdentifyMetadataOptions {
|
|||
setCoverImage: Boolean
|
||||
setOrganized: Boolean
|
||||
"defaults to true if not provided"
|
||||
includeMalePerformers: Boolean
|
||||
includeMalePerformers: Boolean @deprecated(reason: "Use performerGenders")
|
||||
"Filter to only include performers with these genders. If not provided, all genders are included."
|
||||
performerGenders: [GenderEnum!]
|
||||
"defaults to true if not provided"
|
||||
skipMultipleMatches: Boolean
|
||||
"tag to tag skipped multiple matches with"
|
||||
|
|
@ -321,6 +335,8 @@ input ImportObjectsInput {
|
|||
|
||||
input BackupDatabaseInput {
|
||||
download: Boolean
|
||||
"If true, blob files will be included in the backup. This can significantly increase the size of the backup and the time it takes to create it, but allows for a complete backup of the system that can be restored without needing access to the original media files."
|
||||
includeBlobs: Boolean
|
||||
}
|
||||
|
||||
input AnonymiseDatabaseInput {
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ enum GenderEnum {
|
|||
NON_BINARY
|
||||
}
|
||||
|
||||
enum CircumisedEnum {
|
||||
enum CircumcisedEnum {
|
||||
CUT
|
||||
UNCUT
|
||||
}
|
||||
|
|
@ -29,8 +29,10 @@ type Performer {
|
|||
measurements: String
|
||||
fake_tits: String
|
||||
penis_length: Float
|
||||
circumcised: CircumisedEnum
|
||||
career_length: String
|
||||
circumcised: CircumcisedEnum
|
||||
career_length: String @deprecated(reason: "Use career_start and career_end")
|
||||
career_start: String
|
||||
career_end: String
|
||||
tattoos: String
|
||||
piercings: String
|
||||
alias_list: [String!]!
|
||||
|
|
@ -76,8 +78,10 @@ input PerformerCreateInput {
|
|||
measurements: String
|
||||
fake_tits: String
|
||||
penis_length: Float
|
||||
circumcised: CircumisedEnum
|
||||
career_length: String
|
||||
circumcised: CircumcisedEnum
|
||||
career_length: String @deprecated(reason: "Use career_start and career_end")
|
||||
career_start: String
|
||||
career_end: String
|
||||
tattoos: String
|
||||
piercings: String
|
||||
"Duplicate aliases and those equal to name will be ignored (case-insensitive)"
|
||||
|
|
@ -115,8 +119,10 @@ input PerformerUpdateInput {
|
|||
measurements: String
|
||||
fake_tits: String
|
||||
penis_length: Float
|
||||
circumcised: CircumisedEnum
|
||||
career_length: String
|
||||
circumcised: CircumcisedEnum
|
||||
career_length: String @deprecated(reason: "Use career_start and career_end")
|
||||
career_start: String
|
||||
career_end: String
|
||||
tattoos: String
|
||||
piercings: String
|
||||
"Duplicate aliases and those equal to name will be ignored (case-insensitive)"
|
||||
|
|
@ -159,8 +165,10 @@ input BulkPerformerUpdateInput {
|
|||
measurements: String
|
||||
fake_tits: String
|
||||
penis_length: Float
|
||||
circumcised: CircumisedEnum
|
||||
career_length: String
|
||||
circumcised: CircumcisedEnum
|
||||
career_length: String @deprecated(reason: "Use career_start and career_end")
|
||||
career_start: String
|
||||
career_end: String
|
||||
tattoos: String
|
||||
piercings: String
|
||||
"Duplicate aliases and those equal to name will result in an error (case-insensitive)"
|
||||
|
|
|
|||
|
|
@ -80,6 +80,8 @@ type Scene {
|
|||
performers: [Performer!]!
|
||||
stash_ids: [StashID!]!
|
||||
|
||||
custom_fields: Map!
|
||||
|
||||
"Return valid stream paths"
|
||||
sceneStreams: [SceneStreamEndpoint!]!
|
||||
}
|
||||
|
|
@ -122,6 +124,8 @@ input SceneCreateInput {
|
|||
Files must not already be primary for another scene.
|
||||
"""
|
||||
file_ids: [ID!]
|
||||
|
||||
custom_fields: Map
|
||||
}
|
||||
|
||||
input SceneUpdateInput {
|
||||
|
|
@ -161,6 +165,8 @@ input SceneUpdateInput {
|
|||
)
|
||||
|
||||
primary_file_id: ID
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
enum BulkUpdateIdMode {
|
||||
|
|
@ -194,6 +200,8 @@ input BulkSceneUpdateInput {
|
|||
tag_ids: BulkUpdateIds
|
||||
group_ids: BulkUpdateIds
|
||||
movie_ids: BulkUpdateIds @deprecated(reason: "Use group_ids")
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
||||
input SceneDestroyInput {
|
||||
|
|
|
|||
|
|
@ -18,7 +18,9 @@ type ScrapedPerformer {
|
|||
fake_tits: String
|
||||
penis_length: String
|
||||
circumcised: String
|
||||
career_length: String
|
||||
career_length: String @deprecated(reason: "Use career_start and career_end")
|
||||
career_start: String
|
||||
career_end: String
|
||||
tattoos: String
|
||||
piercings: String
|
||||
# aliases must be comma-delimited to be parsed correctly
|
||||
|
|
@ -54,7 +56,9 @@ input ScrapedPerformerInput {
|
|||
fake_tits: String
|
||||
penis_length: String
|
||||
circumcised: String
|
||||
career_length: String
|
||||
career_length: String @deprecated(reason: "Use career_start and career_end")
|
||||
career_start: String
|
||||
career_end: String
|
||||
tattoos: String
|
||||
piercings: String
|
||||
aliases: String
|
||||
|
|
|
|||
|
|
@ -71,6 +71,9 @@ type ScrapedTag {
|
|||
"Set if tag matched"
|
||||
stored_id: ID
|
||||
name: String!
|
||||
description: String
|
||||
alias_list: [String!]
|
||||
parent: ScrapedTag
|
||||
"Remote site ID, if applicable"
|
||||
remote_site_id: String
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ type Studio {
|
|||
aliases: [String!]!
|
||||
tags: [Tag!]!
|
||||
ignore_auto_tag: Boolean!
|
||||
organized: Boolean!
|
||||
|
||||
image_path: String # Resolver
|
||||
scene_count(depth: Int): Int! # Resolver
|
||||
|
|
@ -46,6 +47,7 @@ input StudioCreateInput {
|
|||
aliases: [String!]
|
||||
tag_ids: [ID!]
|
||||
ignore_auto_tag: Boolean
|
||||
organized: Boolean
|
||||
|
||||
custom_fields: Map
|
||||
}
|
||||
|
|
@ -67,6 +69,7 @@ input StudioUpdateInput {
|
|||
aliases: [String!]
|
||||
tag_ids: [ID!]
|
||||
ignore_auto_tag: Boolean
|
||||
organized: Boolean
|
||||
|
||||
custom_fields: CustomFieldsInput
|
||||
}
|
||||
|
|
@ -82,6 +85,7 @@ input BulkStudioUpdateInput {
|
|||
details: String
|
||||
tag_ids: BulkUpdateIds
|
||||
ignore_auto_tag: Boolean
|
||||
organized: Boolean
|
||||
}
|
||||
|
||||
input StudioDestroyInput {
|
||||
|
|
|
|||
|
|
@ -78,6 +78,8 @@ type FindTagsResultType {
|
|||
input TagsMergeInput {
|
||||
source: [ID!]!
|
||||
destination: ID!
|
||||
# values defined here will override values in the destination
|
||||
values: TagUpdateInput
|
||||
}
|
||||
|
||||
input BulkTagUpdateInput {
|
||||
|
|
|
|||
|
|
@ -29,6 +29,13 @@ fragment StudioFragment on Studio {
|
|||
fragment TagFragment on Tag {
|
||||
name
|
||||
id
|
||||
description
|
||||
aliases
|
||||
category {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
fragment MeasurementsFragment on Measurements {
|
||||
|
|
@ -121,18 +128,6 @@ fragment SceneFragment on Scene {
|
|||
}
|
||||
}
|
||||
|
||||
query FindSceneByFingerprint($fingerprint: FingerprintQueryInput!) {
|
||||
findSceneByFingerprint(fingerprint: $fingerprint) {
|
||||
...SceneFragment
|
||||
}
|
||||
}
|
||||
|
||||
query FindScenesByFullFingerprints($fingerprints: [FingerprintQueryInput!]!) {
|
||||
findScenesByFullFingerprints(fingerprints: $fingerprints) {
|
||||
...SceneFragment
|
||||
}
|
||||
}
|
||||
|
||||
query FindScenesBySceneFingerprints(
|
||||
$fingerprints: [[FingerprintQueryInput!]!]!
|
||||
) {
|
||||
|
|
|
|||
|
|
@ -40,6 +40,8 @@ func authenticateHandler() func(http.Handler) http.Handler {
|
|||
return
|
||||
}
|
||||
|
||||
r = session.SetLocalRequest(r)
|
||||
|
||||
userID, err := manager.GetInstance().SessionStore.Authenticate(w, r)
|
||||
if err != nil {
|
||||
if !errors.Is(err, session.ErrUnauthorized) {
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@
|
|||
//go:generate go run github.com/vektah/dataloaden GroupLoader int *github.com/stashapp/stash/pkg/models.Group
|
||||
//go:generate go run github.com/vektah/dataloaden FileLoader github.com/stashapp/stash/pkg/models.FileID github.com/stashapp/stash/pkg/models.File
|
||||
//go:generate go run github.com/vektah/dataloaden FolderLoader github.com/stashapp/stash/pkg/models.FolderID *github.com/stashapp/stash/pkg/models.Folder
|
||||
//go:generate go run github.com/vektah/dataloaden FolderRelatedFolderIDsLoader github.com/stashapp/stash/pkg/models.FolderID []github.com/stashapp/stash/pkg/models.FolderID
|
||||
//go:generate go run github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
|
||||
//go:generate go run github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
|
||||
//go:generate go run github.com/vektah/dataloaden GalleryFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
|
||||
|
|
@ -42,19 +43,22 @@ const (
|
|||
)
|
||||
|
||||
type Loaders struct {
|
||||
SceneByID *SceneLoader
|
||||
SceneFiles *SceneFileIDsLoader
|
||||
ScenePlayCount *ScenePlayCountLoader
|
||||
SceneOCount *SceneOCountLoader
|
||||
ScenePlayHistory *ScenePlayHistoryLoader
|
||||
SceneOHistory *SceneOHistoryLoader
|
||||
SceneLastPlayed *SceneLastPlayedLoader
|
||||
SceneByID *SceneLoader
|
||||
SceneFiles *SceneFileIDsLoader
|
||||
ScenePlayCount *ScenePlayCountLoader
|
||||
SceneOCount *SceneOCountLoader
|
||||
ScenePlayHistory *ScenePlayHistoryLoader
|
||||
SceneOHistory *SceneOHistoryLoader
|
||||
SceneLastPlayed *SceneLastPlayedLoader
|
||||
SceneCustomFields *CustomFieldsLoader
|
||||
|
||||
ImageFiles *ImageFileIDsLoader
|
||||
GalleryFiles *GalleryFileIDsLoader
|
||||
|
||||
GalleryByID *GalleryLoader
|
||||
ImageByID *ImageLoader
|
||||
GalleryByID *GalleryLoader
|
||||
GalleryCustomFields *CustomFieldsLoader
|
||||
ImageByID *ImageLoader
|
||||
ImageCustomFields *CustomFieldsLoader
|
||||
|
||||
PerformerByID *PerformerLoader
|
||||
PerformerCustomFields *CustomFieldsLoader
|
||||
|
|
@ -64,9 +68,15 @@ type Loaders struct {
|
|||
|
||||
TagByID *TagLoader
|
||||
TagCustomFields *CustomFieldsLoader
|
||||
GroupByID *GroupLoader
|
||||
FileByID *FileLoader
|
||||
FolderByID *FolderLoader
|
||||
|
||||
GroupByID *GroupLoader
|
||||
GroupCustomFields *CustomFieldsLoader
|
||||
|
||||
FileByID *FileLoader
|
||||
|
||||
FolderByID *FolderLoader
|
||||
FolderParentFolderIDs *FolderRelatedFolderIDsLoader
|
||||
FolderSubFolderIDs *FolderRelatedFolderIDsLoader
|
||||
}
|
||||
|
||||
type Middleware struct {
|
||||
|
|
@ -87,11 +97,21 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
|
|||
maxBatch: maxBatch,
|
||||
fetch: m.fetchGalleries(ctx),
|
||||
},
|
||||
GalleryCustomFields: &CustomFieldsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchGalleryCustomFields(ctx),
|
||||
},
|
||||
ImageByID: &ImageLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchImages(ctx),
|
||||
},
|
||||
ImageCustomFields: &CustomFieldsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchImageCustomFields(ctx),
|
||||
},
|
||||
PerformerByID: &PerformerLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
|
|
@ -107,6 +127,11 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
|
|||
maxBatch: maxBatch,
|
||||
fetch: m.fetchStudioCustomFields(ctx),
|
||||
},
|
||||
SceneCustomFields: &CustomFieldsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchSceneCustomFields(ctx),
|
||||
},
|
||||
StudioByID: &StudioLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
|
|
@ -127,6 +152,11 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
|
|||
maxBatch: maxBatch,
|
||||
fetch: m.fetchGroups(ctx),
|
||||
},
|
||||
GroupCustomFields: &CustomFieldsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchGroupCustomFields(ctx),
|
||||
},
|
||||
FileByID: &FileLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
|
|
@ -137,6 +167,16 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
|
|||
maxBatch: maxBatch,
|
||||
fetch: m.fetchFolders(ctx),
|
||||
},
|
||||
FolderParentFolderIDs: &FolderRelatedFolderIDsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchFoldersParentFolderIDs(ctx),
|
||||
},
|
||||
FolderSubFolderIDs: &FolderRelatedFolderIDsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
fetch: m.fetchFoldersSubFolderIDs(ctx),
|
||||
},
|
||||
SceneFiles: &SceneFileIDsLoader{
|
||||
wait: wait,
|
||||
maxBatch: maxBatch,
|
||||
|
|
@ -207,6 +247,18 @@ func (m Middleware) fetchScenes(ctx context.Context) func(keys []int) ([]*models
|
|||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchSceneCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
|
||||
return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
ret, err = m.Repository.Scene.GetCustomFieldsBulk(ctx, keys)
|
||||
return err
|
||||
})
|
||||
|
||||
return ret, toErrorSlice(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchImages(ctx context.Context) func(keys []int) ([]*models.Image, []error) {
|
||||
return func(keys []int) (ret []*models.Image, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
|
|
@ -219,6 +271,18 @@ func (m Middleware) fetchImages(ctx context.Context) func(keys []int) ([]*models
|
|||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchImageCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
|
||||
return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
ret, err = m.Repository.Image.GetCustomFieldsBulk(ctx, keys)
|
||||
return err
|
||||
})
|
||||
|
||||
return ret, toErrorSlice(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchGalleries(ctx context.Context) func(keys []int) ([]*models.Gallery, []error) {
|
||||
return func(keys []int) (ret []*models.Gallery, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
|
|
@ -301,6 +365,30 @@ func (m Middleware) fetchTagCustomFields(ctx context.Context) func(keys []int) (
|
|||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchGroupCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
|
||||
return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
ret, err = m.Repository.Group.GetCustomFieldsBulk(ctx, keys)
|
||||
return err
|
||||
})
|
||||
|
||||
return ret, toErrorSlice(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchGalleryCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
|
||||
return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
ret, err = m.Repository.Gallery.GetCustomFieldsBulk(ctx, keys)
|
||||
return err
|
||||
})
|
||||
|
||||
return ret, toErrorSlice(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchGroups(ctx context.Context) func(keys []int) ([]*models.Group, []error) {
|
||||
return func(keys []int) (ret []*models.Group, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
|
|
@ -334,6 +422,28 @@ func (m Middleware) fetchFolders(ctx context.Context) func(keys []models.FolderI
|
|||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchFoldersParentFolderIDs(ctx context.Context) func(keys []models.FolderID) ([][]models.FolderID, []error) {
|
||||
return func(keys []models.FolderID) (ret [][]models.FolderID, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
ret, err = m.Repository.Folder.GetManyParentFolderIDs(ctx, keys)
|
||||
return err
|
||||
})
|
||||
return ret, toErrorSlice(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchFoldersSubFolderIDs(ctx context.Context) func(keys []models.FolderID) ([][]models.FolderID, []error) {
|
||||
return func(keys []models.FolderID) (ret [][]models.FolderID, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
ret, err = m.Repository.Folder.GetManySubFolderIDs(ctx, keys)
|
||||
return err
|
||||
})
|
||||
return ret, toErrorSlice(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Middleware) fetchScenesFileIDs(ctx context.Context) func(keys []int) ([][]models.FileID, []error) {
|
||||
return func(keys []int) (ret [][]models.FileID, errs []error) {
|
||||
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
|
||||
|
|
|
|||
225
internal/api/loaders/folderrelatedfolderidsloader_gen.go
Normal file
|
|
@ -0,0 +1,225 @@
|
|||
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
|
||||
|
||||
package loaders
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/stashapp/stash/pkg/models"
|
||||
)
|
||||
|
||||
// FolderParentFolderIDsLoaderConfig captures the config to create a new FolderParentFolderIDsLoader
|
||||
type FolderParentFolderIDsLoaderConfig struct {
|
||||
// Fetch is a method that provides the data for the loader
|
||||
Fetch func(keys []models.FolderID) ([][]models.FolderID, []error)
|
||||
|
||||
// Wait is how long wait before sending a batch
|
||||
Wait time.Duration
|
||||
|
||||
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
|
||||
MaxBatch int
|
||||
}
|
||||
|
||||
// NewFolderParentFolderIDsLoader creates a new FolderParentFolderIDsLoader given a fetch, wait, and maxBatch
|
||||
func NewFolderParentFolderIDsLoader(config FolderParentFolderIDsLoaderConfig) *FolderRelatedFolderIDsLoader {
|
||||
return &FolderRelatedFolderIDsLoader{
|
||||
fetch: config.Fetch,
|
||||
wait: config.Wait,
|
||||
maxBatch: config.MaxBatch,
|
||||
}
|
||||
}
|
||||
|
||||
// FolderRelatedFolderIDsLoader batches and caches requests
|
||||
type FolderRelatedFolderIDsLoader struct {
|
||||
// this method provides the data for the loader
|
||||
fetch func(keys []models.FolderID) ([][]models.FolderID, []error)
|
||||
|
||||
// how long to done before sending a batch
|
||||
wait time.Duration
|
||||
|
||||
// this will limit the maximum number of keys to send in one batch, 0 = no limit
|
||||
maxBatch int
|
||||
|
||||
// INTERNAL
|
||||
|
||||
// lazily created cache
|
||||
cache map[models.FolderID][]models.FolderID
|
||||
|
||||
// the current batch. keys will continue to be collected until timeout is hit,
|
||||
// then everything will be sent to the fetch method and out to the listeners
|
||||
batch *folderParentFolderIDsLoaderBatch
|
||||
|
||||
// mutex to prevent races
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
type folderParentFolderIDsLoaderBatch struct {
|
||||
keys []models.FolderID
|
||||
data [][]models.FolderID
|
||||
error []error
|
||||
closing bool
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// Load a FolderID by key, batching and caching will be applied automatically
|
||||
func (l *FolderRelatedFolderIDsLoader) Load(key models.FolderID) ([]models.FolderID, error) {
|
||||
return l.LoadThunk(key)()
|
||||
}
|
||||
|
||||
// LoadThunk returns a function that when called will block waiting for a FolderID.
|
||||
// This method should be used if you want one goroutine to make requests to many
|
||||
// different data loaders without blocking until the thunk is called.
|
||||
func (l *FolderRelatedFolderIDsLoader) LoadThunk(key models.FolderID) func() ([]models.FolderID, error) {
|
||||
l.mu.Lock()
|
||||
if it, ok := l.cache[key]; ok {
|
||||
l.mu.Unlock()
|
||||
return func() ([]models.FolderID, error) {
|
||||
return it, nil
|
||||
}
|
||||
}
|
||||
if l.batch == nil {
|
||||
l.batch = &folderParentFolderIDsLoaderBatch{done: make(chan struct{})}
|
||||
}
|
||||
batch := l.batch
|
||||
pos := batch.keyIndex(l, key)
|
||||
l.mu.Unlock()
|
||||
|
||||
return func() ([]models.FolderID, error) {
|
||||
<-batch.done
|
||||
|
||||
var data []models.FolderID
|
||||
if pos < len(batch.data) {
|
||||
data = batch.data[pos]
|
||||
}
|
||||
|
||||
var err error
|
||||
// its convenient to be able to return a single error for everything
|
||||
if len(batch.error) == 1 {
|
||||
err = batch.error[0]
|
||||
} else if batch.error != nil {
|
||||
err = batch.error[pos]
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
l.mu.Lock()
|
||||
l.unsafeSet(key, data)
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
return data, err
|
||||
}
|
||||
}
|
||||
|
||||
// LoadAll fetches many keys at once. It will be broken into appropriate sized
|
||||
// sub batches depending on how the loader is configured
|
||||
func (l *FolderRelatedFolderIDsLoader) LoadAll(keys []models.FolderID) ([][]models.FolderID, []error) {
|
||||
results := make([]func() ([]models.FolderID, error), len(keys))
|
||||
|
||||
for i, key := range keys {
|
||||
results[i] = l.LoadThunk(key)
|
||||
}
|
||||
|
||||
folderIDs := make([][]models.FolderID, len(keys))
|
||||
errors := make([]error, len(keys))
|
||||
for i, thunk := range results {
|
||||
folderIDs[i], errors[i] = thunk()
|
||||
}
|
||||
return folderIDs, errors
|
||||
}
|
||||
|
||||
// LoadAllThunk returns a function that when called will block waiting for a FolderIDs.
|
||||
// This method should be used if you want one goroutine to make requests to many
|
||||
// different data loaders without blocking until the thunk is called.
|
||||
func (l *FolderRelatedFolderIDsLoader) LoadAllThunk(keys []models.FolderID) func() ([][]models.FolderID, []error) {
|
||||
results := make([]func() ([]models.FolderID, error), len(keys))
|
||||
for i, key := range keys {
|
||||
results[i] = l.LoadThunk(key)
|
||||
}
|
||||
return func() ([][]models.FolderID, []error) {
|
||||
folderIDs := make([][]models.FolderID, len(keys))
|
||||
errors := make([]error, len(keys))
|
||||
for i, thunk := range results {
|
||||
folderIDs[i], errors[i] = thunk()
|
||||
}
|
||||
return folderIDs, errors
|
||||
}
|
||||
}
|
||||
|
||||
// Prime the cache with the provided key and value. If the key already exists, no change is made
|
||||
// and false is returned.
|
||||
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
|
||||
func (l *FolderRelatedFolderIDsLoader) Prime(key models.FolderID, value []models.FolderID) bool {
|
||||
l.mu.Lock()
|
||||
var found bool
|
||||
if _, found = l.cache[key]; !found {
|
||||
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
|
||||
// and end up with the whole cache pointing to the same value.
|
||||
cpy := make([]models.FolderID, len(value))
|
||||
copy(cpy, value)
|
||||
l.unsafeSet(key, cpy)
|
||||
}
|
||||
l.mu.Unlock()
|
||||
return !found
|
||||
}
|
||||
|
||||
// Clear the value at key from the cache, if it exists
|
||||
func (l *FolderRelatedFolderIDsLoader) Clear(key models.FolderID) {
|
||||
l.mu.Lock()
|
||||
delete(l.cache, key)
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
func (l *FolderRelatedFolderIDsLoader) unsafeSet(key models.FolderID, value []models.FolderID) {
|
||||
if l.cache == nil {
|
||||
l.cache = map[models.FolderID][]models.FolderID{}
|
||||
}
|
||||
l.cache[key] = value
|
||||
}
|
||||
|
||||
// keyIndex will return the location of the key in the batch, if its not found
|
||||
// it will add the key to the batch
|
||||
func (b *folderParentFolderIDsLoaderBatch) keyIndex(l *FolderRelatedFolderIDsLoader, key models.FolderID) int {
|
||||
for i, existingKey := range b.keys {
|
||||
if key == existingKey {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
pos := len(b.keys)
|
||||
b.keys = append(b.keys, key)
|
||||
if pos == 0 {
|
||||
go b.startTimer(l)
|
||||
}
|
||||
|
||||
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
|
||||
if !b.closing {
|
||||
b.closing = true
|
||||
l.batch = nil
|
||||
go b.end(l)
|
||||
}
|
||||
}
|
||||
|
||||
return pos
|
||||
}
|
||||
|
||||
func (b *folderParentFolderIDsLoaderBatch) startTimer(l *FolderRelatedFolderIDsLoader) {
|
||||
time.Sleep(l.wait)
|
||||
l.mu.Lock()
|
||||
|
||||
// we must have hit a batch limit and are already finalizing this batch
|
||||
if b.closing {
|
||||
l.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
l.batch = nil
|
||||
l.mu.Unlock()
|
||||
|
||||
b.end(l)
|
||||
}
|
||||
|
||||
func (b *folderParentFolderIDsLoaderBatch) end(l *FolderRelatedFolderIDsLoader) {
|
||||
b.data, b.error = l.fetch(b.keys)
|
||||
close(b.done)
|
||||
}
|
||||
|
|
@ -7,6 +7,7 @@ import (
|
|||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
"github.com/stashapp/stash/internal/build"
|
||||
"github.com/stashapp/stash/internal/manager"
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
|
|
@ -145,6 +146,13 @@ func (r *Resolver) withReadTxn(ctx context.Context, fn func(ctx context.Context)
|
|||
return r.repository.WithReadTxn(ctx, fn)
|
||||
}
|
||||
|
||||
// idOnly returns true if the query is only asking for the id field.
|
||||
// This can be used to optimize certain queries where we don't need to load the full object if we're only getting the id.
|
||||
func (r *Resolver) idOnly(ctx context.Context) bool {
|
||||
fields := graphql.CollectAllFields(ctx)
|
||||
return len(fields) == 1 && fields[0] == "id"
|
||||
}
|
||||
|
||||
func (r *queryResolver) MarkerWall(ctx context.Context, q *string) (ret []*models.SceneMarker, err error) {
|
||||
if err := r.withReadTxn(ctx, func(ctx context.Context) error {
|
||||
ret, err = r.repository.SceneMarker.Wall(ctx, q)
|
||||
|
|
|
|||
|
|
@ -2,19 +2,77 @@ package api
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/stashapp/stash/internal/api/loaders"
|
||||
"github.com/stashapp/stash/pkg/models"
|
||||
)
|
||||
|
||||
func (r *folderResolver) Basename(ctx context.Context, obj *models.Folder) (string, error) {
|
||||
return filepath.Base(obj.Path), nil
|
||||
}
|
||||
|
||||
func (r *folderResolver) ParentFolder(ctx context.Context, obj *models.Folder) (*models.Folder, error) {
|
||||
if obj.ParentFolderID == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if r.idOnly(ctx) {
|
||||
return &models.Folder{ID: *obj.ParentFolderID}, nil
|
||||
}
|
||||
|
||||
return loaders.From(ctx).FolderByID.Load(*obj.ParentFolderID)
|
||||
}
|
||||
|
||||
func foldersFromIDs(ids []models.FolderID) []*models.Folder {
|
||||
ret := make([]*models.Folder, len(ids))
|
||||
for i, id := range ids {
|
||||
ret[i] = &models.Folder{ID: id}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (r *folderResolver) ParentFolders(ctx context.Context, obj *models.Folder) ([]*models.Folder, error) {
|
||||
ids, err := loaders.From(ctx).FolderParentFolderIDs.Load(obj.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.idOnly(ctx) {
|
||||
return foldersFromIDs(ids), nil
|
||||
}
|
||||
|
||||
var errs []error
|
||||
ret, errs := loaders.From(ctx).FolderByID.LoadAll(ids)
|
||||
return ret, firstError(errs)
|
||||
}
|
||||
|
||||
func (r *folderResolver) SubFolders(ctx context.Context, obj *models.Folder) ([]*models.Folder, error) {
|
||||
ids, err := loaders.From(ctx).FolderSubFolderIDs.Load(obj.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.idOnly(ctx) {
|
||||
return foldersFromIDs(ids), nil
|
||||
}
|
||||
|
||||
var errs []error
|
||||
ret, errs := loaders.From(ctx).FolderByID.LoadAll(ids)
|
||||
return ret, firstError(errs)
|
||||
}
|
||||
|
||||
func (r *folderResolver) ZipFile(ctx context.Context, obj *models.Folder) (*BasicFile, error) {
|
||||
// shortcut for id only queries
|
||||
if r.idOnly(ctx) {
|
||||
if obj.ZipFileID == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &BasicFile{
|
||||
BaseFile: &models.BaseFile{ID: *obj.ZipFileID},
|
||||
}, nil
|
||||
}
|
||||
|
||||
return zipFileResolver(ctx, obj.ZipFileID)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -216,3 +216,16 @@ func (r *galleryResolver) Image(ctx context.Context, obj *models.Gallery, index
|
|||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *galleryResolver) CustomFields(ctx context.Context, obj *models.Gallery) (map[string]interface{}, error) {
|
||||
m, err := loaders.From(ctx).GalleryCustomFields.Load(obj.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return make(map[string]interface{}), nil
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -161,3 +161,12 @@ func (r *imageResolver) Urls(ctx context.Context, obj *models.Image) ([]string,
|
|||
|
||||
return obj.URLs.List(), nil
|
||||
}
|
||||
|
||||
func (r *imageResolver) CustomFields(ctx context.Context, obj *models.Image) (map[string]interface{}, error) {
|
||||
customFields, err := loaders.From(ctx).ImageCustomFields.Load(obj.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return customFields, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -215,3 +215,16 @@ func (r *groupResolver) OCounter(ctx context.Context, obj *models.Group) (ret *i
|
|||
}
|
||||
return &count, nil
|
||||
}
|
||||
|
||||
func (r *groupResolver) CustomFields(ctx context.Context, obj *models.Group) (map[string]interface{}, error) {
|
||||
m, err := loaders.From(ctx).GroupCustomFields.Load(obj.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return make(map[string]interface{}), nil
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -109,6 +109,31 @@ func (r *performerResolver) HeightCm(ctx context.Context, obj *models.Performer)
|
|||
return obj.Height, nil
|
||||
}
|
||||
|
||||
func (r *performerResolver) CareerStart(ctx context.Context, obj *models.Performer) (*string, error) {
|
||||
if obj.CareerStart != nil {
|
||||
ret := obj.CareerStart.String()
|
||||
return &ret, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *performerResolver) CareerEnd(ctx context.Context, obj *models.Performer) (*string, error) {
|
||||
if obj.CareerEnd != nil {
|
||||
ret := obj.CareerEnd.String()
|
||||
return &ret, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *performerResolver) CareerLength(ctx context.Context, obj *models.Performer) (*string, error) {
|
||||
if obj.CareerStart == nil && obj.CareerEnd == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ret := models.FormatYearRange(obj.CareerStart, obj.CareerEnd)
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (r *performerResolver) Birthdate(ctx context.Context, obj *models.Performer) (*string, error) {
|
||||
if obj.Birthdate != nil {
|
||||
ret := obj.Birthdate.String()
|
||||
|
|
|
|||
|
|
@ -122,7 +122,7 @@ func (r *sceneResolver) Paths(ctx context.Context, obj *models.Scene) (*ScenePat
|
|||
objHash := obj.GetHash(config.GetVideoFileNamingAlgorithm())
|
||||
vttPath := builder.GetSpriteVTTURL(objHash)
|
||||
spritePath := builder.GetSpriteURL(objHash)
|
||||
funscriptPath := builder.GetFunscriptURL()
|
||||
funscriptPath := builder.GetFunscriptURL(config.GetAPIKey()).String()
|
||||
captionBasePath := builder.GetCaptionURL()
|
||||
interactiveHeatmap := builder.GetInteractiveHeatmapURL()
|
||||
|
||||
|
|
@ -418,3 +418,16 @@ func (r *sceneResolver) OHistory(ctx context.Context, obj *models.Scene) ([]*tim
|
|||
|
||||
return ptrRet, nil
|
||||
}
|
||||
|
||||
func (r *sceneResolver) CustomFields(ctx context.Context, obj *models.Scene) (map[string]interface{}, error) {
|
||||
m, err := loaders.From(ctx).SceneCustomFields.Load(obj.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return make(map[string]interface{}), nil
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
|
@ -85,6 +86,8 @@ func (r *mutationResolver) setConfigFloat(key string, value *float64) {
|
|||
func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGeneralInput) (*ConfigGeneralResult, error) {
|
||||
c := config.GetInstance()
|
||||
|
||||
// #4709 - allow stash paths even if they do not exist, so that users may configure stash
|
||||
// for disconnected drives or network storage.
|
||||
existingPaths := c.GetStashPaths()
|
||||
if input.Stashes != nil {
|
||||
for _, s := range input.Stashes {
|
||||
|
|
@ -97,8 +100,12 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGen
|
|||
}
|
||||
}
|
||||
if isNew {
|
||||
s.Path = filepath.Clean(s.Path)
|
||||
|
||||
// if it exists, it must be directory
|
||||
exists, err := fsutil.DirExists(s.Path)
|
||||
if !exists {
|
||||
// allow it to not exist but if it does exist it must be a directory
|
||||
if !exists && !errors.Is(err, fs.ErrNotExist) {
|
||||
return makeConfigGeneralResult(), err
|
||||
}
|
||||
}
|
||||
|
|
@ -287,6 +294,11 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGen
|
|||
if input.PreviewPreset != nil {
|
||||
c.SetString(config.PreviewPreset, input.PreviewPreset.String())
|
||||
}
|
||||
r.setConfigBool(config.UseCustomSpriteInterval, input.UseCustomSpriteInterval)
|
||||
r.setConfigFloat(config.SpriteInterval, input.SpriteInterval)
|
||||
r.setConfigInt(config.MinimumSprites, input.MinimumSprites)
|
||||
r.setConfigInt(config.MaximumSprites, input.MaximumSprites)
|
||||
r.setConfigInt(config.SpriteScreenshotSize, input.SpriteScreenshotSize)
|
||||
|
||||
r.setConfigBool(config.TranscodeHardwareAcceleration, input.TranscodeHardwareAcceleration)
|
||||
if input.MaxTranscodeSize != nil {
|
||||
|
|
|
|||
|
|
@ -5,10 +5,14 @@ import (
|
|||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/stashapp/stash/internal/desktop"
|
||||
"github.com/stashapp/stash/internal/manager"
|
||||
"github.com/stashapp/stash/internal/manager/config"
|
||||
"github.com/stashapp/stash/pkg/file"
|
||||
"github.com/stashapp/stash/pkg/fsutil"
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
"github.com/stashapp/stash/pkg/models"
|
||||
"github.com/stashapp/stash/pkg/session"
|
||||
"github.com/stashapp/stash/pkg/sliceutil/stringslice"
|
||||
)
|
||||
|
||||
|
|
@ -16,7 +20,7 @@ func (r *mutationResolver) MoveFiles(ctx context.Context, input MoveFilesInput)
|
|||
if err := r.withTxn(ctx, func(ctx context.Context) error {
|
||||
fileStore := r.repository.File
|
||||
folderStore := r.repository.Folder
|
||||
mover := file.NewMover(fileStore, folderStore)
|
||||
mover := file.NewMover(fileStore, folderStore, manager.GetInstance().Config.GetStashPaths().Paths())
|
||||
mover.RegisterHooks(ctx)
|
||||
|
||||
var (
|
||||
|
|
@ -54,13 +58,14 @@ func (r *mutationResolver) MoveFiles(ctx context.Context, input MoveFilesInput)
|
|||
folderPath := *input.DestinationFolder
|
||||
|
||||
// ensure folder path is within the library
|
||||
if err := r.validateFolderPath(folderPath); err != nil {
|
||||
stashPaths := manager.GetInstance().Config.GetStashPaths()
|
||||
if err := r.validateFolderPath(stashPaths, folderPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get or create folder hierarchy
|
||||
var err error
|
||||
folder, err = file.GetOrCreateFolderHierarchy(ctx, folderStore, folderPath)
|
||||
folder, err = file.GetOrCreateFolderHierarchy(ctx, folderStore, folderPath, stashPaths.Paths())
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting or creating folder hierarchy: %w", err)
|
||||
}
|
||||
|
|
@ -109,8 +114,7 @@ func (r *mutationResolver) MoveFiles(ctx context.Context, input MoveFilesInput)
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) validateFolderPath(folderPath string) error {
|
||||
paths := manager.GetInstance().Config.GetStashPaths()
|
||||
func (r *mutationResolver) validateFolderPath(paths config.StashConfigs, folderPath string) error {
|
||||
if l := paths.GetStashFromDirPath(folderPath); l == nil {
|
||||
return fmt.Errorf("folder path %s must be within a stash library path", folderPath)
|
||||
}
|
||||
|
|
@ -326,3 +330,71 @@ func (r *mutationResolver) FileSetFingerprints(ctx context.Context, input FileSe
|
|||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) RevealFileInFileManager(ctx context.Context, id string) (bool, error) {
|
||||
// disallow if request did not come from localhost
|
||||
if !session.IsLocalRequest(ctx) {
|
||||
logger.Warnf("Attempt to reveal file in file manager from non-local request")
|
||||
return false, fmt.Errorf("access denied")
|
||||
}
|
||||
|
||||
fileIDInt, err := strconv.Atoi(id)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("converting id: %w", err)
|
||||
}
|
||||
|
||||
var filePath string
|
||||
if err := r.withReadTxn(ctx, func(ctx context.Context) error {
|
||||
files, err := r.repository.File.Find(ctx, models.FileID(fileIDInt))
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding file: %w", err)
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return fmt.Errorf("file with id %d not found", fileIDInt)
|
||||
}
|
||||
filePath = files[0].Base().Path
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := desktop.RevealInFileManager(filePath); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) RevealFolderInFileManager(ctx context.Context, id string) (bool, error) {
|
||||
// disallow if request did not come from localhost
|
||||
if !session.IsLocalRequest(ctx) {
|
||||
logger.Warnf("Attempt to reveal folder in file manager from non-local request")
|
||||
return false, fmt.Errorf("access denied")
|
||||
}
|
||||
|
||||
folderIDInt, err := strconv.Atoi(id)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("converting id: %w", err)
|
||||
}
|
||||
|
||||
var folderPath string
|
||||
if err := r.withReadTxn(ctx, func(ctx context.Context) error {
|
||||
folder, err := r.repository.Folder.Find(ctx, models.FolderID(folderIDInt))
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding folder: %w", err)
|
||||
}
|
||||
if folder == nil {
|
||||
return fmt.Errorf("folder with id %d not found", folderIDInt)
|
||||
}
|
||||
folderPath = folder.Path
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := desktop.RevealInFileManager(folderPath); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -42,7 +42,10 @@ func (r *mutationResolver) GalleryCreate(ctx context.Context, input GalleryCreat
|
|||
}
|
||||
|
||||
// Populate a new gallery from the input
|
||||
newGallery := models.NewGallery()
|
||||
newGallery := models.CreateGalleryInput{
|
||||
Gallery: &models.Gallery{},
|
||||
}
|
||||
*newGallery.Gallery = models.NewGallery()
|
||||
|
||||
newGallery.Title = strings.TrimSpace(input.Title)
|
||||
newGallery.Code = translator.string(input.Code)
|
||||
|
|
@ -81,10 +84,12 @@ func (r *mutationResolver) GalleryCreate(ctx context.Context, input GalleryCreat
|
|||
newGallery.URLs = models.NewRelatedStrings([]string{strings.TrimSpace(*input.URL)})
|
||||
}
|
||||
|
||||
newGallery.CustomFields = convertMapJSONNumbers(input.CustomFields)
|
||||
|
||||
// Start the transaction and save the gallery
|
||||
if err := r.withTxn(ctx, func(ctx context.Context) error {
|
||||
qb := r.repository.Gallery
|
||||
if err := qb.Create(ctx, &newGallery, nil); err != nil {
|
||||
if err := qb.Create(ctx, &newGallery); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -241,6 +246,10 @@ func (r *mutationResolver) galleryUpdate(ctx context.Context, input models.Galle
|
|||
return nil, fmt.Errorf("converting scene ids: %w", err)
|
||||
}
|
||||
|
||||
if input.CustomFields != nil {
|
||||
updatedGallery.CustomFields = handleUpdateCustomFields(*input.CustomFields)
|
||||
}
|
||||
|
||||
// gallery scene is set from the scene only
|
||||
|
||||
gallery, err := qb.UpdatePartial(ctx, galleryID, updatedGallery)
|
||||
|
|
@ -293,6 +302,10 @@ func (r *mutationResolver) BulkGalleryUpdate(ctx context.Context, input BulkGall
|
|||
return nil, fmt.Errorf("converting scene ids: %w", err)
|
||||
}
|
||||
|
||||
if input.CustomFields != nil {
|
||||
updatedGallery.CustomFields = handleUpdateCustomFields(*input.CustomFields)
|
||||
}
|
||||
|
||||
ret := []*models.Gallery{}
|
||||
|
||||
// Start the transaction and save the galleries
|
||||
|
|
|
|||
|
|
@ -14,13 +14,17 @@ import (
|
|||
"github.com/stashapp/stash/pkg/utils"
|
||||
)
|
||||
|
||||
func groupFromGroupCreateInput(ctx context.Context, input GroupCreateInput) (*models.Group, error) {
|
||||
func groupFromGroupCreateInput(ctx context.Context, input GroupCreateInput) (*models.CreateGroupInput, error) {
|
||||
translator := changesetTranslator{
|
||||
inputMap: getUpdateInputMap(ctx),
|
||||
}
|
||||
|
||||
// Populate a new group from the input
|
||||
newGroup := models.NewGroup()
|
||||
newGroupInput := &models.CreateGroupInput{
|
||||
Group: &models.Group{},
|
||||
}
|
||||
*newGroupInput.Group = models.NewGroup()
|
||||
newGroup := newGroupInput.Group
|
||||
|
||||
newGroup.Name = strings.TrimSpace(input.Name)
|
||||
newGroup.Aliases = translator.string(input.Aliases)
|
||||
|
|
@ -59,28 +63,19 @@ func groupFromGroupCreateInput(ctx context.Context, input GroupCreateInput) (*mo
|
|||
newGroup.URLs = models.NewRelatedStrings(stringslice.TrimSpace(input.Urls))
|
||||
}
|
||||
|
||||
return &newGroup, nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) GroupCreate(ctx context.Context, input GroupCreateInput) (*models.Group, error) {
|
||||
newGroup, err := groupFromGroupCreateInput(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newGroupInput.CustomFields = convertMapJSONNumbers(input.CustomFields)
|
||||
|
||||
// Process the base 64 encoded image string
|
||||
var frontimageData []byte
|
||||
if input.FrontImage != nil {
|
||||
frontimageData, err = utils.ProcessImageInput(ctx, *input.FrontImage)
|
||||
newGroupInput.FrontImageData, err = utils.ProcessImageInput(ctx, *input.FrontImage)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("processing front image: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Process the base 64 encoded image string
|
||||
var backimageData []byte
|
||||
if input.BackImage != nil {
|
||||
backimageData, err = utils.ProcessImageInput(ctx, *input.BackImage)
|
||||
newGroupInput.BackImageData, err = utils.ProcessImageInput(ctx, *input.BackImage)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("processing back image: %w", err)
|
||||
}
|
||||
|
|
@ -88,13 +83,22 @@ func (r *mutationResolver) GroupCreate(ctx context.Context, input GroupCreateInp
|
|||
|
||||
// HACK: if back image is being set, set the front image to the default.
|
||||
// This is because we can't have a null front image with a non-null back image.
|
||||
if len(frontimageData) == 0 && len(backimageData) != 0 {
|
||||
frontimageData = static.ReadAll(static.DefaultGroupImage)
|
||||
if len(newGroupInput.FrontImageData) == 0 && len(newGroupInput.BackImageData) != 0 {
|
||||
newGroupInput.FrontImageData = static.ReadAll(static.DefaultGroupImage)
|
||||
}
|
||||
|
||||
return newGroupInput, nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) GroupCreate(ctx context.Context, input GroupCreateInput) (*models.Group, error) {
|
||||
createGroupInput, err := groupFromGroupCreateInput(ctx, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Start the transaction and save the group
|
||||
if err := r.withTxn(ctx, func(ctx context.Context) error {
|
||||
if err = r.groupService.Create(ctx, newGroup, frontimageData, backimageData); err != nil {
|
||||
if err = r.groupService.Create(ctx, createGroupInput); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -104,9 +108,9 @@ func (r *mutationResolver) GroupCreate(ctx context.Context, input GroupCreateInp
|
|||
}
|
||||
|
||||
// for backwards compatibility - run both movie and group hooks
|
||||
r.hookExecutor.ExecutePostHooks(ctx, newGroup.ID, hook.GroupCreatePost, input, nil)
|
||||
r.hookExecutor.ExecutePostHooks(ctx, newGroup.ID, hook.MovieCreatePost, input, nil)
|
||||
return r.getGroup(ctx, newGroup.ID)
|
||||
r.hookExecutor.ExecutePostHooks(ctx, createGroupInput.Group.ID, hook.GroupCreatePost, input, nil)
|
||||
r.hookExecutor.ExecutePostHooks(ctx, createGroupInput.Group.ID, hook.MovieCreatePost, input, nil)
|
||||
return r.getGroup(ctx, createGroupInput.Group.ID)
|
||||
}
|
||||
|
||||
func groupPartialFromGroupUpdateInput(translator changesetTranslator, input GroupUpdateInput) (ret models.GroupPartial, err error) {
|
||||
|
|
@ -150,6 +154,12 @@ func groupPartialFromGroupUpdateInput(translator changesetTranslator, input Grou
|
|||
}
|
||||
|
||||
updatedGroup.URLs = translator.updateStrings(input.Urls, "urls")
|
||||
if input.CustomFields != nil {
|
||||
updatedGroup.CustomFields = *input.CustomFields
|
||||
// convert json.Numbers to int/float
|
||||
updatedGroup.CustomFields.Full = convertMapJSONNumbers(updatedGroup.CustomFields.Full)
|
||||
updatedGroup.CustomFields.Partial = convertMapJSONNumbers(updatedGroup.CustomFields.Partial)
|
||||
}
|
||||
|
||||
return updatedGroup, nil
|
||||
}
|
||||
|
|
@ -217,6 +227,12 @@ func (r *mutationResolver) GroupUpdate(ctx context.Context, input GroupUpdateInp
|
|||
func groupPartialFromBulkGroupUpdateInput(translator changesetTranslator, input BulkGroupUpdateInput) (ret models.GroupPartial, err error) {
|
||||
updatedGroup := models.NewGroupPartial()
|
||||
|
||||
updatedGroup.Date, err = translator.optionalDate(input.Date, "date")
|
||||
if err != nil {
|
||||
err = fmt.Errorf("converting date: %w", err)
|
||||
return
|
||||
}
|
||||
updatedGroup.Synopsis = translator.optionalString(input.Synopsis, "synopsis")
|
||||
updatedGroup.Rating = translator.optionalInt(input.Rating100, "rating100")
|
||||
updatedGroup.Director = translator.optionalString(input.Director, "director")
|
||||
|
||||
|
|
@ -246,6 +262,13 @@ func groupPartialFromBulkGroupUpdateInput(translator changesetTranslator, input
|
|||
|
||||
updatedGroup.URLs = translator.optionalURLsBulk(input.Urls, nil)
|
||||
|
||||
if input.CustomFields != nil {
|
||||
updatedGroup.CustomFields = *input.CustomFields
|
||||
// convert json.Numbers to int/float
|
||||
updatedGroup.CustomFields.Full = convertMapJSONNumbers(updatedGroup.CustomFields.Full)
|
||||
updatedGroup.CustomFields.Partial = convertMapJSONNumbers(updatedGroup.CustomFields.Partial)
|
||||
}
|
||||
|
||||
return updatedGroup, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -177,6 +177,13 @@ func (r *mutationResolver) imageUpdate(ctx context.Context, input models.ImageUp
|
|||
return nil, fmt.Errorf("converting tag ids: %w", err)
|
||||
}
|
||||
|
||||
if input.CustomFields != nil {
|
||||
updatedImage.CustomFields = *input.CustomFields
|
||||
// convert json.Numbers to int/float
|
||||
updatedImage.CustomFields.Full = convertMapJSONNumbers(updatedImage.CustomFields.Full)
|
||||
updatedImage.CustomFields.Partial = convertMapJSONNumbers(updatedImage.CustomFields.Partial)
|
||||
}
|
||||
|
||||
qb := r.repository.Image
|
||||
image, err := qb.UpdatePartial(ctx, imageID, updatedImage)
|
||||
if err != nil {
|
||||
|
|
@ -237,6 +244,13 @@ func (r *mutationResolver) BulkImageUpdate(ctx context.Context, input BulkImageU
|
|||
return nil, fmt.Errorf("converting tag ids: %w", err)
|
||||
}
|
||||
|
||||
if input.CustomFields != nil {
|
||||
updatedImage.CustomFields = *input.CustomFields
|
||||
// convert json.Numbers to int/float
|
||||
updatedImage.CustomFields.Full = convertMapJSONNumbers(updatedImage.CustomFields.Full)
|
||||
updatedImage.CustomFields.Partial = convertMapJSONNumbers(updatedImage.CustomFields.Partial)
|
||||
}
|
||||
|
||||
// Start the transaction and save the images
|
||||
if err := r.withTxn(ctx, func(ctx context.Context) error {
|
||||
var updatedGalleryIDs []int
|
||||
|
|
|
|||
|
|
@ -122,9 +122,10 @@ func (r *mutationResolver) MigrateHashNaming(ctx context.Context) (string, error
|
|||
func (r *mutationResolver) BackupDatabase(ctx context.Context, input BackupDatabaseInput) (*string, error) {
|
||||
// if download is true, then backup to temporary file and return a link
|
||||
download := input.Download != nil && *input.Download
|
||||
includeBlobs := input.IncludeBlobs != nil && *input.IncludeBlobs
|
||||
mgr := manager.GetInstance()
|
||||
|
||||
backupPath, backupName, err := mgr.BackupDatabase(download)
|
||||
backupPath, backupName, err := mgr.BackupDatabase(download, includeBlobs)
|
||||
if err != nil {
|
||||
logger.Errorf("Error backing up database: %v", err)
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -47,6 +47,10 @@ func (r *mutationResolver) Migrate(ctx context.Context, input manager.MigrateInp
|
|||
Database: mgr.Database,
|
||||
}
|
||||
|
||||
if err := t.PreExecute(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
jobID := mgr.JobManager.Add(ctx, "Migrating database...", t)
|
||||
|
||||
return strconv.Itoa(jobID), nil
|
||||
|
|
|
|||
|
|
@ -52,7 +52,6 @@ func (r *mutationResolver) PerformerCreate(ctx context.Context, input models.Per
|
|||
newPerformer.FakeTits = translator.string(input.FakeTits)
|
||||
newPerformer.PenisLength = input.PenisLength
|
||||
newPerformer.Circumcised = input.Circumcised
|
||||
newPerformer.CareerLength = translator.string(input.CareerLength)
|
||||
newPerformer.Tattoos = translator.string(input.Tattoos)
|
||||
newPerformer.Piercings = translator.string(input.Piercings)
|
||||
newPerformer.Favorite = translator.bool(input.Favorite)
|
||||
|
|
@ -90,6 +89,25 @@ func (r *mutationResolver) PerformerCreate(ctx context.Context, input models.Per
|
|||
return nil, fmt.Errorf("converting death date: %w", err)
|
||||
}
|
||||
|
||||
newPerformer.CareerStart, err = translator.datePtr(input.CareerStart)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting career start: %w", err)
|
||||
}
|
||||
newPerformer.CareerEnd, err = translator.datePtr(input.CareerEnd)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting career end: %w", err)
|
||||
}
|
||||
|
||||
// if career_start/career_end not provided, parse deprecated career_length
|
||||
if newPerformer.CareerStart == nil && newPerformer.CareerEnd == nil && input.CareerLength != nil {
|
||||
start, end, err := models.ParseYearRangeString(*input.CareerLength)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse career_length %q: %w", *input.CareerLength, err)
|
||||
}
|
||||
newPerformer.CareerStart = start
|
||||
newPerformer.CareerEnd = end
|
||||
}
|
||||
|
||||
newPerformer.TagIDs, err = translator.relatedIds(input.TagIds)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting tag ids: %w", err)
|
||||
|
|
@ -261,7 +279,29 @@ func performerPartialFromInput(input models.PerformerUpdateInput, translator cha
|
|||
updatedPerformer.FakeTits = translator.optionalString(input.FakeTits, "fake_tits")
|
||||
updatedPerformer.PenisLength = translator.optionalFloat64(input.PenisLength, "penis_length")
|
||||
updatedPerformer.Circumcised = translator.optionalString((*string)(input.Circumcised), "circumcised")
|
||||
updatedPerformer.CareerLength = translator.optionalString(input.CareerLength, "career_length")
|
||||
// prefer career_start/career_end over deprecated career_length
|
||||
if translator.hasField("career_start") || translator.hasField("career_end") {
|
||||
var err error
|
||||
updatedPerformer.CareerStart, err = translator.optionalDate(input.CareerStart, "career_start")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting career start: %w", err)
|
||||
}
|
||||
updatedPerformer.CareerEnd, err = translator.optionalDate(input.CareerEnd, "career_end")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting career end: %w", err)
|
||||
}
|
||||
} else if translator.hasField("career_length") && input.CareerLength != nil {
|
||||
start, end, err := models.ParseYearRangeString(*input.CareerLength)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse career_length %q: %w", *input.CareerLength, err)
|
||||
}
|
||||
if start != nil {
|
||||
updatedPerformer.CareerStart = models.NewOptionalDate(*start)
|
||||
}
|
||||
if end != nil {
|
||||
updatedPerformer.CareerEnd = models.NewOptionalDate(*end)
|
||||
}
|
||||
}
|
||||
updatedPerformer.Tattoos = translator.optionalString(input.Tattoos, "tattoos")
|
||||
updatedPerformer.Piercings = translator.optionalString(input.Piercings, "piercings")
|
||||
updatedPerformer.Favorite = translator.optionalBool(input.Favorite, "favorite")
|
||||
|
|
@ -417,7 +457,28 @@ func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input BulkPe
|
|||
updatedPerformer.FakeTits = translator.optionalString(input.FakeTits, "fake_tits")
|
||||
updatedPerformer.PenisLength = translator.optionalFloat64(input.PenisLength, "penis_length")
|
||||
updatedPerformer.Circumcised = translator.optionalString((*string)(input.Circumcised), "circumcised")
|
||||
updatedPerformer.CareerLength = translator.optionalString(input.CareerLength, "career_length")
|
||||
// prefer career_start/career_end over deprecated career_length
|
||||
if translator.hasField("career_start") || translator.hasField("career_end") {
|
||||
updatedPerformer.CareerStart, err = translator.optionalDate(input.CareerStart, "career_start")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting career start: %w", err)
|
||||
}
|
||||
updatedPerformer.CareerEnd, err = translator.optionalDate(input.CareerEnd, "career_end")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting career end: %w", err)
|
||||
}
|
||||
} else if translator.hasField("career_length") && input.CareerLength != nil {
|
||||
start, end, err := models.ParseYearRangeString(*input.CareerLength)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse career_length %q: %w", *input.CareerLength, err)
|
||||
}
|
||||
if start != nil {
|
||||
updatedPerformer.CareerStart = models.NewOptionalDate(*start)
|
||||
}
|
||||
if end != nil {
|
||||
updatedPerformer.CareerEnd = models.NewOptionalDate(*end)
|
||||
}
|
||||
}
|
||||
updatedPerformer.Tattoos = translator.optionalString(input.Tattoos, "tattoos")
|
||||
updatedPerformer.Piercings = translator.optionalString(input.Piercings, "piercings")
|
||||
|
||||
|
|
|
|||
|
|
@ -107,8 +107,15 @@ func (r *mutationResolver) SceneCreate(ctx context.Context, input models.SceneCr
|
|||
}
|
||||
}
|
||||
|
||||
customFields := convertMapJSONNumbers(input.CustomFields)
|
||||
|
||||
if err := r.withTxn(ctx, func(ctx context.Context) error {
|
||||
ret, err = r.Resolver.sceneService.Create(ctx, &newScene, fileIDs, coverImageData)
|
||||
ret, err = r.Resolver.sceneService.Create(ctx, models.CreateSceneInput{
|
||||
Scene: &newScene,
|
||||
FileIDs: fileIDs,
|
||||
CoverImage: coverImageData,
|
||||
CustomFields: customFields,
|
||||
})
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
|
|
@ -314,6 +321,15 @@ func (r *mutationResolver) sceneUpdate(ctx context.Context, input models.SceneUp
|
|||
}
|
||||
}
|
||||
|
||||
var customFields *models.CustomFieldsInput
|
||||
if input.CustomFields != nil {
|
||||
cfCopy := *input.CustomFields
|
||||
customFields = &cfCopy
|
||||
// convert json.Numbers to int/float
|
||||
customFields.Full = convertMapJSONNumbers(customFields.Full)
|
||||
customFields.Partial = convertMapJSONNumbers(customFields.Partial)
|
||||
}
|
||||
|
||||
scene, err := qb.UpdatePartial(ctx, sceneID, *updatedScene)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -325,6 +341,12 @@ func (r *mutationResolver) sceneUpdate(ctx context.Context, input models.SceneUp
|
|||
}
|
||||
}
|
||||
|
||||
if customFields != nil {
|
||||
if err := qb.SetCustomFields(ctx, scene.ID, *customFields); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return scene, nil
|
||||
}
|
||||
|
||||
|
|
@ -399,6 +421,12 @@ func (r *mutationResolver) BulkSceneUpdate(ctx context.Context, input BulkSceneU
|
|||
}
|
||||
}
|
||||
|
||||
var customFields *models.CustomFieldsInput
|
||||
if input.CustomFields != nil {
|
||||
cf := handleUpdateCustomFields(*input.CustomFields)
|
||||
customFields = &cf
|
||||
}
|
||||
|
||||
ret := []*models.Scene{}
|
||||
|
||||
// Start the transaction and save the scenes
|
||||
|
|
@ -411,6 +439,12 @@ func (r *mutationResolver) BulkSceneUpdate(ctx context.Context, input BulkSceneU
|
|||
return err
|
||||
}
|
||||
|
||||
if customFields != nil {
|
||||
if err := qb.SetCustomFields(ctx, scene.ID, *customFields); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ret = append(ret, scene)
|
||||
}
|
||||
|
||||
|
|
@ -587,6 +621,7 @@ func (r *mutationResolver) SceneMerge(ctx context.Context, input SceneMergeInput
|
|||
|
||||
var values *models.ScenePartial
|
||||
var coverImageData []byte
|
||||
var customFields *models.CustomFieldsInput
|
||||
|
||||
if input.Values != nil {
|
||||
translator := changesetTranslator{
|
||||
|
|
@ -605,6 +640,11 @@ func (r *mutationResolver) SceneMerge(ctx context.Context, input SceneMergeInput
|
|||
return nil, fmt.Errorf("processing cover image: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if input.Values.CustomFields != nil {
|
||||
cf := handleUpdateCustomFields(*input.Values.CustomFields)
|
||||
customFields = &cf
|
||||
}
|
||||
} else {
|
||||
v := models.NewScenePartial()
|
||||
values = &v
|
||||
|
|
@ -638,7 +678,15 @@ func (r *mutationResolver) SceneMerge(ctx context.Context, input SceneMergeInput
|
|||
|
||||
// only update cover image if one was provided
|
||||
if len(coverImageData) > 0 {
|
||||
return r.sceneUpdateCoverImage(ctx, ret, coverImageData)
|
||||
if err := r.sceneUpdateCoverImage(ctx, ret, coverImageData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if customFields != nil {
|
||||
if err := r.Resolver.repository.Scene.SetCustomFields(ctx, ret.ID, *customFields); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -58,6 +58,16 @@ func (r *mutationResolver) StashBoxBatchStudioTag(ctx context.Context, input man
|
|||
return strconv.Itoa(jobID), nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) StashBoxBatchTagTag(ctx context.Context, input manager.StashBoxBatchTagInput) (string, error) {
|
||||
b, err := resolveStashBoxBatchTagInput(input.Endpoint, input.StashBoxEndpoint) //nolint:staticcheck
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
jobID := manager.GetInstance().StashBoxBatchTagTag(ctx, b, input)
|
||||
return strconv.Itoa(jobID), nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) SubmitStashBoxSceneDraft(ctx context.Context, input StashBoxDraftSubmissionInput) (*string, error) {
|
||||
b, err := resolveStashBox(input.StashBoxIndex, input.StashBoxEndpoint)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ func (r *mutationResolver) StudioCreate(ctx context.Context, input models.Studio
|
|||
newStudio.Favorite = translator.bool(input.Favorite)
|
||||
newStudio.Details = translator.string(input.Details)
|
||||
newStudio.IgnoreAutoTag = translator.bool(input.IgnoreAutoTag)
|
||||
newStudio.Organized = translator.bool(input.Organized)
|
||||
newStudio.Aliases = models.NewRelatedStrings(stringslice.UniqueExcludeFold(stringslice.TrimSpace(input.Aliases), newStudio.Name))
|
||||
newStudio.StashIDs = models.NewRelatedStashIDs(models.StashIDInputs(input.StashIds).ToStashIDs())
|
||||
|
||||
|
|
@ -120,6 +121,7 @@ func (r *mutationResolver) StudioUpdate(ctx context.Context, input models.Studio
|
|||
updatedStudio.Rating = translator.optionalInt(input.Rating100, "rating100")
|
||||
updatedStudio.Favorite = translator.optionalBool(input.Favorite, "favorite")
|
||||
updatedStudio.IgnoreAutoTag = translator.optionalBool(input.IgnoreAutoTag, "ignore_auto_tag")
|
||||
updatedStudio.Organized = translator.optionalBool(input.Organized, "organized")
|
||||
updatedStudio.Aliases = translator.updateStrings(input.Aliases, "aliases")
|
||||
updatedStudio.StashIDs = translator.updateStashIDs(input.StashIds, "stash_ids")
|
||||
|
||||
|
|
@ -261,6 +263,7 @@ func (r *mutationResolver) BulkStudioUpdate(ctx context.Context, input BulkStudi
|
|||
partial.Rating = translator.optionalInt(input.Rating100, "rating100")
|
||||
partial.Details = translator.optionalString(input.Details, "details")
|
||||
partial.IgnoreAutoTag = translator.optionalBool(input.IgnoreAutoTag, "ignore_auto_tag")
|
||||
partial.Organized = translator.optionalBool(input.Organized, "organized")
|
||||
|
||||
partial.TagIDs, err = translator.updateIdsBulk(input.TagIds, "tag_ids")
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
"github.com/stashapp/stash/pkg/models"
|
||||
"github.com/stashapp/stash/pkg/plugin/hook"
|
||||
"github.com/stashapp/stash/pkg/sliceutil/stringslice"
|
||||
|
|
@ -103,17 +102,7 @@ func (r *mutationResolver) TagCreate(ctx context.Context, input TagCreateInput)
|
|||
return r.getTag(ctx, newTag.ID)
|
||||
}
|
||||
|
||||
func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput) (*models.Tag, error) {
|
||||
tagID, err := strconv.Atoi(input.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting id: %w", err)
|
||||
}
|
||||
|
||||
translator := changesetTranslator{
|
||||
inputMap: getUpdateInputMap(ctx),
|
||||
}
|
||||
|
||||
// Populate tag from the input
|
||||
func tagPartialFromInput(input TagUpdateInput, translator changesetTranslator) (*models.TagPartial, error) {
|
||||
updatedTag := models.NewTagPartial()
|
||||
|
||||
updatedTag.Name = translator.optionalString(input.Name, "name")
|
||||
|
|
@ -132,6 +121,7 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput)
|
|||
}
|
||||
updatedTag.StashIDs = translator.updateStashIDs(updateStashIDInputs, "stash_ids")
|
||||
|
||||
var err error
|
||||
updatedTag.ParentIDs, err = translator.updateIds(input.ParentIds, "parent_ids")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting parent tag ids: %w", err)
|
||||
|
|
@ -149,6 +139,25 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput)
|
|||
updatedTag.CustomFields.Partial = convertMapJSONNumbers(updatedTag.CustomFields.Partial)
|
||||
}
|
||||
|
||||
return &updatedTag, nil
|
||||
}
|
||||
|
||||
func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput) (*models.Tag, error) {
|
||||
tagID, err := strconv.Atoi(input.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting id: %w", err)
|
||||
}
|
||||
|
||||
translator := changesetTranslator{
|
||||
inputMap: getUpdateInputMap(ctx),
|
||||
}
|
||||
|
||||
// Populate tag from the input
|
||||
updatedTag, err := tagPartialFromInput(input, translator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imageData []byte
|
||||
imageIncluded := translator.hasField("image")
|
||||
if input.Image != nil {
|
||||
|
|
@ -185,11 +194,11 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput)
|
|||
}
|
||||
}
|
||||
|
||||
if err := tag.ValidateUpdate(ctx, tagID, updatedTag, qb); err != nil {
|
||||
if err := tag.ValidateUpdate(ctx, tagID, *updatedTag, qb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t, err = qb.UpdatePartial(ctx, tagID, updatedTag)
|
||||
t, err = qb.UpdatePartial(ctx, tagID, *updatedTag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -337,6 +346,31 @@ func (r *mutationResolver) TagsMerge(ctx context.Context, input TagsMergeInput)
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
var values *models.TagPartial
|
||||
var imageData []byte
|
||||
|
||||
if input.Values != nil {
|
||||
translator := changesetTranslator{
|
||||
inputMap: getNamedUpdateInputMap(ctx, "input.values"),
|
||||
}
|
||||
|
||||
values, err = tagPartialFromInput(*input.Values, translator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if input.Values.Image != nil {
|
||||
var err error
|
||||
imageData, err = utils.ProcessImageInput(ctx, *input.Values.Image)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("processing cover image: %w", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
v := models.NewTagPartial()
|
||||
values = &v
|
||||
}
|
||||
|
||||
var t *models.Tag
|
||||
if err := r.withTxn(ctx, func(ctx context.Context) error {
|
||||
qb := r.repository.Tag
|
||||
|
|
@ -351,28 +385,22 @@ func (r *mutationResolver) TagsMerge(ctx context.Context, input TagsMergeInput)
|
|||
return fmt.Errorf("tag with id %d not found", destination)
|
||||
}
|
||||
|
||||
parents, children, err := tag.MergeHierarchy(ctx, destination, source, qb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = qb.Merge(ctx, source, destination); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = qb.UpdateParentTags(ctx, destination, parents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = qb.UpdateChildTags(ctx, destination, children)
|
||||
if err != nil {
|
||||
if err := tag.ValidateUpdate(ctx, destination, *values, qb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tag.ValidateHierarchyExisting(ctx, t, parents, children, qb)
|
||||
if err != nil {
|
||||
logger.Errorf("Error merging tag: %s", err)
|
||||
return err
|
||||
if _, err := qb.UpdatePartial(ctx, destination, *values); err != nil {
|
||||
return fmt.Errorf("updating tag: %w", err)
|
||||
}
|
||||
|
||||
if len(imageData) > 0 {
|
||||
if err := qb.UpdateImage(ctx, destination, imageData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -96,6 +96,11 @@ func makeConfigGeneralResult() *ConfigGeneralResult {
|
|||
CalculateMd5: config.IsCalculateMD5(),
|
||||
VideoFileNamingAlgorithm: config.GetVideoFileNamingAlgorithm(),
|
||||
ParallelTasks: config.GetParallelTasks(),
|
||||
UseCustomSpriteInterval: config.GetUseCustomSpriteInterval(),
|
||||
SpriteInterval: config.GetSpriteInterval(),
|
||||
SpriteScreenshotSize: config.GetSpriteScreenshotSize(),
|
||||
MinimumSprites: config.GetMinimumSprites(),
|
||||
MaximumSprites: config.GetMaximumSprites(),
|
||||
PreviewAudio: config.GetPreviewAudio(),
|
||||
PreviewSegments: config.GetPreviewSegments(),
|
||||
PreviewSegmentDuration: config.GetPreviewSegmentDuration(),
|
||||
|
|
|
|||
|
|
@ -33,15 +33,26 @@ func (r *queryResolver) FindJob(ctx context.Context, input FindJobInput) (*Job,
|
|||
}
|
||||
|
||||
func jobToJobModel(j job.Job) *Job {
|
||||
subTasks := make([]string, len(j.Details))
|
||||
for i, t := range j.Details {
|
||||
subTasks[i] = sanitiseWebsocketString(t)
|
||||
}
|
||||
|
||||
var jobError *string
|
||||
if j.Error != nil {
|
||||
s := sanitiseWebsocketString(*j.Error)
|
||||
jobError = &s
|
||||
}
|
||||
|
||||
ret := &Job{
|
||||
ID: strconv.Itoa(j.ID),
|
||||
Status: JobStatus(j.Status),
|
||||
Description: j.Description,
|
||||
SubTasks: j.Details,
|
||||
Description: sanitiseWebsocketString(j.Description),
|
||||
SubTasks: subTasks,
|
||||
StartTime: j.StartTime,
|
||||
EndTime: j.EndTime,
|
||||
AddTime: j.AddTime,
|
||||
Error: j.Error,
|
||||
Error: jobError,
|
||||
}
|
||||
|
||||
if j.Progress != -1 {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/stashapp/stash/pkg/match"
|
||||
"github.com/stashapp/stash/pkg/models"
|
||||
|
|
@ -363,7 +364,8 @@ func (r *queryResolver) ScrapeSingleTag(ctx context.Context, source scraper.Sour
|
|||
client := r.newStashBoxClient(*b)
|
||||
|
||||
var ret []*models.ScrapedTag
|
||||
out, err := client.QueryTag(ctx, *input.Query)
|
||||
query := *input.Query
|
||||
out, err := client.QueryTag(ctx, query)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -383,6 +385,22 @@ func (r *queryResolver) ScrapeSingleTag(ctx context.Context, source scraper.Sour
|
|||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// tag name query returns results that may not match the query exactly.
|
||||
// if there is an exact match, it should be first
|
||||
if query != "" {
|
||||
for i, result := range ret {
|
||||
if strings.EqualFold(result.Name, query) {
|
||||
// prepend exact match to the front of the slice
|
||||
if i != 0 {
|
||||
ret = append([]*models.ScrapedTag{result}, append(ret[:i], ret[i+1:]...)...)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2,11 +2,19 @@ package api
|
|||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/stashapp/stash/internal/log"
|
||||
"github.com/stashapp/stash/internal/manager"
|
||||
)
|
||||
|
||||
// sanitiseWebsocketString is used to ensure that any strings sent over the websocket are valid UTF-8.
|
||||
// Any invalid UTF-8 sequences will be replaced with the Unicode replacement character (U+FFFD).
|
||||
// Invalid UTF-8 sequences can cause the websocket connection to be closed.
|
||||
func sanitiseWebsocketString(s string) string {
|
||||
return strings.ToValidUTF8(s, "\uFFFD")
|
||||
}
|
||||
|
||||
func getLogLevel(logType string) LogLevel {
|
||||
switch logType {
|
||||
case "progress":
|
||||
|
|
@ -33,7 +41,7 @@ func logEntriesFromLogItems(logItems []log.LogItem) []*LogEntry {
|
|||
ret[i] = &LogEntry{
|
||||
Time: entry.Time,
|
||||
Level: getLogLevel(entry.Type),
|
||||
Message: entry.Message,
|
||||
Message: sanitiseWebsocketString(entry.Message),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -57,8 +57,20 @@ func (b SceneURLBuilder) GetScreenshotURL() string {
|
|||
return b.BaseURL + "/scene/" + b.SceneID + "/screenshot?t=" + b.UpdatedAt
|
||||
}
|
||||
|
||||
func (b SceneURLBuilder) GetFunscriptURL() string {
|
||||
return b.BaseURL + "/scene/" + b.SceneID + "/funscript"
|
||||
func (b SceneURLBuilder) GetFunscriptURL(apiKey string) *url.URL {
|
||||
u, err := url.Parse(fmt.Sprintf("%s/scene/%s/funscript", b.BaseURL, b.SceneID))
|
||||
if err != nil {
|
||||
// shouldn't happen
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if apiKey != "" {
|
||||
v := u.Query()
|
||||
v.Set("apikey", apiKey)
|
||||
u.RawQuery = v.Encode()
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func (b SceneURLBuilder) GetCaptionURL() string {
|
||||
|
|
|
|||
|
|
@ -365,7 +365,10 @@ func makeImage(expectedResult bool) *models.Image {
|
|||
}
|
||||
|
||||
func createImage(ctx context.Context, w models.ImageWriter, o *models.Image, f *models.ImageFile) error {
|
||||
err := w.Create(ctx, o, []models.FileID{f.ID})
|
||||
err := w.Create(ctx, &models.CreateImageInput{
|
||||
Image: o,
|
||||
FileIDs: []models.FileID{f.ID},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create image with path '%s': %s", f.Path, err.Error())
|
||||
|
|
@ -468,7 +471,10 @@ func makeGallery(expectedResult bool) *models.Gallery {
|
|||
}
|
||||
|
||||
func createGallery(ctx context.Context, w models.GalleryWriter, o *models.Gallery, f *models.BaseFile) error {
|
||||
err := w.Create(ctx, o, []models.FileID{f.ID})
|
||||
err := w.Create(ctx, &models.CreateGalleryInput{
|
||||
Gallery: o,
|
||||
FileIDs: []models.FileID{f.ID},
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create gallery with path '%s': %s", f.Path, err.Error())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
package desktop
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
|
@ -155,15 +156,17 @@ func getIconPath() string {
|
|||
return path.Join(config.GetInstance().GetConfigPath(), "icon.png")
|
||||
}
|
||||
|
||||
func RevealInFileManager(path string) {
|
||||
exists, err := fsutil.FileExists(path)
|
||||
func RevealInFileManager(path string) error {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
logger.Errorf("Error checking file: %s", err)
|
||||
return
|
||||
return fmt.Errorf("error checking path: %w", err)
|
||||
}
|
||||
if exists && IsDesktop() {
|
||||
revealInFileManager(path)
|
||||
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting absolute path: %w", err)
|
||||
}
|
||||
return revealInFileManager(absPath, info)
|
||||
}
|
||||
|
||||
func getServerURL(path string) string {
|
||||
|
|
|
|||
|
|
@ -4,9 +4,11 @@
|
|||
package desktop
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/kermieisinthehouse/gosx-notifier"
|
||||
gosxnotifier "github.com/feederbox826/gosx-notifier"
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
)
|
||||
|
||||
|
|
@ -32,8 +34,11 @@ func sendNotification(notificationTitle string, notificationText string) {
|
|||
}
|
||||
}
|
||||
|
||||
func revealInFileManager(path string) {
|
||||
exec.Command(`open`, `-R`, path)
|
||||
func revealInFileManager(path string, _ os.FileInfo) error {
|
||||
if err := exec.Command(`open`, `-R`, path).Run(); err != nil {
|
||||
return fmt.Errorf("error revealing path in Finder: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isDoubleClickLaunched() bool {
|
||||
|
|
|
|||
|
|
@ -4,8 +4,10 @@
|
|||
package desktop
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
|
|
@ -33,8 +35,15 @@ func sendNotification(notificationTitle string, notificationText string) {
|
|||
}
|
||||
}
|
||||
|
||||
func revealInFileManager(path string) {
|
||||
|
||||
func revealInFileManager(path string, info os.FileInfo) error {
|
||||
dir := path
|
||||
if !info.IsDir() {
|
||||
dir = filepath.Dir(path)
|
||||
}
|
||||
if err := exec.Command("xdg-open", dir).Run(); err != nil {
|
||||
return fmt.Errorf("error opening directory in file manager: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isDoubleClickLaunched() bool {
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
package desktop
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
|
@ -83,6 +84,10 @@ func sendNotification(notificationTitle string, notificationText string) {
|
|||
}
|
||||
}
|
||||
|
||||
func revealInFileManager(path string) {
|
||||
exec.Command(`explorer`, `\select`, path)
|
||||
func revealInFileManager(path string, _ os.FileInfo) error {
|
||||
c := exec.Command(`explorer`, `/select,`, path)
|
||||
logger.Debugf("Running: %s", c.String())
|
||||
// explorer seems to return an error code even when it works, so ignore the error
|
||||
_ = c.Run()
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -147,6 +147,9 @@ func (t *SceneIdentifier) getOptions(source ScraperSource) MetadataOptions {
|
|||
if source.Options.IncludeMalePerformers != nil {
|
||||
options.IncludeMalePerformers = source.Options.IncludeMalePerformers
|
||||
}
|
||||
if source.Options.PerformerGenders != nil {
|
||||
options.PerformerGenders = source.Options.PerformerGenders
|
||||
}
|
||||
if source.Options.SkipMultipleMatches != nil {
|
||||
options.SkipMultipleMatches = source.Options.SkipMultipleMatches
|
||||
}
|
||||
|
|
@ -204,13 +207,23 @@ func (t *SceneIdentifier) getSceneUpdater(ctx context.Context, s *models.Scene,
|
|||
ret.Partial.StudioID = models.NewOptionalInt(*studioID)
|
||||
}
|
||||
|
||||
includeMalePerformers := true
|
||||
if options.IncludeMalePerformers != nil {
|
||||
includeMalePerformers = *options.IncludeMalePerformers
|
||||
// Determine allowed genders for performer filtering
|
||||
var allowedGenders []models.GenderEnum
|
||||
if options.PerformerGenders != nil {
|
||||
// New field takes precedence
|
||||
allowedGenders = options.PerformerGenders
|
||||
} else if options.IncludeMalePerformers != nil && !*options.IncludeMalePerformers {
|
||||
// Legacy: if includeMalePerformers is false, include all genders except male
|
||||
for _, g := range models.AllGenderEnum {
|
||||
if g != models.GenderEnumMale {
|
||||
allowedGenders = append(allowedGenders, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
// nil allowedGenders means include all performers
|
||||
|
||||
addSkipSingleNamePerformerTag := false
|
||||
performerIDs, err := rel.performers(ctx, !includeMalePerformers)
|
||||
performerIDs, err := rel.performers(ctx, allowedGenders)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrSkipSingleNamePerformer) {
|
||||
addSkipSingleNamePerformerTag = true
|
||||
|
|
|
|||
|
|
@ -60,9 +60,15 @@ func TestSceneIdentifier_Identify(t *testing.T) {
|
|||
)
|
||||
|
||||
defaultOptions := &MetadataOptions{
|
||||
SetOrganized: &boolFalse,
|
||||
SetCoverImage: &boolFalse,
|
||||
IncludeMalePerformers: &boolFalse,
|
||||
SetOrganized: &boolFalse,
|
||||
SetCoverImage: &boolFalse,
|
||||
PerformerGenders: []models.GenderEnum{
|
||||
models.GenderEnumFemale,
|
||||
models.GenderEnumTransgenderFemale,
|
||||
models.GenderEnumTransgenderMale,
|
||||
models.GenderEnumIntersex,
|
||||
models.GenderEnumNonBinary,
|
||||
},
|
||||
SkipSingleNamePerformers: &boolFalse,
|
||||
}
|
||||
sources := []ScraperSource{
|
||||
|
|
@ -216,9 +222,15 @@ func TestSceneIdentifier_modifyScene(t *testing.T) {
|
|||
|
||||
boolFalse := false
|
||||
defaultOptions := &MetadataOptions{
|
||||
SetOrganized: &boolFalse,
|
||||
SetCoverImage: &boolFalse,
|
||||
IncludeMalePerformers: &boolFalse,
|
||||
SetOrganized: &boolFalse,
|
||||
SetCoverImage: &boolFalse,
|
||||
PerformerGenders: []models.GenderEnum{
|
||||
models.GenderEnumFemale,
|
||||
models.GenderEnumTransgenderFemale,
|
||||
models.GenderEnumTransgenderMale,
|
||||
models.GenderEnumIntersex,
|
||||
models.GenderEnumNonBinary,
|
||||
},
|
||||
SkipSingleNamePerformers: &boolFalse,
|
||||
}
|
||||
tr := &SceneIdentifier{
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import (
|
|||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/stashapp/stash/pkg/models"
|
||||
"github.com/stashapp/stash/pkg/scraper"
|
||||
)
|
||||
|
||||
|
|
@ -32,7 +33,10 @@ type MetadataOptions struct {
|
|||
SetCoverImage *bool `json:"setCoverImage"`
|
||||
SetOrganized *bool `json:"setOrganized"`
|
||||
// defaults to true if not provided
|
||||
// Deprecated: use PerformerGenders instead
|
||||
IncludeMalePerformers *bool `json:"includeMalePerformers"`
|
||||
// Filter to only include performers with these genders. If not provided, all genders are included.
|
||||
PerformerGenders []models.GenderEnum `json:"performerGenders"`
|
||||
// defaults to true if not provided
|
||||
SkipMultipleMatches *bool `json:"skipMultipleMatches"`
|
||||
// ID of tag to tag skipped multiple matches with
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
@ -69,7 +70,7 @@ func (g sceneRelationships) studio(ctx context.Context) (*int, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (g sceneRelationships) performers(ctx context.Context, ignoreMale bool) ([]int, error) {
|
||||
func (g sceneRelationships) performers(ctx context.Context, allowedGenders []models.GenderEnum) ([]int, error) {
|
||||
fieldStrategy := g.fieldOptions["performers"]
|
||||
scraped := g.result.result.Performers
|
||||
|
||||
|
|
@ -97,8 +98,11 @@ func (g sceneRelationships) performers(ctx context.Context, ignoreMale bool) ([]
|
|||
singleNamePerformerSkipped := false
|
||||
|
||||
for _, p := range scraped {
|
||||
if ignoreMale && p.Gender != nil && strings.EqualFold(*p.Gender, models.GenderEnumMale.String()) {
|
||||
continue
|
||||
if allowedGenders != nil && p.Gender != nil {
|
||||
gender := models.GenderEnum(strings.ToUpper(*p.Gender))
|
||||
if !slices.Contains(allowedGenders, gender) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
performerID, err := getPerformerID(ctx, endpoint, g.performerCreator, p, createMissing, g.skipSingleNamePerformers)
|
||||
|
|
|
|||
|
|
@ -183,13 +183,13 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
scene *models.Scene
|
||||
fieldOptions *FieldOptions
|
||||
scraped []*models.ScrapedPerformer
|
||||
ignoreMale bool
|
||||
want []int
|
||||
wantErr bool
|
||||
name string
|
||||
scene *models.Scene
|
||||
fieldOptions *FieldOptions
|
||||
scraped []*models.ScrapedPerformer
|
||||
allowedGenders []models.GenderEnum
|
||||
want []int
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
"ignore",
|
||||
|
|
@ -202,7 +202,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
StoredID: &validStoredID,
|
||||
},
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
|
|
@ -211,7 +211,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
emptyScene,
|
||||
defaultOptions,
|
||||
[]*models.ScrapedPerformer{},
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
|
|
@ -225,7 +225,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
StoredID: &existingPerformerStr,
|
||||
},
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
|
|
@ -239,7 +239,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
StoredID: &validStoredID,
|
||||
},
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
[]int{existingPerformerID, validStoredIDInt},
|
||||
false,
|
||||
},
|
||||
|
|
@ -254,7 +254,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
Gender: &male,
|
||||
},
|
||||
},
|
||||
true,
|
||||
[]models.GenderEnum{models.GenderEnumFemale, models.GenderEnumTransgenderMale, models.GenderEnumTransgenderFemale, models.GenderEnumIntersex, models.GenderEnumNonBinary},
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
|
|
@ -270,7 +270,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
StoredID: &validStoredID,
|
||||
},
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
[]int{validStoredIDInt},
|
||||
false,
|
||||
},
|
||||
|
|
@ -287,7 +287,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
Gender: &female,
|
||||
},
|
||||
},
|
||||
true,
|
||||
[]models.GenderEnum{models.GenderEnumFemale, models.GenderEnumTransgenderMale, models.GenderEnumTransgenderFemale, models.GenderEnumIntersex, models.GenderEnumNonBinary},
|
||||
[]int{validStoredIDInt},
|
||||
false,
|
||||
},
|
||||
|
|
@ -304,7 +304,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
StoredID: &invalidStoredID,
|
||||
},
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
|
|
@ -319,7 +319,7 @@ func Test_sceneRelationships_performers(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
got, err := tr.performers(testCtx, tt.ignoreMale)
|
||||
got, err := tr.performers(testCtx, tt.allowedGenders)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("sceneRelationships.performers() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
|
|
|
|||
185
internal/manager/backup.go
Normal file
|
|
@ -0,0 +1,185 @@
|
|||
package manager
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/stashapp/stash/internal/manager/config"
|
||||
"github.com/stashapp/stash/pkg/fsutil"
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
)
|
||||
|
||||
type databaseBackupZip struct {
|
||||
*zip.Writer
|
||||
}
|
||||
|
||||
func (z *databaseBackupZip) zipFileRename(fn, outDir, outFn string) error {
|
||||
p := filepath.Join(outDir, outFn)
|
||||
p = filepath.ToSlash(p)
|
||||
|
||||
f, err := z.Create(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating zip entry for %s: %v", fn, err)
|
||||
}
|
||||
|
||||
i, err := os.Open(fn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening %s: %v", fn, err)
|
||||
}
|
||||
|
||||
defer i.Close()
|
||||
|
||||
if _, err := io.Copy(f, i); err != nil {
|
||||
return fmt.Errorf("error writing %s to zip: %v", fn, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *databaseBackupZip) zipFile(fn, outDir string) error {
|
||||
return z.zipFileRename(fn, outDir, filepath.Base(fn))
|
||||
}
|
||||
|
||||
func (s *Manager) BackupDatabase(download bool, includeBlobs bool) (string, string, error) {
|
||||
var backupPath string
|
||||
var backupName string
|
||||
|
||||
// if we include blobs, then the output is a zip file
|
||||
// if not, using the same backup logic as before, which creates a sqlite file
|
||||
if !includeBlobs || s.Config.GetBlobsStorage() != config.BlobStorageTypeFilesystem {
|
||||
return s.backupDatabaseOnly(download)
|
||||
}
|
||||
|
||||
// use tmp directory for the backup
|
||||
backupDir := s.Paths.Generated.Tmp
|
||||
if err := fsutil.EnsureDir(backupDir); err != nil {
|
||||
return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
|
||||
}
|
||||
f, err := os.CreateTemp(backupDir, "backup*.sqlite")
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
backupPath = f.Name()
|
||||
backupName = s.Database.DatabaseBackupPath("")
|
||||
f.Close()
|
||||
|
||||
// delete the temp file so that the backup operation can create it
|
||||
if err := os.Remove(backupPath); err != nil {
|
||||
return "", "", fmt.Errorf("could not remove temporary backup file %v: %w", backupPath, err)
|
||||
}
|
||||
|
||||
if err := s.Database.Backup(backupPath); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// create a zip file
|
||||
zipFileDir := s.Paths.Generated.Downloads
|
||||
if !download {
|
||||
zipFileDir = s.Config.GetBackupDirectoryPathOrDefault()
|
||||
if zipFileDir != "" {
|
||||
if err := fsutil.EnsureDir(zipFileDir); err != nil {
|
||||
return "", "", fmt.Errorf("could not create backup directory %v: %w", zipFileDir, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
zipFileName := backupName + ".zip"
|
||||
zipFilePath := filepath.Join(zipFileDir, zipFileName)
|
||||
|
||||
logger.Debugf("Preparing zip file for database backup at %v", zipFilePath)
|
||||
|
||||
zf, err := os.Create(zipFilePath)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not create zip file %v: %w", zipFilePath, err)
|
||||
}
|
||||
defer zf.Close()
|
||||
|
||||
z := databaseBackupZip{
|
||||
Writer: zip.NewWriter(zf),
|
||||
}
|
||||
|
||||
defer z.Close()
|
||||
|
||||
// move the database file into the zip
|
||||
dbFn := filepath.Base(s.Config.GetDatabasePath())
|
||||
if err := z.zipFileRename(backupPath, "", dbFn); err != nil {
|
||||
return "", "", fmt.Errorf("could not add database backup to zip file: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Remove(backupPath); err != nil {
|
||||
return "", "", fmt.Errorf("could not remove temporary backup file %v: %w", backupPath, err)
|
||||
}
|
||||
|
||||
// walk the blobs directory and add files to the zip
|
||||
blobsDir := s.Config.GetBlobsPath()
|
||||
err = filepath.WalkDir(blobsDir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// calculate out dir by removing the blobsDir prefix from the path
|
||||
outDir := filepath.Join("blobs", strings.TrimPrefix(filepath.Dir(path), blobsDir))
|
||||
if err := z.zipFile(path, outDir); err != nil {
|
||||
return fmt.Errorf("could not add blob %v to zip file: %w", path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("error walking blobs directory: %w", err)
|
||||
}
|
||||
|
||||
return zipFilePath, zipFileName, nil
|
||||
}
|
||||
|
||||
func (s *Manager) backupDatabaseOnly(download bool) (string, string, error) {
|
||||
var backupPath string
|
||||
var backupName string
|
||||
|
||||
if download {
|
||||
backupDir := s.Paths.Generated.Downloads
|
||||
if err := fsutil.EnsureDir(backupDir); err != nil {
|
||||
return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
|
||||
}
|
||||
f, err := os.CreateTemp(backupDir, "backup*.sqlite")
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
backupPath = f.Name()
|
||||
backupName = s.Database.DatabaseBackupPath("")
|
||||
f.Close()
|
||||
|
||||
// delete the temp file so that the backup operation can create it
|
||||
if err := os.Remove(backupPath); err != nil {
|
||||
return "", "", fmt.Errorf("could not remove temporary backup file %v: %w", backupPath, err)
|
||||
}
|
||||
} else {
|
||||
backupDir := s.Config.GetBackupDirectoryPathOrDefault()
|
||||
if backupDir != "" {
|
||||
if err := fsutil.EnsureDir(backupDir); err != nil {
|
||||
return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
|
||||
}
|
||||
}
|
||||
backupPath = s.Database.DatabaseBackupPath(backupDir)
|
||||
backupName = filepath.Base(backupPath)
|
||||
}
|
||||
|
||||
err := s.Database.Backup(backupPath)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return backupPath, backupName, nil
|
||||
}
|
||||
|
|
@ -83,6 +83,21 @@ const (
|
|||
ParallelTasks = "parallel_tasks"
|
||||
parallelTasksDefault = 1
|
||||
|
||||
UseCustomSpriteInterval = "use_custom_sprite_interval"
|
||||
UseCustomSpriteIntervalDefault = false
|
||||
|
||||
SpriteInterval = "sprite_interval"
|
||||
SpriteIntervalDefault = 30
|
||||
|
||||
MinimumSprites = "minimum_sprites"
|
||||
MinimumSpritesDefault = 10
|
||||
|
||||
MaximumSprites = "maximum_sprites"
|
||||
MaximumSpritesDefault = 500
|
||||
|
||||
SpriteScreenshotSize = "sprite_screenshot_width"
|
||||
spriteScreenshotSizeDefault = 160
|
||||
|
||||
PreviewPreset = "preview_preset"
|
||||
TranscodeHardwareAcceleration = "ffmpeg.hardware_acceleration"
|
||||
|
||||
|
|
@ -975,6 +990,50 @@ func (i *Config) GetParallelTasksWithAutoDetection() int {
|
|||
return parallelTasks
|
||||
}
|
||||
|
||||
// GetUseCustomSpriteInterval returns true if the sprite minimum, maximum, and interval settings
|
||||
// should be used instead of the default
|
||||
func (i *Config) GetUseCustomSpriteInterval() bool {
|
||||
value := i.getBool(UseCustomSpriteInterval)
|
||||
return value
|
||||
}
|
||||
|
||||
// GetSpriteInterval returns the time (in seconds) to be between each scrubber sprite
|
||||
// A value of 0 indicates that the sprite interval should be automatically determined
|
||||
// based on the minimum sprite setting.
|
||||
func (i *Config) GetSpriteInterval() float64 {
|
||||
value := i.getFloat64(SpriteInterval)
|
||||
return value
|
||||
}
|
||||
|
||||
// GetMinimumSprites returns the minimum number of sprites that have to be generated
|
||||
// A value of 0 will be overridden with the default of 10.
|
||||
func (i *Config) GetMinimumSprites() int {
|
||||
value := i.getInt(MinimumSprites)
|
||||
if value <= 0 {
|
||||
return MinimumSpritesDefault
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// GetMaximumSprites returns the maximum number of sprites that can be generated
|
||||
// A value of 0 indicates no maximum.
|
||||
func (i *Config) GetMaximumSprites() int {
|
||||
value := i.getInt(MaximumSprites)
|
||||
return value
|
||||
}
|
||||
|
||||
// GetSpriteScreenshotSize returns the required size of the screenshots to be taken
|
||||
// during sprite generation in pixels. This will be the width for landscape scenes
|
||||
// and the height for portrait scenes, with the other dimension being scaled to maintain
|
||||
// the aspect ratio. If the value is less than or equal to 0, the default will be used.
|
||||
func (i *Config) GetSpriteScreenshotSize() int {
|
||||
value := i.getInt(SpriteScreenshotSize)
|
||||
if value <= 0 {
|
||||
return spriteScreenshotSizeDefault
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (i *Config) GetPreviewAudio() bool {
|
||||
return i.getBool(PreviewAudio)
|
||||
}
|
||||
|
|
@ -1861,6 +1920,12 @@ func (i *Config) setDefaultValues() {
|
|||
i.setDefault(PreviewAudio, previewAudioDefault)
|
||||
i.setDefault(SoundOnPreview, false)
|
||||
|
||||
i.setDefault(UseCustomSpriteInterval, UseCustomSpriteIntervalDefault)
|
||||
i.setDefault(SpriteInterval, SpriteIntervalDefault)
|
||||
i.setDefault(MinimumSprites, MinimumSpritesDefault)
|
||||
i.setDefault(MaximumSprites, MaximumSpritesDefault)
|
||||
i.setDefault(SpriteScreenshotSize, spriteScreenshotSizeDefault)
|
||||
|
||||
i.setDefault(ThemeColor, DefaultThemeColor)
|
||||
|
||||
i.setDefault(WriteImageThumbnails, writeImageThumbnailsDefault)
|
||||
|
|
|
|||
|
|
@ -38,3 +38,12 @@ func (s StashConfigs) GetStashFromDirPath(dirPath string) *StashConfig {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s StashConfigs) Paths() []string {
|
||||
paths := make([]string, len(s))
|
||||
for i, c := range s {
|
||||
// #6618 - clean the path to ensure comparison works correctly
|
||||
paths[i] = filepath.Clean(c.Path)
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,8 +21,7 @@ type SpriteGenerator struct {
|
|||
VideoChecksum string
|
||||
ImageOutputPath string
|
||||
VTTOutputPath string
|
||||
Rows int
|
||||
Columns int
|
||||
Config SpriteGeneratorConfig
|
||||
SlowSeek bool // use alternate seek function, very slow!
|
||||
|
||||
Overwrite bool
|
||||
|
|
@ -30,13 +29,81 @@ type SpriteGenerator struct {
|
|||
g *generate.Generator
|
||||
}
|
||||
|
||||
func NewSpriteGenerator(videoFile ffmpeg.VideoFile, videoChecksum string, imageOutputPath string, vttOutputPath string, rows int, cols int) (*SpriteGenerator, error) {
|
||||
// SpriteGeneratorConfig holds configuration for the SpriteGenerator
|
||||
type SpriteGeneratorConfig struct {
|
||||
// MinimumSprites is the minimum number of sprites to generate, even if the video duration is short
|
||||
// SpriteInterval will be adjusted accordingly to ensure at least this many sprites are generated.
|
||||
// A value of 0 means no minimum, and the generator will use the provided SpriteInterval or
|
||||
// calculate it based on the video duration and MaximumSprites
|
||||
MinimumSprites int
|
||||
|
||||
// MaximumSprites is the maximum number of sprites to generate, even if the video duration is long
|
||||
// SpriteInterval will be adjusted accordingly to ensure no more than this many sprites are generated
|
||||
// A value of 0 means no maximum, and the generator will use the provided SpriteInterval or
|
||||
// calculate it based on the video duration and MinimumSprites
|
||||
MaximumSprites int
|
||||
|
||||
// SpriteInterval is the default interval in seconds between each sprite.
|
||||
// If MinimumSprites or MaximumSprites are set, this value will be adjusted accordingly
|
||||
// to ensure the desired number of sprites are generated
|
||||
// A value of 0 means the generator will calculate the interval based on the video duration and
|
||||
// the provided MinimumSprites and MaximumSprites
|
||||
SpriteInterval float64
|
||||
|
||||
// SpriteSize is the size in pixels of the longest dimension of each sprite image.
|
||||
// The other dimension will be automatically calculated to maintain the aspect ratio of the video
|
||||
SpriteSize int
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultSpriteAmount is the default number of sprites to generate if no configuration is provided
|
||||
// This corresponds to the legacy behavior of the generator, which generates 81 sprites at equal
|
||||
// intervals across the video duration
|
||||
DefaultSpriteAmount = 81
|
||||
|
||||
// DefaultSpriteSize is the default size in pixels of the longest dimension of each sprite image
|
||||
// if no configuration is provided. This corresponds to the legacy behavior of the generator.
|
||||
DefaultSpriteSize = 160
|
||||
)
|
||||
|
||||
var DefaultSpriteGeneratorConfig = SpriteGeneratorConfig{
|
||||
MinimumSprites: DefaultSpriteAmount,
|
||||
MaximumSprites: DefaultSpriteAmount,
|
||||
SpriteInterval: 0,
|
||||
SpriteSize: DefaultSpriteSize,
|
||||
}
|
||||
|
||||
// NewSpriteGenerator creates a new SpriteGenerator for the given video file and configuration
|
||||
// It calculates the appropriate sprite interval and count based on the video duration and the provided configuration
|
||||
func NewSpriteGenerator(videoFile ffmpeg.VideoFile, videoChecksum string, imageOutputPath string, vttOutputPath string, config SpriteGeneratorConfig) (*SpriteGenerator, error) {
|
||||
exists, err := fsutil.FileExists(videoFile.Path)
|
||||
if !exists {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if videoFile.VideoStreamDuration <= 0 {
|
||||
s := fmt.Sprintf("video %s: duration(%.3f)/frame count(%d) invalid, skipping sprite creation", videoFile.Path, videoFile.VideoStreamDuration, videoFile.FrameCount)
|
||||
return nil, errors.New(s)
|
||||
}
|
||||
|
||||
config.SpriteInterval = calculateSpriteInterval(videoFile, config)
|
||||
chunkCount := int(math.Ceil(videoFile.VideoStreamDuration / config.SpriteInterval))
|
||||
|
||||
// adjust the chunk count to the next highest perfect square, to ensure the sprite image
|
||||
// is completely filled (no empty space in the grid) and the grid is as square as possible (minimizing the number of rows/columns)
|
||||
gridSize := generate.GetSpriteGridSize(chunkCount)
|
||||
newChunkCount := gridSize * gridSize
|
||||
|
||||
if newChunkCount != chunkCount {
|
||||
logger.Debugf("[generator] adjusting chunk count from %d to %d to fit a %dx%d grid", chunkCount, newChunkCount, gridSize, gridSize)
|
||||
chunkCount = newChunkCount
|
||||
}
|
||||
|
||||
if config.SpriteSize <= 0 {
|
||||
config.SpriteSize = DefaultSpriteSize
|
||||
}
|
||||
|
||||
slowSeek := false
|
||||
chunkCount := rows * cols
|
||||
|
||||
// For files with small duration / low frame count try to seek using frame number intead of seconds
|
||||
if videoFile.VideoStreamDuration < 5 || (0 < videoFile.FrameCount && videoFile.FrameCount <= int64(chunkCount)) { // some files can have FrameCount == 0, only use SlowSeek if duration < 5
|
||||
|
|
@ -71,9 +138,8 @@ func NewSpriteGenerator(videoFile ffmpeg.VideoFile, videoChecksum string, imageO
|
|||
VideoChecksum: videoChecksum,
|
||||
ImageOutputPath: imageOutputPath,
|
||||
VTTOutputPath: vttOutputPath,
|
||||
Rows: rows,
|
||||
Config: config,
|
||||
SlowSeek: slowSeek,
|
||||
Columns: cols,
|
||||
g: &generate.Generator{
|
||||
Encoder: instance.FFMpeg,
|
||||
FFMpegConfig: instance.Config,
|
||||
|
|
@ -83,6 +149,40 @@ func NewSpriteGenerator(videoFile ffmpeg.VideoFile, videoChecksum string, imageO
|
|||
}, nil
|
||||
}
|
||||
|
||||
func calculateSpriteInterval(videoFile ffmpeg.VideoFile, config SpriteGeneratorConfig) float64 {
|
||||
// If a custom sprite interval is provided, start with that
|
||||
spriteInterval := config.SpriteInterval
|
||||
|
||||
// If no custom interval is provided, calculate the interval based on the
|
||||
// video duration and minimum sprite count
|
||||
if spriteInterval <= 0 {
|
||||
minSprites := config.MinimumSprites
|
||||
if minSprites <= 0 {
|
||||
panic("invalid configuration: MinimumSprites must be greater than 0 if SpriteInterval is not set")
|
||||
}
|
||||
|
||||
logger.Debugf("[generator] calculating sprite interval for video duration %.3fs with minimum sprites %d", videoFile.VideoStreamDuration, minSprites)
|
||||
return videoFile.VideoStreamDuration / float64(minSprites)
|
||||
}
|
||||
|
||||
// Calculate the number of sprites that would be generated with the provided interval
|
||||
spriteCount := int(math.Ceil(videoFile.VideoStreamDuration / spriteInterval))
|
||||
|
||||
// If the calculated sprite count is greater than the maximum, adjust the interval to meet the maximum
|
||||
if config.MaximumSprites > 0 && spriteCount > int(config.MaximumSprites) {
|
||||
spriteInterval = videoFile.VideoStreamDuration / float64(config.MaximumSprites)
|
||||
logger.Debugf("[generator] provided sprite interval %.1fs results in %d sprites, which exceeds the maximum of %d, adjusting interval to %.1fs", config.SpriteInterval, spriteCount, config.MaximumSprites, spriteInterval)
|
||||
}
|
||||
|
||||
// If the calculated sprite count is less than the minimum, adjust the interval to meet the minimum
|
||||
if config.MinimumSprites > 0 && spriteCount < int(config.MinimumSprites) {
|
||||
spriteInterval = videoFile.VideoStreamDuration / float64(config.MinimumSprites)
|
||||
logger.Debugf("[generator] provided sprite interval %.1fs results in %d sprites, which is less than the minimum of %d, adjusting interval to %.1fs", config.SpriteInterval, spriteCount, config.MinimumSprites, spriteInterval)
|
||||
}
|
||||
|
||||
return spriteInterval
|
||||
}
|
||||
|
||||
func (g *SpriteGenerator) Generate() error {
|
||||
if err := g.generateSpriteImage(); err != nil {
|
||||
return err
|
||||
|
|
@ -100,6 +200,8 @@ func (g *SpriteGenerator) generateSpriteImage() error {
|
|||
|
||||
var images []image.Image
|
||||
|
||||
isPortrait := g.Info.VideoFile.Height > g.Info.VideoFile.Width
|
||||
|
||||
if !g.SlowSeek {
|
||||
logger.Infof("[generator] generating sprite image for %s", g.Info.VideoFile.Path)
|
||||
// generate `ChunkCount` thumbnails
|
||||
|
|
@ -107,8 +209,7 @@ func (g *SpriteGenerator) generateSpriteImage() error {
|
|||
|
||||
for i := 0; i < g.Info.ChunkCount; i++ {
|
||||
time := float64(i) * stepSize
|
||||
|
||||
img, err := g.g.SpriteScreenshot(context.TODO(), g.Info.VideoFile.Path, time)
|
||||
img, err := g.g.SpriteScreenshot(context.TODO(), g.Info.VideoFile.Path, time, g.Config.SpriteSize, isPortrait)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -126,7 +227,7 @@ func (g *SpriteGenerator) generateSpriteImage() error {
|
|||
return errors.New("invalid frame number conversion")
|
||||
}
|
||||
|
||||
img, err := g.g.SpriteScreenshotSlow(context.TODO(), g.Info.VideoFile.Path, int(frame))
|
||||
img, err := g.g.SpriteScreenshotSlow(context.TODO(), g.Info.VideoFile.Path, int(frame), g.Config.SpriteSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -158,7 +259,7 @@ func (g *SpriteGenerator) generateSpriteVTT() error {
|
|||
stepSize /= g.Info.FrameRate
|
||||
}
|
||||
|
||||
return g.g.SpriteVTT(context.TODO(), g.VTTOutputPath, g.ImageOutputPath, stepSize)
|
||||
return g.g.SpriteVTT(context.TODO(), g.VTTOutputPath, g.ImageOutputPath, stepSize, g.Info.ChunkCount)
|
||||
}
|
||||
|
||||
func (g *SpriteGenerator) imageExists() bool {
|
||||
|
|
|
|||
|
|
@ -313,46 +313,6 @@ func (s *Manager) validateFFmpeg() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *Manager) BackupDatabase(download bool) (string, string, error) {
|
||||
var backupPath string
|
||||
var backupName string
|
||||
if download {
|
||||
backupDir := s.Paths.Generated.Downloads
|
||||
if err := fsutil.EnsureDir(backupDir); err != nil {
|
||||
return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
|
||||
}
|
||||
f, err := os.CreateTemp(backupDir, "backup*.sqlite")
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
backupPath = f.Name()
|
||||
backupName = s.Database.DatabaseBackupPath("")
|
||||
f.Close()
|
||||
|
||||
// delete the temp file so that the backup operation can create it
|
||||
if err := os.Remove(backupPath); err != nil {
|
||||
return "", "", fmt.Errorf("could not remove temporary backup file %v: %w", backupPath, err)
|
||||
}
|
||||
} else {
|
||||
backupDir := s.Config.GetBackupDirectoryPathOrDefault()
|
||||
if backupDir != "" {
|
||||
if err := fsutil.EnsureDir(backupDir); err != nil {
|
||||
return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
|
||||
}
|
||||
}
|
||||
backupPath = s.Database.DatabaseBackupPath(backupDir)
|
||||
backupName = filepath.Base(backupPath)
|
||||
}
|
||||
|
||||
err := s.Database.Backup(backupPath)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return backupPath, backupName, nil
|
||||
}
|
||||
|
||||
func (s *Manager) AnonymiseDatabase(download bool) (string, string, error) {
|
||||
var outPath string
|
||||
var outName string
|
||||
|
|
|
|||
|
|
@ -74,6 +74,28 @@ func getScanPaths(inputPaths []string) []*config.StashConfig {
|
|||
return ret
|
||||
}
|
||||
|
||||
// Filters the input array for paths that are within the paths managed by stash
|
||||
func filterStashPaths(inputPaths []string) []string {
|
||||
if len(inputPaths) == 0 {
|
||||
return inputPaths
|
||||
}
|
||||
|
||||
stashPaths := config.GetInstance().GetStashPaths()
|
||||
|
||||
var ret []string
|
||||
for _, p := range inputPaths {
|
||||
s := stashPaths.GetStashFromDirPath(p)
|
||||
if s == nil {
|
||||
logger.Warnf("%s is not in the configured stash paths", p)
|
||||
continue
|
||||
}
|
||||
|
||||
ret = append(ret, p)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// ScanSubscribe subscribes to a notification that is triggered when a
|
||||
// scan or clean is complete.
|
||||
func (s *Manager) ScanSubscribe(ctx context.Context) <-chan bool {
|
||||
|
|
@ -123,7 +145,8 @@ func (s *Manager) Scan(ctx context.Context, input ScanMetadataInput) (int, error
|
|||
ZipFileExtensions: cfg.GetGalleryExtensions(),
|
||||
// ScanFilters is set in ScanJob.Execute
|
||||
// HandlerRequiredFilters is set in ScanJob.Execute
|
||||
Rescan: input.Rescan,
|
||||
RootPaths: cfg.GetStashPaths().Paths(),
|
||||
Rescan: input.Rescan,
|
||||
}
|
||||
|
||||
scanJob := ScanJob{
|
||||
|
|
@ -291,6 +314,8 @@ type CleanMetadataInput struct {
|
|||
Paths []string `json:"paths"`
|
||||
// Do a dry run. Don't delete any files
|
||||
DryRun bool `json:"dryRun"`
|
||||
|
||||
IgnoreZipFileContents bool `json:"ignoreZipFileContents"`
|
||||
}
|
||||
|
||||
func (s *Manager) Clean(ctx context.Context, input CleanMetadataInput) int {
|
||||
|
|
@ -408,7 +433,7 @@ type StashBoxBatchTagInput struct {
|
|||
ExcludeFields []string `json:"exclude_fields"`
|
||||
// Refresh items already tagged by StashBox if true. Only tag items with no StashBox tagging if false
|
||||
Refresh bool `json:"refresh"`
|
||||
// If batch adding studios, should their parent studios also be created?
|
||||
// If batch adding studios or tags, should their parent entities also be created?
|
||||
CreateParent bool `json:"createParent"`
|
||||
// IDs in stash of the items to update.
|
||||
// If set, names and stash_ids fields will be ignored.
|
||||
|
|
@ -704,3 +729,137 @@ func (s *Manager) StashBoxBatchStudioTag(ctx context.Context, box *models.StashB
|
|||
|
||||
return s.JobManager.Add(ctx, "Batch stash-box studio tag...", j)
|
||||
}
|
||||
|
||||
func (s *Manager) batchTagTagsByIds(ctx context.Context, input StashBoxBatchTagInput, box *models.StashBox) ([]Task, error) {
|
||||
var tasks []Task
|
||||
|
||||
err := s.Repository.WithTxn(ctx, func(ctx context.Context) error {
|
||||
tagQuery := s.Repository.Tag
|
||||
|
||||
for _, tagID := range input.Ids {
|
||||
if id, err := strconv.Atoi(tagID); err == nil {
|
||||
t, err := tagQuery.Find(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := t.LoadStashIDs(ctx, tagQuery); err != nil {
|
||||
return fmt.Errorf("loading tag stash ids: %w", err)
|
||||
}
|
||||
|
||||
hasStashID := t.StashIDs.ForEndpoint(box.Endpoint) != nil
|
||||
if (input.Refresh && hasStashID) || (!input.Refresh && !hasStashID) {
|
||||
tasks = append(tasks, &stashBoxBatchTagTagTask{
|
||||
tag: t,
|
||||
createParent: input.CreateParent,
|
||||
box: box,
|
||||
excludedFields: input.ExcludeFields,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return tasks, err
|
||||
}
|
||||
|
||||
func (s *Manager) batchTagTagsByNamesOrStashIds(input StashBoxBatchTagInput, box *models.StashBox) []Task {
|
||||
var tasks []Task
|
||||
|
||||
for i := range input.StashIDs {
|
||||
stashID := input.StashIDs[i]
|
||||
if len(stashID) > 0 {
|
||||
tasks = append(tasks, &stashBoxBatchTagTagTask{
|
||||
stashID: &stashID,
|
||||
createParent: input.CreateParent,
|
||||
box: box,
|
||||
excludedFields: input.ExcludeFields,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for i := range input.Names {
|
||||
name := input.Names[i]
|
||||
if len(name) > 0 {
|
||||
tasks = append(tasks, &stashBoxBatchTagTagTask{
|
||||
name: &name,
|
||||
createParent: input.CreateParent,
|
||||
box: box,
|
||||
excludedFields: input.ExcludeFields,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return tasks
|
||||
}
|
||||
|
||||
func (s *Manager) batchTagAllTags(ctx context.Context, input StashBoxBatchTagInput, box *models.StashBox) ([]Task, error) {
|
||||
var tasks []Task
|
||||
|
||||
err := s.Repository.WithTxn(ctx, func(ctx context.Context) error {
|
||||
tagQuery := s.Repository.Tag
|
||||
var tags []*models.Tag
|
||||
var err error
|
||||
|
||||
tags, err = tagQuery.FindByStashIDStatus(ctx, input.Refresh, box.Endpoint)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error querying tags: %v", err)
|
||||
}
|
||||
|
||||
for _, t := range tags {
|
||||
tasks = append(tasks, &stashBoxBatchTagTagTask{
|
||||
tag: t,
|
||||
createParent: input.CreateParent,
|
||||
box: box,
|
||||
excludedFields: input.ExcludeFields,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return tasks, err
|
||||
}
|
||||
|
||||
func (s *Manager) StashBoxBatchTagTag(ctx context.Context, box *models.StashBox, input StashBoxBatchTagInput) int {
|
||||
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) error {
|
||||
logger.Infof("Initiating stash-box batch tag tag")
|
||||
|
||||
var tasks []Task
|
||||
var err error
|
||||
|
||||
switch input.getBatchTagType(false) {
|
||||
case batchTagByIds:
|
||||
tasks, err = s.batchTagTagsByIds(ctx, input, box)
|
||||
case batchTagByNamesOrStashIds:
|
||||
tasks = s.batchTagTagsByNamesOrStashIds(input, box)
|
||||
case batchTagAll:
|
||||
tasks, err = s.batchTagAllTags(ctx, input, box)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(tasks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
progress.SetTotal(len(tasks))
|
||||
|
||||
logger.Infof("Starting stash-box batch operation for %d tags", len(tasks))
|
||||
|
||||
for _, task := range tasks {
|
||||
progress.ExecuteTask(task.GetDescription(), func() {
|
||||
task.Start(ctx)
|
||||
})
|
||||
|
||||
progress.Increment()
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return s.JobManager.Add(ctx, "Batch stash-box tag tag...", j)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ import (
|
|||
)
|
||||
|
||||
type SceneService interface {
|
||||
Create(ctx context.Context, input *models.Scene, fileIDs []models.FileID, coverImage []byte) (*models.Scene, error)
|
||||
Create(ctx context.Context, input models.CreateSceneInput) (*models.Scene, error)
|
||||
AssignFile(ctx context.Context, sceneID int, fileID models.FileID) error
|
||||
Merge(ctx context.Context, sourceIDs []int, destinationID int, fileDeleter *scene.FileDeleter, options scene.MergeOptions) error
|
||||
Destroy(ctx context.Context, scene *models.Scene, fileDeleter *scene.FileDeleter, deleteGenerated, deleteFile, destroyFileEntry bool) error
|
||||
|
|
@ -39,7 +39,7 @@ type GalleryService interface {
|
|||
}
|
||||
|
||||
type GroupService interface {
|
||||
Create(ctx context.Context, group *models.Group, frontimageData []byte, backimageData []byte) error
|
||||
Create(ctx context.Context, input *models.CreateGroupInput) error
|
||||
UpdatePartial(ctx context.Context, id int, updatedGroup models.GroupPartial, frontImage group.ImageInput, backImage group.ImageInput) (*models.Group, error)
|
||||
|
||||
AddSubGroups(ctx context.Context, groupID int, subGroups []models.GroupIDDescription, insertIndex *int) error
|
||||
|
|
|
|||
268
internal/manager/scan_stashignore_test.go
Normal file
|
|
@ -0,0 +1,268 @@
|
|||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stashapp/stash/pkg/file"
|
||||
|
||||
// Necessary to register custom migrations.
|
||||
_ "github.com/stashapp/stash/pkg/sqlite/migrations"
|
||||
)
|
||||
|
||||
// stashIgnorePathFilter wraps StashIgnoreFilter to implement PathFilter for testing.
|
||||
// It provides a fixed library root for the filter.
|
||||
type stashIgnorePathFilter struct {
|
||||
filter *file.StashIgnoreFilter
|
||||
libraryRoot string
|
||||
}
|
||||
|
||||
func (f *stashIgnorePathFilter) Accept(ctx context.Context, path string, info fs.FileInfo, zipFilePath string) bool {
|
||||
return f.filter.Accept(ctx, path, info, f.libraryRoot, zipFilePath)
|
||||
}
|
||||
|
||||
// createTestFileOnDisk creates a file with some content.
|
||||
func createTestFileOnDisk(t *testing.T, dir, name string) string {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
t.Fatalf("failed to create directory for %s: %v", path, err)
|
||||
}
|
||||
// Write some content so the file has a non-zero size.
|
||||
if err := os.WriteFile(path, []byte("test content for "+name), 0644); err != nil {
|
||||
t.Fatalf("failed to create file %s: %v", path, err)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
// createStashIgnoreFile creates a .stashignore file with the given content.
|
||||
func createStashIgnoreFile(t *testing.T, dir, content string) {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, ".stashignore")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("failed to create .stashignore: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScannerWithStashIgnore(t *testing.T) {
|
||||
// Create temp directory structure.
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create test files.
|
||||
createTestFileOnDisk(t, tmpDir, "video1.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "video2.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "ignore_me.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "subdir/video3.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "subdir/skip_this.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "excluded_dir/video4.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "temp/processing.mp4")
|
||||
|
||||
// Create .stashignore file.
|
||||
stashignore := `# Ignore specific files
|
||||
ignore_me.mp4
|
||||
subdir/skip_this.mp4
|
||||
|
||||
# Ignore directories
|
||||
excluded_dir/
|
||||
temp/
|
||||
`
|
||||
createStashIgnoreFile(t, tmpDir, stashignore)
|
||||
|
||||
// Create stashignore filter with library root.
|
||||
stashIgnoreFilter := &stashIgnorePathFilter{
|
||||
filter: file.NewStashIgnoreFilter(),
|
||||
libraryRoot: tmpDir,
|
||||
}
|
||||
|
||||
// Create scanner.
|
||||
scanner := &file.Scanner{
|
||||
ScanFilters: []file.PathFilter{stashIgnoreFilter},
|
||||
}
|
||||
|
||||
testScenarios := []struct {
|
||||
path string
|
||||
accepted bool
|
||||
}{
|
||||
{filepath.Join(tmpDir, "video1.mp4"), true},
|
||||
{filepath.Join(tmpDir, "video2.mp4"), true},
|
||||
{filepath.Join(tmpDir, "ignore_me.mp4"), false},
|
||||
{filepath.Join(tmpDir, "subdir/video3.mp4"), true},
|
||||
{filepath.Join(tmpDir, "subdir/skip_this.mp4"), false},
|
||||
{filepath.Join(tmpDir, "excluded_dir/video4.mp4"), false},
|
||||
{filepath.Join(tmpDir, "temp/processing.mp4"), false},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, scenario := range testScenarios {
|
||||
info, err := os.Stat(scenario.path)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat file %s: %v", scenario.path, err)
|
||||
}
|
||||
accepted := scanner.AcceptEntry(ctx, scenario.path, info, "")
|
||||
|
||||
if accepted != scenario.accepted {
|
||||
t.Errorf("unexpected accept result for %s: expected %v, got %v",
|
||||
scenario.path, scenario.accepted, accepted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestScannerWithNestedStashIgnore(t *testing.T) {
|
||||
// Create temp directory structure.
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create test files.
|
||||
createTestFileOnDisk(t, tmpDir, "root.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "root.tmp")
|
||||
createTestFileOnDisk(t, tmpDir, "subdir/sub.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "subdir/sub.log")
|
||||
createTestFileOnDisk(t, tmpDir, "subdir/sub.tmp")
|
||||
|
||||
// Root .stashignore excludes *.tmp.
|
||||
createStashIgnoreFile(t, tmpDir, "*.tmp\n")
|
||||
|
||||
// Subdir .stashignore excludes *.log.
|
||||
createStashIgnoreFile(t, filepath.Join(tmpDir, "subdir"), "*.log\n")
|
||||
|
||||
// Create stashignore filter with library root.
|
||||
stashIgnoreFilter := &stashIgnorePathFilter{
|
||||
filter: file.NewStashIgnoreFilter(),
|
||||
libraryRoot: tmpDir,
|
||||
}
|
||||
|
||||
// Create scanner.
|
||||
scanner := &file.Scanner{
|
||||
ScanFilters: []file.PathFilter{stashIgnoreFilter},
|
||||
}
|
||||
|
||||
testScenarios := []struct {
|
||||
path string
|
||||
accepted bool
|
||||
}{
|
||||
{filepath.Join(tmpDir, "root.mp4"), true},
|
||||
{filepath.Join(tmpDir, "root.tmp"), false},
|
||||
{filepath.Join(tmpDir, "subdir/sub.mp4"), true},
|
||||
{filepath.Join(tmpDir, "subdir/sub.log"), false},
|
||||
{filepath.Join(tmpDir, "subdir/sub.tmp"), false},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, scenario := range testScenarios {
|
||||
info, err := os.Stat(scenario.path)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat file %s: %v", scenario.path, err)
|
||||
}
|
||||
accepted := scanner.AcceptEntry(ctx, scenario.path, info, "")
|
||||
|
||||
if accepted != scenario.accepted {
|
||||
t.Errorf("unexpected accept result for %s: expected %v, got %v",
|
||||
scenario.path, scenario.accepted, accepted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestScannerWithoutStashIgnore(t *testing.T) {
|
||||
// Create temp directory structure (no .stashignore).
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create test files.
|
||||
createTestFileOnDisk(t, tmpDir, "video1.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "video2.mp4")
|
||||
createTestFileOnDisk(t, tmpDir, "subdir/video3.mp4")
|
||||
|
||||
// Create stashignore filter with library root (but no .stashignore file exists).
|
||||
stashIgnoreFilter := &stashIgnorePathFilter{
|
||||
filter: file.NewStashIgnoreFilter(),
|
||||
libraryRoot: tmpDir,
|
||||
}
|
||||
|
||||
// Create scanner.
|
||||
scanner := &file.Scanner{
|
||||
ScanFilters: []file.PathFilter{stashIgnoreFilter},
|
||||
}
|
||||
|
||||
testScenarios := []struct {
|
||||
path string
|
||||
accepted bool
|
||||
}{
|
||||
{filepath.Join(tmpDir, "video1.mp4"), true},
|
||||
{filepath.Join(tmpDir, "video2.mp4"), true},
|
||||
{filepath.Join(tmpDir, "subdir/video3.mp4"), true},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, scenario := range testScenarios {
|
||||
info, err := os.Stat(scenario.path)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat file %s: %v", scenario.path, err)
|
||||
}
|
||||
accepted := scanner.AcceptEntry(ctx, scenario.path, info, "")
|
||||
|
||||
if accepted != scenario.accepted {
|
||||
t.Errorf("unexpected accept result for %s: expected %v, got %v",
|
||||
scenario.path, scenario.accepted, accepted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestScannerWithNegationPattern(t *testing.T) {
|
||||
// Create temp directory structure.
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create test files.
|
||||
createTestFileOnDisk(t, tmpDir, "file1.tmp")
|
||||
createTestFileOnDisk(t, tmpDir, "file2.tmp")
|
||||
createTestFileOnDisk(t, tmpDir, "keep_this.tmp")
|
||||
createTestFileOnDisk(t, tmpDir, "video.mp4")
|
||||
|
||||
// Create .stashignore with negation.
|
||||
stashignore := `*.tmp
|
||||
!keep_this.tmp
|
||||
`
|
||||
createStashIgnoreFile(t, tmpDir, stashignore)
|
||||
|
||||
// Create stashignore filter with library root.
|
||||
stashIgnoreFilter := &stashIgnorePathFilter{
|
||||
filter: file.NewStashIgnoreFilter(),
|
||||
libraryRoot: tmpDir,
|
||||
}
|
||||
|
||||
// Create scanner.
|
||||
scanner := &file.Scanner{
|
||||
ScanFilters: []file.PathFilter{stashIgnoreFilter},
|
||||
}
|
||||
|
||||
testScenarios := []struct {
|
||||
path string
|
||||
accepted bool
|
||||
}{
|
||||
{filepath.Join(tmpDir, "file1.tmp"), false},
|
||||
{filepath.Join(tmpDir, "file2.tmp"), false},
|
||||
{filepath.Join(tmpDir, "keep_this.tmp"), true},
|
||||
{filepath.Join(tmpDir, "video.mp4"), true},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, scenario := range testScenarios {
|
||||
info, err := os.Stat(scenario.path)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat file %s: %v", scenario.path, err)
|
||||
}
|
||||
accepted := scanner.AcceptEntry(ctx, scenario.path, info, "")
|
||||
|
||||
if accepted != scenario.accepted {
|
||||
t.Errorf("unexpected accept result for %s: expected %v, got %v",
|
||||
scenario.path, scenario.accepted, accepted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -313,9 +313,36 @@ func (j *CleanGeneratedJob) cleanBlobFiles(ctx context.Context, progress *job.Pr
|
|||
return err
|
||||
}
|
||||
|
||||
// remove empty hash prefix subdirectories
|
||||
j.removeEmptyDirs(j.Paths.Blobs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *CleanGeneratedJob) removeEmptyDirs(root string) {
|
||||
entries, err := os.ReadDir(root)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
dirPath := filepath.Join(root, entry.Name())
|
||||
subEntries, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(subEntries) == 0 {
|
||||
j.logDelete("removing empty directory: %s", entry.Name())
|
||||
j.deleteDir(dirPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (j *CleanGeneratedJob) getScenesWithHash(ctx context.Context, hash string) ([]*models.Scene, error) {
|
||||
fp := models.Fingerprint{
|
||||
Fingerprint: hash,
|
||||
|
|
@ -565,6 +592,7 @@ func (j *CleanGeneratedJob) cleanMarkerFiles(ctx context.Context, progress *job.
|
|||
j.setProgressFromFilename(sceneHash[0:2], progress)
|
||||
|
||||
// check if the scene exists
|
||||
var walkErr error
|
||||
if err := j.Repository.WithReadTxn(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
scenes, err = j.getScenesWithHash(ctx, sceneHash)
|
||||
|
|
@ -575,15 +603,18 @@ func (j *CleanGeneratedJob) cleanMarkerFiles(ctx context.Context, progress *job.
|
|||
if len(scenes) == 0 {
|
||||
j.logDelete("deleting unused marker directory: %s", sceneHash)
|
||||
j.deleteDir(path)
|
||||
} else {
|
||||
// get the markers now
|
||||
for _, scene := range scenes {
|
||||
thisMarkers, err := j.Repository.SceneMarker.FindBySceneID(ctx, scene.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting markers for scene: %v", err)
|
||||
}
|
||||
markers = append(markers, thisMarkers...)
|
||||
// #5911 - we've just deleted the directory, so skip it in the walk to avoid errors
|
||||
walkErr = fs.SkipDir
|
||||
return nil
|
||||
}
|
||||
|
||||
// get the markers now
|
||||
for _, scene := range scenes {
|
||||
thisMarkers, err := j.Repository.SceneMarker.FindBySceneID(ctx, scene.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting markers for scene: %v", err)
|
||||
}
|
||||
markers = append(markers, thisMarkers...)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -591,7 +622,7 @@ func (j *CleanGeneratedJob) cleanMarkerFiles(ctx context.Context, progress *job.
|
|||
logger.Error(err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
return walkErr
|
||||
}
|
||||
|
||||
filename := info.Name()
|
||||
|
|
@ -633,6 +664,8 @@ func (j *CleanGeneratedJob) cleanMarkerFiles(ctx context.Context, progress *job.
|
|||
return err
|
||||
}
|
||||
|
||||
j.removeEmptyDirs(j.Paths.Generated.Markers)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -726,5 +759,7 @@ func (j *CleanGeneratedJob) cleanThumbnailFiles(ctx context.Context, progress *j
|
|||
return err
|
||||
}
|
||||
|
||||
j.removeEmptyDirs(j.Paths.Generated.Thumbnails)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,8 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/stashapp/stash/internal/manager/config"
|
||||
"github.com/stashapp/stash/pkg/fsutil"
|
||||
"github.com/stashapp/stash/pkg/job"
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
"github.com/stashapp/stash/pkg/sqlite"
|
||||
|
|
@ -29,6 +31,21 @@ type databaseSchemaInfo struct {
|
|||
StepsRequired uint
|
||||
}
|
||||
|
||||
// PreExecute validates the environment before executing the migration.
|
||||
// It returns an error if the migration cannot be performed.
|
||||
func (s *MigrateJob) PreExecute() error {
|
||||
// ensure backup directory exists and is writable
|
||||
backupDir := s.Config.GetBackupDirectoryPathOrDefault()
|
||||
if backupDir != "" {
|
||||
if err := fsutil.EnsureDir(backupDir); err != nil {
|
||||
logger.Errorf("error ensuring backup directory exists: %s", err)
|
||||
logger.Warnf("Backup directory (%s) must be modified to a valid directory or removed from the config file", config.BackupDirectoryPath)
|
||||
return fmt.Errorf("error creating backup directory: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MigrateJob) Execute(ctx context.Context, progress *job.Progress) error {
|
||||
schemaInfo, err := s.required()
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -40,9 +40,10 @@ func (j *cleanJob) Execute(ctx context.Context, progress *job.Progress) error {
|
|||
}
|
||||
|
||||
j.cleaner.Clean(ctx, file.CleanOptions{
|
||||
Paths: j.input.Paths,
|
||||
DryRun: j.input.DryRun,
|
||||
PathFilter: newCleanFilter(instance.Config),
|
||||
Paths: j.input.Paths,
|
||||
DryRun: j.input.DryRun,
|
||||
IgnoreZipFileContents: j.input.IgnoreZipFileContents,
|
||||
PathFilter: newCleanFilter(instance.Config),
|
||||
}, progress)
|
||||
|
||||
if job.IsCancelled(ctx) {
|
||||
|
|
@ -154,11 +155,12 @@ func newCleanFilter(c *config.Config) *cleanFilter {
|
|||
generatedPath: c.GetGeneratedPath(),
|
||||
videoExcludeRegex: generateRegexps(c.GetExcludes()),
|
||||
imageExcludeRegex: generateRegexps(c.GetImageExcludes()),
|
||||
stashIgnoreFilter: file.NewStashIgnoreFilter(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *cleanFilter) Accept(ctx context.Context, path string, info fs.FileInfo) bool {
|
||||
func (f *cleanFilter) Accept(ctx context.Context, path string, info fs.FileInfo, zipFilePath string) bool {
|
||||
// #1102 - clean anything in generated path
|
||||
generatedPath := f.generatedPath
|
||||
|
||||
|
|
@ -173,12 +175,18 @@ func (f *cleanFilter) Accept(ctx context.Context, path string, info fs.FileInfo)
|
|||
}
|
||||
|
||||
if stash == nil {
|
||||
logger.Infof("%s not in any stash library directories. Marking to clean: \"%s\"", fileOrFolder, path)
|
||||
logger.Infof("%s not in any stash library directories. Marking to clean: %q", fileOrFolder, path)
|
||||
return false
|
||||
}
|
||||
|
||||
if fsutil.IsPathInDir(generatedPath, path) {
|
||||
logger.Infof("%s is in generated path. Marking to clean: \"%s\"", fileOrFolder, path)
|
||||
logger.Infof("%s is in generated path. Marking to clean: %q", fileOrFolder, path)
|
||||
return false
|
||||
}
|
||||
|
||||
// Check .stashignore files, bounded to the library root.
|
||||
if !f.stashIgnoreFilter.Accept(ctx, path, info, stash.Path, zipFilePath) {
|
||||
logger.Infof("%s is excluded due to .stashignore. Marking to clean: %q", fileOrFolder, path)
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -651,6 +651,7 @@ func (t *ExportTask) exportImage(ctx context.Context, wg *sync.WaitGroup, jobCha
|
|||
galleryReader := r.Gallery
|
||||
performerReader := r.Performer
|
||||
tagReader := r.Tag
|
||||
imageReader := r.Image
|
||||
|
||||
for s := range jobChan {
|
||||
imageHash := s.Checksum
|
||||
|
|
@ -665,14 +666,17 @@ func (t *ExportTask) exportImage(ctx context.Context, wg *sync.WaitGroup, jobCha
|
|||
continue
|
||||
}
|
||||
|
||||
newImageJSON := image.ToBasicJSON(s)
|
||||
newImageJSON, err := image.ToBasicJSON(ctx, imageReader, s)
|
||||
if err != nil {
|
||||
logger.Errorf("[images] <%s> error converting image to JSON: %v", imageHash, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// export files
|
||||
for _, f := range s.Files.List() {
|
||||
t.exportFile(f)
|
||||
}
|
||||
|
||||
var err error
|
||||
newImageJSON.Studio, err = image.GetStudioName(ctx, studioReader, s)
|
||||
if err != nil {
|
||||
logger.Errorf("[images] <%s> error getting image studio name: %v", imageHash, err)
|
||||
|
|
@ -779,6 +783,7 @@ func (t *ExportTask) exportGallery(ctx context.Context, wg *sync.WaitGroup, jobC
|
|||
studioReader := r.Studio
|
||||
performerReader := r.Performer
|
||||
tagReader := r.Tag
|
||||
galleryReader := r.Gallery
|
||||
galleryChapterReader := r.GalleryChapter
|
||||
|
||||
for g := range jobChan {
|
||||
|
|
@ -847,6 +852,12 @@ func (t *ExportTask) exportGallery(ctx context.Context, wg *sync.WaitGroup, jobC
|
|||
|
||||
newGalleryJSON.Tags = tag.GetNames(tags)
|
||||
|
||||
newGalleryJSON.CustomFields, err = galleryReader.GetCustomFields(ctx, g.ID)
|
||||
if err != nil {
|
||||
logger.Errorf("[galleries] <%s> error getting gallery custom fields: %v", g.DisplayName(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
if t.includeDependencies {
|
||||
if g.StudioID != nil {
|
||||
t.studios.IDs = sliceutil.AppendUnique(t.studios.IDs, *g.StudioID)
|
||||
|
|
|
|||
|
|
@ -43,6 +43,8 @@ type GenerateMetadataInput struct {
|
|||
GalleryIDs []string `json:"galleryIDs"`
|
||||
// overwrite existing media
|
||||
Overwrite bool `json:"overwrite"`
|
||||
// paths to run generate on, in addition to the other ID lists
|
||||
Paths []string `json:"paths"`
|
||||
}
|
||||
|
||||
type GeneratePreviewOptionsInput struct {
|
||||
|
|
@ -133,8 +135,13 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) error
|
|||
r := j.repository
|
||||
if err := r.WithReadTxn(ctx, func(ctx context.Context) error {
|
||||
qb := r.Scene
|
||||
if len(j.input.SceneIDs) == 0 && len(j.input.MarkerIDs) == 0 && len(j.input.ImageIDs) == 0 && len(j.input.GalleryIDs) == 0 {
|
||||
j.queueTasks(ctx, g, queue)
|
||||
if len(j.input.SceneIDs) == 0 &&
|
||||
len(j.input.MarkerIDs) == 0 &&
|
||||
len(j.input.ImageIDs) == 0 &&
|
||||
len(j.input.GalleryIDs) == 0 &&
|
||||
len(j.input.Paths) == 0 {
|
||||
|
||||
j.queueTasks(ctx, g, nil, queue)
|
||||
} else {
|
||||
if len(j.input.SceneIDs) > 0 {
|
||||
scenes, err = qb.FindMany(ctx, sceneIDs)
|
||||
|
|
@ -183,6 +190,11 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) error
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(j.input.Paths) > 0 {
|
||||
paths := filterStashPaths(j.input.Paths)
|
||||
j.queueTasks(ctx, g, paths, queue)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -221,10 +233,10 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) error
|
|||
logMsg += fmt.Sprintf(" %d heatmaps & speeds", totals.interactiveHeatmapSpeeds)
|
||||
}
|
||||
if j.input.ClipPreviews {
|
||||
logMsg += fmt.Sprintf(" %d Image Clip Previews", totals.clipPreviews)
|
||||
logMsg += fmt.Sprintf(" %d image clip previews", totals.clipPreviews)
|
||||
}
|
||||
if j.input.ImageThumbnails {
|
||||
logMsg += fmt.Sprintf(" %d Image Thumbnails", totals.imageThumbnails)
|
||||
logMsg += fmt.Sprintf(" %d image thumbnails", totals.imageThumbnails)
|
||||
}
|
||||
if logMsg == "Generating" {
|
||||
logMsg = "Nothing selected to generate"
|
||||
|
|
@ -250,7 +262,9 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) error
|
|||
|
||||
for f := range queue {
|
||||
if job.IsCancelled(ctx) {
|
||||
break
|
||||
// keep draining the queue so the producer goroutine can finish
|
||||
// and release its read transaction, otherwise the DB stays locked
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add()
|
||||
|
|
@ -276,17 +290,18 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func (j *GenerateJob) queueTasks(ctx context.Context, g *generate.Generator, queue chan<- Task) {
|
||||
func (j *GenerateJob) queueTasks(ctx context.Context, g *generate.Generator, paths []string, queue chan<- Task) {
|
||||
j.totals = totalsGenerate{}
|
||||
|
||||
j.queueScenesTasks(ctx, g, queue)
|
||||
j.queueImagesTasks(ctx, g, queue)
|
||||
j.queueScenesTasks(ctx, g, paths, queue)
|
||||
j.queueImagesTasks(ctx, g, paths, queue)
|
||||
}
|
||||
|
||||
func (j *GenerateJob) queueScenesTasks(ctx context.Context, g *generate.Generator, queue chan<- Task) {
|
||||
func (j *GenerateJob) queueScenesTasks(ctx context.Context, g *generate.Generator, paths []string, queue chan<- Task) {
|
||||
const batchSize = 1000
|
||||
|
||||
findFilter := models.BatchFindFilter(batchSize)
|
||||
sceneFilter := scene.FilterFromPaths(paths)
|
||||
|
||||
r := j.repository
|
||||
|
||||
|
|
@ -295,7 +310,7 @@ func (j *GenerateJob) queueScenesTasks(ctx context.Context, g *generate.Generato
|
|||
return
|
||||
}
|
||||
|
||||
scenes, err := scene.Query(ctx, r.Scene, nil, findFilter)
|
||||
scenes, err := scene.Query(ctx, r.Scene, sceneFilter, findFilter)
|
||||
if err != nil {
|
||||
logger.Errorf("Error encountered queuing files to scan: %s", err.Error())
|
||||
return
|
||||
|
|
@ -322,10 +337,11 @@ func (j *GenerateJob) queueScenesTasks(ctx context.Context, g *generate.Generato
|
|||
}
|
||||
}
|
||||
|
||||
func (j *GenerateJob) queueImagesTasks(ctx context.Context, g *generate.Generator, queue chan<- Task) {
|
||||
func (j *GenerateJob) queueImagesTasks(ctx context.Context, g *generate.Generator, paths []string, queue chan<- Task) {
|
||||
const batchSize = 1000
|
||||
|
||||
findFilter := models.BatchFindFilter(batchSize)
|
||||
imageFilter := image.FilterFromPaths(paths)
|
||||
|
||||
r := j.repository
|
||||
|
||||
|
|
@ -334,7 +350,7 @@ func (j *GenerateJob) queueImagesTasks(ctx context.Context, g *generate.Generato
|
|||
return
|
||||
}
|
||||
|
||||
images, err := image.Query(ctx, r.Image, nil, findFilter)
|
||||
images, err := image.Query(ctx, r.Image, imageFilter, findFilter)
|
||||
if err != nil {
|
||||
logger.Errorf("Error encountered queuing files to scan: %s", err.Error())
|
||||
return
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ func (t *GenerateImagePhashTask) Start(ctx context.Context) {
|
|||
}
|
||||
|
||||
if !set {
|
||||
generated, err := imagephash.Generate(t.File)
|
||||
generated, err := imagephash.Generate(instance.FFMpeg, t.File)
|
||||
if err != nil {
|
||||
logger.Errorf("Error generating phash for %q: %v", t.File.Path, err)
|
||||
logErrorOutput(err)
|
||||
|
|
|
|||
|
|
@ -34,7 +34,17 @@ func (t *GenerateSpriteTask) Start(ctx context.Context) {
|
|||
sceneHash := t.Scene.GetHash(t.fileNamingAlgorithm)
|
||||
imagePath := instance.Paths.Scene.GetSpriteImageFilePath(sceneHash)
|
||||
vttPath := instance.Paths.Scene.GetSpriteVttFilePath(sceneHash)
|
||||
generator, err := NewSpriteGenerator(*videoFile, sceneHash, imagePath, vttPath, 9, 9)
|
||||
|
||||
cfg := DefaultSpriteGeneratorConfig
|
||||
cfg.SpriteSize = instance.Config.GetSpriteScreenshotSize()
|
||||
|
||||
if instance.Config.GetUseCustomSpriteInterval() {
|
||||
cfg.MinimumSprites = instance.Config.GetMinimumSprites()
|
||||
cfg.MaximumSprites = instance.Config.GetMaximumSprites()
|
||||
cfg.SpriteInterval = instance.Config.GetSpriteInterval()
|
||||
}
|
||||
|
||||
generator, err := NewSpriteGenerator(*videoFile, sceneHash, imagePath, vttPath, cfg)
|
||||
|
||||
if err != nil {
|
||||
logger.Errorf("error creating sprite generator: %s", err.Error())
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/stashapp/stash/pkg/scene"
|
||||
"github.com/stashapp/stash/pkg/scene/generate"
|
||||
"github.com/stashapp/stash/pkg/txn"
|
||||
"github.com/stashapp/stash/pkg/utils"
|
||||
)
|
||||
|
||||
type ScanJob struct {
|
||||
|
|
@ -35,6 +36,8 @@ type ScanJob struct {
|
|||
|
||||
fileQueue chan file.ScannedFile
|
||||
count int
|
||||
|
||||
unmatchedCaptionFiles utils.MutexField[[]string]
|
||||
}
|
||||
|
||||
func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) error {
|
||||
|
|
@ -73,6 +76,8 @@ func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) error {
|
|||
j.scanner.ScanFilters = []file.PathFilter{newScanFilter(c, repo, minModTime)}
|
||||
j.scanner.HandlerRequiredFilters = []file.Filter{newHandlerRequiredFilter(cfg, repo)}
|
||||
|
||||
logger.Infof("Starting scan of %d paths with %d parallel tasks", len(paths), nTasks)
|
||||
|
||||
j.runJob(ctx, paths, nTasks, progress)
|
||||
|
||||
taskQueue.Close()
|
||||
|
|
@ -83,7 +88,7 @@ func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) error {
|
|||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
logger.Info(fmt.Sprintf("Scan finished (%s)", elapsed))
|
||||
logger.Infof("Scan finished (%s)", elapsed)
|
||||
|
||||
j.subscriptions.notify()
|
||||
return nil
|
||||
|
|
@ -166,12 +171,33 @@ func (j *ScanJob) queueFileFunc(ctx context.Context, f models.FS, zipFile *file.
|
|||
return nil
|
||||
}
|
||||
|
||||
if !j.scanner.AcceptEntry(ctx, path, info) {
|
||||
zipFilePath := ""
|
||||
if zipFile != nil {
|
||||
zipFilePath = zipFile.Path
|
||||
}
|
||||
|
||||
if !j.scanner.AcceptEntry(ctx, path, info, zipFilePath) {
|
||||
if info.IsDir() {
|
||||
logger.Debugf("Skipping directory %s", path)
|
||||
return fs.SkipDir
|
||||
}
|
||||
|
||||
// we don't include caption files in the file scan, but we do need
|
||||
// to handle them
|
||||
if fsutil.MatchExtension(path, video.CaptionExts) {
|
||||
fileRepo := j.scanner.Repository.File
|
||||
matched := video.AssociateCaptions(ctx, path, j.scanner.Repository.TxnManager, fileRepo, fileRepo)
|
||||
|
||||
if !matched {
|
||||
logger.Debugf("No matching video file found for caption file %s", path)
|
||||
j.unmatchedCaptionFiles.SetFunc(func(files []string) []string {
|
||||
return append(files, path)
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debugf("Skipping file %s", path)
|
||||
return nil
|
||||
}
|
||||
|
|
@ -257,8 +283,10 @@ func (j *ScanJob) processQueue(ctx context.Context, parallelTasks int, progress
|
|||
|
||||
for f := range j.fileQueue {
|
||||
logger.Tracef("Processing queued file %s", f.Path)
|
||||
if err := ctx.Err(); err != nil {
|
||||
return
|
||||
if ctx.Err() != nil {
|
||||
// Keep receiving until queueFiles closes the channel; otherwise
|
||||
// the walker can block on send (full buffer) and never finish.
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add()
|
||||
|
|
@ -309,10 +337,53 @@ func (j *ScanJob) handleFile(ctx context.Context, f file.ScannedFile, progress *
|
|||
return err
|
||||
}
|
||||
|
||||
// handle rename should have already handled the contents of the zip file
|
||||
// so shouldn't need to scan it again
|
||||
// if this is a new video file, match it with any unmatched caption files
|
||||
if r.New && len(j.unmatchedCaptionFiles.Get()) > 0 {
|
||||
videoFile, _ := r.File.(*models.VideoFile)
|
||||
|
||||
if (r.New || r.Updated) && j.scanner.IsZipFile(f.Info.Name()) {
|
||||
if videoFile != nil {
|
||||
// try to match any unmatched caption files to this video file
|
||||
for _, captionPath := range j.unmatchedCaptionFiles.Get() {
|
||||
if video.MatchesCaption(videoFile.Path, captionPath) {
|
||||
video.AssociateCaptions(ctx, captionPath, j.scanner.Repository.TxnManager, j.scanner.Repository.File, j.scanner.Repository.File)
|
||||
|
||||
// remove from the unmatched list
|
||||
j.unmatchedCaptionFiles.SetFunc(func(files []string) []string {
|
||||
newFiles := make([]string, 0, len(files)-1)
|
||||
for _, f := range files {
|
||||
if f != captionPath {
|
||||
newFiles = append(newFiles, f)
|
||||
}
|
||||
}
|
||||
return newFiles
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// clean captions - scene handler handles this as well, but
|
||||
// unchanged files aren't processed by the scene handler
|
||||
if r.IsUnchanged() {
|
||||
videoFile, _ := r.File.(*models.VideoFile)
|
||||
|
||||
if videoFile != nil {
|
||||
txnMgr := j.scanner.Repository.TxnManager
|
||||
fileRepo := j.scanner.Repository.File
|
||||
if err := txn.WithDatabase(ctx, txnMgr, func(ctx context.Context) error {
|
||||
return video.CleanCaptions(ctx, videoFile, txnMgr, fileRepo)
|
||||
}); err != nil {
|
||||
logger.Errorf("Error cleaning captions: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle rename should have already handled the contents of the zip file
|
||||
// so shouldn't need to scan it again.
|
||||
// Only scan zip contents if the file is new, the fingerprint changed,
|
||||
// or if a force rescan was requested.
|
||||
|
||||
if j.scanner.IsZipFile(f.Info.Name()) && (r.New || r.FingerprintChanged || j.scanner.Rescan) {
|
||||
ff := r.File
|
||||
f.BaseFile = ff.Base()
|
||||
|
||||
|
|
@ -324,6 +395,8 @@ func (j *ScanJob) handleFile(ctx context.Context, f file.ScannedFile, progress *
|
|||
if err := j.scanZipFile(zipCtx, f, progress); err != nil {
|
||||
logger.Errorf("Error scanning zip file %q: %v", f.Path, err)
|
||||
}
|
||||
} else if r.Updated && j.scanner.IsZipFile(f.Info.Name()) {
|
||||
logger.Debugf("Skipping zip file scan for %q: fingerprint unchanged", f.Path)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -378,11 +451,10 @@ type sceneFinder interface {
|
|||
// handlerRequiredFilter returns true if a File's handler needs to be executed despite the file not being updated.
|
||||
type handlerRequiredFilter struct {
|
||||
extensionConfig
|
||||
txnManager txn.Manager
|
||||
SceneFinder sceneFinder
|
||||
ImageFinder fileCounter
|
||||
GalleryFinder galleryFinder
|
||||
CaptionUpdater video.CaptionUpdater
|
||||
txnManager txn.Manager
|
||||
SceneFinder sceneFinder
|
||||
ImageFinder fileCounter
|
||||
GalleryFinder galleryFinder
|
||||
|
||||
FolderCache *lru.LRU[bool]
|
||||
|
||||
|
|
@ -398,7 +470,6 @@ func newHandlerRequiredFilter(c *config.Config, repo models.Repository) *handler
|
|||
SceneFinder: repo.Scene,
|
||||
ImageFinder: repo.Image,
|
||||
GalleryFinder: repo.Gallery,
|
||||
CaptionUpdater: repo.File,
|
||||
FolderCache: lru.New[bool](processes * 2),
|
||||
videoFileNamingAlgorithm: c.GetVideoFileNamingAlgorithm(),
|
||||
}
|
||||
|
|
@ -473,65 +544,35 @@ func (f *handlerRequiredFilter) Accept(ctx context.Context, ff models.File) bool
|
|||
}
|
||||
}
|
||||
|
||||
if isVideoFile {
|
||||
// TODO - check if the cover exists
|
||||
// hash := scene.GetHash(ff, f.videoFileNamingAlgorithm)
|
||||
// ssPath := instance.Paths.Scene.GetScreenshotPath(hash)
|
||||
// if exists, _ := fsutil.FileExists(ssPath); !exists {
|
||||
// // if not, check if the file is a primary file for a scene
|
||||
// scenes, err := f.SceneFinder.FindByPrimaryFileID(ctx, ff.Base().ID)
|
||||
// if err != nil {
|
||||
// // just ignore
|
||||
// return false
|
||||
// }
|
||||
|
||||
// if len(scenes) > 0 {
|
||||
// // if it is, then it needs to be re-generated
|
||||
// return true
|
||||
// }
|
||||
// }
|
||||
|
||||
// clean captions - scene handler handles this as well, but
|
||||
// unchanged files aren't processed by the scene handler
|
||||
videoFile, _ := ff.(*models.VideoFile)
|
||||
if videoFile != nil {
|
||||
if err := video.CleanCaptions(ctx, videoFile, f.txnManager, f.CaptionUpdater); err != nil {
|
||||
logger.Errorf("Error cleaning captions: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type scanFilter struct {
|
||||
extensionConfig
|
||||
txnManager txn.Manager
|
||||
FileFinder models.FileFinder
|
||||
CaptionUpdater video.CaptionUpdater
|
||||
txnManager txn.Manager
|
||||
|
||||
stashPaths config.StashConfigs
|
||||
generatedPath string
|
||||
videoExcludeRegex []*regexp.Regexp
|
||||
imageExcludeRegex []*regexp.Regexp
|
||||
minModTime time.Time
|
||||
stashIgnoreFilter *file.StashIgnoreFilter
|
||||
}
|
||||
|
||||
func newScanFilter(c *config.Config, repo models.Repository, minModTime time.Time) *scanFilter {
|
||||
return &scanFilter{
|
||||
extensionConfig: newExtensionConfig(c),
|
||||
txnManager: repo.TxnManager,
|
||||
FileFinder: repo.File,
|
||||
CaptionUpdater: repo.File,
|
||||
stashPaths: c.GetStashPaths(),
|
||||
generatedPath: c.GetGeneratedPath(),
|
||||
videoExcludeRegex: generateRegexps(c.GetExcludes()),
|
||||
imageExcludeRegex: generateRegexps(c.GetImageExcludes()),
|
||||
minModTime: minModTime,
|
||||
stashIgnoreFilter: file.NewStashIgnoreFilter(),
|
||||
}
|
||||
}
|
||||
|
||||
func (f *scanFilter) Accept(ctx context.Context, path string, info fs.FileInfo) bool {
|
||||
func (f *scanFilter) Accept(ctx context.Context, path string, info fs.FileInfo, zipFilePath string) bool {
|
||||
if fsutil.IsPathInDir(f.generatedPath, path) {
|
||||
logger.Warnf("Skipping %q as it overlaps with the generated folder", path)
|
||||
return false
|
||||
|
|
@ -548,19 +589,16 @@ func (f *scanFilter) Accept(ctx context.Context, path string, info fs.FileInfo)
|
|||
return false
|
||||
}
|
||||
|
||||
// Check .stashignore files, bounded to the library root.
|
||||
if !f.stashIgnoreFilter.Accept(ctx, path, info, s.Path, zipFilePath) {
|
||||
logger.Debugf("Skipping %s due to .stashignore", path)
|
||||
return false
|
||||
}
|
||||
|
||||
isVideoFile := useAsVideo(path)
|
||||
isImageFile := useAsImage(path)
|
||||
isZipFile := fsutil.MatchExtension(path, f.zipExt)
|
||||
|
||||
// handle caption files
|
||||
if fsutil.MatchExtension(path, video.CaptionExts) {
|
||||
// we don't include caption files in the file scan, but we do need
|
||||
// to handle them
|
||||
video.AssociateCaptions(ctx, path, f.txnManager, f.FileFinder, f.CaptionUpdater)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if !info.IsDir() && !isVideoFile && !isImageFile && !isZipFile {
|
||||
logger.Debugf("Skipping %s as it does not match any known file extensions", path)
|
||||
return false
|
||||
|
|
@ -624,8 +662,9 @@ func getScanHandlers(options ScanMetadataInput, taskQueue *job.TaskQueue, progre
|
|||
&file.FilteredHandler{
|
||||
Filter: file.FilterFunc(imageFileFilter),
|
||||
Handler: &image.ScanHandler{
|
||||
CreatorUpdater: r.Image,
|
||||
GalleryFinder: r.Gallery,
|
||||
CreatorUpdater: r.Image,
|
||||
GalleryFinder: r.Gallery,
|
||||
SceneFinderUpdater: r.Scene,
|
||||
ScanGenerator: &imageGenerators{
|
||||
input: options,
|
||||
taskQueue: taskQueue,
|
||||
|
|
@ -654,9 +693,10 @@ func getScanHandlers(options ScanMetadataInput, taskQueue *job.TaskQueue, progre
|
|||
&file.FilteredHandler{
|
||||
Filter: file.FilterFunc(videoFileFilter),
|
||||
Handler: &scene.ScanHandler{
|
||||
CreatorUpdater: r.Scene,
|
||||
CaptionUpdater: r.File,
|
||||
PluginCache: pluginCache,
|
||||
CreatorUpdater: r.Scene,
|
||||
GalleryFinderUpdater: r.Gallery,
|
||||
CaptionUpdater: r.File,
|
||||
PluginCache: pluginCache,
|
||||
ScanGenerator: &sceneGenerators{
|
||||
input: options,
|
||||
taskQueue: taskQueue,
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/stashapp/stash/pkg/logger"
|
||||
"github.com/stashapp/stash/pkg/match"
|
||||
|
|
@ -12,6 +13,7 @@ import (
|
|||
"github.com/stashapp/stash/pkg/sliceutil"
|
||||
"github.com/stashapp/stash/pkg/stashbox"
|
||||
"github.com/stashapp/stash/pkg/studio"
|
||||
"github.com/stashapp/stash/pkg/tag"
|
||||
)
|
||||
|
||||
// stashBoxBatchPerformerTagTask is used to tag or create performers from stash-box.
|
||||
|
|
@ -275,6 +277,12 @@ func (t *stashBoxBatchStudioTagTask) getName() string {
|
|||
}
|
||||
|
||||
func (t *stashBoxBatchStudioTagTask) Start(ctx context.Context) {
|
||||
// Skip organized studios
|
||||
if t.studio != nil && t.studio.Organized {
|
||||
logger.Infof("Skipping organized studio %s", t.studio.Name)
|
||||
return
|
||||
}
|
||||
|
||||
studio, err := t.findStashBoxStudio(ctx)
|
||||
if err != nil {
|
||||
logger.Errorf("Error fetching studio data from stash-box: %v", err)
|
||||
|
|
@ -523,3 +531,235 @@ func (t *stashBoxBatchStudioTagTask) processParentStudio(ctx context.Context, pa
|
|||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// stashBoxBatchTagTagTask is used to tag or create tags from stash-box.
|
||||
//
|
||||
// Two modes of operation:
|
||||
// - Update existing tag: set tag to update from stash-box data
|
||||
// - Create new tag: set name or stashID to search stash-box and create locally
|
||||
type stashBoxBatchTagTagTask struct {
|
||||
box *models.StashBox
|
||||
name *string
|
||||
stashID *string
|
||||
tag *models.Tag
|
||||
createParent bool
|
||||
excludedFields []string
|
||||
}
|
||||
|
||||
func (t *stashBoxBatchTagTagTask) getName() string {
|
||||
switch {
|
||||
case t.name != nil:
|
||||
return *t.name
|
||||
case t.stashID != nil:
|
||||
return *t.stashID
|
||||
case t.tag != nil:
|
||||
return t.tag.Name
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func (t *stashBoxBatchTagTagTask) Start(ctx context.Context) {
|
||||
scrapedTag, err := t.findStashBoxTag(ctx)
|
||||
if err != nil {
|
||||
logger.Errorf("Error fetching tag data from stash-box: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
excluded := map[string]bool{}
|
||||
for _, field := range t.excludedFields {
|
||||
excluded[field] = true
|
||||
}
|
||||
|
||||
if scrapedTag != nil {
|
||||
t.processMatchedTag(ctx, scrapedTag, excluded)
|
||||
} else {
|
||||
logger.Infof("No match found for %s", t.getName())
|
||||
}
|
||||
}
|
||||
|
||||
func (t *stashBoxBatchTagTagTask) GetDescription() string {
|
||||
return fmt.Sprintf("Tagging tag %s from stash-box", t.getName())
|
||||
}
|
||||
|
||||
func (t *stashBoxBatchTagTagTask) findStashBoxTag(ctx context.Context) (*models.ScrapedTag, error) {
|
||||
var results []*models.ScrapedTag
|
||||
var err error
|
||||
|
||||
r := instance.Repository
|
||||
|
||||
client := stashbox.NewClient(*t.box, stashbox.ExcludeTagPatterns(instance.Config.GetScraperExcludeTagPatterns()))
|
||||
|
||||
nameQuery := ""
|
||||
|
||||
switch {
|
||||
case t.name != nil:
|
||||
nameQuery = *t.name
|
||||
results, err = client.QueryTag(ctx, *t.name)
|
||||
case t.stashID != nil:
|
||||
results, err = client.QueryTag(ctx, *t.stashID)
|
||||
case t.tag != nil:
|
||||
var remoteID string
|
||||
if err := r.WithReadTxn(ctx, func(ctx context.Context) error {
|
||||
if !t.tag.StashIDs.Loaded() {
|
||||
err = t.tag.LoadStashIDs(ctx, r.Tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, id := range t.tag.StashIDs.List() {
|
||||
if id.Endpoint == t.box.Endpoint {
|
||||
remoteID = id.StashID
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if remoteID != "" {
|
||||
results, err = client.QueryTag(ctx, remoteID)
|
||||
} else {
|
||||
nameQuery = t.tag.Name
|
||||
results, err = client.QueryTag(ctx, t.tag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result *models.ScrapedTag
|
||||
|
||||
// QueryTag returns tags that partially match the name, so find the exact match if searching by name
|
||||
if nameQuery != "" {
|
||||
for _, r := range results {
|
||||
if strings.EqualFold(r.Name, nameQuery) {
|
||||
result = r
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
result = results[0]
|
||||
}
|
||||
|
||||
if result == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if err := r.WithReadTxn(ctx, func(ctx context.Context) error {
|
||||
return match.ScrapedTagHierarchy(ctx, r.Tag, result, t.box.Endpoint)
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (t *stashBoxBatchTagTagTask) processParentTag(ctx context.Context, parent *models.ScrapedTag, excluded map[string]bool) error {
|
||||
if parent.StoredID == nil {
|
||||
// Create new parent tag
|
||||
newParentTag := parent.ToTag(t.box.Endpoint, excluded)
|
||||
|
||||
r := instance.Repository
|
||||
err := r.WithTxn(ctx, func(ctx context.Context) error {
|
||||
qb := r.Tag
|
||||
|
||||
if err := tag.ValidateCreate(ctx, *newParentTag, qb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := qb.Create(ctx, &models.CreateTagInput{Tag: newParentTag}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
storedID := strconv.Itoa(newParentTag.ID)
|
||||
parent.StoredID = &storedID
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create parent tag %s: %v", parent.Name, err)
|
||||
} else {
|
||||
logger.Infof("Created parent tag %s", parent.Name)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Parent already exists — nothing to update for categories
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *stashBoxBatchTagTagTask) processMatchedTag(ctx context.Context, s *models.ScrapedTag, excluded map[string]bool) {
|
||||
// Determine the tag ID to update — either from the task's tag or from the
|
||||
// StoredID set by match.ScrapedTag (when batch adding by name and the tag
|
||||
// already exists locally).
|
||||
tagID := 0
|
||||
if t.tag != nil {
|
||||
tagID = t.tag.ID
|
||||
} else if s.StoredID != nil {
|
||||
tagID, _ = strconv.Atoi(*s.StoredID)
|
||||
}
|
||||
|
||||
if s.Parent != nil && t.createParent {
|
||||
if err := t.processParentTag(ctx, s.Parent, excluded); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if tagID > 0 {
|
||||
r := instance.Repository
|
||||
err := r.WithTxn(ctx, func(ctx context.Context) error {
|
||||
qb := r.Tag
|
||||
|
||||
existingStashIDs, err := qb.GetStashIDs(ctx, tagID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
storedID := strconv.Itoa(tagID)
|
||||
partial := s.ToPartial(storedID, t.box.Endpoint, excluded, existingStashIDs)
|
||||
|
||||
if err := tag.ValidateUpdate(ctx, tagID, partial, qb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := qb.UpdatePartial(ctx, tagID, partial); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to update tag %s: %v", s.Name, err)
|
||||
} else {
|
||||
logger.Infof("Updated tag %s", s.Name)
|
||||
}
|
||||
} else if s.Name != "" {
|
||||
// no existing tag, create a new one
|
||||
newTag := s.ToTag(t.box.Endpoint, excluded)
|
||||
|
||||
r := instance.Repository
|
||||
err := r.WithTxn(ctx, func(ctx context.Context) error {
|
||||
qb := r.Tag
|
||||
|
||||
if err := tag.ValidateCreate(ctx, *newTag, qb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := qb.Create(ctx, &models.CreateTagInput{Tag: newTag}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create tag %s: %v", s.Name, err)
|
||||
} else {
|
||||
logger.Infof("Created tag %s", s.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
Before Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 11 KiB |
1
internal/static/performer/NoName02.svg
Normal file
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 850 1250"><path fill="#fff" fill-rule="evenodd" d="M480.72 36.58c-10.56.01-21.69 1.97-34.35 9.32-28.85 23.98-32.38 59.58-34.6 98.83 2.47 24.57 6.03 47.44-4.17 73.49-11.7 1.42-24.56 5.66-37.6 3.92-13.16-1.77-28.8.54-35.24 7.21-20.4 27.97-29.09 64.17-40.61 100.3-11.53 36.13-18.01 65.15-18.22 70.6-2.83 15.39-2.51 33.25 2.95 50.89 11.33 29.32 21.89 56.36 33.98 87.95.79 1.81-3.34 3.11-2.83 5.1s1.53 4.44 1.83 6.54c.31 2.09 1.54 4.43 1.7 6.54.16 2.12 3.58 4.21 3.68 6.28.2 4.13.72 8.09 1.09 11.57.35 3.46.94 6.44 2.03 8.64l10.12 14.39c1.57 2.4 7.54-.99 8.58.93 2.17 4.07 8.13 2.75 10.29.62 1.53-1.52 3.99 2.03 6.07 1.8 2.09-.21 4.15-.6 6.13-1.25-7.69 22.24-20 44.47-23.07 66.71-8.78 29.38-21.69 44.27-26.36 88.13-1.3 17.3-.07 34.6 0 51.89l-9.88 32.94L280 901.69c-19.57 58.62-20.55 113.74-24.72 174.61l-6.59 12.36v18.93c-3.64 30.03-16.39 58.76-20.53 88.73.83 2.15 1.18 4.75 3.06 6.7 3.02 3.11 8.17 5.46 11.77 8.47 4.47 3.09 9.69 2.36 14.92 1.66 54.03-16.43 20.05-34.21 27.03-52.83.98-17.36 1.37-34.58 13.18-51.07 2.31-9.33-1.25-18.67-3.3-28.01 5.88-43.68 48.11-124.8 50.24-134.25l15.66-65.07c16.14-20.59 23.3-52.18 32.94-80.72l43.65-86.48 21.31-51.73c9.84-.07 19-10.61 23.16-16.63l9.88.82c-.86 10.43.07 20.87 10.71 31.29l27.19 132.61 16.47 37.88 11.54 52.71 15.65 104.61 1.64 42-4.11 14.83.82 30.48-4.94 38.72-11.1 31.68c-1.36 4.76-.24 11.37 2.86 14.2 20.97 20.41 52.34-.35 55.72-11.59.63-18.29 1.25-32.22 4.12-50.52l-.54-35.66-2.47-4.94v-12.36l-3.3-18.12c4.08-41.75 10.1-82.84 9.89-126.02-.54-35.83-6.63-70.28-16.48-103.77-2.39-22.61-4.97-45.14-6.92-67.98-1.4-16.4-2.48-32.94-2.95-49.8 1.15-11.25 2.29-21.55 3.17-31.35 2.86-32.14 2.9-58.75-8.16-94.67l-2.43-6.59 4.95-3.29c3.65-.82 3.05 4.44 11.53-3.3 1.52 1.27 4.12.91 8.72-2.47 3.65-9.09 11.8-16.92 
9.84-28.1-6.39-4.95-2.79-3.45-21.03-13.08-2.21-9.68.25-18.2.82-27.17.17-15.34-1.29-29.93-5.11-44.61-5.09-19.73-8.47-39.44-13-59.18-5.76-20.28-4.09-56.63-11.97-89.87-9.42-15.61-19.5-15.96-21.42-17.38l-11.08-31.12c3.56 6.11 6.41 12.23 10.71 18.33l-5.77-19.15c5.23 9.53 16.02 18.6 12.92 28.83 3.22-8.21-1.64-14.95-5.61-21.85 3.91 6.17 10.05 11.8 14.92 17.73-7.23-10.16-14.41-20.32-14.83-30.48l-4.94-24.71c3.06 7.81 2.43 15.63 9.65 23.44-4.6-8.91-1.76-17.82-2.24-26.75 1.76 9.07 3.58 18.13 9.89 27.19-6.47-12.36-6.92-24.71-9.06-37.06-2.43-8.59-5.1-17.18-6.13-25.77-1.33-11.18-.67-22.37-1.29-33.54l.83-42.84c-2.94-12.9-5.05-25.81-16.48-38.72-7.84-10.63-17.89-16.1-28.01-21.41-8-.74-15.89-1.91-24.11-1.91Zm69.42 210.28c.48 1.07 1.02 2.13 1.6 3.19-.6-1.05-1.15-2.11-1.6-3.19Zm-208.4 107.77c1.56 22.71.19 45.21 3.62 67.92 5.74 22.85 11.94 41.69 16.71 66.52.16 12.4 1.33 22.33-6.98 55.89l-4.83-13.46c-1.32-2.05-2.21-4.2-6.7-5.73-4.36-16.84-11.3-31.44-13.26-48.73-2.25-19.97-6.79-34.65-11.92-51.22 1.11-16.12 7.62-28.97 12.69-46.1l10.66-25.09Zm205.09 80.48 7.85 29.71c2.19 15.92 5.13 31.36 15.73 41.92l-6.7 17.77c.05-5.06.11-10.09-.59-15.14-3.01-7.45-6-12.83-9-17.16l-19.23-27.39c-1.85-2.04-4.76-4.67-8.16-7l-.87-7.26-1.76-7.29c10.68-.94 17.45-4.12 22.73-8.16Zm38.42 99.9 1.17 8.13-4.07 11.07-6.13-12.51 7-3.5 2.03-3.19Z"/></svg>
|
||||
|
After Width: | Height: | Size: 3 KiB |
|
Before Width: | Height: | Size: 11 KiB |
|
Before Width: | Height: | Size: 10 KiB |
|
Before Width: | Height: | Size: 11 KiB |
1
internal/static/performer/NoName05.svg
Normal file
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 850 1250"><path fill="#fff" d="M376.15 24.84c-3.58 1.02-10.08 2.26-14.39 2.85-9.64 1.31-12.93 2.19-18.26 4.82-14.17 7.01-23.67 19.21-29.66 37.99-3.36 10.52-4.53 19.14-5.26 37.62-.37 9.64-1.1 18.99-1.75 21.99-3.43 16.14-10.3 24.47-23.3 28.12-5.33 1.53-7.52 1.75-20.09 1.75-7.67 0-14.03.07-14.03.15 0 .15 1.46.58 3.29 1.02s3.29 1.02 3.29 1.31-3.07.95-6.72 1.46c-12.2 1.75-22.72 5.41-27.98 9.86-1.68 1.46-1.97 1.97-1.17 2.56.73.44.37 1.1-1.68 2.7-3.36 2.7-6.72 7.38-8.18 11.4-1.31 3.8-1.46 11.25-.22 13.95s2.12 2.48 2.12-.58c0-7.38 8.04-12.2 16.44-9.86 1.83.51 3.29 1.02 3.29 1.17 0 .22-1.39 2.05-3.14 4.16-1.75 2.19-3.87 5.33-4.75 7.16-1.83 3.94-2.92 9.13-1.68 8.4 1.17-.73 1.1-.44-1.39 4.82-2.63 5.62-4.38 13.22-4.38 19.21 0 5.11 1.02 10.08 2.12 10.45.44.15.8 1.53.8 3.07 0 3.87 2.48 11.4 5.04 15.41 2.19 3.58 5.84 6.72 8.69 7.6l1.68.51-1.53-2.48c-.8-1.31-1.46-3.29-1.46-4.38 0-2.48 2.78-10.08 3.73-10.08.37 0 .66 1.75.66 3.8.07 2.12.51 4.68 1.1 5.7 1.02 1.75 1.1 1.75.66-1.17-.51-3.29.88-11.25 1.97-11.25.58 0 .22 5.55-.51 7.89-.58 2.05 1.31 10.67 3.36 15.19 2.19 4.82 3.36 6.06 2.78 2.85-.58-3.07.66-2.05 1.53 1.31.95 3.14 5.04 10.45 5.62 9.86.15-.22 0-2.7-.37-5.55-.73-5.84 0-13.88 1.83-19.87 1.24-4.09 5.84-13.81 7.82-16.44s2.12-.58.22 3.14c-3.36 6.72-5.62 15.92-6.06 24.69-.44 8.62.15 14.9 1.31 14.9.37 0 .66-.44.58-.95-.22-2.05.15-4.89.66-4.89.29 0 .44 2.12.29 4.68-.73 11.47 4.46 26.44 10.23 29.51 3.8 2.05 3.94 2.7 3.43 25.86-.51 24.4-1.24 31.63-8.33 81.3-5.92 41.57-6.57 46.9-8.4 66.26-2.19 24.11-2.12 23.74-12.35 43.25-2.19 4.16-3 6.57-3 8.91q0 3.21 4.24 5.26l4.16 1.97.22 6.65c.22 7.52.22 7.6 6.57 7.6h3.14l-.44 5.26c-1.1 12.71-2.78 24.18-4.53 32-3 13.22-2.85 14.76 2.41 25.93 4.09 8.84 13.37 25.71 18.85 34.33 2.05 3.07 3 3.87 5.99 4.82 3.43 1.02 3.58 1.24 4.09 4.31.29 1.83 1.1 19.72 1.68 39.81.58 20.09 1.75 45.07 2.56 55.52 3.43 43.83 3.36 54.64-.73 100.08-1.17 13.22-3.29 43.03-4.75 66.11-1.39 23.08-4.02 65.16-5.84 93.5-3.65 
58.07-6.65 108.33-7.38 124.33l-.51 10.74 1.75.37c.95.22 4.53 1.1 8.04 1.83 8.99 2.12 14.83 4.46 21.33 8.55 5.33 3.36 5.62 3.73 5.26 5.77-1.9 11.18-.58 19.87 3.87 24.62 7.52 7.96 25.06 8.55 34.84 1.17 5.92-4.46 8.4-11.4 8.4-23.52v-6.79l7.74-3.43c4.31-1.97 9.42-4.09 11.54-4.82l3.73-1.24-.22-3.21c-.07-1.75-.95-19.65-1.97-39.74-3.8-75.83-4.68-90.58-9.13-148.29-4.46-57.71-5.19-71.44-5.62-104.83-.58-41.71.44-58.37 7.38-121.7 4.82-43.83 8.33-79.26 9.2-93.8.95-14.61 2.26-23.96 4.38-30.68 1.97-6.5 2.63-7.38 3.51-5.11.44 1.02 4.38 11.03 8.77 22.28 4.38 11.25 14.46 36.96 22.35 57.2 7.96 20.16 20.75 52.23 28.49 71.22 32.14 78.75 37.55 96.06 49.02 159.03 2.26 12.27 5.41 29.37 7.01 37.99 6.72 36.6 12.56 72.32 27.03 165.38 6.28 40.1 6.94 43.76 8.33 44.19.8.29 10.88 3.8 22.35 7.74l20.89 7.16.51 6.28c1.46 19.07 7.89 26.3 24.03 26.96 6.5.29 7.38.15 11.1-1.61 4.46-2.26 9.72-7.6 12.05-12.49 1.97-4.09 2.19-12.49.44-19.07-.66-2.48-1.17-4.68-1.17-4.89 0-.15 2.19-1.61 4.82-3.14 5.7-3.29 9.57-7.52 10.23-11.03.73-3.94-.88-11.91-7.38-36.31-7.67-28.78-33.24-118.05-39.45-137.77-2.63-8.25-10.52-32.07-17.6-52.96-17.82-52.81-19.21-57.93-34.63-124.91-8.99-39.3-18.7-80.79-30.68-130.76-5.55-23.3-11.18-47.77-12.42-54.42-2.63-14.03-5.84-24.11-13.15-41.27-2.92-6.87-6.65-16.07-8.25-20.45l-2.92-8.04-.22-18.63c-.07-10.23-.51-22.06-.95-26.22s-.58-7.89-.37-8.25c.29-.44 1.46-1.17 2.78-1.68 2.19-.95 2.34-.88 5.99 2.56 7.16 6.79 15.49 7.38 27.03 1.9 3.07-1.46 6.43-3.29 7.6-4.09l2.05-1.46 4.09 5.55 4.16 5.48 4.53-3.58c6.36-5.04 11.25-7.96 15.41-9.06 3-.8 4.02-1.61 6.79-5.11 4.53-5.99 8.99-14.54 13.73-26.88 2.26-5.84 6.28-15.19 8.91-20.82 18.34-39.23 23.08-55.96 19.58-69.32-.66-2.41-4.31-10.74-8.11-18.63-17.17-34.99-29.37-64.87-43.03-105.26-8.18-24.25-11.4-31.78-15.71-37.04-1.97-2.34-4.97-6.43-6.57-9.2-3.87-6.43-4.97-7.09-11.69-7.01-3.87.07-8.77-.73-16.8-2.56-7.6-1.83-14.83-2.92-21.99-3.51l-10.74-.88 1.31-3.8c11.18-32.43 14.03-69.18 
7.38-96.79-6.43-27.25-27.9-59.46-47.92-71.95-3.07-1.9-8.4-4.46-11.91-5.62-5.7-2.05-7.38-2.26-16.58-2.48-9.06-.22-10.96 0-16.8 1.61Zm125.64 306.44c7.3 11.25 14.76 27.76 25.2 55.74 9.72 26.15 10.96 32.51 8.33 41.42-1.31 4.31-13.37 28.85-15.85 32.29-.95 1.31-3.14 2.63-5.84 3.65-5.55 1.97-8.55 4.82-8.55 7.89 0 1.83.73 3.07 3.29 5.55 1.83 1.75 3.29 3.58 3.29 4.09 0 .58-.95 1.53-2.05 2.19-1.17.73-3.51 2.78-5.19 4.6-2.92 3.21-3.07 3.51-1.9 4.75 1.1 1.24 1.1 1.39-.15 1.68-5.62 1.61-5.26 1.75-13.44-6.21-4.24-4.16-8.91-8.18-10.37-8.91-1.39-.8-2.78-2.12-3-2.92-.22-.88-.8-6.57-1.31-12.78-1.61-20.97 1.97-48.36 10.81-82.25 1.75-6.87 4.82-18.7 6.79-26.3 1.97-7.67 4.16-17.39 4.89-21.77.66-4.31 1.39-7.82 1.53-7.82s1.75 2.26 3.51 5.11ZM283.01 578.92c0 9.72-.66 11.61-2.19 6.79-.95-2.7-2.19-14.76-1.68-15.78.15-.29 1.1-.51 2.12-.51h1.75v9.5Zm-.73 60.92c.58.07 1.31.88 1.53 1.9.73 2.78.73 10.23.07 10.23-1.53 0-7.45-10.3-7.45-12.93 0-.22 1.1-.07 2.41.22 1.24.29 2.85.51 3.43.58Z" vector-effect="non-scaling-stroke"/></svg>
|
||||
|
After Width: | Height: | Size: 4.8 KiB |
|
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 3 KiB |
|
Before Width: | Height: | Size: 8.5 KiB |
1
internal/static/performer/NoName07.svg
Normal file
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 850 1250"><path fill="#fff" d="M472.98 38.67c-13.15.96-26.67 7.37-39.17 14.9-15.4 4.08-26.21 21.1-32.37 33.99-5.93 21.36-10.44 43.16-12.62 65.39-8.56 18.78 2.69 49.48-20.08 58.28-26.46 3.29-45.91 24.89-57.3 47.58-7.59 25.25-21.44 47.64-34.64 70.25-8.14 19.94-32.81 48.39-10.36 66.7 26.26 16.49 58.12 9.34 86.76 5.18 19.26 6.03 13.11 40.3 13.27 57.62.75 24.57-7.78 47.72-11.98 71.55-5.73 30.16 7.91 58.68 9.71 88.38 5.03 13.95-6.07 56.76 17.49 46.62 13.87 33.68 36.67 62.85 51.48 96.15 8.34 20.57 26.8 43.12 16.18 66.04-13.64 21.93-14.66 48.16-13.27 73.17-.62 26.72 8.46 52.33 10.04 78.64.81 38.12-21.73 71.17-25.9 108.47-1.34 21.76-26.4 39.99-12.31 61.54.39 2.59 6.27 16.37 9.72 29.42 2.95 11.19 7.33 18.65 11.32 25.9 7.35 14.5 20.87 3.11 23.96-3.83 8.76-19.79 14.33-29.42 13.27-5.18-1.06 24.24 47.37-4.77 52.13-24.35.58-2.38-.94-3.52 1.62-11.29 2.44-19.89-13.36-38.12-11.98-60.81-1.27-32.43-9.15-64.23-10.69-96.55-2.57-23.93 7.7-45.17 15.87-66.99 19.16-49.34 5.91-103.12 15.54-154.1 10.93-42.36 20.98-85.33 38.52-125.61-.89-18.39 20.38-20.42 25.58-30.76-17.12-45.49-12.18-96.36-31.73-141.15-11.79-19.53-23.53-41.51-34.95-59.57 10.41 8.04 27.29 13.76 37.22-2.59 2.46-11.07 14.4-21 9.72-32.7-8.61-21.13-4.68-43.27-8.42-65.72-10.33-20.34 26.7-1.53 19.42-11.33 21.04-14.01-21.31 8.01-11.98-12.3-4.57-23.82-10.07-47.05-19.75-69.29-7.05-48.59-12.56-98.26-29.46-144.71-7.04-10.12-4.91-16.98-13.6-28.49-10.4-14.73-23.1-19.4-36.26-18.45ZM343.16 329.72c5.59.8 1.72 20.91-.96 29.46-.23 6.92-4.89 5.55-9.4 4.21-23.94.47-6.9-16.97 1.29-26.87 4.43-5.2 7.2-7.07 9.06-6.8Z"/></svg>
|
||||
|
After Width: | Height: | Size: 1.6 KiB |
|
Before Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 9.7 KiB |
1
internal/static/performer/NoName09.svg
Normal file
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 850 1250"><path fill="#fff" d="M390.27 1215.71c-26.85-.4-39.98-26.54-37.66-50.52-5.05-22.51-3.56-45.85-4.53-68.85 2.06-23.14 21.76-41.17 15.57-66.3-3.11-37.23-4.87-74.88-.58-112.07 4.2-24.94 13.46-48.45 25.58-70.54.29-20.04-3.9-39.59-12.25-57.87-14.58-37.71-27.84-75.94-45.89-112.18-8.06-20.45-4.48-48.36-34.09-48.93-17.01-1.88-35.19-2.94-30.5-26.06-1.18-31.57 3.95-62.99 2.66-94.64 4.55-28.15-22.55-45.52-37.4-65.53-15.7-21.87-32.9-42.98-45.36-67-4.35-29.81 14.01-56.22 24.4-82.85 12.15-30.72 29.69-58.93 42.97-89.06 10.76-17.8 24.8-40.58 48.45-39.99 22.64-.45 46.86.54 65.3-14.85 17.26-15.49 22.96-38.33 32.33-58.42 12.24-28.4 35.57-54.65 67.74-58.99 24.33-3.89 50.81 5.21 68.9 21.61 3.9 22.95 19.71 38.48 25.56 60.65 2.49 21.53-2.29 45.5 24.99 52.18 24.74 16.83 50.28 32.79 73.42 51.79 14.52 18.65-5.63 46.05-27.33 47.31-28.5 13.29-59.15 21.79-90.55 24.61-27.88-4.29-18.63 31.66-1.72 39.88 23.56 21.16-22.88-12.64-23.18-12.36-.26 12.99-3.78 24.58-8.09 5.3-11.15-22.15 3.96 31.49-5.43 19.37-1.78-11.09-13.78-34.88-13.28-9.82 1.3 8.03 14.21 33.96-.67 15.64-6.99-15.67-3.58-33.86-5.09-50.74-26.39-2.77-11.88 22.66-7.91 36.5 5.97 25-25.13 39.55-22.68 61.96 12.94 16.43 31.57 29.5 42.84 49.13 24.41 33.32 29.99 74.92 40.63 113.67-1.49 14.89 32.93 29.94 3.81 30.29-16.72 11.07-10.87 39.91-21.52 57.74-7.71 38.93-19.29 77-31.3 114.78-8.06 27.2-18.27 54.63-17.23 83.57-.36 40.67-6.09 81.1-16.62 120.35-5.81 25.47-15.31 50.35-14.89 77.06-2.19 24.53 2.41 48.98 11.71 71.6 7.37 20.12-4.17 48.71-29.66 40.63-24.51-5.69-10.97 27.47 2.91 33.26 14.53 16.75 1.15 34.28-19.32 29.02-5.02 0-10.06.1-15.06-.33Zm-121.8-810.49c.16-22.09 10.41-42.25 5.24-64.23-2.08-9.98 4.9-41.86-5.44-38.66-6.72 22.85-29.25 40.55-28.37 65.34 8.51 16.52 21.61 30.41 27.22 48.67 1.8-3.45.92-7.45 1.35-11.12Zm252.68-105.3c.93-13.95-20.6-7.37-6.22 2.41 3.97 6.03 8.46 5.04 6.22-2.41Zm59.8-74.98c-11.22-15.73-26.65-27.69-40.46-40.91-11.99-1.74-12.72 24.78-21.55 33.89 1.23 17.12 35.65 
4.3 49.7 7.82 4.1-.25 8.26-.03 12.31-.8Zm-20.67-81.28c4.51-20.53-17.82 12.62-1.15 4.06l1.15-4.06Zm-5.14-8.5c8.84-11.18-12.11-43.92-7.45-15.82 1.26 3.6-.62 29.43 7.45 15.82Z"/></svg>
|
||||
|
After Width: | Height: | Size: 2.1 KiB |
|
Before Width: | Height: | Size: 10 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 3.5 KiB |