Able to Generate

Lots of errors to fix and TODO notes
This commit is contained in:
Bob 2026-04-22 21:43:27 -07:00
parent 31b69c1e8b
commit bb76aff557
73 changed files with 5061 additions and 1314 deletions

87
.golangci.bck.yml Normal file
View file

@ -0,0 +1,87 @@
# NOTE(review): this appears to be a backup (.bck) of the pre-v2 golangci-lint
# config, kept alongside the migrated v2 config — confirm it is meant to be committed.

# options for analysis running
run:
  timeout: 5m

linters:
  disable-all: true
  enable:
    # Default set of linters from golangci-lint
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - typecheck
    - unused
    # Linters added by the stash project.
    # - contextcheck
    - copyloopvar
    - dogsled
    - errchkjson
    - errorlint
    # - exhaustive
    - gocritic
    # - goerr113
    - gofmt
    # - gomnd
    # - ifshort
    - misspell
    # - nakedret
    - noctx
    - revive
    - rowserrcheck
    - sqlclosecheck

# Project-specific linter overrides
linters-settings:
  gofmt:
    simplify: false
  errorlint:
    # Disable errorf because there are false positives, where you don't want to wrap
    # an error.
    errorf: false
    asserts: true
    comparison: true
  revive:
    ignore-generated-header: true
    severity: error
    confidence: 0.8
    rules:
      - name: blank-imports
        disabled: true
      - name: context-as-argument
      - name: context-keys-type
      - name: dot-imports
      - name: error-return
      - name: error-strings
      - name: error-naming
      - name: exported
        disabled: true
      - name: if-return
        disabled: true
      - name: increment-decrement
      - name: var-naming
        disabled: true
      - name: var-declaration
      - name: package-comments
      - name: range
      - name: receiver-naming
      - name: time-naming
      - name: unexported-return
        disabled: true
      - name: indent-error-flow
        disabled: true
      - name: errorf
      - name: empty-block
        disabled: true
      - name: superfluous-else
      - name: unused-parameter
        disabled: true
      - name: unreachable-code
      - name: redefines-builtin-id
  rowserrcheck:
    packages:
      - github.com/jmoiron/sqlx

View file

@ -1,87 +1,86 @@
# options for analysis running
run:
timeout: 5m
version: "2"
linters:
disable-all: true
default: none
enable:
# Default set of linters from golangci-lint
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- typecheck
- unused
# Linters added by the stash project.
# - contextcheck
- copyloopvar
- dogsled
- errcheck
- errchkjson
- errorlint
# - exhaustive
- gocritic
# - goerr113
- gofmt
# - gomnd
# - ifshort
- govet
- ineffassign
- misspell
# - nakedret
- noctx
- revive
- rowserrcheck
- sqlclosecheck
# Project-specific linter overrides
linters-settings:
gofmt:
simplify: false
errorlint:
# Disable errorf because there are false positives, where you don't want to wrap
# an error.
errorf: false
asserts: true
comparison: true
revive:
ignore-generated-header: true
severity: error
confidence: 0.8
rules:
- name: blank-imports
disabled: true
- name: context-as-argument
- name: context-keys-type
- name: dot-imports
- name: error-return
- name: error-strings
- name: error-naming
- name: exported
disabled: true
- name: if-return
disabled: true
- name: increment-decrement
- name: var-naming
disabled: true
- name: var-declaration
- name: package-comments
- name: range
- name: receiver-naming
- name: time-naming
- name: unexported-return
disabled: true
- name: indent-error-flow
disabled: true
- name: errorf
- name: empty-block
disabled: true
- name: superfluous-else
- name: unused-parameter
disabled: true
- name: unreachable-code
- name: redefines-builtin-id
rowserrcheck:
packages:
- github.com/jmoiron/sqlx
- staticcheck
- unused
settings:
errorlint:
errorf: false
asserts: true
comparison: true
revive:
confidence: 0.8
severity: error
rules:
- name: blank-imports
disabled: true
- name: context-as-argument
- name: context-keys-type
- name: dot-imports
- name: error-return
- name: error-strings
- name: error-naming
- name: exported
disabled: true
- name: if-return
disabled: true
- name: increment-decrement
- name: var-naming
disabled: true
- name: var-declaration
- name: package-comments
- name: range
- name: receiver-naming
- name: time-naming
- name: unexported-return
disabled: true
- name: indent-error-flow
disabled: true
- name: errorf
- name: empty-block
disabled: true
- name: superfluous-else
- name: unused-parameter
disabled: true
- name: unreachable-code
- name: redefines-builtin-id
rowserrcheck:
packages:
- github.com/jmoiron/sqlx
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
settings:
gofmt:
simplify: false
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

View file

@ -24,7 +24,7 @@ The `Audio` datatype is similar to `Scene` but stores audio-only media (i.e. Aud
- Audio File metadata:
- duration
- audio codec
- OPTIONAL (can be added now or later)
- FUTURE (to be considered at a later date)
- channels (mono, stereo, 5.1, 7.1)
- bitrate
- sample rate

View file

@ -44,6 +44,13 @@ models:
fieldName: DurationFinite
frame_rate:
fieldName: FrameRateFinite
AudioFile:
fields:
# override float fields - #1572
duration:
fieldName: DurationFinite
sample_rate:
fieldName: SampleRateFinite
# movie is group under the hood
Movie:
model: github.com/stashapp/stash/pkg/models.Group
@ -96,6 +103,8 @@ models:
model: github.com/stashapp/stash/internal/manager.StashBoxBatchTagInput
SceneStreamEndpoint:
model: github.com/stashapp/stash/internal/manager.SceneStreamEndpoint
AudioStreamEndpoint:
model: github.com/stashapp/stash/internal/manager.AudioStreamEndpoint
ExportObjectTypeInput:
model: github.com/stashapp/stash/internal/manager.ExportObjectTypeInput
ExportObjectsInput:

View file

@ -3,11 +3,8 @@
type AudioFileType {
size: String
duration: Float
video_codec: String
audio_codec: String
width: Int
height: Int
framerate: Float
samplerate: Float
bitrate: Int
}
@ -19,31 +16,25 @@ type AudioPathsType {
vtt: String # Resolver
sprite: String # Resolver
funscript: String # Resolver
interactive_heatmap: String # Resolver
caption: String # Resolver
}
type AudioMovie {
movie: Movie!
audio_index: Int
}
type AudioGroup {
group: Group!
audio_index: Int
}
type VideoCaption {
language_code: String!
caption_type: String!
}
# TODO(audio|AudioCaption): need to update IF AudioCaption required
# type AudioCaption {
# language_code: String!
# caption_type: String!
# }
type Audio {
id: ID!
title: String
code: String
details: String
director: String
url: String @deprecated(reason: "Use urls")
urls: [String!]!
date: String
@ -51,8 +42,8 @@ type Audio {
rating100: Int
organized: Boolean!
o_counter: Int
interactive: Boolean!
interactive_speed: Int
# TODO(audio|AudioCaption): need to update IF AudioCaption required
# captions: [AudioCaption!]
captions: [VideoCaption!]
created_at: Time!
updated_at: Time!
@ -70,16 +61,14 @@ type Audio {
"Times the o counter was incremented"
o_history: [Time!]!
files: [VideoFile!]!
files: [AudioFile!]!
paths: AudioPathsType! # Resolver
audio_markers: [AudioMarker!]!
galleries: [Gallery!]!
# TODO(future|audio_markers): add in audio markers
# audio_markers: [AudioMarker!]!
studio: Studio
groups: [AudioGroup!]!
movies: [AudioMovie!]! @deprecated(reason: "Use groups")
tags: [Tag!]!
performers: [Performer!]!
stash_ids: [StashID!]!
custom_fields: Map!
@ -87,11 +76,6 @@ type Audio {
audioStreams: [AudioStreamEndpoint!]!
}
input AudioMovieInput {
movie_id: ID!
audio_index: Int
}
input AudioGroupInput {
group_id: ID!
audio_index: Int
@ -101,7 +85,6 @@ input AudioCreateInput {
title: String
code: String
details: String
director: String
url: String @deprecated(reason: "Use urls")
urls: [String!]
date: String
@ -109,14 +92,11 @@ input AudioCreateInput {
rating100: Int
organized: Boolean
studio_id: ID
gallery_ids: [ID!]
performer_ids: [ID!]
groups: [AudioGroupInput!]
movies: [AudioMovieInput!] @deprecated(reason: "Use groups")
tag_ids: [ID!]
"This should be a URL or a base64 encoded data URL"
cover_image: String
stash_ids: [StashIDInput!]
"""
The first id will be assigned as primary.
@ -134,7 +114,6 @@ input AudioUpdateInput {
title: String
code: String
details: String
director: String
url: String @deprecated(reason: "Use urls")
urls: [String!]
date: String
@ -144,14 +123,11 @@ input AudioUpdateInput {
@deprecated(reason: "Unsupported - Use audioIncrementO/audioDecrementO")
organized: Boolean
studio_id: ID
gallery_ids: [ID!]
performer_ids: [ID!]
groups: [AudioGroupInput!]
movies: [AudioMovieInput!] @deprecated(reason: "Use groups")
tag_ids: [ID!]
"This should be a URL or a base64 encoded data URL"
cover_image: String
stash_ids: [StashIDInput!]
"The time index a audio was left at"
resume_time: Float
@ -168,24 +144,12 @@ input AudioUpdateInput {
custom_fields: CustomFieldsInput
}
enum BulkUpdateIdMode {
SET
ADD
REMOVE
}
input BulkUpdateIds {
ids: [ID!]
mode: BulkUpdateIdMode!
}
input BulkAudioUpdateInput {
clientMutationId: String
ids: [ID!]
title: String
code: String
details: String
director: String
url: String @deprecated(reason: "Use urls")
urls: BulkUpdateStrings
date: String
@ -193,11 +157,9 @@ input BulkAudioUpdateInput {
rating100: Int
organized: Boolean
studio_id: ID
gallery_ids: BulkUpdateIds
performer_ids: BulkUpdateIds
tag_ids: BulkUpdateIds
group_ids: BulkUpdateIds
movie_ids: BulkUpdateIds @deprecated(reason: "Use group_ids")
custom_fields: CustomFieldsInput
}
@ -234,27 +196,17 @@ input AudioParserInput {
ignoreOrganized: Boolean
}
type AudioMovieID {
movie_id: ID!
audio_index: String
}
type AudioParserResult {
audio: Audio!
title: String
code: String
details: String
director: String
url: String
date: String
# rating expressed as 1-5
rating: Int @deprecated(reason: "Use 1-100 range with rating100")
# rating expressed as 1-100
rating100: Int
studio_id: ID
gallery_ids: [ID!]
performer_ids: [ID!]
movies: [AudioMovieID!]
tag_ids: [ID!]
}
@ -293,8 +245,3 @@ input AudioMergeInput {
play_history: Boolean
o_history: Boolean
}
type HistoryMutationResult {
count: Int!
history: [Time!]!
}

View file

@ -124,6 +124,34 @@ type ImageFile implements BaseFile {
union VisualFile = VideoFile | ImageFile
type AudioFile implements BaseFile {
  # TODO: edit this — leftover authoring note; confirm the audio-specific
  # fields below (format/duration/audio_codec/sample_rate/bit_rate) are final
  id: ID!
  path: String!
  basename: String!
  parent_folder_id: ID! @deprecated(reason: "Use parent_folder instead")
  zip_file_id: ID @deprecated(reason: "Use zip_file instead")
  parent_folder: Folder!
  zip_file: BasicFile
  mod_time: Time!
  size: Int64!

  fingerprint(type: String!): String
  fingerprints: [Fingerprint!]!

  # audio-specific file properties
  format: String!
  duration: Float!
  audio_codec: String!
  sample_rate: Float!
  bit_rate: Int!

  created_at: Time!
  updated_at: Time!
}
type GalleryFile implements BaseFile {
id: ID!
path: String!

View file

@ -779,6 +779,97 @@ input ImageFilterType {
custom_fields: [CustomFieldCriterionInput!]
}
input AudioFilterType {
  AND: AudioFilterType
  OR: AudioFilterType
  NOT: AudioFilterType

  id: IntCriterionInput
  title: StringCriterionInput
  code: StringCriterionInput
  details: StringCriterionInput
  "Filter by file oshash"
  oshash: StringCriterionInput
  "Filter by file checksum"
  checksum: StringCriterionInput
  # NOTE(review): phash is a perceptual video/image hash — confirm it applies to audio files
  "Filter by file phash"
  phash: StringCriterionInput @deprecated(reason: "Use phash_distance instead")
  "Filter by file phash distance"
  phash_distance: PhashDistanceCriterionInput
  "Filter by path"
  path: StringCriterionInput
  "Filter by file count"
  file_count: IntCriterionInput
  # rating expressed as 1-100
  rating100: IntCriterionInput
  "Filter by organized"
  organized: Boolean
  "Filter by o-counter"
  o_counter: IntCriterionInput
  "Filter audios by duplication criteria"
  duplicated: DuplicationCriterionInput
  "Filter by sample rate"
  samplerate: IntCriterionInput
  "Filter by bit rate"
  bitrate: IntCriterionInput
  "Filter by audio codec"
  audio_codec: StringCriterionInput
  "Filter by duration (in seconds)"
  duration: IntCriterionInput
  "Filter to only include audios missing this property"
  is_missing: String
  "Filter to only include audios with this studio"
  studios: HierarchicalMultiCriterionInput
  "Filter to only include audios with this group"
  groups: HierarchicalMultiCriterionInput
  "Filter to only include audios with these tags"
  tags: HierarchicalMultiCriterionInput
  "Filter by tag count"
  tag_count: IntCriterionInput
  "Filter to only include audios with performers with these tags"
  performer_tags: HierarchicalMultiCriterionInput
  "Filter audios that have performers that have been favorited"
  performer_favorite: Boolean
  "Filter audios by performer age at time of audio"
  performer_age: IntCriterionInput
  "Filter to only include audios with these performers"
  performers: MultiCriterionInput
  "Filter by performer count"
  performer_count: IntCriterionInput
  "Filter by url"
  url: StringCriterionInput
  "Filter by captions"
  captions: StringCriterionInput
  "Filter by resume time"
  resume_time: IntCriterionInput
  "Filter by play count"
  play_count: IntCriterionInput
  "Filter by play duration (in seconds)"
  play_duration: IntCriterionInput
  "Filter by audio last played time"
  last_played_at: TimestampCriterionInput
  "Filter by date"
  date: DateCriterionInput
  "Filter by creation time"
  created_at: TimestampCriterionInput
  "Filter by last update time"
  updated_at: TimestampCriterionInput
  "Filter by related performers that meet this criteria"
  performers_filter: PerformerFilterType
  "Filter by related studios that meet this criteria"
  studios_filter: StudioFilterType
  "Filter by related tags that meet this criteria"
  tags_filter: TagFilterType
  "Filter by related groups that meet this criteria"
  groups_filter: GroupFilterType
  "Filter by related files that meet this criteria"
  files_filter: FileFilterType

  custom_fields: [CustomFieldCriterionInput!]
}
input FileFilterType {
AND: FileFilterType
OR: FileFilterType
@ -864,6 +955,17 @@ input VideoFileFilterInput {
interactive: Boolean
interactive_speed: IntCriterionInput
}
input AudioFileFilterInput {
  samplerate: IntCriterionInput
  bitrate: IntCriterionInput
  format: StringCriterionInput
  audio_codec: StringCriterionInput
  "in seconds"
  duration: IntCriterionInput
  # NOTE(review): the AudioFile type declares no caption field in this schema —
  # confirm captions filtering is applicable to audio files
  captions: StringCriterionInput
}
input ImageFileFilterInput {
format: StringCriterionInput

View file

@ -395,10 +395,37 @@ func (t changesetTranslator) relatedGroups(value []models.SceneGroupInput) (mode
if err != nil {
return models.RelatedGroups{}, err
}
return models.NewRelatedGroups(groupsScenes), nil
}
// groupsAudioFromGroupInput converts GraphQL AudioGroupInput values into
// models.GroupsAudios relationships, parsing each string group ID into an int.
// It returns an error for the first group ID that is not a valid integer.
func groupsAudioFromGroupInput(input []models.AudioGroupInput) ([]models.GroupsAudios, error) {
	ret := make([]models.GroupsAudios, len(input))

	for i, v := range input {
		mID, err := strconv.Atoi(v.GroupID)
		if err != nil {
			// wrap the strconv error so callers can inspect the cause with errors.Is/As
			return nil, fmt.Errorf("invalid group ID %q: %w", v.GroupID, err)
		}

		ret[i] = models.GroupsAudios{
			GroupID:    mID,
			AudioIndex: v.AudioIndex,
		}
	}

	return ret, nil
}
// relatedGroupsAudio builds a RelatedGroupsAudio relationship set from the
// given audio group inputs. The zero value and the conversion error are
// returned when any group ID fails to parse.
func (t changesetTranslator) relatedGroupsAudio(value []models.AudioGroupInput) (models.RelatedGroupsAudio, error) {
	converted, convErr := groupsAudioFromGroupInput(value)
	if convErr != nil {
		var zero models.RelatedGroupsAudio
		return zero, convErr
	}
	return models.NewRelatedGroupsAudio(converted), nil
}
func (t changesetTranslator) updateGroupIDsFromMovies(value []models.SceneMovieInput, field string) (*models.UpdateGroupIDs, error) {
if !t.hasField(field) {
return nil, nil
@ -452,6 +479,44 @@ func (t changesetTranslator) updateGroupIDsBulk(value *BulkUpdateIds, field stri
}, nil
}
// updateGroupIDsAudio returns a SET-mode group update built from the given
// audio group inputs, or nil when the field was not present in the input map.
func (t changesetTranslator) updateGroupIDsAudio(value []models.AudioGroupInput, field string) (*models.UpdateGroupIDsAudio, error) {
	if !t.hasField(field) {
		return nil, nil
	}

	groups, convErr := groupsAudioFromGroupInput(value)
	if convErr != nil {
		return nil, convErr
	}

	update := &models.UpdateGroupIDsAudio{
		Groups: groups,
		Mode:   models.RelationshipUpdateModeSet,
	}
	return update, nil
}
// updateGroupIDsBulkAudio translates a bulk id update input into a group
// update for audios, preserving the requested update mode. It returns nil
// when the field is absent from the input or the value is nil.
func (t changesetTranslator) updateGroupIDsBulkAudio(value *BulkUpdateIds, field string) (*models.UpdateGroupIDsAudio, error) {
	if !t.hasField(field) || value == nil {
		return nil, nil
	}

	parsed, convErr := stringslice.StringSliceToIntSlice(value.Ids)
	if convErr != nil {
		return nil, fmt.Errorf("converting ids [%v]: %w", value.Ids, convErr)
	}

	// bulk updates carry no per-group audio index, so only GroupID is populated
	groups := make([]models.GroupsAudios, 0, len(parsed))
	for _, id := range parsed {
		groups = append(groups, models.GroupsAudios{GroupID: id})
	}

	return &models.UpdateGroupIDsAudio{
		Groups: groups,
		Mode:   value.Mode,
	}, nil
}
func groupsDescriptionsFromGroupInput(input []*GroupDescriptionInput) ([]models.GroupIDDescription, error) {
ret := make([]models.GroupIDDescription, len(input))

View file

@ -0,0 +1,225 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"github.com/stashapp/stash/pkg/models"
)

// AudioFileIDsLoaderConfig captures the config to create a new AudioFileIDsLoader
type AudioFileIDsLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([][]models.FileID, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewAudioFileIDsLoader creates a new AudioFileIDsLoader given a fetch, wait, and maxBatch
func NewAudioFileIDsLoader(config AudioFileIDsLoaderConfig) *AudioFileIDsLoader {
	return &AudioFileIDsLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioFileIDsLoader batches and caches requests
type AudioFileIDsLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]models.FileID, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]models.FileID

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioFileIDsLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

// audioFileIDsLoaderBatch collects keys until the batch is dispatched; done is
// closed once data/error have been populated by the fetch.
type audioFileIDsLoaderBatch struct {
	keys    []int
	data    [][]models.FileID
	error   []error
	closing bool
	done    chan struct{}
}

// Load a FileID list by key, batching and caching will be applied automatically
func (l *AudioFileIDsLoader) Load(key int) ([]models.FileID, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a FileID list.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioFileIDsLoader) LoadThunk(key int) func() ([]models.FileID, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() ([]models.FileID, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioFileIDsLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() ([]models.FileID, error) {
		<-batch.done

		var data []models.FileID
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *AudioFileIDsLoader) LoadAll(keys []int) ([][]models.FileID, []error) {
	results := make([]func() ([]models.FileID, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	fileIDs := make([][]models.FileID, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		fileIDs[i], errors[i] = thunk()
	}
	return fileIDs, errors
}

// LoadAllThunk returns a function that when called will block waiting for the FileIDs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioFileIDsLoader) LoadAllThunk(keys []int) func() ([][]models.FileID, []error) {
	results := make([]func() ([]models.FileID, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([][]models.FileID, []error) {
		fileIDs := make([][]models.FileID, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			fileIDs[i], errors[i] = thunk()
		}
		return fileIDs, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *AudioFileIDsLoader) Prime(key int, value []models.FileID) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := make([]models.FileID, len(value))
		copy(cpy, value)
		l.unsafeSet(key, cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *AudioFileIDsLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the cache without locking; the caller must hold l.mu.
func (l *AudioFileIDsLoader) unsafeSet(key int, value []models.FileID) {
	if l.cache == nil {
		l.cache = map[int][]models.FileID{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *audioFileIDsLoaderBatch) keyIndex(l *AudioFileIDsLoader, key int) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	// the first key added to a batch starts the dispatch timer
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

// startTimer waits l.wait, then detaches the batch from the loader and finalizes it.
func (b *audioFileIDsLoaderBatch) startTimer(l *AudioFileIDsLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for the collected keys and unblocks all waiting thunks.
func (b *audioFileIDsLoaderBatch) end(l *AudioFileIDsLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -0,0 +1,222 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"
)

// AudioLastPlayedLoaderConfig captures the config to create a new AudioLastPlayedLoader
type AudioLastPlayedLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([]*time.Time, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewAudioLastPlayedLoader creates a new AudioLastPlayedLoader given a fetch, wait, and maxBatch
func NewAudioLastPlayedLoader(config AudioLastPlayedLoaderConfig) *AudioLastPlayedLoader {
	return &AudioLastPlayedLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioLastPlayedLoader batches and caches requests
type AudioLastPlayedLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]*time.Time, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]*time.Time

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioLastPlayedLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

// audioLastPlayedLoaderBatch collects keys until the batch is dispatched; done is
// closed once data/error have been populated by the fetch.
type audioLastPlayedLoaderBatch struct {
	keys    []int
	data    []*time.Time
	error   []error
	closing bool
	done    chan struct{}
}

// Load a Time by key, batching and caching will be applied automatically
func (l *AudioLastPlayedLoader) Load(key int) (*time.Time, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a Time.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioLastPlayedLoader) LoadThunk(key int) func() (*time.Time, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*time.Time, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioLastPlayedLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*time.Time, error) {
		<-batch.done

		var data *time.Time
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *AudioLastPlayedLoader) LoadAll(keys []int) ([]*time.Time, []error) {
	results := make([]func() (*time.Time, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	times := make([]*time.Time, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		times[i], errors[i] = thunk()
	}
	return times, errors
}

// LoadAllThunk returns a function that when called will block waiting for the Times.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioLastPlayedLoader) LoadAllThunk(keys []int) func() ([]*time.Time, []error) {
	results := make([]func() (*time.Time, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*time.Time, []error) {
		times := make([]*time.Time, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			times[i], errors[i] = thunk()
		}
		return times, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
// NOTE(review): generated code dereferences value unconditionally — Prime
// panics if value is nil; callers must not prime with a nil *time.Time.
func (l *AudioLastPlayedLoader) Prime(key int, value *time.Time) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *AudioLastPlayedLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the cache without locking; the caller must hold l.mu.
func (l *AudioLastPlayedLoader) unsafeSet(key int, value *time.Time) {
	if l.cache == nil {
		l.cache = map[int]*time.Time{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *audioLastPlayedLoaderBatch) keyIndex(l *AudioLastPlayedLoader, key int) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	// the first key added to a batch starts the dispatch timer
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

// startTimer waits l.wait, then detaches the batch from the loader and finalizes it.
func (b *audioLastPlayedLoaderBatch) startTimer(l *AudioLastPlayedLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for the collected keys and unblocks all waiting thunks.
func (b *audioLastPlayedLoaderBatch) end(l *AudioLastPlayedLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -0,0 +1,224 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"github.com/stashapp/stash/pkg/models"
)

// AudioLoaderConfig captures the config to create a new AudioLoader
type AudioLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([]*models.Audio, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewAudioLoader creates a new AudioLoader given a fetch, wait, and maxBatch
func NewAudioLoader(config AudioLoaderConfig) *AudioLoader {
	return &AudioLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioLoader batches and caches requests
type AudioLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]*models.Audio, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]*models.Audio

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

// audioLoaderBatch collects keys until the batch is dispatched; done is
// closed once data/error have been populated by the fetch.
type audioLoaderBatch struct {
	keys    []int
	data    []*models.Audio
	error   []error
	closing bool
	done    chan struct{}
}

// Load an Audio by key, batching and caching will be applied automatically
func (l *AudioLoader) Load(key int) (*models.Audio, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for an Audio.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioLoader) LoadThunk(key int) func() (*models.Audio, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.Audio, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.Audio, error) {
		<-batch.done

		var data *models.Audio
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *AudioLoader) LoadAll(keys []int) ([]*models.Audio, []error) {
	results := make([]func() (*models.Audio, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	audios := make([]*models.Audio, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		audios[i], errors[i] = thunk()
	}
	return audios, errors
}

// LoadAllThunk returns a function that when called will block waiting for the Audios.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioLoader) LoadAllThunk(keys []int) func() ([]*models.Audio, []error) {
	results := make([]func() (*models.Audio, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.Audio, []error) {
		audios := make([]*models.Audio, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			audios[i], errors[i] = thunk()
		}
		return audios, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
// NOTE(review): generated code dereferences value unconditionally — Prime
// panics if value is nil; callers must not prime with a nil *models.Audio.
func (l *AudioLoader) Prime(key int, value *models.Audio) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *AudioLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the cache without locking; the caller must hold l.mu.
func (l *AudioLoader) unsafeSet(key int, value *models.Audio) {
	if l.cache == nil {
		l.cache = map[int]*models.Audio{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *audioLoaderBatch) keyIndex(l *AudioLoader, key int) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	// the first key added to a batch starts the dispatch timer
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

// startTimer waits l.wait, then detaches the batch from the loader and finalizes it.
func (b *audioLoaderBatch) startTimer(l *AudioLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for the collected keys and unblocks all waiting thunks.
func (b *audioLoaderBatch) end(l *AudioLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -0,0 +1,219 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
)
// AudioOCountLoaderConfig captures the config to create a new AudioOCountLoader.
type AudioOCountLoaderConfig struct {
	// Fetch is a method that provides the data for the loader.
	Fetch func(keys []int) ([]int, []error)

	// Wait is how long to wait before sending a batch.
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit.
	MaxBatch int
}

// NewAudioOCountLoader creates a new AudioOCountLoader given a fetch, wait, and maxBatch.
func NewAudioOCountLoader(config AudioOCountLoaderConfig) *AudioOCountLoader {
	return &AudioOCountLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioOCountLoader batches and caches integer lookups keyed by audio ID.
type AudioOCountLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]int, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]int

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioOCountLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type audioOCountLoaderBatch struct {
	keys    []int
	data    []int
	error   []error
	closing bool
	done    chan struct{}
}

// Load an int by key; batching and caching are applied automatically.
func (l *AudioOCountLoader) Load(key int) (int, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that, when called, blocks until the value for
// key is available. Use it to enqueue requests on several loaders from one
// goroutine before blocking on any of them.
func (l *AudioOCountLoader) LoadThunk(key int) func() (int, error) {
	l.mu.Lock()
	if cached, hit := l.cache[key]; hit {
		l.mu.Unlock()
		return func() (int, error) {
			return cached, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioOCountLoaderBatch{done: make(chan struct{})}
	}
	b := l.batch
	idx := b.keyIndex(l, key)
	// release the lock before handing out the thunk: the thunk blocks on the
	// batch, and holding the mutex there would deadlock other callers
	l.mu.Unlock()
	return func() (int, error) {
		<-b.done

		var val int
		if idx < len(b.data) {
			val = b.data[idx]
		}

		var err error
		// a single error means the whole batch failed; otherwise errors are per key
		switch {
		case len(b.error) == 1:
			err = b.error[0]
		case b.error != nil:
			err = b.error[idx]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, val)
			l.mu.Unlock()
		}

		return val, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub batches depending on how the loader is configured.
func (l *AudioOCountLoader) LoadAll(keys []int) ([]int, []error) {
	thunks := make([]func() (int, error), len(keys))
	for i, k := range keys {
		thunks[i] = l.LoadThunk(k)
	}

	values := make([]int, len(keys))
	errs := make([]error, len(keys))
	for i, thunk := range thunks {
		values[i], errs[i] = thunk()
	}
	return values, errs
}

// LoadAllThunk returns a function that, when called, blocks until all values
// are available. Use it to enqueue requests on several loaders from one
// goroutine before blocking on any of them.
func (l *AudioOCountLoader) LoadAllThunk(keys []int) func() ([]int, []error) {
	thunks := make([]func() (int, error), len(keys))
	for i, k := range keys {
		thunks[i] = l.LoadThunk(k)
	}
	return func() ([]int, []error) {
		values := make([]int, len(keys))
		errs := make([]error, len(keys))
		for i, thunk := range thunks {
			values[i], errs[i] = thunk()
		}
		return values, errs
	}
}

// Prime the cache with the provided key and value. If the key already exists,
// no change is made and false is returned. (To forcefully prime the cache,
// clear the key first with loader.Clear(key) then Prime again.)
func (l *AudioOCountLoader) Prime(key int, value int) bool {
	l.mu.Lock()
	_, present := l.cache[key]
	if !present {
		l.unsafeSet(key, value)
	}
	l.mu.Unlock()
	return !present
}

// Clear removes the value at key from the cache, if it exists.
func (l *AudioOCountLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the lazily-created cache; callers must hold l.mu.
func (l *AudioOCountLoader) unsafeSet(key int, value int) {
	if l.cache == nil {
		l.cache = map[int]int{}
	}
	l.cache[key] = value
}

// keyIndex returns the location of the key in the batch, adding it first if
// absent. Callers must hold l.mu.
func (b *audioOCountLoaderBatch) keyIndex(l *AudioOCountLoader, key int) int {
	// dedupe: a key already queued in this batch reuses its slot
	for i, existing := range b.keys {
		if existing == key {
			return i
		}
	}

	idx := len(b.keys)
	b.keys = append(b.keys, key)
	if idx == 0 {
		// first key in a fresh batch: start the dispatch timer
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && idx >= l.maxBatch-1 && !b.closing {
		// the batch is full: dispatch now instead of waiting for the timer
		b.closing = true
		l.batch = nil
		go b.end(l)
	}

	return idx
}

// startTimer sleeps for the configured wait, then dispatches the batch unless
// it was already dispatched for reaching maxBatch.
func (b *audioOCountLoaderBatch) startTimer(l *AudioOCountLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	if b.closing {
		// we must have hit the batch limit and are already finalizing this batch
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for all collected keys and unblocks every waiting thunk.
func (b *audioOCountLoaderBatch) end(l *AudioOCountLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -0,0 +1,223 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
)
// AudioOHistoryLoaderConfig captures the config to create a new AudioOHistoryLoader.
type AudioOHistoryLoaderConfig struct {
	// Fetch is a method that provides the data for the loader.
	Fetch func(keys []int) ([][]time.Time, []error)

	// Wait is how long to wait before sending a batch.
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit.
	MaxBatch int
}

// NewAudioOHistoryLoader creates a new AudioOHistoryLoader given a fetch, wait, and maxBatch.
func NewAudioOHistoryLoader(config AudioOHistoryLoaderConfig) *AudioOHistoryLoader {
	return &AudioOHistoryLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioOHistoryLoader batches and caches []time.Time lookups keyed by audio ID.
type AudioOHistoryLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]time.Time, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]time.Time

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioOHistoryLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type audioOHistoryLoaderBatch struct {
	keys    []int
	data    [][]time.Time
	error   []error
	closing bool
	done    chan struct{}
}

// Load a []time.Time by key; batching and caching will be applied automatically.
func (l *AudioOHistoryLoader) Load(key int) ([]time.Time, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for the value.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioOHistoryLoader) LoadThunk(key int) func() ([]time.Time, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() ([]time.Time, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioOHistoryLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	// unlock before returning: the thunk blocks on the batch and must not hold
	// the loader mutex while doing so
	l.mu.Unlock()
	return func() ([]time.Time, error) {
		<-batch.done

		var data []time.Time
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub batches depending on how the loader is configured.
func (l *AudioOHistoryLoader) LoadAll(keys []int) ([][]time.Time, []error) {
	results := make([]func() ([]time.Time, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	times := make([][]time.Time, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		times[i], errors[i] = thunk()
	}
	return times, errors
}

// LoadAllThunk returns a function that when called will block waiting for all values.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioOHistoryLoader) LoadAllThunk(keys []int) func() ([][]time.Time, []error) {
	results := make([]func() ([]time.Time, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([][]time.Time, []error) {
		times := make([][]time.Time, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			times[i], errors[i] = thunk()
		}
		return times, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key) then Prime.)
func (l *AudioOHistoryLoader) Prime(key int, value []time.Time) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache; it's easy to pass a slice in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := make([]time.Time, len(value))
		copy(cpy, value)
		l.unsafeSet(key, cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists.
func (l *AudioOHistoryLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the lazily-created cache. Callers must hold l.mu.
func (l *AudioOHistoryLoader) unsafeSet(key int, value []time.Time) {
	if l.cache == nil {
		l.cache = map[int][]time.Time{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch; if it's not found
// it will add the key to the batch. Callers must hold l.mu.
func (b *audioOHistoryLoaderBatch) keyIndex(l *AudioOHistoryLoader, key int) int {
	// dedupe: a key already queued in this batch reuses its slot
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}
	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		// first key in a fresh batch: start the dispatch timer
		go b.startTimer(l)
	}
	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			// batch is full: dispatch immediately instead of waiting for the timer
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}
	return pos
}

// startTimer sleeps for the configured wait, then dispatches the batch unless
// it was already dispatched for reaching maxBatch.
func (b *audioOHistoryLoaderBatch) startTimer(l *AudioOHistoryLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for all collected keys and unblocks every waiting thunk.
func (b *audioOHistoryLoaderBatch) end(l *AudioOHistoryLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -0,0 +1,219 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
)
// AudioPlayCountLoaderConfig captures the config to create a new AudioPlayCountLoader.
type AudioPlayCountLoaderConfig struct {
	// Fetch is a method that provides the data for the loader.
	Fetch func(keys []int) ([]int, []error)

	// Wait is how long to wait before sending a batch.
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit.
	MaxBatch int
}

// NewAudioPlayCountLoader creates a new AudioPlayCountLoader given a fetch, wait, and maxBatch.
func NewAudioPlayCountLoader(config AudioPlayCountLoaderConfig) *AudioPlayCountLoader {
	return &AudioPlayCountLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioPlayCountLoader batches and caches integer lookups keyed by audio ID.
type AudioPlayCountLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]int, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]int

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioPlayCountLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type audioPlayCountLoaderBatch struct {
	keys    []int
	data    []int
	error   []error
	closing bool
	done    chan struct{}
}

// Load an int by key; batching and caching are applied automatically.
func (l *AudioPlayCountLoader) Load(key int) (int, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that, when called, blocks until the value for
// key is available. Use it to enqueue requests on several loaders from one
// goroutine before blocking on any of them.
func (l *AudioPlayCountLoader) LoadThunk(key int) func() (int, error) {
	l.mu.Lock()
	if cached, hit := l.cache[key]; hit {
		l.mu.Unlock()
		return func() (int, error) {
			return cached, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioPlayCountLoaderBatch{done: make(chan struct{})}
	}
	b := l.batch
	idx := b.keyIndex(l, key)
	// release the lock before handing out the thunk: the thunk blocks on the
	// batch, and holding the mutex there would deadlock other callers
	l.mu.Unlock()
	return func() (int, error) {
		<-b.done

		var val int
		if idx < len(b.data) {
			val = b.data[idx]
		}

		var err error
		// a single error means the whole batch failed; otherwise errors are per key
		switch {
		case len(b.error) == 1:
			err = b.error[0]
		case b.error != nil:
			err = b.error[idx]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, val)
			l.mu.Unlock()
		}

		return val, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub batches depending on how the loader is configured.
func (l *AudioPlayCountLoader) LoadAll(keys []int) ([]int, []error) {
	thunks := make([]func() (int, error), len(keys))
	for i, k := range keys {
		thunks[i] = l.LoadThunk(k)
	}

	values := make([]int, len(keys))
	errs := make([]error, len(keys))
	for i, thunk := range thunks {
		values[i], errs[i] = thunk()
	}
	return values, errs
}

// LoadAllThunk returns a function that, when called, blocks until all values
// are available. Use it to enqueue requests on several loaders from one
// goroutine before blocking on any of them.
func (l *AudioPlayCountLoader) LoadAllThunk(keys []int) func() ([]int, []error) {
	thunks := make([]func() (int, error), len(keys))
	for i, k := range keys {
		thunks[i] = l.LoadThunk(k)
	}
	return func() ([]int, []error) {
		values := make([]int, len(keys))
		errs := make([]error, len(keys))
		for i, thunk := range thunks {
			values[i], errs[i] = thunk()
		}
		return values, errs
	}
}

// Prime the cache with the provided key and value. If the key already exists,
// no change is made and false is returned. (To forcefully prime the cache,
// clear the key first with loader.Clear(key) then Prime again.)
func (l *AudioPlayCountLoader) Prime(key int, value int) bool {
	l.mu.Lock()
	_, present := l.cache[key]
	if !present {
		l.unsafeSet(key, value)
	}
	l.mu.Unlock()
	return !present
}

// Clear removes the value at key from the cache, if it exists.
func (l *AudioPlayCountLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the lazily-created cache; callers must hold l.mu.
func (l *AudioPlayCountLoader) unsafeSet(key int, value int) {
	if l.cache == nil {
		l.cache = map[int]int{}
	}
	l.cache[key] = value
}

// keyIndex returns the location of the key in the batch, adding it first if
// absent. Callers must hold l.mu.
func (b *audioPlayCountLoaderBatch) keyIndex(l *AudioPlayCountLoader, key int) int {
	// dedupe: a key already queued in this batch reuses its slot
	for i, existing := range b.keys {
		if existing == key {
			return i
		}
	}

	idx := len(b.keys)
	b.keys = append(b.keys, key)
	if idx == 0 {
		// first key in a fresh batch: start the dispatch timer
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && idx >= l.maxBatch-1 && !b.closing {
		// the batch is full: dispatch now instead of waiting for the timer
		b.closing = true
		l.batch = nil
		go b.end(l)
	}

	return idx
}

// startTimer sleeps for the configured wait, then dispatches the batch unless
// it was already dispatched for reaching maxBatch.
func (b *audioPlayCountLoaderBatch) startTimer(l *AudioPlayCountLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	if b.closing {
		// we must have hit the batch limit and are already finalizing this batch
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for all collected keys and unblocks every waiting thunk.
func (b *audioPlayCountLoaderBatch) end(l *AudioPlayCountLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -0,0 +1,223 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
)
// AudioPlayHistoryLoaderConfig captures the config to create a new AudioPlayHistoryLoader.
type AudioPlayHistoryLoaderConfig struct {
	// Fetch is a method that provides the data for the loader.
	Fetch func(keys []int) ([][]time.Time, []error)

	// Wait is how long to wait before sending a batch.
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit.
	MaxBatch int
}

// NewAudioPlayHistoryLoader creates a new AudioPlayHistoryLoader given a fetch, wait, and maxBatch.
func NewAudioPlayHistoryLoader(config AudioPlayHistoryLoaderConfig) *AudioPlayHistoryLoader {
	return &AudioPlayHistoryLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AudioPlayHistoryLoader batches and caches []time.Time lookups keyed by audio ID.
type AudioPlayHistoryLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]time.Time, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]time.Time

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *audioPlayHistoryLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type audioPlayHistoryLoaderBatch struct {
	keys    []int
	data    [][]time.Time
	error   []error
	closing bool
	done    chan struct{}
}

// Load a []time.Time by key; batching and caching will be applied automatically.
func (l *AudioPlayHistoryLoader) Load(key int) ([]time.Time, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for the value.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioPlayHistoryLoader) LoadThunk(key int) func() ([]time.Time, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() ([]time.Time, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &audioPlayHistoryLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	// unlock before returning: the thunk blocks on the batch and must not hold
	// the loader mutex while doing so
	l.mu.Unlock()
	return func() ([]time.Time, error) {
		<-batch.done

		var data []time.Time
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub batches depending on how the loader is configured.
func (l *AudioPlayHistoryLoader) LoadAll(keys []int) ([][]time.Time, []error) {
	results := make([]func() ([]time.Time, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	times := make([][]time.Time, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		times[i], errors[i] = thunk()
	}
	return times, errors
}

// LoadAllThunk returns a function that when called will block waiting for all values.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AudioPlayHistoryLoader) LoadAllThunk(keys []int) func() ([][]time.Time, []error) {
	results := make([]func() ([]time.Time, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([][]time.Time, []error) {
		times := make([][]time.Time, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			times[i], errors[i] = thunk()
		}
		return times, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key) then Prime.)
func (l *AudioPlayHistoryLoader) Prime(key int, value []time.Time) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache; it's easy to pass a slice in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := make([]time.Time, len(value))
		copy(cpy, value)
		l.unsafeSet(key, cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists.
func (l *AudioPlayHistoryLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the lazily-created cache. Callers must hold l.mu.
func (l *AudioPlayHistoryLoader) unsafeSet(key int, value []time.Time) {
	if l.cache == nil {
		l.cache = map[int][]time.Time{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch; if it's not found
// it will add the key to the batch. Callers must hold l.mu.
func (b *audioPlayHistoryLoaderBatch) keyIndex(l *AudioPlayHistoryLoader, key int) int {
	// dedupe: a key already queued in this batch reuses its slot
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}
	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		// first key in a fresh batch: start the dispatch timer
		go b.startTimer(l)
	}
	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			// batch is full: dispatch immediately instead of waiting for the timer
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}
	return pos
}

// startTimer sleeps for the configured wait, then dispatches the batch unless
// it was already dispatched for reaching maxBatch.
func (b *audioPlayHistoryLoaderBatch) startTimer(l *AudioPlayHistoryLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch for all collected keys and unblocks every waiting thunk.
func (b *audioPlayHistoryLoaderBatch) end(l *AudioPlayHistoryLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -3,6 +3,7 @@
// The dataloaders are used to batch requests to the database.
//go:generate go run github.com/vektah/dataloaden SceneLoader int *github.com/stashapp/stash/pkg/models.Scene
//go:generate go run github.com/vektah/dataloaden AudioLoader int *github.com/stashapp/stash/pkg/models.Audio
//go:generate go run github.com/vektah/dataloaden GalleryLoader int *github.com/stashapp/stash/pkg/models.Gallery
//go:generate go run github.com/vektah/dataloaden ImageLoader int *github.com/stashapp/stash/pkg/models.Image
//go:generate go run github.com/vektah/dataloaden PerformerLoader int *github.com/stashapp/stash/pkg/models.Performer
@ -13,6 +14,7 @@
//go:generate go run github.com/vektah/dataloaden FolderLoader github.com/stashapp/stash/pkg/models.FolderID *github.com/stashapp/stash/pkg/models.Folder
//go:generate go run github.com/vektah/dataloaden FolderRelatedFolderIDsLoader github.com/stashapp/stash/pkg/models.FolderID []github.com/stashapp/stash/pkg/models.FolderID
//go:generate go run github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden AudioFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden GalleryFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden CustomFieldsLoader int github.com/stashapp/stash/pkg/models.CustomFieldMap
@ -21,6 +23,11 @@
//go:generate go run github.com/vektah/dataloaden SceneOHistoryLoader int []time.Time
//go:generate go run github.com/vektah/dataloaden ScenePlayHistoryLoader int []time.Time
//go:generate go run github.com/vektah/dataloaden SceneLastPlayedLoader int *time.Time
//go:generate go run github.com/vektah/dataloaden AudioOCountLoader int int
//go:generate go run github.com/vektah/dataloaden AudioPlayCountLoader int int
//go:generate go run github.com/vektah/dataloaden AudioOHistoryLoader int []time.Time
//go:generate go run github.com/vektah/dataloaden AudioPlayHistoryLoader int []time.Time
//go:generate go run github.com/vektah/dataloaden AudioLastPlayedLoader int *time.Time
package loaders
import (
@ -52,6 +59,15 @@ type Loaders struct {
SceneLastPlayed *SceneLastPlayedLoader
SceneCustomFields *CustomFieldsLoader
AudioByID *AudioLoader
AudioFiles *AudioFileIDsLoader
AudioPlayCount *AudioPlayCountLoader
AudioOCount *AudioOCountLoader
AudioPlayHistory *AudioPlayHistoryLoader
AudioOHistory *AudioOHistoryLoader
AudioLastPlayed *AudioLastPlayedLoader
AudioCustomFields *CustomFieldsLoader
ImageFiles *ImageFileIDsLoader
GalleryFiles *GalleryFileIDsLoader
@ -217,6 +233,32 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
maxBatch: maxBatch,
fetch: m.fetchScenesOHistory(ctx),
},
// Audio
AudioPlayCount: &AudioPlayCountLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchAudiosPlayCount(ctx),
},
AudioOCount: &AudioOCountLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchAudiosOCount(ctx),
},
AudioPlayHistory: &AudioPlayHistoryLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchAudiosPlayHistory(ctx),
},
AudioLastPlayed: &AudioLastPlayedLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchAudiosLastPlayed(ctx),
},
AudioOHistory: &AudioOHistoryLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchAudiosOHistory(ctx),
},
}
newCtx := context.WithValue(r.Context(), loadersCtxKey, ldrs)
@ -531,3 +573,59 @@ func (m Middleware) fetchScenesLastPlayed(ctx context.Context) func(keys []int)
return ret, toErrorSlice(err)
}
}
// Audio

// fetchAudiosOCount returns a batch fetch function resolving audio IDs to
// their O-counts via the audio repository, inside a single DB transaction.
func (m Middleware) fetchAudiosOCount(ctx context.Context) func(keys []int) ([]int, []error) {
	return func(keys []int) (ret []int, errs []error) {
		err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var err error
			ret, err = m.Repository.Audio.GetManyOCount(ctx, keys)
			return err
		})
		// a single transaction-level error is fanned out to every key
		return ret, toErrorSlice(err)
	}
}

// fetchAudiosPlayCount returns a batch fetch function resolving audio IDs to
// their play counts. Play count maps to the repository's view-count accessor
// (GetManyViewCount).
func (m Middleware) fetchAudiosPlayCount(ctx context.Context) func(keys []int) ([]int, []error) {
	return func(keys []int) (ret []int, errs []error) {
		err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var err error
			ret, err = m.Repository.Audio.GetManyViewCount(ctx, keys)
			return err
		})
		return ret, toErrorSlice(err)
	}
}

// fetchAudiosOHistory returns a batch fetch function resolving audio IDs to
// their O-date histories (GetManyODates).
func (m Middleware) fetchAudiosOHistory(ctx context.Context) func(keys []int) ([][]time.Time, []error) {
	return func(keys []int) (ret [][]time.Time, errs []error) {
		err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var err error
			ret, err = m.Repository.Audio.GetManyODates(ctx, keys)
			return err
		})
		return ret, toErrorSlice(err)
	}
}

// fetchAudiosPlayHistory returns a batch fetch function resolving audio IDs to
// their play-date histories (GetManyViewDates).
func (m Middleware) fetchAudiosPlayHistory(ctx context.Context) func(keys []int) ([][]time.Time, []error) {
	return func(keys []int) (ret [][]time.Time, errs []error) {
		err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var err error
			ret, err = m.Repository.Audio.GetManyViewDates(ctx, keys)
			return err
		})
		return ret, toErrorSlice(err)
	}
}

// fetchAudiosLastPlayed returns a batch fetch function resolving audio IDs to
// their most recent view time (GetManyLastViewed); nil for never-played items
// is determined by the repository implementation — confirm against it.
func (m Middleware) fetchAudiosLastPlayed(ctx context.Context) func(keys []int) ([]*time.Time, []error) {
	return func(keys []int) (ret []*time.Time, errs []error) {
		err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var err error
			ret, err = m.Repository.Audio.GetManyLastViewed(ctx, keys)
			return err
		})
		return ret, toErrorSlice(err)
	}
}

View file

@ -75,6 +75,18 @@ func (f *VideoFile) Fingerprints() []models.Fingerprint {
return f.VideoFile.Fingerprints
}
// AudioFile wraps models.AudioFile for GraphQL resolution.
type AudioFile struct {
	*models.AudioFile
}

func (AudioFile) IsBaseFile() {}

// NOTE(review): IsVisualFile on an audio wrapper looks copy-pasted from the
// VideoFile/ImageFile wrappers — confirm the GraphQL VisualFile interface is
// really meant to include audio files.
func (AudioFile) IsVisualFile() {}

// Fingerprints returns the fingerprints of the wrapped file model.
func (f *AudioFile) Fingerprints() []models.Fingerprint {
	return f.AudioFile.Fingerprints
}
// ImageFile wraps models.ImageFile for GraphQL resolution.
type ImageFile struct {
	*models.ImageFile
}

View file

@ -35,6 +35,7 @@ type hookExecutor interface {
type Resolver struct {
repository models.Repository
sceneService manager.SceneService
audioService manager.AudioService
imageService manager.ImageService
galleryService manager.GalleryService
groupService manager.GroupService
@ -64,6 +65,9 @@ func (r *Resolver) Query() QueryResolver {
// Scene returns the resolver for Scene fields.
func (r *Resolver) Scene() SceneResolver {
	return &sceneResolver{r}
}

// Audio returns the resolver for Audio fields.
func (r *Resolver) Audio() AudioResolver {
	return &audioResolver{r}
}

// Image returns the resolver for Image fields.
func (r *Resolver) Image() ImageResolver {
	return &imageResolver{r}
}
@ -93,6 +97,9 @@ func (r *Resolver) GalleryFile() GalleryFileResolver {
// VideoFile returns the resolver for VideoFile fields.
func (r *Resolver) VideoFile() VideoFileResolver {
	return &videoFileResolver{r}
}

// AudioFile returns the resolver for AudioFile fields.
func (r *Resolver) AudioFile() AudioFileResolver {
	return &audioFileResolver{r}
}

// ImageFile returns the resolver for ImageFile fields.
func (r *Resolver) ImageFile() ImageFileResolver {
	return &imageFileResolver{r}
}
@ -121,6 +128,7 @@ type galleryChapterResolver struct{ *Resolver }
// Per-type resolver shims; each embeds the root Resolver to share its services.
type performerResolver struct{ *Resolver }
type sceneResolver struct{ *Resolver }
type sceneMarkerResolver struct{ *Resolver }
type audioResolver struct{ *Resolver }
type imageResolver struct{ *Resolver }
type studioResolver struct{ *Resolver }
@ -131,6 +139,7 @@ type movieResolver struct{ *groupResolver }
// Per-file-type and misc resolver shims; each embeds the root Resolver.
type tagResolver struct{ *Resolver }
type galleryFileResolver struct{ *Resolver }
type videoFileResolver struct{ *Resolver }
type audioFileResolver struct{ *Resolver }
type imageFileResolver struct{ *Resolver }
type basicFileResolver struct{ *Resolver }
type folderResolver struct{ *Resolver }

View file

@ -13,22 +13,22 @@ import (
"github.com/stashapp/stash/pkg/models"
)
func convertVideoFile(f models.File) (*models.VideoFile, error) {
vf, ok := f.(*models.VideoFile)
func convertAudioFile(f models.File) (*models.AudioFile, error) {
vf, ok := f.(*models.AudioFile)
if !ok {
return nil, fmt.Errorf("file %T is not a video file", f)
}
return vf, nil
}
func (r *audioResolver) getPrimaryFile(ctx context.Context, obj *models.Audio) (*models.VideoFile, error) {
func (r *audioResolver) getPrimaryFile(ctx context.Context, obj *models.Audio) (*models.AudioFile, error) {
if obj.PrimaryFileID != nil {
f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID)
if err != nil {
return nil, err
}
ret, err := convertVideoFile(f)
ret, err := convertAudioFile(f)
if err != nil {
return nil, err
}
@ -43,7 +43,7 @@ func (r *audioResolver) getPrimaryFile(ctx context.Context, obj *models.Audio) (
return nil, nil
}
func (r *audioResolver) getFiles(ctx context.Context, obj *models.Audio) ([]*models.VideoFile, error) {
func (r *audioResolver) getFiles(ctx context.Context, obj *models.Audio) ([]*models.AudioFile, error) {
fileIDs, err := loaders.From(ctx).AudioFiles.Load(obj.ID)
if err != nil {
return nil, err
@ -55,9 +55,9 @@ func (r *audioResolver) getFiles(ctx context.Context, obj *models.Audio) ([]*mod
return nil, err
}
ret := make([]*models.VideoFile, len(files))
ret := make([]*models.AudioFile, len(files))
for i, f := range files {
ret[i], err = convertVideoFile(f)
ret[i], err = convertAudioFile(f)
if err != nil {
return nil, err
}
@ -76,17 +76,17 @@ func (r *audioResolver) Date(ctx context.Context, obj *models.Audio) (*string, e
return nil, nil
}
func (r *audioResolver) Files(ctx context.Context, obj *models.Audio) ([]*VideoFile, error) {
func (r *audioResolver) Files(ctx context.Context, obj *models.Audio) ([]*AudioFile, error) {
files, err := r.getFiles(ctx, obj)
if err != nil {
return nil, err
}
ret := make([]*VideoFile, len(files))
ret := make([]*AudioFile, len(files))
for i, f := range files {
ret[i] = &VideoFile{
VideoFile: f,
ret[i] = &AudioFile{
AudioFile: f,
}
}
@ -113,37 +113,25 @@ func (r *audioResolver) Paths(ctx context.Context, obj *models.Audio) (*AudioPat
previewPath := builder.GetStreamPreviewURL()
streamPath := builder.GetStreamURL(config.GetAPIKey()).String()
webpPath := builder.GetStreamPreviewImageURL()
objHash := obj.GetHash(config.GetVideoFileNamingAlgorithm())
objHash := obj.GetHash(config.GetAudioFileNamingAlgorithm())
vttPath := builder.GetSpriteVTTURL(objHash)
spritePath := builder.GetSpriteURL(objHash)
funscriptPath := builder.GetFunscriptURL()
captionBasePath := builder.GetCaptionURL()
interactiveHeatmap := builder.GetInteractiveHeatmapURL()
return &AudioPathsType{
Screenshot: &screenshotPath,
Preview: &previewPath,
Stream: &streamPath,
Webp: &webpPath,
Vtt: &vttPath,
Sprite: &spritePath,
Funscript: &funscriptPath,
InteractiveHeatmap: &interactiveHeatmap,
Caption: &captionBasePath,
Screenshot: &screenshotPath,
Preview: &previewPath,
Stream: &streamPath,
Webp: &webpPath,
Vtt: &vttPath,
Sprite: &spritePath,
Funscript: &funscriptPath,
Caption: &captionBasePath,
}, nil
}
// AudioMarkers resolves all markers attached to the audio item.
func (r *audioResolver) AudioMarkers(ctx context.Context, obj *models.Audio) ([]*models.AudioMarker, error) {
	var markers []*models.AudioMarker
	txnErr := r.withReadTxn(ctx, func(ctx context.Context) error {
		var e error
		markers, e = r.repository.AudioMarker.FindByAudioID(ctx, obj.ID)
		return e
	})
	if txnErr != nil {
		return nil, txnErr
	}
	return markers, nil
}
// TODO(audio|AudioCaption): need to update IF AudioCaption required
func (r *audioResolver) Captions(ctx context.Context, obj *models.Audio) (ret []*models.VideoCaption, err error) {
primaryFile, err := r.getPrimaryFile(ctx, obj)
if err != nil {
@ -185,37 +173,6 @@ func (r *audioResolver) Studio(ctx context.Context, obj *models.Audio) (ret *mod
return loaders.From(ctx).StudioByID.Load(*obj.StudioID)
}
// Movies resolves the audio item's related groups and exposes each one as an
// AudioMovie value, carrying over the per-group audio index.
// NOTE(review): this appears to be a legacy movie-shaped view over the group
// relationship — confirm against the GraphQL schema before relying on it.
func (r *audioResolver) Movies(ctx context.Context, obj *models.Audio) (ret []*AudioMovie, err error) {
	// lazily load the group relationships if the caller has not already
	if !obj.Groups.Loaded() {
		if err := r.withReadTxn(ctx, func(ctx context.Context) error {
			qb := r.repository.Audio
			return obj.LoadGroups(ctx, qb)
		}); err != nil {
			return nil, err
		}
	}

	loader := loaders.From(ctx).GroupByID

	// wrap each related group as a movie, preserving the audio index
	for _, sm := range obj.Groups.List() {
		movie, err := loader.Load(sm.GroupID)
		if err != nil {
			return nil, err
		}

		audioIdx := sm.AudioIndex

		audioMovie := &AudioMovie{
			Movie:      movie,
			AudioIndex: audioIdx,
		}
		ret = append(ret, audioMovie)
	}

	return ret, nil
}
func (r *audioResolver) Groups(ctx context.Context, obj *models.Audio) (ret []*AudioGroup, err error) {
if !obj.Groups.Loaded() {
if err := r.withReadTxn(ctx, func(ctx context.Context) error {
@ -275,16 +232,6 @@ func (r *audioResolver) Performers(ctx context.Context, obj *models.Audio) (ret
return ret, firstError(errs)
}
// StashIds resolves the stash IDs associated with the audio item.
func (r *audioResolver) StashIds(ctx context.Context, obj *models.Audio) (ret []*models.StashID, err error) {
	load := func(ctx context.Context) error {
		return obj.LoadStashIDs(ctx, r.repository.Audio)
	}
	if txnErr := r.withReadTxn(ctx, load); txnErr != nil {
		return nil, txnErr
	}
	return stashIDsSliceToPtrSlice(obj.StashIDs.List()), nil
}
func (r *audioResolver) AudioStreams(ctx context.Context, obj *models.Audio) ([]*manager.AudioStreamEndpoint, error) {
// load the primary file into the audio
_, err := r.getPrimaryFile(ctx, obj)
@ -301,30 +248,6 @@ func (r *audioResolver) AudioStreams(ctx context.Context, obj *models.Audio) ([]
return manager.GetAudioStreamPaths(obj, builder.GetStreamURL(apiKey), config.GetMaxStreamingTranscodeSize())
}
// Interactive resolves the interactive flag from the primary file; false
// when there is no primary file.
func (r *audioResolver) Interactive(ctx context.Context, obj *models.Audio) (bool, error) {
	pf, err := r.getPrimaryFile(ctx, obj)
	switch {
	case err != nil:
		return false, err
	case pf == nil:
		return false, nil
	default:
		return pf.Interactive, nil
	}
}
// InteractiveSpeed resolves the interactive speed from the primary file;
// nil when there is no primary file.
func (r *audioResolver) InteractiveSpeed(ctx context.Context, obj *models.Audio) (*int, error) {
	pf, err := r.getPrimaryFile(ctx, obj)
	switch {
	case err != nil:
		return nil, err
	case pf == nil:
		return nil, nil
	default:
		return pf.InteractiveSpeed, nil
	}
}
func (r *audioResolver) URL(ctx context.Context, obj *models.Audio) (*string, error) {
if !obj.URLs.Loaded() {
if err := r.withReadTxn(ctx, func(ctx context.Context) error {

View file

@ -28,6 +28,10 @@ func (r *videoFileResolver) Fingerprint(ctx context.Context, obj *VideoFile, typ
return fingerprintResolver(obj.VideoFile.Fingerprints, type_)
}
// Fingerprint resolves the fingerprint of the requested type for the
// underlying audio file record.
func (r *audioFileResolver) Fingerprint(ctx context.Context, obj *AudioFile, type_ string) (*string, error) {
	prints := obj.AudioFile.Fingerprints
	return fingerprintResolver(prints, type_)
}
// Fingerprint resolves the fingerprint of the requested type for the
// underlying base file record.
func (r *basicFileResolver) Fingerprint(ctx context.Context, obj *BasicFile, type_ string) (*string, error) {
	prints := obj.BaseFile.Fingerprints
	return fingerprintResolver(prints, type_)
}
@ -43,6 +47,9 @@ func (r *imageFileResolver) ParentFolder(ctx context.Context, obj *ImageFile) (*
// ParentFolder resolves the folder containing the video file via the
// folder dataloader.
func (r *videoFileResolver) ParentFolder(ctx context.Context, obj *VideoFile) (*models.Folder, error) {
	folderLoader := loaders.From(ctx).FolderByID
	return folderLoader.Load(obj.ParentFolderID)
}
// ParentFolder resolves the folder containing the audio file via the
// folder dataloader.
func (r *audioFileResolver) ParentFolder(ctx context.Context, obj *AudioFile) (*models.Folder, error) {
	folderLoader := loaders.From(ctx).FolderByID
	return folderLoader.Load(obj.ParentFolderID)
}
func (r *basicFileResolver) ParentFolder(ctx context.Context, obj *BasicFile) (*models.Folder, error) {
return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID)
@ -74,6 +81,9 @@ func (r *imageFileResolver) ZipFile(ctx context.Context, obj *ImageFile) (*Basic
// ZipFile resolves the containing zip archive for the video file.
func (r *videoFileResolver) ZipFile(ctx context.Context, obj *VideoFile) (*BasicFile, error) {
	zipID := obj.ZipFileID
	return zipFileResolver(ctx, zipID)
}
// ZipFile resolves the containing zip archive for the audio file.
func (r *audioFileResolver) ZipFile(ctx context.Context, obj *AudioFile) (*BasicFile, error) {
	zipID := obj.ZipFileID
	return zipFileResolver(ctx, zipID)
}
func (r *basicFileResolver) ZipFile(ctx context.Context, obj *BasicFile) (*BasicFile, error) {
return zipFileResolver(ctx, obj.ZipFileID)

View file

@ -11,12 +11,12 @@ import (
"time"
"github.com/stashapp/stash/internal/manager"
"github.com/stashapp/stash/pkg/audio"
"github.com/stashapp/stash/pkg/file"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/plugin"
"github.com/stashapp/stash/pkg/plugin/hook"
"github.com/stashapp/stash/pkg/audio"
"github.com/stashapp/stash/pkg/sliceutil"
"github.com/stashapp/stash/pkg/sliceutil/stringslice"
"github.com/stashapp/stash/pkg/utils"
@ -50,10 +50,8 @@ func (r *mutationResolver) AudioCreate(ctx context.Context, input models.AudioCr
newAudio.Title = translator.string(input.Title)
newAudio.Code = translator.string(input.Code)
newAudio.Details = translator.string(input.Details)
newAudio.Director = translator.string(input.Director)
newAudio.Rating = input.Rating100
newAudio.Organized = translator.bool(input.Organized)
newAudio.StashIDs = models.NewRelatedStashIDs(models.StashIDInputs(input.StashIds).ToStashIDs())
newAudio.Date, err = translator.datePtr(input.Date)
if err != nil {
@ -78,22 +76,12 @@ func (r *mutationResolver) AudioCreate(ctx context.Context, input models.AudioCr
if err != nil {
return nil, fmt.Errorf("converting tag ids: %w", err)
}
newAudio.GalleryIDs, err = translator.relatedIds(input.GalleryIds)
if err != nil {
return nil, fmt.Errorf("converting gallery ids: %w", err)
}
// prefer groups over movies
if len(input.Groups) > 0 {
newAudio.Groups, err = translator.relatedGroups(input.Groups)
newAudio.Groups, err = translator.relatedGroupsAudio(input.Groups)
if err != nil {
return nil, fmt.Errorf("converting groups: %w", err)
}
} else if len(input.Movies) > 0 {
newAudio.Groups, err = translator.relatedGroupsFromMovies(input.Movies)
if err != nil {
return nil, fmt.Errorf("converting movies: %w", err)
}
}
var coverImageData []byte
@ -188,7 +176,6 @@ func audioPartialFromInput(input models.AudioUpdateInput, translator changesetTr
updatedAudio.Title = translator.optionalString(input.Title, "title")
updatedAudio.Code = translator.optionalString(input.Code, "code")
updatedAudio.Details = translator.optionalString(input.Details, "details")
updatedAudio.Director = translator.optionalString(input.Director, "director")
updatedAudio.Rating = translator.optionalInt(input.Rating100, "rating100")
if input.OCounter != nil {
@ -201,7 +188,6 @@ func audioPartialFromInput(input models.AudioUpdateInput, translator changesetTr
updatedAudio.PlayDuration = translator.optionalFloat64(input.PlayDuration, "play_duration")
updatedAudio.Organized = translator.optionalBool(input.Organized, "organized")
updatedAudio.StashIDs = translator.updateStashIDs(input.StashIds, "stash_ids")
var err error
@ -229,21 +215,12 @@ func audioPartialFromInput(input models.AudioUpdateInput, translator changesetTr
if err != nil {
return nil, fmt.Errorf("converting tag ids: %w", err)
}
updatedAudio.GalleryIDs, err = translator.updateIds(input.GalleryIds, "gallery_ids")
if err != nil {
return nil, fmt.Errorf("converting gallery ids: %w", err)
}
if translator.hasField("groups") {
updatedAudio.GroupIDs, err = translator.updateGroupIDs(input.Groups, "groups")
updatedAudio.GroupIDs, err = translator.updateGroupIDsAudio(input.Groups, "groups")
if err != nil {
return nil, fmt.Errorf("converting groups: %w", err)
}
} else if translator.hasField("movies") {
updatedAudio.GroupIDs, err = translator.updateGroupIDsFromMovies(input.Movies, "movies")
if err != nil {
return nil, fmt.Errorf("converting movies: %w", err)
}
}
return &updatedAudio, nil
@ -293,7 +270,7 @@ func (r *mutationResolver) audioUpdate(ctx context.Context, input models.AudioUp
}
// ensure that new primary file is associated with audio
var f *models.VideoFile
var f *models.AudioFile
for _, ff := range originalAudio.Files.List() {
if ff.ID == newPrimaryFileID {
f = ff
@ -371,7 +348,6 @@ func (r *mutationResolver) BulkAudioUpdate(ctx context.Context, input BulkAudioU
updatedAudio.Title = translator.optionalString(input.Title, "title")
updatedAudio.Code = translator.optionalString(input.Code, "code")
updatedAudio.Details = translator.optionalString(input.Details, "details")
updatedAudio.Director = translator.optionalString(input.Director, "director")
updatedAudio.Rating = translator.optionalInt(input.Rating100, "rating100")
updatedAudio.Organized = translator.optionalBool(input.Organized, "organized")
@ -394,21 +370,12 @@ func (r *mutationResolver) BulkAudioUpdate(ctx context.Context, input BulkAudioU
if err != nil {
return nil, fmt.Errorf("converting tag ids: %w", err)
}
updatedAudio.GalleryIDs, err = translator.updateIdsBulk(input.GalleryIds, "gallery_ids")
if err != nil {
return nil, fmt.Errorf("converting gallery ids: %w", err)
}
if translator.hasField("group_ids") {
updatedAudio.GroupIDs, err = translator.updateGroupIDsBulk(input.GroupIds, "group_ids")
updatedAudio.GroupIDs, err = translator.updateGroupIDsBulkAudio(input.GroupIds, "group_ids")
if err != nil {
return nil, fmt.Errorf("converting group ids: %w", err)
}
} else if translator.hasField("movie_ids") {
updatedAudio.GroupIDs, err = translator.updateGroupIDsBulk(input.MovieIds, "movie_ids")
if err != nil {
return nil, fmt.Errorf("converting movie ids: %w", err)
}
}
var customFields *models.CustomFieldsInput
@ -465,7 +432,7 @@ func (r *mutationResolver) AudioDestroy(ctx context.Context, input models.AudioD
return false, fmt.Errorf("converting id: %w", err)
}
fileNamingAlgo := manager.GetInstance().Config.GetVideoFileNamingAlgorithm()
fileNamingAlgo := manager.GetInstance().Config.GetAudioFileNamingAlgorithm()
trashPath := manager.GetInstance().Config.GetDeleteTrashPath()
var s *models.Audio
@ -492,7 +459,7 @@ func (r *mutationResolver) AudioDestroy(ctx context.Context, input models.AudioD
}
// kill any running encoders
manager.KillRunningStreams(s, fileNamingAlgo)
manager.KillRunningStreamsAudio(s, fileNamingAlgo)
return r.audioService.Destroy(ctx, s, fileDeleter, deleteGenerated, deleteFile, destroyFileEntry)
}); err != nil {
@ -521,7 +488,7 @@ func (r *mutationResolver) AudiosDestroy(ctx context.Context, input models.Audio
}
var audios []*models.Audio
fileNamingAlgo := manager.GetInstance().Config.GetVideoFileNamingAlgorithm()
fileNamingAlgo := manager.GetInstance().Config.GetAudioFileNamingAlgorithm()
trashPath := manager.GetInstance().Config.GetDeleteTrashPath()
fileDeleter := &audio.FileDeleter{
@ -549,7 +516,7 @@ func (r *mutationResolver) AudiosDestroy(ctx context.Context, input models.Audio
audios = append(audios, audio)
// kill any running encoders
manager.KillRunningStreams(audio, fileNamingAlgo)
manager.KillRunningStreamsAudio(audio, fileNamingAlgo)
if err := r.audioService.Destroy(ctx, audio, fileDeleter, deleteGenerated, deleteFile, destroyFileEntry); err != nil {
return err
@ -644,7 +611,7 @@ func (r *mutationResolver) AudioMerge(ctx context.Context, input AudioMergeInput
trashPath := mgr.Config.GetDeleteTrashPath()
fileDeleter := &audio.FileDeleter{
Deleter: file.NewDeleterWithTrash(trashPath),
FileNamingAlgo: mgr.Config.GetVideoFileNamingAlgorithm(),
FileNamingAlgo: mgr.Config.GetAudioFileNamingAlgorithm(),
Paths: mgr.Paths,
}
@ -687,386 +654,6 @@ func (r *mutationResolver) AudioMerge(ctx context.Context, input AudioMergeInput
return ret, nil
}
// getAudioMarker fetches a single audio marker by id inside a transaction.
func (r *mutationResolver) getAudioMarker(ctx context.Context, id int) (ret *models.AudioMarker, err error) {
	txnErr := r.withTxn(ctx, func(ctx context.Context) error {
		ret, err = r.repository.AudioMarker.Find(ctx, id)
		return err
	})
	if txnErr != nil {
		return nil, txnErr
	}
	return ret, nil
}
// AudioMarkerCreate creates a new marker on an audio item from the given
// input, saves its non-primary tags in the same transaction, fires the
// AudioMarkerCreatePost hook after commit, and returns the persisted marker.
func (r *mutationResolver) AudioMarkerCreate(ctx context.Context, input AudioMarkerCreateInput) (*models.AudioMarker, error) {
	audioID, err := strconv.Atoi(input.AudioID)
	if err != nil {
		return nil, fmt.Errorf("converting audio id: %w", err)
	}
	primaryTagID, err := strconv.Atoi(input.PrimaryTagID)
	if err != nil {
		return nil, fmt.Errorf("converting primary tag id: %w", err)
	}

	// Populate a new audio marker from the input
	newMarker := models.NewAudioMarker()

	newMarker.Title = strings.TrimSpace(input.Title)
	newMarker.Seconds = input.Seconds
	newMarker.PrimaryTagID = primaryTagID
	newMarker.AudioID = audioID

	// an end time, when supplied, must not precede the start time
	if input.EndSeconds != nil {
		if err := validateAudioMarkerEndSeconds(newMarker.Seconds, *input.EndSeconds); err != nil {
			return nil, err
		}
		newMarker.EndSeconds = input.EndSeconds
	}

	tagIDs, err := stringslice.StringSliceToIntSlice(input.TagIds)
	if err != nil {
		return nil, fmt.Errorf("converting tag ids: %w", err)
	}

	if err := r.withTxn(ctx, func(ctx context.Context) error {
		qb := r.repository.AudioMarker

		err := qb.Create(ctx, &newMarker)
		if err != nil {
			return err
		}

		// Save the marker tags
		// If this tag is the primary tag, then let's not add it.
		tagIDs = sliceutil.Exclude(tagIDs, []int{newMarker.PrimaryTagID})

		return qb.UpdateTags(ctx, newMarker.ID, tagIDs)
	}); err != nil {
		return nil, err
	}

	// post hooks run outside the transaction, after the marker exists
	r.hookExecutor.ExecutePostHooks(ctx, newMarker.ID, hook.AudioMarkerCreatePost, input, nil)

	// re-read so the caller receives the persisted state
	return r.getAudioMarker(ctx, newMarker.ID)
}
// validateAudioMarkerEndSeconds ensures a marker's end time does not precede
// its start time; equal values are allowed.
func validateAudioMarkerEndSeconds(seconds, endSeconds float64) error {
	if endSeconds >= seconds {
		return nil
	}
	return fmt.Errorf("end_seconds (%f) must be greater than or equal to seconds (%f)", endSeconds, seconds)
}
// float64OrZero dereferences f, treating nil as zero.
func float64OrZero(f *float64) float64 {
	var v float64
	if f != nil {
		v = *f
	}
	return v
}
// AudioMarkerUpdate applies a partial update to an audio marker. It validates
// the start/end time relationship, deletes generated marker preview files when
// the marker moved (different audio or timestamp), updates the marker's
// non-primary tags when tag_ids was supplied, fires the AudioMarkerUpdatePost
// hook after commit, and returns the persisted marker.
func (r *mutationResolver) AudioMarkerUpdate(ctx context.Context, input AudioMarkerUpdateInput) (*models.AudioMarker, error) {
	markerID, err := strconv.Atoi(input.ID)
	if err != nil {
		return nil, fmt.Errorf("converting id: %w", err)
	}

	translator := changesetTranslator{
		inputMap: getUpdateInputMap(ctx),
	}

	// Populate audio marker from the input
	updatedMarker := models.NewAudioMarkerPartial()

	updatedMarker.Title = translator.optionalString(input.Title, "title")
	updatedMarker.Seconds = translator.optionalFloat64(input.Seconds, "seconds")
	updatedMarker.EndSeconds = translator.optionalFloat64(input.EndSeconds, "end_seconds")
	updatedMarker.AudioID, err = translator.optionalIntFromString(input.AudioID, "audio_id")
	if err != nil {
		return nil, fmt.Errorf("converting audio id: %w", err)
	}
	updatedMarker.PrimaryTagID, err = translator.optionalIntFromString(input.PrimaryTagID, "primary_tag_id")
	if err != nil {
		return nil, fmt.Errorf("converting primary tag id: %w", err)
	}

	var tagIDs []int
	tagIdsIncluded := translator.hasField("tag_ids")
	if input.TagIds != nil {
		tagIDs, err = stringslice.StringSliceToIntSlice(input.TagIds)
		if err != nil {
			return nil, fmt.Errorf("converting tag ids: %w", err)
		}
	}

	mgr := manager.GetInstance()
	trashPath := mgr.Config.GetDeleteTrashPath()

	fileDeleter := &audio.FileDeleter{
		Deleter: file.NewDeleterWithTrash(trashPath),
		// fixed: was GetVideoFileNamingAlgorithm, inconsistent with the
		// other audio mutations in this file
		FileNamingAlgo: mgr.Config.GetAudioFileNamingAlgorithm(),
		Paths:          mgr.Paths,
	}

	// Start the transaction and save the audio marker
	if err := r.withTxn(ctx, func(ctx context.Context) error {
		qb := r.repository.AudioMarker
		sqb := r.repository.Audio

		// check to see if timestamp was changed
		existingMarker, err := qb.Find(ctx, markerID)
		if err != nil {
			return err
		}
		if existingMarker == nil {
			return fmt.Errorf("audio marker with id %d not found", markerID)
		}

		// Validate end_seconds against the effective start time, using the
		// incoming values where set and the stored values otherwise
		shouldValidateEndSeconds := (updatedMarker.Seconds.Set || updatedMarker.EndSeconds.Set) && !updatedMarker.EndSeconds.Null
		if shouldValidateEndSeconds {
			seconds := existingMarker.Seconds
			if updatedMarker.Seconds.Set {
				seconds = updatedMarker.Seconds.Value
			}

			endSeconds := existingMarker.EndSeconds
			if updatedMarker.EndSeconds.Set {
				endSeconds = &updatedMarker.EndSeconds.Value
			}

			if endSeconds != nil {
				if err := validateAudioMarkerEndSeconds(seconds, *endSeconds); err != nil {
					return err
				}
			}
		}

		newMarker, err := qb.UpdatePartial(ctx, markerID, updatedMarker)
		if err != nil {
			return err
		}

		existingAudio, err := sqb.Find(ctx, existingMarker.AudioID)
		if err != nil {
			return err
		}
		if existingAudio == nil {
			return fmt.Errorf("audio with id %d not found", existingMarker.AudioID)
		}

		// remove the marker preview if the audio changed or if the timestamp was changed
		if existingMarker.AudioID != newMarker.AudioID || existingMarker.Seconds != newMarker.Seconds || float64OrZero(existingMarker.EndSeconds) != float64OrZero(newMarker.EndSeconds) {
			seconds := int(existingMarker.Seconds)
			if err := fileDeleter.MarkMarkerFiles(existingAudio, seconds); err != nil {
				return err
			}
		}

		if tagIdsIncluded {
			// Save the marker tags
			// If this tag is the primary tag, then let's not add it.
			tagIDs = sliceutil.Exclude(tagIDs, []int{newMarker.PrimaryTagID})
			if err := qb.UpdateTags(ctx, markerID, tagIDs); err != nil {
				return err
			}
		}

		return nil
	}); err != nil {
		// restore any files marked for deletion
		fileDeleter.Rollback()
		return nil, err
	}

	// perform the post-commit actions
	fileDeleter.Commit()

	r.hookExecutor.ExecutePostHooks(ctx, markerID, hook.AudioMarkerUpdatePost, input, translator.getFields())
	return r.getAudioMarker(ctx, markerID)
}
// BulkAudioMarkerUpdate applies the same partial update (title, primary tag,
// tag list) to every marker in input.Ids within a single transaction, then
// fires the AudioMarkerUpdatePost hook and re-reads each marker outside the
// transaction.
func (r *mutationResolver) BulkAudioMarkerUpdate(ctx context.Context, input BulkAudioMarkerUpdateInput) ([]*models.AudioMarker, error) {
	ids, err := stringslice.StringSliceToIntSlice(input.Ids)
	if err != nil {
		return nil, fmt.Errorf("converting ids: %w", err)
	}

	translator := changesetTranslator{
		inputMap: getUpdateInputMap(ctx),
	}

	// Populate performer from the input
	partial := models.NewAudioMarkerPartial()

	partial.Title = translator.optionalString(input.Title, "title")
	partial.PrimaryTagID, err = translator.optionalIntFromString(input.PrimaryTagID, "primary_tag_id")
	if err != nil {
		return nil, fmt.Errorf("converting primary tag id: %w", err)
	}
	partial.TagIDs, err = translator.updateIdsBulk(input.TagIds, "tag_ids")
	if err != nil {
		return nil, fmt.Errorf("converting tag ids: %w", err)
	}

	ret := []*models.AudioMarker{}

	// Start the transaction and save the performers
	if err := r.withTxn(ctx, func(ctx context.Context) error {
		qb := r.repository.AudioMarker

		for _, id := range ids {
			// copy the shared partial so the per-marker tag adjustment below
			// does not leak into the other markers
			l := partial
			if err := adjustMarkerPartialForTagExclusion(ctx, r.repository.AudioMarker, id, &l); err != nil {
				return err
			}

			updated, err := qb.UpdatePartial(ctx, id, l)
			if err != nil {
				return err
			}

			ret = append(ret, updated)
		}

		return nil
	}); err != nil {
		return nil, err
	}

	// execute post hooks outside of txn
	var newRet []*models.AudioMarker
	for _, m := range ret {
		r.hookExecutor.ExecutePostHooks(ctx, m.ID, hook.AudioMarkerUpdatePost, input, translator.getFields())

		// re-read the marker so callers see the committed state
		m, err = r.getAudioMarker(ctx, m.ID)
		if err != nil {
			return nil, err
		}

		newRet = append(newRet, m)
	}

	return newRet, nil
}
// adjustMarkerPartialForTagExclusion adjusts the AudioMarkerPartial so that
// the marker's (possibly updated) primary tag is never stored in its tag
// list. When the exclusion changes nothing, the partial is left untouched.
func adjustMarkerPartialForTagExclusion(ctx context.Context, r models.AudioMarkerReader, id int, partial *models.AudioMarkerPartial) error {
	// nothing to do unless tags or the primary tag are being updated
	if partial.TagIDs == nil && !partial.PrimaryTagID.Set {
		return nil
	}

	// effective primary tag: the incoming value when it is being updated,
	// otherwise the currently stored one
	var primaryTagID int
	if partial.PrimaryTagID.Set {
		primaryTagID = partial.PrimaryTagID.Value
	} else {
		existing, err := r.Find(ctx, id)
		if err != nil {
			return fmt.Errorf("finding existing primary tag id: %w", err)
		}

		primaryTagID = existing.PrimaryTagID
	}

	existingTagIDs, err := r.GetTagIDs(ctx, id)
	if err != nil {
		return fmt.Errorf("getting existing tag ids: %w", err)
	}

	// when no tag update was supplied, synthesise one that re-sets the
	// existing tags so the exclusion below can still take effect
	tagIDAttr := partial.TagIDs
	if tagIDAttr == nil {
		tagIDAttr = &models.UpdateIDs{
			IDs:  existingTagIDs,
			Mode: models.RelationshipUpdateModeSet,
		}
	}

	appliedTagIDs := tagIDAttr.Apply(existingTagIDs)

	// Remove primary tag from the resulting set if present
	newTagIDs := sliceutil.Exclude(appliedTagIDs, []int{primaryTagID})

	if len(newTagIDs) != len(appliedTagIDs) {
		// the primary tag was present: store the filtered set instead
		partial.TagIDs = &models.UpdateIDs{
			IDs:  newTagIDs,
			Mode: models.RelationshipUpdateModeSet,
		}
	}
	// otherwise leave partial.TagIDs as supplied. The previous code compared
	// against len(existingTagIDs) and nil-ed the update when the lengths
	// matched, which silently dropped same-length tag replacements
	// (e.g. setting [3,4] over existing [1,2]).

	return nil
}
// AudioMarkerDestroy removes a single audio marker by delegating to the
// bulk destroy mutation with a one-element id list.
func (r *mutationResolver) AudioMarkerDestroy(ctx context.Context, id string) (bool, error) {
	ids := []string{id}
	return r.AudioMarkersDestroy(ctx, ids)
}
// AudioMarkersDestroy deletes the given audio markers and their generated
// files within one transaction, rolling back the file deletions if the
// transaction fails, and fires the AudioMarkerDestroyPost hook for each
// destroyed marker after commit.
func (r *mutationResolver) AudioMarkersDestroy(ctx context.Context, markerIDs []string) (bool, error) {
	ids, err := stringslice.StringSliceToIntSlice(markerIDs)
	if err != nil {
		return false, fmt.Errorf("converting ids: %w", err)
	}

	var markers []*models.AudioMarker

	// fixed: was GetVideoFileNamingAlgorithm, inconsistent with the other
	// audio mutations in this file
	fileNamingAlgo := manager.GetInstance().Config.GetAudioFileNamingAlgorithm()
	trashPath := manager.GetInstance().Config.GetDeleteTrashPath()

	fileDeleter := &audio.FileDeleter{
		Deleter:        file.NewDeleterWithTrash(trashPath),
		FileNamingAlgo: fileNamingAlgo,
		Paths:          manager.GetInstance().Paths,
	}

	if err := r.withTxn(ctx, func(ctx context.Context) error {
		qb := r.repository.AudioMarker
		sqb := r.repository.Audio

		for _, markerID := range ids {
			marker, err := qb.Find(ctx, markerID)
			if err != nil {
				return err
			}
			if marker == nil {
				return fmt.Errorf("audio marker with id %d not found", markerID)
			}

			s, err := sqb.Find(ctx, marker.AudioID)
			if err != nil {
				return err
			}
			if s == nil {
				return fmt.Errorf("audio with id %d not found", marker.AudioID)
			}

			markers = append(markers, marker)

			if err := audio.DestroyMarker(ctx, s, marker, qb, fileDeleter); err != nil {
				return err
			}
		}

		return nil
	}); err != nil {
		// restore any files marked for deletion
		fileDeleter.Rollback()
		return false, err
	}

	// perform the post-commit actions
	fileDeleter.Commit()

	for _, marker := range markers {
		r.hookExecutor.ExecutePostHooks(ctx, marker.ID, hook.AudioMarkerDestroyPost, markerIDs, nil)
	}

	return true, nil
}
func (r *mutationResolver) AudioSaveActivity(ctx context.Context, id string, resumeTime *float64, playDuration *float64) (ret bool, err error) {
audioID, err := strconv.Atoi(id)
if err != nil {

View file

@ -0,0 +1,45 @@
package api
import (
"context"
"fmt"
"strconv"
"github.com/stashapp/stash/internal/api/urlbuilders"
"github.com/stashapp/stash/internal/manager"
"github.com/stashapp/stash/pkg/models"
)
// AudioStreams returns the available stream endpoints for the audio item
// identified by id, with its primary file loaded.
func (r *queryResolver) AudioStreams(ctx context.Context, id *string) ([]*manager.AudioStreamEndpoint, error) {
	// guard the optional pointer parameter: dereferencing nil would panic
	if id == nil {
		return nil, fmt.Errorf("audio id is required")
	}

	audioID, err := strconv.Atoi(*id)
	if err != nil {
		return nil, err
	}

	// find the audio
	var audio *models.Audio
	if err := r.withReadTxn(ctx, func(ctx context.Context) error {
		var err error
		audio, err = r.repository.Audio.Find(ctx, audioID)
		if audio != nil {
			// the stream path builder needs the primary file
			err = audio.LoadPrimaryFile(ctx, r.repository.File)
		}
		return err
	}); err != nil {
		return nil, err
	}

	if audio == nil {
		return nil, fmt.Errorf("audio with id %d not found", audioID)
	}

	config := manager.GetInstance().Config

	baseURL, _ := ctx.Value(BaseURLCtxKey).(string)
	builder := urlbuilders.NewAudioURLBuilder(baseURL, audio)
	apiKey := config.GetAPIKey()

	return manager.GetAudioStreamPaths(audio, builder.GetStreamURL(apiKey), config.GetMaxStreamingTranscodeSize())
}

View file

@ -159,12 +159,14 @@ func Initialize() (*Server, error) {
pluginCache := mgr.PluginCache
sceneService := mgr.SceneService
audioService := mgr.AudioService
imageService := mgr.ImageService
galleryService := mgr.GalleryService
groupService := mgr.GroupService
resolver := &Resolver{
repository: repo,
sceneService: sceneService,
audioService: audioService,
imageService: imageService,
galleryService: galleryService,
groupService: groupService,

View file

@ -0,0 +1,68 @@
// TODO(audio): update this file
package urlbuilders
import (
"fmt"
"net/url"
"strconv"
"github.com/stashapp/stash/pkg/models"
)
// AudioURLBuilder constructs the server URLs (stream, preview, sprite,
// caption, ...) for a single audio item.
type AudioURLBuilder struct {
	BaseURL   string // server base URL; presumably without a trailing slash — confirm against callers
	AudioID   string // audio item database id rendered as a string
	UpdatedAt string // unix timestamp of the last update, used as a cache-busting query value
}
// NewAudioURLBuilder builds an AudioURLBuilder for the given audio item.
func NewAudioURLBuilder(baseURL string, audio *models.Audio) AudioURLBuilder {
	b := AudioURLBuilder{BaseURL: baseURL}
	b.AudioID = strconv.Itoa(audio.ID)
	b.UpdatedAt = strconv.FormatInt(audio.UpdatedAt.Unix(), 10)
	return b
}
// GetStreamURL returns the stream endpoint URL for the audio item,
// appending the API key as a query parameter when one is configured.
func (b AudioURLBuilder) GetStreamURL(apiKey string) *url.URL {
	raw := fmt.Sprintf("%s/audio/%s/stream", b.BaseURL, b.AudioID)
	u, err := url.Parse(raw)
	if err != nil {
		// shouldn't happen
		panic(err)
	}
	if apiKey == "" {
		return u
	}
	q := u.Query()
	q.Set("apikey", apiKey)
	u.RawQuery = q.Encode()
	return u
}
// GetStreamPreviewURL returns the preview endpoint URL for the audio item.
func (b AudioURLBuilder) GetStreamPreviewURL() string {
	return fmt.Sprintf("%s/audio/%s/preview", b.BaseURL, b.AudioID)
}
// GetStreamPreviewImageURL returns the webp preview-image endpoint URL for
// the audio item.
func (b AudioURLBuilder) GetStreamPreviewImageURL() string {
	return fmt.Sprintf("%s/audio/%s/webp", b.BaseURL, b.AudioID)
}
// GetSpriteVTTURL returns the sprite VTT endpoint URL, keyed by the item's
// file checksum rather than its id.
func (b AudioURLBuilder) GetSpriteVTTURL(checksum string) string {
	return fmt.Sprintf("%s/audio/%s_thumbs.vtt", b.BaseURL, checksum)
}
// GetSpriteURL returns the sprite image endpoint URL, keyed by the item's
// file checksum rather than its id.
func (b AudioURLBuilder) GetSpriteURL(checksum string) string {
	return fmt.Sprintf("%s/audio/%s_sprite.jpg", b.BaseURL, checksum)
}
// GetScreenshotURL returns the screenshot endpoint URL, with the item's
// update timestamp appended as a cache-busting query value.
func (b AudioURLBuilder) GetScreenshotURL() string {
	return fmt.Sprintf("%s/audio/%s/screenshot?t=%s", b.BaseURL, b.AudioID, b.UpdatedAt)
}
// GetFunscriptURL returns the funscript endpoint URL for the audio item.
func (b AudioURLBuilder) GetFunscriptURL() string {
	return fmt.Sprintf("%s/audio/%s/funscript", b.BaseURL, b.AudioID)
}
// GetCaptionURL returns the caption endpoint URL for the audio item.
func (b AudioURLBuilder) GetCaptionURL() string {
	return fmt.Sprintf("%s/audio/%s/caption", b.BaseURL, b.AudioID)
}

View file

@ -1,3 +1,4 @@
// TODO(audio): update this file
// Package identify provides the scene identification functionality for the application.
// The identify functionality uses scene scrapers to identify a given scene and
// set its metadata based on the scraped data.

213
internal/manager/audio.go Normal file
View file

@ -0,0 +1,213 @@
// TODO(audio): update this file
package manager
import (
"fmt"
"net/url"
"github.com/stashapp/stash/internal/manager/config"
"github.com/stashapp/stash/pkg/ffmpeg"
"github.com/stashapp/stash/pkg/fsutil"
"github.com/stashapp/stash/pkg/models"
)
// AudioStreamEndpoint describes one playable stream variant of an audio item
// as exposed to the client.
type AudioStreamEndpoint struct {
	URL      string  `json:"url"`       // absolute stream URL
	MimeType *string `json:"mime_type"` // MIME type of the stream, when known
	Label    *string `json:"label"`     // human-readable label shown in the player
}
// Descriptors for the stream variants offered for audio items; each pairs a
// player label with a MIME type and the URL path extension that selects it.
var (
	// TODO(audio): figure out what stream types we need, and what we can support
	directAudioEndpointType = endpointType{
		label:     "Direct stream",
		mimeType:  ffmpeg.MimeMp4Audio,
		extension: "", // no extension: serves the file's own container
	}
	mp3AudioEndpointType = endpointType{
		label:     "MP3",
		mimeType:  ffmpeg.MimeMp3Audio,
		extension: ".mp3",
	}
)
// GetAudioFileContainer determines the ffmpeg container of the given audio
// file, preferring the format stored in the database and falling back to
// probing the file on disk when the stored format is empty.
func GetAudioFileContainer(file *models.AudioFile) (ffmpeg.Container, error) {
	var container ffmpeg.Container
	format := file.Format
	if format != "" {
		container = ffmpeg.Container(format)
	} else { // container isn't in the DB
		// shouldn't happen, fallback to ffprobe
		ffprobe := GetInstance().FFProbe
		tmpAudioFile, err := ffprobe.NewAudioFile(file.Path)
		if err != nil {
			// fixed: previously said "video file" in audio code
			return ffmpeg.Container(""), fmt.Errorf("error reading audio file: %v", err)
		}

		return ffmpeg.MatchContainer(tmpAudioFile.Container, file.Path)
	}

	return container, nil
}
func GetAudioStreamPaths(audio *models.Audio, directStreamURL *url.URL, maxStreamingTranscodeSize models.StreamingResolutionEnum) ([]*AudioStreamEndpoint, error) {
if audio == nil {
return nil, fmt.Errorf("nil audio")
}
pf := audio.Files.Primary()
if pf == nil {
return nil, nil
}
// convert StreamingResolutionEnum to ResolutionEnum
maxStreamingResolution := models.ResolutionEnum(maxStreamingTranscodeSize)
audioResolution := models.GetMinResolution(pf)
includeAudioStreamPath := func(streamingResolution models.StreamingResolutionEnum) bool {
var minResolution int
if streamingResolution == models.StreamingResolutionEnumOriginal {
minResolution = audioResolution
} else {
// convert StreamingResolutionEnum to ResolutionEnum so we can get the min
// resolution
convertedRes := models.ResolutionEnum(streamingResolution)
minResolution = convertedRes.GetMinResolution()
// don't include if audio resolution is smaller than the streamingResolution
if audioResolution != 0 && audioResolution < minResolution {
return false
}
}
// if we always allow everything, then return true
if maxStreamingTranscodeSize == models.StreamingResolutionEnumOriginal {
return true
}
return maxStreamingResolution.GetMinResolution() >= minResolution
}
makeStreamEndpoint := func(t endpointType, resolution models.StreamingResolutionEnum) *AudioStreamEndpoint {
url := *directStreamURL
url.Path += t.extension
label := t.label
if resolution != "" {
v := url.Query()
v.Set("resolution", resolution.String())
url.RawQuery = v.Encode()
switch resolution {
case models.StreamingResolutionEnumFourK:
label += " 4K (2160p)"
case models.StreamingResolutionEnumFullHd:
label += " Full HD (1080p)"
case models.StreamingResolutionEnumStandardHd:
label += " HD (720p)"
case models.StreamingResolutionEnumStandard:
label += " Standard (480p)"
case models.StreamingResolutionEnumLow:
label += " Low (240p)"
}
}
return &AudioStreamEndpoint{
URL: url.String(),
MimeType: &t.mimeType,
Label: &label,
}
}
var endpoints []*AudioStreamEndpoint
// direct stream should only apply when the audio codec is supported
audioCodec := ffmpeg.MissingUnsupported
if pf.AudioCodec != "" {
audioCodec = ffmpeg.ProbeAudioCodec(pf.AudioCodec)
}
// don't care if we can't get the container
container, _ := GetAudioFileContainer(pf)
if HasAudioTranscode(audio, config.GetInstance().GetAudioFileNamingAlgorithm()) || ffmpeg.IsValidAudioForContainer(audioCodec, container) {
endpoints = append(endpoints, makeStreamEndpoint(directAudioEndpointType, ""))
}
// only add mkv stream endpoint if the audio container is an mkv already
if container == ffmpeg.Matroska {
endpoints = append(endpoints, makeStreamEndpoint(mkvAudioEndpointType, ""))
}
mp4Streams := []*AudioStreamEndpoint{}
webmStreams := []*AudioStreamEndpoint{}
hlsStreams := []*AudioStreamEndpoint{}
dashStreams := []*AudioStreamEndpoint{}
if includeAudioStreamPath(models.StreamingResolutionEnumOriginal) {
mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumOriginal))
webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumOriginal))
hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumOriginal))
dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumOriginal))
}
if includeAudioStreamPath(models.StreamingResolutionEnumFourK) {
mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumFourK))
webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumFourK))
hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumFourK))
dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumFourK))
}
if includeAudioStreamPath(models.StreamingResolutionEnumFullHd) {
mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumFullHd))
webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumFullHd))
hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumFullHd))
dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumFullHd))
}
if includeAudioStreamPath(models.StreamingResolutionEnumStandardHd) {
mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumStandardHd))
webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumStandardHd))
hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumStandardHd))
dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumStandardHd))
}
if includeAudioStreamPath(models.StreamingResolutionEnumStandard) {
mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumStandard))
webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumStandard))
hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumStandard))
dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumStandard))
}
if includeAudioStreamPath(models.StreamingResolutionEnumLow) {
mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumLow))
webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumLow))
hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumLow))
dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumLow))
}
endpoints = append(endpoints, mp4Streams...)
endpoints = append(endpoints, webmStreams...)
endpoints = append(endpoints, hlsStreams...)
endpoints = append(endpoints, dashStreams...)
return endpoints, nil
}
// HasAudioTranscode reports whether a generated transcode file exists for the
// provided audio. The transcode path is derived from the audio's hash under
// the given file naming algorithm; a nil audio or empty hash yields false.
func HasAudioTranscode(audio *models.Audio, fileNamingAlgo models.HashAlgorithm) bool {
	if audio == nil {
		return false
	}

	hash := audio.GetHash(fileNamingAlgo)
	if hash == "" {
		return false
	}

	// existence check errors are deliberately ignored; treat as "not present"
	exists, _ := fsutil.FileExists(instance.Paths.Audio.GetTranscodePath(hash))
	return exists
}

View file

@ -836,6 +836,20 @@ func (i *Config) GetVideoFileNamingAlgorithm() models.HashAlgorithm {
return models.HashAlgorithm(ret)
}
// GetAudioFileNamingAlgorithm returns what hash algorithm should be used for
// naming generated audio files. Defaults to oshash when the setting is empty.
func (i *Config) GetAudioFileNamingAlgorithm() models.HashAlgorithm {
	// TODO(audio): update this to AudioFileNamingAlgorithm?
	// Audio currently shares the video file naming algorithm setting.
	if ret := i.getString(VideoFileNamingAlgorithm); ret != "" {
		return models.HashAlgorithm(ret)
	}

	// default to oshash
	return models.HashAlgorithmOshash
}
// GetSequentialScanning reports whether the SequentialScanning option is enabled.
func (i *Config) GetSequentialScanning() bool {
	return i.getBool(SequentialScanning)
}

View file

@ -55,6 +55,14 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) {
Config: cfg,
}
audioService := &audio.Service{
File: db.File,
Repository: db.Audio,
PluginCache: pluginCache,
Paths: mgrPaths,
Config: cfg,
}
imageService := &image.Service{
File: db.File,
Repository: db.Image,
@ -102,6 +110,7 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) {
Repository: repo,
SceneService: sceneService,
AudioService: audioService,
ImageService: imageService,
GalleryService: galleryService,
GroupService: groupService,

View file

@ -64,6 +64,7 @@ type Manager struct {
Repository models.Repository
SceneService SceneService
AudioService AudioService
ImageService ImageService
GalleryService GalleryService
GroupService GroupService

View file

@ -331,6 +331,7 @@ func (s *Manager) Clean(ctx context.Context, input CleanMetadataInput) int {
j := cleanJob{
cleaner: cleaner,
repository: s.Repository,
audioService: s.AudioService,
sceneService: s.SceneService,
imageService: s.ImageService,
input: input,

View file

@ -7,6 +7,7 @@ import (
"github.com/stashapp/stash/pkg/image"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/scene"
"github.com/stashapp/stash/pkg/audio"
)
type SceneService interface {
@ -19,6 +20,16 @@ type SceneService interface {
sceneFingerprintGetter
}
// AudioService provides the audio operations required by the manager:
// creation, file assignment, merging, destruction, and lookup by ID.
type AudioService interface {
	Create(ctx context.Context, input models.CreateAudioInput) (*models.Audio, error)
	AssignFile(ctx context.Context, audioID int, fileID models.FileID) error
	Merge(ctx context.Context, sourceIDs []int, destinationID int, fileDeleter *audio.FileDeleter, options audio.MergeOptions) error
	Destroy(ctx context.Context, audio *models.Audio, fileDeleter *audio.FileDeleter, deleteGenerated, deleteFile, destroyFileEntry bool) error
	FindByIDs(ctx context.Context, ids []int, load ...audio.LoadRelationshipOption) ([]*models.Audio, error)
	audioFingerprintGetter
}
type ImageService interface {
Destroy(ctx context.Context, image *models.Image, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile, destroyFileEntry bool) error
DestroyZipImages(ctx context.Context, zipFile models.File, fileDeleter *image.FileDeleter, deleteGenerated bool) ([]*models.Image, error)

View file

@ -30,6 +30,19 @@ func KillRunningStreams(scene *models.Scene, fileNamingAlgo models.HashAlgorithm
instance.ReadLockManager.Cancel(transcodePath)
}
// KillRunningStreamsAudio cancels any in-flight read locks for the audio's
// source file and, when a hash can be computed, for its generated transcode
// file as well, so the files can be safely modified or deleted.
func KillRunningStreamsAudio(audio *models.Audio, fileNamingAlgo models.HashAlgorithm) {
	instance.ReadLockManager.Cancel(audio.Path)

	audioHash := audio.GetHash(fileNamingAlgo)
	if audioHash == "" {
		return
	}

	// use the instance singleton directly, consistent with the Cancel call
	// above and with KillRunningStreams
	transcodePath := instance.Paths.Audio.GetTranscodePath(audioHash)
	instance.ReadLockManager.Cancel(transcodePath)
}
// SceneCoverGetter retrieves the stored cover image bytes for a scene by ID.
type SceneCoverGetter interface {
	GetCover(ctx context.Context, sceneID int) ([]byte, error)
}

View file

@ -27,6 +27,7 @@ type cleanJob struct {
cleaner cleaner
repository models.Repository
input CleanMetadataInput
audioService AudioService
sceneService SceneService
imageService ImageService
scanSubs *subscriptionManager

View file

@ -21,7 +21,8 @@ import (
"github.com/stashapp/stash/pkg/file/video"
"github.com/stashapp/stash/pkg/fsutil"
"github.com/stashapp/stash/pkg/gallery"
"github.com/stashapp/stash/pkg/audio"
// TODO(audio): uncomment
// "github.com/stashapp/stash/pkg/audio"
"github.com/stashapp/stash/pkg/image"
"github.com/stashapp/stash/pkg/job"
"github.com/stashapp/stash/pkg/logger"
@ -685,6 +686,28 @@ func getScanHandlers(options ScanMetadataInput, taskQueue *job.TaskQueue, progre
Paths: instance.Paths,
},
},
// &file.FilteredHandler{
// Filter: file.FilterFunc(audioFileFilter),
// Handler: &audio.ScanHandler{
// CreatorUpdater: r.Audio,
// GalleryFinder: r.Gallery,
// SceneFinderUpdater: r.Scene,
// // ScanGenerator: &audioGenerators{
// // input: options,
// // taskQueue: taskQueue,
// // progress: progress,
// // paths: mgr.Paths,
// // sequentialScanning: c.GetSequentialScanning(),
// // },
// // ScanConfig: &scanConfig{
// // isGenerateThumbnails: options.ScanGenerateThumbnails,
// // isGenerateClipPreviews: options.ScanGenerateClipPreviews,
// // createGalleriesFromFolders: c.GetCreateGalleriesFromFolders(),
// // },
// PluginCache: pluginCache,
// Paths: instance.Paths,
// },
// },
&file.FilteredHandler{
Filter: file.FilterFunc(galleryFileFilter),
Handler: &gallery.ScanHandler{

View file

@ -72,40 +72,6 @@ func (d *FileDeleter) MarkGeneratedFiles(audio *models.Audio) error {
files = append(files, vttPath)
}
heatmapPath := d.Paths.Audio.GetInteractiveHeatmapPath(audioHash)
exists, _ = fsutil.FileExists(heatmapPath)
if exists {
files = append(files, heatmapPath)
}
return d.FilesWithoutTrash(files)
}
// MarkMarkerFiles deletes generated files for an audio marker with the
// provided audio and timestamp.
// Generated files bypass trash and are permanently deleted since they can be regenerated.
func (d *FileDeleter) MarkMarkerFiles(audio *models.Audio, seconds int) error {
	// the hash is the same for every generated path, so compute it once
	audioHash := audio.GetHash(d.FileNamingAlgo)

	candidates := []string{
		d.Paths.AudioMarkers.GetVideoPreviewPath(audioHash, seconds),
		d.Paths.AudioMarkers.GetWebpPreviewPath(audioHash, seconds),
		d.Paths.AudioMarkers.GetScreenshotPath(audioHash, seconds),
	}

	// only mark files that actually exist; existence errors are treated as absent
	var files []string
	for _, p := range candidates {
		if exists, _ := fsutil.FileExists(p); exists {
			files = append(files, p)
		}
	}

	return d.FilesWithoutTrash(files)
}

View file

@ -38,7 +38,6 @@ func ToBasicJSON(ctx context.Context, reader ExportGetter, audio *models.Audio)
Code: audio.Code,
URLs: audio.URLs.List(),
Details: audio.Details,
Director: audio.Director,
CreatedAt: json.JSONTime{Time: audio.CreatedAt},
UpdatedAt: json.JSONTime{Time: audio.UpdatedAt},
}
@ -66,17 +65,6 @@ func ToBasicJSON(ctx context.Context, reader ExportGetter, audio *models.Audio)
newAudioJSON.Cover = utils.GetBase64StringFromData(cover)
}
var ret []models.StashID
for _, stashID := range audio.StashIDs.List() {
newJoin := models.StashID{
StashID: stashID.StashID,
Endpoint: stashID.Endpoint,
}
ret = append(ret, newJoin)
}
newAudioJSON.StashIDs = ret
dates, err := reader.GetViewDates(ctx, audio.ID)
if err != nil {
return nil, fmt.Errorf("error getting view dates: %v", err)

View file

@ -109,9 +109,6 @@ func createFullAudio(id int) models.Audio {
},
},
}),
StashIDs: models.NewRelatedStashIDs([]models.StashID{
stashID,
}),
CreatedAt: createTime,
UpdatedAt: updateTime,
}
@ -128,7 +125,6 @@ func createEmptyAudio(id int) models.Audio {
},
}),
URLs: models.NewRelatedStrings([]string{}),
StashIDs: models.NewRelatedStashIDs([]models.StashID{}),
CreatedAt: createTime,
UpdatedAt: updateTime,
}
@ -149,10 +145,7 @@ func createFullJSONAudio(image string, customFields map[string]interface{}) *jso
UpdatedAt: json.JSONTime{
Time: updateTime,
},
Cover: image,
StashIDs: []models.StashID{
stashID,
},
Cover: image,
CustomFields: customFields,
}
}
@ -454,177 +447,3 @@ func TestGetAudioGroupsJSON(t *testing.T) {
db.AssertExpectations(t)
}
// Fixture IDs, tag names and marker values shared by the audio marker JSON tests.
const (
	// "valid" markers have resolvable tags in the mocked repositories;
	// "invalid" ones trigger tag-lookup failures
	validMarkerID1 = 1
	validMarkerID2 = 2
	invalidMarkerID1 = 3
	invalidMarkerID2 = 4
	validTagID1 = 1
	validTagID2 = 2
	validTagName1 = "validTagName1"
	validTagName2 = "validTagName2"
	// invalidTagID causes the mocked Tag.Find to return an error
	invalidTagID = 3
	markerTitle1 = "markerTitle1"
	markerTitle2 = "markerTitle2"
	markerSeconds1 = 1.0
	markerSeconds2 = 2.3
	// string forms of the marker seconds as they appear in exported JSON
	markerSeconds1Str = "1.0"
	markerSeconds2Str = "2.3"
)
// audioMarkersTestScenario describes one GetAudioMarkersJSON test case.
type audioMarkersTestScenario struct {
	input models.Audio // audio passed to GetAudioMarkersJSON
	expected []jsonschema.AudioMarker // expected marker JSON; nil when none expected
	err bool // whether an error is expected
}
// getAudioMarkersJSONScenarios drives TestGetAudioMarkersJSON. The audio ID
// passed to createEmptyAudio selects which mocked repository response is
// returned (see the mock setup in TestGetAudioMarkersJSON).
var getAudioMarkersJSONScenarios = []audioMarkersTestScenario{
	{
		// two valid markers with resolvable primary and secondary tags
		createEmptyAudio(audioID),
		[]jsonschema.AudioMarker{
			{
				Title: markerTitle1,
				PrimaryTag: validTagName1,
				Seconds: markerSeconds1Str,
				Tags: []string{
					validTagName1,
					validTagName2,
				},
				CreatedAt: json.JSONTime{
					Time: createTime,
				},
				UpdatedAt: json.JSONTime{
					Time: updateTime,
				},
			},
			{
				Title: markerTitle2,
				PrimaryTag: validTagName2,
				Seconds: markerSeconds2Str,
				Tags: []string{
					validTagName2,
				},
				CreatedAt: json.JSONTime{
					Time: createTime,
				},
				UpdatedAt: json.JSONTime{
					Time: updateTime,
				},
			},
		},
		false,
	},
	{
		// audio with no markers: nil output, no error
		createEmptyAudio(noMarkersID),
		nil,
		false,
	},
	{
		// repository returns an error finding markers
		createEmptyAudio(errMarkersID),
		nil,
		true,
	},
	{
		// primary tag lookup fails
		createEmptyAudio(errFindPrimaryTagID),
		nil,
		true,
	},
	{
		// secondary tag lookup (FindByAudioMarkerID) fails
		createEmptyAudio(errFindByMarkerID),
		nil,
		true,
	},
}
// validMarkers are returned by the mocked FindByAudioID for audioID; both
// reference tags that the mocked Tag repository can resolve.
var validMarkers = []*models.AudioMarker{
	{
		ID: validMarkerID1,
		Title: markerTitle1,
		PrimaryTagID: validTagID1,
		Seconds: markerSeconds1,
		CreatedAt: createTime,
		UpdatedAt: updateTime,
	},
	{
		ID: validMarkerID2,
		Title: markerTitle2,
		PrimaryTagID: validTagID2,
		Seconds: markerSeconds2,
		CreatedAt: createTime,
		UpdatedAt: updateTime,
	},
}
// invalidMarkers1 has a primary tag whose lookup fails in the mocks.
var invalidMarkers1 = []*models.AudioMarker{
	{
		ID: invalidMarkerID1,
		PrimaryTagID: invalidTagID,
	},
}
// invalidMarkers2 has a valid primary tag but its FindByAudioMarkerID call
// fails in the mocks.
var invalidMarkers2 = []*models.AudioMarker{
	{
		ID: invalidMarkerID2,
		PrimaryTagID: validTagID1,
	},
}
// TestGetAudioMarkersJSON exercises GetAudioMarkersJSON against the mocked
// marker and tag repositories using getAudioMarkersJSONScenarios.
func TestGetAudioMarkersJSON(t *testing.T) {
	db := mocks.NewDatabase()

	markersErr := errors.New("error getting audio markers")
	tagErr := errors.New("error getting tags")

	// one FindByAudioID expectation per scenario, keyed by audio ID
	db.AudioMarker.On("FindByAudioID", testCtx, audioID).Return(validMarkers, nil).Once()
	db.AudioMarker.On("FindByAudioID", testCtx, noMarkersID).Return(nil, nil).Once()
	db.AudioMarker.On("FindByAudioID", testCtx, errMarkersID).Return(nil, markersErr).Once()
	db.AudioMarker.On("FindByAudioID", testCtx, errFindPrimaryTagID).Return(invalidMarkers1, nil).Once()
	db.AudioMarker.On("FindByAudioID", testCtx, errFindByMarkerID).Return(invalidMarkers2, nil).Once()

	// primary tag lookups; invalidTagID fails
	db.Tag.On("Find", testCtx, validTagID1).Return(&models.Tag{
		Name: validTagName1,
	}, nil)
	db.Tag.On("Find", testCtx, validTagID2).Return(&models.Tag{
		Name: validTagName2,
	}, nil)
	db.Tag.On("Find", testCtx, invalidTagID).Return(nil, tagErr)

	// secondary tag lookups per marker; invalidMarkerID2 fails
	db.Tag.On("FindByAudioMarkerID", testCtx, validMarkerID1).Return([]*models.Tag{
		{
			Name: validTagName1,
		},
		{
			Name: validTagName2,
		},
	}, nil)
	db.Tag.On("FindByAudioMarkerID", testCtx, validMarkerID2).Return([]*models.Tag{
		{
			Name: validTagName2,
		},
	}, nil)
	db.Tag.On("FindByAudioMarkerID", testCtx, invalidMarkerID2).Return(nil, tagErr).Once()

	for i, s := range getAudioMarkersJSONScenarios {
		audio := s.input
		json, err := GetAudioMarkersJSON(testCtx, db.AudioMarker, db.Tag, &audio)
		switch {
		case !s.err && err != nil:
			t.Errorf("[%d] unexpected error: %s", i, err.Error())
		case s.err && err == nil:
			t.Errorf("[%d] expected error not returned", i)
		default:
			assert.Equal(t, s.expected, json, "[%d]", i)
		}
	}

	db.AssertExpectations(t)
}

View file

@ -675,8 +675,8 @@ func (p *FilenameParser) setGroups(ctx context.Context, qb GroupNameFinder, h au
group := p.queryGroup(ctx, qb, groupName)
if group != nil {
if _, found := groupsSet[group.ID]; !found {
result.Movies = append(result.Movies, &models.AudioMovieID{
MovieID: strconv.Itoa(group.ID),
result.Groups = append(result.Groups, &models.AudioGroupID{
GroupID: strconv.Itoa(group.ID),
})
groupsSet[group.ID] = true
}

View file

@ -19,14 +19,6 @@ func LoadURLs(ctx context.Context, audio *models.Audio, r models.AudioReader) er
return nil
}
// LoadStashIDs populates the stash ID relationships of the given audio via
// the provided reader, wrapping any error with the audio's ID for context.
func LoadStashIDs(ctx context.Context, audio *models.Audio, r models.AudioReader) error {
	if err := audio.LoadStashIDs(ctx, r); err != nil {
		return fmt.Errorf("failed to load stash IDs for audio %d: %w", audio.ID, err)
	}
	return nil
}
func LoadFiles(ctx context.Context, audio *models.Audio, r models.AudioReader) error {
if err := audio.LoadFiles(ctx, r); err != nil {
return fmt.Errorf("failed to load files for audio %d: %w", audio.ID, err)

View file

@ -92,12 +92,10 @@ func (i *Importer) audioJSONToAudio(audioJSON jsonschema.Audio) models.Audio {
Title: audioJSON.Title,
Code: audioJSON.Code,
Details: audioJSON.Details,
Director: audioJSON.Director,
PerformerIDs: models.NewRelatedIDs([]int{}),
TagIDs: models.NewRelatedIDs([]int{}),
GalleryIDs: models.NewRelatedIDs([]int{}),
Groups: models.NewRelatedGroups([]models.GroupsAudios{}),
StashIDs: models.NewRelatedStashIDs(audioJSON.StashIDs),
}
if len(audioJSON.URLs) > 0 {
@ -166,7 +164,7 @@ func (i *Importer) populateOHistory() {
}
func (i *Importer) populateFiles(ctx context.Context) error {
files := make([]*models.VideoFile, 0)
files := make([]*models.AudioFile, 0)
for _, ref := range i.Input.Files {
path := ref
@ -178,11 +176,11 @@ func (i *Importer) populateFiles(ctx context.Context) error {
if f == nil {
return fmt.Errorf("audio file '%s' not found", path)
} else {
files = append(files, f.(*models.VideoFile))
files = append(files, f.(*models.AudioFile))
}
}
i.audio.Files = models.NewRelatedVideoFiles(files)
i.audio.Files = models.NewRelatedAudioFiles(files)
return nil
}

View file

@ -49,11 +49,6 @@ func TestImporterPreImport(t *testing.T) {
title = "title"
code = "code"
details = "details"
director = "director"
endpoint1 = "endpoint1"
stashID1 = "stashID1"
endpoint2 = "endpoint2"
stashID2 = "stashID2"
url1 = "url1"
url2 = "url2"
rating = 3
@ -73,14 +68,9 @@ func TestImporterPreImport(t *testing.T) {
{
"basic",
jsonschema.Audio{
Title: title,
Code: code,
Details: details,
Director: director,
StashIDs: []models.StashID{
{Endpoint: endpoint1, StashID: stashID1},
{Endpoint: endpoint2, StashID: stashID2},
},
Title: title,
Code: code,
Details: details,
URLs: []string{url1, url2},
Rating: rating,
Organized: organized,
@ -90,14 +80,9 @@ func TestImporterPreImport(t *testing.T) {
PlayDuration: playDuration,
},
models.Audio{
Title: title,
Code: code,
Details: details,
Director: director,
StashIDs: models.NewRelatedStashIDs([]models.StashID{
{Endpoint: endpoint1, StashID: stashID1},
{Endpoint: endpoint2, StashID: stashID2},
}),
Title: title,
Code: code,
Details: details,
URLs: models.NewRelatedStrings([]string{url1, url2}),
Rating: &rating,
Organized: organized,

View file

@ -6,16 +6,11 @@ import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"slices"
"time"
"github.com/stashapp/stash/pkg/fsutil"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/sliceutil"
"github.com/stashapp/stash/pkg/txn"
)
type MergeOptions struct {
@ -55,10 +50,6 @@ func (s *Service) Merge(ctx context.Context, sourceIDs []int, destinationID int,
for _, f := range src.Files.List() {
fileIDs = append(fileIDs, f.Base().ID)
}
if err := s.mergeAudioMarkers(ctx, dest, src); err != nil {
return err
}
}
// move files to destination audio
@ -130,70 +121,3 @@ func (s *Service) Merge(ctx context.Context, sourceIDs []int, destinationID int,
return nil
}
// mergeAudioMarkers reassigns all markers of src to dest and, after the
// transaction commits, moves any generated marker files (screenshot,
// thumbnail, webp preview) from src's hash-based paths to dest's.
func (s *Service) mergeAudioMarkers(ctx context.Context, dest *models.Audio, src *models.Audio) error {
	markers, err := s.MarkerRepository.FindByAudioID(ctx, src.ID)
	if err != nil {
		return fmt.Errorf("finding audio markers: %w", err)
	}

	type rename struct {
		src  string
		dest string
	}

	var toRename []rename

	// both hashes are invariant over the loop, so compute them once
	destHash := dest.GetHash(s.Config.GetVideoFileNamingAlgorithm())
	srcHash := src.GetHash(s.Config.GetVideoFileNamingAlgorithm())

	for _, m := range markers {
		// update the audio id
		m.AudioID = dest.ID
		if err := s.MarkerRepository.Update(ctx, m); err != nil {
			return fmt.Errorf("updating audio marker %d: %w", m.ID, err)
		}

		// move generated files to new location
		toRename = append(toRename, []rename{
			{
				src:  s.Paths.AudioMarkers.GetScreenshotPath(srcHash, int(m.Seconds)),
				dest: s.Paths.AudioMarkers.GetScreenshotPath(destHash, int(m.Seconds)),
			},
			{
				src:  s.Paths.AudioMarkers.GetThumbnailPath(srcHash, int(m.Seconds)),
				dest: s.Paths.AudioMarkers.GetThumbnailPath(destHash, int(m.Seconds)),
			},
			{
				src:  s.Paths.AudioMarkers.GetWebpPreviewPath(srcHash, int(m.Seconds)),
				dest: s.Paths.AudioMarkers.GetWebpPreviewPath(destHash, int(m.Seconds)),
			},
		}...)
	}

	if len(toRename) > 0 {
		txn.AddPostCommitHook(ctx, func(ctx context.Context) {
			// rename the files if they exist; failures are logged, not fatal,
			// since generated files can be recreated
			for _, e := range toRename {
				srcExists, _ := fsutil.FileExists(e.src)
				destExists, _ := fsutil.FileExists(e.dest)
				if srcExists && !destExists {
					destDir := filepath.Dir(e.dest)
					if err := fsutil.EnsureDir(destDir); err != nil {
						logger.Errorf("Error creating generated marker folder %s: %v", destDir, err)
						continue
					}

					if err := os.Rename(e.src, e.dest); err != nil {
						logger.Errorf("Error renaming generated marker file from %s to %s: %v", e.src, e.dest, err)
					}
				}
			}
		})
	}

	return nil
}

View file

@ -9,7 +9,7 @@ import (
"path/filepath"
"strings"
"github.com/stashapp/stash/pkg/file/video"
"github.com/stashapp/stash/pkg/file/audio"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/models/paths"
@ -19,7 +19,7 @@ import (
)
var (
ErrNotVideoFile = errors.New("not a video file")
ErrNotAudioFile = errors.New("not a audio file")
// fingerprint types to match with
// only try to match by data fingerprints, _not_ perceptual fingerprints
@ -29,7 +29,7 @@ var (
type ScanCreatorUpdater interface {
FindByFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error)
FindByFingerprints(ctx context.Context, fp []models.Fingerprint) ([]*models.Audio, error)
GetFiles(ctx context.Context, relatedID int) ([]*models.VideoFile, error)
GetFiles(ctx context.Context, relatedID int) ([]*models.AudioFile, error)
Create(ctx context.Context, newAudio *models.Audio, fileIDs []models.FileID) error
UpdatePartial(ctx context.Context, id int, updatedAudio models.AudioPartial) (*models.Audio, error)
@ -42,7 +42,7 @@ type ScanGalleryFinderUpdater interface {
}
type ScanGenerator interface {
Generate(ctx context.Context, s *models.Audio, f *models.VideoFile) error
Generate(ctx context.Context, s *models.Audio, f *models.AudioFile) error
}
type ScanHandler struct {
@ -50,7 +50,7 @@ type ScanHandler struct {
GalleryFinderUpdater ScanGalleryFinderUpdater
ScanGenerator ScanGenerator
CaptionUpdater video.CaptionUpdater
CaptionUpdater audio.CaptionUpdater
PluginCache *plugin.Cache
FileNamingAlgorithm models.HashAlgorithm
@ -82,13 +82,13 @@ func (h *ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.
return err
}
videoFile, ok := f.(*models.VideoFile)
AudioFile, ok := f.(*models.AudioFile)
if !ok {
return ErrNotVideoFile
return ErrNotAudioFile
}
if oldFile != nil {
if err := video.CleanCaptions(ctx, videoFile, nil, h.CaptionUpdater); err != nil {
if err := audio.CleanCaptions(ctx, AudioFile, nil, h.CaptionUpdater); err != nil {
return fmt.Errorf("cleaning captions: %w", err)
}
}
@ -101,7 +101,7 @@ func (h *ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.
if len(existing) == 0 {
// try also to match file by fingerprints
existing, err = h.CreatorUpdater.FindByFingerprints(ctx, videoFile.Fingerprints.Filter(matchableFingerprintTypes...))
existing, err = h.CreatorUpdater.FindByFingerprints(ctx, AudioFile.Fingerprints.Filter(matchableFingerprintTypes...))
if err != nil {
return fmt.Errorf("finding existing audio by fingerprints: %w", err)
}
@ -109,7 +109,7 @@ func (h *ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.
if len(existing) > 0 {
updateExisting := oldFile != nil
if err := h.associateExisting(ctx, existing, videoFile, updateExisting); err != nil {
if err := h.associateExisting(ctx, existing, AudioFile, updateExisting); err != nil {
return err
}
} else {
@ -118,7 +118,7 @@ func (h *ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.
logger.Infof("%s doesn't exist. Creating new audio...", f.Base().Path)
if err := h.CreatorUpdater.Create(ctx, &newAudio, []models.FileID{videoFile.ID}); err != nil {
if err := h.CreatorUpdater.Create(ctx, &newAudio, []models.FileID{AudioFile.ID}); err != nil {
return fmt.Errorf("creating new audio: %w", err)
}
@ -144,9 +144,9 @@ func (h *ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.
// do this after the commit so that cover generation doesn't hold up the transaction
txn.AddPostCommitHook(ctx, func(ctx context.Context) {
for _, s := range existing {
if err := h.ScanGenerator.Generate(ctx, s, videoFile); err != nil {
if err := h.ScanGenerator.Generate(ctx, s, AudioFile); err != nil {
// just log if cover generation fails. We can try again on rescan
logger.Errorf("Error generating content for %s: %v", videoFile.Path, err)
logger.Errorf("Error generating content for %s: %v", AudioFile.Path, err)
}
}
})
@ -154,7 +154,7 @@ func (h *ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.
return nil
}
func (h *ScanHandler) associateExisting(ctx context.Context, existing []*models.Audio, f *models.VideoFile, updateExisting bool) error {
func (h *ScanHandler) associateExisting(ctx context.Context, existing []*models.Audio, f *models.AudioFile, updateExisting bool) error {
for _, s := range existing {
if err := s.LoadFiles(ctx, h.CreatorUpdater); err != nil {
return err

View file

@ -11,7 +11,7 @@ import (
)
// Config provides the configuration accessors this package requires.
type Config interface {
	GetVideoFileNamingAlgorithm() models.HashAlgorithm
	GetAudioFileNamingAlgorithm() models.HashAlgorithm
}
type Service struct {

View file

@ -6,7 +6,6 @@ import (
"errors"
"strconv"
"testing"
"time"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/models/mocks"
@ -18,9 +17,6 @@ import (
func TestUpdater_IsEmpty(t *testing.T) {
organized := true
ids := []int{1}
stashIDs := []models.StashID{
{},
}
cover := []byte{1}
tests := []struct {
@ -66,18 +62,6 @@ func TestUpdater_IsEmpty(t *testing.T) {
},
false,
},
{
"performer set",
&UpdateSet{
Partial: models.AudioPartial{
StashIDs: &models.UpdateStashIDs{
StashIDs: stashIDs,
Mode: models.RelationshipUpdateModeSet,
},
},
},
false,
},
{
"cover set",
&UpdateSet{
@ -101,7 +85,6 @@ func TestUpdater_Update(t *testing.T) {
badUpdateID
badPerformersID
badTagsID
badStashIDsID
badCoverID
performerID
tagID
@ -109,8 +92,6 @@ func TestUpdater_Update(t *testing.T) {
performerIDs := []int{performerID}
tagIDs := []int{tagID}
stashID := "stashID"
endpoint := "endpoint"
title := "title"
cover := []byte("cover")
@ -156,15 +137,6 @@ func TestUpdater_Update(t *testing.T) {
IDs: tagIDs,
Mode: models.RelationshipUpdateModeSet,
},
StashIDs: &models.UpdateStashIDs{
StashIDs: []models.StashID{
{
StashID: stashID,
Endpoint: endpoint,
},
},
Mode: models.RelationshipUpdateModeSet,
},
},
CoverImage: cover,
},
@ -225,7 +197,6 @@ func TestUpdateSet_UpdateInput(t *testing.T) {
badUpdateID
badPerformersID
badTagsID
badStashIDsID
badCoverID
performerID
tagID
@ -237,23 +208,6 @@ func TestUpdateSet_UpdateInput(t *testing.T) {
performerIDStrs := intslice.IntSliceToStringSlice(performerIDs)
tagIDs := []int{tagID}
tagIDStrs := intslice.IntSliceToStringSlice(tagIDs)
stashID := "stashID"
endpoint := "endpoint"
updatedAt := time.Now()
stashIDs := []models.StashID{
{
StashID: stashID,
Endpoint: endpoint,
UpdatedAt: updatedAt,
},
}
stashIDInputs := []models.StashIDInput{
{
StashID: stashID,
Endpoint: endpoint,
UpdatedAt: &updatedAt,
},
}
title := "title"
cover := []byte("cover")
@ -286,10 +240,6 @@ func TestUpdateSet_UpdateInput(t *testing.T) {
IDs: tagIDs,
Mode: models.RelationshipUpdateModeSet,
},
StashIDs: &models.UpdateStashIDs{
StashIDs: stashIDs,
Mode: models.RelationshipUpdateModeSet,
},
},
CoverImage: cover,
},
@ -297,7 +247,6 @@ func TestUpdateSet_UpdateInput(t *testing.T) {
ID: audioIDStr,
PerformerIds: performerIDStrs,
TagIds: tagIDStrs,
StashIds: stashIDInputs,
CoverImage: &coverB64,
},
},

View file

@ -118,11 +118,11 @@ type VideoFile struct {
// TranscodeScale calculates the dimension scaling for a transcode, where maxSize is the maximum size of the longest dimension of the input video.
// If no scaling is required, then returns 0, 0.
// Returns -2 for the dimension that will scale to maintain aspect ratio.
func (v *VideoFile) TranscodeScale(maxSize int) (int, int) {
func (a *VideoFile) TranscodeScale(maxSize int) (int, int) {
// get the smaller dimension of the video file
videoSize := v.Height
if v.Width < videoSize {
videoSize = v.Width
videoSize := a.Height
if a.Width < videoSize {
videoSize = a.Width
}
// if our streaming resolution is larger than the video dimension
@ -134,7 +134,7 @@ func (v *VideoFile) TranscodeScale(maxSize int) (int, int) {
// we're setting either the width or height
// we'll set the smaller dimension
if v.Width > v.Height {
if a.Width > a.Height {
// set the height
return -2, maxSize
}
@ -365,23 +365,150 @@ func isRotated(s *FFProbeStream) bool {
return false
}
func (v *VideoFile) getAudioStream() *FFProbeStream {
index := v.getStreamIndex("audio", v.JSON)
func (a *VideoFile) getAudioStream() *FFProbeStream {
index := a.getStreamIndex("audio", a.JSON)
if index != -1 {
return &v.JSON.Streams[index]
return &a.JSON.Streams[index]
}
return nil
}
func (v *VideoFile) getVideoStream() *FFProbeStream {
index := v.getStreamIndex("video", v.JSON)
func (a *VideoFile) getVideoStream() *FFProbeStream {
index := a.getStreamIndex("video", a.JSON)
if index != -1 {
return &v.JSON.Streams[index]
return &a.JSON.Streams[index]
}
return nil
}
func (v *VideoFile) getStreamIndex(fileType string, probeJSON FFProbeJSON) int {
// getStreamIndex returns the index of the preferred stream of the given codec
// type, skipping attached pictures (cover art/thumbnails). A stream marked as
// default wins; otherwise the first matching stream is used. Returns -1 when
// no stream matches.
func (a *VideoFile) getStreamIndex(fileType string, probeJSON FFProbeJSON) int {
	fallback := -1
	for i, stream := range probeJSON.Streams {
		// skip cover art/thumbnails
		if stream.CodecType != fileType || stream.Disposition.AttachedPic != 0 {
			continue
		}

		// prefer default stream
		if stream.Disposition.Default == 1 {
			return i
		}

		// backwards compatible behaviour - fall back to first matching stream
		if fallback == -1 {
			fallback = i
		}
	}
	return fallback
}
// AUDIO
// AudioFile represents the ffprobe output for an audio file.
type AudioFile struct {
	JSON FFProbeJSON // raw ffprobe output
	AudioStream *FFProbeStream // primary audio stream; nil when none found
	Path string // path the file was probed at
	Title string // from format-level tags
	Comment string // from format-level tags
	Container string // ffprobe format name
	// FileDuration is the declared (meta-data) duration of the *file*.
	// In most cases we actually care about the duration of the audio stream
	// specifically, because the two can differ slightly (e.g. the audio stream
	// being longer than the declared file duration).
	// NOTE(review): the original comment here was garbled; confirm intent.
	FileDuration float64
	AudioStreamDuration float64
	StartTime float64
	Bitrate int64 // parsed from format.bit_rate
	Size int64 // file size in bytes, from os.Stat
	CreationTime time.Time // from format-level creation_time tag
	AudioCodec string // codec name of the primary audio stream
	SampleRate float64
}
// NewAudioFile runs ffprobe on the given path and returns a AudioFile.
func (f *FFProbe) NewAudioFile(audioPath string) (*AudioFile, error) {
args := []string{
"-v",
"quiet",
"-print_format", "json",
"-show_format",
"-show_streams",
"-show_error",
}
// show_entries stream_side_data=rotation requires 5.x or later ffprobe
if f.version.major >= 5 {
args = append(args, "-show_entries", "stream_side_data=rotation")
}
args = append(args, audioPath)
cmd := stashExec.Command(f.path, args...)
out, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("FFProbe encountered an error with <%s>.\nError JSON:\n%s\nError: %s", audioPath, string(out), err.Error())
}
probeJSON := &FFProbeJSON{}
if err := json.Unmarshal(out, probeJSON); err != nil {
return nil, fmt.Errorf("error unmarshalling audio data for <%s>: %s", audioPath, err.Error())
}
return parseAudio(audioPath, probeJSON)
}
// parseAudio converts raw ffprobe JSON output into an AudioFile.
// It returns an error if probeJSON is nil, if ffprobe itself reported an
// error, or if the file cannot be statted.
func parseAudio(filePath string, probeJSON *FFProbeJSON) (*AudioFile, error) {
	if probeJSON == nil {
		return nil, fmt.Errorf("failed to get ffprobe json for <%s>", filePath)
	}

	result := &AudioFile{}
	result.JSON = *probeJSON

	if result.JSON.Error.Code != 0 {
		return nil, fmt.Errorf("ffprobe error code %d: %s", result.JSON.Error.Code, result.JSON.Error.String)
	}

	result.Path = filePath
	result.Title = probeJSON.Format.Tags.Title
	result.Comment = probeJSON.Format.Tags.Comment
	// parse errors are deliberately ignored; missing/invalid values stay 0
	result.Bitrate, _ = strconv.ParseInt(probeJSON.Format.BitRate, 10, 64)
	result.Container = probeJSON.Format.FormatName
	duration, _ := strconv.ParseFloat(probeJSON.Format.Duration, 64)
	// round to 2 decimal places
	result.FileDuration = math.Round(duration*100) / 100

	fileStat, err := os.Stat(filePath)
	if err != nil {
		statErr := fmt.Errorf("error statting file <%s>: %w", filePath, err)
		logger.Errorf("%v", statErr)
		return nil, statErr
	}
	result.Size = fileStat.Size()
	result.StartTime, _ = strconv.ParseFloat(probeJSON.Format.StartTime, 64)
	result.CreationTime = probeJSON.Format.Tags.CreationTime.Time

	audioStream := result.getAudioStream()
	if audioStream != nil {
		result.AudioCodec = audioStream.CodecName
		result.AudioStream = audioStream
	}
	// NOTE(review): AudioStreamDuration and SampleRate are declared on
	// AudioFile but never populated here — confirm whether they should be
	// read from the audio stream.

	return result, nil
}
// getAudioStream returns a pointer to the primary audio stream of the
// probed file, or nil if no audio stream was found.
func (a *AudioFile) getAudioStream() *FFProbeStream {
	idx := a.getStreamIndex("audio", a.JSON)
	if idx == -1 {
		return nil
	}
	return &a.JSON.Streams[idx]
}
func (a *AudioFile) getStreamIndex(fileType string, probeJSON FFProbeJSON) int {
ret := -1
for i, stream := range probeJSON.Streams {
// skip cover art/thumbnails

View file

@ -18,6 +18,7 @@ const (
MimeMkvAudio string = "audio/x-matroska"
MimeMp4Video string = "video/mp4"
MimeMp4Audio string = "audio/mp4"
MimeMp3Audio string = "audio/mp3"
)
type StreamManager struct {

209
pkg/file/audio/caption.go Normal file
View file

@ -0,0 +1,209 @@
// TODO(audio): update this file
package audio
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/asticode/go-astisub"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/txn"
"golang.org/x/text/language"
)
// CaptionExts lists the supported caption extensions. In a case where vtt
// and srt files are both provided, the vtt file is prioritized due to
// native support.
var CaptionExts = []string{"vtt", "srt"}

// LangUnknown is used for captions without a language code in the filename.
// ISO 639-1 uses 2 or 3 a-z chars for codes, so "00" is a safe non-valid choice.
// https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
const LangUnknown = "00"

// GetCaptionPath generates the path of a caption
// from a given file path, wanted language and caption suffix.
func GetCaptionPath(path, lang, suffix string) string {
	base := strings.TrimSuffix(path, filepath.Ext(path))
	if lang == "" || lang == LangUnknown {
		return base + "." + suffix
	}
	return base + "." + lang + "." + suffix
}
// ReadSubs reads and parses the captions file at path using go-astisub.
func ReadSubs(path string) (*astisub.Subtitles, error) {
	return astisub.OpenFile(path)
}
// IsValidLanguage checks whether the given string is a valid
// ISO 639 language code.
func IsValidLanguage(lang string) bool {
	if _, err := language.ParseBase(lang); err != nil {
		return false
	}
	return true
}
// IsLangInCaptions returns true if a caption with the given language code
// and caption type is already present in captions.
func IsLangInCaptions(lang string, ext string, captions []*models.VideoCaption) bool {
	for _, c := range captions {
		if c.LanguageCode == lang && c.CaptionType == ext {
			return true
		}
	}
	return false
}
// getCaptionPrefix returns the prefix used to search for audio files for
// the provided caption path.
// A caption file can be named scene_filename.srt or scene_filename.en.srt;
// when a valid language code is present it is stripped from the basename.
func getCaptionPrefix(captionPath string) string {
	// caption filename without the extension
	base := strings.TrimSuffix(captionPath, filepath.Ext(captionPath))
	if langExt := filepath.Ext(base); len(langExt) > 2 && IsValidLanguage(langExt[1:]) {
		base = strings.TrimSuffix(base, langExt)
	}
	return base + "."
}
// GetCaptionsLangFromPath returns the language code from a given captions path
// If no valid language is present LangUknown is returned
func getCaptionsLangFromPath(captionPath string) string {
langCode := LangUnknown
basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension
languageExt := filepath.Ext(basename)
if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) {
langCode = languageExt[1:]
}
return langCode
}
// CaptionUpdater reads and writes the captions associated with a file.
type CaptionUpdater interface {
	// GetCaptions returns the captions currently associated with the given file ID.
	GetCaptions(ctx context.Context, fileID models.FileID) ([]*models.VideoCaption, error)
	// UpdateCaptions replaces the captions associated with the given file ID.
	UpdateCaptions(ctx context.Context, fileID models.FileID, captions []*models.VideoCaption) error
}
// MatchesCaption returns true if the caption file matches the audio file
// based on the filename (ignoring extension and any language code).
func MatchesCaption(audioPath, captionPath string) bool {
	audioPrefix := strings.TrimSuffix(audioPath, filepath.Ext(audioPath)) + "."
	return getCaptionPrefix(captionPath) == audioPrefix
}
// AssociateCaptions associates the caption file at captionPath with audio
// file(s) sharing the same basename.
// Returns true if the caption file was matched to an audio file and
// processed, false otherwise. Errors inside the transaction are logged,
// not returned.
func AssociateCaptions(ctx context.Context, captionPath string, txnMgr txn.Manager, fqb models.FileFinder, w CaptionUpdater) bool {
	captionLang := getCaptionsLangFromPath(captionPath)
	captionPrefix := getCaptionPrefix(captionPath)

	matched := false
	if err := txn.WithTxn(ctx, txnMgr, func(ctx context.Context) error {
		files, err := fqb.FindAllByPath(ctx, captionPrefix+"*", true)
		if err != nil {
			return fmt.Errorf("searching for scene %s: %w", captionPrefix, err)
		}

		for _, f := range files {
			// found some files - filter out non audio files
			if _, ok := f.(*models.AudioFile); !ok {
				continue
			}

			fileID := f.Base().ID
			path := f.Base().Path
			logger.Debugf("Matched captions to file %s", path)
			matched = true

			captions, err := w.GetCaptions(ctx, fileID)
			if err != nil {
				return fmt.Errorf("getting captions for file %s: %w", path, err)
			}

			ext := filepath.Ext(captionPath)[1:]
			// only update captions if language code is not already present
			if IsLangInCaptions(captionLang, ext, captions) {
				continue
			}

			newCaption := &models.VideoCaption{
				LanguageCode: captionLang,
				Filename:     filepath.Base(captionPath),
				CaptionType:  ext,
			}
			captions = append(captions, newCaption)
			if err := w.UpdateCaptions(ctx, fileID, captions); err != nil {
				return fmt.Errorf("updating captions for file %s: %w", path, err)
			}
			logger.Debugf("Updated captions for file %s. Added %s", path, captionLang)
		}

		return nil
	}); err != nil {
		logger.Error(err.Error())
	}

	return matched
}
// CleanCaptions removes non existent/accessible language codes from the
// captions associated with the given audio file, persisting the filtered
// list when anything was removed.
func CleanCaptions(ctx context.Context, f *models.AudioFile, txnMgr txn.Manager, w CaptionUpdater) error {
	captions, err := w.GetCaptions(ctx, f.ID)
	if err != nil {
		return fmt.Errorf("getting captions for file %s: %w", f.Path, err)
	}

	// nothing to clean
	if len(captions) == 0 {
		return nil
	}

	filePath := f.Path

	changed := false

	var newCaptions []*models.VideoCaption

	for _, caption := range captions {
		captionPath := caption.Path(filePath)
		_, err := os.Stat(captionPath)
		if errors.Is(err, os.ErrNotExist) {
			// drop captions whose backing file no longer exists on disk
			logger.Infof("Removing non existent caption %s for %s", caption.Filename, f.Path)
			changed = true
		} else {
			// other errors are ignored for the purposes of cleaning
			newCaptions = append(newCaptions, caption)
		}
	}

	if changed {
		fn := func(ctx context.Context) error {
			return w.UpdateCaptions(ctx, f.ID, newCaptions)
		}

		// possible that we are already in a transaction and txnMgr is nil
		// in that case just call the function directly
		if txnMgr == nil {
			err = fn(ctx)
		} else {
			err = txn.WithTxn(ctx, txnMgr, fn)
		}

		if err != nil {
			return fmt.Errorf("updating captions for file %s: %w", f.Path, err)
		}
	}

	return nil
}

View file

@ -0,0 +1,54 @@
// TODO(audio): update this file
package audio
import (
"testing"
"github.com/stretchr/testify/assert"
)
// testCase pairs a caption path with the language code and caption-search
// prefix expected to be derived from it.
type testCase struct {
	captionPath    string
	expectedLang   string
	expectedResult string
}

// testCases covers unix and windows paths, with and without a valid
// embedded ISO 639-1 language code.
var testCases = []testCase{
	{
		captionPath:    "/stash/video.vtt",
		expectedLang:   LangUnknown,
		expectedResult: "/stash/video.",
	},
	{
		captionPath:    "/stash/video.en.vtt",
		expectedLang:   "en",
		expectedResult: "/stash/video.", // lang code valid, remove en part
	},
	{
		captionPath:    "/stash/video.test.srt",
		expectedLang:   LangUnknown,
		expectedResult: "/stash/video.test.", // no lang code/lang code invalid test should remain
	},
	{
		captionPath:    "C:\\videos\\video.fr.srt",
		expectedLang:   "fr",
		expectedResult: "C:\\videos\\video.",
	},
	{
		captionPath:    "C:\\videos\\video.xx.srt",
		expectedLang:   LangUnknown,
		expectedResult: "C:\\videos\\video.xx.", // no lang code/lang code invalid xx should remain
	},
}
// TestGenerateCaptionCandidates verifies that getCaptionPrefix strips the
// extension and any valid language code from a caption path.
func TestGenerateCaptionCandidates(t *testing.T) {
	for _, c := range testCases {
		assert.Equal(t, c.expectedResult, getCaptionPrefix(c.captionPath))
	}
}

// TestGetCaptionsLangFromPath verifies language-code extraction from
// caption paths, falling back to LangUnknown for invalid codes.
func TestGetCaptionsLangFromPath(t *testing.T) {
	for _, l := range testCases {
		assert.Equal(t, l.expectedLang, getCaptionsLangFromPath(l.captionPath))
	}
}

View file

@ -0,0 +1,15 @@
// TODO(audio): update this file
package audio
import (
"path/filepath"
"strings"
)
// GetFunscriptPath returns the path of a file
// with the extension changed to .funscript.
func GetFunscriptPath(path string) string {
	return strings.TrimSuffix(path, filepath.Ext(path)) + ".funscript"
}

65
pkg/file/audio/scan.go Normal file
View file

@ -0,0 +1,65 @@
// TODO(audio): update this file
package audio
import (
"context"
"errors"
"fmt"
"github.com/stashapp/stash/pkg/ffmpeg"
"github.com/stashapp/stash/pkg/file"
"github.com/stashapp/stash/pkg/models"
)
// Decorator adds audio specific fields to a File.
type Decorator struct {
	// FFProbe runs ffprobe against scanned files; must be non-nil before use.
	FFProbe *ffmpeg.FFProbe
}
// Decorate probes the file at f's path with ffprobe and returns an
// AudioFile populated with the container format, codec, duration, sample
// rate and bitrate. The file must reside on an OsFS.
func (d *Decorator) Decorate(ctx context.Context, fs models.FS, f models.File) (models.File, error) {
	if d.FFProbe == nil {
		return f, errors.New("ffprobe not configured")
	}

	base := f.Base()

	// TODO - copy to temp file if not an OsFS
	if _, isOs := fs.(*file.OsFS); !isOs {
		return f, errors.New("audio.Decorate: only OsFS is supported")
	}

	probe := d.FFProbe
	audioFile, err := probe.NewAudioFile(base.Path)
	if err != nil {
		return f, fmt.Errorf("running ffprobe on %q: %w", base.Path, err)
	}

	container, err := ffmpeg.MatchContainer(audioFile.Container, base.Path)
	if err != nil {
		return f, fmt.Errorf("matching container for %q: %w", base.Path, err)
	}

	return &models.AudioFile{
		BaseFile:   base,
		Format:     string(container),
		AudioCodec: audioFile.AudioCodec,
		Duration:   audioFile.FileDuration,
		SampleRate: audioFile.SampleRate,
		BitRate:    audioFile.Bitrate,
	}, nil
}
// IsMissingMetadata reports whether f lacks ffprobe-derived metadata and
// should be (re)probed. Files that are not AudioFiles are always
// considered missing.
func (d *Decorator) IsMissingMetadata(ctx context.Context, fs models.FS, f models.File) bool {
	const (
		unsetString = "unset"
		unsetNumber = -1
	)

	af, ok := f.(*models.AudioFile)
	if !ok {
		return true
	}

	switch {
	case af.AudioCodec == unsetString, af.Format == unsetString:
		return true
	case af.SampleRate == unsetNumber, af.Duration == unsetNumber, af.BitRate == unsetNumber:
		return true
	default:
		return false
	}
}

View file

@ -4,37 +4,12 @@ package models
import "context"
// DuplicationCriterionInput filters query results by duplication status.
type DuplicationCriterionInput struct {
	// Deprecated: Use Phash field instead. Kept for backwards compatibility.
	Duplicated *bool `json:"duplicated"`
	// Currently unimplemented. Intended for phash distance matching.
	Distance *int `json:"distance"`
	// Filter by phash duplication
	Phash *bool `json:"phash"`
	// Filter by URL duplication
	URL *bool `json:"url"`
	// Filter by Stash ID duplication
	StashID *bool `json:"stash_id"`
	// Filter by title duplication
	Title *bool `json:"title"`
}

// FileDuplicationCriterionInput filters files by duplication status.
type FileDuplicationCriterionInput struct {
	// Deprecated: Use Phash field instead. Kept for backwards compatibility.
	Duplicated *bool `json:"duplicated"`
	// Currently unimplemented. Intended for phash distance matching.
	Distance *int `json:"distance"`
	// Filter by phash duplication
	Phash *bool `json:"phash"`
}
type AudioFilterType struct {
OperatorFilter[AudioFilterType]
ID *IntCriterionInput `json:"id"`
Title *StringCriterionInput `json:"title"`
Code *StringCriterionInput `json:"code"`
Details *StringCriterionInput `json:"details"`
Director *StringCriterionInput `json:"director"`
ID *IntCriterionInput `json:"id"`
Title *StringCriterionInput `json:"title"`
Code *StringCriterionInput `json:"code"`
Details *StringCriterionInput `json:"details"`
// Filter by file oshash
Oshash *StringCriterionInput `json:"oshash"`
// Filter by file checksum
@ -59,12 +34,10 @@ type AudioFilterType struct {
Resolution *ResolutionCriterionInput `json:"resolution"`
// Filter by orientation
Orientation *OrientationCriterionInput `json:"orientation"`
// Filter by framerate
Framerate *IntCriterionInput `json:"framerate"`
// Filter by samplerate
Samplerate *IntCriterionInput `json:"samplerate"`
// Filter by bitrate
Bitrate *IntCriterionInput `json:"bitrate"`
// Filter by video codec
VideoCodec *StringCriterionInput `json:"video_codec"`
// Filter by audio codec
AudioCodec *StringCriterionInput `json:"audio_codec"`
// Filter by duration (in seconds)
@ -95,20 +68,8 @@ type AudioFilterType struct {
Performers *MultiCriterionInput `json:"performers"`
// Filter by performer count
PerformerCount *IntCriterionInput `json:"performer_count"`
// Filter by StashID
StashID *StringCriterionInput `json:"stash_id"`
// Filter by StashID Endpoint
StashIDEndpoint *StashIDCriterionInput `json:"stash_id_endpoint"`
// Filter by StashIDs Endpoint
StashIDsEndpoint *StashIDsCriterionInput `json:"stash_ids_endpoint"`
// Filter by StashID count
StashIDCount *IntCriterionInput `json:"stash_id_count"`
// Filter by url
URL *StringCriterionInput `json:"url"`
// Filter by interactive
Interactive *bool `json:"interactive"`
// Filter by InteractiveSpeed
InteractiveSpeed *IntCriterionInput `json:"interactive_speed"`
// Filter by captions
Captions *StringCriterionInput `json:"captions"`
// Filter by resume time
@ -131,10 +92,6 @@ type AudioFilterType struct {
TagsFilter *TagFilterType `json:"tags_filter"`
// Filter by related groups that meet this criteria
GroupsFilter *GroupFilterType `json:"groups_filter"`
// Filter by related movies that meet this criteria
MoviesFilter *GroupFilterType `json:"movies_filter"`
// Filter by related markers that meet this criteria
MarkersFilter *AudioMarkerFilterType `json:"markers_filter"`
// Filter by related files that meet this criteria
FilesFilter *FileFilterType `json:"files_filter"`
// Filter by created at
@ -164,12 +121,6 @@ type AudioQueryResult struct {
resolveErr error
}
// AudioMovieInput is used for groups and movies
type AudioMovieInput struct {
MovieID string `json:"movie_id"`
AudioIndex *int `json:"audio_index"`
}
type AudioGroupInput struct {
GroupID string `json:"group_id"`
AudioIndex *int `json:"audio_index"`
@ -179,7 +130,6 @@ type AudioCreateInput struct {
Title *string `json:"title"`
Code *string `json:"code"`
Details *string `json:"details"`
Director *string `json:"director"`
URL *string `json:"url"`
Urls []string `json:"urls"`
Date *string `json:"date"`
@ -188,12 +138,10 @@ type AudioCreateInput struct {
StudioID *string `json:"studio_id"`
GalleryIds []string `json:"gallery_ids"`
PerformerIds []string `json:"performer_ids"`
Movies []AudioMovieInput `json:"movies"`
Groups []AudioGroupInput `json:"groups"`
TagIds []string `json:"tag_ids"`
// This should be a URL or a base64 encoded data URL
CoverImage *string `json:"cover_image"`
StashIds []StashIDInput `json:"stash_ids"`
CoverImage *string `json:"cover_image"`
// The first id will be assigned as primary.
// Files will be reassigned from existing audios if applicable.
// Files must not already be primary for another audio.
@ -207,7 +155,6 @@ type AudioUpdateInput struct {
Title *string `json:"title"`
Code *string `json:"code"`
Details *string `json:"details"`
Director *string `json:"director"`
URL *string `json:"url"`
Urls []string `json:"urls"`
Date *string `json:"date"`
@ -217,16 +164,14 @@ type AudioUpdateInput struct {
StudioID *string `json:"studio_id"`
GalleryIds []string `json:"gallery_ids"`
PerformerIds []string `json:"performer_ids"`
Movies []AudioMovieInput `json:"movies"`
Groups []AudioGroupInput `json:"groups"`
TagIds []string `json:"tag_ids"`
// This should be a URL or a base64 encoded data URL
CoverImage *string `json:"cover_image"`
StashIds []StashIDInput `json:"stash_ids"`
ResumeTime *float64 `json:"resume_time"`
PlayDuration *float64 `json:"play_duration"`
PlayCount *int `json:"play_count"`
PrimaryFileID *string `json:"primary_file_id"`
CoverImage *string `json:"cover_image"`
ResumeTime *float64 `json:"resume_time"`
PlayDuration *float64 `json:"play_duration"`
PlayCount *int `json:"play_count"`
PrimaryFileID *string `json:"primary_file_id"`
CustomFields *CustomFieldsInput
}

View file

@ -33,6 +33,7 @@ type FileFilterType struct {
SceneCount *IntCriterionInput `json:"scene_count"`
ImageCount *IntCriterionInput `json:"image_count"`
GalleryCount *IntCriterionInput `json:"gallery_count"`
AudioFilter *AudioFilterType `json:"audio_filter"`
ScenesFilter *SceneFilterType `json:"scenes_filter"`
ImagesFilter *ImageFilterType `json:"images_filter"`
GalleriesFilter *GalleryFilterType `json:"galleries_filter"`

View file

@ -28,3 +28,32 @@ type SceneMovieID struct {
MovieID string `json:"movie_id"`
SceneIndex *string `json:"scene_index"`
}
// Audio

// AudioParserInput holds the options for the audio filename parser.
type AudioParserInput struct {
	IgnoreWords          []string `json:"ignoreWords"`
	WhitespaceCharacters *string  `json:"whitespaceCharacters"`
	CapitalizeTitle      *bool    `json:"capitalizeTitle"`
	IgnoreOrganized      *bool    `json:"ignoreOrganized"`
}
// AudioParserResult holds the metadata extracted for a single audio by the
// filename parser, together with the audio it applies to.
type AudioParserResult struct {
	// the json tag previously read "scene" — a copy-paste from the scene
	// parser; this field holds an Audio.
	Audio        *Audio          `json:"audio"`
	Title        *string         `json:"title"`
	Code         *string         `json:"code"`
	Details      *string         `json:"details"`
	URL          *string         `json:"url"`
	Date         *string         `json:"date"`
	Rating       *int            `json:"rating"`
	Rating100    *int            `json:"rating100"`
	StudioID     *string         `json:"studio_id"`
	PerformerIds []string        `json:"performer_ids"`
	Groups       []*AudioGroupID `json:"groups"`
	TagIds       []string        `json:"tag_ids"`
}
type AudioGroupID struct {
GroupID string `json:"group_id"`
AudioIndex *string `json:"scene_index"`
}

View file

@ -0,0 +1,110 @@
package jsonschema
import (
"fmt"
"os"
"strconv"
jsoniter "github.com/json-iterator/go"
"github.com/stashapp/stash/pkg/fsutil"
"github.com/stashapp/stash/pkg/models/json"
)
// AudioFile is the JSON export representation of an audio file's metadata.
type AudioFile struct {
	ModTime    json.JSONTime `json:"mod_time,omitempty"`
	Size       string        `json:"size"`
	Duration   string        `json:"duration"`
	AudioCodec string        `json:"audio_codec"`
	Format     string        `json:"format"`
	// NOTE(review): Width/Height look inherited from the video file schema —
	// confirm whether they are meaningful for audio files.
	Width      int    `json:"width"`
	Height     int    `json:"height"`
	Samplerate string `json:"samplerate"`
	Bitrate    int    `json:"bitrate"`
}
// AudioGroup links an exported audio to a group, with an optional index
// within the group.
type AudioGroup struct {
	// the JSON key "movieName" — presumably kept for compatibility with
	// movie-based exports; confirm before renaming.
	GroupName  string `json:"movieName,omitempty"`
	AudioIndex int    `json:"audio_index,omitempty"`
}
// Audio is the JSON export representation of an audio and its
// relationships.
type Audio struct {
	Title  string `json:"title,omitempty"`
	Code   string `json:"code,omitempty"`
	Studio string `json:"studio,omitempty"`
	// deprecated - for import only
	URL       string   `json:"url,omitempty"`
	URLs      []string `json:"urls,omitempty"`
	Date      string   `json:"date,omitempty"`
	Rating    int      `json:"rating,omitempty"`
	Organized bool     `json:"organized,omitempty"`
	// deprecated - for import only
	OCounter   int          `json:"o_counter,omitempty"`
	Details    string       `json:"details,omitempty"`
	Galleries  []GalleryRef `json:"galleries,omitempty"`
	Performers []string     `json:"performers,omitempty"`
	// the JSON key "movies" — presumably kept for compatibility with
	// movie-based exports; confirm before renaming.
	Groups []AudioGroup `json:"movies,omitempty"`
	Tags   []string     `json:"tags,omitempty"`
	// Markers []AudioMarker `json:"markers,omitempty"`
	Files     []string      `json:"files,omitempty"`
	Cover     string        `json:"cover,omitempty"`
	CreatedAt json.JSONTime `json:"created_at,omitempty"`
	UpdatedAt json.JSONTime `json:"updated_at,omitempty"`
	// deprecated - for import only
	LastPlayedAt json.JSONTime `json:"last_played_at,omitempty"`
	ResumeTime   float64       `json:"resume_time,omitempty"`
	// deprecated - for import only
	PlayCount    int                    `json:"play_count,omitempty"`
	PlayHistory  []json.JSONTime        `json:"play_history,omitempty"`
	OHistory     []json.JSONTime        `json:"o_history,omitempty"`
	PlayDuration float64                `json:"play_duration,omitempty"`
	CustomFields map[string]interface{} `json:"custom_fields,omitempty"`
}
// Filename generates the export filename for the audio: the sanitised
// title (or the provided basename as a fallback), followed by the file
// hash — or by the id when the audio has no file — and a .json extension.
func (s Audio) Filename(id int, basename string, hash string) string {
	name := fsutil.SanitiseBasename(s.Title)
	if name == "" {
		name = basename
	}

	var suffix string
	if hash != "" {
		suffix = hash
	} else {
		// audios may have no file and therefore no hash
		suffix = strconv.Itoa(id)
	}

	return name + "." + suffix + ".json"
}
// LoadAudioFile reads and decodes an exported Audio JSON file from
// filePath.
func LoadAudioFile(filePath string) (*Audio, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var ret Audio
	decoder := jsoniter.ConfigCompatibleWithStandardLibrary.NewDecoder(f)
	if err := decoder.Decode(&ret); err != nil {
		return nil, err
	}
	return &ret, nil
}
// SaveAudioFile marshals the given audio to JSON and writes it to
// filePath. Returns an error if audio is nil.
func SaveAudioFile(filePath string, audio *Audio) error {
	if audio == nil {
		return fmt.Errorf("audio must not be nil")
	}
	return marshalToFile(filePath, audio)
}

File diff suppressed because it is too large Load diff

View file

@ -18,6 +18,7 @@ type Database struct {
Image *ImageReaderWriter
Group *GroupReaderWriter
Performer *PerformerReaderWriter
Audio *AudioReaderWriter
Scene *SceneReaderWriter
SceneMarker *SceneMarkerReaderWriter
Studio *StudioReaderWriter
@ -67,6 +68,7 @@ func NewDatabase() *Database {
Image: &ImageReaderWriter{},
Group: &GroupReaderWriter{},
Performer: &PerformerReaderWriter{},
Audio: &AudioReaderWriter{},
Scene: &SceneReaderWriter{},
SceneMarker: &SceneMarkerReaderWriter{},
Studio: &StudioReaderWriter{},
@ -83,6 +85,7 @@ func (db *Database) AssertExpectations(t mock.TestingT) {
db.Image.AssertExpectations(t)
db.Group.AssertExpectations(t)
db.Performer.AssertExpectations(t)
db.Audio.AssertExpectations(t)
db.Scene.AssertExpectations(t)
db.SceneMarker.AssertExpectations(t)
db.Studio.AssertExpectations(t)
@ -110,6 +113,7 @@ func (db *Database) Repository() models.Repository {
Image: db.Image,
Group: db.Group,
Performer: db.Performer,
Audio: db.Audio,
Scene: db.Scene,
SceneMarker: db.SceneMarker,
Studio: db.Studio,

View file

@ -31,6 +31,31 @@ func SceneQueryResult(scenes []*models.Scene, count int) *models.SceneQueryResul
return ret
}
// audioResolver is a test helper that resolves audio queries from a fixed
// slice of audios.
type audioResolver struct {
	audios []*models.Audio
}

// Find is not implemented for the mock resolver.
func (s *audioResolver) Find(ctx context.Context, id int) (*models.Audio, error) {
	panic("not implemented")
}

// FindMany returns the fixed audio slice regardless of ids.
func (s *audioResolver) FindMany(ctx context.Context, ids []int) ([]*models.Audio, error) {
	return s.audios, nil
}

// FindByIDs returns the fixed audio slice regardless of ids.
func (s *audioResolver) FindByIDs(ctx context.Context, ids []int) ([]*models.Audio, error) {
	return s.audios, nil
}
// AudioQueryResult builds a models.AudioQueryResult backed by a fixed
// slice of audios and the provided total count.
func AudioQueryResult(audios []*models.Audio, count int) *models.AudioQueryResult {
	resolver := &audioResolver{audios: audios}
	result := models.NewAudioQueryResult(resolver)
	result.Count = count
	return result
}
type imageResolver struct {
images []*models.Image
}

View file

@ -1,3 +1,5 @@
// TODO(audio): update this file
package models
import (
@ -10,19 +12,19 @@ import (
// Audio stores the metadata for a single video audio.
type Audio struct {
ID int `json:"id"`
Title string `json:"title"`
Code string `json:"code"`
Details string `json:"details"`
Artists string `json:"artists"`
Date *Date `json:"date"`
ID int `json:"id"`
Title string `json:"title"`
Code string `json:"code"`
Details string `json:"details"`
Artists string `json:"artists"`
Date *Date `json:"date"`
// Rating expressed in 1-100 scale
Rating *int `json:"rating"`
Organized bool `json:"organized"`
StudioID *int `json:"studio_id"`
// transient - not persisted
Files RelatedVideoFiles
Files RelatedAudioFiles
PrimaryFileID *FileID
// transient - path of primary file - empty if no files
Path string
@ -37,12 +39,11 @@ type Audio struct {
ResumeTime float64 `json:"resume_time"`
PlayDuration float64 `json:"play_duration"`
URLs RelatedStrings `json:"urls"`
GalleryIDs RelatedIDs `json:"gallery_ids"`
TagIDs RelatedIDs `json:"tag_ids"`
PerformerIDs RelatedIDs `json:"performer_ids"`
Groups RelatedGroups `json:"groups"`
StashIDs RelatedStashIDs `json:"stash_ids"`
URLs RelatedStrings `json:"urls"`
GalleryIDs RelatedIDs `json:"gallery_ids"`
TagIDs RelatedIDs `json:"tag_ids"`
PerformerIDs RelatedIDs `json:"performer_ids"`
Groups RelatedGroupsAudio `json:"groups"`
}
func NewAudio() Audio {
@ -70,11 +71,10 @@ type UpdateAudioInput struct {
// AudioPartial represents part of a Audio object. It is used to update
// the database entry.
type AudioPartial struct {
Title OptionalString
Code OptionalString
Details OptionalString
Director OptionalString
Date OptionalDate
Title OptionalString
Code OptionalString
Details OptionalString
Date OptionalDate
// Rating expressed in 1-100 scale
Rating OptionalInt
Organized OptionalBool
@ -88,8 +88,7 @@ type AudioPartial struct {
GalleryIDs *UpdateIDs
TagIDs *UpdateIDs
PerformerIDs *UpdateIDs
GroupIDs *UpdateGroupIDs
StashIDs *UpdateStashIDs
GroupIDs *UpdateGroupIDsAudio
PrimaryFileID *FileID
}
@ -106,14 +105,14 @@ func (s *Audio) LoadURLs(ctx context.Context, l URLLoader) error {
})
}
func (s *Audio) LoadFiles(ctx context.Context, l VideoFileLoader) error {
return s.Files.load(func() ([]*VideoFile, error) {
func (s *Audio) LoadFiles(ctx context.Context, l AudioFileLoader) error {
return s.Files.load(func() ([]*AudioFile, error) {
return l.GetFiles(ctx, s.ID)
})
}
func (s *Audio) LoadPrimaryFile(ctx context.Context, l FileGetter) error {
return s.Files.loadPrimary(func() (*VideoFile, error) {
return s.Files.loadPrimary(func() (*AudioFile, error) {
if s.PrimaryFileID == nil {
return nil, nil
}
@ -123,10 +122,10 @@ func (s *Audio) LoadPrimaryFile(ctx context.Context, l FileGetter) error {
return nil, err
}
var vf *VideoFile
var vf *AudioFile
if len(f) > 0 {
var ok bool
vf, ok = f[0].(*VideoFile)
vf, ok = f[0].(*AudioFile)
if !ok {
return nil, errors.New("not a video file")
}
@ -159,12 +158,6 @@ func (s *Audio) LoadGroups(ctx context.Context, l AudioGroupLoader) error {
})
}
func (s *Audio) LoadStashIDs(ctx context.Context, l StashIDLoader) error {
return s.StashIDs.load(func() ([]StashID, error) {
return l.GetStashIDs(ctx, s.ID)
})
}
func (s *Audio) LoadRelationships(ctx context.Context, l AudioReader) error {
if err := s.LoadURLs(ctx, l); err != nil {
return err
@ -186,10 +179,6 @@ func (s *Audio) LoadRelationships(ctx context.Context, l AudioReader) error {
return err
}
if err := s.LoadStashIDs(ctx, l); err != nil {
return err
}
if err := s.LoadFiles(ctx, l); err != nil {
return err
}
@ -206,27 +195,19 @@ func (s AudioPartial) UpdateInput(id int) AudioUpdateInput {
dateStr = &v
}
var stashIDs StashIDs
if s.StashIDs != nil {
stashIDs = StashIDs(s.StashIDs.StashIDs)
}
ret := AudioUpdateInput{
ID: strconv.Itoa(id),
Title: s.Title.Ptr(),
Code: s.Code.Ptr(),
Details: s.Details.Ptr(),
Director: s.Director.Ptr(),
Urls: s.URLs.Strings(),
Date: dateStr,
Rating100: s.Rating.Ptr(),
Organized: s.Organized.Ptr(),
StudioID: s.StudioID.StringPtr(),
GalleryIds: s.GalleryIDs.IDStrings(),
PerformerIds: s.PerformerIDs.IDStrings(),
Movies: s.GroupIDs.AudioMovieInputs(),
Groups: s.GroupIDs.GroupInputs(),
TagIds: s.TagIDs.IDStrings(),
StashIds: stashIDs.ToStashIDInputs(),
}
return ret
@ -269,20 +250,18 @@ func (s Audio) GetHash(hashAlgorithm HashAlgorithm) string {
type AudioFileType struct {
Size *string `graphql:"size" json:"size"`
Duration *float64 `graphql:"duration" json:"duration"`
VideoCodec *string `graphql:"video_codec" json:"video_codec"`
AudioCodec *string `graphql:"audio_codec" json:"audio_codec"`
Width *int `graphql:"width" json:"width"`
Height *int `graphql:"height" json:"height"`
Framerate *float64 `graphql:"framerate" json:"framerate"`
Samplerate *float64 `graphql:"samplerate" json:"samplerate"`
Bitrate *int `graphql:"bitrate" json:"bitrate"`
}
// VideoCaption describes a caption (subtitle) file associated with a media
// file.
type VideoCaption struct {
	LanguageCode string `json:"language_code"`
	Filename     string `json:"filename"`
	CaptionType  string `json:"caption_type"`
}

// TODO(audio): don't know if we need this, using VideoCaption for now due to `pkg/models/repository_file.go` and `FileReader` using
// type AudioCaption struct {
// 	LanguageCode string `json:"language_code"`
// 	Filename     string `json:"filename"`
// 	CaptionType  string `json:"caption_type"`
// }

// Path returns the full path of the caption file: its filename resolved in
// the directory of the associated media file.
func (c VideoCaption) Path(filePath string) string {
	dir := filepath.Dir(filePath)
	return filepath.Join(dir, c.Filename)
}

// func (c AudioCaption) Path(filePath string) string {
// 	return filepath.Join(filepath.Dir(filePath), c.Filename)
// }

View file

@ -328,3 +328,43 @@ func (f VideoFile) FrameRateFinite() float64 {
}
return ret
}
// AudioFile is an extension of BaseFile to represent audio files.
type AudioFile struct {
	*BaseFile
	// Format is the container format.
	Format     string  `json:"format"`
	Duration   float64 `json:"duration"`
	AudioCodec string  `json:"audio_codec"`
	SampleRate float64 `json:"sample_rate"`
	BitRate    int64   `json:"bitrate"`
}
// GetFormat returns the container format of the audio file.
func (f AudioFile) GetFormat() string {
	return f.Format
}

// Clone returns a copy of the audio file with its BaseFile cloned as well.
func (f AudioFile) Clone() (ret File) {
	dup := f
	dup.BaseFile = f.BaseFile.Clone().(*BaseFile)
	ret = &dup
	return
}
// #1572 - Inf and NaN values cause the JSON marshaller to fail
// Replace these values with 0 rather than erroring

// DurationFinite returns the duration, or 0 when it is Inf or NaN.
func (f AudioFile) DurationFinite() float64 {
	if d := f.Duration; !math.IsInf(d, 0) && !math.IsNaN(d) {
		return d
	}
	return 0
}

// SampleRateFinite returns the sample rate, or 0 when it is Inf or NaN.
func (f AudioFile) SampleRateFinite() float64 {
	if sr := f.SampleRate; !math.IsInf(sr, 0) && !math.IsNaN(sr) {
		return sr
	}
	return 0
}

View file

@ -73,3 +73,49 @@ type GroupIDDescription struct {
GroupID int `json:"group_id"`
Description string `json:"description"`
}
// Audio

// GroupsAudios links an audio to a group, with an optional ordering index
// within that group.
type GroupsAudios struct {
	GroupID    int  `json:"group_id"`
	AudioIndex *int `json:"audio_index"`
}

// AudioGroupInput converts the relationship to its input form, with the
// group ID rendered as a string.
func (s GroupsAudios) AudioGroupInput() AudioGroupInput {
	return AudioGroupInput{
		GroupID:    strconv.Itoa(s.GroupID),
		AudioIndex: s.AudioIndex,
	}
}

// Equal reports whether o refers to the same group with the same index.
func (s GroupsAudios) Equal(o GroupsAudios) bool {
	if s.GroupID != o.GroupID {
		return false
	}
	if s.AudioIndex == nil || o.AudioIndex == nil {
		// equal only when both indexes are absent
		return s.AudioIndex == o.AudioIndex
	}
	return *s.AudioIndex == *o.AudioIndex
}
// UpdateGroupIDsAudio specifies a set of audio-group relationships and the
// mode used to apply them.
type UpdateGroupIDsAudio struct {
	Groups []GroupsAudios         `json:"groups"`
	Mode   RelationshipUpdateMode `json:"mode"`
}

// GroupInputs converts the relationships to their input form. Returns nil
// when the receiver is nil.
func (u *UpdateGroupIDsAudio) GroupInputs() []AudioGroupInput {
	if u == nil {
		return nil
	}

	inputs := make([]AudioGroupInput, 0, len(u.Groups))
	for _, g := range u.Groups {
		inputs = append(inputs, g.AudioGroupInput())
	}
	return inputs
}

// AddUnique appends v unless a relationship with the same group ID is
// already present.
func (u *UpdateGroupIDsAudio) AddUnique(v GroupsAudios) {
	for _, existing := range u.Groups {
		if existing.GroupID == v.GroupID {
			return
		}
	}
	u.Groups = append(u.Groups, v)
}

View file

@ -11,6 +11,7 @@ type Paths struct {
Generated *generatedPaths
Scene *scenePaths
Audio *audioPaths
SceneMarkers *sceneMarkerPaths
Blobs string
}

View file

@ -0,0 +1,56 @@
// TODO(audio): update this file
package paths
import (
"path/filepath"
"github.com/stashapp/stash/pkg/fsutil"
)
// audioPaths resolves generated-file locations (transcodes, previews,
// sprites, etc.) for audio files.
type audioPaths struct {
	generatedPaths
}

// newAudioPaths builds an audioPaths from the generated paths of p.
func newAudioPaths(p Paths) *audioPaths {
	sp := audioPaths{
		generatedPaths: *p.Generated,
	}
	return &sp
}
// GetLegacyScreenshotPath returns the legacy jpg screenshot path for the
// given checksum.
func (p *audioPaths) GetLegacyScreenshotPath(checksum string) string {
	return filepath.Join(p.Screenshots, checksum+".jpg")
}

// GetTranscodePath returns the mp4 transcode path for the given checksum.
func (p *audioPaths) GetTranscodePath(checksum string) string {
	return filepath.Join(p.Transcodes, checksum+".mp4")
}

// GetStreamPath returns the transcode path when a transcode file exists,
// otherwise the original audio path.
func (p *audioPaths) GetStreamPath(audioPath string, checksum string) string {
	transcodePath := p.GetTranscodePath(checksum)
	if exists, _ := fsutil.FileExists(transcodePath); exists {
		return transcodePath
	}
	return audioPath
}

// GetVideoPreviewPath returns the mp4 preview path for the given checksum.
func (p *audioPaths) GetVideoPreviewPath(checksum string) string {
	return filepath.Join(p.Screenshots, checksum+".mp4")
}

// GetWebpPreviewPath returns the webp preview path for the given checksum.
func (p *audioPaths) GetWebpPreviewPath(checksum string) string {
	return filepath.Join(p.Screenshots, checksum+".webp")
}

// GetSpriteImageFilePath returns the sprite image path for the given checksum.
func (p *audioPaths) GetSpriteImageFilePath(checksum string) string {
	return filepath.Join(p.Vtt, checksum+"_sprite.jpg")
}

// GetSpriteVttFilePath returns the sprite vtt path for the given checksum.
func (p *audioPaths) GetSpriteVttFilePath(checksum string) string {
	return filepath.Join(p.Vtt, checksum+"_thumbs.vtt")
}

// GetInteractiveHeatmapPath returns the interactive heatmap png path for
// the given checksum.
func (p *audioPaths) GetInteractiveHeatmapPath(checksum string) string {
	return filepath.Join(p.InteractiveHeatmap, checksum+".png")
}

View file

@ -35,6 +35,10 @@ type FileIDLoader interface {
GetManyFileIDs(ctx context.Context, ids []int) ([][]FileID, error)
}
// AudioGroupLoader loads the group relationships of an audio by its ID.
type AudioGroupLoader interface {
	GetGroups(ctx context.Context, id int) ([]GroupsAudios, error)
}
type SceneGroupLoader interface {
GetGroups(ctx context.Context, id int) ([]GroupsScenes, error)
}
@ -54,6 +58,9 @@ type StashIDLoader interface {
// VideoFileLoader loads the video files attached to an entity by its ID.
type VideoFileLoader interface {
	GetFiles(ctx context.Context, relatedID int) ([]*VideoFile, error)
}
// AudioFileLoader loads the audio files attached to an entity by its ID.
type AudioFileLoader interface {
	GetFiles(ctx context.Context, relatedID int) ([]*AudioFile, error)
}
type FileLoader interface {
GetFiles(ctx context.Context, relatedID int) ([]File, error)
@ -195,6 +202,77 @@ func (r *RelatedGroups) load(fn func() ([]GroupsScenes, error)) error {
return nil
}
// Audio

// RelatedGroupsAudio represents a lazily-loaded list of groups related to an
// audio. A nil internal list means "not loaded"; use NewRelatedGroupsAudio
// or load to populate it.
type RelatedGroupsAudio struct {
	list []GroupsAudios
}
// NewRelatedGroupsAudio returns a loaded RelatedGroupsAudio object with the
// provided groups.
// Loaded will return true when called on the returned object if the provided
// slice is not nil.
func NewRelatedGroupsAudio(list []GroupsAudios) RelatedGroupsAudio {
	return RelatedGroupsAudio{
		list: list,
	}
}
// Loaded returns true if the relationship has been loaded. A nil list means
// not loaded; an empty non-nil list counts as loaded.
func (r RelatedGroupsAudio) Loaded() bool {
	return r.list != nil
}
// mustLoaded panics if the relationship has not been loaded.
func (r RelatedGroupsAudio) mustLoaded() {
	if r.Loaded() {
		return
	}
	panic("list has not been loaded")
}
// List returns the related Groups. Panics if the relationship has not been
// loaded.
func (r RelatedGroupsAudio) List() []GroupsAudios {
	r.mustLoaded()

	return r.list
}
// Add appends the provided groups to the list. Panics if the relationship
// has not been loaded.
func (r *RelatedGroupsAudio) Add(groups ...GroupsAudios) {
	r.mustLoaded()

	r.list = append(r.list, groups...)
}
// ForID returns the GroupsAudios object for the given group ID, or nil if
// not found. Panics if the relationship has not been loaded. The returned
// pointer refers to a copy of the list entry, not to the list itself.
func (r *RelatedGroupsAudio) ForID(id int) *GroupsAudios {
	r.mustLoaded()

	for i := range r.list {
		if r.list[i].GroupID == id {
			found := r.list[i]
			return &found
		}
	}

	return nil
}
// load populates the list via fn unless it is already loaded. A nil result
// from fn is normalized to an empty slice so that Loaded reports true
// afterwards.
func (r *RelatedGroupsAudio) load(fn func() ([]GroupsAudios, error)) error {
	if r.Loaded() {
		return nil
	}

	loaded, err := fn()
	if err != nil {
		return err
	}
	if loaded == nil {
		loaded = []GroupsAudios{}
	}
	r.list = loaded

	return nil
}
type RelatedGroupDescriptions struct {
list []GroupIDDescription
}
@ -430,6 +508,105 @@ func (r *RelatedVideoFiles) loadPrimary(fn func() (*VideoFile, error)) error {
return nil
}
// Audio

// RelatedAudioFiles represents the lazily-loaded audio files attached to an
// entity. The primary file is tracked separately from the full list and has
// its own loaded flag, since it can be loaded without the full list.
type RelatedAudioFiles struct {
	primaryFile   *AudioFile
	files         []*AudioFile
	primaryLoaded bool
}
// NewRelatedAudioFiles returns a loaded RelatedAudioFiles with the provided
// files; the first file, if any, becomes the primary file.
func NewRelatedAudioFiles(files []*AudioFile) RelatedAudioFiles {
	var primary *AudioFile
	if len(files) > 0 {
		primary = files[0]
	}

	return RelatedAudioFiles{
		files:         files,
		primaryFile:   primary,
		primaryLoaded: true,
	}
}
// SetPrimary sets the primary file and marks the primary relationship as
// loaded. It does not modify the file list.
func (r *RelatedAudioFiles) SetPrimary(f *AudioFile) {
	r.primaryFile = f
	r.primaryLoaded = true
}
// Set replaces the file list and marks the primary as loaded. The first file
// (if any) becomes the primary file; an empty list leaves any existing
// primary file in place.
func (r *RelatedAudioFiles) Set(f []*AudioFile) {
	r.files = f
	r.primaryLoaded = true
	if len(f) > 0 {
		r.primaryFile = f[0]
	}
}
// Loaded returns true if the file list relationship has been loaded. A nil
// list means not loaded.
func (r RelatedAudioFiles) Loaded() bool {
	return r.files != nil
}
// PrimaryLoaded returns true if the primary file relationship has been
// loaded.
func (r RelatedAudioFiles) PrimaryLoaded() bool {
	return r.primaryLoaded
}
// List returns the related files. Panics if the relationship has not been
// loaded.
func (r RelatedAudioFiles) List() []*AudioFile {
	if r.Loaded() {
		return r.files
	}
	panic("relationship has not been loaded")
}
// Primary returns the primary file. Panics if the primary file relationship
// has not been loaded.
func (r RelatedAudioFiles) Primary() *AudioFile {
	if r.PrimaryLoaded() {
		return r.primaryFile
	}
	panic("relationship has not been loaded")
}
// load populates the file list via fn unless it is already loaded. The first
// returned file, if any, becomes the primary file, and the primary is marked
// loaded whether or not fn returned any files.
func (r *RelatedAudioFiles) load(fn func() ([]*AudioFile, error)) error {
	if r.Loaded() {
		return nil
	}

	var err error
	if r.files, err = fn(); err != nil {
		return err
	}

	if len(r.files) > 0 {
		r.primaryFile = r.files[0]
	}
	r.primaryLoaded = true

	return nil
}
// loadPrimary populates the primary file via fn unless the primary
// relationship is already loaded.
func (r *RelatedAudioFiles) loadPrimary(fn func() (*AudioFile, error)) error {
	if r.PrimaryLoaded() {
		return nil
	}

	var err error
	if r.primaryFile, err = fn(); err != nil {
		return err
	}

	r.primaryLoaded = true
	return nil
}
type RelatedFiles struct {
primaryFile File
files []File

View file

@ -22,6 +22,7 @@ type Repository struct {
Image ImageReaderWriter
Group GroupReaderWriter
Performer PerformerReaderWriter
Audio AudioReaderWriter
Scene SceneReaderWriter
SceneMarker SceneMarkerReaderWriter
Studio StudioReaderWriter

View file

@ -4,7 +4,7 @@ package models
import (
"context"
"time"
// "time"
)
// AudioGetter provides methods to get audios by ID.
@ -72,24 +72,6 @@ type AudioCreatorUpdater interface {
AudioUpdater
}
type ViewDateReader interface {
CountViews(ctx context.Context, id int) (int, error)
CountAllViews(ctx context.Context) (int, error)
CountUniqueViews(ctx context.Context) (int, error)
GetManyViewCount(ctx context.Context, ids []int) ([]int, error)
GetViewDates(ctx context.Context, relatedID int) ([]time.Time, error)
GetManyViewDates(ctx context.Context, ids []int) ([][]time.Time, error)
GetManyLastViewed(ctx context.Context, ids []int) ([]*time.Time, error)
}
type ODateReader interface {
GetOCount(ctx context.Context, id int) (int, error)
GetManyOCount(ctx context.Context, ids []int) ([]int, error)
GetAllOCount(ctx context.Context) (int, error)
GetODates(ctx context.Context, relatedID int) ([]time.Time, error)
GetManyODates(ctx context.Context, ids []int) ([][]time.Time, error)
}
// AudioReader provides all methods to read audios.
type AudioReader interface {
AudioFinder
@ -104,8 +86,7 @@ type AudioReader interface {
PerformerIDLoader
TagIDLoader
AudioGroupLoader
StashIDLoader
VideoFileLoader
AudioFileLoader
CustomFieldsReader
All(ctx context.Context) ([]*Audio, error)
@ -117,18 +98,6 @@ type AudioReader interface {
HasCover(ctx context.Context, audioID int) (bool, error)
}
type OHistoryWriter interface {
AddO(ctx context.Context, id int, dates []time.Time) ([]time.Time, error)
DeleteO(ctx context.Context, id int, dates []time.Time) ([]time.Time, error)
ResetO(ctx context.Context, id int) (int, error)
}
type ViewHistoryWriter interface {
AddViews(ctx context.Context, audioID int, dates []time.Time) ([]time.Time, error)
DeleteViews(ctx context.Context, id int, dates []time.Time) ([]time.Time, error)
DeleteAllViews(ctx context.Context, id int) (int, error)
}
// AudioWriter provides all methods to modify audios.
type AudioWriter interface {
AudioCreator

View file

@ -14,6 +14,10 @@ const (
SceneUpdatePost TriggerEnum = "Scene.Update.Post"
SceneDestroyPost TriggerEnum = "Scene.Destroy.Post"
AudioCreatePost TriggerEnum = "Audio.Create.Post"
AudioUpdatePost TriggerEnum = "Audio.Update.Post"
AudioDestroyPost TriggerEnum = "Audio.Destroy.Post"
ImageCreatePost TriggerEnum = "Image.Create.Post"
ImageUpdatePost TriggerEnum = "Image.Update.Post"
ImageDestroyPost TriggerEnum = "Image.Destroy.Post"

View file

@ -31,6 +31,21 @@ type ScenesDestroyInput struct {
Path string `json:"path"`
}
// Audio

// AudioDestroyInput extends models.AudioDestroyInput with file-identifying
// fields (checksum, oshash, path), mirroring the scene/gallery destroy
// inputs defined alongside it.
type AudioDestroyInput struct {
	models.AudioDestroyInput
	Checksum string `json:"checksum"`
	OSHash   string `json:"oshash"`
	Path     string `json:"path"`
}
// AudiosDestroyInput extends models.AudiosDestroyInput (the bulk variant)
// with file-identifying fields (checksum, oshash, path).
type AudiosDestroyInput struct {
	models.AudiosDestroyInput
	Checksum string `json:"checksum"`
	OSHash   string `json:"oshash"`
	Path     string `json:"path"`
}
type GalleryDestroyInput struct {
models.GalleryDestroyInput
Checksum string `json:"checksum"`

View file

@ -1,3 +1,4 @@
// TODO(audio): update this file
package scraper
import (

View file

@ -182,8 +182,6 @@ type audioRepositoryType struct {
groups repository
files filesRepository
stashIDs stashIDRepository
}
var (
@ -225,12 +223,6 @@ var (
idColumn: audioIDColumn,
},
},
stashIDs: stashIDRepository{
repository{
tableName: "audio_stash_ids",
idColumn: audioIDColumn,
},
},
}
)
@ -348,12 +340,6 @@ func (qb *AudioStore) Create(ctx context.Context, newObject *models.Audio, fileI
}
}
if newObject.StashIDs.Loaded() {
if err := audiosStashIDsTableMgr.insertJoins(ctx, id, newObject.StashIDs.List()); err != nil {
return err
}
}
if newObject.Groups.Loaded() {
if err := audiosGroupsTableMgr.insertJoins(ctx, id, newObject.Groups.List()); err != nil {
return err
@ -405,11 +391,6 @@ func (qb *AudioStore) UpdatePartial(ctx context.Context, id int, partial models.
return nil, err
}
}
if partial.StashIDs != nil {
if err := audiosStashIDsTableMgr.modifyJoins(ctx, id, partial.StashIDs.StashIDs, partial.StashIDs.Mode); err != nil {
return nil, err
}
}
if partial.GroupIDs != nil {
if err := audiosGroupsTableMgr.modifyJoins(ctx, id, partial.GroupIDs.Groups, partial.GroupIDs.Mode); err != nil {
return nil, err
@ -456,12 +437,6 @@ func (qb *AudioStore) Update(ctx context.Context, updatedObject *models.Audio) e
}
}
if updatedObject.StashIDs.Loaded() {
if err := audiosStashIDsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.StashIDs.List()); err != nil {
return err
}
}
if updatedObject.Groups.Loaded() {
if err := audiosGroupsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.Groups.List()); err != nil {
return err
@ -612,7 +587,7 @@ func (qb *AudioStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*mo
return ret, nil
}
func (qb *AudioStore) GetFiles(ctx context.Context, id int) ([]*models.VideoFile, error) {
func (qb *AudioStore) GetFiles(ctx context.Context, id int) ([]*models.AudioFile, error) {
fileIDs, err := audioRepository.files.get(ctx, id)
if err != nil {
return nil, err
@ -624,12 +599,12 @@ func (qb *AudioStore) GetFiles(ctx context.Context, id int) ([]*models.VideoFile
return nil, err
}
ret := make([]*models.VideoFile, len(files))
ret := make([]*models.AudioFile, len(files))
for i, f := range files {
var ok bool
ret[i], ok = f.(*models.VideoFile)
ret[i], ok = f.(*models.AudioFile)
if !ok {
return nil, fmt.Errorf("expected file to be *file.VideoFile not %T", f)
return nil, fmt.Errorf("expected file to be *file.AudioFile not %T", f)
}
}
@ -885,16 +860,16 @@ func (qb *AudioStore) Size(ctx context.Context) (float64, error) {
func (qb *AudioStore) Duration(ctx context.Context) (float64, error) {
table := qb.table()
videoFileTable := videoFileTableMgr.table
AudioFileTable := AudioFileTableMgr.table
q := dialect.Select(
goqu.COALESCE(goqu.SUM(videoFileTable.Col("duration")), 0),
goqu.COALESCE(goqu.SUM(AudioFileTable.Col("duration")), 0),
).From(table).InnerJoin(
audiosFilesJoinTable,
goqu.On(audiosFilesJoinTable.Col("audio_id").Eq(table.Col(idColumn))),
).InnerJoin(
videoFileTable,
goqu.On(videoFileTable.Col("file_id").Eq(audiosFilesJoinTable.Col("file_id"))),
AudioFileTable,
goqu.On(AudioFileTable.Col("file_id").Eq(audiosFilesJoinTable.Col("file_id"))),
)
var ret float64
@ -1068,7 +1043,7 @@ func (qb *AudioStore) queryGroupedFields(ctx context.Context, options models.Aud
onClause: "audios_files.audio_id = audios.id",
},
join{
table: videoFileTable,
table: AudioFileTable,
onClause: "audios_files.file_id = audio_files.file_id",
},
)
@ -1128,14 +1103,11 @@ var audioSortOptions = sortOptions{
"filesize",
"duration",
"file_mod_time",
"framerate",
"samplerate",
"group_audio_number",
"id",
"interactive",
"interactive_speed",
"last_o_at",
"last_played_at",
"movie_audio_number",
"o_counter",
"organized",
"performer_count",
@ -1143,7 +1115,6 @@ var audioSortOptions = sortOptions{
"play_duration",
"resume_time",
"path",
"perceptual_similarity",
"random",
"rating",
"resolution",
@ -1180,12 +1151,12 @@ func (qb *AudioStore) setAudioSort(query *queryBuilder, findFilter *models.FindF
)
}
addVideoFileTable := func() {
addAudioFileTable := func() {
addFileTable()
query.addJoins(
join{
sort: true,
table: videoFileTable,
table: audioFileTable,
onClause: "audio_files.file_id = audios_files.file_id",
},
)
@ -1203,9 +1174,6 @@ func (qb *AudioStore) setAudioSort(query *queryBuilder, findFilter *models.FindF
direction := findFilter.GetDirection()
switch sort {
case "movie_audio_number":
query.joinSort(groupsAudiosTable, "", "audios.id = groups_audios.audio_id")
query.sortAndPagination += getSort("audio_index", direction, groupsAudiosTable)
case "group_audio_number":
query.joinSort(groupsAudiosTable, "audio_group", "audios.id = audio_group.audio_id")
query.sortAndPagination += getSort("audio_index", direction, "audio_group")
@ -1220,43 +1188,27 @@ func (qb *AudioStore) setAudioSort(query *queryBuilder, findFilter *models.FindF
addFileTable()
addFolderTable()
query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction)
case "perceptual_similarity":
// special handling for phash
addFileTable()
query.addJoins(
join{
sort: true,
table: fingerprintTable,
as: "fingerprints_phash",
onClause: "audios_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'",
},
)
query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC"
case "bitrate":
sort = "bit_rate"
addVideoFileTable()
query.sortAndPagination += getSort(sort, direction, videoFileTable)
addAudioFileTable()
query.sortAndPagination += getSort(sort, direction, audioFileTable)
case "file_mod_time":
sort = "mod_time"
addFileTable()
query.sortAndPagination += getSort(sort, direction, fileTable)
case "framerate":
sort = "frame_rate"
addVideoFileTable()
query.sortAndPagination += getSort(sort, direction, videoFileTable)
case "samplerate":
sort = "sample_rate"
addAudioFileTable()
query.sortAndPagination += getSort(sort, direction, audioFileTable)
case "resolution":
addVideoFileTable()
query.sortAndPagination += fmt.Sprintf(" ORDER BY MIN(%s.width, %s.height) %s", videoFileTable, videoFileTable, getSortDirection(direction))
addAudioFileTable()
query.sortAndPagination += fmt.Sprintf(" ORDER BY MIN(%s.width, %s.height) %s", audioFileTable, audioFileTable, getSortDirection(direction))
case "filesize":
addFileTable()
query.sortAndPagination += getSort(sort, direction, fileTable)
case "duration":
addVideoFileTable()
query.sortAndPagination += getSort(sort, direction, videoFileTable)
case "interactive", "interactive_speed":
addVideoFileTable()
query.sortAndPagination += getSort(sort, direction, videoFileTable)
addAudioFileTable()
query.sortAndPagination += getSort(sort, direction, audioFileTable)
case "title":
addFileTable()
addFolderTable()
@ -1428,10 +1380,6 @@ func (qb *AudioStore) AddGalleryIDs(ctx context.Context, audioID int, galleryIDs
return audiosGalleriesTableMgr.addJoins(ctx, audioID, galleryIDs)
}
func (qb *AudioStore) GetStashIDs(ctx context.Context, audioID int) ([]models.StashID, error) {
return audioRepository.stashIDs.get(ctx, audioID)
}
func (qb *AudioStore) FindDuplicates(ctx context.Context, distance int, durationDiff float64) ([][]*models.Audio, error) {
var dupeIds [][]int
if distance == 0 {

View file

@ -1,3 +1,4 @@
// TODO(audio): update this file
package sqlite
import (
@ -20,10 +21,12 @@ import (
const (
fileTable = "files"
videoFileTable = "video_files"
audioFileTable = "audio_files"
imageFileTable = "image_files"
fileIDColumn = "file_id"
videoCaptionsTable = "video_captions"
audioCaptionsTable = "audio_captions"
captionCodeColumn = "language_code"
captionFilenameColumn = "filename"
captionTypeColumn = "caption_type"

View file

@ -12,8 +12,6 @@ CREATE TABLE "audios" (
`created_at` datetime not null,
`updated_at` datetime not null,
`code` text,
`artists` text,
`album` text,
`resume_time` float not null default 0,
`play_duration` float not null default 0,
"date_precision" TINYINT,
@ -61,7 +59,7 @@ CREATE TABLE "groups_audios" (
foreign key(`audio_id`) references `audios`(`id`) on delete cascade,
PRIMARY KEY("group_id", `audio_id`)
);
CREATE INDEX `index_movies_audios_on_movie_id` on "groups_audios" ("group_id");
CREATE INDEX `index_group_audios_on_group_id` on "groups_audios" ("group_id");
--------------------------------------------
-- performers_audios definition
--

View file

@ -257,6 +257,11 @@ var (
idColumn: goqu.T(videoFileTable).Col(fileIDColumn),
}
audioFileTableMgr = &table{
table: goqu.T(audioFileTable),
idColumn: goqu.T(audioFileTable).Col(fileIDColumn),
}
imageFileTableMgr = &table{
table: goqu.T(imageFileTable),
idColumn: goqu.T(imageFileTable).Col(fileIDColumn),