Add scenes/images/galleries fields to graphql file types

This commit is contained in:
WithoutPants 2026-03-16 13:00:57 +11:00
parent b8bd8953f7
commit 8b346377b8
16 changed files with 498 additions and 3 deletions

View file

@ -90,6 +90,8 @@ type VideoFile implements BaseFile {
frame_rate: Float!
bit_rate: Int!
scenes: [Scene!]!
created_at: Time!
updated_at: Time!
}
@ -115,6 +117,8 @@ type ImageFile implements BaseFile {
width: Int!
height: Int!
images: [Image!]!
created_at: Time!
updated_at: Time!
}
@ -138,6 +142,8 @@ type GalleryFile implements BaseFile {
fingerprint(type: String!): String
fingerprints: [Fingerprint!]!
galleries: [Gallery!]!
created_at: Time!
updated_at: Time!
}

View file

@ -14,6 +14,7 @@
//go:generate go run github.com/vektah/dataloaden FolderParentFolderIDsLoader github.com/stashapp/stash/pkg/models.FolderID []github.com/stashapp/stash/pkg/models.FolderID
//go:generate go run github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden FileIDsRelatedIDsLoader github.com/stashapp/stash/pkg/models.FileID []int
//go:generate go run github.com/vektah/dataloaden GalleryFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden CustomFieldsLoader int github.com/stashapp/stash/pkg/models.CustomFieldMap
//go:generate go run github.com/vektah/dataloaden SceneOCountLoader int int
@ -44,6 +45,7 @@ const (
type Loaders struct {
SceneByID *SceneLoader
SceneIDsByFileID *FileIDsRelatedIDsLoader
SceneFiles *SceneFileIDsLoader
ScenePlayCount *ScenePlayCountLoader
SceneOCount *SceneOCountLoader
@ -56,8 +58,10 @@ type Loaders struct {
GalleryFiles *GalleryFileIDsLoader
GalleryByID *GalleryLoader
GalleryIDsByFileID *FileIDsRelatedIDsLoader
GalleryCustomFields *CustomFieldsLoader
ImageByID *ImageLoader
ImageIDsByFileID *FileIDsRelatedIDsLoader
ImageCustomFields *CustomFieldsLoader
PerformerByID *PerformerLoader
@ -91,11 +95,21 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
maxBatch: maxBatch,
fetch: m.fetchScenes(ctx),
},
SceneIDsByFileID: &FileIDsRelatedIDsLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchSceneIDsByFileID(ctx),
},
GalleryByID: &GalleryLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchGalleries(ctx),
},
GalleryIDsByFileID: &FileIDsRelatedIDsLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchGalleryIDsByFileID(ctx),
},
GalleryCustomFields: &CustomFieldsLoader{
wait: wait,
maxBatch: maxBatch,
@ -106,6 +120,11 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
maxBatch: maxBatch,
fetch: m.fetchImages(ctx),
},
ImageIDsByFileID: &FileIDsRelatedIDsLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchImageIDsByFileID(ctx),
},
ImageCustomFields: &CustomFieldsLoader{
wait: wait,
maxBatch: maxBatch,
@ -241,6 +260,17 @@ func (m Middleware) fetchScenes(ctx context.Context) func(keys []int) ([]*models
}
}
// fetchSceneIDsByFileID builds the batch-fetch function used by the
// SceneIDsByFileID dataloader: given a batch of file IDs it returns the
// scene IDs related to each, all loaded inside a single DB transaction.
func (m Middleware) fetchSceneIDsByFileID(ctx context.Context) func(keys []models.FileID) ([][]int, []error) {
	return func(keys []models.FileID) ([][]int, []error) {
		var ids [][]int
		txnErr := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var innerErr error
			ids, innerErr = m.Repository.Scene.GetManyIDsByFileIDs(ctx, keys)
			return innerErr
		})
		return ids, toErrorSlice(txnErr)
	}
}
func (m Middleware) fetchSceneCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
@ -265,6 +295,17 @@ func (m Middleware) fetchImages(ctx context.Context) func(keys []int) ([]*models
}
}
// fetchImageIDsByFileID builds the batch-fetch function used by the
// ImageIDsByFileID dataloader: given a batch of file IDs it returns the
// image IDs related to each, all loaded inside a single DB transaction.
func (m Middleware) fetchImageIDsByFileID(ctx context.Context) func(keys []models.FileID) ([][]int, []error) {
	return func(keys []models.FileID) ([][]int, []error) {
		var ids [][]int
		txnErr := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var innerErr error
			ids, innerErr = m.Repository.Image.GetManyIDsByFileIDs(ctx, keys)
			return innerErr
		})
		return ids, toErrorSlice(txnErr)
	}
}
func (m Middleware) fetchImageCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
@ -289,6 +330,17 @@ func (m Middleware) fetchGalleries(ctx context.Context) func(keys []int) ([]*mod
}
}
// fetchGalleryIDsByFileID builds the batch-fetch function used by the
// GalleryIDsByFileID dataloader: given a batch of file IDs it returns the
// gallery IDs related to each, all loaded inside a single DB transaction.
func (m Middleware) fetchGalleryIDsByFileID(ctx context.Context) func(keys []models.FileID) ([][]int, []error) {
	return func(keys []models.FileID) ([][]int, []error) {
		var ids [][]int
		txnErr := m.Repository.WithDB(ctx, func(ctx context.Context) error {
			var innerErr error
			ids, innerErr = m.Repository.Gallery.GetManyIDsByFileIDs(ctx, keys)
			return innerErr
		})
		return ids, toErrorSlice(txnErr)
	}
}
func (m Middleware) fetchPerformers(ctx context.Context) func(keys []int) ([]*models.Performer, []error) {
return func(keys []int) (ret []*models.Performer, errs []error) {
err := m.Repository.WithDB(ctx, func(ctx context.Context) error {

View file

@ -0,0 +1,225 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"github.com/stashapp/stash/pkg/models"
)
// FileIDsRelatedIDsLoaderConfig captures the config to create a new FileIDsRelatedIDsLoader
type FileIDsRelatedIDsLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []models.FileID) ([][]int, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}
// NewFileIDsRelatedIDsLoader creates a new FileIDsRelatedIDsLoader given a fetch, wait, and maxBatch
func NewFileIDsRelatedIDsLoader(config FileIDsRelatedIDsLoaderConfig) *FileIDsRelatedIDsLoader {
	// cache and batch are left as their zero values; the cache is created
	// lazily on first use (see unsafeSet)
	return &FileIDsRelatedIDsLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}
// FileIDsRelatedIDsLoader batches and caches requests
type FileIDsRelatedIDsLoader struct {
	// this method provides the data for the loader
	fetch func(keys []models.FileID) ([][]int, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[models.FileID][]int

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *fileIDsRelatedIDsLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
// fileIDsRelatedIDsLoaderBatch accumulates keys for a single batched fetch.
// data and error are populated by end(), after which done is closed to wake
// any waiting thunks.
type fileIDsRelatedIDsLoaderBatch struct {
	keys    []models.FileID
	data    [][]int
	error   []error
	closing bool
	done    chan struct{}
}
// Load a []int by key, batching and caching will be applied automatically
func (l *FileIDsRelatedIDsLoader) Load(key models.FileID) ([]int, error) {
	// LoadThunk queues the key; invoking the thunk blocks until the batch is fetched
	return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a []int.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *FileIDsRelatedIDsLoader) LoadThunk(key models.FileID) func() ([]int, error) {
	l.mu.Lock()
	// cache hit: return immediately without joining a batch
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() ([]int, error) {
			return it, nil
		}
	}
	// no open batch: start a new one; it will be dispatched by the wait
	// timer or when it reaches maxBatch keys (see keyIndex)
	if l.batch == nil {
		l.batch = &fileIDsRelatedIDsLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() ([]int, error) {
		// block until the batch has been fetched
		<-batch.done

		var data []int
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		// only successful results are written to the cache
		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *FileIDsRelatedIDsLoader) LoadAll(keys []models.FileID) ([][]int, []error) {
	// queue every key first so they can share batches...
	results := make([]func() ([]int, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	// ...then block on each thunk in turn
	ints := make([][]int, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		ints[i], errors[i] = thunk()
	}
	return ints, errors
}
// LoadAllThunk returns a function that when called will block waiting for the []ints.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *FileIDsRelatedIDsLoader) LoadAllThunk(keys []models.FileID) func() ([][]int, []error) {
	// queue every key up front; resolution is deferred to the returned thunk
	results := make([]func() ([]int, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([][]int, []error) {
		ints := make([][]int, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			ints[i], errors[i] = thunk()
		}
		return ints, errors
	}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *FileIDsRelatedIDsLoader) Prime(key models.FileID, value []int) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := make([]int, len(value))
		copy(cpy, value)
		l.unsafeSet(key, cpy)
	}
	l.mu.Unlock()
	// true means the value was stored
	return !found
}
// Clear the value at key from the cache, if it exists.
// Safe for concurrent use; the deletion is guarded by the loader mutex.
func (l *FileIDsRelatedIDsLoader) Clear(key models.FileID) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}
// unsafeSet stores value under key, creating the cache map lazily.
// The caller must hold l.mu (hence "unsafe").
func (l *FileIDsRelatedIDsLoader) unsafeSet(key models.FileID, value []int) {
	if l.cache == nil {
		l.cache = map[models.FileID][]int{}
	}
	l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch.
// The caller must hold l.mu (it is invoked from LoadThunk under the lock).
func (b *fileIDsRelatedIDsLoaderBatch) keyIndex(l *FileIDsRelatedIDsLoader, key models.FileID) int {
	// duplicate keys share the same slot in the batch
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		// the first key in a batch starts the wait timer
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		// batch is full: detach it from the loader so new keys start a
		// fresh batch, and dispatch this one immediately
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}
// startTimer sleeps for the configured wait duration, then finalizes the
// batch - unless the batch already hit its size limit and was dispatched
// early by keyIndex.
func (b *fileIDsRelatedIDsLoaderBatch) startTimer(l *FileIDsRelatedIDsLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	// detach the batch so new keys start a fresh one, then fetch
	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}
// end runs the fetch for all collected keys and closes done to wake every
// thunk waiting on this batch.
func (b *fileIDsRelatedIDsLoaderBatch) end(l *FileIDsRelatedIDsLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View file

@ -41,7 +41,8 @@ func convertBaseFile(f models.File) BaseFile {
case *models.ImageFile:
return &ImageFile{ImageFile: f}
case *models.BaseFile:
return &BasicFile{BaseFile: f}
// assume gallery file if it's not a video or image file
return &GalleryFile{BaseFile: f}
default:
panic("unknown file type")
}
@ -57,8 +58,6 @@ type GalleryFile struct {
// IsBaseFile marks GalleryFile as implementing the BaseFile graphql interface.
func (GalleryFile) IsBaseFile() {}

// IsVisualFile marks GalleryFile as implementing the VisualFile graphql
// interface. NOTE(review): a gallery file counting as a "visual" file looks
// surprising - confirm this is intended by the schema.
func (GalleryFile) IsVisualFile() {}

// Fingerprints returns the fingerprints of the wrapped base file.
func (f *GalleryFile) Fingerprints() []models.Fingerprint {
	return f.BaseFile.Fingerprints
}

View file

@ -40,10 +40,32 @@ func (r *imageFileResolver) ParentFolder(ctx context.Context, obj *ImageFile) (*
return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID)
}
// Images resolves the images associated with this image file. The image IDs
// are loaded via the batching ImageIDsByFileID dataloader, then resolved to
// Image models in a second batched load; the first per-image error, if any,
// is returned.
func (r *imageFileResolver) Images(ctx context.Context, obj *ImageFile) ([]*models.Image, error) {
	imageIDs, err := loaders.From(ctx).ImageIDsByFileID.Load(obj.ID)
	if err != nil {
		return nil, err
	}

	// note: the redundant `var errs []error` was removed - := declares errs here
	ret, errs := loaders.From(ctx).ImageByID.LoadAll(imageIDs)
	return ret, firstError(errs)
}
// ParentFolder resolves the folder containing this video file via the
// batching FolderByID dataloader.
func (r *videoFileResolver) ParentFolder(ctx context.Context, obj *VideoFile) (*models.Folder, error) {
	return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID)
}
// Scenes resolves the scenes associated with this video file. The scene IDs
// are loaded via the batching SceneIDsByFileID dataloader, then resolved to
// Scene models in a second batched load; the first per-scene error, if any,
// is returned.
func (r *videoFileResolver) Scenes(ctx context.Context, obj *VideoFile) ([]*models.Scene, error) {
	sceneIDs, err := loaders.From(ctx).SceneIDsByFileID.Load(obj.ID)
	if err != nil {
		return nil, err
	}

	// note: the redundant `var errs []error` was removed - := declares errs here
	ret, errs := loaders.From(ctx).SceneByID.LoadAll(sceneIDs)
	return ret, firstError(errs)
}
// ParentFolder resolves the folder containing this basic file via the
// batching FolderByID dataloader.
func (r *basicFileResolver) ParentFolder(ctx context.Context, obj *BasicFile) (*models.Folder, error) {
	return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID)
}
@ -67,6 +89,17 @@ func (r *galleryFileResolver) ZipFile(ctx context.Context, obj *GalleryFile) (*B
return zipFileResolver(ctx, obj.ZipFileID)
}
// Galleries resolves the galleries associated with this gallery file. The
// gallery IDs are loaded via the batching GalleryIDsByFileID dataloader, then
// resolved to Gallery models in a second batched load; the first per-gallery
// error, if any, is returned.
func (r *galleryFileResolver) Galleries(ctx context.Context, obj *GalleryFile) ([]*models.Gallery, error) {
	galleryIDs, err := loaders.From(ctx).GalleryIDsByFileID.Load(obj.ID)
	if err != nil {
		return nil, err
	}

	// note: the redundant `var errs []error` was removed - := declares errs here
	ret, errs := loaders.From(ctx).GalleryByID.LoadAll(galleryIDs)
	return ret, firstError(errs)
}
// ZipFile resolves the containing zip archive of this image file, or nil
// when the file is not inside a zip.
func (r *imageFileResolver) ZipFile(ctx context.Context, obj *ImageFile) (*BasicFile, error) {
	return zipFileResolver(ctx, obj.ZipFileID)
}

View file

@ -510,6 +510,29 @@ func (_m *GalleryReaderWriter) GetManyFileIDs(ctx context.Context, ids []int) ([
return r0, r1
}
// GetManyIDsByFileIDs provides a mock function with given fields: ctx, fileIDs
func (_m *GalleryReaderWriter) GetManyIDsByFileIDs(ctx context.Context, fileIDs []models.FileID) ([][]int, error) {
	ret := _m.Called(ctx, fileIDs)

	// use a registered return function when one was supplied, otherwise
	// fall back to the literal return value (which may be nil)
	var r0 [][]int
	if rf, ok := ret.Get(0).(func(context.Context, []models.FileID) [][]int); ok {
		r0 = rf(ctx, fileIDs)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([][]int)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, []models.FileID) error); ok {
		r1 = rf(ctx, fileIDs)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GetPerformerIDs provides a mock function with given fields: ctx, relatedID
func (_m *GalleryReaderWriter) GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) {
ret := _m.Called(ctx, relatedID)

View file

@ -508,6 +508,29 @@ func (_m *ImageReaderWriter) GetManyFileIDs(ctx context.Context, ids []int) ([][
return r0, r1
}
// GetManyIDsByFileIDs provides a mock function with given fields: ctx, fileIDs
func (_m *ImageReaderWriter) GetManyIDsByFileIDs(ctx context.Context, fileIDs []models.FileID) ([][]int, error) {
	ret := _m.Called(ctx, fileIDs)

	// use a registered return function when one was supplied, otherwise
	// fall back to the literal return value (which may be nil)
	var r0 [][]int
	if rf, ok := ret.Get(0).(func(context.Context, []models.FileID) [][]int); ok {
		r0 = rf(ctx, fileIDs)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([][]int)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, []models.FileID) error); ok {
		r1 = rf(ctx, fileIDs)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GetPerformerIDs provides a mock function with given fields: ctx, relatedID
func (_m *ImageReaderWriter) GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) {
ret := _m.Called(ctx, relatedID)

View file

@ -892,6 +892,29 @@ func (_m *SceneReaderWriter) GetManyFileIDs(ctx context.Context, ids []int) ([][
return r0, r1
}
// GetManyIDsByFileIDs provides a mock function with given fields: ctx, fileIDs
func (_m *SceneReaderWriter) GetManyIDsByFileIDs(ctx context.Context, fileIDs []models.FileID) ([][]int, error) {
	ret := _m.Called(ctx, fileIDs)

	// use a registered return function when one was supplied, otherwise
	// fall back to the literal return value (which may be nil)
	var r0 [][]int
	if rf, ok := ret.Get(0).(func(context.Context, []models.FileID) [][]int); ok {
		r0 = rf(ctx, fileIDs)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([][]int)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, []models.FileID) error); ok {
		r1 = rf(ctx, fileIDs)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GetManyLastViewed provides a mock function with given fields: ctx, ids
func (_m *SceneReaderWriter) GetManyLastViewed(ctx context.Context, ids []int) ([]*time.Time, error) {
ret := _m.Called(ctx, ids)

View file

@ -92,3 +92,7 @@ type FileReaderWriter interface {
FileReader
FileWriter
}
// IDsFromFileIDsLoader loads, in a single batch, the IDs of the objects
// related to each of a set of file IDs. The returned outer slice is parallel
// to fileIDs.
type IDsFromFileIDsLoader interface {
	GetManyIDsByFileIDs(ctx context.Context, fileIDs []FileID) ([][]int, error)
}

View file

@ -12,6 +12,7 @@ type GalleryGetter interface {
// GalleryFinder provides methods to find galleries.
type GalleryFinder interface {
GalleryGetter
IDsFromFileIDsLoader
FindByFingerprints(ctx context.Context, fp []Fingerprint) ([]*Gallery, error)
FindByChecksum(ctx context.Context, checksum string) ([]*Gallery, error)
FindByChecksums(ctx context.Context, checksums []string) ([]*Gallery, error)

View file

@ -12,6 +12,7 @@ type ImageGetter interface {
// ImageFinder provides methods to find images.
type ImageFinder interface {
ImageGetter
IDsFromFileIDsLoader
FindByFingerprints(ctx context.Context, fp []Fingerprint) ([]*Image, error)
FindByChecksum(ctx context.Context, checksum string) ([]*Image, error)
FindByFileID(ctx context.Context, fileID FileID) ([]*Image, error)

View file

@ -18,6 +18,7 @@ type SceneGetter interface {
// SceneFinder provides methods to find scenes.
type SceneFinder interface {
SceneGetter
IDsFromFileIDsLoader
FindByFingerprints(ctx context.Context, fp []Fingerprint) ([]*Scene, error)
FindByChecksum(ctx context.Context, checksum string) ([]*Scene, error)
FindByOSHash(ctx context.Context, oshash string) ([]*Scene, error)

View file

@ -528,6 +528,38 @@ func (qb *GalleryStore) FindByFileID(ctx context.Context, fileID models.FileID)
return ret, nil
}
// GetManyIDsByFileIDs returns the gallery IDs related to each of the given
// file IDs. The returned slice is parallel to fileIDs: ret[i] holds the
// gallery IDs for fileIDs[i] (nil when the file has no galleries).
func (qb *GalleryStore) GetManyIDsByFileIDs(ctx context.Context, fileIDs []models.FileID) ([][]int, error) {
	// avoid building a query with an empty IN list
	if len(fileIDs) == 0 {
		return nil, nil
	}

	sq := dialect.From(galleriesFilesJoinTable).Select(
		galleriesFilesJoinTable.Col(galleryIDColumn),
		galleriesFilesJoinTable.Col(fileIDColumn),
	).Where(
		galleriesFilesJoinTable.Col(fileIDColumn).In(fileIDs),
	)

	sql, args, err := sq.ToSQL()
	if err != nil {
		return nil, fmt.Errorf("building query: %w", err)
	}

	var results []struct {
		GalleryID int           `db:"gallery_id"`
		FileID    models.FileID `db:"file_id"`
	}
	if err := querySelect(ctx, sql, args, &results); err != nil {
		return nil, fmt.Errorf("getting galleries by file ids %v: %w", fileIDs, err)
	}

	// group gallery IDs by file ID, then emit them in input order
	retMap := make(map[models.FileID][]int, len(fileIDs))
	for _, r := range results {
		retMap[r.FileID] = append(retMap[r.FileID], r.GalleryID)
	}

	ret := make([][]int, len(fileIDs))
	for i, id := range fileIDs {
		ret[i] = retMap[id]
	}
	return ret, nil
}
func (qb *GalleryStore) CountByFileID(ctx context.Context, fileID models.FileID) (int, error) {
joinTable := galleriesFilesJoinTable

View file

@ -576,6 +576,38 @@ func (qb *ImageStore) FindByFileID(ctx context.Context, fileID models.FileID) ([
return ret, nil
}
// GetManyIDsByFileIDs returns the image IDs related to each of the given
// file IDs. The returned slice is parallel to fileIDs: ret[i] holds the
// image IDs for fileIDs[i] (nil when the file has no images).
func (qb *ImageStore) GetManyIDsByFileIDs(ctx context.Context, fileIDs []models.FileID) ([][]int, error) {
	// avoid building a query with an empty IN list
	if len(fileIDs) == 0 {
		return nil, nil
	}

	sq := dialect.From(imagesFilesJoinTable).Select(
		imagesFilesJoinTable.Col(imageIDColumn),
		imagesFilesJoinTable.Col(fileIDColumn),
	).Where(
		imagesFilesJoinTable.Col(fileIDColumn).In(fileIDs),
	)

	sql, args, err := sq.ToSQL()
	if err != nil {
		return nil, fmt.Errorf("building query: %w", err)
	}

	var results []struct {
		ImageID int           `db:"image_id"`
		FileID  models.FileID `db:"file_id"`
	}
	if err := querySelect(ctx, sql, args, &results); err != nil {
		return nil, fmt.Errorf("getting images by file ids %v: %w", fileIDs, err)
	}

	// group image IDs by file ID, then emit them in input order
	retMap := make(map[models.FileID][]int, len(fileIDs))
	for _, r := range results {
		retMap[r.FileID] = append(retMap[r.FileID], r.ImageID)
	}

	ret := make([][]int, len(fileIDs))
	for i, id := range fileIDs {
		ret[i] = retMap[id]
	}
	return ret, nil
}
func (qb *ImageStore) CountByFileID(ctx context.Context, fileID models.FileID) (int, error) {
joinTable := imagesFilesJoinTable

View file

@ -652,6 +652,38 @@ func (qb *SceneStore) FindByFileID(ctx context.Context, fileID models.FileID) ([
return ret, nil
}
// GetManyIDsByFileIDs returns the scene IDs related to each of the given
// file IDs. The returned slice is parallel to fileIDs: ret[i] holds the
// scene IDs for fileIDs[i] (nil when the file has no scenes).
func (qb *SceneStore) GetManyIDsByFileIDs(ctx context.Context, fileIDs []models.FileID) ([][]int, error) {
	// avoid building a query with an empty IN list
	if len(fileIDs) == 0 {
		return nil, nil
	}

	sq := dialect.From(scenesFilesJoinTable).Select(
		scenesFilesJoinTable.Col(sceneIDColumn),
		scenesFilesJoinTable.Col(fileIDColumn),
	).Where(
		scenesFilesJoinTable.Col(fileIDColumn).In(fileIDs),
	)

	sql, args, err := sq.ToSQL()
	if err != nil {
		return nil, fmt.Errorf("building query: %w", err)
	}

	var results []struct {
		SceneID int           `db:"scene_id"`
		FileID  models.FileID `db:"file_id"`
	}
	if err := querySelect(ctx, sql, args, &results); err != nil {
		return nil, fmt.Errorf("getting scenes by file ids %v: %w", fileIDs, err)
	}

	// group scene IDs by file ID, then emit them in input order
	retMap := make(map[models.FileID][]int, len(fileIDs))
	for _, r := range results {
		retMap[r.FileID] = append(retMap[r.FileID], r.SceneID)
	}

	ret := make([][]int, len(fileIDs))
	for i, id := range fileIDs {
		ret[i] = retMap[id]
	}
	return ret, nil
}
func (qb *SceneStore) FindByPrimaryFileID(ctx context.Context, fileID models.FileID) ([]*models.Scene, error) {
sq := dialect.From(scenesFilesJoinTable).Select(scenesFilesJoinTable.Col(sceneIDColumn)).Where(
scenesFilesJoinTable.Col(fileIDColumn).Eq(fileID),

View file

@ -1209,6 +1209,14 @@ func querySimple(ctx context.Context, query *goqu.SelectDataset, out interface{}
return nil
}
// querySelect runs the given query with args and scans every resulting row
// into dest (a pointer to a slice). sql.ErrNoRows is deliberately swallowed
// so that an empty result is not treated as an error.
func querySelect(ctx context.Context, query string, args []interface{}, dest interface{}) error {
	if err := dbWrapper.Select(ctx, dest, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) {
		return fmt.Errorf("running query: %s [%v]: %w", query, args, err)
	}
	return nil
}
// func cols(table exp.IdentifierExpression, cols []string) []interface{} {
// var ret []interface{}
// for _, c := range cols {