// nidus-sync/platform/upload.go (254 lines, 7.7 KiB, Go)

package platform
import (
"context"
"errors"
"fmt"
"time"
"github.com/Gleipnir-Technology/bob"
"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
"github.com/Gleipnir-Technology/bob/dialect/psql/um"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
"github.com/Gleipnir-Technology/nidus-sync/platform/file"
"github.com/Gleipnir-Technology/nidus-sync/platform/types"
"github.com/aarondl/opt/omit"
"github.com/aarondl/opt/omitnull"
"github.com/rs/zerolog/log"
"github.com/stephenafamo/scan"
)
// UploadType enumerates the kinds of file uploads the platform accepts.
// NOTE(review): neither the alias nor UploadTypePool is referenced in this
// file — presumably consumed elsewhere; confirm before removing.
type UploadType = int

const (
	UploadTypePool UploadType = iota
)
// UploadStatus enumerates the lifecycle states of an upload.
// NOTE(review): neither the alias nor UploadStatusComplete is referenced in
// this file — presumably consumed elsewhere; confirm before removing.
type UploadStatus = int

const (
	UploadStatusComplete UploadStatus = iota
)
// Upload is the minimal record returned after creating a new file upload.
// Only ID is populated by NewUpload; Created and Status are mapped via the
// `db` tags when scanned from a query.
type Upload struct {
	Created time.Time `db:"created"`
	ID      int32     `db:"id"`
	Status  string    `db:"status"`
}
// UploadSummary is one row of the upload listing, scanned from the joined
// fileupload.csv / fileupload.file query in UploadSummaryList (the `db` tags
// match that query's column aliases).
type UploadSummary struct {
	Created     time.Time `db:"created"`
	Filename    string    `db:"filename"`
	ID          int32     `db:"id"`
	RecordCount int       `db:"recordcount"`
	Status      string    `db:"status"`
	Type        string    `db:"type"`
}
// UploadPoolDetailCount aggregates per-row classifications of an uploaded
// pool list: rows matching existing pools, brand-new pools, and pools
// outside the district. A row may be counted as both new/existing and
// outside (see getUploadPoollistDetail).
type UploadPoolDetailCount struct {
	Existing int `json:"existing"`
	New      int `json:"new"`
	Outside  int `json:"outside"`
}
// UploadPoolDetail is the full detail view of a pool-list CSV upload:
// aggregate counts, file-level parse errors, and one UploadPoolRow per
// parsed row. Serialized to JSON for the API.
type UploadPoolDetail struct {
	Count   UploadPoolDetailCount `json:"count"`
	Created time.Time             `json:"created"`
	Errors  []UploadPoolError     `json:"errors"`
	ID      int32                 `json:"id"`
	Name    string                `json:"name"`
	Pools   []UploadPoolRow       `json:"pools"`
	Status  string                `json:"status"`
}
// GetUploadDetail looks up the uploaded file and its CSV metadata by file_id
// and returns the detail view appropriate for the CSV type. Returns an error
// when either lookup fails or the CSV type is not handled.
// NOTE(review): organization_id is accepted but never used here — presumably
// authorization happens in the caller; confirm.
func GetUploadDetail(ctx context.Context, organization_id int32, file_id int32) (*UploadPoolDetail, error) {
	file, err := models.FindFileuploadFile(ctx, db.PGInstance.BobDB, file_id)
	if err != nil {
		return nil, fmt.Errorf("Failed to lookup file %d: %w", file_id, err)
	}
	csv, err := models.FindFileuploadCSV(ctx, db.PGInstance.BobDB, file_id)
	if err != nil {
		return nil, fmt.Errorf("Failed to lookup csv %d: %w", file_id, err)
	}
	switch csv.Type {
	// Flyover and pool-list CSVs share the same detail representation.
	case enums.FileuploadCsvtypeFlyover, enums.FileuploadCsvtypePoollist:
		return getUploadPoollistDetail(ctx, file)
	}
	return nil, errors.New("unhandled upload csv type")
}
// NewUpload records a freshly uploaded CSV file for user u inside a single
// transaction: it inserts the fileupload.file row, the companion
// fileupload.csv row (type t, zero rows committed yet), and enqueues a
// background import job. On success it returns an Upload carrying the new
// file's ID.
func NewUpload(ctx context.Context, u User, upload file.FileUpload, t enums.FileuploadCsvtype) (*Upload, error) {
	txn, err := db.PGInstance.BobDB.BeginTx(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("Failed to begin transaction: %w", err)
	}
	// Rollback after a successful Commit is a no-op, so this is safe on
	// every exit path.
	defer txn.Rollback(ctx)
	file, err := models.FileuploadFiles.Insert(&models.FileuploadFileSetter{
		ContentType:    omit.From(upload.ContentType),
		Created:        omit.From(time.Now()),
		CreatorID:      omit.From(int32(u.ID)),
		Deleted:        omitnull.FromPtr[time.Time](nil),
		Name:           omit.From(upload.Name),
		OrganizationID: omit.From(u.Organization.ID),
		Status:         omit.From(enums.FileuploadFilestatustypeUploaded),
		SizeBytes:      omit.From(int32(upload.SizeBytes)),
		FileUUID:       omit.From(upload.UUID),
	}).One(ctx, txn)
	if err != nil {
		return nil, fmt.Errorf("Failed to create file upload: %w", err)
	}
	_, err = models.FileuploadCSVS.Insert(&models.FileuploadCSVSetter{
		Committed: omitnull.FromPtr[time.Time](nil),
		FileID:    omit.From(file.ID),
		Rowcount:  omit.From(int32(0)),
		Type:      omit.From(t),
	}).One(ctx, txn)
	if err != nil {
		return nil, fmt.Errorf("Failed to create csv: %w", err)
	}
	log.Info().Int32("id", file.ID).Msg("Created new pool CSV upload")
	err = background.NewCSVImport(ctx, txn, file.ID)
	if err != nil {
		return nil, fmt.Errorf("background job create: %w", err)
	}
	// BUG FIX: the commit error was previously discarded, which could report
	// success for an upload that was never persisted.
	if err = txn.Commit(ctx); err != nil {
		return nil, fmt.Errorf("commit upload transaction: %w", err)
	}
	return &Upload{
		ID: file.ID,
	}, nil
}
// UploadCommit marks the upload identified by file_id (scoped to org) as
// "committing", records who committed it, and enqueues the background CSV
// commit job — all within one transaction.
func UploadCommit(ctx context.Context, org Organization, file_id int32, committer User) error {
	tx, err := db.PGInstance.BobDB.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("Failed to begin transaction: %w", err)
	}
	// Safe no-op once Commit has succeeded.
	defer tx.Rollback(ctx)
	// Both WHERE clauses must match: the org filter prevents committing a
	// file that belongs to another organization.
	update := psql.Update(
		um.Table(models.FileuploadFiles.Alias()),
		um.SetCol("status").ToArg("committing"),
		um.SetCol("committer").ToArg(committer.ID),
		um.Where(psql.Quote("id").EQ(psql.Arg(file_id))),
		um.Where(psql.Quote("organization_id").EQ(psql.Arg(org.ID))),
	)
	if _, err = update.Exec(ctx, tx); err != nil {
		return fmt.Errorf("update upload: %w", err)
	}
	if err = background.NewCSVCommit(ctx, tx, file_id); err != nil {
		return fmt.Errorf("background csv commit: %w", err)
	}
	return tx.Commit(ctx)
}
// UploadDiscard marks the upload identified by file_id (scoped to org) as
// "discarded". Runs as a single statement on the shared connection — no
// transaction is needed.
func UploadDiscard(ctx context.Context, org Organization, file_id int32) error {
	discard := psql.Update(
		um.Table(models.FileuploadFiles.Alias()),
		um.SetCol("status").ToArg("discarded"),
		um.Where(psql.Quote("id").EQ(psql.Arg(file_id))),
		um.Where(psql.Quote("organization_id").EQ(psql.Arg(org.ID))),
	)
	_, err := discard.Exec(ctx, db.PGInstance.BobDB)
	return err
}
// UploadSummaryList returns one UploadSummary per CSV upload belonging to
// org, newest first. Column aliases in the SELECT must match the `db` tags
// on UploadSummary, which scan.StructMapper uses to populate each row.
func UploadSummaryList(ctx context.Context, org Organization) ([]UploadSummary, error) {
	rows, err := bob.All(ctx, db.PGInstance.BobDB, psql.Select(
		sm.Columns(
			// fileupload.csv columns
			//"csv.file_id",
			//"csv.committed",
			"csv.rowcount AS recordcount",
			"csv.type_ AS type",
			// fileupload.file columns
			//"file.content_type",
			//"file.creator_id",
			//"file.deleted",
			"file.created AS created",
			"file.id AS id",
			"file.name AS filename",
			//"file.organization_id",
			"file.status AS status",
			//"file.size_bytes",
			//"file.file_uuid",
		),
		sm.From("fileupload.csv").As("csv"),
		sm.InnerJoin("fileupload.file").As("file").OnEQ(psql.Raw("csv.file_id"), psql.Raw("file.id")),
		sm.Where(psql.Quote("file", "organization_id").EQ(psql.Arg(org.ID))),
		sm.OrderBy("created").Desc(),
	), scan.StructMapper[UploadSummary]())
	if err != nil {
		// Return nil on error: the previous pre-allocated empty slice was
		// never used for anything else and a (T, error) result is
		// meaningless when the error is non-nil.
		return nil, fmt.Errorf("Failed to query pool upload rows: %w", err)
	}
	return rows, nil
}
// getUploadPoollistDetail builds the detail view for a pool-list style CSV
// upload: per-row pool data with any per-line parse errors attached, plus
// aggregate new/existing/outside counts.
func getUploadPoollistDetail(ctx context.Context, file *models.FileuploadFile) (*UploadPoolDetail, error) {
	file_errors, errors_by_line, err := errorsByLine(ctx, file)
	if err != nil {
		return nil, fmt.Errorf("get errors by line: %w", err)
	}
	pool_rows, err := models.FileuploadPools.Query(
		models.SelectWhere.FileuploadPools.CSVFile.EQ(file.ID),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query pools for %d: %w", file.ID, err)
	}
	pools := make([]UploadPoolRow, 0, len(pool_rows))
	count_existing := 0
	count_new := 0
	count_outside := 0
	for _, r := range pool_rows {
		// Classify the row. "outside" overrides new/existing for the row's
		// displayed status, but the row is still counted in both buckets.
		var status string
		if r.IsNew {
			count_new++
			status = "new"
		} else {
			count_existing++
			status = "existing"
		}
		if !r.IsInDistrict {
			count_outside++
			status = "outside"
		}
		tags := db.ConvertFromPGData(r.Tags)
		// Errors are looked up by the row's stored LineNumber with no offset.
		// NOTE(review): a stale comment here used to describe a "+2"
		// header/1-indexing adjustment that this code does not perform —
		// confirm errorsByLine keys use the same numbering as r.LineNumber.
		errors, ok := errors_by_line[r.LineNumber]
		if !ok {
			// Emit an empty (non-nil) slice so the JSON field is [] not null.
			errors = []UploadPoolError{}
		}
		pools = append(pools, UploadPoolRow{
			Address: types.Address{
				Country:    "usa",
				Locality:   r.AddressLocality,
				Number:     r.AddressNumber,
				PostalCode: r.AddressPostalCode,
				Region:     r.AddressRegion,
				Street:     r.AddressStreet,
			},
			Condition: r.Condition.String(),
			Errors:    errors,
			Status:    status,
			Tags:      tags,
		})
	}
	log.Debug().Str("status", file.Status.String()).Int32("id", file.ID).Msg("returning")
	return &UploadPoolDetail{
		Count: UploadPoolDetailCount{
			Existing: count_existing,
			Outside:  count_outside,
			New:      count_new,
		},
		Errors: file_errors,
		ID:     file.ID,
		Name:   file.Name,
		Pools:  pools,
		Status: file.Status.String(),
	}, nil
}