Massive rework of platform layer user/organization

The goal of this rework is to make it so I can pass around platform.User
instead of a pair of models.Organization and models.User. This is useful
for reasons I kind of forget now, but it started with working on
notifications and ballooned massively from there into refactoring a
number of things that were bugging me.

This also includes a tiny amount of work on server-sent events (SSE).

 * background stuff lives inside the platform now, which I need for
   having it push updates through SSE
 * userfile now lives in the platform, under file, so other platform
   functions can safely use it
 * oauth is broken into pieces and inside platform because other stuff
   was calling it already, but badly.
 * notifications go into the platform as well
This commit is contained in:
Eli Ribble 2026-03-12 23:49:16 +00:00
parent 32dcc50c94
commit 44c4f17f32
No known key found for this signature in database
85 changed files with 1492 additions and 1384 deletions

1
platform/audio.go Normal file
View file

@ -0,0 +1 @@
package platform

14
platform/background.go Normal file
View file

@ -0,0 +1,14 @@
package platform
import (
"context"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
)
// BackgroundStart launches the platform's background workers, using ctx
// for cancellation. It delegates to the internal background package.
func BackgroundStart(ctx context.Context) {
	background.Start(ctx)
}
// BackgroundWaitForExit blocks until every background worker started by
// BackgroundStart has finished.
func BackgroundWaitForExit() {
	background.WaitForExit()
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,71 @@
package background
import (
"context"
"fmt"
"github.com/Gleipnir-Technology/nidus-sync/platform/subprocess"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// jobAudio represents a job to process an audio file.
type jobAudio struct {
	// AudioUUID identifies the audio file to process.
	AudioUUID uuid.UUID
}

// channelJobAudio carries queued audio jobs to the audio worker.
var channelJobAudio chan jobAudio
// AudioTranscode enqueues a background job to process (normalize and
// transcode) the audio file identified by audioUUID.
func AudioTranscode(audioUUID uuid.UUID) {
	// Renamed from audio_uuid: Go naming uses mixedCaps, not underscores.
	enqueueAudioJob(jobAudio{
		AudioUUID: audioUUID,
	})
}
// startWorkerAudio starts a goroutine that consumes audio jobs from
// audioJobChannel until ctx is cancelled.
func startWorkerAudio(ctx context.Context, audioJobChannel chan jobAudio) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				log.Info().Msg("Audio worker shutting down.")
				return
			case job := <-audioJobChannel:
				log.Info().Str("uuid", job.AudioUUID.String()).Msg("Processing audio job")
				// Failures are logged and the job is dropped; there is no retry.
				err := processAudioFile(job.AudioUUID)
				if err != nil {
					log.Error().Err(err).Str("uuid", job.AudioUUID.String()).Msg("Error processing audio file")
				}
			}
		}
	}()
}
// enqueueAudioJob sends an audio processing job to the worker without
// blocking; if the channel is full the job is dropped with a warning.
func enqueueAudioJob(job jobAudio) {
	select {
	case channelJobAudio <- job:
		log.Info().Str("uuid", job.AudioUUID.String()).Msg("Enqueued audio job")
	default:
		log.Warn().Str("uuid", job.AudioUUID.String()).Msg("Audio job channel is full, dropping job")
	}
}
// processAudioFile normalizes the audio identified by audioUUID,
// transcodes it to OGG, and then enqueues a label-studio task for it.
// It returns an error if either subprocess step fails.
func processAudioFile(audioUUID uuid.UUID) error {
	// Normalize loudness first so the transcoded output is consistent.
	if err := subprocess.NormalizeAudio(audioUUID); err != nil {
		// Wrap with %w (not %v) so callers can unwrap the cause.
		return fmt.Errorf("normalize audio %s: %w", audioUUID, err)
	}
	// Transcode to OGG.
	if err := subprocess.TranscodeToOgg(audioUUID); err != nil {
		return fmt.Errorf("transcode audio %s to OGG: %w", audioUUID, err)
	}
	enqueueLabelStudioJob(jobLabelStudio{
		UUID: audioUUID,
	})
	return nil
}

View file

@ -0,0 +1,84 @@
package background
import (
"context"
"fmt"
"sync"
//commsemail "github.com/Gleipnir-Technology/nidus-sync/comms/email"
//"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/Gleipnir-Technology/nidus-sync/platform/email"
"github.com/Gleipnir-Technology/nidus-sync/platform/text"
"github.com/rs/zerolog/log"
)
// waitGroup tracks all long-running background goroutines so WaitForExit
// can block until they have finished.
var waitGroup sync.WaitGroup

// Start allocates the job channels, launches all background workers
// (fieldseeker refresh, audio, CSV, email, text), and re-enqueues jobs
// that were left waiting when the server last stopped. Workers run
// until ctx is cancelled.
func Start(ctx context.Context) {
	// Channels must exist before any worker or enqueue call runs.
	newOAuthTokenChannel = make(chan struct{}, 10)
	channelJobAudio = make(chan jobAudio, 100) // Buffered channel to prevent blocking
	channelJobCSV = make(chan jobCSV, 100)     // Buffered channel to prevent blocking
	channelJobEmail = make(chan email.Job, 100) // Buffered channel to prevent blocking
	channelJobText = make(chan text.Job, 100)   // Buffered channel to prevent blocking
	/*
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			commsemail.StartWebsocket(ctx, config.ForwardEmailAPIToken)
		}()
	*/
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		refreshFieldseekerData(ctx, newOAuthTokenChannel)
	}()
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		startWorkerAudio(ctx, channelJobAudio)
	}()
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		startWorkerCSV(ctx, channelJobCSV)
	}()
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		startWorkerEmail(ctx, channelJobEmail)
	}()
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		startWorkerText(ctx, channelJobText)
	}()
	// Re-enqueue work that was pending at last shutdown; a failure here is
	// logged but does not prevent the workers from running.
	err := addWaitingJobs(ctx)
	if err != nil {
		log.Error().Err(err).Msg("Failed to add waiting background jobs")
	}
}
// WaitForExit blocks until all background goroutines started by Start
// have returned.
func WaitForExit() {
	waitGroup.Wait()
}
// addWaitingJobs re-enqueues CSV jobs (commits and imports) that were
// still pending when the server last stopped.
func addWaitingJobs(ctx context.Context) error {
	if err := addWaitingJobsCommit(ctx); err != nil {
		return fmt.Errorf("commit: %w", err)
	}
	if err := addWaitingJobsImport(ctx); err != nil {
		// Bug fix: this wrapped the import error as "commit" (copy-paste).
		return fmt.Errorf("import: %w", err)
	}
	return nil
}

View file

@ -0,0 +1,44 @@
package background
import (
"context"
"github.com/Gleipnir-Technology/nidus-sync/platform/email"
"github.com/rs/zerolog/log"
)
// channelJobEmail carries queued email jobs to the email worker.
var channelJobEmail chan email.Job

// ReportSubscriptionConfirmationEmail enqueues an email to destination
// confirming a notification subscription for the report identified by
// reportID.
func ReportSubscriptionConfirmationEmail(destination, reportID string) {
	enqueueJobEmail(email.NewJobReportNotificationConfirmation(
		destination,
		reportID,
	))
}
// enqueueJobEmail hands job to the email worker without blocking; when
// the queue is full the job is dropped and a warning is logged.
func enqueueJobEmail(job email.Job) {
	select {
	case channelJobEmail <- job:
		// Delivered to the worker queue; nothing more to do here.
	default:
		log.Warn().Msg("email job channel is full, dropping job")
	}
}
// startWorkerEmail starts a goroutine that consumes email jobs from
// channel and hands them to the email package until ctx is cancelled.
func startWorkerEmail(ctx context.Context, channel chan email.Job) {
	go func() {
		log.Debug().Msg("Email worker started")
		for {
			select {
			case <-ctx.Done():
				log.Info().Msg("Email worker shutting down.")
				return
			case job := <-channel:
				// Failures are logged and the job is dropped; no retry.
				err := email.Handle(ctx, job)
				if err != nil {
					log.Error().Err(err).Msg("Failed to handle email message")
				}
			}
		}
	}()
}

View file

@ -0,0 +1,228 @@
package background
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/label-studio"
"github.com/Gleipnir-Technology/nidus-sync/minio"
"github.com/google/uuid"
)
// jobLabelStudio is a job to ensure a label-studio task exists for the
// audio note identified by UUID.
type jobLabelStudio struct {
	UUID uuid.UUID
}

// channelJobLabelStudio carries queued label-studio jobs to the worker.
var channelJobLabelStudio chan jobLabelStudio
// enqueueLabelStudioJob sends job to the label-studio worker without
// blocking; if the channel is full the job is dropped with a warning.
func enqueueLabelStudioJob(job jobLabelStudio) {
	select {
	case channelJobLabelStudio <- job:
		log.Printf("Enqueued label job for UUID: %s", job.UUID)
	default:
		log.Printf("Label job channel is full, dropping job for UUID: %s", job.UUID)
	}
}
// StartLabelStudioWorker connects to minio and label studio, finds the
// transcription project, then starts a goroutine that creates
// label-studio tasks for queued audio notes until ctx is cancelled.
// It returns an error when either client cannot be created or the
// project cannot be found.
func StartLabelStudioWorker(ctx context.Context) error {
	// Initialize the minio client
	minioBucket := os.Getenv("S3_BUCKET")
	labelStudioClient, err := createLabelStudioClient()
	if err != nil {
		return fmt.Errorf("create label studio client: %w", err)
	}
	// Get the project we are going to upload to
	project, err := findLabelStudioProject(labelStudioClient, "Nidus Speech-to-Text Transcriptions")
	if err != nil {
		// Wrap the cause instead of discarding it (was errors.New(fmt.Sprintf)).
		return fmt.Errorf("find the label studio project: %w", err)
	}
	minioClient, err := createMinioClient()
	if err != nil {
		return fmt.Errorf("create minio client: %w", err)
	}
	buffer := 100
	channelJobLabelStudio = make(chan jobLabelStudio, buffer) // Buffered channel to prevent blocking
	log.Printf("Started label studio worker with buffer depth %d", buffer)
	go func() {
		for {
			select {
			case <-ctx.Done():
				// Bug fix: this log line said "Audio worker" (copy-paste).
				log.Println("Label studio worker shutting down.")
				return
			case job := <-channelJobLabelStudio:
				log.Printf("Processing label job for UUID: %s", job.UUID)
				err := processLabelTask(ctx, minioClient, minioBucket, labelStudioClient, project, job)
				if err != nil {
					log.Printf("Error processing label job for audio file %s: %v", job.UUID, err)
				}
			}
		}
	}()
	return nil
}
// createMinioClient builds a minio client from the S3_BASE_URL,
// S3_ACCESS_KEY_ID, and S3_SECRET_ACCESS_KEY environment variables.
func createMinioClient() (*minio.Client, error) {
	baseUrl := os.Getenv("S3_BASE_URL")
	accessKeyID := os.Getenv("S3_ACCESS_KEY_ID")
	secretAccessKey := os.Getenv("S3_SECRET_ACCESS_KEY")
	client, err := minio.NewClient(baseUrl, accessKeyID, secretAccessKey)
	if err != nil {
		return nil, err
	}
	log.Println("Created minio client")
	// Return an explicit nil: err is known to be nil at this point.
	return client, nil
}
// createLabelStudioClient builds a label-studio client from the
// LABEL_STUDIO_BASE_URL and LABEL_STUDIO_API_KEY environment variables
// and fetches an access token before returning.
func createLabelStudioClient() (*labelstudio.Client, error) {
	// Initialize the client with your Label Studio base URL and API key
	labelStudioApiKey := os.Getenv("LABEL_STUDIO_API_KEY")
	labelStudioBaseUrl := os.Getenv("LABEL_STUDIO_BASE_URL")
	labelStudioClient := labelstudio.NewClient(labelStudioBaseUrl, labelStudioApiKey)
	log.Println("Created label studio client")
	// Get and store the access token
	err := labelStudioClient.GetAccessToken()
	if err != nil {
		// fmt.Errorf with %w replaces errors.New(fmt.Sprintf(...)) so the
		// cause is preserved for errors.Is/As.
		return nil, fmt.Errorf("get access token: %w", err)
	}
	log.Println("Got label studio client access token")
	return labelStudioClient, nil
}
// noteAudioGetLatest should return the latest version of the audio note
// identified by uuid. It is currently a stub; it returns an explicit
// error instead of (nil, nil) so callers do not dereference a nil note.
func noteAudioGetLatest(ctx context.Context, uuid string) (*models.NoteAudio, error) {
	return nil, fmt.Errorf("noteAudioGetLatest not implemented (note %s)", uuid)
}
func processLabelTask(ctx context.Context, minioClient *minio.Client, minioBucket string, labelStudioClient *labelstudio.Client, project *labelstudio.Project, job jobLabelStudio) error {
customer := os.Getenv("CUSTOMER")
if customer == "" {
return errors.New("You must specify a CUSTOMER env var")
}
note, err := noteAudioGetLatest(ctx, job.UUID.String())
if err != nil {
return errors.New(fmt.Sprintf("Failed to get note %s", note.UUID))
}
if note.Version != 1 {
return errors.New(fmt.Sprintf("Got version %d of %s", note.Version, note.UUID))
}
task, err := findMatchingTask(labelStudioClient, project, customer, note)
if err != nil {
return errors.New(fmt.Sprintf("Failed to search for a task: %v", err))
}
// We already have a task, nothing to do.
if task != nil {
return nil
}
err = createTask(labelStudioClient, project, minioClient, minioBucket, customer, note)
if err != nil {
return errors.New(fmt.Sprintf("Failed to create a task: %v", err))
}
return nil
}
// createTask uploads the note's normalized audio to the bucket (skipped
// when the object already exists) and imports a transcription task into
// the label-studio project, tagged with the customer and note UUID.
func createTask(client *labelstudio.Client, project *labelstudio.Project, minioClient *minio.Client, bucket string, customer string, note *models.NoteAudio) error {
	audioRef := fmt.Sprintf("s3://%s/%s-normalized.m4a", bucket, note.UUID)
	audioFile := fmt.Sprintf("%s/user/%s-normalized.m4a", config.FilesDirectory, note.UUID)
	uploadPath := fmt.Sprintf("%s-normalized.m4a", note.UUID)
	if !minioClient.ObjectExists(bucket, uploadPath) {
		err := minioClient.UploadFile(bucket, audioFile, uploadPath)
		if err != nil {
			return fmt.Errorf("Failed to upload audio: %v", err)
		}
	}
	// GetOr collapses the commented-out IsValue/MustGet dance.
	transcription := note.Transcription.GetOr("")
	simpleTasks := []map[string]interface{}{
		{
			"data": map[string]string{
				"audio":         audioRef,
				"note_uuid":     note.UUID.String(),
				"transcription": transcription,
			},
			"meta": map[string]string{
				"customer":  customer,
				"note_uuid": note.UUID.String(),
			},
		},
	}
	_, err := client.ImportTasks(project.ID, simpleTasks)
	if err != nil {
		// Bug fix: log.Fatalf here killed the entire process on a failed
		// import; return the error to the worker instead.
		return fmt.Errorf("import tasks: %w", err)
	}
	log.Printf("Created task for note audio %s", note.UUID)
	return nil
}
// findLabelStudioProject returns the label-studio project whose title
// equals title, or an error when the project list cannot be fetched or
// no project matches.
func findLabelStudioProject(client *labelstudio.Client, title string) (*labelstudio.Project, error) {
	// Attempt to get live projects
	projects, err := client.Projects()
	if err != nil {
		// Bug fix: log.Fatalf here killed the whole process; return the
		// error so the caller can handle it.
		return nil, fmt.Errorf("get projects: %w", err)
	}
	log.Printf("Found %d projects", projects.Count)
	for i := range projects.Results {
		// Index into the slice rather than taking the address of the range
		// variable (avoids exporting a pointer to the loop variable).
		p := &projects.Results[i]
		log.Printf("%d. %s (ID: %d) - Tasks: %d", i+1, p.Title, p.ID, p.TaskNumber)
		if p.Title == title {
			return p, nil
		}
	}
	return nil, fmt.Errorf("No such project '%s'", title)
}
// findMatchingTask searches project for a task whose data.note_uuid
// matches the note. It returns (nil, nil) when no task exists, the task
// when exactly one matches, and an error when the query fails or more
// than one task matches.
// NOTE(review): customer is currently unused; the commented-out meta map
// suggests it was intended for metadata matching — confirm.
func findMatchingTask(client *labelstudio.Client, project *labelstudio.Project, customer string, note *models.NoteAudio) (*labelstudio.Task, error) {
	items := []map[string]interface{}{
		{"filter": "filter:tasks:data.note_uuid", "operator": "equal", "type": "string", "value": note.UUID},
	}
	filters := map[string]interface{}{
		"conjunction": "and",
		"items":       items,
	}
	query := map[string]interface{}{
		"filters": filters,
	}
	queryStr, err := json.Marshal(query)
	if err != nil {
		return nil, fmt.Errorf("marshal query JSON: %w", err)
	}
	// Query the tasks restricted to this project.
	options := &labelstudio.TasksListOptions{
		ProjectID: project.ID,
		Query:     string(queryStr),
	}
	tasksResponse, err := client.ListTasks(options)
	if err != nil {
		return nil, fmt.Errorf("get tasks: %w", err)
	}
	switch len(tasksResponse.Tasks) {
	case 0:
		return nil, nil
	case 1:
		return &tasksResponse.Tasks[0], nil
	default:
		return nil, fmt.Errorf("Got too many tasks: %d", len(tasksResponse.Tasks))
	}
}

View file

@ -0,0 +1,165 @@
package background
import (
"context"
"fmt"
"github.com/Gleipnir-Technology/bob"
"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/bob/dialect/psql/dialect"
"github.com/Gleipnir-Technology/bob/dialect/psql/dm"
"github.com/Gleipnir-Technology/bob/dialect/psql/im"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/h3utils"
"github.com/rs/zerolog/log"
"github.com/uber/h3-go/v4"
)
// updateSummaryTables recomputes every H3 aggregation summary table
// (mosquito sources, service requests, traps) for the organization.
func updateSummaryTables(ctx context.Context, org *models.Organization) {
	updateSummaryMosquitoSource(ctx, org)
	updateSummaryServiceRequest(ctx, org)
	updateSummaryTrap(ctx, org)
}
// aggregateAtResolution rebuilds the h3_aggregation rows of the given
// type for one organization at one H3 resolution: it buckets cells by
// their parent cell at that resolution, deletes the previous rows, and
// inserts the new per-cell counts with a PostGIS polygon geometry.
func aggregateAtResolution(ctx context.Context, resolution int, org_id int32, type_ enums.H3aggregationtype, cells []h3.Cell) error {
	var err error
	log.Info().Int("resolution", resolution).Str("type", string(type_)).Msg("Working summary layer")
	// Count how many input cells fall under each parent at this resolution.
	cellToCount := make(map[h3.Cell]int, 0)
	for _, cell := range cells {
		scaled, err := cell.Parent(resolution)
		if err != nil {
			log.Error().Err(err).Int("resolution", resolution).Msg("Failed to get cell's parent at resolution")
			continue
		}
		cellToCount[scaled] = cellToCount[scaled] + 1
	}
	// Remove the previous aggregation for this org/resolution/type before
	// inserting fresh counts.
	_, err = models.H3Aggregations.Delete(
		dm.Where(
			psql.And(
				models.H3Aggregations.Columns.OrganizationID.EQ(psql.Arg(org_id)),
				models.H3Aggregations.Columns.Resolution.EQ(psql.Arg(resolution)),
				models.H3Aggregations.Columns.Type.EQ(psql.Arg(type_)),
			),
		),
	).Exec(ctx, db.PGInstance.BobDB)
	if err != nil {
		return fmt.Errorf("Failed to clear previous aggregation: %w", err)
	}
	// Build a single multi-row INSERT with one VALUES clause per cell.
	var to_insert []bob.Mod[*dialect.InsertQuery] = make([]bob.Mod[*dialect.InsertQuery], 0)
	to_insert = append(to_insert, im.Into("h3_aggregation", "cell", "resolution", "count_", "type_", "organization_id", "geometry"))
	for cell, count := range cellToCount {
		polygon, err := h3utils.CellToPostgisGeometry(cell)
		if err != nil {
			log.Error().Err(err).Msg("Failed to get PostGIS geometry")
			continue
		}
		// log.Info().Str("polygon", polygon).Msg("Going to insert")
		to_insert = append(to_insert, im.Values(psql.Arg(cell.String(), resolution, count, type_, org_id), psql.F("st_geomfromtext", psql.S(polygon), 4326)))
	}
	// Upsert on conflict in case a row reappeared between delete and insert.
	to_insert = append(to_insert, im.OnConflict("cell, organization_id, type_").DoUpdate(
		im.SetCol("count_").To(psql.Raw("EXCLUDED.count_")),
	))
	//log.Info().Str("sql", insertQueryToString(psql.Insert(to_insert...))).Msg("Updating...")
	_, err = psql.Insert(to_insert...).Exec(ctx, db.PGInstance.BobDB)
	if err != nil {
		return fmt.Errorf("Failed to add h3 aggregation: %w", err)
	}
	return nil
}
// updateSummaryMosquitoSource rebuilds the mosquito-source H3
// aggregation for org at resolutions 0-15 from its point locations.
// Errors are logged rather than returned; a failed resolution does not
// stop the others.
func updateSummaryMosquitoSource(ctx context.Context, org *models.Organization) {
	point_locations, err := org.Pointlocations().All(ctx, db.PGInstance.BobDB)
	if err != nil {
		log.Error().Err(err).Msg("Failed to get all point locations")
		return
	}
	if len(point_locations) == 0 {
		log.Info().Int("org_id", int(org.ID)).Msg("No updates to perform")
		return
	}
	// Collect the H3 cell of every location that has one.
	cells := make([]h3.Cell, 0)
	for _, p := range point_locations {
		if p.H3cell.IsNull() {
			continue
		}
		cell, err := h3utils.ToCell(p.H3cell.MustGet())
		if err != nil {
			log.Error().Err(err).Msg("Failed to get geometry point")
			continue
		}
		cells = append(cells, cell)
	}
	for i := range 16 {
		err = aggregateAtResolution(ctx, i, org.ID, enums.H3aggregationtypeMosquitosource, cells)
		if err != nil {
			log.Error().Err(err).Int("resolution", i).Msg("Failed to aggregate mosquito source")
		}
	}
}
// updateSummaryServiceRequest rebuilds the service-request H3
// aggregation for org at resolutions 0-15 from its service requests.
// Errors are logged rather than returned.
func updateSummaryServiceRequest(ctx context.Context, org *models.Organization) {
	service_requests, err := org.Servicerequests().All(ctx, db.PGInstance.BobDB)
	if err != nil {
		log.Error().Err(err).Msg("Failed to get all service requests")
		return
	}
	if len(service_requests) == 0 {
		log.Info().Int("org_id", int(org.ID)).Msg("No updates to perform")
		return
	}
	// Collect the H3 cell of every request that has one.
	cells := make([]h3.Cell, 0)
	for _, p := range service_requests {
		if p.H3cell.IsNull() {
			continue
		}
		cell, err := h3utils.ToCell(p.H3cell.MustGet())
		if err != nil {
			log.Error().Err(err).Msg("Failed to get geometry point")
			continue
		}
		cells = append(cells, cell)
	}
	for i := range 16 {
		err = aggregateAtResolution(ctx, i, org.ID, enums.H3aggregationtypeServicerequest, cells)
		if err != nil {
			log.Error().Err(err).Int("resolution", i).Msg("Failed to aggregate service request")
		}
	}
}
// updateSummaryTrap rebuilds the trap H3 aggregation for org at
// resolutions 0-15 from its trap locations. Errors are logged rather
// than returned.
func updateSummaryTrap(ctx context.Context, org *models.Organization) {
	trapLocations, err := org.Traplocations().All(ctx, db.PGInstance.BobDB)
	if err != nil {
		log.Error().Err(err).Msg("Failed to get all trap locations")
		return
	}
	if len(trapLocations) == 0 {
		log.Info().Int("org_id", int(org.ID)).Msg("No updates to perform")
		return
	}
	// Collect the H3 cell of every trap location that has one.
	cells := make([]h3.Cell, 0)
	for _, location := range trapLocations {
		if location.H3cell.IsNull() {
			continue
		}
		cell, convErr := h3utils.ToCell(location.H3cell.MustGet())
		if convErr != nil {
			log.Error().Err(convErr).Msg("Failed to get geometry point")
			continue
		}
		cells = append(cells, cell)
	}
	// Aggregate at every H3 resolution from 0 through 15.
	for resolution := 0; resolution < 16; resolution++ {
		if aggErr := aggregateAtResolution(ctx, resolution, org.ID, enums.H3aggregationtypeTrap, cells); aggErr != nil {
			log.Error().Err(aggErr).Int("resolution", resolution).Msg("Failed to aggregate trap")
		}
	}
}

View file

@ -0,0 +1,43 @@
package background
import (
"context"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/Gleipnir-Technology/nidus-sync/platform/text"
"github.com/rs/zerolog/log"
)
// channelJobText carries queued text (SMS) jobs to the text worker.
var channelJobText chan text.Job

// ReportSubscriptionConfirmationText enqueues an SMS to destination
// confirming a subscription to the report identified by reportID, sent
// from the configured report phone number.
func ReportSubscriptionConfirmationText(destination text.E164, reportID string) {
	enqueueJobText(text.NewJobReportSubscriptionConfirmation(
		destination,
		reportID,
		config.PhoneNumberReport,
	))
}
// enqueueJobText sends job to the text worker without blocking; if the
// channel is full the job is dropped with a warning.
func enqueueJobText(job text.Job) {
	select {
	case channelJobText <- job:
		log.Info().Msg("Enqueued text job")
	default:
		log.Warn().Msg("sms job channel is full, dropping job")
	}
}
// startWorkerText starts a goroutine that consumes text jobs from
// channel and hands them to the text package until ctx is cancelled.
func startWorkerText(ctx context.Context, channel chan text.Job) {
	go func() {
		log.Debug().Msg("Text worker started")
		for {
			select {
			case <-ctx.Done():
				log.Info().Msg("Text worker shutting down.")
				return
			case job := <-channel:
				// NOTE(review): unlike the email worker, nothing is done with
				// any failure from text.Handle here — confirm it cannot fail
				// or that it logs its own errors.
				text.Handle(ctx, job)
			}
		}
	}()
}

View file

@ -0,0 +1,123 @@
package background
import (
"context"
"fmt"
"github.com/Gleipnir-Technology/bob"
"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/platform/csv"
//"github.com/Gleipnir-Technology/nidus-sync/userfile"
//"github.com/google/uuid"
"github.com/rs/zerolog/log"
"github.com/stephenafamo/scan"
)
// jobCSVAction selects what the CSV worker does with a file.
// Declared as a defined type (not an alias to int) so the constants are
// type-checked and cannot be mixed with arbitrary ints.
type jobCSVAction int

const (
	// jobCSVActionCommit commits a previously imported CSV upload.
	jobCSVActionCommit jobCSVAction = iota
	// jobCSVActionImport imports a freshly uploaded CSV file.
	jobCSVActionImport
)

// jobCSV is one unit of CSV work for the worker.
type jobCSV struct {
	action  jobCSVAction
	csvType enums.FileuploadCsvtype
	fileID  int32
}

// channelJobCSV carries queued CSV jobs to the CSV worker.
var channelJobCSV chan jobCSV
// CommitUpload enqueues a job to commit the uploaded CSV file
// identified by fileID.
func CommitUpload(fileID int32) {
	enqueueJobCSV(jobCSV{
		action: jobCSVActionCommit,
		fileID: fileID,
	})
}
// ProcessUpload enqueues a job to import the uploaded CSV file
// identified by fileID, parsed as CSV type t.
func ProcessUpload(fileID int32, t enums.FileuploadCsvtype) {
	enqueueJobCSV(jobCSV{
		action:  jobCSVActionImport,
		csvType: t,
		fileID:  fileID,
	})
}
// addWaitingJobsCommit re-enqueues commit jobs for files left in the
// committing state.
func addWaitingJobsCommit(ctx context.Context) error {
	return addWaitingJobsForType(ctx, enums.FileuploadFilestatustypeCommitting, jobCSVActionCommit)
}
// addWaitingJobsImport re-enqueues import jobs for files left in the
// uploaded state.
func addWaitingJobsImport(ctx context.Context) error {
	return addWaitingJobsForType(ctx, enums.FileuploadFilestatustypeUploaded, jobCSVActionImport)
}
// addWaitingJobsForType enqueues a CSV job with the given action for
// every uploaded file currently in the given status, joining the file
// row to its CSV row to recover the CSV type.
func addWaitingJobsForType(ctx context.Context, status enums.FileuploadFilestatustype, action jobCSVAction) error {
	type Row_ struct {
		ID   int32                   `db:"id"`
		Type enums.FileuploadCsvtype `db:"type"`
	}
	rows, err := bob.All(ctx, db.PGInstance.BobDB, psql.Select(
		sm.Columns(
			"file.id AS id",
			"csv.type_ AS type",
		),
		sm.From("fileupload.file").As("file"),
		sm.InnerJoin("fileupload.csv").As("csv").OnEQ(psql.Raw("file.id"), psql.Raw("csv.file_id")),
		sm.Where(
			psql.Raw("file.status").EQ(psql.Arg(status)),
		),
	), scan.StructMapper[Row_]())
	if err != nil {
		return fmt.Errorf("Failed to query file uploads: %w", err)
	}
	for _, row := range rows {
		// row.ID is a file id (the misleading local name report_id is gone).
		enqueueJobCSV(jobCSV{
			action:  action,
			fileID:  row.ID,
			csvType: row.Type,
		})
	}
	return nil
}
// enqueueJobCSV sends job to the CSV worker without blocking; if the
// channel is full the job is dropped with a warning.
func enqueueJobCSV(job jobCSV) {
	select {
	case channelJobCSV <- job:
		log.Info().Int32("file_id", job.fileID).Msg("Enqueued csv job")
	default:
		log.Warn().Int32("file_id", job.fileID).Msg("csv channel is full, dropping job")
	}
}
// startWorkerCSV starts a goroutine that consumes CSV jobs from
// channelJobImport, dispatching on the job action (commit or import)
// until ctx is cancelled.
func startWorkerCSV(ctx context.Context, channelJobImport chan jobCSV) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				log.Info().Msg("CSV worker shutting down.")
				return
			case job := <-channelJobImport:
				switch job.action {
				case jobCSVActionCommit:
					log.Info().Int32("id", job.fileID).Msg("Processing CSV commit job")
					err := csv.JobCommit(ctx, job.fileID)
					if err != nil {
						log.Error().Err(err).Int32("id", job.fileID).Msg("Error processing CSV file")
						continue
					}
				case jobCSVActionImport:
					log.Info().Int32("id", job.fileID).Msg("Processing CSV import job")
					err := csv.JobImport(ctx, job.fileID, job.csvType)
					if err != nil {
						log.Error().Err(err).Int32("id", job.fileID).Msg("Error processing CSV file")
						continue
					}
				default:
					// NOTE(review): an unknown action terminates the whole
					// worker (return), not just this job — confirm intended.
					log.Error().Msg("Unrecognized job action")
					return
				}
				log.Info().Int32("id", job.fileID).Msg("Done processing CSV job")
			}
		}
	}()
}

24
platform/communication.go Normal file
View file

@ -0,0 +1,24 @@
package platform
import (
"context"
"fmt"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/platform/publicreport"
)
// NotificationCount returns the total number of public-report
// notifications (nuisance plus water reports) for the organization.
// NOTE(review): user is currently unused; presumably intended for
// per-user filtering — confirm before relying on it.
func NotificationCount(ctx context.Context, org *models.Organization, user *models.User) (result uint, err error) {
	count_nreports, err := publicreport.NuisanceReportForOrganizationCount(ctx, org.ID)
	if err != nil {
		return 0, fmt.Errorf("nuisance report query: %w", err)
	}
	result += count_nreports
	count_wreports, err := publicreport.WaterReportForOrganizationCount(ctx, org.ID)
	if err != nil {
		return 0, fmt.Errorf("water report query: %w", err)
	}
	result += count_wreports
	return result, nil
}

View file

@ -113,7 +113,6 @@ func JobCommit(ctx context.Context, file_id int32) error {
feature, err = models.Features.Query(
models.SelectWhere.Features.OrganizationID.EQ(org.ID),
models.SelectWhere.Features.SiteID.EQ(site.ID),
models.SelectWhere.Features.SiteVersion.EQ(site.Version),
).One(ctx, txn)
if err != nil {
if err.Error() != "sql: no rows in result set" {
@ -125,7 +124,6 @@ func JobCommit(ctx context.Context, file_id int32) error {
//ID: row.Address,
OrganizationID: omit.From(org.ID),
SiteID: omit.From(site.ID),
SiteVersion: omit.From(site.Version),
}).One(ctx, txn)
if err != nil {
return fmt.Errorf("insert feature: %w", err)

View file

@ -16,8 +16,8 @@ import (
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/h3utils"
"github.com/Gleipnir-Technology/nidus-sync/platform/file"
"github.com/Gleipnir-Technology/nidus-sync/platform/geom"
"github.com/Gleipnir-Technology/nidus-sync/userfile"
"github.com/aarondl/opt/omit"
"github.com/aarondl/opt/omitnull"
"github.com/rs/zerolog/log"
@ -80,19 +80,19 @@ var parseCSVFlyover = makeParseCSV(
)
type insertModelFunc[ModelType any, HeaderType Enum] = func(context.Context, bob.Tx, *models.FileuploadFile, *models.FileuploadCSV, int32, []HeaderType, []string, []string) (ModelType, error)
type parseCSVFunc[ModelType any] = func(ctx context.Context, txn bob.Tx, file *models.FileuploadFile, c *models.FileuploadCSV) ([]ModelType, error)
type parseCSVFunc[ModelType any] = func(ctx context.Context, txn bob.Tx, f *models.FileuploadFile, c *models.FileuploadCSV) ([]ModelType, error)
func makeParseCSV[ModelType any, HeaderType Enum](parseHeader parseHeaderFunc[HeaderType], insertModel insertModelFunc[ModelType, HeaderType]) parseCSVFunc[ModelType] {
return func(ctx context.Context, txn bob.Tx, file *models.FileuploadFile, c *models.FileuploadCSV) ([]ModelType, error) {
return func(ctx context.Context, txn bob.Tx, f *models.FileuploadFile, c *models.FileuploadCSV) ([]ModelType, error) {
rows := make([]ModelType, 0)
r, err := userfile.NewFileReader(userfile.CollectionCSV, file.FileUUID)
r, err := file.NewFileReader(file.CollectionCSV, f.FileUUID)
if err != nil {
return rows, fmt.Errorf("Failed to get filereader for %d: %w", file.ID, err)
return rows, fmt.Errorf("Failed to get filereader for %d: %w", f.ID, err)
}
reader := csv.NewReader(r)
h, err := reader.Read()
if err != nil {
return rows, fmt.Errorf("Failed to read header of CSV for file %d: %w", file.ID, err)
return rows, fmt.Errorf("Failed to read header of CSV for file %d: %w", f.ID, err)
}
header_types, header_names := parseHeader(h)
/*
@ -114,9 +114,9 @@ func makeParseCSV[ModelType any, HeaderType Enum](parseHeader parseHeaderFunc[He
if err == io.EOF {
return rows, nil
}
return rows, fmt.Errorf("Failed to read all CSV records for file %d: %w", file.ID, err)
return rows, fmt.Errorf("Failed to read all CSV records for file %d: %w", f.ID, err)
}
m, err := insertModel(ctx, txn, file, c, line_number, header_types, header_names, row)
m, err := insertModel(ctx, txn, f, c, line_number, header_types, header_names, row)
if err != nil {
return rows, fmt.Errorf("insert models: %w", err)
}

View file

@ -16,12 +16,12 @@ import (
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/platform/file"
"github.com/Gleipnir-Technology/nidus-sync/platform/geocode"
"github.com/Gleipnir-Technology/nidus-sync/platform/geom"
"github.com/Gleipnir-Technology/nidus-sync/platform/text"
"github.com/Gleipnir-Technology/nidus-sync/platform/types"
"github.com/Gleipnir-Technology/nidus-sync/stadia"
"github.com/Gleipnir-Technology/nidus-sync/userfile"
"github.com/aarondl/opt/omit"
"github.com/aarondl/opt/omitnull"
"github.com/rs/zerolog/log"
@ -155,22 +155,22 @@ func geocodePool(ctx context.Context, txn bob.Tx, client *stadia.StadiaMaps, job
}
return nil
}
func parseCSVPoollist(ctx context.Context, txn bob.Tx, file *models.FileuploadFile, c *models.FileuploadCSV) ([]*models.FileuploadPool, error) {
func parseCSVPoollist(ctx context.Context, txn bob.Tx, f *models.FileuploadFile, c *models.FileuploadCSV) ([]*models.FileuploadPool, error) {
pools := make([]*models.FileuploadPool, 0)
r, err := userfile.NewFileReader(userfile.CollectionCSV, file.FileUUID)
r, err := file.NewFileReader(file.CollectionCSV, f.FileUUID)
if err != nil {
return pools, fmt.Errorf("Failed to get filereader for %d: %w", file.ID, err)
return pools, fmt.Errorf("Failed to get filereader for %d: %w", f.ID, err)
}
reader := csv.NewReader(r)
h, err := reader.Read()
if err != nil {
return pools, fmt.Errorf("Failed to read header of CSV for file %d: %w", file.ID, err)
return pools, fmt.Errorf("Failed to read header of CSV for file %d: %w", f.ID, err)
}
header_types, header_names := parseHeaders(h)
missing_headers := missingRequiredHeaders(header_types)
for _, mh := range missing_headers {
errorMissingHeader(ctx, txn, c, mh)
file.Update(ctx, txn, &models.FileuploadFileSetter{
f.Update(ctx, txn, &models.FileuploadFileSetter{
Status: omit.From(enums.FileuploadFilestatustypeError),
})
return pools, nil
@ -183,7 +183,7 @@ func parseCSVPoollist(ctx context.Context, txn bob.Tx, file *models.FileuploadFi
if err == io.EOF {
return pools, nil
}
return pools, fmt.Errorf("Failed to read all CSV records for file %d: %w", file.ID, err)
return pools, fmt.Errorf("Failed to read all CSV records for file %d: %w", f.ID, err)
}
tags := make(map[string]string, 0)
setter := models.FileuploadPoolSetter{
@ -196,8 +196,8 @@ func parseCSVPoollist(ctx context.Context, txn bob.Tx, file *models.FileuploadFi
Committed: omit.From(false),
Condition: omit.From(enums.PoolconditiontypeUnknown),
Created: omit.From(time.Now()),
CreatorID: omit.From(file.CreatorID),
CSVFile: omit.From(file.ID),
CreatorID: omit.From(f.CreatorID),
CSVFile: omit.From(f.ID),
Deleted: omitnull.FromPtr[time.Time](nil),
Geom: omitnull.FromPtr[string](nil),
H3cell: omitnull.FromPtr[string](nil),
@ -287,12 +287,12 @@ func parseCSVPoollist(ctx context.Context, txn bob.Tx, file *models.FileuploadFi
line_number = line_number + 1
}
}
func processCSVPoollist(ctx context.Context, txn bob.Tx, file *models.FileuploadFile, c *models.FileuploadCSV, parsed []*models.FileuploadPool) error {
org, err := models.FindOrganization(ctx, db.PGInstance.BobDB, file.OrganizationID)
func processCSVPoollist(ctx context.Context, txn bob.Tx, f *models.FileuploadFile, c *models.FileuploadCSV, parsed []*models.FileuploadPool) error {
org, err := models.FindOrganization(ctx, db.PGInstance.BobDB, f.OrganizationID)
if err != nil {
return fmt.Errorf("get org: %w", err)
}
err = bulkGeocode(ctx, txn, file, c, parsed, org)
err = bulkGeocode(ctx, txn, f, c, parsed, org)
if err != nil {
log.Error().Err(err).Msg("Failure during geocoding")
}

View file

Before

Width:  |  Height:  |  Size: 3.8 KiB

After

Width:  |  Height:  |  Size: 3.8 KiB

Before After
Before After

224
platform/fieldseeker.go Normal file
View file

@ -0,0 +1,224 @@
package platform
import (
"context"
"fmt"
"strings"
"time"
"github.com/Gleipnir-Technology/bob"
"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/db/sql"
"github.com/google/uuid"
"github.com/uber/h3-go/v4"
)
// Inspection is a display-oriented summary of a FieldSeeker inspection.
type Inspection struct {
	Action string
	// Date may be nil when the inspection has no recorded date.
	Date     *time.Time
	Notes    string
	Location string
	// LocationID is the FieldSeeker global id of the inspected location.
	LocationID uuid.UUID
}
// BreedingSourcesByCell returns summaries of the organization's
// breeding-source point locations that fall inside the H3 cell c,
// ordered by last treat date.
func BreedingSourcesByCell(ctx context.Context, org Organization, c h3.Cell) ([]BreedingSourceSummary, error) {
	boundary, err := c.Boundary()
	if err != nil {
		return nil, fmt.Errorf("Failed to get cell boundary: %w", err)
	}
	// Build a PostGIS geometry expression from the cell boundary and use
	// ST_Within to restrict rows to that polygon.
	geom_query := gisStatement(boundary)
	rows, err := org.model.Pointlocations(
		sm.Where(
			psql.F("ST_Within", "geospatial", geom_query),
		),
		sm.OrderBy("lasttreatdate"),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query rows: %w", err)
	}
	return toBreedingSourceSummary(rows), nil
}
// SourceByGlobalID returns detail for the breeding source identified by
// its FieldSeeker global id within the organization.
func SourceByGlobalID(ctx context.Context, org Organization, id uuid.UUID) (*BreedingSourceDetail, error) {
	row, err := org.model.Pointlocations(
		models.SelectWhere.FieldseekerPointlocations.Globalid.EQ(id),
	).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to get point location: %w", err)
	}
	return toBreedingSource(row)
}
// TrapsBySource returns nearby-trap summaries for the traps associated
// with the breeding source identified by sourceID: it loads the trap
// locations, their most recent trap data, and their counts, then merges
// them into template-friendly values.
func TrapsBySource(ctx context.Context, org Organization, sourceID uuid.UUID) ([]TrapNearby, error) {
	locations, err := sql.TrapLocationBySourceID(org.ID(), sourceID).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query rows: %w", err)
	}
	location_ids := make([]uuid.UUID, 0)
	// NOTE(review): args is built but never used — looks like dead code
	// left from an earlier query form; confirm and remove.
	var args []bob.Expression
	for _, location := range locations {
		location_ids = append(location_ids, location.TrapLocationGlobalid)
		args = append(args, psql.Arg(location.TrapLocationGlobalid))
	}
	trap_data, err := sql.TrapDataByLocationIDRecent(org.ID(), location_ids).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query trap data: %w", err)
	}
	counts, err := sql.TrapCountByLocationID(org.ID(), location_ids).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query trap counts: %w", err)
	}
	traps, err := toTemplateTrapsNearby(locations, trap_data, counts)
	if err != nil {
		return nil, fmt.Errorf("Failed to convert trap data: %w", err)
	}
	return traps, nil
}
// TreatmentsBySource returns all treatments applied to the given source,
// newest first.
func TreatmentsBySource(ctx context.Context, org Organization, sourceID uuid.UUID) ([]Treatment, error) {
	query := org.model.Treatments(
		sm.Where(
			models.FieldseekerTreatments.Columns.Pointlocid.EQ(psql.Arg(sourceID)),
		),
		sm.OrderBy("enddatetime").Desc(),
	)
	rows, err := query.All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query rows: %w", err)
	}
	return toTreatment(rows)
}
// TrapByGlobalId assembles the full trap view (location, recent data, and
// counts) for the trap identified by id.
func TrapByGlobalId(ctx context.Context, org Organization, id uuid.UUID) (*Trap, error) {
	conn := db.PGInstance.BobDB
	location, err := org.model.Traplocations(
		sm.Where(models.FieldseekerTraplocations.Columns.Globalid.EQ(psql.Arg(id))),
	).One(ctx, conn)
	if err != nil {
		return nil, fmt.Errorf("Failed to get trap location: %w", err)
	}
	ids := []uuid.UUID{id}
	data, err := sql.TrapDataByLocationIDRecent(org.ID(), ids).All(ctx, conn)
	if err != nil {
		return nil, fmt.Errorf("Failed to query trap data: %w", err)
	}
	counts, err := sql.TrapCountByLocationID(org.ID(), ids).All(ctx, conn)
	if err != nil {
		return nil, fmt.Errorf("Failed to query trap counts: %w", err)
	}
	trap, err := toTrap(location, data, counts)
	if err != nil {
		return nil, fmt.Errorf("to trap: %w", err)
	}
	return &trap, nil
}
// TrapsByCell returns summaries for every trap location inside the given
// H3 cell, ordered by object id.
func TrapsByCell(ctx context.Context, org Organization, c h3.Cell) (results []TrapSummary, err error) {
	boundary, err := c.Boundary()
	if err != nil {
		return results, fmt.Errorf("Failed to get cell boundary: %w", err)
	}
	locations, err := org.model.Traplocations(
		sm.Where(psql.F("ST_Within", "geospatial", gisStatement(boundary))),
		sm.OrderBy("objectid"),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return results, fmt.Errorf("Failed to query rows: %w", err)
	}
	return toTemplateTrapSummary(locations)
}
// TreatmentsByCell returns all treatments inside the given H3 cell,
// grouped by point location and ordered by end time.
func TreatmentsByCell(ctx context.Context, org Organization, c h3.Cell) ([]Treatment, error) {
	boundary, err := c.Boundary()
	if err != nil {
		return nil, fmt.Errorf("Failed to get cell boundary: %w", err)
	}
	rows, err := org.model.Treatments(
		sm.Where(psql.F("ST_Within", "geospatial", gisStatement(boundary))),
		sm.OrderBy("pointlocid"),
		sm.OrderBy("enddatetime"),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query rows: %w", err)
	}
	return toTreatment(rows)
}
// InspectionsByCell returns all mosquito inspections inside the given H3
// cell, grouped by point location and ordered by end time.
func InspectionsByCell(ctx context.Context, org Organization, c h3.Cell) ([]Inspection, error) {
	boundary, err := c.Boundary()
	if err != nil {
		return nil, fmt.Errorf("Failed to get cell boundary: %w", err)
	}
	rows, err := org.model.Mosquitoinspections(
		sm.Where(psql.F("ST_Within", "geospatial", gisStatement(boundary))),
		sm.OrderBy("pointlocid"),
		sm.OrderBy("enddatetime"),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query rows: %w", err)
	}
	return toTemplateInspection(rows)
}
// InspectionsBySource returns all inspections recorded against the given
// source, newest first.
func InspectionsBySource(ctx context.Context, org Organization, sourceID uuid.UUID) ([]Inspection, error) {
	rows, err := org.model.Mosquitoinspections(
		sm.Where(
			models.FieldseekerMosquitoinspections.Columns.Pointlocid.EQ(psql.Arg(sourceID)),
		),
		sm.OrderBy("enddatetime").Desc(),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query rows: %w", err)
	}
	return toTemplateInspection(rows)
}
// toBreedingSourceSummary converts raw point-location rows into the
// summary form used by map views. Null dates become nil pointers.
func toBreedingSourceSummary(points []*models.FieldseekerPointlocation) []BreedingSourceSummary {
	results := make([]BreedingSourceSummary, len(points))
	for i, point := range points {
		summary := BreedingSourceSummary{
			ID:   point.Globalid,
			Type: point.Habitat.GetOr("none"),
		}
		if !point.Lastinspectdate.IsNull() {
			when := point.Lastinspectdate.MustGet()
			summary.LastInspected = &when
		}
		if !point.Lasttreatdate.IsNull() {
			when := point.Lasttreatdate.MustGet()
			summary.LastTreated = &when
		}
		results[i] = summary
	}
	return results
}
// gisStatement renders the H3 cell boundary as a PostGIS polygon literal
// in SRID 3857. The first vertex is repeated at the end because POLYGON
// rings must be closed. Assumes the boundary has at least one vertex.
func gisStatement(cb h3.CellBoundary) string {
	var ring strings.Builder
	for i, vertex := range cb {
		if i > 0 {
			ring.WriteString(", ")
		}
		fmt.Fprintf(&ring, "%f %f", vertex.Lng, vertex.Lat)
	}
	// Close the ring by repeating the first coordinate.
	fmt.Fprintf(&ring, ", %f %f", cb[0].Lng, cb[0].Lat)
	return fmt.Sprintf("ST_GeomFromText('POLYGON((%s))', 3857)", ring.String())
}

100
platform/file/base.go Normal file
View file

@ -0,0 +1,100 @@
package file
import (
"fmt"
"io"
"net/http"
"os"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/google/uuid"
//"github.com/rs/zerolog/log"
)
// audioFileContentWrite is a placeholder for persisting raw audio content.
// NOTE(review): currently a no-op that ignores both arguments — confirm
// whether this is intentionally unimplemented.
func audioFileContentWrite(audioUUID uuid.UUID, body io.Reader) error {
	return nil
}
// collectionToExtension maps each collection to the file extension used
// when naming its files on disk (see fileContentPath).
var collectionToExtension map[Collection]string = map[Collection]string{
	CollectionAudioNormalized: "ogg",
	CollectionAudioRaw:        "raw",
	CollectionAudioTranscoded: "ogg",
	CollectionCSV:             "csv",
	CollectionLogo:            "png",
	CollectionPublicImage:     "img",
	CollectionImageRaw:        "raw",
}
// collectionToSubdir maps each collection to its subdirectory under the
// configured files directory (see fileContentPath and CreateDirectories).
var collectionToSubdir map[Collection]string = map[Collection]string{
	CollectionAudioNormalized: "audio-normalized",
	CollectionAudioRaw:        "audio-raw",
	CollectionAudioTranscoded: "audio-transcoded",
	CollectionCSV:             "csv",
	CollectionLogo:            "logo",
	CollectionPublicImage:     "public-image",
	CollectionImageRaw:        "image-raw",
}
// ContentPath returns the on-disk path for the file identified by uid
// within the given collection.
func ContentPath(collection Collection, uid uuid.UUID) string {
	return fileContentPath(collection, uid)
}
// fileContentPath builds the on-disk path for a file in a collection:
// <FilesDirectory>/<collection subdir>/<uid>.<extension>.
// It panics on an unknown collection, since that is a programmer bug.
func fileContentPath(collection Collection, uid uuid.UUID) string {
	subdir, ok := collectionToSubdir[collection]
	if !ok {
		panic(fmt.Sprintf("No subdir for collection %d", int(collection)))
	}
	extension, ok := collectionToExtension[collection]
	if !ok {
		// Previously this lookup's ok was silently ignored, producing
		// paths ending in a bare "." for unmapped collections.
		panic(fmt.Sprintf("No extension for collection %d", int(collection)))
	}
	return fmt.Sprintf("%s/%s/%s.%s", config.FilesDirectory, subdir, uid.String(), extension)
}
/*
func fileContentWrite(body io.Reader, subdir string, uid uuid.UUID, extension string) error {
// Create file in configured directory
filepath := fileContentPath(subdir, uid, extension)
dst, err := os.Create(filepath)
if err != nil {
log.Error().Err(err).Str("filepath", filepath).Msg("Failed to create file")
return fmt.Errorf("Failed to create file at %s: %v", filepath, err)
}
defer dst.Close()
// Copy rest of request body to file
_, err = io.Copy(dst, body)
if err != nil {
return fmt.Errorf("Unable to save content of %s: %v", filepath, err)
}
return nil
}
*/
// writeFileContent streams the file at image_path to the HTTP response,
// answering 404 when the file does not exist and 500 when it cannot be
// opened or stat'd.
// NOTE(review): Content-Type is hard-coded to image/png even though the
// helper name is generic — confirm every caller serves PNG data.
func writeFileContent(w http.ResponseWriter, image_path string) {
	// Open the file
	file, err := os.Open(image_path)
	if err != nil {
		if os.IsNotExist(err) {
			http.Error(w, "Image not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to retrieve image", http.StatusInternalServerError)
		}
		return
	}
	defer file.Close()
	// Get file info for Content-Length header
	fileInfo, err := file.Stat()
	if err != nil {
		http.Error(w, "Failed to get image information", http.StatusInternalServerError)
		return
	}
	// Set appropriate headers
	w.Header().Set("Content-Type", "image/png")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", fileInfo.Size()))
	// Copy file contents to response writer
	_, err = io.Copy(w, file)
	if err != nil {
		// Note: At this point, we've already started writing the response,
		// so we can't change the status code anymore. The best we can do
		// is log the error and abandon the connection.
		return
	}
}

13
platform/file/enum.go Normal file
View file

@ -0,0 +1,13 @@
package file
// Collection identifies a category of user files stored on disk. Each
// collection maps to its own subdirectory and extension (see base.go).
type Collection int

const (
	CollectionAudioRaw Collection = iota
	CollectionAudioNormalized
	CollectionAudioTranscoded
	CollectionCSV
	CollectionImageRaw
	CollectionLogo
	CollectionPublicImage
)

60
platform/file/image.go Normal file
View file

@ -0,0 +1,60 @@
package file
import (
"fmt"
"io"
"net/http"
"os"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// ImageFileContentWrite stores the raw image content for uid on disk.
func ImageFileContentWrite(uid uuid.UUID, body io.Reader) error {
	destination := fileContentPath(CollectionImageRaw, uid)
	out, err := os.Create(destination)
	if err != nil {
		return fmt.Errorf("Failed to create image file %s: %w", destination, err)
	}
	defer out.Close()
	// Stream the caller-supplied body into the new file.
	if _, err = io.Copy(out, body); err != nil {
		return fmt.Errorf("Unable to save file %s: %w", destination, err)
	}
	return nil
}
// ImageFileContentWriteLogo streams the stored logo image for uid to the
// HTTP response.
func ImageFileContentWriteLogo(w http.ResponseWriter, uid uuid.UUID) {
	writeFileContent(w, fileContentPath(CollectionLogo, uid))
}
// PublicImageFileContentWrite stores the content of a public report image
// for uid on disk.
func PublicImageFileContentWrite(uid uuid.UUID, body io.Reader) error {
	// Create file in configured directory
	filepath := fileContentPath(CollectionPublicImage, uid)
	dst, err := os.Create(filepath)
	if err != nil {
		log.Error().Err(err).Str("filepath", filepath).Msg("Failed to create public image file")
		return fmt.Errorf("Failed to create public image file at %s: %w", filepath, err)
	}
	defer dst.Close()
	// Copy rest of request body to file
	_, err = io.Copy(dst, body)
	if err != nil {
		// The old message referred to "audio"; this function handles images.
		return fmt.Errorf("Unable to save public image file at %s: %w", filepath, err)
	}
	log.Info().Str("filepath", filepath).Msg("Saved public report image file content")
	return nil
}
// PublicImageFileToResponse streams the stored public report image for
// uid to the HTTP response.
func PublicImageFileToResponse(w http.ResponseWriter, uid uuid.UUID) {
	writeFileContent(w, fileContentPath(CollectionPublicImage, uid))
}

76
platform/file/upload.go Normal file
View file

@ -0,0 +1,76 @@
package file
import (
"bytes"
"fmt"
"io"
"mime/multipart"
"net/http"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// FileUpload describes one file saved from a multipart upload.
type FileUpload struct {
	// ContentType is sniffed from the bytes via http.DetectContentType.
	ContentType string
	// Name is the filename supplied by the uploader.
	Name string
	SizeBytes int
	// UUID is the server-assigned identity used to locate the file on disk.
	UUID uuid.UUID
}
// SaveFileUpload saves every uploaded file from the multipart form field
// `name` into the given collection and returns one FileUpload per file.
// Requires that the request's multipart form has already been parsed.
func SaveFileUpload(r *http.Request, name string, collection Collection) ([]FileUpload, error) {
	results := make([]FileUpload, 0)
	// Look the field up directly instead of scanning every form field and
	// comparing names (the old loop also emitted a debug log per field).
	for _, headers := range r.MultipartForm.File[name] {
		f, err := saveFileUpload(headers, collection)
		if err != nil {
			return results, fmt.Errorf("Failed to extract photo upload: %w", err)
		}
		results = append(results, f)
	}
	return results, nil
}
// saveFileUploads saves every file in the multipart form, regardless of
// field name, into the given collection.
func saveFileUploads(r *http.Request, collection Collection) ([]FileUpload, error) {
	results := make([]FileUpload, 0)
	for field, fileHeaders := range r.MultipartForm.File {
		for _, header := range fileHeaders {
			saved, err := saveFileUpload(header, collection)
			if err != nil {
				return results, fmt.Errorf("Failed to save upload '%s': %w", field, err)
			}
			results = append(results, saved)
		}
	}
	return results, nil
}
// saveFileUpload reads one multipart file, assigns it a fresh UUID, writes
// it into the collection, and reports its metadata.
func saveFileUpload(headers *multipart.FileHeader, collection Collection) (upload FileUpload, err error) {
	file, err := headers.Open()
	if err != nil {
		return upload, fmt.Errorf("Failed to open header: %w", err)
	}
	defer file.Close()
	file_bytes, err := io.ReadAll(file)
	if err != nil {
		// Previously this error was silently dropped and the partial bytes used.
		return upload, fmt.Errorf("Failed to read upload: %w", err)
	}
	content_type := http.DetectContentType(file_bytes)
	u, err := uuid.NewUUID()
	if err != nil {
		// The old format string had no verb for err (go vet flags it).
		return upload, fmt.Errorf("Failed to create uuid: %w", err)
	}
	err = FileContentWrite(bytes.NewReader(file_bytes), collection, u)
	if err != nil {
		return upload, fmt.Errorf("Failed to write file to disk: %w", err)
	}
	log.Info().Int("size", len(file_bytes)).Str("uploaded_filename", headers.Filename).Str("content-type", content_type).Str("uuid", u.String()).Msg("Saved an uploaded file to disk")
	return FileUpload{
		ContentType: content_type,
		Name:        headers.Filename,
		SizeBytes:   len(file_bytes),
		UUID:        u,
	}, nil
}

50
platform/file/userfile.go Normal file
View file

@ -0,0 +1,50 @@
package file
import (
"fmt"
"io"
//"net/http"
"os"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// CreateDirectories ensures every collection subdirectory exists under the
// configured files directory.
func CreateDirectories() error {
	for _, subdir := range collectionToSubdir {
		path := config.FilesDirectory + "/" + subdir
		// MkdirAll is a no-op when the directory already exists, so the
		// previous os.Stat pre-check (which also conflated "missing" with
		// any other stat failure) is unnecessary.
		if err := os.MkdirAll(path, 0750); err != nil {
			return fmt.Errorf("Failed to create userfile directory '%s': %w", path, err)
		}
	}
	return nil
}
// FileContentWrite writes the content of body to the on-disk location for
// uid within the given collection.
func FileContentWrite(body io.Reader, collection Collection, uid uuid.UUID) error {
	// Create file in configured directory
	filepath := fileContentPath(collection, uid)
	dst, err := os.Create(filepath)
	if err != nil {
		log.Error().Err(err).Str("filepath", filepath).Msg("Failed to create upload file")
		return fmt.Errorf("Failed to create upload file at %s: %w", filepath, err)
	}
	defer dst.Close()
	// Copy rest of request body to file
	_, err = io.Copy(dst, body)
	if err != nil {
		// Wrap with %w (was %v) and fix the garbled message text.
		return fmt.Errorf("Unable to copy file content to %s: %w", filepath, err)
	}
	log.Info().Str("filepath", filepath).Msg("Save upload file content")
	return nil
}
// NewFileReader opens the stored file for uid in the given collection.
// NOTE(review): the underlying *os.File is returned as a plain io.Reader,
// so callers cannot Close it — consider returning io.ReadCloser.
func NewFileReader(collection Collection, uid uuid.UUID) (io.Reader, error) {
	path := fileContentPath(collection, uid)
	return os.Open(path)
}

View file

@ -1,116 +0,0 @@
package imagetile
import (
"context"
"embed"
"fmt"
"github.com/Gleipnir-Technology/arcgis-go"
"github.com/Gleipnir-Technology/arcgis-go/fieldseeker"
"github.com/Gleipnir-Technology/nidus-sync/background"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
//"github.com/rs/zerolog/log"
)
//go:embed empty-tile.png
var emptyTileFS embed.FS
var clientByOrgID = make(map[int32]*fieldseeker.FieldSeeker, 0)
var tileRasterPlaceholder *TileRaster
// TileRaster is a raster image tile ready to serve to a map client.
type TileRaster struct {
	Content []byte
	// IsPlaceholder is true when Content is the shared empty tile rather
	// than real imagery.
	IsPlaceholder bool
}
// ImageAtPoint fetches the aerial imagery tile covering lat/lng at the
// given zoom level for the organization, using the cached per-org
// FieldSeeker client. Returns the shared placeholder tile when the map
// service has no imagery at that location.
func ImageAtPoint(ctx context.Context, org *models.Organization, level uint, lat, lng float64) (*TileRaster, error) {
	fssync, err := getFieldseeker(ctx, org)
	if err != nil {
		return nil, fmt.Errorf("create fssync: %w", err)
	}
	map_service, err := aerialImageService(ctx, fssync.Arcgis)
	if err != nil {
		return nil, fmt.Errorf("no map service: %w", err)
	}
	data, e := map_service.TileGPS(ctx, level, lat, lng)
	if e != nil {
		return nil, fmt.Errorf("tilegps: %w", e)
	}
	// Empty payload means "no data here": serve the placeholder instead.
	if len(data) == 0 {
		return TileRasterPlaceholder(), nil
	}
	return &TileRaster{
		Content: data,
		IsPlaceholder: false,
	}, nil
}
// ImageAtTile fetches the aerial imagery tile at the given level/y/x tile
// coordinates for the organization.
// NOTE(review): unlike ImageAtPoint, this builds a fresh FieldSeeker
// client on every call instead of using the clientByOrgID cache — confirm
// whether that is intentional.
func ImageAtTile(ctx context.Context, org *models.Organization, level, y, x uint) (*TileRaster, error) {
	oauth, err := background.GetOAuthForOrg(ctx, org)
	if err != nil {
		return nil, fmt.Errorf("get oauth for org: %w", err)
	}
	fssync, err := background.NewFieldSeeker(
		ctx,
		oauth,
	)
	if err != nil {
		return nil, fmt.Errorf("create fssync: %w", err)
	}
	map_service, err := aerialImageService(ctx, fssync.Arcgis)
	if err != nil {
		return nil, fmt.Errorf("no map service: %w", err)
	}
	data, e := map_service.Tile(ctx, level, y, x)
	if e != nil {
		return nil, fmt.Errorf("tile: %w", e)
	}
	// No data at this location, so supply the empty tile placeholder
	if len(data) == 0 {
		return TileRasterPlaceholder(), nil
	}
	return &TileRaster{
		Content: data,
		IsPlaceholder: false,
	}, nil
}
// TileRasterPlaceholder returns the lazily-initialized shared placeholder
// tile used when a map service has no imagery for a location.
func TileRasterPlaceholder() *TileRaster {
	if tileRasterPlaceholder == nil {
		content, err := emptyTileFS.ReadFile("empty-tile.png")
		if err != nil {
			panic(fmt.Sprintf("Failed to read empty-tile.png: %v", err))
		}
		tileRasterPlaceholder = &TileRaster{
			Content:       content,
			IsPlaceholder: true,
		}
	}
	return tileRasterPlaceholder
}
// aerialImageService returns the first map service exposed by the ArcGIS
// instance, which is assumed to carry the aerial imagery.
func aerialImageService(ctx context.Context, gis *arcgis.ArcGIS) (*arcgis.MapService, error) {
	map_services, err := gis.MapServices(ctx)
	if err != nil {
		return nil, fmt.Errorf("aerial image service: %w", err)
	}
	if len(map_services) == 0 {
		// Fixed typo in the original message ("non found").
		return nil, fmt.Errorf("none found")
	}
	return &map_services[0], nil
}
// getFieldseeker returns a cached FieldSeeker client for the organization,
// creating and caching one on first use.
func getFieldseeker(ctx context.Context, org *models.Organization) (*fieldseeker.FieldSeeker, error) {
	if fssync, ok := clientByOrgID[org.ID]; ok {
		return fssync, nil
	}
	oauth, err := background.GetOAuthForOrg(ctx, org)
	if err != nil {
		return nil, fmt.Errorf("get oauth for org: %w", err)
	}
	fssync, err := background.NewFieldSeeker(
		ctx,
		oauth,
	)
	if err != nil {
		// Previously this error was ignored and a nil client was cached,
		// poisoning every later lookup for this organization.
		return nil, fmt.Errorf("new fieldseeker: %w", err)
	}
	clientByOrgID[org.ID] = fssync
	return fssync, nil
}

View file

@ -10,20 +10,13 @@ import (
"github.com/google/uuid"
)
func fieldseeker(ctx context.Context, u *models.User, since *time.Time) (fsync FieldseekerRecordsSync, err error) {
if u == nil {
return fsync, fmt.Errorf("Wha! Nil user!")
}
org := u.R.Organization
if org == nil {
return fsync, fmt.Errorf("Whoa nil org from user %d and org %d.", u.ID, u.OrganizationID)
}
func getFieldseekerRecordsSync(ctx context.Context, u User, since *time.Time) (fsync FieldseekerRecordsSync, err error) {
db_connection := db.PGInstance.BobDB
pl, err := org.Pointlocations().All(ctx, db_connection)
pl, err := u.Organization.model.Pointlocations().All(ctx, db_connection)
if err != nil {
return fsync, fmt.Errorf("Failed to get point locations: %w", err)
}
inspections, err := u.R.Organization.Mosquitoinspections().All(ctx, db.PGInstance.BobDB)
inspections, err := u.Organization.model.Mosquitoinspections().All(ctx, db.PGInstance.BobDB)
if err != nil {
return fsync, fmt.Errorf("Failed to get mosquito inspections: %w", err)
}
@ -40,7 +33,7 @@ func fieldseeker(ctx context.Context, u *models.User, since *time.Time) (fsync F
insp = append(insp, i)
inspections_by_location[locid] = insp
}
treatments, err := u.R.Organization.Treatments().All(ctx, db.PGInstance.BobDB)
treatments, err := u.Organization.model.Treatments().All(ctx, db.PGInstance.BobDB)
if err != nil {
return fsync, fmt.Errorf("Failed to get treatment data: %w", err)
}
@ -78,8 +71,8 @@ func fieldseeker(ctx context.Context, u *models.User, since *time.Time) (fsync F
return fsync, err
}
func ContentClientIos(ctx context.Context, u *models.User, since *time.Time) (csync ClientSync, err error) {
fsync, err := fieldseeker(ctx, u, since)
func ContentClientIos(ctx context.Context, u User, since *time.Time) (csync ClientSync, err error) {
fsync, err := getFieldseekerRecordsSync(ctx, u, since)
return ClientSync{
Fieldseeker: fsync,
}, err

46
platform/note.go Normal file
View file

@ -0,0 +1,46 @@
package platform
import (
"context"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// NoteAudioCreate attaches an audio record to a note in the user's
// organization. A duplicate-key failure is treated as success, since it
// means the content is already recorded.
func NoteAudioCreate(ctx context.Context, user User, setter models.NoteAudioSetter) error {
	err := user.Organization.model.InsertNoteAudios(ctx, db.PGInstance.BobDB, &setter)
	if err == nil {
		return nil
	}
	// Just ignore this failure, it means we already have this content
	// NOTE(review): matching the exact error string is brittle; prefer
	// inspecting the SQLSTATE (23505) if the driver exposes it.
	if err.Error() == "insertOrganizationNoteAudios0: ERROR: duplicate key value violates unique constraint \"note_audio_pkey\" (SQLSTATE 23505)" {
		return nil
	}
	log.Warn().Err(err).Msg("Unrecognized error creating note audio")
	return err
}
// NoteAudioNormalized is a hook for when an audio note has been normalized.
// NOTE(review): currently a no-op that ignores its argument — confirm intent.
func NoteAudioNormalized(uuid string) error {
	return nil
}
// NoteAudioTranscodedToOgg is a hook for when an audio note has been
// transcoded to Ogg.
// NOTE(review): currently a no-op that ignores its argument — confirm intent.
func NoteAudioTranscodedToOgg(uuid string) error {
	return nil
}
// NoteImageCreate attaches an image record to a note in the user's
// organization. A duplicate-key failure is treated as success, since it
// means the content is already recorded.
func NoteImageCreate(ctx context.Context, user User, setter models.NoteImageSetter) error {
	err := user.Organization.model.InsertNoteImages(ctx, db.PGInstance.BobDB, &setter)
	if err == nil {
		return nil
	}
	// Just ignore this failure, it means we already have this content
	if err.Error() == "insertOrganizationNoteImages0: ERROR: duplicate key value violates unique constraint \"note_image_pkey\" (SQLSTATE 23505)" {
		return nil
	}
	// The original message said "note audio"; this is the image path.
	log.Warn().Err(err).Msg("Unrecognized error creating note image")
	return err
}
// NoteUpdate is a hook for updating an existing note.
// NOTE(review): currently a no-op that ignores all arguments — confirm
// whether this is intentionally unimplemented.
func NoteUpdate(ctx context.Context, noteUUID uuid.UUID, setter models.NoteAudioSetter) error {
	return nil
}

111
platform/notification.go Normal file
View file

@ -0,0 +1,111 @@
package platform
import (
"context"
"fmt"
"strings"
"time"
"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/bob/dialect/psql/im"
"github.com/Gleipnir-Technology/nidus-sync/db"
enums "github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/debug"
//"github.com/aarondl/opt/omit"
"github.com/aarondl/opt/omitnull"
"github.com/rs/zerolog/log"
)
var (
	// NotificationPathOauthReset is the link stored on oauth-invalidation
	// notifications; it also acts as their dedup key together with user_id
	// (see NotifyOauthInvalid's ON CONFLICT clause).
	NotificationPathOauthReset string = "/oauth/refresh"
)
// Notification is a user-facing notification as rendered to clients.
type Notification struct {
	// Link is the path the user should visit to resolve the notification.
	Link string
	Message string
	Time time.Time
	// Type is the client-facing type identifier (see notificationTypeName).
	Type string
}
// ClearOauth marks all of the user's oauth-reset notifications as resolved
// by setting resolved_at to now.
// NOTE(review): the result and error of Exec are discarded, so a failed
// update goes completely unnoticed — consider at least logging it.
func ClearOauth(ctx context.Context, user *models.User) {
	setter := models.NotificationSetter{
		ResolvedAt: omitnull.From(time.Now()),
	}
	updater := models.Notifications.Update(
		//models.SelectWhere.Notifications.Link.EQ(NotificationPathOauthReset),
		models.UpdateWhere.Notifications.Link.EQ(NotificationPathOauthReset),
		models.UpdateWhere.Notifications.UserID.EQ(user.ID),
		setter.UpdateMod(),
	)
	updater.Exec(ctx, db.PGInstance.BobDB)
	//user.UserNotifications(
	//models.SelectWhere.Notifications.Link.EQ(NotificationPathOauthReset),
	//).UpdateAll()
}
// NotifyOauthInvalid records an "oauth token invalidated" notification for
// the user. The ON CONFLICT (user_id, link) WHERE resolved_at IS NULL
// clause makes the insert idempotent: at most one unresolved notification
// of this kind exists per user, and duplicates are silently skipped.
func NotifyOauthInvalid(ctx context.Context, user *models.User) {
	msg := "Oauth token invalidated"
	// Raw insert: values are positional and must stay aligned with the
	// column list in im.Into.
	_, err := psql.Insert(
		im.Into("notification", "created", "id", "link", "message", "resolved_at", "type", "user_id"),
		im.Values(
			psql.Arg(time.Now()),
			psql.Raw("DEFAULT"),
			psql.Arg(NotificationPathOauthReset),
			psql.Arg(msg),
			psql.Raw("NULL"),
			psql.Arg(enums.NotificationtypeOauthTokenInvalidated),
			psql.Arg(user.ID),
		),
		//im.OnConflict("user_id", "link").DoNothing(),
		//im.OnConflictOnConstraint("unique_user_link_not_resolved").DoNothing(),
		im.OnConflict("user_id", "link").Where("resolved_at IS NULL").DoNothing(),
	).Exec(ctx, db.PGInstance.BobDB)
	/*
		notificationSetter := models.NotificationSetter{
			Created: omit.From(time.Now()),
			Message: omit.From(msg),
			Link: omit.From(NotificationPathOauthReset),
			Type: omit.From(enums.NotificationtypeOauthTokenInvalidated),
		}
		err := user.InsertUserNotifications(ctx, db.PGInstance.BobDB, &notificationSetter)
	*/
	if err != nil {
		// A duplicate-key error means the notification already exists.
		if strings.Contains(err.Error(), "ERROR: duplicate key value violates unique constraint") {
			log.Info().Str("msg", msg).Int("user_id", int(user.ID)).Msg("Refusing to add another notification with the same type")
			return
		}
		debug.LogErrorTypeInfo(err)
		log.Error().Err(err).Msg("Failed to insert new notification. This is a programmer bug.")
		return
	}
}
// NotificationsForUser returns every unresolved notification for the user,
// converted to the platform representation.
func NotificationsForUser(ctx context.Context, u User) ([]Notification, error) {
	results := make([]Notification, 0)
	rows, err := u.model.UserNotifications(
		models.SelectWhere.Notifications.ResolvedAt.IsNull(),
	).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return results, fmt.Errorf("Failed to get notifications: %w", err)
	}
	for _, row := range rows {
		results = append(results, Notification{
			Link:    row.Link,
			Message: row.Message,
			Time:    row.Created,
			Type:    notificationTypeName(row.Type),
		})
	}
	return results, nil
}
// notificationTypeName maps a database notification type enum to the
// string identifier exposed to clients.
func notificationTypeName(t enums.Notificationtype) string {
	if t == enums.NotificationtypeOauthTokenInvalidated {
		return "oauth-token-invalid"
	}
	return "unknown-type"
}

77
platform/oauth.go Normal file
View file

@ -0,0 +1,77 @@
package platform
import (
"context"
"fmt"
"net/url"
"time"
"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
"github.com/Gleipnir-Technology/nidus-sync/platform/oauth"
"github.com/aarondl/opt/omit"
"github.com/aarondl/opt/omitnull"
)
// NoOAuthForOrg is the error returned when an organization has no user
// with a usable (non-invalidated) oauth token.
type NoOAuthForOrg struct{}

func (e NoOAuthForOrg) Error() string { return "No oauth available for organization" }
// GetOAuthForOrg returns an oauth token usable by the organization, or a
// NoOAuthForOrg error when no user of the org has one.
func GetOAuthForOrg(ctx context.Context, org Organization) (*models.ArcgisOauthToken, error) {
	token, err := oauth.GetOAuthForOrg(ctx, org.model)
	if err != nil {
		return nil, err
	}
	if token == nil {
		return nil, &NoOAuthForOrg{}
	}
	return token, nil
}
// GetOAuthForUser returns the user's most recently created oauth token,
// or (nil, nil) when the user has none.
func GetOAuthForUser(ctx context.Context, user User) (*models.ArcgisOauthToken, error) {
	oauth, err := user.model.UserOauthTokens(
		sm.OrderBy("created").Desc(),
	).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		// NOTE(review): string comparison against sql.ErrNoRows's text;
		// errors.Is(err, sql.ErrNoRows) would survive driver wrapping.
		if err.Error() == "sql: no rows in result set" {
			return nil, nil
		}
		return nil, err
	}
	return oauth, nil
}
// HandleOauthAccessCode exchanges an oauth authorization code for tokens,
// persists them for the user, and kicks off a background refresh of the
// user's ArcGIS account data.
func HandleOauthAccessCode(ctx context.Context, user User, code string) error {
	form := url.Values{
		"grant_type":   []string{"authorization_code"},
		"code":         []string{code},
		"redirect_uri": []string{config.ArcGISOauthRedirectURL()},
	}
	token, err := oauth.DoTokenRequest(ctx, form)
	if err != nil {
		return fmt.Errorf("Failed to exchange authorization code for token: %w", err)
	}
	accessExpires := oauth.FutureUTCTimestamp(token.ExpiresIn)
	refreshExpires := oauth.FutureUTCTimestamp(token.RefreshTokenExpiresIn)
	setter := models.ArcgisOauthTokenSetter{
		AccessToken:         omit.From(token.AccessToken),
		AccessTokenExpires:  omit.From(accessExpires),
		ArcgisID:            omitnull.FromPtr[string](nil),
		ArcgisLicenseTypeID: omitnull.FromPtr[string](nil),
		Created:             omit.From(time.Now()),
		InvalidatedAt:       omitnull.FromPtr[time.Time](nil),
		RefreshToken:        omit.From(token.RefreshToken),
		RefreshTokenExpires: omit.From(refreshExpires),
		UserID:              omit.From(int32(user.ID)),
		Username:            omit.From(token.Username),
	}
	// `saved` was previously named `oauth`, shadowing the imported oauth
	// package within this scope.
	saved, err := models.ArcgisOauthTokens.Insert(&setter).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		return fmt.Errorf("Failed to save token to database: %w", err)
	}
	go background.UpdateArcgisUserData(context.Background(), user.model, saved)
	return nil
}

160
platform/oauth/oauth.go Normal file
View file

@ -0,0 +1,160 @@
package oauth
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/Gleipnir-Technology/arcgis-go"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/aarondl/opt/omit"
"github.com/rs/zerolog/log"
)
// InvalidatedTokenError is returned when the API reports the refresh token
// has been invalidated (ArcGIS error code 498 — see DoTokenRequest).
type InvalidatedTokenError struct{}

func (e InvalidatedTokenError) Error() string { return "The token has been invalidated by the server" }
// OAuthTokenResponse is the JSON payload returned by the ArcGIS oauth2
// token endpoint. The expiry fields are lifetimes in seconds.
type OAuthTokenResponse struct {
	AccessToken string `json:"access_token"`
	ExpiresIn int `json:"expires_in"`
	RefreshToken string `json:"refresh_token"`
	RefreshTokenExpiresIn int `json:"refresh_token_expires_in"`
	SSL bool `json:"ssl"`
	Username string `json:"username"`
}
// DoTokenRequest POSTs the given form to the ArcGIS oauth2 token endpoint
// and decodes the response. Returns InvalidatedTokenError when the server
// reports the refresh token has been invalidated (code 498).
func DoTokenRequest(ctx context.Context, form url.Values) (*OAuthTokenResponse, error) {
	form.Set("client_id", config.ClientID)
	baseURL := "https://www.arcgis.com/sharing/rest/oauth2/token/"
	req, err := http.NewRequest("POST", baseURL, strings.NewReader(form.Encode()))
	if err != nil {
		return nil, fmt.Errorf("Failed to create request: %w", err)
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	client := http.Client{}
	log.Info().Str("url", req.URL.String()).Msg("POST")
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Failed to do request: %w", err)
	}
	defer resp.Body.Close()
	bodyBytes, err := io.ReadAll(resp.Body)
	log.Info().Int("status", resp.StatusCode).Msg("Token request")
	if resp.StatusCode >= http.StatusBadRequest {
		if err != nil {
			return nil, fmt.Errorf("Got status code %d and failed to read response body: %w", resp.StatusCode, err)
		}
		bodyString := string(bodyBytes)
		var errorResp arcgis.ErrorResponse
		if err := json.Unmarshal(bodyBytes, &errorResp); err == nil {
			if errorResp.Error.Code == 498 && errorResp.Error.Description == "invalidated refresh_token" {
				return nil, InvalidatedTokenError{}
			}
			return nil, fmt.Errorf("API response JSON error: %d: %d %s", resp.StatusCode, errorResp.Error.Code, errorResp.Error.Description)
		}
		return nil, fmt.Errorf("API returned error status %d: %s", resp.StatusCode, bodyString)
	}
	// Previously a read error on a 2xx response was ignored and a partial
	// body was handed to json.Unmarshal.
	if err != nil {
		return nil, fmt.Errorf("Failed to read response body: %w", err)
	}
	//logResponseHeaders(resp)
	var tokenResponse OAuthTokenResponse
	err = json.Unmarshal(bodyBytes, &tokenResponse)
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshal JSON: %w", err)
	}
	// Just because we got a 200-level status code doesn't mean it worked. Experience has taught us that
	// we can get errors without anything indicated in the headers or the status code
	if tokenResponse == (OAuthTokenResponse{}) {
		var errorResponse arcgis.ErrorResponse
		err = json.Unmarshal(bodyBytes, &errorResponse)
		if err != nil {
			return nil, fmt.Errorf("Failed to unmarshal error JSON: %w", err)
		}
		if errorResponse.Error.Code > 0 {
			return nil, errorResponse.AsError(ctx)
		}
	}
	// SECURITY(review): this logs full access and refresh tokens at Info
	// level — consider redacting before logs leave trusted storage.
	log.Info().Str("refresh token", tokenResponse.RefreshToken).Str("access token", tokenResponse.AccessToken).Int("access expires", tokenResponse.ExpiresIn).Int("refresh expires", tokenResponse.RefreshTokenExpiresIn).Msg("Oauth token acquired")
	return &tokenResponse, nil
}
func FutureUTCTimestamp(secondsFromNow int) time.Time {
return time.Now().UTC().Add(time.Duration(secondsFromNow) * time.Second)
}
// GetOAuthForOrg scans the organization's users for the first oauth token
// that has not been invalidated. Returns (nil, nil) when no user has one.
func GetOAuthForOrg(ctx context.Context, org *models.Organization) (*models.ArcgisOauthToken, error) {
	users, err := org.User().All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to query all users for org: %w", err)
	}
	for _, user := range users {
		tokens, err := user.UserOauthTokens(models.SelectWhere.ArcgisOauthTokens.InvalidatedAt.IsNull()).All(ctx, db.PGInstance.BobDB)
		if err != nil {
			return nil, fmt.Errorf("Failed to query all oauth tokens for org: %w", err)
		}
		if len(tokens) > 0 {
			return tokens[0], nil
		}
	}
	return nil, nil
}
// RefreshAccessToken uses the stored refresh token to obtain a fresh
// access token from the ArcGIS token endpoint and persists the new access
// token, its expiry, and the username onto the oauth row.
func RefreshAccessToken(ctx context.Context, oauth *models.ArcgisOauthToken) error {
	form := url.Values{
		"grant_type": []string{"refresh_token"},
		"client_id": []string{config.ClientID},
		"refresh_token": []string{oauth.RefreshToken},
	}
	token, err := DoTokenRequest(ctx, form)
	if err != nil {
		return fmt.Errorf("Failed to handle request: %w", err)
	}
	accessExpires := FutureUTCTimestamp(token.ExpiresIn)
	setter := models.ArcgisOauthTokenSetter{
		AccessToken: omit.From(token.AccessToken),
		AccessTokenExpires: omit.From(accessExpires),
		Username: omit.From(token.Username),
	}
	err = oauth.Update(ctx, db.PGInstance.BobDB, &setter)
	if err != nil {
		return fmt.Errorf("Failed to update oauth in database: %w", err)
	}
	log.Info().Int("oauth token id", int(oauth.ID)).Msg("Updated oauth token")
	return nil
}
// RefreshRefreshToken exchanges the current refresh token for a new one
// (grant_type exchange_refresh_token) and persists the new refresh token,
// its expiry, and the username onto the oauth row.
func RefreshRefreshToken(ctx context.Context, oauth *models.ArcgisOauthToken) error {
	form := url.Values{
		"grant_type": []string{"exchange_refresh_token"},
		"redirect_uri": []string{config.ArcGISOauthRedirectURL()},
		"refresh_token": []string{oauth.RefreshToken},
	}
	token, err := DoTokenRequest(ctx, form)
	if err != nil {
		return fmt.Errorf("Failed to handle request: %w", err)
	}
	// NOTE(review): this uses token.ExpiresIn (not RefreshTokenExpiresIn)
	// to compute the refresh-token expiry — confirm the exchange response
	// reports the new refresh token's lifetime in expires_in.
	refreshExpires := FutureUTCTimestamp(token.ExpiresIn)
	setter := models.ArcgisOauthTokenSetter{
		RefreshToken: omit.From(token.RefreshToken),
		RefreshTokenExpires: omit.From(refreshExpires),
		Username: omit.From(token.Username),
	}
	err = oauth.Update(ctx, db.PGInstance.BobDB, &setter)
	if err != nil {
		return fmt.Errorf("Failed to update oauth in database: %w", err)
	}
	log.Info().Int("oauth token id", int(oauth.ID)).Msg("Updated oauth token")
	return nil
}

View file

@ -1,17 +1,115 @@
package platform
import (
"context"
"fmt"
"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
//"github.com/google/uuid"
)
type Organization struct {
ID int32
Name string
ServiceAreaXmax float64
ServiceAreaXmin float64
ServiceAreaYmax float64
ServiceAreaYmin float64
model *models.Organization
}
func NewOrganization(org *models.Organization) Organization {
return Organization{
ID: org.ID,
Name: org.Name,
// ArcgisAccountID returns the organization's ArcGIS account id, or the
// empty string when none is set.
func (o Organization) ArcgisAccountID() string {
	if o.model.ArcgisAccountID.IsNull() {
		return ""
	}
	return o.model.ArcgisAccountID.MustGet()
}
// CountServiceRequest reports how many service requests the organization has.
func (o Organization) CountServiceRequest(ctx context.Context) (uint, error) {
	n, err := o.model.Servicerequests().Count(ctx, db.PGInstance.BobDB)
	if err != nil {
		return 0, fmt.Errorf("get service request count: %w", err)
	}
	return uint(n), nil
}
// CountSource reports how many breeding-source point locations the
// organization has.
func (o Organization) CountSource(ctx context.Context) (uint, error) {
	n, err := o.model.Pointlocations().Count(ctx, db.PGInstance.BobDB)
	if err != nil {
		return 0, fmt.Errorf("get source count: %w", err)
	}
	return uint(n), nil
}
// CountTrap reports how many trap locations the organization has.
func (o Organization) CountTrap(ctx context.Context) (uint, error) {
	n, err := o.model.Traplocations().Count(ctx, db.PGInstance.BobDB)
	if err != nil {
		return 0, fmt.Errorf("get trap count: %w", err)
	}
	return uint(n), nil
}
// Name returns the organization's display name.
func (o Organization) Name() string {
	return o.model.Name
}
// ID returns the organization's database id.
func (o Organization) ID() int32 {
	return o.model.ID
}
// IsSyncOngoing reports whether a background sync is currently running
// for this organization.
func (o Organization) IsSyncOngoing() bool {
	return background.IsSyncOngoing(o.ID())
}
// FieldseekerSyncLatest returns the most recent FieldSeeker sync record for
// this organization, or (nil, nil) when it has never synced.
func (o Organization) FieldseekerSyncLatest(ctx context.Context) (*models.FieldseekerSync, error) {
	latest, err := o.model.FieldseekerSyncs(sm.OrderBy("created").Desc()).One(ctx, db.PGInstance.BobDB)
	if err == nil {
		return latest, nil
	}
	// NOTE(review): string matching mirrors the rest of this package;
	// errors.Is(err, sql.ErrNoRows) would be more robust — confirm.
	if err.Error() == "sql: no rows in result set" {
		return nil, nil
	}
	return nil, fmt.Errorf("get syncs: %w", err)
}
// ServiceArea is the bounding box an organization operates in, expressed as
// minimum and maximum corner points.
type ServiceArea struct {
	Min Point
	Max Point
}
// ServiceArea returns the organization's configured bounding box. When any
// of the four coordinates is unset, the zero ServiceArea is returned.
func (o Organization) ServiceArea() ServiceArea {
	xmin, xmax := o.model.ServiceAreaXmin, o.model.ServiceAreaXmax
	ymin, ymax := o.model.ServiceAreaYmin, o.model.ServiceAreaYmax
	if xmin.IsNull() || xmax.IsNull() || ymin.IsNull() || ymax.IsNull() {
		return ServiceArea{}
	}
	return ServiceArea{
		Min: Point{X: xmin.MustGet(), Y: ymin.MustGet()},
		Max: Point{X: xmax.MustGet(), Y: ymax.MustGet()},
	}
}
// ServiceRequestRecent returns the organization's ten most recently created
// service requests, newest first.
func (o Organization) ServiceRequestRecent(ctx context.Context) ([]*models.FieldseekerServicerequest, error) {
	recent, err := o.model.Servicerequests(sm.OrderBy("creationdate").Desc(), sm.Limit(10)).All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return []*models.FieldseekerServicerequest{}, fmt.Errorf("query service request: %w", err)
	}
	return recent, nil
}
// OrganizationByID looks up an organization by primary key. It returns
// (nil, nil) when no organization with that ID exists.
func OrganizationByID(ctx context.Context, id int) (*Organization, error) {
	row, err := models.FindOrganization(ctx, db.PGInstance.BobDB, int32(id))
	if err != nil {
		if err.Error() == "sql: no rows in result set" {
			return nil, nil
		}
		return nil, fmt.Errorf("query org: %w", err)
	}
	wrapped := newOrganization(row)
	return &wrapped, nil
}
// newOrganization wraps a database organization row in the platform type.
func newOrganization(org *models.Organization) Organization {
	return Organization{model: org}
}

6
platform/point.go Normal file
View file

@ -0,0 +1,6 @@
package platform
// Point is a 2D coordinate. For service areas X appears to be longitude and
// Y latitude — TODO confirm axis convention against callers.
type Point struct {
	X float64
	Y float64
}

View file

@ -96,3 +96,19 @@ func NuisanceReportForOrganization(ctx context.Context, org_id int32) ([]Nuisanc
}
return reports, nil
}
func NuisanceReportForOrganizationCount(ctx context.Context, org_id int32) (uint, error) {
type _Row struct {
Count uint `db:"count"`
}
row, err := bob.One(ctx, db.PGInstance.BobDB, psql.Select(
sm.Columns(
"COUNT(*) AS count",
),
sm.From("publicreport.nuisance"),
sm.Where(psql.Quote("publicreport", "nuisance", "organization_id").EQ(psql.Arg(org_id))),
), scan.StructMapper[_Row]())
if err != nil {
return 0, fmt.Errorf("query count: %w", err)
}
return row.Count, nil
}

View file

@ -100,3 +100,19 @@ func WaterReportForOrganization(ctx context.Context, org_id int32) ([]Water, err
}
return reports, nil
}
func WaterReportForOrganizationCount(ctx context.Context, org_id int32) (uint, error) {
type _Row struct {
Count uint `db:"count"`
}
row, err := bob.One(ctx, db.PGInstance.BobDB, psql.Select(
sm.Columns(
"COUNT(*) AS count",
),
sm.From("publicreport.water"),
sm.Where(psql.Quote("publicreport", "water", "organization_id").EQ(psql.Arg(org_id))),
), scan.StructMapper[_Row]())
if err != nil {
return 0, fmt.Errorf("query count: %w", err)
}
return row.Count, nil
}

View file

@ -15,10 +15,10 @@ import (
//"github.com/Gleipnir-Technology/bob/dialect/psql"
//"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
//"github.com/Gleipnir-Technology/bob/dialect/psql/um"
"github.com/Gleipnir-Technology/nidus-sync/background"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/db/sql"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
"github.com/Gleipnir-Technology/nidus-sync/platform/email"
"github.com/Gleipnir-Technology/nidus-sync/platform/text"
"github.com/rs/zerolog/log"

View file

@ -0,0 +1,60 @@
package subprocess
import (
"errors"
"fmt"
"os"
"os/exec"
"github.com/Gleipnir-Technology/nidus-sync/platform/file"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// fileContentPathAudioNormalized returns the on-disk path of the
// loudness-normalized copy of the audio file identified by u.
func fileContentPathAudioNormalized(u uuid.UUID) string {
	return file.ContentPath(file.CollectionAudioNormalized, u)
}
// NormalizeAudio runs ffmpeg's loudnorm filter over the raw audio file for
// the given UUID, writing the result to the normalized collection. A
// missing source file is logged and skipped rather than treated as an
// error.
func NormalizeAudio(audioUUID uuid.UUID) error {
	source := file.ContentPath(file.CollectionAudioRaw, audioUUID)
	_, err := os.Stat(source)
	if errors.Is(err, os.ErrNotExist) {
		log.Warn().Str("source", source).Msg("file doesn't exist, skipping normalization")
		return nil
	}
	log.Info().Str("source", source).Msg("Normalizing")
	destination := fileContentPathAudioNormalized(audioUUID)
	// Use "ffmpeg" directly, assuming it's in the system PATH
	cmd := exec.Command("ffmpeg", "-i", source, "-filter:a", "loudnorm", destination)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Structured logging to match TranscodeToOgg (the original used
		// log.Printf); %w preserves the exec error for errors.Is/As.
		log.Error().Err(err).Bytes("out", out).Msg("FFmpeg output for normalization")
		return fmt.Errorf("ffmpeg normalization failed: %w", err)
	}
	log.Info().Str("destination", destination).Msg("Normalized audio")
	return nil
}
// TranscodeToOgg transcodes the normalized audio file for the given UUID to
// OGG/Vorbis in the transcoded collection. A missing source file is logged
// and skipped rather than treated as an error.
func TranscodeToOgg(audioUUID uuid.UUID) error {
	source := fileContentPathAudioNormalized(audioUUID)
	_, err := os.Stat(source)
	if errors.Is(err, os.ErrNotExist) {
		log.Warn().Str("source", source).Msg("file doesn't exist, skipping OGG transcoding")
		return nil
	}
	log.Info().Str("source", source).Msg("Transcoding to ogg")
	destination := file.ContentPath(file.CollectionAudioTranscoded, audioUUID)
	// Use "ffmpeg" directly, assuming it's in the system PATH
	cmd := exec.Command("ffmpeg", "-i", source, "-vn", "-acodec", "libvorbis", destination)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Error().Err(err).Bytes("out", out).Msg("FFmpeg output for OGG transcoding")
		// %w (rather than %v) lets callers inspect the exec error.
		return fmt.Errorf("ffmpeg OGG transcoding failed: %w", err)
	}
	log.Info().Str("destination", destination).Msg("Transcoded audio")
	return nil
}

219
platform/tile.go Normal file
View file

@ -0,0 +1,219 @@
package platform
import (
"bytes"
"context"
"embed"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"github.com/Gleipnir-Technology/arcgis-go"
"github.com/Gleipnir-Technology/arcgis-go/fieldseeker"
"github.com/aarondl/opt/omit"
//"github.com/Gleipnir-Technology/bob"
//"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/nidus-sync/config"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
"github.com/Gleipnir-Technology/nidus-sync/platform/oauth"
"github.com/rs/zerolog/log"
)
//go:embed empty-tile.png
var emptyTileFS embed.FS
// GetTile writes the map tile at (z, y, x) for the organization's ArcGIS
// map service to w. Tiles are cached: the DB records whether a tile exists
// (IsEmpty), and non-empty tiles are additionally stored on disk. On a
// cache miss the tile is fetched from ArcGIS and both caches are updated.
func GetTile(ctx context.Context, w http.ResponseWriter, org Organization, z, y, x uint) error {
	if org.model.ArcgisMapServiceID.IsNull() {
		return fmt.Errorf("no map service ID set")
	}
	map_service_id := org.model.ArcgisMapServiceID.MustGet()
	tile_path := tilePath(map_service_id, z, y, x)
	tile_row, err := models.TileCachedImages.Query(
		models.SelectWhere.TileCachedImages.ArcgisID.EQ(map_service_id),
		models.SelectWhere.TileCachedImages.X.EQ(int32(x)),
		models.SelectWhere.TileCachedImages.Y.EQ(int32(y)),
		models.SelectWhere.TileCachedImages.Z.EQ(int32(z)),
	).One(ctx, db.PGInstance.BobDB)
	if err == nil {
		// Cache hit: serve the placeholder for known-empty tiles, otherwise
		// load the cached image from disk.
		var tile *TileRaster
		if tile_row.IsEmpty {
			tile = TileRasterPlaceholder()
		} else {
			tile, err = loadTileFromDisk(tile_path)
			if err != nil {
				return fmt.Errorf("load tile from disk: %w", err)
			}
		}
		log.Debug().Uint("z", z).Uint("y", y).Uint("x", x).Bool("is empty", tile_row.IsEmpty).Msg("tile from cache")
		return writeTile(w, tile)
	}
	// Anything other than "no rows" is a real query failure.
	// NOTE(review): string comparison; errors.Is(err, sql.ErrNoRows) would
	// be more robust — confirm.
	if err.Error() != "sql: no rows in result set" {
		return fmt.Errorf("query db: %w", err)
	}
	// Cache miss: fetch from ArcGIS, persist real tiles to disk, and record
	// the result (including "empty here") in the cache table.
	image, err := ImageAtTile(ctx, org.model, uint(z), uint(y), uint(x))
	if err != nil {
		return fmt.Errorf("image at tile: %w", err)
	}
	if !image.IsPlaceholder {
		err = saveTileToDisk(image, tile_path)
		if err != nil {
			return fmt.Errorf("save tile: %w", err)
		}
	}
	_, err = models.TileCachedImages.Insert(&models.TileCachedImageSetter{
		ArcgisID: omit.From(map_service_id),
		X:        omit.From(int32(x)),
		Y:        omit.From(int32(y)),
		Z:        omit.From(int32(z)),
		IsEmpty:  omit.From(image.IsPlaceholder),
	}).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		return fmt.Errorf("save to db: %w", err)
	}
	log.Debug().Uint("z", z).Uint("y", y).Uint("x", x).Bool("placeholder", image.IsPlaceholder).Msg("caching tile")
	return writeTile(w, image)
}
// ImageAtPoint returns the aerial-imagery tile containing (lat, lng) at the
// given zoom level, or the shared placeholder when the map service has no
// data there.
func ImageAtPoint(ctx context.Context, org Organization, level uint, lat, lng float64) (*TileRaster, error) {
	fssync, err := getFieldseeker(ctx, org.model)
	if err != nil {
		return nil, fmt.Errorf("create fssync: %w", err)
	}
	svc, err := aerialImageService(ctx, fssync.Arcgis)
	if err != nil {
		return nil, fmt.Errorf("no map service: %w", err)
	}
	data, err := svc.TileGPS(ctx, level, lat, lng)
	if err != nil {
		return nil, fmt.Errorf("tilegps: %w", err)
	}
	if len(data) == 0 {
		return TileRasterPlaceholder(), nil
	}
	return &TileRaster{Content: data, IsPlaceholder: false}, nil
}
// loadTileFromDisk reads a previously cached tile image from tile_path.
func loadTileFromDisk(tile_path string) (*TileRaster, error) {
	// os.ReadFile replaces the original Open + io.ReadAll + Close dance.
	img, err := os.ReadFile(tile_path)
	if err != nil {
		return nil, fmt.Errorf("read %s: %w", tile_path, err)
	}
	return &TileRaster{
		Content:       img,
		IsPlaceholder: false,
	}, nil
}
// saveTileToDisk persists a fetched tile image at tile_path, creating the
// parent directory as needed.
func saveTileToDisk(image *TileRaster, tile_path string) error {
	if err := os.MkdirAll(filepath.Dir(tile_path), 0750); err != nil {
		return fmt.Errorf("mkdirall: %w", err)
	}
	if err := os.WriteFile(tile_path, image.Content, 0644); err != nil {
		return fmt.Errorf("write image file: %w", err)
	}
	return nil
}
// tilePath returns the on-disk cache location for the given map service and
// tile coordinates, rooted at the configured files directory.
func tilePath(map_service_id string, z, y, x uint) string {
	return fmt.Sprintf("%s/tile-cache/%s/%d/%d/%d.raw", config.FilesDirectory, map_service_id, z, y, x)
}
// writeTile writes the PNG tile to the HTTP response, setting Content-Type
// and Content-Length first.
func writeTile(w http.ResponseWriter, image *TileRaster) error {
	w.Header().Set("Content-Type", "image/png")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(image.Content)))
	// Write the in-memory slice directly; no need to round-trip through
	// bytes.NewBuffer and io.Copy.
	if _, err := w.Write(image.Content); err != nil {
		return fmt.Errorf("write tile: %w", err)
	}
	return nil
}
// clientByOrgID caches one FieldSeeker client per organization so repeated
// tile requests reuse the same ArcGIS session (see getFieldseeker).
var clientByOrgID = make(map[int32]*fieldseeker.FieldSeeker, 0)

// tileRasterPlaceholder is the lazily loaded shared empty tile; see
// TileRasterPlaceholder.
var tileRasterPlaceholder *TileRaster

// TileRaster is a rendered map tile. IsPlaceholder marks the shared "no
// data here" image, which is never written to the disk cache.
type TileRaster struct {
	Content []byte
	IsPlaceholder bool
}
// ImageAtTile fetches the aerial-imagery tile at (level, y, x) for the
// organization's ArcGIS map service. When the service has no data at that
// location the shared placeholder tile is returned instead.
func ImageAtTile(ctx context.Context, org *models.Organization, level, y, x uint) (*TileRaster, error) {
	// Named creds rather than "oauth" so it does not shadow the imported
	// oauth package.
	creds, err := oauth.GetOAuthForOrg(ctx, org)
	if err != nil {
		return nil, fmt.Errorf("get oauth for org: %w", err)
	}
	fssync, err := background.NewFieldSeeker(
		ctx,
		creds,
	)
	if err != nil {
		return nil, fmt.Errorf("create fssync: %w", err)
	}
	map_service, err := aerialImageService(ctx, fssync.Arcgis)
	if err != nil {
		return nil, fmt.Errorf("no map service: %w", err)
	}
	data, e := map_service.Tile(ctx, level, y, x)
	if e != nil {
		return nil, fmt.Errorf("tile: %w", e)
	}
	// No data at this location, so supply the empty tile placeholder
	if len(data) == 0 {
		return TileRasterPlaceholder(), nil
	}
	return &TileRaster{
		Content:       data,
		IsPlaceholder: false,
	}, nil
}
// TileRasterPlaceholder returns the shared "empty tile" PNG, lazily loaded
// from the embedded filesystem on first use.
func TileRasterPlaceholder() *TileRaster {
	if tileRasterPlaceholder == nil {
		empty, err := emptyTileFS.ReadFile("empty-tile.png")
		if err != nil {
			panic(fmt.Sprintf("Failed to read empty-tile.png: %v", err))
		}
		// NOTE(review): unsynchronized lazy init; concurrent first calls
		// could race — confirm callers warm this up single-threaded.
		tileRasterPlaceholder = &TileRaster{
			Content:       empty,
			IsPlaceholder: true,
		}
	}
	return tileRasterPlaceholder
}
// aerialImageService returns the first map service exposed by the ArcGIS
// instance, which is assumed to carry the aerial imagery — TODO confirm
// that assumption holds for multi-service accounts.
func aerialImageService(ctx context.Context, gis *arcgis.ArcGIS) (*arcgis.MapService, error) {
	map_services, err := gis.MapServices(ctx)
	if err != nil {
		return nil, fmt.Errorf("aerial image service: %w", err)
	}
	if len(map_services) == 0 {
		// Fixed typo in the original message ("non found").
		return nil, fmt.Errorf("none found")
	}
	return &map_services[0], nil
}
// getFieldseeker returns a cached FieldSeeker client for the organization,
// creating and caching one on first use.
// NOTE(review): clientByOrgID is a plain map; concurrent HTTP handlers
// calling this need external synchronization — confirm.
func getFieldseeker(ctx context.Context, org *models.Organization) (*fieldseeker.FieldSeeker, error) {
	if fssync, ok := clientByOrgID[org.ID]; ok {
		return fssync, nil
	}
	creds, err := oauth.GetOAuthForOrg(ctx, org)
	if err != nil {
		return nil, fmt.Errorf("get oauth for org: %w", err)
	}
	fssync, err := background.NewFieldSeeker(ctx, creds)
	if err != nil {
		// The original ignored this error and cached a nil client.
		return nil, fmt.Errorf("create fieldseeker: %w", err)
	}
	clientByOrgID[org.ID] = fssync
	return fssync, nil
}

508
platform/trap.go Normal file
View file

@ -0,0 +1,508 @@
package platform
import (
"errors"
"fmt"
"time"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/db/sql"
"github.com/Gleipnir-Technology/nidus-sync/h3utils"
"github.com/aarondl/opt/null"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
"github.com/uber/h3-go/v4"
)
// BreedingSourceDetail is the full display model for a breeding source
// (FieldSeeker point location); see toBreedingSource for the mapping.
type BreedingSourceDetail struct {
	// Basic Information
	OrganizationID int32 `json:"organizationId"`
	Name string `json:"name"`
	Description string `json:"description"`
	LocationNumber int64 `json:"locationNumber"`
	ObjectID int64 `json:"objectId"`
	GlobalID uuid.UUID `json:"globalId"`
	ExternalID string `json:"externalId"`
	// Status Information
	Active bool `json:"active"`
	DeactivateReason string `json:"deactivateReason"`
	SourceStatus string `json:"sourceStatus"`
	Priority string `json:"priority"`
	ScalarPriority int64 `json:"scalarPriority"`
	// Classification
	SourceType string `json:"sourceType"`
	Habitat string `json:"habitat"`
	UseType string `json:"useType"`
	WaterOrigin string `json:"waterOrigin"`
	Symbology string `json:"symbology"`
	// Geographical Data
	H3Cell h3.Cell `json:"h3cell"`
	Zone string `json:"zone"`
	Zone2 string `json:"zone2"`
	Jurisdiction string `json:"jurisdiction"`
	AccessDescription string `json:"accessDescription"`
	// Inspection Data
	LarvaeInspectInterval int16 `json:"larvaeInspectInterval"`
	LastInspectionDate *time.Time `json:"lastInspectionDate"`
	LastInspectionActivity string `json:"lastInspectionActivity"`
	LastInspectionActionTaken string `json:"lastInspectionActionTaken"`
	LastInspectionAverageLarvae float64 `json:"lastInspectionAverageLarvae"`
	LastInspectionAveragePupae float64 `json:"lastInspectionAveragePupae"`
	LastInspectionBreeding string `json:"lastInspectionBreeding"`
	LastInspectionConditions string `json:"lastInspectionConditions"`
	LastInspectionFieldSpecies string `json:"lastInspectionFieldSpecies"`
	LastInspectionLifeStages string `json:"lastInspectionLifeStages"`
	// Treatment Data
	LastTreatmentDate *time.Time `json:"lastTreatmentDate"`
	LastTreatmentActivity string `json:"lastTreatmentActivity"`
	LastTreatmentProduct string `json:"lastTreatmentProduct"`
	LastTreatmentQuantity float64 `json:"lastTreatmentQuantity"`
	LastTreatmentQuantityUnit string `json:"lastTreatmentQuantityUnit"`
	// Assignment & Schedule
	AssignedTechnician string `json:"assignedTechnician"`
	NextActionScheduledDate *time.Time `json:"nextActionScheduledDate"`
	// Metadata
	Created *time.Time `json:"created"`
	Creator string `json:"creator"`
	EditedAt *time.Time `json:"editedAt"`
	Editor string `json:"editor"`
	Comments string `json:"comments"`
}
// BreedingSourceSummary is the condensed list view of a breeding source.
type BreedingSourceSummary struct {
	ID uuid.UUID
	Type string
	LastInspected *time.Time
	LastTreated *time.Time
}
// Trap is a trap location together with its collection history; built by
// toTrap.
type Trap struct {
	Active bool
	Comments string
	Collections []TrapData
	Description string
	GlobalID uuid.UUID
	H3Cell h3.Cell
}
// TrapCount aggregates mosquito counts for one trap-data (collection)
// record.
type TrapCount struct {
	Ended time.Time
	Females int
	ID uuid.UUID
	Males int
	Total int
}
// TrapData is the display model for a single trap collection event, mapped
// from a FieldSeeker trap datum by toTemplateTrapData.
type TrapData struct {
	// Basic Identifiers
	OrganizationID int32 `json:"organizationId"`
	ObjectID int64 `json:"objectId"`
	GlobalID uuid.UUID `json:"globalId"`
	LocationName string `json:"locationName"`
	LocationID uuid.UUID `json:"locationId"`
	SRID uuid.UUID `json:"srid"`
	Field int64 `json:"field"`
	// Trap Information
	TrapType string `json:"trapType"`
	TrapCondition string `json:"trapCondition"`
	TrapActivityType string `json:"trapActivityType"`
	TrapNights int16 `json:"trapNights"`
	Lure string `json:"lureType"`
	// Personnel
	FieldTechnician string `json:"fieldTechnician"`
	IdentifiedByTechnician string `json:"identifiedByTechnician"`
	SortedByTechnician string `json:"sortedByTechnician"`
	// Timing
	StartDateTime *time.Time `json:"startDateTime"`
	EndDateTime *time.Time `json:"endDateTime"`
	// Environmental Conditions
	AverageTemperature float64 `json:"averageTemperature"`
	Rainfall float64 `json:"rainfall"`
	WindDirection string `json:"windDirection"`
	WindSpeed float64 `json:"windSpeed"`
	SiteCondition string `json:"siteCondition"`
	// Status and Processing
	Processed bool `json:"processed"`
	RecordStatus int16 `json:"recordStatus"`
	Reviewed bool `json:"reviewed"`
	ReviewedBy string `json:"reviewedBy"`
	ReviewedDate *time.Time `json:"reviewedDate"`
	GatewaySynced bool `json:"gatewaySynced"`
	LR bool `json:"laboratoryReported"`
	Voltage float64 `json:"voltage"`
	// Location Data
	H3Cell h3.Cell `json:"h3cell"`
	Zone string `json:"zone"`
	Zone2 string `json:"zone2"`
	// Vector Survey IDs
	VectorSurveyTrapDataID string `json:"vectorSurveyTrapDataId"`
	VectorSurveyTrapLocationID string `json:"vectorSurveyTrapLocationId"`
	// Metadata
	Created *time.Time `json:"created"`
	Creator string `json:"creator"`
	CreatedByUser string `json:"createdByUser"`
	CreatedDateAlt *time.Time `json:"createdDateAlt"`
	Edited *time.Time `json:"edited"`
	Editor string `json:"editor"`
	LastEditedDate *time.Time `json:"lastEditedDate"`
	LastEditedUser string `json:"lastEditedUser"`
	Comments string `json:"comments"`
	// Stuff I actually use
	Count TrapCount
}
// TrapNearby pairs a trap near a breeding source with its recent counts;
// built by toTemplateTrapsNearby.
type TrapNearby struct {
	Counts []*TrapCount
	Distance string
	ID uuid.UUID
}
// TrapSummary is the condensed list view of a trap location.
type TrapSummary struct {
	Active bool
	Comments string
	Description string
	GlobalID uuid.UUID
}
// Treatment is a single treatment event at a point location. CadenceDelta
// is filled in later by ModelTreatment (actual minus predicted date).
type Treatment struct {
	CadenceDelta time.Duration
	Date *time.Time
	LocationID uuid.UUID
	Notes string
	Product string
}
// toTrap builds the Trap display model for one trap location, attaching the
// given recent collection rows and their per-collection counts.
// NOTE(review): trap.H3cell.MustGet() panics when the cell is null — other
// converters in this file skip null cells instead; confirm which is wanted.
func toTrap(trap *models.FieldseekerTraplocation, trap_data []sql.TrapDataByLocationIDRecentRow, count_slice []sql.TrapCountByLocationIDRow) (result Trap, err error) {
	log.Debug().Str("globalid", trap.Globalid.String()).Msg("Working on trap")
	cell, err := h3utils.ToCell(trap.H3cell.MustGet())
	if err != nil {
		return result, fmt.Errorf("Failed to convert h3 cell: %w", err)
	}
	// Index counts by their trap-data (collection) ID so each collection can
	// pick up its aggregate below; collections without counts get a zero
	// TrapCount.
	count_by_trapdata_id := make(map[uuid.UUID]TrapCount, 0)
	for _, count := range count_slice {
		count_by_trapdata_id[count.TrapdataGlobalid] = TrapCount{
			Ended: count.TrapdataEnddate.MustGet(),
			Females: int(count.TotalFemales),
			Males: int(count.TotalMales),
			Total: int(count.Total),
		}
	}
	data_by_id := make(map[uuid.UUID]TrapData, 0)
	for _, dt := range trap_data {
		// Guard: every row must belong to this trap location.
		if dt.LocID != trap.Globalid {
			return result, fmt.Errorf("Bad query")
		}
		log.Debug().Str("trapdata", dt.Globalid.String()).Msg("Aggregating trapdata")
		count, ok := count_by_trapdata_id[dt.Globalid]
		if !ok {
			count = TrapCount{}
		}
		data_by_id[dt.Globalid] = TrapData{
			Count: count,
			EndDateTime: &dt.Enddatetime,
			GlobalID: dt.Globalid,
		}
	}
	// Map iteration order is random, so Collections comes back unordered.
	data := make([]TrapData, 0)
	for _, v := range data_by_id {
		data = append(data, v)
	}
	return Trap{
		Active: toBool16Or(trap.Active, false),
		Comments: trap.Comments.GetOr(""),
		Collections: data,
		Description: trap.Description.GetOr(""),
		GlobalID: trap.Globalid,
		H3Cell: cell,
	}, nil
}
// toTemplateTrapSummary maps trap location rows onto the condensed
// TrapSummary display model.
func toTemplateTrapSummary(traps models.FieldseekerTraplocationSlice) (results []TrapSummary, err error) {
	for _, loc := range traps {
		summary := TrapSummary{
			Active:      toBool16Or(loc.Active, false),
			Comments:    loc.Comments.GetOr(""),
			Description: loc.Description.GetOr(""),
			GlobalID:    loc.Globalid,
		}
		results = append(results, summary)
	}
	return results, err
}
// toTemplateTrapsNearby joins nearby trap locations with their recent
// collection events and per-event counts into TrapNearby display records.
// It errors when a trap-data row has no matching count, or a location has
// no counts at all.
func toTemplateTrapsNearby(locations []sql.TrapLocationBySourceIDRow, trap_data []sql.TrapDataByLocationIDRecentRow, counts []sql.TrapCountByLocationIDRow) ([]TrapNearby, error) {
	results := make([]TrapNearby, 0)
	count_by_trap_data_id := make(map[uuid.UUID]*sql.TrapCountByLocationIDRow)
	for i := range counts {
		// Take the address of the slice element, not of the range variable:
		// before Go 1.22 the original &c aliased a single shared loop
		// variable, leaving every map entry pointing at the final row.
		count_by_trap_data_id[counts[i].TrapdataGlobalid] = &counts[i]
	}
	counts_by_location_id := make(map[uuid.UUID][]*TrapCount)
	for _, td := range trap_data {
		c, ok := count_by_trap_data_id[td.Globalid]
		if !ok {
			return results, errors.New(fmt.Sprintf("Failed to find trap count for %s", td.Globalid))
		}
		count := &TrapCount{
			Ended:   td.Enddatetime,
			Females: int(c.TotalFemales),
			ID:      td.Globalid,
			Males:   int(c.TotalMales),
			Total:   int(c.Total),
		}
		counts_by_location_id[td.LocID] = append(counts_by_location_id[td.LocID], count)
	}
	for _, location := range locations {
		counts, ok := counts_by_location_id[location.TrapLocationGlobalid]
		if !ok {
			return results, errors.New(fmt.Sprintf("Failed to find counts for %s", location.TrapLocationGlobalid))
		}
		results = append(results, TrapNearby{
			Counts:   counts,
			Distance: location.Distance,
			ID:       location.TrapLocationGlobalid,
		})
	}
	return results, nil
}
// toTemplateTrapData maps FieldSeeker trap datum rows onto the TrapData
// display model. Rows with a null or unconvertible H3 cell are skipped (the
// conversion failure is logged, not returned).
func toTemplateTrapData(trap_data models.FieldseekerTrapdatumSlice) ([]TrapData, error) {
	var results []TrapData
	for _, r := range trap_data {
		if r.H3cell.IsNull() {
			continue
		}
		cell, err := h3utils.ToCell(r.H3cell.MustGet())
		if err != nil {
			log.Error().Err(err).Msg("Failed to get location for trap data")
			continue
		}
		results = append(results, TrapData{
			// Basic Identifiers
			OrganizationID: r.OrganizationID,
			ObjectID: r.Objectid,
			GlobalID: r.Globalid,
			LocationName: r.Locationname.GetOr(""),
			LocationID: r.LocID.GetOr(uuid.UUID{}),
			SRID: r.Srid.GetOr(uuid.UUID{}),
			Field: int64(r.Field.GetOr(0)),
			// Trap Information
			TrapType: r.Traptype.GetOr(""),
			TrapCondition: r.Trapcondition.GetOr(""),
			TrapActivityType: r.Trapactivitytype.GetOr(""),
			TrapNights: r.Trapnights.GetOr(0),
			Lure: r.Lure.GetOr(""),
			// Personnel
			FieldTechnician: r.Fieldtech.GetOr(""),
			IdentifiedByTechnician: r.Idbytech.GetOr(""),
			SortedByTechnician: r.Sortbytech.GetOr(""),
			// Timing
			StartDateTime: getTimeOrNull(r.Startdatetime),
			EndDateTime: getTimeOrNull(r.Enddatetime),
			// Environmental Conditions
			AverageTemperature: r.Avetemp.GetOr(0),
			Rainfall: r.Raingauge.GetOr(0),
			WindDirection: r.Winddir.GetOr(""),
			WindSpeed: r.Windspeed.GetOr(0),
			SiteCondition: r.Sitecond.GetOr(""),
			// Status and Processing
			Processed: fsIntToBool(r.Processed),
			RecordStatus: r.Recordstatus.GetOr(0),
			Reviewed: fsIntToBool(r.Reviewed),
			ReviewedBy: r.Reviewedby.GetOr(""),
			ReviewedDate: getTimeOrNull(r.Revieweddate),
			GatewaySynced: fsIntToBool(r.Gatewaysync),
			LR: fsIntToBool(r.LR),
			Voltage: r.Voltage.GetOr(0),
			// Location Data
			H3Cell: cell,
			Zone: r.Zone.GetOr(""),
			Zone2: r.Zone2.GetOr(""),
			// Vector Survey IDs
			VectorSurveyTrapDataID: r.Vectorsurvtrapdataid.GetOr(""),
			VectorSurveyTrapLocationID: r.Vectorsurvtraplocationid.GetOr(""),
			// Metadata
			Created: getTimeOrNull(r.Creationdate),
			Creator: r.Creator.GetOr(""),
			CreatedByUser: r.CreatedUser.GetOr(""),
			CreatedDateAlt: getTimeOrNull(r.CreatedDate),
			Edited: getTimeOrNull(r.Editdate),
			Editor: r.Editor.GetOr(""),
			LastEditedDate: getTimeOrNull(r.LastEditedDate),
			LastEditedUser: r.LastEditedUser.GetOr(""),
			Comments: r.Comments.GetOr(""),
		})
	}
	return results, nil
}
// toTreatment maps FieldSeeker treatment rows onto the Treatment display
// model, substituting "none" for missing notes and products.
func toTreatment(rows models.FieldseekerTreatmentSlice) ([]Treatment, error) {
	var results []Treatment
	for _, row := range rows {
		t := Treatment{
			Date:       getTimeOrNull(row.Enddatetime),
			LocationID: row.Pointlocid.GetOr(uuid.UUID{}),
			Notes:      row.Comments.GetOr("none"),
			Product:    row.Product.GetOr("none"),
		}
		results = append(results, t)
	}
	return results, nil
}
// toTemplateInspection maps mosquito inspection rows onto the Inspection
// display model, substituting "none" for missing text fields.
func toTemplateInspection(rows models.FieldseekerMosquitoinspectionSlice) ([]Inspection, error) {
	var results []Inspection
	for _, row := range rows {
		ins := Inspection{
			Action:     row.Actiontaken.GetOr("none"),
			Date:       getTimeOrNull(row.Enddatetime),
			Notes:      row.Comments.GetOr("none"),
			Location:   row.Locationname.GetOr("none"),
			LocationID: row.Pointlocid.GetOr(uuid.UUID{}),
		}
		results = append(results, ins)
	}
	return results, nil
}
// fsToTime converts a FieldSeeker unix-millisecond timestamp to time.Time;
// a missing value maps to the epoch.
func fsToTime(val null.Val[int64]) time.Time {
	return time.UnixMilli(val.GetOr(0))
}
// fsIntToBool interprets FieldSeeker's int16 flags as booleans; a missing
// value is false.
func fsIntToBool(val null.Val[int16]) bool {
	return val.GetOr(0) != 0
}
// toBreedingSource transforms the DB point-location model into the
// BreedingSourceDetail display model. Sources with a null H3 cell are
// rejected with an error.
// NOTE(review): Name.MustGet() and Description.MustGet() panic when those
// columns are null, unlike the GetOr defaults used everywhere else —
// confirm the schema marks them NOT NULL.
func toBreedingSource(source *models.FieldseekerPointlocation) (*BreedingSourceDetail, error) {
	if source.H3cell.IsNull() {
		return nil, fmt.Errorf("h3 cell is null")
	}
	cell, err := h3utils.ToCell(source.H3cell.MustGet())
	if err != nil {
		return nil, fmt.Errorf("Failed to get h3 cell from point location: %w", err)
	}
	return &BreedingSourceDetail{
		// Basic Information
		OrganizationID: source.OrganizationID,
		Name: source.Name.MustGet(),
		Description: source.Description.MustGet(),
		LocationNumber: int64(source.Locationnumber.GetOr(0)),
		ObjectID: source.Objectid,
		GlobalID: source.Globalid,
		ExternalID: source.Externalid.GetOr(""),
		// Status Information
		Active: fsIntToBool(source.Active),
		DeactivateReason: source.DeactivateReason.GetOr(""),
		SourceStatus: source.Sourcestatus.GetOr(""),
		Priority: source.Priority.GetOr(""),
		ScalarPriority: int64(source.Scalarpriority.GetOr(0)),
		// Classification
		SourceType: source.Stype.GetOr(""),
		Habitat: source.Habitat.GetOr(""),
		UseType: source.Usetype.GetOr(""),
		WaterOrigin: source.Waterorigin.GetOr(""),
		Symbology: source.Symbology.GetOr(""),
		// Geographical Data
		H3Cell: cell,
		Zone: source.Zone.GetOr(""),
		Zone2: source.Zone2.GetOr(""),
		Jurisdiction: source.Jurisdiction.GetOr(""),
		AccessDescription: source.Accessdesc.GetOr(""),
		// Inspection Data
		LarvaeInspectInterval: source.Larvinspectinterval.GetOr(0),
		LastInspectionDate: getTimeOrNull(source.Lastinspectdate),
		LastInspectionActivity: source.Lastinspectactivity.GetOr(""),
		LastInspectionActionTaken: source.Lastinspectactiontaken.GetOr(""),
		LastInspectionAverageLarvae: source.Lastinspectavglarvae.GetOr(0),
		LastInspectionAveragePupae: source.Lastinspectavgpupae.GetOr(0),
		LastInspectionBreeding: source.Lastinspectbreeding.GetOr(""),
		LastInspectionConditions: source.Lastinspectconditions.GetOr(""),
		LastInspectionFieldSpecies: source.Lastinspectfieldspecies.GetOr(""),
		LastInspectionLifeStages: source.Lastinspectlstages.GetOr(""),
		// Treatment Data
		LastTreatmentDate: getTimeOrNull(source.Lasttreatdate),
		LastTreatmentActivity: source.Lasttreatactivity.GetOr(""),
		LastTreatmentProduct: source.Lasttreatproduct.GetOr(""),
		LastTreatmentQuantity: source.Lasttreatqty.GetOr(0),
		LastTreatmentQuantityUnit: source.Lasttreatqtyunit.GetOr(""),
		// Assignment & Schedule
		AssignedTechnician: source.Assignedtech.GetOr(""),
		NextActionScheduledDate: getTimeOrNull(source.Nextactiondatescheduled),
		// Metadata
		Created: getTimeOrNull(source.Creationdate),
		Creator: source.Creator.GetOr(""),
		EditedAt: getTimeOrNull(source.Editdate),
		Editor: source.Editor.GetOr(""),
		Comments: source.Comments.GetOr(""),
	}, nil
}
// getTimeOrNull converts a nullable timestamp to *time.Time, mapping a
// missing value to nil.
func getTimeOrNull(v null.Val[time.Time]) *time.Time {
	t, ok := v.Get()
	if !ok {
		return nil
	}
	return &t
}
// toBool16Or interprets a nullable int16 flag as a boolean, returning def
// when the value is missing.
func toBool16Or(t null.Val[int16], def bool) bool {
	if t.IsNull() {
		return def
	}
	return t.MustGet() != 0
}

163
platform/treatment.go Normal file
View file

@ -0,0 +1,163 @@
package platform
import (
"sort"
"time"
//"github.com/rs/zerolog/log"
)
// TreatmentModel represents the calculated model for a year's treatments
type TreatmentModel struct {
	Year int
	SeasonStart time.Time // modeled date of the first treatment
	SeasonEnd time.Time // modeled date of the last treatment
	Interval time.Duration // modeled average gap between treatments
	ActualDates []time.Time // observed treatment dates, sorted ascending
	PredictedDates []time.Time // regression prediction for each treatment
	Errors []time.Duration // actual minus predicted, per treatment
}
// ModelTreatment fits one per-year regression model over the given
// treatments' dates and writes each treatment's deviation from its
// predicted date back into CadenceDelta. The fitted models are returned.
func ModelTreatment(treatments []Treatment) []TreatmentModel {
	treatment_times := make([]time.Time, 0)
	for _, treatment := range treatments {
		if treatment.Date != nil {
			treatment_times = append(treatment_times, *treatment.Date)
		}
	}
	models := calculateTreatmentModels(treatment_times)
	/*models_by_year := make(map[int]TreatmentModel)
	for _, m := range models {
		models_by_year[m.Year] = m
	}*/
	// NOTE(review): this write-back assumes treatments are sorted by date
	// and contain no nil Dates — treatment_times skips nil dates while
	// offset indexes the full slice, and models iterate year-ascending
	// regardless of input order. Confirm, otherwise CadenceDelta can land
	// on the wrong treatment.
	offset := 0
	for _, model := range models {
		for _, e := range model.Errors {
			treatments[offset].CadenceDelta = e
			offset = offset + 1
		}
	}
	/*
		for i, treatment := range treatments {
			model
			treatment.CadenceDelta = deltas[i]
			treatments[i] = treatment
		}
	*/
	/*cadence, deltas := calculateCadenceVariance(treatment_times)
	for i, treatment := range treatments {
		if i >= len(deltas) {
			break
		}
		treatment.CadenceDelta = deltas[i]
		treatments[i] = treatment
	}*/
	return models
}
// calculateTreatmentModels groups treatment dates by calendar year, fits
// one TreatmentModel per year, and returns them in ascending year order.
func calculateTreatmentModels(treatments []time.Time) []TreatmentModel {
	byYear := make(map[int][]time.Time)
	for _, t := range treatments {
		byYear[t.Year()] = append(byYear[t.Year()], t)
	}
	var models []TreatmentModel
	for year, dates := range byYear {
		// Dates must be ascending before fitting the per-year model.
		sort.Slice(dates, func(i, j int) bool {
			return dates[i].Before(dates[j])
		})
		models = append(models, calculateYearModel(year, dates))
	}
	sort.Slice(models, func(i, j int) bool {
		return models[i].Year < models[j].Year
	})
	return models
}
// calculateYearModel creates a model for a specific year using linear
// regression over the (sorted) treatment dates. With fewer than two dates
// no regression is possible and only Year and ActualDates are populated.
func calculateYearModel(year int, dates []time.Time) TreatmentModel {
	n := len(dates)
	if n < 2 {
		// Not enough data for a model
		return TreatmentModel{
			Year:        year,
			ActualDates: dates,
		}
	}
	// Regress Unix seconds (y) against treatment index (x).
	var x []float64
	var y []float64
	for i, date := range dates {
		x = append(x, float64(i))
		y = append(y, float64(date.Unix()))
	}
	// y = intercept + slope*x: the intercept models the season start and
	// the slope the average interval between treatments, in seconds.
	slope, intercept := linearRegression(x, y)
	startTime := time.Unix(int64(intercept), 0)
	interval := time.Duration(int64(slope)) * time.Second
	// Season end is the model's prediction for the final treatment.
	endTime := time.Unix(int64(intercept+slope*float64(n-1)), 0)
	// Predicted date and signed deviation (actual - predicted) for each
	// treatment.
	var predictedDates []time.Time
	var deviations []time.Duration
	for i := 0; i < n; i++ {
		predicted := time.Unix(int64(intercept+slope*float64(i)), 0)
		predictedDates = append(predictedDates, predicted)
		// Renamed from "error", which shadowed the builtin error type.
		delta := dates[i].Sub(predicted)
		deviations = append(deviations, delta)
	}
	return TreatmentModel{
		Year:           year,
		SeasonStart:    startTime,
		SeasonEnd:      endTime,
		Interval:       interval,
		ActualDates:    dates,
		PredictedDates: predictedDates,
		Errors:         deviations,
	}
}
// linearRegression computes the least-squares best-fit line through the
// points (x[i], y[i]) and returns its slope and intercept. With fewer than
// two points it returns (0, 0).
func linearRegression(x, y []float64) (float64, float64) {
	n := float64(len(x))
	if n < 2 {
		return 0, 0
	}
	var sx, sy, sxy, sxx float64
	for i := range x {
		sx += x[i]
		sy += y[i]
		sxy += x[i] * y[i]
		sxx += x[i] * x[i]
	}
	slope := (n*sxy - sx*sy) / (n*sxx - sx*sx)
	intercept := (sy - slope*sx) / n
	return slope, intercept
}

View file

@ -9,11 +9,11 @@ import (
"github.com/Gleipnir-Technology/bob/dialect/psql"
"github.com/Gleipnir-Technology/bob/dialect/psql/sm"
"github.com/Gleipnir-Technology/bob/dialect/psql/um"
"github.com/Gleipnir-Technology/nidus-sync/background"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/userfile"
"github.com/Gleipnir-Technology/nidus-sync/platform/background"
"github.com/Gleipnir-Technology/nidus-sync/platform/file"
"github.com/aarondl/opt/omit"
"github.com/aarondl/opt/omitnull"
"github.com/rs/zerolog/log"
@ -41,7 +41,7 @@ type UploadSummary struct {
Type string `db:"type"`
}
func NewUpload(ctx context.Context, u *models.User, upload userfile.FileUpload, t enums.FileuploadCsvtype) (Upload, error) {
func NewUpload(ctx context.Context, u User, upload file.FileUpload, t enums.FileuploadCsvtype) (Upload, error) {
txn, err := db.PGInstance.BobDB.BeginTx(ctx, nil)
if err != nil {
return Upload{}, fmt.Errorf("Failed to begin transaction: %w", err)
@ -51,10 +51,10 @@ func NewUpload(ctx context.Context, u *models.User, upload userfile.FileUpload,
file, err := models.FileuploadFiles.Insert(&models.FileuploadFileSetter{
ContentType: omit.From(upload.ContentType),
Created: omit.From(time.Now()),
CreatorID: omit.From(u.ID),
CreatorID: omit.From(int32(u.ID)),
Deleted: omitnull.FromPtr[time.Time](nil),
Name: omit.From(upload.Name),
OrganizationID: omit.From(u.OrganizationID),
OrganizationID: omit.From(u.Organization.ID()),
Status: omit.From(enums.FileuploadFilestatustypeUploaded),
SizeBytes: omit.From(int32(upload.SizeBytes)),
FileUUID: omit.From(upload.UUID),
@ -78,7 +78,7 @@ func NewUpload(ctx context.Context, u *models.User, upload userfile.FileUpload,
ID: file.ID,
}, nil
}
func UploadCommit(ctx context.Context, org *models.Organization, file_id int32, committer *models.User) error {
func UploadCommit(ctx context.Context, org Organization, file_id int32, committer User) error {
// Create addresses for each row
// Create sites for each row
// Create pools for each row
@ -92,7 +92,7 @@ func UploadCommit(ctx context.Context, org *models.Organization, file_id int32,
background.CommitUpload(file_id)
return err
}
func UploadDiscard(ctx context.Context, org *models.Organization, file_id int32) error {
func UploadDiscard(ctx context.Context, org Organization, file_id int32) error {
_, err := psql.Update(
um.Table(models.FileuploadFiles.Alias()),
um.SetCol("status").ToArg("discarded"),
@ -101,7 +101,7 @@ func UploadDiscard(ctx context.Context, org *models.Organization, file_id int32)
).Exec(ctx, db.PGInstance.BobDB)
return err
}
func UploadSummaryList(ctx context.Context, org *models.Organization) ([]UploadSummary, error) {
func UploadSummaryList(ctx context.Context, org Organization) ([]UploadSummary, error) {
results := make([]UploadSummary, 0)
rows, err := bob.All(ctx, db.PGInstance.BobDB, psql.Select(
sm.Columns(

View file

@ -3,37 +3,133 @@ package platform
import (
"context"
"fmt"
"strings"
"github.com/aarondl/opt/omit"
//"github.com/Gleipnir-Technology/bob"
"github.com/Gleipnir-Technology/bob/dialect/psql/dialect"
"github.com/Gleipnir-Technology/bob/mods"
"github.com/Gleipnir-Technology/nidus-sync/db"
"github.com/Gleipnir-Technology/nidus-sync/db/enums"
"github.com/Gleipnir-Technology/nidus-sync/db/models"
"github.com/Gleipnir-Technology/nidus-sync/notification"
"github.com/Gleipnir-Technology/nidus-sync/debug"
"github.com/rs/zerolog/log"
)
// NoUserError is returned by user lookups when no matching user exists.
type NoUserError struct{}

// Error implements the error interface with a user-facing message.
func (e NoUserError) Error() string { return "That user does not exist" }
type User struct {
DisplayName string `json:"display_name"`
Initials string
Notifications []notification.Notification
Organization Organization `json:"organization"`
Role string `json:"role"`
Username string `json:"username"`
DisplayName string `json:"display_name"`
ID int `json:"-"`
Initials string `json:"initials"`
Notifications []Notification `json:"-"`
Organization Organization `json:"organization"`
PasswordHash string `json:"-"`
PasswordHashType string `json:"-"`
Role string `json:"role"`
Username string `json:"username"`
model *models.User
}
func UsersByID(ctx context.Context, org *models.Organization) (map[int32]*User, error) {
users, err := org.User().All(ctx, db.PGInstance.BobDB)
func (u User) HasRoot() bool {
return u.model.Role != enums.UserroleRoot
}
// CreateUser creates a fresh organization named after the user and an
// account-owner user inside it, returning the platform User.
//
// password_hash must already be hashed by the caller; it is stored with
// hash type bcrypt-14.
//
// NOTE(review): the two inserts are not wrapped in a transaction, so a
// failed user insert leaves an orphaned organization behind — confirm
// whether that is acceptable or wrap in BeginTx like NewUpload does.
func CreateUser(ctx context.Context, username string, name string, password_hash string) (*User, error) {
	o_setter := models.OrganizationSetter{
		Name: omit.From(fmt.Sprintf("%s's organization", username)),
	}
	o, err := models.Organizations.Insert(&o_setter).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to create organization: %w", err)
	}
	log.Info().Int32("id", o.ID).Msg("Created organization")
	u_setter := models.UserSetter{
		DisplayName:      omit.From(name),
		OrganizationID:   omit.From(o.ID),
		PasswordHash:     omit.From(password_hash),
		PasswordHashType: omit.From(enums.HashtypeBcrypt14),
		Role:             omit.From(enums.UserroleAccountOwner),
		Username:         omit.From(username),
	}
	user, err := models.Users.Insert(&u_setter).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		return nil, fmt.Errorf("Failed to create user: %w", err)
	}
	log.Info().Int32("id", user.ID).Str("username", user.Username).Msg("Created user")
	return &User{
		DisplayName: user.DisplayName,
		// Populate ID from the inserted row: callers such as NewUpload
		// use int32(u.ID) as CreatorID, and previously this was left at
		// its zero value.
		ID:            int(user.ID),
		Initials:      extractInitials(user.DisplayName),
		Notifications: []Notification{},
		Organization:  newOrganization(o),
		Role:          user.Role.String(),
		Username:      user.Username,
		model:         user,
	}, nil
}
// UserByID looks up a single user by primary-key ID.
func UserByID(ctx context.Context, user_id int) (*User, error) {
	byID := models.SelectWhere.Users.ID.EQ(int32(user_id))
	return getUser(ctx, byID)
}
// UserByUsername looks up a single user by exact username.
func UserByUsername(ctx context.Context, username string) (*User, error) {
	byName := models.SelectWhere.Users.Username.EQ(username)
	return getUser(ctx, byName)
}
// UsersByOrg returns every user belonging to org, keyed by user ID.
//
// NOTE(review): the visible source interleaved old and new versions of
// this loop; this keeps the new version. Initials are now derived with
// extractInitials for consistency with CreateUser and getUser instead
// of being left empty, and ID/model are populated like the other
// constructors.
func UsersByOrg(ctx context.Context, org Organization) (map[int32]*User, error) {
	users, err := org.model.User().All(ctx, db.PGInstance.BobDB)
	if err != nil {
		return make(map[int32]*User, 0), fmt.Errorf("get all org users: %w", err)
	}
	results := make(map[int32]*User, len(users))
	for _, user := range users {
		results[user.ID] = &User{
			DisplayName:   user.DisplayName,
			ID:            int(user.ID),
			Initials:      extractInitials(user.DisplayName),
			Notifications: []Notification{},
			Organization:  org,
			Role:          user.Role.String(),
			Username:      user.Username,
			model:         user,
		}
	}
	return results, nil
}
// getUser fetches a single user (with its organization preloaded)
// matching the given WHERE clause and converts it to a platform User.
// It returns NoUserError when no row matches.
func getUser(ctx context.Context, where mods.Where[*dialect.SelectQuery]) (*User, error) {
	user, err := models.Users.Query(
		models.Preload.User.Organization(),
		where,
	).One(ctx, db.PGInstance.BobDB)
	if err != nil {
		// NOTE(review): matching on error text is brittle; prefer
		// errors.Is(err, sql.ErrNoRows) once database/sql is imported.
		if err.Error() == "No such user" || err.Error() == "sql: no rows in result set" {
			return nil, &NoUserError{}
		}
		debug.LogErrorTypeInfo(err)
		log.Error().Err(err).Msg("Unrecognized error. This should be updated in the getUser code")
		return nil, err
	}
	org := newOrganization(user.R.Organization)
	return &User{
		DisplayName:   user.DisplayName,
		ID:            int(user.ID),
		Initials:      extractInitials(user.DisplayName),
		Notifications: []Notification{},
		Organization:  org,
		Role:          user.Role.String(),
		Username:      user.Username,
		// model must be set: User.HasRoot dereferences it and would
		// panic with a nil pointer for users loaded through this path.
		model: user,
	}, nil
}
// extractInitials returns the upper-cased first letter of each
// whitespace-separated word in name, e.g. "Jane Q. Doe" -> "JQD".
//
// Fixed: the previous version took the first BYTE of each word
// (string(part[0])), which produced invalid UTF-8 for names whose
// first character is multi-byte; we now take the first rune.
func extractInitials(name string) string {
	var initials strings.Builder
	for _, word := range strings.Fields(name) {
		// strings.Fields never yields empty words, so word[0] exists;
		// convert to runes to grab the first character, not byte.
		first := []rune(word)[0]
		initials.WriteString(strings.ToUpper(string(first)))
	}
	return initials.String()
}