This refactor was born out of the inter-dependency cycles developing between the "background" module and just about every other module. The cycles arose because the background module had become a dependency of every module that needed to run background work, while the background module was also supposedly responsible for the logic for processing those tasks. Instead, the "background" module is now very, very shallow and relies entirely on the Postgres NOTIFY logic for triggering jobs. There's a new table, `job`, which holds just a type and a single row ID. All told, this means that jobs can be added to the queue as part of the API-level or platform-level transaction, ensuring atomicity, and processing coordination is handled by the platform module, which can depend on anything.
220 lines
6.3 KiB
Go
220 lines
6.3 KiB
Go
package main
|
|
|
|
import (
	"context"
	"errors"
	"flag"
	"fmt"
	"net/http"
	"os"
	"os/signal"
	"runtime/debug"
	"syscall"
	"time"

	"github.com/Gleipnir-Technology/nidus-sync/api"
	"github.com/Gleipnir-Technology/nidus-sync/auth"
	"github.com/Gleipnir-Technology/nidus-sync/config"
	"github.com/Gleipnir-Technology/nidus-sync/db"
	"github.com/Gleipnir-Technology/nidus-sync/html"
	"github.com/Gleipnir-Technology/nidus-sync/llm"
	"github.com/Gleipnir-Technology/nidus-sync/platform"
	"github.com/Gleipnir-Technology/nidus-sync/rmo"
	nidussync "github.com/Gleipnir-Technology/nidus-sync/sync"
	"github.com/getsentry/sentry-go"
	sentryhttp "github.com/getsentry/sentry-go/http"
	sentryzerolog "github.com/getsentry/sentry-go/zerolog"
	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"github.com/go-chi/hostrouter"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)
|
|
|
|
func main() {
|
|
err := config.Parse()
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to parse config")
|
|
os.Exit(1)
|
|
}
|
|
|
|
var prod = flag.Bool("prod", false, "Force into production mode")
|
|
flag.Parse()
|
|
if prod != nil && *prod {
|
|
log.Warn().Msg("Forcing production mode for testing templates")
|
|
config.Environment = "PRODUCTION"
|
|
}
|
|
log.Info().Str("environment", config.Environment).Bool("is-prod", config.IsProductionEnvironment()).Msg("Starting")
|
|
err = sentry.Init(sentry.ClientOptions{
|
|
Debug: false, //!config.IsProductionEnvironment(),
|
|
Dsn: config.SentryDSN,
|
|
EnableTracing: true,
|
|
SendDefaultPII: true,
|
|
TracesSampleRate: 1.0,
|
|
})
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to start sentry connection")
|
|
os.Exit(2)
|
|
}
|
|
defer sentry.Flush(2 * time.Second)
|
|
|
|
sentryWriter, err := sentryzerolog.New(sentryzerolog.Config{
|
|
ClientOptions: sentry.ClientOptions{
|
|
Dsn: config.SentryDSN,
|
|
},
|
|
Options: sentryzerolog.Options{
|
|
Levels: []zerolog.Level{zerolog.ErrorLevel, zerolog.FatalLevel, zerolog.PanicLevel},
|
|
WithBreadcrumbs: true,
|
|
FlushTimeout: 3 * time.Second,
|
|
},
|
|
})
|
|
if err != nil {
|
|
log.Fatal().Err(err).Msg("Failed to create sentry writer")
|
|
os.Exit(2)
|
|
}
|
|
defer sentryWriter.Close()
|
|
|
|
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
|
log.Logger = log.Output(zerolog.MultiLevelWriter(zerolog.ConsoleWriter{Out: os.Stderr}, sentryWriter))
|
|
if os.Getenv("VERBOSE") != "" {
|
|
log.Logger = log.Logger.Level(zerolog.DebugLevel)
|
|
} else {
|
|
log.Logger = log.Logger.Level(zerolog.InfoLevel)
|
|
}
|
|
|
|
err = db.InitializeDatabase(context.TODO(), config.PGDSN)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to connect to database")
|
|
os.Exit(3)
|
|
}
|
|
|
|
err = html.LoadTemplates()
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to load html templates")
|
|
os.Exit(4)
|
|
}
|
|
// Start up background processes
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
defer cancel()
|
|
|
|
err = platform.StartAll(ctx)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed at platform.StartAll")
|
|
os.Exit(5)
|
|
}
|
|
router_logger := log.With().Logger()
|
|
sentryMiddleware := sentryhttp.New(sentryhttp.Options{
|
|
Repanic: true,
|
|
})
|
|
r := chi.NewRouter()
|
|
|
|
r.Use(LoggerMiddleware(&router_logger))
|
|
r.Use(middleware.RequestID)
|
|
r.Use(middleware.RealIP)
|
|
//r.Use(middleware.Logger)
|
|
r.Use(middleware.Recoverer)
|
|
r.Use(sentryMiddleware.Handle)
|
|
r.Use(auth.NewSessionManager().LoadAndSave)
|
|
|
|
hr := hostrouter.New()
|
|
|
|
// Set up routing by hostname
|
|
sr := nidussync.Router()
|
|
hr.Map("", sr) // default
|
|
hr.Map("*", sr) // default
|
|
hr.Map(config.DomainRMO, rmo.Router()) // report.mosquitoes.online
|
|
hr.Map(config.DomainNidus, sr)
|
|
r.Mount("/", hr)
|
|
|
|
log.Debug().Str("report url", config.DomainRMO).Str("sync url", config.DomainNidus).Msg("Serving at URLs")
|
|
|
|
openai_logger := log.With().Logger()
|
|
err = llm.CreateOpenAIClient(ctx, &openai_logger)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to start openAI client")
|
|
os.Exit(8)
|
|
}
|
|
server := &http.Server{
|
|
Addr: config.Bind,
|
|
Handler: r,
|
|
}
|
|
go func() {
|
|
log.Info().Str("address", config.Bind).Msg("Serving HTTP requests")
|
|
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
|
log.Error().Str("err", err.Error()).Msg("HTTP Server Error")
|
|
}
|
|
}()
|
|
|
|
chan_envelope := make(chan platform.Envelope, 10)
|
|
platform.SetEventChannel(chan_envelope)
|
|
api.SetEventChannel(chan_envelope)
|
|
|
|
// Wait for the interrupt signal to gracefully shut down
|
|
signalCh := make(chan os.Signal, 1)
|
|
signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)
|
|
<-signalCh
|
|
|
|
log.Info().Msg("Received shutdown signal, shutting down...")
|
|
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
defer shutdownCancel()
|
|
|
|
if err := server.Shutdown(shutdownCtx); err != nil {
|
|
log.Error().Str("err", err.Error()).Msg("HTTP server shutdown error")
|
|
}
|
|
|
|
cancel()
|
|
close(chan_envelope)
|
|
platform.WaitForExit()
|
|
|
|
log.Info().Msg("Shutdown complete")
|
|
}
|
|
func LoggerMiddleware(logger *zerolog.Logger) func(next http.Handler) http.Handler {
|
|
return func(next http.Handler) http.Handler {
|
|
fn := func(w http.ResponseWriter, r *http.Request) {
|
|
log := logger.With().Logger()
|
|
|
|
ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
|
|
|
|
t1 := time.Now()
|
|
defer func() {
|
|
t2 := time.Now()
|
|
|
|
// Recover and record stack traces in case of a panic
|
|
if rec := recover(); rec != nil {
|
|
log.Error().
|
|
Str("type", "error").
|
|
Timestamp().
|
|
Interface("recover_info", rec).
|
|
Bytes("debug_stack", debug.Stack()).
|
|
Msg("log system error")
|
|
fmt.Println("Stack:", string(debug.Stack()))
|
|
http.Error(ww, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
|
}
|
|
|
|
remote_addr := r.RemoteAddr
|
|
forwarded_for := r.Header.Get("X-Forwarded-For")
|
|
if forwarded_for != "" {
|
|
remote_addr = forwarded_for
|
|
}
|
|
// log end request
|
|
log.Info().
|
|
//Str("type", "access").
|
|
Timestamp().
|
|
Fields(map[string]interface{}{
|
|
"remote_ip": remote_addr,
|
|
"url": r.URL.Path,
|
|
//"proto": r.Proto,
|
|
"method": r.Method,
|
|
//"user_agent": r.Header.Get("User-Agent"),
|
|
"status": ww.Status(),
|
|
"latency_ms": float64(t2.Sub(t1).Nanoseconds()) / 1000000.0,
|
|
"bytes_in": r.Header.Get("Content-Length"),
|
|
"bytes_out": ww.BytesWritten(),
|
|
}).
|
|
Msg("incoming_request")
|
|
}()
|
|
|
|
next.ServeHTTP(ww, r)
|
|
}
|
|
return http.HandlerFunc(fn)
|
|
}
|
|
}
|