forked from gsn/predictor
feat: refactor
parent 82ef1cb3b8
commit 51bbf3c579
44 changed files with 8589 additions and 0 deletions
98 cmd/api/main.go Normal file
@@ -0,0 +1,98 @@
package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "syscall"
    "time"

    "predictor-refactored/internal/downloader"
    "predictor-refactored/internal/service"
    "predictor-refactored/internal/transport/rest"
    "predictor-refactored/internal/transport/rest/handler"

    "github.com/go-co-op/gocron"
    "go.uber.org/zap"
)

func main() {
    log, err := zap.NewProduction()
    if err != nil {
        panic(err)
    }
    defer log.Sync()

    cfg := downloader.LoadConfig()
    log.Info("configuration loaded",
        zap.String("data_dir", cfg.DataDir),
        zap.Int("parallel", cfg.Parallel),
        zap.Duration("update_interval", cfg.UpdateInterval),
        zap.Duration("dataset_ttl", cfg.DatasetTTL))

    if err := os.MkdirAll(cfg.DataDir, 0o755); err != nil {
        log.Fatal("failed to create data directory", zap.Error(err))
    }

    svc := service.New(cfg, log)
    defer svc.Close()

    // Load elevation dataset (optional — falls back to sea-level termination)
    elevPath := "/srv/ruaumoko-dataset"
    if v := os.Getenv("PREDICTOR_ELEVATION_DATASET"); v != "" {
        elevPath = v
    }
    svc.LoadElevation(elevPath)

    // Initial dataset load (async so the server starts immediately)
    go func() {
        log.Info("performing initial dataset update...")
        if err := svc.Update(context.Background()); err != nil {
            log.Error("initial dataset update failed", zap.Error(err))
        } else {
            log.Info("initial dataset update complete")
        }
    }()

    // Scheduler for periodic dataset updates
    scheduler := gocron.NewScheduler(time.UTC)
    scheduler.Every(cfg.UpdateInterval).Do(func() {
        ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute)
        defer cancel()
        log.Info("scheduled dataset update starting")
        if err := svc.Update(ctx); err != nil {
            log.Error("scheduled dataset update failed", zap.Error(err))
        } else {
            log.Info("scheduled dataset update complete")
        }
    })
    scheduler.StartAsync()
    defer scheduler.Stop()

    // HTTP transport (ogen)
    port := 8080
    if p := os.Getenv("PREDICTOR_PORT"); p != "" {
        fmt.Sscanf(p, "%d", &port)
    }

    h := handler.New(svc, log)
    transport, err := rest.New(h, port, log)
    if err != nil {
        log.Fatal("failed to create transport", zap.Error(err))
    }

    go func() {
        if err := transport.Run(); err != nil {
            log.Fatal("HTTP server error", zap.Error(err))
        }
    }()

    log.Info("service started")

    // Graceful shutdown
    sigChan := make(chan os.Signal, 1)
    signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
    sig := <-sigChan
    log.Info("received shutdown signal", zap.String("signal", sig.String()))
}
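Besides downloader.LoadConfig(), cmd/api reads two environment variables directly in main: PREDICTOR_ELEVATION_DATASET (default /srv/ruaumoko-dataset) and PREDICTOR_PORT (default 8080). An illustrative invocation, with the port and dataset path chosen purely as placeholders, would be: PREDICTOR_PORT=9000 PREDICTOR_ELEVATION_DATASET=/data/ruaumoko go run ./cmd/api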
195 cmd/compare_prediction/main.go Normal file
@@ -0,0 +1,195 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "math"
    "net/http"
    "os"
    "time"

    "predictor-refactored/internal/dataset"
    "predictor-refactored/internal/downloader"
    "predictor-refactored/internal/prediction"

    "go.uber.org/zap"
)

// Downloads a few forecast steps and runs a prediction, then compares
// against the public Tawhiri API.
func main() {
    log, _ := zap.NewDevelopment()

    cfg := &downloader.Config{
        DataDir:  os.TempDir(),
        Parallel: 4,
    }
    dl := downloader.NewDownloader(cfg, log)

    ctx := context.Background()

    // Find latest run
    run, err := dl.FindLatestRun(ctx)
    if err != nil {
        fmt.Fprintf(os.Stderr, "FindLatestRun: %v\n", err)
        os.Exit(1)
    }
    fmt.Printf("Using run: %s\n", run.Format("2006010215"))

    // Create dataset and download first 10 steps (0-27 hours, enough for a prediction)
    dsPath := fmt.Sprintf("/tmp/pred_test_%s.bin", run.Format("2006010215"))
    defer os.Remove(dsPath)

    ds, err := dataset.Create(dsPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Create: %v\n", err)
        os.Exit(1)
    }

    date := run.Format("20060102")
    runHour := run.Hour()
    stepsToDownload := []int{0, 3, 6, 9, 12, 15, 18, 21, 24, 27}

    fmt.Printf("Downloading %d steps...\n", len(stepsToDownload))
    for _, step := range stepsToDownload {
        hourIdx := dataset.HourIndex(step)
        fmt.Printf(" step %d (hour idx %d)...\n", step, hourIdx)

        urlA := dataset.GribURL(date, runHour, step)
        if err := dl.DownloadAndBlit(ctx, ds, urlA, hourIdx, dataset.LevelSetA); err != nil {
            fmt.Fprintf(os.Stderr, " pgrb2 step %d: %v\n", step, err)
            os.Exit(1)
        }

        urlB := dataset.GribURLB(date, runHour, step)
        if err := dl.DownloadAndBlit(ctx, ds, urlB, hourIdx, dataset.LevelSetB); err != nil {
            fmt.Fprintf(os.Stderr, " pgrb2b step %d: %v\n", step, err)
            os.Exit(1)
        }
    }
    ds.Flush()
    fmt.Println("Download complete")

    // Set dataset time
    ds.DSTime = run

    // Run our prediction
    launchLat := 52.2135
    launchLon := 0.0964 // already in [0, 360)
    launchAlt := 0.0
    ascentRate := 5.0
    burstAlt := 30000.0
    descentRate := 5.0

    // Launch 3 hours into the forecast
    launchTime := run.Add(3 * time.Hour)
    launchTimestamp := float64(launchTime.Unix())
    dsEpoch := float64(run.Unix())

    warnings := &prediction.Warnings{}
    stages := prediction.StandardProfile(ascentRate, burstAlt, descentRate, ds, dsEpoch, warnings, nil)
    results := prediction.RunPrediction(launchTimestamp, launchLat, launchLon, launchAlt, stages)

    fmt.Printf("\n=== Our prediction ===\n")
    for i, sr := range results {
        stage := "ascent"
        if i == 1 {
            stage = "descent"
        }
        first := sr.Points[0]
        last := sr.Points[len(sr.Points)-1]
        fmt.Printf(" %s: %d points, start=(%.4f, %.4f, %.0fm) end=(%.4f, %.4f, %.0fm)\n",
            stage, len(sr.Points),
            first.Lat, first.Lng, first.Alt,
            last.Lat, last.Lng, last.Alt)
    }

    // Get landing point
    var ourLandLat, ourLandLon float64
    if len(results) >= 2 {
        last := results[1].Points[len(results[1].Points)-1]
        ourLandLat = last.Lat
        ourLandLon = last.Lng
        if ourLandLon > 180 {
            ourLandLon -= 360
        }
    }
    fmt.Printf(" Landing: lat=%.4f, lon=%.4f\n", ourLandLat, ourLandLon)

    // Compare against public Tawhiri API
    fmt.Printf("\n=== Tawhiri API comparison ===\n")
    tawhiriLandLat, tawhiriLandLon, err := queryTawhiri(launchLat, launchLon, launchAlt, launchTime, ascentRate, burstAlt, descentRate)
    if err != nil {
        fmt.Printf(" Tawhiri API error: %v\n", err)
        fmt.Println(" (Cannot compare — Tawhiri may use a different dataset)")
        ds.Close()
        return
    }
    fmt.Printf(" Tawhiri landing: lat=%.4f, lon=%.4f\n", tawhiriLandLat, tawhiriLandLon)

    dist := haversine(ourLandLat, ourLandLon, tawhiriLandLat, tawhiriLandLon)
    fmt.Printf(" Distance between landing points: %.2f km\n", dist/1000)

    if dist < 1000 {
        fmt.Println(" CLOSE MATCH (< 1 km)")
    } else if dist < 50000 {
        fmt.Printf(" MODERATE DIFFERENCE (%.1f km) — likely different datasets\n", dist/1000)
    } else {
        fmt.Printf(" LARGE DIFFERENCE (%.1f km) — possible bug\n", dist/1000)
    }

    ds.Close()
}

func queryTawhiri(lat, lon, alt float64, launchTime time.Time, ascentRate, burstAlt, descentRate float64) (landLat, landLon float64, err error) {
    url := fmt.Sprintf(
        "https://api.v2.sondehub.org/tawhiri?launch_latitude=%.4f&launch_longitude=%.4f&launch_altitude=%.0f&launch_datetime=%s&ascent_rate=%.1f&burst_altitude=%.0f&descent_rate=%.1f",
        lat, lon, alt, launchTime.Format(time.RFC3339), ascentRate, burstAlt, descentRate)

    resp, err := http.Get(url)
    if err != nil {
        return 0, 0, err
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    if resp.StatusCode != 200 {
        return 0, 0, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
    }

    var result struct {
        Prediction []struct {
            Stage      string `json:"stage"`
            Trajectory []struct {
                Latitude  float64 `json:"latitude"`
                Longitude float64 `json:"longitude"`
                Altitude  float64 `json:"altitude"`
            } `json:"trajectory"`
        } `json:"prediction"`
    }

    if err := json.Unmarshal(body, &result); err != nil {
        return 0, 0, err
    }

    for _, stage := range result.Prediction {
        if stage.Stage == "descent" && len(stage.Trajectory) > 0 {
            last := stage.Trajectory[len(stage.Trajectory)-1]
            return last.Latitude, last.Longitude, nil
        }
    }

    return 0, 0, fmt.Errorf("no descent stage found")
}

func haversine(lat1, lon1, lat2, lon2 float64) float64 {
    const R = 6371000.0
    phi1 := lat1 * math.Pi / 180
    phi2 := lat2 * math.Pi / 180
    dphi := (lat2 - lat1) * math.Pi / 180
    dlam := (lon2 - lon1) * math.Pi / 180
    a := math.Sin(dphi/2)*math.Sin(dphi/2) + math.Cos(phi1)*math.Cos(phi2)*math.Sin(dlam/2)*math.Sin(dlam/2)
    return R * 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
}
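cmd/compare_prediction takes no arguments; it downloads ten forecast steps into a temporary dataset under /tmp and queries the public SondeHub Tawhiri endpoint for the same launch parameters. The CLOSE/MODERATE/LARGE verdict is based purely on the great-circle distance returned by the haversine helper above. A minimal standalone sanity check of that helper, with the function copied from the file above and two illustrative coordinates about 0.01 degrees of latitude apart (so the result should be a little over 1000 m):

package main

import (
    "fmt"
    "math"
)

// haversine is the same great-circle distance helper as in cmd/compare_prediction/main.go.
func haversine(lat1, lon1, lat2, lon2 float64) float64 {
    const R = 6371000.0 // mean Earth radius in metres
    phi1 := lat1 * math.Pi / 180
    phi2 := lat2 * math.Pi / 180
    dphi := (lat2 - lat1) * math.Pi / 180
    dlam := (lon2 - lon1) * math.Pi / 180
    a := math.Sin(dphi/2)*math.Sin(dphi/2) + math.Cos(phi1)*math.Cos(phi2)*math.Sin(dlam/2)*math.Sin(dlam/2)
    return R * 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
}

func main() {
    // Two illustrative points near the launch site used above, 0.01 deg of latitude apart
    // (roughly 1.1 km), so this should print a value slightly above 1000 m.
    d := haversine(52.2135, 0.0964, 52.2235, 0.0964)
    fmt.Printf("distance: %.0f m\n", d)
}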
104 cmd/compare_step0/main.go Normal file
@@ -0,0 +1,104 @@
package main

import (
    "context"
    "fmt"
    "os"
    "time"

    "predictor-refactored/internal/dataset"
    "predictor-refactored/internal/downloader"

    "go.uber.org/zap"
)

// Downloads step 0 of a given run and writes a minimal dataset for comparison.
// Usage: go run ./cmd/compare_step0 <run_YYYYMMDDHH> <output_path>
func main() {
    if len(os.Args) < 3 {
        fmt.Fprintf(os.Stderr, "Usage: %s <run_YYYYMMDDHH> <output_path>\n", os.Args[0])
        os.Exit(1)
    }

    runStr := os.Args[1]
    outPath := os.Args[2]

    run, err := time.Parse("2006010215", runStr)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Invalid run time %q: %v\n", runStr, err)
        os.Exit(1)
    }

    log, _ := zap.NewDevelopment()

    // Create a full-size dataset (we only fill step 0)
    fmt.Printf("Creating dataset at %s (%d bytes)...\n", outPath, dataset.DatasetSize)
    ds, err := dataset.Create(outPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Create dataset: %v\n", err)
        os.Exit(1)
    }
    defer ds.Close()

    cfg := &downloader.Config{
        DataDir:  os.TempDir(),
        Parallel: 4,
    }
    dl := downloader.NewDownloader(cfg, log)

    ctx := context.Background()
    date := run.Format("20060102")
    runHour := run.Hour()

    // Download and blit step 0 from pgrb2
    fmt.Println("Downloading pgrb2 step 0...")
    urlA := dataset.GribURL(date, runHour, 0)
    if err := dl.DownloadAndBlit(ctx, ds, urlA, 0, dataset.LevelSetA); err != nil {
        fmt.Fprintf(os.Stderr, "pgrb2: %v\n", err)
        os.Exit(1)
    }
    fmt.Println(" done")

    // Download and blit step 0 from pgrb2b
    fmt.Println("Downloading pgrb2b step 0...")
    urlB := dataset.GribURLB(date, runHour, 0)
    if err := dl.DownloadAndBlit(ctx, ds, urlB, 0, dataset.LevelSetB); err != nil {
        fmt.Fprintf(os.Stderr, "pgrb2b: %v\n", err)
        os.Exit(1)
    }
    fmt.Println(" done")

    if err := ds.Flush(); err != nil {
        fmt.Fprintf(os.Stderr, "Flush: %v\n", err)
        os.Exit(1)
    }

    // Spot-check: print same values as the Python script
    fmt.Println("\n=== Go dataset values (spot check) ===")
    type testPoint struct {
        varName  string
        varIdx   int
        levelIdx int
        lat, lon int
    }

    points := []testPoint{
        {"HGT", 0, 0, 0, 0},      // HGT @ 1000mb, lat=-90, lon=0
        {"HGT", 0, 0, 180, 0},    // HGT @ 1000mb, lat=0, lon=0
        {"HGT", 0, 0, 360, 0},    // HGT @ 1000mb, lat=+90, lon=0
        {"HGT", 0, 20, 180, 360}, // HGT @ 500mb, lat=0, lon=180
        {"UGRD", 1, 0, 180, 0},   // UGRD @ 1000mb, lat=0, lon=0
        {"VGRD", 2, 0, 180, 0},   // VGRD @ 1000mb, lat=0, lon=0
        {"UGRD", 1, 20, 284, 0},  // UGRD @ 500mb, lat=52N, lon=0
    }

    for _, p := range points {
        val := ds.Val(0, p.levelIdx, p.varIdx, p.lat, p.lon)
        actualLat := -90.0 + float64(p.lat)*0.5
        actualLon := float64(p.lon) * 0.5
        fmt.Printf(" %-4s %4dmb lat=%+7.1f lon=%6.1f: %12.4f\n",
            p.varName, dataset.Pressures[p.levelIdx], actualLat, actualLon, val)
    }

    fmt.Printf("\nDataset written to %s\n", outPath)
}
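As the Usage comment in the file states, cmd/compare_step0 takes a run identifier and an output path, for example go run ./cmd/compare_step0 2024010100 /tmp/step0_go.bin (both values here are placeholders), and its spot-check output lists the same grid points as the Python script referenced in the comment so the two datasets can be compared by eye.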