From fe207f3fab9e155279a0136a762a6eaa7a9496d7 Mon Sep 17 00:00:00 2001 From: "afanasyev.aa" Date: Tue, 9 Dec 2025 18:25:16 +0900 Subject: [PATCH 1/8] added simulate stages. Changed GSN_PREDICTOR_GRIB_TTL to 48h --- .claude/settings.local.json | 10 + api/rest/predictor.swagger.yml | 9 +- force_update.bat | 23 ++ force_update.ps1 | 27 ++ go.mod | 11 +- go.sum | 37 +- internal/pkg/ds/predictor.go | 7 + internal/service/predictor.go | 183 +++++++--- internal/service/predictor_test.go | 492 ++++++++++++++++++++++++++ pkg/rest/oas_cfg_gen.go | 16 +- pkg/rest/oas_client_gen.go | 43 ++- pkg/rest/oas_handlers_gen.go | 29 +- pkg/rest/oas_json_gen.go | 3 +- pkg/rest/oas_parameters_gen.go | 107 +++++- pkg/rest/oas_response_decoders_gen.go | 1 - pkg/rest/oas_response_encoders_gen.go | 3 +- pkg/rest/oas_router_gen.go | 20 +- pkg/rest/oas_schemas_gen.go | 55 +++ pkg/rest/oas_validators_gen.go | 16 +- run.bat | 12 + run.ps1 | 11 + 21 files changed, 978 insertions(+), 137 deletions(-) create mode 100644 .claude/settings.local.json create mode 100644 force_update.bat create mode 100644 force_update.ps1 create mode 100644 internal/service/predictor_test.go create mode 100644 run.bat create mode 100644 run.ps1 diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 0000000..c58df5e --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,10 @@ +{ + "permissions": { + "allow": [ + "Bash(cat:*)", + "Bash(xargs:*)" + ], + "deny": [], + "ask": [] + } +} diff --git a/api/rest/predictor.swagger.yml b/api/rest/predictor.swagger.yml index 73e3381..fc408f9 100644 --- a/api/rest/predictor.swagger.yml +++ b/api/rest/predictor.swagger.yml @@ -61,6 +61,13 @@ paths: name: descent_curve schema: type: string + - in: query + name: simulate_stages + schema: + type: array + items: + type: string + enum: [ascent, descent, float] - in: query name: interpolate schema: @@ -147,7 +154,7 @@ components: properties: stage: type: string - enum: ["ascent", "descent"] + 
enum: ["ascent", "descent", "float"] trajectory: type: array items: diff --git a/force_update.bat b/force_update.bat new file mode 100644 index 0000000..2dc3a3b --- /dev/null +++ b/force_update.bat @@ -0,0 +1,23 @@ +@echo off +REM Batch script to force download fresh GRIB data +REM This deletes old cube files and downloads the latest dataset + +echo Forcing fresh GRIB data download... +echo. + +REM Set environment +set GSN_PREDICTOR_GRIB_DIR=C:\tmp\grib +set GSN_PREDICTOR_GRIB_TTL=72h + +REM Delete old cube files to force fresh download +echo Deleting old cube files... +del /Q C:\tmp\grib\*.cube 2>nul +echo. + +echo Starting service to download fresh data... +echo This will download from S3 (may take several minutes) +echo Press Ctrl+C once download completes and you see "initial GRIB update complete" +echo. + +REM Run the service - it will download fresh data on startup +go run cmd/api/main.go diff --git a/force_update.ps1 b/force_update.ps1 new file mode 100644 index 0000000..e911409 --- /dev/null +++ b/force_update.ps1 @@ -0,0 +1,27 @@ +# PowerShell script to force download fresh GRIB data +# This deletes old cube files and downloads the latest dataset + +Write-Host "Forcing fresh GRIB data download..." -ForegroundColor Yellow + +# Set environment +$env:GSN_PREDICTOR_GRIB_DIR = "C:\tmp\grib" +$env:GSN_PREDICTOR_GRIB_TTL = "72h" + +# Delete old cube files to force fresh download +$cubeFiles = Get-ChildItem -Path "C:\tmp\grib\*.cube" -ErrorAction SilentlyContinue +if ($cubeFiles) { + Write-Host "Deleting old cube files:" -ForegroundColor Yellow + foreach ($file in $cubeFiles) { + Write-Host " - $($file.Name)" -ForegroundColor Gray + Remove-Item $file.FullName -Force + } +} else { + Write-Host "No old cube files found" -ForegroundColor Gray +} + +Write-Host "`nStarting service to download fresh data..." 
-ForegroundColor Green +Write-Host "This will download from S3 (may take several minutes)" -ForegroundColor Cyan +Write-Host "Press Ctrl+C once download completes and you see 'initial GRIB update complete'`n" -ForegroundColor Cyan + +# Run the service - it will download fresh data on startup +go run cmd/api/main.go diff --git a/go.mod b/go.mod index 079e12c..54fb283 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,9 @@ module git.intra.yksa.space/gsn/predictor go 1.24.4 require ( + github.com/aws/aws-sdk-go-v2 v1.39.3 + github.com/aws/aws-sdk-go-v2/config v1.31.13 + github.com/aws/aws-sdk-go-v2/service/s3 v1.88.5 github.com/caarlos0/env/v11 v11.3.1 github.com/edsrzf/mmap-go v1.2.0 github.com/go-co-op/gocron v1.37.0 @@ -11,6 +14,7 @@ require ( github.com/nilsmagnus/grib v1.2.8 github.com/ogen-go/ogen v1.16.0 github.com/rs/cors v1.11.1 + github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/metric v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 @@ -19,9 +23,7 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2 v1.39.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.31.13 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.18.17 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.10 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.10 // indirect @@ -32,11 +34,11 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.10 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.10 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.88.5 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.29.7 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.2 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.38.7 // indirect github.com/aws/smithy-go v1.23.1 // indirect + github.com/davecgh/go-spew 
v1.1.1 // indirect github.com/dlclark/regexp2 v1.11.5 // indirect github.com/fatih/color v1.18.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect @@ -46,9 +48,11 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/segmentio/asm v1.2.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect @@ -57,4 +61,5 @@ require ( golang.org/x/sys v0.37.0 // indirect golang.org/x/text v0.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index eaa1790..6957306 100644 --- a/go.sum +++ b/go.sum @@ -57,8 +57,6 @@ github.com/go-faster/jx v1.1.0/go.mod h1:vKDNikrKoyUmpzaJ0OkIkRQClNHFX/nF3dnTJZb github.com/go-faster/yaml v0.4.6 h1:lOK/EhI04gCpPgPhgt0bChS6bvw7G3WwI8xxVe0sw9I= github.com/go-faster/yaml v0.4.6/go.mod h1:390dRIvV4zbnO7qC9FGo6YYutc+wyyUSHBgbXL52eXk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -77,17 +75,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/nilsmagnus/grib v1.2.8 h1:H7ch/1/agaCqM3MC8hW1Ft+EJ+q2XB757uml/IfPvp4= github.com/nilsmagnus/grib v1.2.8/go.mod h1:XHm+5zuoOk0NSIWaGmA3JaAxI4i50YvD1L1vz+aqPOQ= -github.com/ogen-go/ogen v1.14.0 h1:TU1Nj4z9UBsAfTkf+IhuNNp7igdFQKqkk9+6/y4XuWg= -github.com/ogen-go/ogen v1.14.0/go.mod h1:Iw1vkqkx6SU7I9th5ceP+fVPJ6Wge4e3kAVzAxJEpPE= github.com/ogen-go/ogen v1.16.0 h1:fKHEYokW/QrMzVNXId74/6RObRIUs9T2oroGKtR25Iw= github.com/ogen-go/ogen v1.16.0/go.mod h1:s3nWiMzybSf8fhxckyO+wtto92+QHpEL8FmkPnhL3jI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -97,13 +90,10 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= -github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= @@ -111,30 +101,22 @@ github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+D github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +github.com/stretchr/testify 
v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -144,26 +126,15 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/exp 
v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY= -golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/exp v0.0.0-20251017212417-90e834f514db h1:by6IehL4BH5k3e3SJmcoNbOobMey2SLpAF79iPOEBvw= golang.org/x/exp v0.0.0-20251017212417-90e834f514db/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/pkg/ds/predictor.go 
b/internal/pkg/ds/predictor.go index 753b0b2..c7c33c8 100644 --- a/internal/pkg/ds/predictor.go +++ b/internal/pkg/ds/predictor.go @@ -19,6 +19,7 @@ type PredictionParameters struct { StopDatetime *time.Time AscentCurve *string // base64 DescentCurve *string // base64 + SimulateStages []string Interpolate *bool Format *string Dataset *time.Time @@ -85,5 +86,11 @@ func ConvertFlatPredictionParams(params api.PerformPredictionParams) *Prediction if v, ok := params.Dataset.Get(); ok { out.Dataset = &v } + if len(params.SimulateStages) > 0 { + out.SimulateStages = make([]string, len(params.SimulateStages)) + for i, stage := range params.SimulateStages { + out.SimulateStages[i] = string(stage) + } + } return out } diff --git a/internal/service/predictor.go b/internal/service/predictor.go index e5c6fad..bb53b3f 100644 --- a/internal/service/predictor.go +++ b/internal/service/predictor.go @@ -23,6 +23,22 @@ type Stage struct { EndTime time.Time } +// shouldSimulateStage checks if a given stage should be simulated based on the SimulateStages filter +func shouldSimulateStage(params ds.PredictionParameters, stage string) bool { + // If no filter is specified, simulate all stages + if len(params.SimulateStages) == 0 { + return true + } + + // Check if the stage is in the filter list + for _, s := range params.SimulateStages { + if s == stage { + return true + } + } + return false +} + // CustomCurve represents a custom ascent/descent curve type CustomCurve struct { Altitude []float64 `json:"altitude"` @@ -103,16 +119,27 @@ func (s *Service) PerformPrediction(ctx context.Context, params ds.PredictionPar func (s *Service) standardProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { var results []ds.PredicitonResult + var lastResult ds.PredicitonResult // Stage 1: Ascent - ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve) - 
results = append(results, ascentResults...) + if shouldSimulateStage(params, "ascent") { + ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve) + results = append(results, ascentResults...) + if len(ascentResults) > 0 { + lastResult = ascentResults[len(ascentResults)-1] + } + } else { + // If ascent is skipped, use initial position as starting point + lastResult = ds.PredicitonResult{ + Latitude: params.LaunchLatitude, + Longitude: params.LaunchLongitude, + Altitude: &burstAltitude, + Timestamp: params.LaunchDatetime, + } + } - if len(ascentResults) > 0 { - // Get final position from ascent - lastResult := ascentResults[len(ascentResults)-1] - - // Stage 2: Descent + // Stage 2: Descent + if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil { descentParams := ds.PredictionParameters{ LaunchLatitude: lastResult.Latitude, LaunchLongitude: lastResult.Longitude, @@ -129,45 +156,36 @@ func (s *Service) standardProfile(ctx context.Context, params ds.PredictionParam func (s *Service) floatProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, floatAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { var results []ds.PredicitonResult + var lastResult ds.PredicitonResult // Stage 1: Ascent to float altitude - ascentResults := s.simulateAscent(ctx, params, ascentRate, floatAltitude, ascentCurve) - results = append(results, ascentResults...) - - if len(ascentResults) > 0 { - // Stage 2: Float (simulate for some time) - lastResult := ascentResults[len(ascentResults)-1] - floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute) // Float for 30 minutes - results = append(results, floatResults...) 
- - if len(floatResults) > 0 { - // Stage 3: Descent - finalFloat := floatResults[len(floatResults)-1] - descentParams := ds.PredictionParameters{ - LaunchLatitude: finalFloat.Latitude, - LaunchLongitude: finalFloat.Longitude, - LaunchAltitude: finalFloat.Altitude, - LaunchDatetime: finalFloat.Timestamp, - } - - descentResults := s.simulateDescent(ctx, descentParams, descentRate, 0, descentCurve) - results = append(results, descentResults...) + if shouldSimulateStage(params, "ascent") { + ascentResults := s.simulateAscent(ctx, params, ascentRate, floatAltitude, ascentCurve) + results = append(results, ascentResults...) + if len(ascentResults) > 0 { + lastResult = ascentResults[len(ascentResults)-1] + } + } else { + // If ascent is skipped, use initial position at float altitude as starting point + lastResult = ds.PredicitonResult{ + Latitude: params.LaunchLatitude, + Longitude: params.LaunchLongitude, + Altitude: &floatAltitude, + Timestamp: params.LaunchDatetime, } } - return results -} + // Stage 2: Float (simulate for some time) + if shouldSimulateStage(params, "float") && lastResult.Latitude != nil { + floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute) // Float for 30 minutes + results = append(results, floatResults...) + if len(floatResults) > 0 { + lastResult = floatResults[len(floatResults)-1] + } + } -func (s *Service) reverseProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { - var results []ds.PredicitonResult - - // Stage 1: Ascent - ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve) - results = append(results, ascentResults...) 
- - if len(ascentResults) > 0 { - // Stage 2: Descent to float altitude - lastResult := ascentResults[len(ascentResults)-1] + // Stage 3: Descent + if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil { descentParams := ds.PredictionParameters{ LaunchLatitude: lastResult.Latitude, LaunchLongitude: lastResult.Longitude, @@ -175,21 +193,67 @@ func (s *Service) reverseProfile(ctx context.Context, params ds.PredictionParame LaunchDatetime: lastResult.Timestamp, } - // Descent to float altitude (if specified) - floatAlt := 0.0 - if params.FloatAltitude != nil { - floatAlt = *params.FloatAltitude + descentResults := s.simulateDescent(ctx, descentParams, descentRate, 0, descentCurve) + results = append(results, descentResults...) + } + + return results +} + +func (s *Service) reverseProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { + var results []ds.PredicitonResult + var lastResult ds.PredicitonResult + + // Stage 1: Ascent + if shouldSimulateStage(params, "ascent") { + ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve) + results = append(results, ascentResults...) 
+ if len(ascentResults) > 0 { + lastResult = ascentResults[len(ascentResults)-1] + } + } else { + // If ascent is skipped, use initial position at burst altitude as starting point + lastResult = ds.PredicitonResult{ + Latitude: params.LaunchLatitude, + Longitude: params.LaunchLongitude, + Altitude: &burstAltitude, + Timestamp: params.LaunchDatetime, + } + } + + // Stage 2: Descent to float altitude + floatAlt := 0.0 + if params.FloatAltitude != nil { + floatAlt = *params.FloatAltitude + } + + if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil { + descentParams := ds.PredictionParameters{ + LaunchLatitude: lastResult.Latitude, + LaunchLongitude: lastResult.Longitude, + LaunchAltitude: lastResult.Altitude, + LaunchDatetime: lastResult.Timestamp, } descentResults := s.simulateDescent(ctx, descentParams, descentRate, floatAlt, descentCurve) results = append(results, descentResults...) - - if floatAlt > 0 && len(descentResults) > 0 { - // Stage 3: Float - finalDescent := descentResults[len(descentResults)-1] - floatResults := s.simulateFloat(ctx, finalDescent, 30*time.Minute) - results = append(results, floatResults...) + if len(descentResults) > 0 { + lastResult = descentResults[len(descentResults)-1] } + } else if floatAlt > 0 { + // If descent is skipped but we need to float, position at float altitude + lastResult = ds.PredicitonResult{ + Latitude: lastResult.Latitude, + Longitude: lastResult.Longitude, + Altitude: &floatAlt, + Timestamp: lastResult.Timestamp, + } + } + + // Stage 3: Float + if shouldSimulateStage(params, "float") && floatAlt > 0 && lastResult.Latitude != nil { + floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute) + results = append(results, floatResults...) 
} return results @@ -197,14 +261,27 @@ func (s *Service) reverseProfile(ctx context.Context, params ds.PredictionParame func (s *Service) customProfile(ctx context.Context, params ds.PredictionParameters, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { var results []ds.PredicitonResult + var lastResult ds.PredicitonResult - if ascentCurve != nil { + // Custom ascent + if shouldSimulateStage(params, "ascent") && ascentCurve != nil { ascentResults := s.simulateCustomAscent(ctx, params, ascentCurve) results = append(results, ascentResults...) + if len(ascentResults) > 0 { + lastResult = ascentResults[len(ascentResults)-1] + } + } else if len(results) == 0 { + // If ascent is skipped, use initial position + lastResult = ds.PredicitonResult{ + Latitude: params.LaunchLatitude, + Longitude: params.LaunchLongitude, + Altitude: params.LaunchAltitude, + Timestamp: params.LaunchDatetime, + } } - if descentCurve != nil && len(results) > 0 { - lastResult := results[len(results)-1] + // Custom descent + if shouldSimulateStage(params, "descent") && descentCurve != nil && lastResult.Latitude != nil { descentParams := ds.PredictionParameters{ LaunchLatitude: lastResult.Latitude, LaunchLongitude: lastResult.Longitude, diff --git a/internal/service/predictor_test.go b/internal/service/predictor_test.go new file mode 100644 index 0000000..66ab76e --- /dev/null +++ b/internal/service/predictor_test.go @@ -0,0 +1,492 @@ +package service + +import ( + "context" + "testing" + "time" + + "git.intra.yksa.space/gsn/predictor/internal/pkg/ds" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +// MockGrib is a mock implementation of the Grib interface +type MockGrib struct { + mock.Mock +} + +func (m *MockGrib) Update(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func (m *MockGrib) Extract(ctx context.Context, lat, lon, alt float64, t time.Time) ([2]float64, error) { + args := m.Called(ctx, lat, lon, alt, t) + 
return args.Get(0).([2]float64), args.Error(1) +} + +func (m *MockGrib) Close() error { + args := m.Called() + return args.Error(0) +} + +// Helper function to create a test service with mocked GRIB +func createTestService() (*Service, *MockGrib) { + mockGrib := new(MockGrib) + + // Default mock behavior: return constant wind (5 m/s east, 3 m/s north) + mockGrib.On("Extract", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([2]float64{5.0, 3.0}, nil) + + service := &Service{ + grib: mockGrib, + } + + return service, mockGrib +} + +// Helper function to create basic prediction parameters +func createBasicParams() ds.PredictionParameters { + lat := 40.0 + lon := -105.0 + alt := 1000.0 + launchTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC) + profile := "standard_profile" + ascentRate := 5.0 + burstAltitude := 10000.0 + descentRate := 5.0 + + return ds.PredictionParameters{ + LaunchLatitude: &lat, + LaunchLongitude: &lon, + LaunchAltitude: &alt, + LaunchDatetime: &launchTime, + Profile: &profile, + AscentRate: &ascentRate, + BurstAltitude: &burstAltitude, + DescentRate: &descentRate, + } +} + +func TestRestrictedPrediction_OnlyAscent(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + // Restrict to ascent only + params.SimulateStages = []string{"ascent"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // Verify all results are during ascent phase (altitude increasing) + for i := 1; i < len(results); i++ { + assert.GreaterOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, + "Altitude should be increasing or equal during ascent") + } + + // Last altitude should be near burst altitude + lastAlt := *results[len(results)-1].Altitude + burstAlt := *params.BurstAltitude + assert.InDelta(t, burstAlt, lastAlt, 500.0, "Last altitude should be near burst altitude") +} + +func 
TestRestrictedPrediction_OnlyDescent(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + // Restrict to descent only + params.SimulateStages = []string{"descent"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // First result should be at burst altitude (since ascent was skipped) + firstAlt := *results[0].Altitude + burstAlt := *params.BurstAltitude + assert.Equal(t, burstAlt, firstAlt, "Should start at burst altitude when ascent is skipped") + + // Verify all results are during descent phase (altitude decreasing) + for i := 1; i < len(results); i++ { + assert.LessOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, + "Altitude should be decreasing or equal during descent") + } + + // Last altitude should be near ground + lastAlt := *results[len(results)-1].Altitude + assert.Less(t, lastAlt, 1000.0, "Last altitude should be near ground") +} + +func TestRestrictedPrediction_AscentAndDescent(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + // Include both ascent and descent + params.SimulateStages = []string{"ascent", "descent"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // Find the peak altitude (transition point) + maxAlt := 0.0 + maxIdx := 0 + for i, result := range results { + if *result.Altitude > maxAlt { + maxAlt = *result.Altitude + maxIdx = i + } + } + + // Verify ascent phase + for i := 1; i <= maxIdx; i++ { + assert.GreaterOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, + "Altitude should increase during ascent phase") + } + + // Verify descent phase + for i := maxIdx + 1; i < len(results); i++ { + assert.LessOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, + "Altitude should decrease during descent phase") + } +} + +func TestRestrictedPrediction_FloatProfile_OnlyFloat(t 
*testing.T) { + service, _ := createTestService() + params := createBasicParams() + + profile := "float_profile" + floatAlt := 15000.0 + params.Profile = &profile + params.FloatAltitude = &floatAlt + + // Restrict to float only + params.SimulateStages = []string{"float"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // All results should be at the float altitude + for _, result := range results { + assert.Equal(t, floatAlt, *result.Altitude, + "Altitude should remain constant at float altitude") + } + + // Verify horizontal movement (lat/lon changes due to wind) + firstLat := *results[0].Latitude + lastLat := *results[len(results)-1].Latitude + assert.NotEqual(t, firstLat, lastLat, "Latitude should change during float due to wind") +} + +func TestRestrictedPrediction_FloatProfile_AllStages(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + profile := "float_profile" + floatAlt := 15000.0 + params.Profile = &profile + params.FloatAltitude = &floatAlt + + // Include all stages + params.SimulateStages = []string{"ascent", "float", "descent"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // Verify we have ascending, constant, and descending altitude patterns + hasAscent := false + hasFloat := false + hasDescent := false + + const altTolerance = 50.0 // Tolerance for altitude comparison + + for i := 1; i < len(results); i++ { + altDiff := *results[i].Altitude - *results[i-1].Altitude + + if altDiff > altTolerance { + hasAscent = true + } else if altDiff < -altTolerance { + hasDescent = true + } else if *results[i].Altitude > 10000 { // Float happens at high altitude + hasFloat = true + } + } + + assert.True(t, hasAscent, "Should have ascent phase") + assert.True(t, hasFloat, "Should have float phase") + assert.True(t, hasDescent, "Should have descent 
phase") + + // Verify maximum altitude is near float altitude + maxAlt := 0.0 + for _, result := range results { + if *result.Altitude > maxAlt { + maxAlt = *result.Altitude + } + } + assert.InDelta(t, floatAlt, maxAlt, 1000.0, "Max altitude should be near float altitude") +} + +func TestRestrictedPrediction_ReverseProfile_OnlyFloat(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + profile := "reverse_profile" + floatAlt := 5000.0 + params.Profile = &profile + params.FloatAltitude = &floatAlt + + // Restrict to float only + params.SimulateStages = []string{"float"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // All results should be at the float altitude + for _, result := range results { + assert.InDelta(t, floatAlt, *result.Altitude, 10.0, + "Altitude should remain near float altitude") + } +} + +func TestRestrictedPrediction_EmptyStages_SimulatesAll(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + // Empty SimulateStages should simulate all stages + params.SimulateStages = []string{} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // Should have both ascent and descent + // Find the peak + maxAlt := 0.0 + hasAscent := false + hasDescent := false + + for i := 1; i < len(results); i++ { + if *results[i].Altitude > *results[i-1].Altitude { + hasAscent = true + } + if *results[i].Altitude < *results[i-1].Altitude { + hasDescent = true + } + if *results[i].Altitude > maxAlt { + maxAlt = *results[i].Altitude + } + } + + assert.True(t, hasAscent, "Should have ascent phase") + assert.True(t, hasDescent, "Should have descent phase") +} + +func TestRestrictedPrediction_NilStages_SimulatesAll(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + // Nil SimulateStages should simulate 
all stages + params.SimulateStages = nil + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // Should have both ascent and descent + maxAlt := 0.0 + minAltAfterMax := 1000000.0 + + for _, result := range results { + if *result.Altitude > maxAlt { + maxAlt = *result.Altitude + } + } + + foundMax := false + for _, result := range results { + if *result.Altitude == maxAlt { + foundMax = true + } + if foundMax && *result.Altitude < minAltAfterMax { + minAltAfterMax = *result.Altitude + } + } + + // Should reach high altitude and come back down + assert.Greater(t, maxAlt, 5000.0, "Should reach high altitude") + assert.Less(t, minAltAfterMax, maxAlt, "Should descend after reaching max altitude") +} + +func TestRestrictedPrediction_InvalidStage_IgnoresInvalid(t *testing.T) { + service, _ := createTestService() + params := createBasicParams() + + // Include invalid stage name (should be ignored) + params.SimulateStages = []string{"ascent", "invalid_stage", "descent"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + // Should still simulate ascent and descent, ignoring the invalid stage +} + +func TestRestrictedPrediction_WindImpact(t *testing.T) { + service, mockGrib := createTestService() + + // Override mock to return strong eastward wind + mockGrib.ExpectedCalls = nil + mockGrib.On("Extract", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return([2]float64{20.0, 0.0}, nil) // Strong eastward wind + + params := createBasicParams() + params.SimulateStages = []string{"ascent"} + + results, err := service.PerformPrediction(context.Background(), params) + + assert.NoError(t, err) + assert.NotEmpty(t, results) + + // Longitude should increase significantly due to eastward wind + firstLon := *results[0].Longitude + lastLon := *results[len(results)-1].Longitude + assert.Greater(t, lastLon, firstLon, "Longitude should increase with eastward wind") + + // Verify wind values are captured in results + for _, result := range results { + if result.WindU != nil { + // Wind values should be present in results + assert.NotNil(t, result.WindV, "WindV should be present if WindU is present") + } + } +} + +func TestRestrictedPrediction_MissingRequiredParams(t *testing.T) { + service, _ := createTestService() + + testCases := []struct { + name string + params ds.PredictionParameters + }{ + { + name: "Missing latitude", + params: ds.PredictionParameters{ + LaunchLongitude: floatPtr(-105.0), + LaunchAltitude: floatPtr(1000.0), + LaunchDatetime: timePtr(time.Now()), + }, + }, + { + name: "Missing longitude", + params: ds.PredictionParameters{ + LaunchLatitude: floatPtr(40.0), + LaunchAltitude: floatPtr(1000.0), + LaunchDatetime: timePtr(time.Now()), + }, + }, + { + name: "Missing altitude", + params: ds.PredictionParameters{ + LaunchLatitude: floatPtr(40.0), + LaunchLongitude: floatPtr(-105.0), + LaunchDatetime: timePtr(time.Now()), + }, + }, + { + name: "Missing datetime", + params: ds.PredictionParameters{ + LaunchLatitude: floatPtr(40.0), + LaunchLongitude: floatPtr(-105.0), + LaunchAltitude: floatPtr(1000.0), + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.params.SimulateStages = []string{"ascent"} + results, err := service.PerformPrediction(context.Background(), tc.params) + + assert.Error(t, err) + assert.Equal(t, ErrInvalidParameters, err) + assert.Nil(t, results) + }) + 
} +} + +func TestShouldSimulateStage(t *testing.T) { + testCases := []struct { + name string + stages []string + queryStage string + shouldSimulate bool + }{ + { + name: "Empty filter simulates all", + stages: []string{}, + queryStage: "ascent", + shouldSimulate: true, + }, + { + name: "Nil filter simulates all", + stages: nil, + queryStage: "descent", + shouldSimulate: true, + }, + { + name: "Stage in filter", + stages: []string{"ascent", "descent"}, + queryStage: "ascent", + shouldSimulate: true, + }, + { + name: "Stage not in filter", + stages: []string{"ascent"}, + queryStage: "descent", + shouldSimulate: false, + }, + { + name: "Float stage in filter", + stages: []string{"float"}, + queryStage: "float", + shouldSimulate: true, + }, + { + name: "Multiple stages excluding one", + stages: []string{"ascent", "float"}, + queryStage: "descent", + shouldSimulate: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + params := ds.PredictionParameters{ + SimulateStages: tc.stages, + } + result := shouldSimulateStage(params, tc.queryStage) + assert.Equal(t, tc.shouldSimulate, result) + }) + } +} + +// Helper functions +func floatPtr(f float64) *float64 { + return &f +} + +func timePtr(t time.Time) *time.Time { + return &t +} diff --git a/pkg/rest/oas_cfg_gen.go b/pkg/rest/oas_cfg_gen.go index 1845c4f..6b8f72d 100644 --- a/pkg/rest/oas_cfg_gen.go +++ b/pkg/rest/oas_cfg_gen.go @@ -5,14 +5,14 @@ package gsn import ( "net/http" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - ht "github.com/ogen-go/ogen/http" "github.com/ogen-go/ogen/middleware" "github.com/ogen-go/ogen/ogenerrors" "github.com/ogen-go/ogen/otelogen" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" ) var ( @@ -32,6 +32,7 @@ type otelConfig struct { Tracer trace.Tracer MeterProvider metric.MeterProvider Meter metric.Meter + 
Attributes []attribute.KeyValue } func (cfg *otelConfig) initOTEL() { @@ -215,6 +216,13 @@ func WithMeterProvider(provider metric.MeterProvider) Option { }) } +// WithAttributes specifies default otel attributes. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return otelOptionFunc(func(cfg *otelConfig) { + cfg.Attributes = attributes + }) +} + // WithClient specifies http client to use. func WithClient(client ht.Client) ClientOption { return optionFunc[clientConfig](func(cfg *clientConfig) { diff --git a/pkg/rest/oas_client_gen.go b/pkg/rest/oas_client_gen.go index 19822e8..0484f94 100644 --- a/pkg/rest/oas_client_gen.go +++ b/pkg/rest/oas_client_gen.go @@ -9,16 +9,15 @@ import ( "time" "github.com/go-faster/errors" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" - "go.opentelemetry.io/otel/trace" - "github.com/ogen-go/ogen/conv" ht "github.com/ogen-go/ogen/http" "github.com/ogen-go/ogen/otelogen" "github.com/ogen-go/ogen/uri" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/trace" ) func trimTrailingSlashes(u *url.URL) { @@ -103,8 +102,9 @@ func (c *Client) sendPerformPrediction(ctx context.Context, params PerformPredic otelAttrs := []attribute.KeyValue{ otelogen.OperationID("performPrediction"), semconv.HTTPRequestMethodKey.String("GET"), - semconv.HTTPRouteKey.String("/api/v1/prediction"), + semconv.URLTemplateKey.String("/api/v1/prediction"), } + otelAttrs = append(otelAttrs, c.cfg.Attributes...) // Run stopwatch. startTime := time.Now() @@ -345,6 +345,32 @@ func (c *Client) sendPerformPrediction(ctx context.Context, params PerformPredic return res, errors.Wrap(err, "encode query") } } + { + // Encode "simulate_stages" parameter. 
+ cfg := uri.QueryParameterEncodingConfig{ + Name: "simulate_stages", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if params.SimulateStages != nil { + return e.EncodeArray(func(e uri.Encoder) error { + for i, item := range params.SimulateStages { + if err := func() error { + return e.EncodeValue(conv.StringToString(string(item))) + }(); err != nil { + return errors.Wrapf(err, "[%d]", i) + } + } + return nil + }) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } { // Encode "interpolate" parameter. cfg := uri.QueryParameterEncodingConfig{ @@ -434,8 +460,9 @@ func (c *Client) sendReadinessCheck(ctx context.Context) (res *ReadinessResponse otelAttrs := []attribute.KeyValue{ otelogen.OperationID("readinessCheck"), semconv.HTTPRequestMethodKey.String("GET"), - semconv.HTTPRouteKey.String("/ready"), + semconv.URLTemplateKey.String("/ready"), } + otelAttrs = append(otelAttrs, c.cfg.Attributes...) // Run stopwatch. 
startTime := time.Now() diff --git a/pkg/rest/oas_handlers_gen.go b/pkg/rest/oas_handlers_gen.go index 76ab8ad..b7e53e4 100644 --- a/pkg/rest/oas_handlers_gen.go +++ b/pkg/rest/oas_handlers_gen.go @@ -8,16 +8,15 @@ import ( "time" "github.com/go-faster/errors" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" - "go.opentelemetry.io/otel/trace" - ht "github.com/ogen-go/ogen/http" "github.com/ogen-go/ogen/middleware" "github.com/ogen-go/ogen/ogenerrors" "github.com/ogen-go/ogen/otelogen" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/trace" ) type codeRecorder struct { @@ -30,6 +29,10 @@ func (c *codeRecorder) WriteHeader(status int) { c.ResponseWriter.WriteHeader(status) } +func (c *codeRecorder) Unwrap() http.ResponseWriter { + return c.ResponseWriter +} + // handlePerformPredictionRequest handles performPrediction operation. // // Perform prediction. @@ -86,7 +89,7 @@ func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool // unless there was another error (e.g., network error receiving the response body; or 3xx codes with // max redirects exceeded), in which case status MUST be set to Error. 
code := statusWriter.status - if code >= 100 && code < 500 { + if code < 100 || code >= 500 { span.SetStatus(codes.Error, stage) } @@ -115,6 +118,8 @@ func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool return } + var rawBody []byte + var response *PredictionResult if m := s.cfg.Middleware; m != nil { mreq := middleware.Request{ @@ -123,6 +128,7 @@ func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool OperationSummary: "Perform prediction", OperationID: "performPrediction", Body: nil, + RawBody: rawBody, Params: middleware.Parameters{ { Name: "launch_latitude", @@ -172,6 +178,10 @@ func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool Name: "descent_curve", In: "query", }: params.DescentCurve, + { + Name: "simulate_stages", + In: "query", + }: params.SimulateStages, { Name: "interpolate", In: "query", @@ -291,7 +301,7 @@ func (s *Server) handleReadinessCheckRequest(args [0]string, argsEscaped bool, w // unless there was another error (e.g., network error receiving the response body; or 3xx codes with // max redirects exceeded), in which case status MUST be set to Error. 
code := statusWriter.status - if code >= 100 && code < 500 { + if code < 100 || code >= 500 { span.SetStatus(codes.Error, stage) } @@ -306,6 +316,8 @@ func (s *Server) handleReadinessCheckRequest(args [0]string, argsEscaped bool, w err error ) + var rawBody []byte + var response *ReadinessResponse if m := s.cfg.Middleware; m != nil { mreq := middleware.Request{ @@ -314,6 +326,7 @@ func (s *Server) handleReadinessCheckRequest(args [0]string, argsEscaped bool, w OperationSummary: "Readiness check", OperationID: "readinessCheck", Body: nil, + RawBody: rawBody, Params: middleware.Parameters{}, Raw: r, } diff --git a/pkg/rest/oas_json_gen.go b/pkg/rest/oas_json_gen.go index ea3d61c..9707b6f 100644 --- a/pkg/rest/oas_json_gen.go +++ b/pkg/rest/oas_json_gen.go @@ -9,7 +9,6 @@ import ( "github.com/go-faster/errors" "github.com/go-faster/jx" - "github.com/ogen-go/ogen/json" "github.com/ogen-go/ogen/validate" ) @@ -607,6 +606,8 @@ func (s *PredictionResultPredictionItemStage) Decode(d *jx.Decoder) error { *s = PredictionResultPredictionItemStageAscent case PredictionResultPredictionItemStageDescent: *s = PredictionResultPredictionItemStageDescent + case PredictionResultPredictionItemStageFloat: + *s = PredictionResultPredictionItemStageFloat default: *s = PredictionResultPredictionItemStage(v) } diff --git a/pkg/rest/oas_parameters_gen.go b/pkg/rest/oas_parameters_gen.go index 23cc5d8..da12715 100644 --- a/pkg/rest/oas_parameters_gen.go +++ b/pkg/rest/oas_parameters_gen.go @@ -3,11 +3,11 @@ package gsn import ( + "fmt" "net/http" "time" "github.com/go-faster/errors" - "github.com/ogen-go/ogen/conv" "github.com/ogen-go/ogen/middleware" "github.com/ogen-go/ogen/ogenerrors" @@ -17,21 +17,22 @@ import ( // PerformPredictionParams is parameters of performPrediction operation. 
type PerformPredictionParams struct { - LaunchLatitude OptFloat64 - LaunchLongitude OptFloat64 - LaunchDatetime OptDateTime - LaunchAltitude OptFloat64 - Profile OptPerformPredictionProfile - AscentRate OptFloat64 - BurstAltitude OptFloat64 - DescentRate OptFloat64 - FloatAltitude OptFloat64 - StopDatetime OptDateTime - AscentCurve OptString - DescentCurve OptString - Interpolate OptBool - Format OptPerformPredictionFormat - Dataset OptDateTime + LaunchLatitude OptFloat64 `json:",omitempty,omitzero"` + LaunchLongitude OptFloat64 `json:",omitempty,omitzero"` + LaunchDatetime OptDateTime `json:",omitempty,omitzero"` + LaunchAltitude OptFloat64 `json:",omitempty,omitzero"` + Profile OptPerformPredictionProfile `json:",omitempty,omitzero"` + AscentRate OptFloat64 `json:",omitempty,omitzero"` + BurstAltitude OptFloat64 `json:",omitempty,omitzero"` + DescentRate OptFloat64 `json:",omitempty,omitzero"` + FloatAltitude OptFloat64 `json:",omitempty,omitzero"` + StopDatetime OptDateTime `json:",omitempty,omitzero"` + AscentCurve OptString `json:",omitempty,omitzero"` + DescentCurve OptString `json:",omitempty,omitzero"` + SimulateStages []PerformPredictionSimulateStagesItem `json:",omitempty"` + Interpolate OptBool `json:",omitempty,omitzero"` + Format OptPerformPredictionFormat `json:",omitempty,omitzero"` + Dataset OptDateTime `json:",omitempty,omitzero"` } func unpackPerformPredictionParams(packed middleware.Parameters) (params PerformPredictionParams) { @@ -143,6 +144,15 @@ func unpackPerformPredictionParams(packed middleware.Parameters) (params Perform params.DescentCurve = v.(OptString) } } + { + key := middleware.ParameterKey{ + Name: "simulate_stages", + In: "query", + } + if v, ok := packed[key]; ok { + params.SimulateStages = v.([]PerformPredictionSimulateStagesItem) + } + } { key := middleware.ParameterKey{ Name: "interpolate", @@ -787,6 +797,71 @@ func decodePerformPredictionParams(args [0]string, argsEscaped bool, r *http.Req Err: err, } } + // Decode query: 
simulate_stages. + if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "simulate_stages", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + return d.DecodeArray(func(d uri.Decoder) error { + var paramsDotSimulateStagesVal PerformPredictionSimulateStagesItem + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToString(val) + if err != nil { + return err + } + + paramsDotSimulateStagesVal = PerformPredictionSimulateStagesItem(c) + return nil + }(); err != nil { + return err + } + params.SimulateStages = append(params.SimulateStages, paramsDotSimulateStagesVal) + return nil + }) + }); err != nil { + return err + } + if err := func() error { + var failures []validate.FieldError + for i, elem := range params.SimulateStages { + if err := func() error { + if err := elem.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: fmt.Sprintf("[%d]", i), + Error: err, + }) + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "simulate_stages", + In: "query", + Err: err, + } + } // Decode query: interpolate. 
if err := func() error { cfg := uri.QueryParameterDecodingConfig{ diff --git a/pkg/rest/oas_response_decoders_gen.go b/pkg/rest/oas_response_decoders_gen.go index 3c148fd..352068c 100644 --- a/pkg/rest/oas_response_decoders_gen.go +++ b/pkg/rest/oas_response_decoders_gen.go @@ -9,7 +9,6 @@ import ( "github.com/go-faster/errors" "github.com/go-faster/jx" - "github.com/ogen-go/ogen/ogenerrors" "github.com/ogen-go/ogen/validate" ) diff --git a/pkg/rest/oas_response_encoders_gen.go b/pkg/rest/oas_response_encoders_gen.go index 8f24cd5..a95167d 100644 --- a/pkg/rest/oas_response_encoders_gen.go +++ b/pkg/rest/oas_response_encoders_gen.go @@ -7,10 +7,9 @@ import ( "github.com/go-faster/errors" "github.com/go-faster/jx" + ht "github.com/ogen-go/ogen/http" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" - - ht "github.com/ogen-go/ogen/http" ) func encodePerformPredictionResponse(response *PredictionResult, w http.ResponseWriter, span trace.Span) error { diff --git a/pkg/rest/oas_router_gen.go b/pkg/rest/oas_router_gen.go index 1eea998..5f7617a 100644 --- a/pkg/rest/oas_router_gen.go +++ b/pkg/rest/oas_router_gen.go @@ -109,12 +109,13 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Route is route object. type Route struct { - name string - summary string - operationID string - pathPattern string - count int - args [0]string + name string + summary string + operationID string + operationGroup string + pathPattern string + count int + args [0]string } // Name returns ogen operation name. @@ -134,6 +135,11 @@ func (r Route) OperationID() string { return r.operationID } +// OperationGroup returns the x-ogen-operation-group value. +func (r Route) OperationGroup() string { + return r.operationGroup +} + // PathPattern returns OpenAPI path. 
func (r Route) PathPattern() string { return r.pathPattern @@ -209,6 +215,7 @@ func (s *Server) FindPath(method string, u *url.URL) (r Route, _ bool) { r.name = PerformPredictionOperation r.summary = "Perform prediction" r.operationID = "performPrediction" + r.operationGroup = "" r.pathPattern = "/api/v1/prediction" r.args = args r.count = 0 @@ -233,6 +240,7 @@ func (s *Server) FindPath(method string, u *url.URL) (r Route, _ bool) { r.name = ReadinessCheckOperation r.summary = "Readiness check" r.operationID = "readinessCheck" + r.operationGroup = "" r.pathPattern = "/ready" r.args = args r.count = 0 diff --git a/pkg/rest/oas_schemas_gen.go b/pkg/rest/oas_schemas_gen.go index 26808cc..b484e26 100644 --- a/pkg/rest/oas_schemas_gen.go +++ b/pkg/rest/oas_schemas_gen.go @@ -430,6 +430,54 @@ func (s *PerformPredictionProfile) UnmarshalText(data []byte) error { } } +type PerformPredictionSimulateStagesItem string + +const ( + PerformPredictionSimulateStagesItemAscent PerformPredictionSimulateStagesItem = "ascent" + PerformPredictionSimulateStagesItemDescent PerformPredictionSimulateStagesItem = "descent" + PerformPredictionSimulateStagesItemFloat PerformPredictionSimulateStagesItem = "float" +) + +// AllValues returns all PerformPredictionSimulateStagesItem values. +func (PerformPredictionSimulateStagesItem) AllValues() []PerformPredictionSimulateStagesItem { + return []PerformPredictionSimulateStagesItem{ + PerformPredictionSimulateStagesItemAscent, + PerformPredictionSimulateStagesItemDescent, + PerformPredictionSimulateStagesItemFloat, + } +} + +// MarshalText implements encoding.TextMarshaler. 
+func (s PerformPredictionSimulateStagesItem) MarshalText() ([]byte, error) { + switch s { + case PerformPredictionSimulateStagesItemAscent: + return []byte(s), nil + case PerformPredictionSimulateStagesItemDescent: + return []byte(s), nil + case PerformPredictionSimulateStagesItemFloat: + return []byte(s), nil + default: + return nil, errors.Errorf("invalid value: %q", s) + } +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (s *PerformPredictionSimulateStagesItem) UnmarshalText(data []byte) error { + switch PerformPredictionSimulateStagesItem(data) { + case PerformPredictionSimulateStagesItemAscent: + *s = PerformPredictionSimulateStagesItemAscent + return nil + case PerformPredictionSimulateStagesItemDescent: + *s = PerformPredictionSimulateStagesItemDescent + return nil + case PerformPredictionSimulateStagesItemFloat: + *s = PerformPredictionSimulateStagesItemFloat + return nil + default: + return errors.Errorf("invalid value: %q", data) + } +} + // Ref: #/components/schemas/PredictionResult type PredictionResult struct { Metadata PredictionResultMetadata `json:"metadata"` @@ -511,6 +559,7 @@ type PredictionResultPredictionItemStage string const ( PredictionResultPredictionItemStageAscent PredictionResultPredictionItemStage = "ascent" PredictionResultPredictionItemStageDescent PredictionResultPredictionItemStage = "descent" + PredictionResultPredictionItemStageFloat PredictionResultPredictionItemStage = "float" ) // AllValues returns all PredictionResultPredictionItemStage values. 
@@ -518,6 +567,7 @@ func (PredictionResultPredictionItemStage) AllValues() []PredictionResultPredict return []PredictionResultPredictionItemStage{ PredictionResultPredictionItemStageAscent, PredictionResultPredictionItemStageDescent, + PredictionResultPredictionItemStageFloat, } } @@ -528,6 +578,8 @@ func (s PredictionResultPredictionItemStage) MarshalText() ([]byte, error) { return []byte(s), nil case PredictionResultPredictionItemStageDescent: return []byte(s), nil + case PredictionResultPredictionItemStageFloat: + return []byte(s), nil default: return nil, errors.Errorf("invalid value: %q", s) } @@ -542,6 +594,9 @@ func (s *PredictionResultPredictionItemStage) UnmarshalText(data []byte) error { case PredictionResultPredictionItemStageDescent: *s = PredictionResultPredictionItemStageDescent return nil + case PredictionResultPredictionItemStageFloat: + *s = PredictionResultPredictionItemStageFloat + return nil default: return errors.Errorf("invalid value: %q", data) } diff --git a/pkg/rest/oas_validators_gen.go b/pkg/rest/oas_validators_gen.go index 0cd02b6..1207d8f 100644 --- a/pkg/rest/oas_validators_gen.go +++ b/pkg/rest/oas_validators_gen.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/go-faster/errors" - "github.com/ogen-go/ogen/validate" ) @@ -34,6 +33,19 @@ func (s PerformPredictionProfile) Validate() error { } } +func (s PerformPredictionSimulateStagesItem) Validate() error { + switch s { + case "ascent": + return nil + case "descent": + return nil + case "float": + return nil + default: + return errors.Errorf("invalid value: %v", s) + } +} + func (s *PredictionResult) Validate() error { if s == nil { return validate.ErrNilPointer @@ -131,6 +143,8 @@ func (s PredictionResultPredictionItemStage) Validate() error { return nil case "descent": return nil + case "float": + return nil default: return errors.Errorf("invalid value: %v", s) } diff --git a/run.bat b/run.bat new file mode 100644 index 0000000..5cb20f1 --- /dev/null +++ b/run.bat @@ -0,0 +1,12 @@ +@echo 
off +REM Batch startup script for predictor service +REM Sets required environment variables and starts the service + +set GSN_PREDICTOR_GRIB_DIR="c://tmp/grib" +set GSN_PREDICTOR_GRIB_TTL=48h + +echo Starting predictor service with GRIB directory: %GSN_PREDICTOR_GRIB_DIR% +echo Dataset TTL: %GSN_PREDICTOR_GRIB_TTL% + +REM Run the service +go run cmd/api/main.go diff --git a/run.ps1 b/run.ps1 new file mode 100644 index 0000000..67904c2 --- /dev/null +++ b/run.ps1 @@ -0,0 +1,11 @@ +# PowerShell startup script for predictor service +# Sets required environment variables and starts the service + +$env:GSN_PREDICTOR_GRIB_DIR = "C:/tmp/grib" +$env:GSN_PREDICTOR_GRIB_TTL = "48h" # Allow datasets up to 48 hours old + +Write-Host "Starting predictor service with GRIB directory: $env:GSN_PREDICTOR_GRIB_DIR" -ForegroundColor Green +Write-Host "Dataset TTL: $env:GSN_PREDICTOR_GRIB_TTL" -ForegroundColor Green + +# Run the service +go run cmd/api/main.go -- 2.47.3 From 0895be4b8fc8e166d26480a5fb6b785c855d5080 Mon Sep 17 00:00:00 2001 From: "afanasyev.aa" Date: Tue, 9 Dec 2025 18:37:36 +0900 Subject: [PATCH 2/8] removed scripts --- force_update.bat | 23 ----------------------- force_update.ps1 | 27 --------------------------- internal/pkg/grib/config.go | 4 ++-- run.bat | 12 ------------ run.ps1 | 11 ----------- 5 files changed, 2 insertions(+), 75 deletions(-) delete mode 100644 force_update.bat delete mode 100644 force_update.ps1 delete mode 100644 run.bat delete mode 100644 run.ps1 diff --git a/force_update.bat b/force_update.bat deleted file mode 100644 index 2dc3a3b..0000000 --- a/force_update.bat +++ /dev/null @@ -1,23 +0,0 @@ -@echo off -REM Batch script to force download fresh GRIB data -REM This deletes old cube files and downloads the latest dataset - -echo Forcing fresh GRIB data download... -echo. 
- -REM Set environment -set GSN_PREDICTOR_GRIB_DIR=C:\tmp\grib -set GSN_PREDICTOR_GRIB_TTL=72h - -REM Delete old cube files to force fresh download -echo Deleting old cube files... -del /Q C:\tmp\grib\*.cube 2>nul -echo. - -echo Starting service to download fresh data... -echo This will download from S3 (may take several minutes) -echo Press Ctrl+C once download completes and you see "initial GRIB update complete" -echo. - -REM Run the service - it will download fresh data on startup -go run cmd/api/main.go diff --git a/force_update.ps1 b/force_update.ps1 deleted file mode 100644 index e911409..0000000 --- a/force_update.ps1 +++ /dev/null @@ -1,27 +0,0 @@ -# PowerShell script to force download fresh GRIB data -# This deletes old cube files and downloads the latest dataset - -Write-Host "Forcing fresh GRIB data download..." -ForegroundColor Yellow - -# Set environment -$env:GSN_PREDICTOR_GRIB_DIR = "C:\tmp\grib" -$env:GSN_PREDICTOR_GRIB_TTL = "72h" - -# Delete old cube files to force fresh download -$cubeFiles = Get-ChildItem -Path "C:\tmp\grib\*.cube" -ErrorAction SilentlyContinue -if ($cubeFiles) { - Write-Host "Deleting old cube files:" -ForegroundColor Yellow - foreach ($file in $cubeFiles) { - Write-Host " - $($file.Name)" -ForegroundColor Gray - Remove-Item $file.FullName -Force - } -} else { - Write-Host "No old cube files found" -ForegroundColor Gray -} - -Write-Host "`nStarting service to download fresh data..." 
-ForegroundColor Green -Write-Host "This will download from S3 (may take several minutes)" -ForegroundColor Cyan -Write-Host "Press Ctrl+C once download completes and you see 'initial GRIB update complete'`n" -ForegroundColor Cyan - -# Run the service - it will download fresh data on startup -go run cmd/api/main.go diff --git a/internal/pkg/grib/config.go b/internal/pkg/grib/config.go index 009645d..0132a92 100644 --- a/internal/pkg/grib/config.go +++ b/internal/pkg/grib/config.go @@ -8,8 +8,8 @@ import ( ) type Config struct { - Dir string `env:"DIR" envDefault:"/tmp/grib"` - TTL time.Duration `env:"TTL" envDefault:"24h"` + Dir string `env:"DIR" envDefault:"C:/tmp/grib"` + TTL time.Duration `env:"TTL" envDefault:"48h"` CacheTTL time.Duration `env:"CACHE_TTL" envDefault:"1h"` Parallel int `env:"PARALLEL" envDefault:"8"` DatasetURL string `env:"DATASET_URL" envDefault:"https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod"` diff --git a/run.bat b/run.bat deleted file mode 100644 index 5cb20f1..0000000 --- a/run.bat +++ /dev/null @@ -1,12 +0,0 @@ -@echo off -REM Batch startup script for predictor service -REM Sets required environment variables and starts the service - -set GSN_PREDICTOR_GRIB_DIR="c://tmp/grib" -set GSN_PREDICTOR_GRIB_TTL=48h - -echo Starting predictor service with GRIB directory: %GSN_PREDICTOR_GRIB_DIR% -echo Dataset TTL: %GSN_PREDICTOR_GRIB_TTL% - -REM Run the service -go run cmd/api/main.go diff --git a/run.ps1 b/run.ps1 deleted file mode 100644 index 67904c2..0000000 --- a/run.ps1 +++ /dev/null @@ -1,11 +0,0 @@ -# PowerShell startup script for predictor service -# Sets required environment variables and starts the service - -$env:GSN_PREDICTOR_GRIB_DIR = "C:/tmp/grib" -$env:GSN_PREDICTOR_GRIB_TTL = "48h" # Allow datasets up to 48 hours old - -Write-Host "Starting predictor service with GRIB directory: $env:GSN_PREDICTOR_GRIB_DIR" -ForegroundColor Green -Write-Host "Dataset TTL: $env:GSN_PREDICTOR_GRIB_TTL" -ForegroundColor Green - -# Run the 
service -go run cmd/api/main.go -- 2.47.3 From b355b41ed22047704b421bf56aaafd7dc8c387eb Mon Sep 17 00:00:00 2001 From: "afanasyev.aa" Date: Wed, 10 Dec 2025 22:54:18 +0900 Subject: [PATCH 3/8] fixed oas generated files and makefile --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index c6c35d9..42d11c5 100644 --- a/Makefile +++ b/Makefile @@ -105,7 +105,9 @@ help: @echo " run-local - Run locally" @echo " fmt - Format code" @echo " lint - Lint code" + @echo " generate-ogen - Generate OpenAPI code from swagger spec" @echo " help - Show this help" +.PHONY: generate-ogen generate-ogen: go run github.com/ogen-go/ogen/cmd/ogen@latest --target pkg/rest -package gsn --clean api/rest/predictor.swagger.yml \ No newline at end of file -- 2.47.3 From ca95e06ab731247ba6fa171d83b6d2f9b1e8e656 Mon Sep 17 00:00:00 2001 From: straitz Date: Tue, 16 Dec 2025 11:42:00 +0900 Subject: [PATCH 4/8] fixed wind data bug --- internal/pkg/grib/cube.go | 2 +- internal/pkg/grib/extractor.go | 7 ++++--- internal/pkg/grib/grib.go | 2 +- internal/pkg/grib/util.go | 8 ++++---- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/internal/pkg/grib/cube.go b/internal/pkg/grib/cube.go index d2015ec..64818dd 100644 --- a/internal/pkg/grib/cube.go +++ b/internal/pkg/grib/cube.go @@ -28,7 +28,7 @@ func openCube(path string) (*cube, error) { } const ( - nT = 97 // 0-96 hours with step 1 hour + nT = 33 // 0-96 hours with step 3 hours (33 time steps) nP = 47 // 47 pressure levels matching tawhiri nLat = 361 nLon = 720 diff --git a/internal/pkg/grib/extractor.go b/internal/pkg/grib/extractor.go index 769b7cd..850a0e3 100644 --- a/internal/pkg/grib/extractor.go +++ b/internal/pkg/grib/extractor.go @@ -17,9 +17,10 @@ func (d *dataset) uv(lat, lon, alt float64, tHours float64) (float64, float64) { x0 := int(math.Floor(ix)) % d.cube.lon x1 := (x0 + 1) % d.cube.lon wx := ix - float64(x0) - // For hourly data (step = 1 hour) - it0 := int(math.Floor(tHours)) - wt := 
tHours - float64(it0) + // For 3-hourly data (step = 3 hours) + // Convert tHours to 3-hour index (e.g., 1.5 hours -> index 0.5, interpolate between 0 and 1) + it0 := int(math.Floor(tHours / 3.0)) + wt := (tHours - float64(it0*3)) / 3.0 // Interpolation weight within 3-hour window p := pressureFromAlt(alt) ip0 := 0 for ip0+1 < len(pressureLevels) && pressureLevels[ip0+1] > p { diff --git a/internal/pkg/grib/grib.go b/internal/pkg/grib/grib.go index 32c4466..637a9aa 100644 --- a/internal/pkg/grib/grib.go +++ b/internal/pkg/grib/grib.go @@ -186,7 +186,7 @@ func (s *service) Update(ctx context.Context) error { } func assembleCube(dir string, run time.Time, cubePath string) error { - const sizePerVar = 97 * 47 * 361 * 720 * 4 // 97 time steps (0-96 hours), 47 pressure levels + const sizePerVar = 33 * 47 * 361 * 720 * 4 // 33 time steps (0-96 hours, 3-hour intervals), 47 pressure levels total := int64(sizePerVar * 3) // 3 variables: gh, u, v f, err := os.Create(cubePath) if err != nil { diff --git a/internal/pkg/grib/util.go b/internal/pkg/grib/util.go index 8de4af7..14b00be 100644 --- a/internal/pkg/grib/util.go +++ b/internal/pkg/grib/util.go @@ -6,11 +6,11 @@ import ( "time" ) -// Generate steps from 0 to 96 with step 1 hour (97 steps total) -// GFS provides hourly data for 0-120 hours, we use first 96 hours +// Generate steps from 0 to 96 with step 3 hours (33 steps total) +// GFS provides 3-hourly data for 0-120 hours, we use first 96 hours (0, 3, 6, ..., 96) var steps = func() []int { - result := make([]int, 0, 97) - for i := 0; i <= 96; i++ { + result := make([]int, 0, 33) + for i := 0; i <= 96; i += 3 { result = append(result, i) } return result -- 2.47.3 From 8e9f117799a5fd2ae5028950ab3b66a5a1b3148b Mon Sep 17 00:00:00 2001 From: straitz Date: Sun, 22 Mar 2026 16:29:53 +0900 Subject: [PATCH 5/8] updated downloader --- .claude/settings.local.json | 9 +- .dockerignore | 2 +- .gitignore | 8 +- assemble_cube.go | 26 ++ cmd/api/main.go | 1 + go.mod | 18 -- go.sum | 
36 --- internal/pkg/grib/assemble_test.go | 25 ++ internal/pkg/grib/config.go | 129 ++++++++- internal/pkg/grib/create_dataset.go | 0 internal/pkg/grib/cube.go | 19 +- internal/pkg/grib/dataset.go | 1 + internal/pkg/grib/downloader.go | 91 ------ internal/pkg/grib/extractor.go | 89 +++++- internal/pkg/grib/grib.go | 114 +++----- internal/pkg/grib/partial_downloader.go | 350 ++++++++++++++++++++++++ internal/pkg/grib/pressure.go | 9 - internal/pkg/grib/s3_downloader.go | 265 ------------------ internal/pkg/grib/util.go | 14 - internal/service/predictor.go | 99 ++++++- scripts/compare.go | 114 ++++++++ scripts/rebuild_cube.go | 44 +++ scripts/test_download.go | 36 +++ scripts/test_gh.go | 60 ++++ scripts/test_grib_read.go | 38 +++ scripts/test_prediction.go | 87 ++++++ scripts/test_s3_download.go | 89 ------ scripts/test_s3_simple.go | 68 ----- scripts/test_wind.go | 55 ++++ start_with_http.sh | 11 + 30 files changed, 1209 insertions(+), 698 deletions(-) create mode 100644 assemble_cube.go create mode 100644 internal/pkg/grib/assemble_test.go create mode 100644 internal/pkg/grib/create_dataset.go delete mode 100644 internal/pkg/grib/downloader.go create mode 100644 internal/pkg/grib/partial_downloader.go delete mode 100644 internal/pkg/grib/s3_downloader.go create mode 100644 scripts/compare.go create mode 100644 scripts/rebuild_cube.go create mode 100644 scripts/test_download.go create mode 100644 scripts/test_gh.go create mode 100644 scripts/test_grib_read.go create mode 100644 scripts/test_prediction.go delete mode 100644 scripts/test_s3_download.go delete mode 100644 scripts/test_s3_simple.go create mode 100644 scripts/test_wind.go create mode 100644 start_with_http.sh diff --git a/.claude/settings.local.json b/.claude/settings.local.json index c58df5e..af05740 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -2,7 +2,14 @@ "permissions": { "allow": [ "Bash(cat:*)", - "Bash(xargs:*)" + "Bash(xargs:*)", + "Bash(ls:*)", + "Bash(done)", + 
"Bash(curl:*)", + "WebFetch(domain:raw.githubusercontent.com)", + "WebFetch(domain:github.com)", + "Bash(go run:*)", + "Bash(pkill:*)" ], "deny": [], "ask": [] diff --git a/.dockerignore b/.dockerignore index 287fc68..22e12b6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -47,7 +47,7 @@ predictor # Temporary files /tmp/ /temp/ - +*.py # Test coverage *.out diff --git a/.gitignore b/.gitignore index 8519b69..9ffe4dd 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ *.dll *.so *.dylib +*.ps1 # Test binary, built with `go test -c` *.test @@ -28,6 +29,9 @@ go.work *.swp *.swo *~ +*.bak +*.py +*.json # OS generated files .DS_Store @@ -59,4 +63,6 @@ Thumbs.db # Tawhiri /tawhiri -/tawhiri/* \ No newline at end of file +/tawhiri/* + +*.md \ No newline at end of file diff --git a/assemble_cube.go b/assemble_cube.go new file mode 100644 index 0000000..c624e92 --- /dev/null +++ b/assemble_cube.go @@ -0,0 +1,26 @@ +package main + +import ( + "fmt" + "os" + "time" +) + +// This will be a simple wrapper that calls the internal assembleCube function +// We'll compile it as part of the grib package + +func main() { + dir := "C:/tmp/grib" + run := time.Date(2025, 12, 6, 0, 0, 0, 0, time.UTC) + cubePath := fmt.Sprintf("%s/%s.cube", dir, run.Format("20060102_15")) + + fmt.Printf("Assembling cube from existing GRIB files...\n") + fmt.Printf("Directory: %s\n", dir) + fmt.Printf("Run: %s\n", run.Format("2006-01-02 15:04 MST")) + fmt.Printf("Output: %s\n", cubePath) + fmt.Println() + + // Just print instructions - we'll do it directly + fmt.Println("Run this Go code to assemble:") + fmt.Printf("cd internal/pkg/grib && go test -run TestAssemble\n") +} diff --git a/cmd/api/main.go b/cmd/api/main.go index 2e9bf6b..f250199 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -24,6 +24,7 @@ func main() { panic(err) } defer lg.Sync() + zap.ReplaceGlobals(lg) ctx := log.ToCtx(context.Background(), lg) schedulerConfig, err := scheduler.NewConfig() diff --git a/go.mod b/go.mod index 
54fb283..b186e37 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,6 @@ module git.intra.yksa.space/gsn/predictor go 1.24.4 require ( - github.com/aws/aws-sdk-go-v2 v1.39.3 - github.com/aws/aws-sdk-go-v2/config v1.31.13 - github.com/aws/aws-sdk-go-v2/service/s3 v1.88.5 github.com/caarlos0/env/v11 v11.3.1 github.com/edsrzf/mmap-go v1.2.0 github.com/go-co-op/gocron v1.37.0 @@ -23,21 +20,6 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.18.17 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.29.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.7 // indirect - github.com/aws/smithy-go v1.23.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.11.5 // indirect github.com/fatih/color v1.18.0 // indirect diff --git a/go.sum b/go.sum index 6957306..7c95f5c 100644 --- a/go.sum +++ b/go.sum @@ -1,39 +1,3 @@ -github.com/aws/aws-sdk-go-v2 v1.39.3 h1:h7xSsanJ4EQJXG5iuW4UqgP7qBopLpj84mpkNx3wPjM= -github.com/aws/aws-sdk-go-v2 v1.39.3/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY= 
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko= -github.com/aws/aws-sdk-go-v2/config v1.31.13 h1:wcqQB3B0PgRPUF5ZE/QL1JVOyB0mbPevHFoAMpemR9k= -github.com/aws/aws-sdk-go-v2/config v1.31.13/go.mod h1:ySB5D5ybwqGbT6c3GszZ+u+3KvrlYCUQNo62+hkKOFk= -github.com/aws/aws-sdk-go-v2/credentials v1.18.17 h1:skpEwzN/+H8cdrrtT8y+rvWJGiWWv0DeNAe+4VTf+Vs= -github.com/aws/aws-sdk-go-v2/credentials v1.18.17/go.mod h1:Ed+nXsaYa5uBINovJhcAWkALvXw2ZLk36opcuiSZfJM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.10 h1:UuGVOX48oP4vgQ36oiKmW9RuSeT8jlgQgBFQD+HUiHY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.10/go.mod h1:vM/Ini41PzvudT4YkQyE/+WiQJiQ6jzeDyU8pQKwCac= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.10 h1:mj/bdWleWEh81DtpdHKkw41IrS+r3uw1J/VQtbwYYp8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.10/go.mod h1:7+oEMxAZWP8gZCyjcm9VicI0M61Sx4DJtcGfKYv2yKQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.10 h1:wh+/mn57yhUrFtLIxyFPh2RgxgQz/u+Yrf7hiHGHqKY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.10/go.mod h1:7zirD+ryp5gitJJ2m1BBux56ai8RIRDykXZrJSp540w= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.10 h1:FHw90xCTsofzk6vjU808TSuDtDfOOKPNdz5Weyc3tUI= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.10/go.mod h1:n8jdIE/8F3UYkg8O4IGkQpn2qUmapg/1K1yl29/uf/c= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 h1:xtuxji5CS0JknaXoACOunXOYOQzgfTvGAc9s2QdCJA4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2/go.mod h1:zxwi0DIR0rcRcgdbl7E2MSOvxDyyXGBlScvBkARFaLQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.1 h1:ne+eepnDB2Wh5lHKzELgEncIqeVlQ1rSF9fEa4r5I+A= -github.com/aws/aws-sdk-go-v2/service/internal/checksum 
v1.9.1/go.mod h1:u0Jkg0L+dcG1ozUq21uFElmpbmjBnhHR5DELHIme4wg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.10 h1:DRND0dkCKtJzCj4Xl4OpVbXZgfttY5q712H9Zj7qc/0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.10/go.mod h1:tGGNmJKOTernmR2+VJ0fCzQRurcPZj9ut60Zu5Fi6us= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.10 h1:DA+Hl5adieRyFvE7pCvBWm3VOZTRexGVkXw33SUqNoY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.10/go.mod h1:L+A89dH3/gr8L4ecrdzuXUYd1znoko6myzndVGZx/DA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.5 h1:FlGScxzCGNzT+2AvHT1ZGMvxTwAMa6gsooFb1pO/AiM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.5/go.mod h1:N/iojY+8bW3MYol9NUMuKimpSbPEur75cuI1SmtonFM= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.7 h1:fspVFg6qMx0svs40YgRmE7LZXh9VRZvTT35PfdQR6FM= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.7/go.mod h1:BQTKL3uMECaLaUV3Zc2L4Qybv8C6BIXjuu1dOPyxTQs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.2 h1:scVnW+NLXasGOhy7HhkdT9AGb6kjgW7fJ5xYkUaqHs0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.2/go.mod h1:FRNCY3zTEWZXBKm2h5UBUPvCVDOecTad9KhynDyGBc0= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.7 h1:VEO5dqFkMsl8QZ2yHsFDJAIZLAkEbaYDB+xdKi0Feic= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.7/go.mod h1:L1xxV3zAdB+qVrVW/pBIrIAnHFWHo6FBbFe4xOGsG/o= -github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M= -github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA= github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= diff --git a/internal/pkg/grib/assemble_test.go b/internal/pkg/grib/assemble_test.go new file mode 100644 index 0000000..8d9d7ce --- /dev/null +++ b/internal/pkg/grib/assemble_test.go @@ -0,0 +1,25 @@ +package 
grib + +import ( + "testing" + "time" +) + +func TestAssembleCubeFromExisting(t *testing.T) { + dir := "C:/tmp/grib" + run := time.Date(2026, 1, 16, 6, 0, 0, 0, time.UTC) + cubePath := dir + "/" + run.Format("20060102_15") + ".cube" + + t.Logf("Assembling cube from existing GRIB files...") + t.Logf("Directory: %s", dir) + t.Logf("Run: %s", run.Format("2006-01-02 15:04 MST")) + t.Logf("Output: %s", cubePath) + + err := assembleCube(dir, run, cubePath) + if err != nil { + t.Fatalf("Failed to assemble cube: %v", err) + } + + t.Logf("✓ Cube assembled successfully!") + t.Logf("Cube file: %s", cubePath) +} diff --git a/internal/pkg/grib/config.go b/internal/pkg/grib/config.go index 0132a92..c80462e 100644 --- a/internal/pkg/grib/config.go +++ b/internal/pkg/grib/config.go @@ -1,23 +1,130 @@ package grib import ( + "fmt" "time" "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" env "github.com/caarlos0/env/v11" ) +// DatasetConfig описывает параметры GFS-датасета: сетку, временные шаги, +// уровни давления и URL для загрузки. 
+type DatasetConfig struct { + // Сетка + Resolution float64 // шаг сетки в градусах (0.25 или 0.5) + NLat int // точек по широте (721 для 0.25°, 361 для 0.5°) + NLon int // точек по долготе (1440 для 0.25°, 720 для 0.5°) + + // Время + NT int // кол-во временных шагов (97 для 0–96 ч с шагом 1) + MaxHour int // последний час прогноза (96) + TimeStep int // интервал между шагами, часы (1 или 3) + + // Вертикаль + NP int // кол-во уровней давления + Levels []float64 // уровни давления в гПа, по убыванию (1000 … 1) + + // Переменные в кубе (порядок важен: индексы 0, 1, 2, …) + NVar int // кол-во переменных + Variables []string // GRIB-имена для фильтрации idx (HGT, UGRD, VGRD) + + // URL загрузки (fmt-шаблоны: date, hour, hour, step) + URLMask string // основной pgrb2 + URLMaskB string // дополнительный pgrb2b + + // Имена файлов + FileSuffix string // токен разрешения в именах файлов ("0p25", "0p50") +} + +// SizePerVar возвращает размер одной переменной в кубе, байт. +func (dc *DatasetConfig) SizePerVar() int64 { + return int64(dc.NT) * int64(dc.NP) * int64(dc.NLat) * int64(dc.NLon) * 4 +} + +// CubeSize возвращает полный размер куба, байт. +func (dc *DatasetConfig) CubeSize() int64 { + return dc.SizePerVar() * int64(dc.NVar) +} + +// GridSize возвращает NLat * NLon. +func (dc *DatasetConfig) GridSize() int { + return dc.NLat * dc.NLon +} + +// InvResolution возвращает 1/Resolution — множитель для перевода координат в индексы. +func (dc *DatasetConfig) InvResolution() float64 { + return 1.0 / dc.Resolution +} + +// Steps возвращает список часов прогноза [0, TimeStep, 2*TimeStep, …, MaxHour]. +func (dc *DatasetConfig) Steps() []int { + out := make([]int, 0, dc.NT) + for h := 0; h <= dc.MaxHour; h += dc.TimeStep { + out = append(out, h) + } + return out +} + +// FileName возвращает имя основного GRIB-файла (pgrb2). 
+func (dc *DatasetConfig) FileName(run time.Time, step int) string { + return fmt.Sprintf("gfs.t%02dz.pgrb2.%s.f%03d", run.Hour(), dc.FileSuffix, step) +} + +// FileNameB возвращает имя вторичного GRIB-файла (pgrb2b). +func (dc *DatasetConfig) FileNameB(run time.Time, step int) string { + return fmt.Sprintf("gfs.t%02dz.pgrb2b.%s.f%03d", run.Hour(), dc.FileSuffix, step) +} + +// GribURL возвращает URL основного GRIB-файла. +func (dc *DatasetConfig) GribURL(run time.Time, step int) string { + return fmt.Sprintf(dc.URLMask, run.Format("20060102"), run.Hour(), run.Hour(), step) +} + +// GribURLB возвращает URL вторичного GRIB-файла. +func (dc *DatasetConfig) GribURLB(run time.Time, step int) string { + return fmt.Sprintf(dc.URLMaskB, run.Format("20060102"), run.Hour(), run.Hour(), step) +} + +// DefaultDatasetConfig возвращает конфиг GFS 0.25° / 1 час / 47 уровней. +func DefaultDatasetConfig() DatasetConfig { + return DatasetConfig{ + Resolution: 0.25, + NLat: 721, + NLon: 1440, + + NT: 97, + MaxHour: 96, + TimeStep: 1, + + NP: 47, + Levels: []float64{ + 1000, 975, 950, 925, 900, 875, 850, 825, 800, 775, + 750, 725, 700, 675, 650, 625, 600, 575, 550, 525, + 500, 475, 450, 425, 400, 375, 350, 325, 300, 275, + 250, 225, 200, 175, 150, 125, 100, 70, 50, 30, + 20, 10, 7, 5, 3, 2, 1, + }, + + NVar: 3, + Variables: []string{"HGT", "UGRD", "VGRD"}, + + URLMask: "https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2.0p25.f%03d", + URLMaskB: "https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2b.0p25.f%03d", + + FileSuffix: "0p25", + } +} + +// --------------------------------------------------------------------------- + type Config struct { - Dir string `env:"DIR" envDefault:"C:/tmp/grib"` - TTL time.Duration `env:"TTL" envDefault:"48h"` - CacheTTL time.Duration `env:"CACHE_TTL" envDefault:"1h"` - Parallel int `env:"PARALLEL" envDefault:"8"` - DatasetURL string `env:"DATASET_URL" 
envDefault:"https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod"` - // S3 configuration - UseS3 bool `env:"USE_S3" envDefault:"true"` - S3Bucket string `env:"S3_BUCKET" envDefault:"noaa-gfs-bdp-pds"` - S3Region string `env:"S3_REGION" envDefault:"us-east-1"` - S3Timeout time.Duration `env:"S3_TIMEOUT" envDefault:"300s"` + Dir string `env:"DIR" envDefault:"C:/tmp/grib"` + TTL time.Duration `env:"TTL" envDefault:"48h"` + CacheTTL time.Duration `env:"CACHE_TTL" envDefault:"1h"` + Parallel int `env:"PARALLEL" envDefault:"8"` + + Dataset DatasetConfig } func NewConfig() (*Config, error) { @@ -27,6 +134,6 @@ func NewConfig() (*Config, error) { }); err != nil { return nil, errcodes.Wrap(err, "failed to parse GRIB config") } - + cfg.Dataset = DefaultDatasetConfig() return cfg, nil } diff --git a/internal/pkg/grib/create_dataset.go b/internal/pkg/grib/create_dataset.go new file mode 100644 index 0000000..e69de29 diff --git a/internal/pkg/grib/cube.go b/internal/pkg/grib/cube.go index 64818dd..dfc3606 100644 --- a/internal/pkg/grib/cube.go +++ b/internal/pkg/grib/cube.go @@ -15,7 +15,7 @@ type cube struct { file *os.File } -func openCube(path string) (*cube, error) { +func openCube(path string, dc *DatasetConfig) (*cube, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -27,14 +27,15 @@ func openCube(path string) (*cube, error) { return nil, err } - const ( - nT = 33 // 0-96 hours with step 3 hours (33 time steps) - nP = 47 // 47 pressure levels matching tawhiri - nLat = 361 - nLon = 720 - ) - - return &cube{mm: mm, t: nT, p: nP, lat: nLat, lon: nLon, bytesPerVar: int64(nT * nP * nLat * nLon * 4), file: f}, nil + return &cube{ + mm: mm, + t: dc.NT, + p: dc.NP, + lat: dc.NLat, + lon: dc.NLon, + bytesPerVar: dc.SizePerVar(), + file: f, + }, nil } func (c *cube) val(varIdx, ti, pi, y, x int) float32 { diff --git a/internal/pkg/grib/dataset.go b/internal/pkg/grib/dataset.go index e539f65..f994dc9 100644 --- a/internal/pkg/grib/dataset.go +++ 
b/internal/pkg/grib/dataset.go @@ -2,6 +2,7 @@ package grib type dataset struct { cube *cube + ds *DatasetConfig runUTC int64 // unix seconds } diff --git a/internal/pkg/grib/downloader.go b/internal/pkg/grib/downloader.go deleted file mode 100644 index a93a64c..0000000 --- a/internal/pkg/grib/downloader.go +++ /dev/null @@ -1,91 +0,0 @@ -package grib - -import ( - "context" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "golang.org/x/sync/errgroup" -) - -type Downloader struct { - Dir string - Parallel int - Client *http.Client - DatasetURL string -} - -func (d *Downloader) fileURL(run string, hour int, step int) string { - return fmt.Sprintf("%s/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2.0p50.f%03d", d.DatasetURL, run, hour, hour, step) -} - -func (d *Downloader) fetch(ctx context.Context, url, dst string) (err error) { - // Check if final file already exists - if _, err := os.Stat(dst); err == nil { - return nil - } - - tmp := dst + ".part" - - // Remove old .part file if it exists (fixes race condition) - os.Remove(tmp) - - f, err := os.Create(tmp) - if err != nil { - return err - } - - // Cleanup .part file on any error (using named return value) - defer func() { - f.Close() - if err != nil { - os.Remove(tmp) - } - }() - - req, _ := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - resp, err := d.Client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return errcodes.Wrap(errcodes.ErrDownload, "bad status: "+resp.Status) - } - - if _, err := io.Copy(f, resp.Body); err != nil { - return err - } - - // Close file before rename - if err := f.Close(); err != nil { - return err - } - - // If rename fails, err will be set and defer will cleanup .part file - return os.Rename(tmp, dst) -} - -func (d *Downloader) Run(ctx context.Context, run time.Time) error { - runStr := run.Format("20060102") - hour := run.Hour() - g, ctx 
:= errgroup.WithContext(ctx) - sem := make(chan struct{}, d.Parallel) - for _, step := range steps { - step := step - sem <- struct{}{} - g.Go(func() error { - defer func() { <-sem }() - url := d.fileURL(runStr, hour, step) - dst := filepath.Join(d.Dir, fileName(run, step)) - return d.fetch(ctx, url, dst) - }) - } - return g.Wait() -} diff --git a/internal/pkg/grib/extractor.go b/internal/pkg/grib/extractor.go index 850a0e3..d27f3a4 100644 --- a/internal/pkg/grib/extractor.go +++ b/internal/pkg/grib/extractor.go @@ -4,30 +4,100 @@ import "math" func lerp(a, b, t float64) float64 { return a + t*(b-a) } -// Interpolate 16‑point (time, p, lat, lon) +// ghInterp returns interpolated geopotential height at given time/pressure/lat/lon +func (d *dataset) ghInterp(ti, pi int, y0, y1, x0, x1 int, wy, wx float64) float64 { + g00 := d.cube.val(0, ti, pi, y0, x0) + g10 := d.cube.val(0, ti, pi, y0, x1) + g01 := d.cube.val(0, ti, pi, y1, x0) + g11 := d.cube.val(0, ti, pi, y1, x1) + return (1-wy)*((1-wx)*float64(g00)+wx*float64(g10)) + wy*((1-wx)*float64(g01)+wx*float64(g11)) +} + +// searchAltLevel uses geopotential height to find pressure level bracket for target altitude. +func (d *dataset) searchAltLevel(alt float64, ti, y0, y1, x0, x1 int, wy, wx float64) (int, float64) { + levels := d.ds.Levels + nLevels := len(levels) + + lo, hi := 0, nLevels-1 + for lo < hi-1 { + mid := (lo + hi) / 2 + ghMid := d.ghInterp(ti, mid, y0, y1, x0, x1, wy, wx) + if ghMid < alt { + lo = mid + } else { + hi = mid + } + } + + ghLo := d.ghInterp(ti, lo, y0, y1, x0, x1, wy, wx) + ghHi := d.ghInterp(ti, hi, y0, y1, x0, x1, wy, wx) + + wp := 0.0 + if ghHi != ghLo { + wp = (alt - ghLo) / (ghHi - ghLo) + } + if wp < 0 { + wp = 0 + } + if wp > 1 { + wp = 1 + } + + return lo, wp +} + +// uv выполняет интерполяцию ветра по 4 измерениям (time, pressure, lat, lon). 
func (d *dataset) uv(lat, lon, alt float64, tHours float64) (float64, float64) { if lon < 0 { lon += 360 } - iy := (lat + 90) * 2 + + inv := d.ds.InvResolution() + + // GRIB scan north→south: index 0 = 90°N + iy := (90 - lat) * inv y0 := int(math.Floor(iy)) + if y0 < 0 { + y0 = 0 + } + if y0 >= d.cube.lat-1 { + y0 = d.cube.lat - 2 + } y1 := y0 + 1 wy := iy - float64(y0) - ix := lon * 2 + + ix := lon * inv x0 := int(math.Floor(ix)) % d.cube.lon x1 := (x0 + 1) % d.cube.lon wx := ix - float64(x0) - // For 3-hourly data (step = 3 hours) - // Convert tHours to 3-hour index (e.g., 1.5 hours -> index 0.5, interpolate between 0 and 1) - it0 := int(math.Floor(tHours / 3.0)) - wt := (tHours - float64(it0*3)) / 3.0 // Interpolation weight within 3-hour window + + // Время: tHours делим на шаг, чтобы получить индекс в кубе + tIdx := tHours / float64(d.ds.TimeStep) + it0 := int(math.Floor(tIdx)) + if it0 < 0 { + it0 = 0 + } + if it0 >= d.cube.t-1 { + it0 = d.cube.t - 2 + } + wt := tIdx - float64(it0) + + // ISA: высота → давление → индекс уровня + levels := d.ds.Levels p := pressureFromAlt(alt) ip0 := 0 - for ip0+1 < len(pressureLevels) && pressureLevels[ip0+1] > p { + for ip0+1 < len(levels) && levels[ip0+1] > p { ip0++ } ip1 := ip0 + 1 - wp := (pressureLevels[ip0] - p) / (pressureLevels[ip0] - pressureLevels[ip1]) + if ip1 >= len(levels) { + ip1 = len(levels) - 1 + } + wp := 0.0 + if levels[ip0] != levels[ip1] { + wp = (levels[ip0] - p) / (levels[ip0] - levels[ip1]) + } + fetch := func(ti, pi int) (float64, float64) { u00 := d.cube.val(1, ti, pi, y0, x0) u10 := d.cube.val(1, ti, pi, y0, x1) @@ -41,6 +111,7 @@ func (d *dataset) uv(lat, lon, alt float64, tHours float64) (float64, float64) { vxy := (1-wy)*((1-wx)*float64(v00)+wx*float64(v10)) + wy*((1-wx)*float64(v01)+wx*float64(v11)) return uxy, vxy } + u0p0, v0p0 := fetch(it0, ip0) u0p1, v0p1 := fetch(it0, ip1) u1p0, v1p0 := fetch(it0+1, ip0) diff --git a/internal/pkg/grib/grib.go b/internal/pkg/grib/grib.go index 
637a9aa..7bf6bfc 100644 --- a/internal/pkg/grib/grib.go +++ b/internal/pkg/grib/grib.go @@ -4,7 +4,6 @@ import ( "context" "encoding/binary" "math" - "net/http" "os" "path/filepath" "strings" @@ -41,15 +40,12 @@ func New(cfg *Config) (Service, error) { // Try to load existing dataset on startup if err := s.loadExistingDataset(); err != nil { // Log error but don't fail startup - dataset will be loaded on first Update() - // This allows the service to start even if no data is available yet } return s, nil } -// loadExistingDataset tries to load the most recent available dataset func (s *service) loadExistingDataset() error { - // Find the most recent cube file pattern := filepath.Join(s.cfg.Dir, "*.cube") matches, err := filepath.Glob(pattern) if err != nil { @@ -60,7 +56,6 @@ func (s *service) loadExistingDataset() error { return errcodes.ErrNoCubeFilesFound } - // Sort by modification time (newest first) var latestFile string var latestTime time.Time @@ -69,7 +64,6 @@ func (s *service) loadExistingDataset() error { if err != nil { continue } - if info.ModTime().After(latestTime) { latestTime = info.ModTime() latestFile = match @@ -80,18 +74,16 @@ func (s *service) loadExistingDataset() error { return errcodes.ErrNoValidCubeFilesFound } - // Check if the file is fresh enough if time.Since(latestTime) > s.cfg.TTL { return errcodes.Wrap(errcodes.ErrLatestCubeFileIsTooOld, "latest cube file is too old") } - // Load the dataset - c, err := openCube(latestFile) + dc := &s.cfg.Dataset + c, err := openCube(latestFile, dc) if err != nil { return err } - // Extract run time from filename base := filepath.Base(latestFile) runStr := strings.TrimSuffix(base, ".cube") run, err := time.Parse("20060102_15", runStr) @@ -100,94 +92,70 @@ func (s *service) loadExistingDataset() error { return err } - ds := &dataset{cube: c, runUTC: run.Unix()} - s.data.Store(ds) - + s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()}) return nil } -// Update() downloads missing GRIBs, 
assembles cube into a single mmap‑file. func (s *service) Update(ctx context.Context) error { - // Check if we already have fresh data if d := s.data.Load(); d != nil { runTime := time.Unix(d.runUTC, 0) if time.Since(runTime) < s.cfg.TTL { - // Data is still fresh, no need to update return nil } } - // Check again after acquiring lock (double-checked locking pattern) if d := s.data.Load(); d != nil { runTime := time.Unix(d.runUTC, 0) if time.Since(runTime) < s.cfg.TTL { - // Another instance already updated the data return nil } } - run := nearestRun(time.Now().UTC().Add(-24 * time.Hour)) + dc := &s.cfg.Dataset + run := nearestRun(time.Now().UTC().Add(-6 * time.Hour)) - // Check if we already have this run cubePath := filepath.Join(s.cfg.Dir, run.Format("20060102_15")) + ".cube" if _, err := os.Stat(cubePath); err == nil { - // File exists, check if it's fresh info, err := os.Stat(cubePath) if err == nil && time.Since(info.ModTime()) < s.cfg.TTL { - // File is fresh, just load it - c, err := openCube(cubePath) + c, err := openCube(cubePath, dc) if err != nil { return err } - ds := &dataset{cube: c, runUTC: run.Unix()} - s.data.Store(ds) + s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()}) s.cache = memCache{ttl: s.cfg.CacheTTL} return nil } } - // Download new data using S3 or HTTP - var downloadErr error - if s.cfg.UseS3 { - s3dl, err := NewS3Downloader(s.cfg.Dir, s.cfg.Parallel, s.cfg.S3Bucket, s.cfg.S3Region) - if err != nil { - return errcodes.Wrap(err, "failed to create S3 downloader") - } - downloadErr = s3dl.Run(ctx, run) - } else { - dl := Downloader{ - Dir: s.cfg.Dir, - Parallel: s.cfg.Parallel, - Client: http.DefaultClient, - DatasetURL: s.cfg.DatasetURL, - } - downloadErr = dl.Run(ctx, run) + downloadCtx, cancel := context.WithTimeout(ctx, 60*time.Minute) + defer cancel() + + dl := NewPartialDownloader(s.cfg.Dir, s.cfg.Parallel, dc) + if err := dl.Run(downloadCtx, run); err != nil { + return err } - if downloadErr != nil { - return downloadErr - 
} - - // Assemble cube if it doesn't exist if _, err := os.Stat(cubePath); err != nil { - if err := assembleCube(s.cfg.Dir, run, cubePath); err != nil { + if err := assembleCube(s.cfg.Dir, run, cubePath, dc); err != nil { return err } } - c, err := openCube(cubePath) + c, err := openCube(cubePath, dc) if err != nil { return err } - ds := &dataset{cube: c, runUTC: run.Unix()} - s.data.Store(ds) + s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()}) s.cache = memCache{ttl: s.cfg.CacheTTL} return nil } -func assembleCube(dir string, run time.Time, cubePath string) error { - const sizePerVar = 33 * 47 * 361 * 720 * 4 // 33 time steps (0-96 hours, 3-hour intervals), 47 pressure levels - total := int64(sizePerVar * 3) // 3 variables: gh, u, v +func assembleCube(dir string, run time.Time, cubePath string, dc *DatasetConfig) error { + sizePerVar := dc.SizePerVar() + total := dc.CubeSize() + gridBytes := int64(dc.GridSize()) * 4 + f, err := os.Create(cubePath) if err != nil { return err @@ -203,27 +171,23 @@ func assembleCube(dir string, run time.Time, cubePath string) error { defer f.Close() pIndex := make(map[int]int) - for i, p := range pressureLevels { + for i, p := range dc.Levels { pIndex[int(math.Round(p))] = i } - for ti, step := range steps { - fn := filepath.Join(dir, fileName(run, step)) + processFile := func(fn string, ti int) error { file, err := os.Open(fn) if err != nil { return err } messages, err := griblib.ReadMessages(file) - file.Close() // Close immediately after reading + file.Close() if err != nil { return err } for _, m := range messages { - // Check if this is a wind component (u or v) or geopotential height - // ParameterCategory 2 = momentum, ParameterNumber 2 = u-wind, 3 = v-wind - // ParameterCategory 3 = mass, ParameterNumber 5 = geopotential height if m.Section4.ProductDefinitionTemplateNumber != 0 { continue } @@ -231,7 +195,6 @@ func assembleCube(dir string, run time.Time, cubePath string) error { product := 
m.Section4.ProductDefinitionTemplate var varIdx int - // Match tawhiri variable order: ['gh', 'u', 'v'] (indices 0, 1, 2) if product.ParameterCategory == 2 { switch product.ParameterNumber { case 2: // u-wind @@ -242,18 +205,15 @@ func assembleCube(dir string, run time.Time, cubePath string) error { continue } } else if product.ParameterCategory == 3 && product.ParameterNumber == 5 { - // geopotential height - varIdx = 0 + varIdx = 0 // geopotential height } else { continue } - // Check if this is a pressure level (type 100) if product.FirstSurface.Type != 100 { continue } - // Get pressure level in hPa pressure := float64(product.FirstSurface.Value) / 100.0 pIdx, ok := pIndex[int(math.Round(pressure))] if !ok { @@ -261,14 +221,27 @@ func assembleCube(dir string, run time.Time, cubePath string) error { } vals := m.Data() - // GRIB library returns scan north->south, west->east already in row-major order raw := make([]byte, len(vals)*4) for i, v := range vals { binary.LittleEndian.PutUint32(raw[i*4:], math.Float32bits(float32(v))) } - base := int64(varIdx*sizePerVar + (ti*47+pIdx)*361*720*4) + base := int64(varIdx)*sizePerVar + (int64(ti)*int64(dc.NP)+int64(pIdx))*gridBytes copy(mm[base:base+int64(len(raw))], raw) } + return nil + } + + steps := dc.Steps() + for ti, step := range steps { + fn := filepath.Join(dir, dc.FileName(run, step)) + if err := processFile(fn, ti); err != nil { + return err + } + + fnB := filepath.Join(dir, dc.FileNameB(run, step)) + if err := processFile(fnB, ti); err != nil { + return err + } } return mm.Flush() } @@ -279,24 +252,21 @@ func (s *service) Extract(ctx context.Context, lat, lon, alt float64, ts time.Ti if d == nil { return zero, errcodes.ErrNoDataset } - if ts.Before(time.Unix(d.runUTC, 0)) || ts.After(time.Unix(d.runUTC, 0).Add(96*time.Hour)) { + maxDur := time.Duration(s.cfg.Dataset.MaxHour) * time.Hour + if ts.Before(time.Unix(d.runUTC, 0)) || ts.After(time.Unix(d.runUTC, 0).Add(maxDur)) { return zero, errcodes.ErrOutOfBounds } 
- // Try memory cache first key := encodeKey(lat, lon, alt, ts) if v, ok := s.cache.get(key); ok { return [2]float64(v), nil } - // Calculate result td := ts.Sub(time.Unix(d.runUTC, 0)).Hours() u, v := d.uv(lat, lon, alt, td) out := [2]float64{u, v} - // Cache in memory s.cache.set(key, vec(out)) - return out, nil } diff --git a/internal/pkg/grib/partial_downloader.go b/internal/pkg/grib/partial_downloader.go new file mode 100644 index 0000000..3752504 --- /dev/null +++ b/internal/pkg/grib/partial_downloader.go @@ -0,0 +1,350 @@ +package grib + +import ( + "bufio" + "context" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" + "git.intra.yksa.space/gsn/predictor/internal/pkg/log" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +// PartialDownloader загружает только необходимые поля из GRIB файлов +// используя HTTP Range requests и .idx индексные файлы +type PartialDownloader struct { + Dir string + Parallel int + Client *http.Client + Variables []string + ds *DatasetConfig +} + +// NewPartialDownloader создаёт новый partial downloader +func NewPartialDownloader(dir string, parallel int, dc *DatasetConfig) *PartialDownloader { + return &PartialDownloader{ + Dir: dir, + Parallel: parallel, + Client: &http.Client{ + Timeout: 60 * time.Second, + }, + Variables: dc.Variables, + ds: dc, + } +} + +// idxEntry представляет запись из .idx файла +type idxEntry struct { + Index int + ByteStart int64 + Date string + Variable string + Level string + Forecast string +} + +type ProgressWriter struct { + Total int64 + Downloaded int64 + OnProgress func(percent float64) +} + +func (pw *ProgressWriter) Write(p []byte) (int, error) { + n := len(p) + pw.Downloaded += int64(n) + if pw.Total > 0 && pw.OnProgress != nil { + percent := float64(pw.Downloaded) / float64(pw.Total) * 100 + pw.OnProgress(percent) + } + return n, nil +} + +// parseIdx парсит .idx файл и возвращает записи 
+func (d *PartialDownloader) parseIdx(body []byte) []idxEntry {
+	var entries []idxEntry
+	lines := strings.Split(string(body), "\n")
+
+	for _, line := range lines {
+		if line == "" {
+			continue
+		}
+		parts := strings.Split(line, ":")
+		if len(parts) < 7 {
+			continue
+		}
+
+		byteStart, _ := strconv.ParseInt(parts[1], 10, 64)
+		entries = append(entries, idxEntry{
+			Index:     len(entries),
+			ByteStart: byteStart,
+			Date:      parts[2],
+			Variable:  parts[3],
+			Level:     parts[4],
+			Forecast:  parts[5],
+		})
+	}
+	return entries
+}
+
+// filterEntries keeps only the needed variables on pressure levels ("... mb").
+func (d *PartialDownloader) filterEntries(entries []idxEntry) []idxEntry {
+	var filtered []idxEntry
+
+	for _, e := range entries {
+		isNeededVar := false
+		for _, v := range d.Variables {
+			if v == e.Variable {
+				isNeededVar = true
+				break
+			}
+		}
+
+		isPressureLevel := strings.HasSuffix(e.Level, " mb")
+
+		if isNeededVar && isPressureLevel {
+			filtered = append(filtered, e)
+		}
+	}
+
+	return filtered
+}
+
+// doWithRetry performs the request with up to 3 attempts and exponential backoff.
+func (d *PartialDownloader) doWithRetry(ctx context.Context, req *http.Request) (*http.Response, error) {
+	var resp *http.Response
+	var err error
+
+	backoff := 1 * time.Second
+	maxRetries := 3
+
+	for i := 0; i < maxRetries; i++ {
+		resp, err = d.Client.Do(req)
+		if err == nil && resp.StatusCode < 500 {
+			return resp, nil
+		}
+
+		if resp != nil {
+			resp.Body.Close()
+		}
+
+		log.Ctx(ctx).Warn("retry download", zap.Int("attempt", i+1), zap.Error(err))
+
+		select {
+		case <-time.After(backoff):
+			backoff *= 2
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+	return nil, fmt.Errorf("request failed after %d attempts: %v", maxRetries, err)
+}
+
+// downloadRange downloads a byte range from url into out via an HTTP Range request.
+func (d *PartialDownloader) downloadRange(ctx context.Context, url string, start, end int64, out io.Writer) error {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return err
+	}
+
+	rangeHeader := fmt.Sprintf("bytes=%d-", start)
+	if end > 0 {
+		rangeHeader = fmt.Sprintf("bytes=%d-%d", start, end)
+	}
+	req.Header.Set("Range", rangeHeader)
+
+	resp, err := d.Client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
+		return errcodes.Wrap(errcodes.ErrDownload, "bad status: "+resp.Status)
+	}
+
+	_, err = io.Copy(out, resp.Body)
+	return err
+}
+
+func (d *PartialDownloader) downloadFieldsFromURL(ctx context.Context, url string, dst string, step int) (err error) {
+	idxURL := url + ".idx"
+	tmp := dst + ".part"
+
+	if info, err := os.Stat(dst); err == nil && info.Size() > 0 {
+		return nil
+	}
+
+	reqIdx, _ := http.NewRequestWithContext(ctx, http.MethodGet, idxURL, nil)
+	respIdx, err := d.doWithRetry(ctx, reqIdx)
+	if err != nil {
+		return errcodes.Wrap(err, "failed to get idx")
+	}
+	defer respIdx.Body.Close()
+
+	idxBody, _ := io.ReadAll(respIdx.Body)
+	entries := d.parseIdx(idxBody)
+	filtered := d.filterEntries(entries)
+	if len(filtered) == 0 {
+		return nil
+	}
+
+	var totalBytes int64
+	type chunk struct{ start, end int64 }
+	chunks := make([]chunk, 0, len(filtered))
+
+	for _, entry := range filtered {
+		var endByte int64 = -1
+		for j, e := range entries {
+			if e.ByteStart == entry.ByteStart && j+1 < len(entries) {
+				endByte = entries[j+1].ByteStart - 1
+				break
+			}
+		}
+		chunks = append(chunks, chunk{entry.ByteStart, endByte})
+		if endByte > 0 {
+			totalBytes += (endByte - entry.ByteStart + 1)
+		}
+	}
+
+	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+	if err != nil {
+		return err
+	}
+
+	var downloaded int64
+
+	err = func() error {
+		defer f.Close()
+		bufWriter := bufio.NewWriterSize(f, 1024*1024)
+
+		for i, c := range chunks {
+			countingWriter := &proxyWriter{
+				Writer: bufWriter,
+				OnWrite: func(n int) {
+					downloaded += int64(n)
+					if totalBytes > 0 && i%20 == 0 {
+						pct := float64(downloaded) / float64(totalBytes) * 100
+						log.Ctx(ctx).Debug("download progress",
+							zap.Int("step", step),
+							zap.String("pct", fmt.Sprintf("%.1f%%", pct)))
+					}
+				},
+			}
+
+			if err := d.downloadRange(ctx, url, c.start, c.end, countingWriter); err != nil {
+				return err
+			}
+		}
+		return bufWriter.Flush()
+	}()
+
+	if err != nil {
+		// f is already closed by the deferred Close inside the func literal above
+		os.Remove(tmp)
+		return err
+	}
+
+	return d.safeRename(tmp, dst)
+}
+
+type proxyWriter struct {
+	io.Writer
+	OnWrite func(int)
+}
+
+func (p *proxyWriter) Write(data []byte) (int, error) {
+	n, err := p.Writer.Write(data)
+	if n > 0 && p.OnWrite != nil {
+		p.OnWrite(n)
+	}
+	return n, err
+}
+
+func (d *PartialDownloader) safeRename(src, dst string) error {
+	var lastErr error
+	for i := 0; i < 5; i++ {
+		if err := os.Rename(src, dst); err == nil {
+			return nil
+		} else {
+			lastErr = err
+		}
+		time.Sleep(150 * time.Millisecond)
+	}
+	return fmt.Errorf("rename failed: %w", lastErr)
+}
+
+// Run downloads every required file for the run (pgrb2 + pgrb2b).
+func (d *PartialDownloader) Run(ctx context.Context, run time.Time) error {
+	log.Ctx(ctx).Info("starting partial download",
+		zap.Time("run", run),
+		zap.Strings("variables", d.Variables))
+
+	g, ctx := errgroup.WithContext(ctx)
+	sem := make(chan struct{}, d.Parallel)
+	steps := d.ds.Steps()
+
+	for _, step := range steps {
+		step := step
+
+		// Download primary pgrb2
+		sem <- struct{}{}
+		g.Go(func() error {
+			defer func() { <-sem }()
+			url := d.ds.GribURL(run, step)
+			dst := filepath.Join(d.Dir, d.ds.FileName(run, step))
+			return d.downloadFieldsFromURL(ctx, url, dst, step)
+		})
+
+		// Download secondary pgrb2b
+		sem <- struct{}{}
+		g.Go(func() error {
+			defer func() { <-sem }()
+			url := d.ds.GribURLB(run, step)
+			dst := filepath.Join(d.Dir, d.ds.FileNameB(run, step))
+			return d.downloadFieldsFromURL(ctx, url, dst, step)
+		})
+	}
+
+	return g.Wait()
+}
+
+// GetLatestModelRun finds the most recent available GFS forecast run.
+func GetLatestModelRun(ctx context.Context, dc *DatasetConfig) (time.Time, error) {
+	now := time.Now().UTC()
+	hour := 
now.Hour() - (now.Hour() % 6) + current := time.Date(now.Year(), now.Month(), now.Day(), hour, 0, 0, 0, time.UTC) + + client := &http.Client{Timeout: 10 * time.Second} + + for i := 0; i < 8; i++ { + url := dc.GribURL(current, dc.MaxHour) + + req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) + if err != nil { + current = current.Add(-6 * time.Hour) + continue + } + + resp, err := client.Do(req) + if err == nil && resp.StatusCode == http.StatusOK { + resp.Body.Close() + log.Ctx(ctx).Info("found latest model run", zap.Time("run", current)) + return current, nil + } + if resp != nil { + resp.Body.Close() + } + + current = current.Add(-6 * time.Hour) + } + + return time.Time{}, errcodes.Wrap(errcodes.ErrDownload, "no recent GFS forecast found") +} diff --git a/internal/pkg/grib/pressure.go b/internal/pkg/grib/pressure.go index add6ff0..1b6dec0 100644 --- a/internal/pkg/grib/pressure.go +++ b/internal/pkg/grib/pressure.go @@ -2,15 +2,6 @@ package grib import "math" -// 47 pressure levels matching tawhiri configuration -var pressureLevels = []float64{ - 1000, 975, 950, 925, 900, 875, 850, 825, 800, 775, - 750, 725, 700, 675, 650, 625, 600, 575, 550, 525, - 500, 475, 450, 425, 400, 375, 350, 325, 300, 275, - 250, 225, 200, 175, 150, 125, 100, 70, 50, 30, - 20, 10, 7, 5, 3, 2, 1, -} - func pressureFromAlt(alt float64) float64 { // ICAO ISA return 1013.25 * math.Pow(1-alt/44307.69396, 5.255877) } diff --git a/internal/pkg/grib/s3_downloader.go b/internal/pkg/grib/s3_downloader.go deleted file mode 100644 index 0fa4c70..0000000 --- a/internal/pkg/grib/s3_downloader.go +++ /dev/null @@ -1,265 +0,0 @@ -package grib - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/s3" - "golang.org/x/sync/errgroup" -) - -// S3Downloader downloads GRIB files from AWS S3 -type 
S3Downloader struct { - Dir string - Parallel int - Bucket string - Region string - Client *s3.Client -} - -// NewS3Downloader creates a new S3 downloader with anonymous access -func NewS3Downloader(dir string, parallel int, bucket, region string) (*S3Downloader, error) { - // Create AWS config with anonymous credentials for public bucket - cfg, err := config.LoadDefaultConfig(context.Background(), - config.WithRegion(region), - config.WithCredentialsProvider(aws.AnonymousCredentials{}), - ) - if err != nil { - return nil, errcodes.Wrap(err, "failed to load AWS config") - } - - client := s3.NewFromConfig(cfg) - - return &S3Downloader{ - Dir: dir, - Parallel: parallel, - Bucket: bucket, - Region: region, - Client: client, - }, nil -} - -// s3Key generates the S3 key for a GRIB file -// Path format: gfs.YYYYMMDD/HH/atmos/gfs.tHHz.pgrb2.0p50.fFFF -func (d *S3Downloader) s3Key(run string, hour int, step int) string { - return fmt.Sprintf("gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2.0p50.f%03d", run, hour, hour, step) -} - -// CheckFileExists checks if a file exists in S3 using HeadObject -func (d *S3Downloader) CheckFileExists(ctx context.Context, key string) (bool, int64, error) { - input := &s3.HeadObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(key), - } - - result, err := d.Client.HeadObject(ctx, input) - if err != nil { - // Check if error is NotFound - // AWS SDK v2 doesn't export specific error types, check error string - if isNotFoundError(err) { - return false, 0, nil - } - return false, 0, errcodes.Wrap(err, "failed to check file existence") - } - - size := int64(0) - if result.ContentLength != nil { - size = *result.ContentLength - } - - return true, size, nil -} - -// isNotFoundError checks if error is a NotFound error -func isNotFoundError(err error) bool { - if err == nil { - return false - } - // AWS SDK v2 error handling - errStr := err.Error() - return contains(errStr, "NotFound") || contains(errStr, "404") || contains(errStr, "NoSuchKey") -} - 
-func contains(s, substr string) bool { - return len(s) >= len(substr) && (s == substr || len(s) > len(substr) && findSubstring(s, substr)) -} - -func findSubstring(s, substr string) bool { - for i := 0; i <= len(s)-len(substr); i++ { - if s[i:i+len(substr)] == substr { - return true - } - } - return false -} - -// ListAvailableFiles lists all available files for a given run -func (d *S3Downloader) ListAvailableFiles(ctx context.Context, run string, hour int) ([]string, error) { - prefix := fmt.Sprintf("gfs.%s/%02d/atmos/", run, hour) - - input := &s3.ListObjectsV2Input{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(prefix), - } - - var files []string - paginator := s3.NewListObjectsV2Paginator(d.Client, input) - - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - if err != nil { - return nil, errcodes.Wrap(err, "failed to list S3 objects") - } - - for _, obj := range page.Contents { - if obj.Key != nil { - files = append(files, *obj.Key) - } - } - } - - return files, nil -} - -// fetchFromS3 downloads a file from S3 to local disk with retry logic -func (d *S3Downloader) fetchFromS3(ctx context.Context, key, dst string) (err error) { - // Check if final file already exists - if _, err := os.Stat(dst); err == nil { - return nil - } - - const maxRetries = 3 - var lastErr error - - for attempt := 0; attempt < maxRetries; attempt++ { - if attempt > 0 { - // Exponential backoff: 2s, 4s, 8s - waitTime := time.Duration(1< 0 && written != size { - return errcodes.Wrap(errcodes.ErrDownload, fmt.Sprintf("size mismatch: got %d bytes, expected %d", written, size)) - } - - // Close file before rename - if err := f.Close(); err != nil { - return err - } - fileClosed = true - - // If rename fails, err will be set and defer will cleanup .part file - return os.Rename(tmp, dst) -} - -// Run downloads all required GRIB files for a forecast run -func (d *S3Downloader) Run(ctx context.Context, run time.Time) error { - runStr := run.Format("20060102") - hour 
:= run.Hour() - - // First, list available files to verify they exist - availableFiles, err := d.ListAvailableFiles(ctx, runStr, hour) - if err != nil { - return errcodes.Wrap(err, "failed to list available files") - } - - if len(availableFiles) == 0 { - return errcodes.Wrap(errcodes.ErrDownload, fmt.Sprintf("no files found for run %s/%02d", runStr, hour)) - } - - // Build a map of available files for quick lookup - availableMap := make(map[string]bool) - for _, file := range availableFiles { - availableMap[file] = true - } - - g, ctx := errgroup.WithContext(ctx) - sem := make(chan struct{}, d.Parallel) - - for _, step := range steps { - step := step - key := d.s3Key(runStr, hour, step) - - // Check if file is available in S3 - if !availableMap[key] { - // Log warning but don't fail - some forecast hours might not be available yet - continue - } - - sem <- struct{}{} - g.Go(func() error { - defer func() { <-sem }() - dst := filepath.Join(d.Dir, fileName(run, step)) - return d.fetchFromS3(ctx, key, dst) - }) - } - - return g.Wait() -} diff --git a/internal/pkg/grib/util.go b/internal/pkg/grib/util.go index 14b00be..1145a61 100644 --- a/internal/pkg/grib/util.go +++ b/internal/pkg/grib/util.go @@ -6,25 +6,11 @@ import ( "time" ) -// Generate steps from 0 to 96 with step 3 hours (33 steps total) -// GFS provides 3-hourly data for 0-120 hours, we use first 96 hours (0, 3, 6, ..., 96) -var steps = func() []int { - result := make([]int, 0, 33) - for i := 0; i <= 96; i += 3 { - result = append(result, i) - } - return result -}() - func nearestRun(t time.Time) time.Time { h := t.UTC().Hour() - t.UTC().Hour()%6 return time.Date(t.Year(), t.Month(), t.Day(), h, 0, 0, 0, time.UTC) } -func fileName(run time.Time, step int) string { - return fmt.Sprintf("gfs.t%02dz.pgrb2.0p50.f%03d", run.Hour(), step) -} - func encodeKey(a ...any) uint64 { h := fnv.New64a() for _, v := range a { diff --git a/internal/service/predictor.go b/internal/service/predictor.go index bb53b3f..9138c5e 
100644 --- a/internal/service/predictor.go +++ b/internal/service/predictor.go @@ -90,7 +90,7 @@ func (s *Service) PerformPrediction(ctx context.Context, params ds.PredictionPar } } - log.Ctx(ctx).Info("Starting prediction", + log.Ctx(ctx).Warn("🚀 PREDICTION STARTING", zap.String("profile", profile), zap.Float64("lat", *params.LaunchLatitude), zap.Float64("lon", *params.LaunchLongitude), @@ -325,6 +325,10 @@ func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParame const dt = 10.0 // simulation step in seconds const outputInterval = 60.0 // output every 60 seconds + log.Ctx(ctx).Warn("⬆️ ASCENT SIMULATION STARTING", + zap.Float64("ascentRate", ascentRate), + zap.Float64("targetAlt", targetAltitude)) + lat := *params.LaunchLatitude lon := *params.LaunchLongitude alt := *params.LaunchAltitude @@ -349,12 +353,29 @@ func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParame }) nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second) + firstExtraction := true windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) { w, err := s.ExtractWind(ctx, lat, lon, alt, t) if err != nil { - log.Ctx(ctx).Warn("Wind extraction failed during ascent", zap.Error(err)) + log.Ctx(ctx).Error("Wind extraction FAILED during ascent", + zap.Error(err), + zap.Float64("lat", lat), + zap.Float64("lon", lon), + zap.Float64("alt", alt), + zap.Time("time", t)) return 0, 0 } + // Log only first extraction and when wind is zero + if firstExtraction || (w[0] == 0 && w[1] == 0) { + log.Ctx(ctx).Warn("Wind data check", + zap.Bool("first", firstExtraction), + zap.Float64("lat", lat), + zap.Float64("lon", lon), + zap.Float64("alt", alt), + zap.Float64("u", w[0]), + zap.Float64("v", w[1])) + firstExtraction = false + } return w[0], w[1] } @@ -370,6 +391,23 @@ func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParame alt = altNew if alt >= targetAltitude { + alt = targetAltitude + // Record burst point + 
wU, wV := windFunc(lat, lon, alt, timeCur) + latCopy := lat + lonCopy := lon + altCopy := alt + timeCopy := timeCur + windU := wU + windV := wV + results = append(results, ds.PredicitonResult{ + Latitude: &latCopy, + Longitude: &lonCopy, + Altitude: &altCopy, + Timestamp: &timeCopy, + WindU: &windU, + WindV: &windV, + }) break } @@ -427,14 +465,19 @@ func (s *Service) simulateDescent(ctx context.Context, params ds.PredictionParam windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) { w, err := s.ExtractWind(ctx, lat, lon, alt, t) if err != nil { - log.Ctx(ctx).Warn("Wind extraction failed during descent", zap.Error(err)) + log.Ctx(ctx).Error("Wind extraction FAILED during descent", + zap.Error(err), + zap.Float64("lat", lat), + zap.Float64("lon", lon), + zap.Float64("alt", alt), + zap.Time("time", t)) return 0, 0 } return w[0], w[1] } for alt > targetAltitude { - altRate := -descentRate + altRate := -descentRateAtAlt(descentRate, alt) if customCurve != nil { altRate = -s.getCustomAltitudeRate(customCurve, alt, descentRate) } @@ -445,6 +488,23 @@ func (s *Service) simulateDescent(ctx context.Context, params ds.PredictionParam alt = altNew if alt <= targetAltitude { + alt = targetAltitude + // Record landing point + wU, wV := windFunc(lat, lon, alt, timeCur) + latCopy := lat + lonCopy := lon + altCopy := alt + timeCopy := timeCur + windU := wU + windV := wV + results = append(results, ds.PredicitonResult{ + Latitude: &latCopy, + Longitude: &lonCopy, + Altitude: &altCopy, + Timestamp: &timeCopy, + WindU: &windU, + WindV: &windV, + }) break } @@ -539,6 +599,37 @@ func (s *Service) simulateFloat(ctx context.Context, startResult ds.PredicitonRe return results } +// airDensity returns ISA air density in kg/m³ at given altitude in meters +func airDensity(h float64) float64 { + var T, p float64 + switch { + case h < 11000: + T = 288.15 - 0.0065*h + p = 101325 * math.Pow(T/288.15, 5.2561) + case h < 20000: + T = 216.65 + p = 22632.1 * 
math.Exp(-0.00015769*(h-11000)) + case h < 32000: + T = 216.65 + 0.001*(h-20000) + p = 5474.89 * math.Pow(T/216.65, -34.1632) + default: + T = 228.65 + 0.0028*(h-32000) + p = 868.019 * math.Pow(T/228.65, -12.2009) + } + return p / (287.05 * T) +} + +// descentRateAtAlt returns descent rate adjusted for air density at altitude. +// descent_rate parameter is the sea-level rate. At altitude, thinner air means faster descent. +func descentRateAtAlt(seaLevelRate, alt float64) float64 { + rho0 := airDensity(0) + rhoH := airDensity(alt) + if rhoH <= 0 { + return seaLevelRate + } + return seaLevelRate * math.Sqrt(rho0/rhoH) +} + func (s *Service) simulateCustomAscent(ctx context.Context, params ds.PredictionParameters, curve *CustomCurve) []ds.PredicitonResult { // Implementation for custom ascent curve // This would interpolate the altitude rate from the custom curve diff --git a/scripts/compare.go b/scripts/compare.go new file mode 100644 index 0000000..5e50653 --- /dev/null +++ b/scripts/compare.go @@ -0,0 +1,114 @@ +package main + +import ( + "encoding/json" + "fmt" + "math" + "os" +) + +type Point struct { + Datetime string `json:"datetime"` + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Altitude float64 `json:"altitude"` +} + +type Stage struct { + Stage string `json:"stage"` + Trajectory []Point `json:"trajectory"` +} + +type Prediction struct { + Prediction []Stage `json:"prediction"` +} + +func haversine(lat1, lon1, lat2, lon2 float64) float64 { + R := 6371000.0 + phi1, phi2 := lat1*math.Pi/180, lat2*math.Pi/180 + dphi := (lat2 - lat1) * math.Pi / 180 + dlam := (lon2 - lon1) * math.Pi / 180 + a := math.Sin(dphi/2)*math.Sin(dphi/2) + math.Cos(phi1)*math.Cos(phi2)*math.Sin(dlam/2)*math.Sin(dlam/2) + return R * 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) +} + +func load(path string) Prediction { + data, _ := os.ReadFile(path) + var p Prediction + json.Unmarshal(data, &p) + return p +} + +func main() { + our := load("c:/tmp/our.json") + 
taw := load("c:/tmp/tawhiri.json") + + // Find burst and landing points + var ourBurst, ourLand, tawBurst, tawLand Point + for _, s := range our.Prediction { + t := s.Trajectory + if s.Stage == "ascent" { + ourBurst = t[len(t)-1] + } + if s.Stage == "descent" { + ourLand = t[len(t)-1] + } + } + for _, s := range taw.Prediction { + t := s.Trajectory + if s.Stage == "ascent" { + tawBurst = t[len(t)-1] + } + if s.Stage == "descent" { + tawLand = t[len(t)-1] + } + } + + fmt.Println("=== Burst Point ===") + fmt.Printf(" Our: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", ourBurst.Latitude, ourBurst.Longitude, ourBurst.Altitude, ourBurst.Datetime) + fmt.Printf(" Tawhiri: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", tawBurst.Latitude, tawBurst.Longitude, tawBurst.Altitude, tawBurst.Datetime) + burstDist := haversine(ourBurst.Latitude, ourBurst.Longitude, tawBurst.Latitude, tawBurst.Longitude) + fmt.Printf(" Distance: %.2f km\n", burstDist/1000) + + fmt.Println() + fmt.Println("=== Landing Point ===") + fmt.Printf(" Our: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", ourLand.Latitude, ourLand.Longitude, ourLand.Altitude, ourLand.Datetime) + fmt.Printf(" Tawhiri: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", tawLand.Latitude, tawLand.Longitude, tawLand.Altitude, tawLand.Datetime) + landDist := haversine(ourLand.Latitude, ourLand.Longitude, tawLand.Latitude, tawLand.Longitude) + fmt.Printf(" Distance: %.2f km\n", landDist/1000) + + fmt.Println() + fmt.Println("=== Trajectory Comparison (every 10 min) ===") + ourPts := map[string]Point{} + tawPts := map[string]Point{} + for _, s := range our.Prediction { + for _, p := range s.Trajectory { + ourPts[p.Datetime] = p + } + } + for _, s := range taw.Prediction { + for _, p := range s.Trajectory { + tawPts[p.Datetime] = p + } + } + + // Collect common times + var common []string + for _, s := range our.Prediction { + for _, p := range s.Trajectory { + if _, ok := tawPts[p.Datetime]; ok { + common = append(common, p.Datetime) + } + } + } + + for i, 
t := range common { + if i%10 == 0 { + o := ourPts[t] + tw := tawPts[t] + d := haversine(o.Latitude, o.Longitude, tw.Latitude, tw.Longitude) + fmt.Printf(" %s: dist=%.2f km (our: %.3f,%.3f vs taw: %.3f,%.3f)\n", + t, d/1000, o.Latitude, o.Longitude, tw.Latitude, tw.Longitude) + } + } +} diff --git a/scripts/rebuild_cube.go b/scripts/rebuild_cube.go new file mode 100644 index 0000000..240b707 --- /dev/null +++ b/scripts/rebuild_cube.go @@ -0,0 +1,44 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" +) + +func main() { + ctx := context.Background() + + cfg := &grib.Config{ + Dir: "C:/tmp/grib", + TTL: 48 * time.Hour, + CacheTTL: 1 * time.Hour, + Parallel: 8, + } + + svc, err := grib.New(cfg) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + + // Delete old cube to force rebuild + cubePath := "C:/tmp/grib/20260212_12.cube" + if err := os.Remove(cubePath); err != nil && !os.IsNotExist(err) { + fmt.Printf("Remove cube error: %v\n", err) + } else { + fmt.Println("Old cube removed") + } + + // Update will download missing pgrb2b files and rebuild cube + fmt.Println("Starting update (download pgrb2b + rebuild cube)...") + start := time.Now() + if err := svc.Update(ctx); err != nil { + fmt.Printf("Update error: %v\n", err) + return + } + fmt.Printf("Done in %v\n", time.Since(start)) +} diff --git a/scripts/test_download.go b/scripts/test_download.go new file mode 100644 index 0000000..73934a5 --- /dev/null +++ b/scripts/test_download.go @@ -0,0 +1,36 @@ +package main + +import ( + "context" + "fmt" + "time" + + "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" +) + +func main() { + ctx := context.Background() + + // Найти последний доступный прогноз + run, err := grib.GetLatestModelRun(ctx) + if err != nil { + fmt.Printf("Error finding model run: %v\n", err) + return + } + fmt.Printf("Found model run: %v\n", run) + + // Создать downloader + dl := 
grib.NewPartialDownloader("C:/tmp/grib", 8) + + // Запустить загрузку + start := time.Now() + fmt.Println("Starting download...") + + err = dl.Run(ctx, run) + if err != nil { + fmt.Printf("Download error: %v\n", err) + return + } + + fmt.Printf("Download completed in %v\n", time.Since(start)) +} diff --git a/scripts/test_gh.go b/scripts/test_gh.go new file mode 100644 index 0000000..bfb592c --- /dev/null +++ b/scripts/test_gh.go @@ -0,0 +1,60 @@ +package main + +import ( + "encoding/binary" + "fmt" + "math" + "os" + + mmap "github.com/edsrzf/mmap-go" +) + +var pressureLevels = []float64{ + 1000, 975, 950, 925, 900, 875, 850, 825, 800, 775, + 750, 725, 700, 675, 650, 625, 600, 575, 550, 525, + 500, 475, 450, 425, 400, 375, 350, 325, 300, 275, + 250, 225, 200, 175, 150, 125, 100, 70, 50, 30, + 20, 10, 7, 5, 3, 2, 1, +} + +func main() { + f, _ := os.Open("C:/tmp/grib/20260212_12.cube") + mm, _ := mmap.Map(f, mmap.RDONLY, 0) + defer mm.Unmap() + defer f.Close() + + const ( + nT = 97 + nP = 47 + nLat = 721 + nLon = 1440 + ) + bytesPerVar := int64(nT * nP * nLat * nLon * 4) + + val := func(varIdx, ti, pi, y, x int) float32 { + idx := (((ti*nP + pi) * nLat) + y) * nLon + x + off := int64(varIdx)*bytesPerVar + int64(idx)*4 + bits := binary.LittleEndian.Uint32(mm[off : off+4]) + return math.Float32frombits(bits) + } + + // Check gh values at lat=52.2N (y=(90-52.2)*4=151.2 → y=151), lon=0.1E (x=0.1*4=0.4 → x=0) + // Time step 9 (9 hours into forecast) + ti := 9 + y := 151 + x := 0 + + fmt.Println("GH values at (52.25N, 0E), t=+9h:") + fmt.Printf("%8s %8s %10s\n", "Level", "hPa", "GH(m)") + for pi := 0; pi < nP; pi++ { + gh := val(0, ti, pi, y, x) + fmt.Printf("%8d %8.0f %10.1f\n", pi, pressureLevels[pi], gh) + } + + fmt.Println("\nU-wind values at same point:") + fmt.Printf("%8s %8s %10s\n", "Level", "hPa", "U(m/s)") + for pi := 0; pi < nP; pi++ { + u := val(1, ti, pi, y, x) + fmt.Printf("%8d %8.0f %10.2f\n", pi, pressureLevels[pi], u) + } +} diff --git 
a/scripts/test_grib_read.go b/scripts/test_grib_read.go new file mode 100644 index 0000000..3b8b7eb --- /dev/null +++ b/scripts/test_grib_read.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "os" + + "github.com/nilsmagnus/grib/griblib" +) + +func main() { + f, err := os.Open("C:/tmp/grib/gfs.t18z.pgrb2.0p25.f000") + if err != nil { + fmt.Printf("Error opening file: %v\n", err) + return + } + defer f.Close() + + messages, err := griblib.ReadMessages(f) + if err != nil { + fmt.Printf("Error reading GRIB: %v\n", err) + return + } + + fmt.Printf("Found %d messages\n\n", len(messages)) + + for i, m := range messages { + product := m.Section4.ProductDefinitionTemplate + if product.ParameterCategory != 2 || product.ParameterNumber != 2 { + continue // only u-wind + } + fmt.Printf("UGRD Msg %d: SurfType=%d SurfValue=%d SurfScale=%d DataLen=%d\n", + i, + product.FirstSurface.Type, + product.FirstSurface.Value, + product.FirstSurface.Scale, + len(m.Data())) + } +} diff --git a/scripts/test_prediction.go b/scripts/test_prediction.go new file mode 100644 index 0000000..fb56c5a --- /dev/null +++ b/scripts/test_prediction.go @@ -0,0 +1,87 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" +) + +func main() { + ctx := context.Background() + + // Инициализируем GRIB сервис + cfg := &grib.Config{ + Dir: "C:/tmp/grib", + TTL: 48 * time.Hour, + CacheTTL: 1 * time.Hour, + Parallel: 8, + } + + svc, err := grib.New(cfg) + if err != nil { + fmt.Printf("Error creating service: %v\n", err) + return + } + + // Обновляем данные (создаёт куб) + fmt.Println("Updating GRIB data (building cube)...") + start := time.Now() + if err := svc.Update(ctx); err != nil { + fmt.Printf("Update error: %v\n", err) + return + } + fmt.Printf("Cube built in %v\n", time.Since(start)) + + // Тестируем извлечение ветра + fmt.Println("\nTesting wind extraction...") + lat, lon, alt := 52.2, 0.1, 10000.0 + 
ts := time.Date(2026, 2, 11, 12, 0, 0, 0, time.UTC) + + wind, err := svc.Extract(ctx, lat, lon, alt, ts) + if err != nil { + fmt.Printf("Extract error: %v\n", err) + return + } + fmt.Printf("Wind at (%.2f, %.2f, %.0fm) at %v:\n", lat, lon, alt, ts) + fmt.Printf(" U (east): %.2f m/s\n", wind[0]) + fmt.Printf(" V (north): %.2f m/s\n", wind[1]) + + // Сравниваем с Tawhiri + fmt.Println("\nComparing with Tawhiri API...") + tawhiriURL := fmt.Sprintf( + "https://api.v2.sondehub.org/tawhiri?launch_latitude=%.2f&launch_longitude=%.2f&launch_altitude=0&launch_datetime=%s&ascent_rate=5&burst_altitude=30000&descent_rate=5", + lat, lon, ts.Format(time.RFC3339), + ) + + resp, err := http.Get(tawhiriURL) + if err != nil { + fmt.Printf("Tawhiri request error: %v\n", err) + return + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + var tawhiriResp map[string]interface{} + json.Unmarshal(body, &tawhiriResp) + + // Выводим финальную точку приземления + if prediction, ok := tawhiriResp["prediction"].([]interface{}); ok { + for _, stage := range prediction { + stageMap := stage.(map[string]interface{}) + if stageMap["stage"] == "descent" { + trajectory := stageMap["trajectory"].([]interface{}) + if len(trajectory) > 0 { + last := trajectory[len(trajectory)-1].(map[string]interface{}) + fmt.Printf("\nTawhiri landing point:\n") + fmt.Printf(" Lat: %.4f\n", last["latitude"]) + fmt.Printf(" Lon: %.4f\n", last["longitude"]) + } + } + } + } +} diff --git a/scripts/test_s3_download.go b/scripts/test_s3_download.go deleted file mode 100644 index 0391cd6..0000000 --- a/scripts/test_s3_download.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - "os" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" -) - -func main() { - ctx := context.Background() - - // Create S3 downloader - downloader, err := grib.NewS3Downloader( - "/tmp/grib_test", - 4, // parallel downloads - "noaa-gfs-bdp-pds", - "us-east-1", - ) - if err != nil { 
- log.Fatalf("Failed to create S3 downloader: %v", err) - } - - // Ensure directory exists - if err := os.MkdirAll("/tmp/grib_test", 0o755); err != nil { - log.Fatalf("Failed to create directory: %v", err) - } - - // Find nearest run (6-hour intervals: 00, 06, 12, 18 UTC) - now := time.Now().UTC() - hour := now.Hour() - (now.Hour() % 6) - // Use data from 6 hours ago to ensure it's available - run := time.Date(now.Year(), now.Month(), now.Day(), hour, 0, 0, 0, time.UTC).Add(-6 * time.Hour) - - fmt.Printf("Testing S3 download for run: %s\n", run.Format("2006-01-02 15:04 MST")) - - // List available files first - runStr := run.Format("20060102") - fmt.Printf("Listing available files for %s/%02d...\n", runStr, run.Hour()) - files, err := downloader.ListAvailableFiles(ctx, runStr, run.Hour()) - if err != nil { - log.Fatalf("Failed to list files: %v", err) - } - - fmt.Printf("Found %d files in S3:\n", len(files)) - if len(files) > 0 { - // Show first 5 files - for i, file := range files { - if i >= 5 { - fmt.Printf("... and %d more files\n", len(files)-5) - break - } - fmt.Printf(" - %s\n", file) - } - } - - // Try downloading just first 3 forecast hours (f000, f001, f002) - fmt.Println("\nTesting download of first 3 forecast hours...") - testRun := run - - // Create a timeout context for the download - downloadCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - - if err := downloader.Run(downloadCtx, testRun); err != nil { - log.Fatalf("Failed to download: %v", err) - } - - fmt.Println("\nDownload completed successfully!") - - // Check downloaded files - entries, err := os.ReadDir("/tmp/grib_test") - if err != nil { - log.Fatalf("Failed to read directory: %v", err) - } - - fmt.Printf("\nDownloaded %d files:\n", len(entries)) - for i, entry := range entries { - if i >= 10 { - fmt.Printf("... 
and %d more files\n", len(entries)-10) - break - } - info, _ := entry.Info() - fmt.Printf(" - %s (%.2f MB)\n", entry.Name(), float64(info.Size())/1024/1024) - } -} diff --git a/scripts/test_s3_simple.go b/scripts/test_s3_simple.go deleted file mode 100644 index 2ae8414..0000000 --- a/scripts/test_s3_simple.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io" - "log" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/s3" -) - -func main() { - ctx := context.Background() - - // Create AWS config with anonymous credentials - cfg, err := config.LoadDefaultConfig(ctx, - config.WithRegion("us-east-1"), - config.WithCredentialsProvider(aws.AnonymousCredentials{}), - ) - if err != nil { - log.Fatalf("Failed to load config: %v", err) - } - - client := s3.NewFromConfig(cfg) - - // Try to download a single file - bucket := "noaa-gfs-bdp-pds" - key := "gfs.20251020/00/atmos/gfs.t00z.pgrb2.0p50.f000" - - fmt.Printf("Downloading: s3://%s/%s\n", bucket, key) - - input := &s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - - result, err := client.GetObject(ctx, input) - if err != nil { - log.Fatalf("Failed to get object: %v", err) - } - defer result.Body.Close() - - // Create output file - outFile := "/tmp/test_grib.part" - f, err := os.Create(outFile) - if err != nil { - log.Fatalf("Failed to create file: %v", err) - } - defer f.Close() - - // Copy data - written, err := io.Copy(f, result.Body) - if err != nil { - log.Fatalf("Failed to copy data: %v (wrote %d bytes)", err, written) - } - - fmt.Printf("Successfully downloaded %d bytes\n", written) - - // Rename - if err := os.Rename(outFile, "/tmp/test_grib"); err != nil { - log.Fatalf("Failed to rename: %v", err) - } - - fmt.Println("Download complete!") -} diff --git a/scripts/test_wind.go b/scripts/test_wind.go new file mode 100644 index 0000000..61a231a --- /dev/null +++ 
b/scripts/test_wind.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "fmt" + "time" + + "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" +) + +func main() { + ctx := context.Background() + + cfg := &grib.Config{ + Dir: "C:/tmp/grib", + TTL: 48 * time.Hour, + CacheTTL: 1 * time.Hour, + Parallel: 8, + } + + svc, err := grib.New(cfg) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + + if err := svc.Update(ctx); err != nil { + fmt.Printf("Update error: %v\n", err) + return + } + + // Test wind at lat=52.2, lon=0.1 at various altitudes + // Run is 2026-02-12T12:00Z, request time 21:00Z = +9 hours + ts := time.Date(2026, 2, 12, 21, 0, 0, 0, time.UTC) + lat, lon := 52.2, 0.1 + + fmt.Println("Wind at (52.2°N, 0.1°E) at 2026-02-12T21:00Z:") + fmt.Printf("%8s %8s %8s\n", "Alt(m)", "U(m/s)", "V(m/s)") + + for _, alt := range []float64{0, 1000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000} { + w, err := svc.Extract(ctx, lat, lon, alt, ts) + if err != nil { + fmt.Printf("%8.0f Error: %v\n", alt, err) + continue + } + fmt.Printf("%8.0f %8.2f %8.2f\n", alt, w[0], w[1]) + } + + // Also test at a few nearby points to check spatial consistency + fmt.Println("\nWind at 10km altitude, varying longitude:") + for _, testLon := range []float64{0.0, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0, 350.0, 359.75} { + w, _ := svc.Extract(ctx, lat, testLon, 10000, ts) + fmt.Printf(" lon=%6.2f: U=%8.2f V=%8.2f\n", testLon, w[0], w[1]) + } +} diff --git a/start_with_http.sh b/start_with_http.sh new file mode 100644 index 0000000..4173c2b --- /dev/null +++ b/start_with_http.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Start API with HTTP downloads instead of S3 + +export GSN_PREDICTOR_GRIB_USE_S3=false + +echo "Starting API with HTTP downloads from NOMADS..." 
+echo "USE_S3 = $GSN_PREDICTOR_GRIB_USE_S3" +echo "" + +cd "$(dirname "$0")" +go run ./cmd/api/main.go -- 2.47.3 From 82ef1cb3b89a8d2da5183fec33cf1f0838b9c9db Mon Sep 17 00:00:00 2001 From: "a.antonov" Date: Sat, 28 Mar 2026 00:38:16 +0900 Subject: [PATCH 6/8] feat: cleanup --- .claude/settings.local.json | 17 - .dockerignore | 55 -- .gitignore | 68 -- .vscode/settings.json | 3 - Dockerfile | 57 -- Makefile | 113 --- api/rest/predictor.swagger.yml | 191 ---- assemble_cube.go | 26 - cmd/api/main.go | 114 --- go.mod | 47 - go.sum | 113 --- internal/jobs/grib/updater/config.go | 23 - internal/jobs/grib/updater/deps.go | 8 - internal/jobs/grib/updater/updater.go | 51 - internal/pkg/ds/predictor.go | 96 -- internal/pkg/errcodes/errcodes.go | 102 -- internal/pkg/grib/README.md | 100 -- internal/pkg/grib/assemble_test.go | 25 - internal/pkg/grib/cache.go | 36 - internal/pkg/grib/config.go | 139 --- internal/pkg/grib/create_dataset.go | 0 internal/pkg/grib/cube.go | 56 -- internal/pkg/grib/dataset.go | 14 - internal/pkg/grib/extractor.go | 126 --- internal/pkg/grib/grib.go | 291 ------ internal/pkg/grib/partial_downloader.go | 350 ------- internal/pkg/grib/pressure.go | 7 - internal/pkg/grib/util.go | 20 - internal/pkg/log/log.go | 23 - internal/service/deps.go | 12 - internal/service/predictor.go | 684 ------------- internal/service/predictor_test.go | 492 ---------- internal/service/service.go | 60 -- internal/transport/middleware/log.go | 44 - internal/transport/rest/config.go | 24 - internal/transport/rest/handler/deps.go | 14 - internal/transport/rest/handler/handler.go | 194 ---- internal/transport/rest/transport.go | 47 - pkg/rest/oas_cfg_gen.go | 291 ------ pkg/rest/oas_client_gen.go | 520 ---------- pkg/rest/oas_handlers_gen.go | 379 -------- pkg/rest/oas_json_gen.go | 963 ------------------- pkg/rest/oas_labeler_gen.go | 42 - pkg/rest/oas_middleware_gen.go | 10 - pkg/rest/oas_operations_gen.go | 11 - pkg/rest/oas_parameters_gen.go | 1004 -------------------- 
pkg/rest/oas_request_decoders_gen.go | 3 - pkg/rest/oas_request_encoders_gen.go | 3 - pkg/rest/oas_response_decoders_gen.go | 198 ---- pkg/rest/oas_response_encoders_gen.go | 68 -- pkg/rest/oas_router_gen.go | 258 ----- pkg/rest/oas_schemas_gen.go | 746 --------------- pkg/rest/oas_server_gen.go | 46 - pkg/rest/oas_unimplemented_gen.go | 40 - pkg/rest/oas_validators_gen.go | 232 ----- pkg/scheduler/config.go | 20 - pkg/scheduler/scheduler.go | 97 -- scripts/compare.go | 114 --- scripts/rebuild_cube.go | 44 - scripts/test_download.go | 36 - scripts/test_gh.go | 60 -- scripts/test_grib_read.go | 38 - scripts/test_prediction.go | 87 -- scripts/test_predictor_vs_reference.py | 303 ------ scripts/test_wind.go | 55 -- start_with_http.sh | 11 - 66 files changed, 9521 deletions(-) delete mode 100644 .claude/settings.local.json delete mode 100644 .dockerignore delete mode 100644 .gitignore delete mode 100644 .vscode/settings.json delete mode 100644 Dockerfile delete mode 100644 Makefile delete mode 100644 api/rest/predictor.swagger.yml delete mode 100644 assemble_cube.go delete mode 100644 cmd/api/main.go delete mode 100644 go.mod delete mode 100644 go.sum delete mode 100644 internal/jobs/grib/updater/config.go delete mode 100644 internal/jobs/grib/updater/deps.go delete mode 100644 internal/jobs/grib/updater/updater.go delete mode 100644 internal/pkg/ds/predictor.go delete mode 100644 internal/pkg/errcodes/errcodes.go delete mode 100644 internal/pkg/grib/README.md delete mode 100644 internal/pkg/grib/assemble_test.go delete mode 100644 internal/pkg/grib/cache.go delete mode 100644 internal/pkg/grib/config.go delete mode 100644 internal/pkg/grib/create_dataset.go delete mode 100644 internal/pkg/grib/cube.go delete mode 100644 internal/pkg/grib/dataset.go delete mode 100644 internal/pkg/grib/extractor.go delete mode 100644 internal/pkg/grib/grib.go delete mode 100644 internal/pkg/grib/partial_downloader.go delete mode 100644 internal/pkg/grib/pressure.go delete mode 100644 
internal/pkg/grib/util.go delete mode 100644 internal/pkg/log/log.go delete mode 100644 internal/service/deps.go delete mode 100644 internal/service/predictor.go delete mode 100644 internal/service/predictor_test.go delete mode 100644 internal/service/service.go delete mode 100644 internal/transport/middleware/log.go delete mode 100644 internal/transport/rest/config.go delete mode 100644 internal/transport/rest/handler/deps.go delete mode 100644 internal/transport/rest/handler/handler.go delete mode 100644 internal/transport/rest/transport.go delete mode 100644 pkg/rest/oas_cfg_gen.go delete mode 100644 pkg/rest/oas_client_gen.go delete mode 100644 pkg/rest/oas_handlers_gen.go delete mode 100644 pkg/rest/oas_json_gen.go delete mode 100644 pkg/rest/oas_labeler_gen.go delete mode 100644 pkg/rest/oas_middleware_gen.go delete mode 100644 pkg/rest/oas_operations_gen.go delete mode 100644 pkg/rest/oas_parameters_gen.go delete mode 100644 pkg/rest/oas_request_decoders_gen.go delete mode 100644 pkg/rest/oas_request_encoders_gen.go delete mode 100644 pkg/rest/oas_response_decoders_gen.go delete mode 100644 pkg/rest/oas_response_encoders_gen.go delete mode 100644 pkg/rest/oas_router_gen.go delete mode 100644 pkg/rest/oas_schemas_gen.go delete mode 100644 pkg/rest/oas_server_gen.go delete mode 100644 pkg/rest/oas_unimplemented_gen.go delete mode 100644 pkg/rest/oas_validators_gen.go delete mode 100644 pkg/scheduler/config.go delete mode 100644 pkg/scheduler/scheduler.go delete mode 100644 scripts/compare.go delete mode 100644 scripts/rebuild_cube.go delete mode 100644 scripts/test_download.go delete mode 100644 scripts/test_gh.go delete mode 100644 scripts/test_grib_read.go delete mode 100644 scripts/test_prediction.go delete mode 100644 scripts/test_predictor_vs_reference.py delete mode 100644 scripts/test_wind.go delete mode 100644 start_with_http.sh diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index af05740..0000000 --- 
a/.claude/settings.local.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(cat:*)", - "Bash(xargs:*)", - "Bash(ls:*)", - "Bash(done)", - "Bash(curl:*)", - "WebFetch(domain:raw.githubusercontent.com)", - "WebFetch(domain:github.com)", - "Bash(go run:*)", - "Bash(pkill:*)" - ], - "deny": [], - "ask": [] - } -} diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 22e12b6..0000000 --- a/.dockerignore +++ /dev/null @@ -1,55 +0,0 @@ -# Git -.git -.gitignore - -# Docker -Dockerfile -docker-compose.yml -.dockerignore - -# Documentation -README.md -*.md - -# IDE -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# OS -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db - -# Environment files -.env -.env.local -.env.*.local - -# Build artifacts -predictor -*.exe -*.exe~ -*.dll -*.so -*.dylib -*.test - -# Logs -*.log - -# Temporary files -/tmp/ -/temp/ -*.py -# Test coverage -*.out - -# Go workspace -go.work \ No newline at end of file diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 9ffe4dd..0000000 --- a/.gitignore +++ /dev/null @@ -1,68 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib -*.ps1 - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ - -# Go workspace file -go.work - -# Environment variables -.env -.env.local -.env.*.local - -# IDE files -.vscode/ -.idea/ -*.swp -*.swo -*~ -*.bak -*.py -*.json - -# OS generated files -.DS_Store -.DS_Store? 
-._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db - -# Logs -*.log - -# Temporary files -/tmp/ -/temp/ - -# Build artifacts -/build/ -/dist/ - -# GRIB files -/grib_data/ -/grib_data/* - -# Leaflet WebUI -/leaflet_predictor -/leaflet_predictor/* - -# Tawhiri -/tawhiri -/tawhiri/* - -*.md \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 082b194..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "makefile.configureOnOpen": false -} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index ede1ec1..0000000 --- a/Dockerfile +++ /dev/null @@ -1,57 +0,0 @@ -# Build stage -FROM golang:1.24.4-alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git ca-certificates tzdata - -# Set working directory -WORKDIR /app - -# Copy go mod files -COPY go.mod go.sum ./ - -# Download dependencies -RUN go mod download - -# Copy source code -COPY . . - -# Build the application -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \ - -ldflags="-w -s" \ - -o predictor \ - ./cmd/api - -# Runtime stage -FROM alpine:3.19 - -# Install runtime dependencies -RUN apk add --no-cache ca-certificates tzdata - -# Create non-root user -RUN addgroup -g 1001 -S appgroup && \ - adduser -u 1001 -S appuser -G appgroup - -# Set working directory -WORKDIR /app - -# Copy binary from builder stage -COPY --from=builder /app/predictor . 
- -# Create necessary directories -RUN mkdir -p /tmp/grib && \ - chown -R appuser:appgroup /app && \ - chmod -R 777 /tmp/grib - -# Switch to non-root user -USER appuser - -# Expose port -EXPOSE 8080 - -# Health check -HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ - CMD wget --no-verbose --tries=1 --spider http://localhost:8080/ready || exit 1 - -# Run the application -CMD ["./predictor"] \ No newline at end of file diff --git a/Makefile b/Makefile deleted file mode 100644 index 42d11c5..0000000 --- a/Makefile +++ /dev/null @@ -1,113 +0,0 @@ -# Variables -IMAGE_NAME = predictor -TAG = latest -COMPOSE_FILE = docker-compose.yml - -# Validate Docker configuration -.PHONY: validate-docker -validate-docker: - ./scripts/validate-docker.sh - -# Build the Docker image -.PHONY: build -build: - docker build -t $(IMAGE_NAME):$(TAG) . - -# Run the application with docker-compose -.PHONY: up -up: - docker-compose -f $(COMPOSE_FILE) up -d - -# Run the application with docker-compose and rebuild -.PHONY: up-build -up-build: - docker-compose -f $(COMPOSE_FILE) up -d --build - -# Stop the application -.PHONY: down -down: - docker-compose -f $(COMPOSE_FILE) down - -# Stop the application and remove volumes -.PHONY: down-volumes -down-volumes: - docker-compose -f $(COMPOSE_FILE) down -v - -# View logs -.PHONY: logs -logs: - docker-compose -f $(COMPOSE_FILE) logs -f - -# View logs for specific service -.PHONY: logs-predictor -logs-predictor: - docker-compose -f $(COMPOSE_FILE) logs -f predictor - - -# Check service status -.PHONY: ps -ps: - docker-compose -f $(COMPOSE_FILE) ps - -# Execute command in predictor container -.PHONY: exec -exec: - docker-compose -f $(COMPOSE_FILE) exec predictor sh - -# Clean up Docker resources -.PHONY: clean -clean: - docker-compose -f $(COMPOSE_FILE) down -v --rmi all - docker system prune -f - -# Run tests -.PHONY: test -test: - go test ./... 
- -# Build locally -.PHONY: build-local -build-local: - go build -o predictor ./cmd/api - -# Run locally -.PHONY: run-local -run-local: - cd cmd/api && go run . - -# Format code -.PHONY: fmt -fmt: - go fmt ./... - -# Lint code -.PHONY: lint -lint: - golangci-lint run - -# Show help -.PHONY: help -help: - @echo "Available commands:" - @echo " validate-docker - Validate Docker configuration" - @echo " build - Build Docker image" - @echo " up - Start services with docker-compose" - @echo " up-build - Start services and rebuild images" - @echo " down - Stop services" - @echo " down-volumes - Stop services and remove volumes" - @echo " logs - View all logs" - @echo " logs-predictor - View predictor logs" - @echo " ps - Show service status" - @echo " exec - Execute shell in predictor container" - @echo " clean - Clean up Docker resources" - @echo " test - Run tests" - @echo " build-local - Build locally" - @echo " run-local - Run locally" - @echo " fmt - Format code" - @echo " lint - Lint code" - @echo " generate-ogen - Generate OpenAPI code from swagger spec" - @echo " help - Show this help" - -.PHONY: generate-ogen -generate-ogen: - go run github.com/ogen-go/ogen/cmd/ogen@latest --target pkg/rest -package gsn --clean api/rest/predictor.swagger.yml \ No newline at end of file diff --git a/api/rest/predictor.swagger.yml b/api/rest/predictor.swagger.yml deleted file mode 100644 index fc408f9..0000000 --- a/api/rest/predictor.swagger.yml +++ /dev/null @@ -1,191 +0,0 @@ -openapi: 3.0.4 -info: - title: GSN Predictor - OpenAPI 3.0 - version: 0.0.1 -paths: - /api/v1/prediction: - get: - tags: - - Prediction - summary: Perform prediction - operationId: performPrediction - parameters: - - in: query - name: launch_latitude - schema: - type: number - - in: query - name: launch_longitude - schema: - type: number - - in: query - name: launch_datetime - schema: - type: string - format: date-time - - in: query - name: launch_altitude - schema: - type: number - - in: query - name: 
profile - schema: - type: string - enum: [standard_profile, float_profile, reverse_profile, custom_profile] - - in: query - name: ascent_rate - schema: - type: number - - in: query - name: burst_altitude - schema: - type: number - - in: query - name: descent_rate - schema: - type: number - - in: query - name: float_altitude - schema: - type: number - - in: query - name: stop_datetime - schema: - type: string - format: date-time - - in: query - name: ascent_curve - schema: - type: string - - in: query - name: descent_curve - schema: - type: string - - in: query - name: simulate_stages - schema: - type: array - items: - type: string - enum: [ascent, descent, float] - - in: query - name: interpolate - schema: - type: boolean - - in: query - name: format - schema: - type: string - enum: [json] - - in: query - name: dataset - schema: - type: string - format: date-time - responses: - "200": - description: "Prediction response" - content: - application/json: - schema: - $ref: '#/components/schemas/PredictionResult' - default: - description: Error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /ready: - get: - tags: - - Health - summary: Readiness check - operationId: readinessCheck - responses: - "200": - description: Readiness status - content: - application/json: - schema: - $ref: '#/components/schemas/ReadinessResponse' - default: - description: Error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - -components: - schemas: - Error: - type: object - required: - - message - properties: - message: - type: string - details: - type: string - PredictionResult: - type: object - required: - - metadata - - prediction - properties: - metadata: - type: object - required: - - complete_datetime - - start_datetime - properties: - complete_datetime: - type: string - format: date-time - start_datetime: - type: string - format: date-time - prediction: - type: array - items: - type: object - required: - - stage - - 
trajectory - properties: - stage: - type: string - enum: ["ascent", "descent", "float"] - trajectory: - type: array - items: - type: object - required: - - datetime - - latitude - - longitude - - altitude - properties: - datetime: - type: string - format: date-time - latitude: - type: number - longitude: - type: number - altitude: - type: number - ReadinessResponse: - type: object - properties: - status: - type: string - enum: [ok, not_ready, error] - last_update: - type: string - format: date-time - is_fresh: - type: boolean - error_message: - type: string - required: - - status \ No newline at end of file diff --git a/assemble_cube.go b/assemble_cube.go deleted file mode 100644 index c624e92..0000000 --- a/assemble_cube.go +++ /dev/null @@ -1,26 +0,0 @@ -package main - -import ( - "fmt" - "os" - "time" -) - -// This will be a simple wrapper that calls the internal assembleCube function -// We'll compile it as part of the grib package - -func main() { - dir := "C:/tmp/grib" - run := time.Date(2025, 12, 6, 0, 0, 0, 0, time.UTC) - cubePath := fmt.Sprintf("%s/%s.cube", dir, run.Format("20060102_15")) - - fmt.Printf("Assembling cube from existing GRIB files...\n") - fmt.Printf("Directory: %s\n", dir) - fmt.Printf("Run: %s\n", run.Format("2006-01-02 15:04 MST")) - fmt.Printf("Output: %s\n", cubePath) - fmt.Println() - - // Just print instructions - we'll do it directly - fmt.Println("Run this Go code to assemble:") - fmt.Printf("cd internal/pkg/grib && go test -run TestAssemble\n") -} diff --git a/cmd/api/main.go b/cmd/api/main.go deleted file mode 100644 index f250199..0000000 --- a/cmd/api/main.go +++ /dev/null @@ -1,114 +0,0 @@ -package main - -import ( - "context" - "os" - "os/signal" - "syscall" - - "git.intra.yksa.space/gsn/predictor/internal/jobs/grib/updater" - "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "git.intra.yksa.space/gsn/predictor/internal/service" - 
"git.intra.yksa.space/gsn/predictor/internal/transport/rest" - "git.intra.yksa.space/gsn/predictor/internal/transport/rest/handler" - "git.intra.yksa.space/gsn/predictor/pkg/scheduler" - "go.uber.org/zap" -) - -const servicePrefix = "GSN_PREDICTOR" - -func main() { - lg, err := zap.NewProduction() - if err != nil { - panic(err) - } - defer lg.Sync() - zap.ReplaceGlobals(lg) - ctx := log.ToCtx(context.Background(), lg) - - schedulerConfig, err := scheduler.NewConfig() - if err != nil { - log.Ctx(ctx).Fatal("failed to load scheduler configuration", zap.Error(err)) - } - - gribUpdaterConfig, err := updater.NewConfig() - if err != nil { - log.Ctx(ctx).Fatal("failed to load GRIB updater configuration", zap.Error(err)) - } - - gribCfg, err := grib.NewConfig() - if err != nil { - log.Ctx(ctx).Fatal("failed to load GRIB configuration", zap.Error(err)) - } - - gribService, err := grib.New(gribCfg) - if err != nil { - log.Ctx(ctx).Fatal("failed to initialize GRIB service", zap.Error(err)) - } - defer gribService.Close() - - // Force GRIB update on startup in a goroutine - go func() { - log.Ctx(ctx).Info("Performing initial GRIB update (async)...") - if err := gribService.Update(ctx); err != nil { - log.Ctx(ctx).Error("initial GRIB update failed", zap.Error(err)) - } else { - log.Ctx(ctx).Info("initial GRIB update complete") - } - }() - - svc, err := service.New(gribService) - if err != nil { - log.Ctx(ctx).Fatal("failed to initialize service", zap.Error(err)) - } - defer svc.Close() - - var sched *scheduler.Scheduler - if schedulerConfig.Enabled { - sched = scheduler.New() - - gribJob := updater.New(gribService, gribUpdaterConfig) - if err := sched.AddJob(gribJob); err != nil { - log.Ctx(ctx).Error("failed to add GRIB update job to scheduler", zap.Error(err)) - } - - log.Ctx(ctx).Info("scheduler initialized with jobs") - } - - handler := handler.New(svc) - - restConfig, err := rest.NewConfig() - if err != nil { - lg.Fatal("failed to init transport config", zap.Error(err)) - 
} - - transport, err := rest.New(handler, restConfig) - if err != nil { - lg.Fatal("failed to init transport", zap.Error(err)) - } - - svc.Start() - if sched != nil { - sched.Start() - lg.Info("scheduler started") - } - - lg.Info("service started successfully") - - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - go func() { - lg.Info("starting HTTP server on port", zap.Int("port", restConfig.Port)) - transport.Run() - }() - - <-sigChan - lg.Info("received shutdown signal, stopping service") - - if sched != nil { - sched.Stop() - lg.Info("scheduler stopped") - } -} diff --git a/go.mod b/go.mod deleted file mode 100644 index b186e37..0000000 --- a/go.mod +++ /dev/null @@ -1,47 +0,0 @@ -module git.intra.yksa.space/gsn/predictor - -go 1.24.4 - -require ( - github.com/caarlos0/env/v11 v11.3.1 - github.com/edsrzf/mmap-go v1.2.0 - github.com/go-co-op/gocron v1.37.0 - github.com/go-faster/errors v0.7.1 - github.com/go-faster/jx v1.1.0 - github.com/nilsmagnus/grib v1.2.8 - github.com/ogen-go/ogen v1.16.0 - github.com/rs/cors v1.11.1 - github.com/stretchr/testify v1.11.1 - go.opentelemetry.io/otel v1.38.0 - go.opentelemetry.io/otel/metric v1.38.0 - go.opentelemetry.io/otel/trace v1.38.0 - go.uber.org/zap v1.27.0 - golang.org/x/sync v0.17.0 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dlclark/regexp2 v1.11.5 // indirect - github.com/fatih/color v1.18.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-faster/yaml v0.4.6 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/robfig/cron/v3 v3.0.1 // indirect - github.com/segmentio/asm v1.2.1 // indirect - github.com/shopspring/decimal v1.4.0 // indirect - 
github.com/stretchr/objx v0.5.2 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20251017212417-90e834f514db // indirect - golang.org/x/net v0.46.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/text v0.30.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index 7c95f5c..0000000 --- a/go.sum +++ /dev/null @@ -1,113 +0,0 @@ -github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA= -github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= -github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= -github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= -github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= -github.com/go-faster/errors v0.7.1 
h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= -github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= -github.com/go-faster/jx v1.1.0 h1:ZsW3wD+snOdmTDy9eIVgQdjUpXRRV4rqW8NS3t+20bg= -github.com/go-faster/jx v1.1.0/go.mod h1:vKDNikrKoyUmpzaJ0OkIkRQClNHFX/nF3dnTJZb3skg= -github.com/go-faster/yaml v0.4.6 h1:lOK/EhI04gCpPgPhgt0bChS6bvw7G3WwI8xxVe0sw9I= -github.com/go-faster/yaml v0.4.6/go.mod h1:390dRIvV4zbnO7qC9FGo6YYutc+wyyUSHBgbXL52eXk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text 
v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/nilsmagnus/grib v1.2.8 h1:H7ch/1/agaCqM3MC8hW1Ft+EJ+q2XB757uml/IfPvp4= -github.com/nilsmagnus/grib v1.2.8/go.mod h1:XHm+5zuoOk0NSIWaGmA3JaAxI4i50YvD1L1vz+aqPOQ= -github.com/ogen-go/ogen v1.16.0 h1:fKHEYokW/QrMzVNXId74/6RObRIUs9T2oroGKtR25Iw= -github.com/ogen-go/ogen v1.16.0/go.mod h1:s3nWiMzybSf8fhxckyO+wtto92+QHpEL8FmkPnhL3jI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= -github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= -github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= -github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= -github.com/shopspring/decimal v1.4.0 
h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= -github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= 
-go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/exp v0.0.0-20251017212417-90e834f514db h1:by6IehL4BH5k3e3SJmcoNbOobMey2SLpAF79iPOEBvw= -golang.org/x/exp v0.0.0-20251017212417-90e834f514db/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 
v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/jobs/grib/updater/config.go b/internal/jobs/grib/updater/config.go deleted file mode 100644 index fa5132b..0000000 --- a/internal/jobs/grib/updater/config.go +++ /dev/null @@ -1,23 +0,0 @@ -package updater - -import ( - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - env "github.com/caarlos0/env/v11" -) - -type Config struct { - Interval time.Duration `env:"INTERVAL" envDefault:"6h"` - Timeout time.Duration `env:"TIMEOUT" envDefault:"45m"` -} - -func NewConfig() (*Config, error) { - cfg := &Config{} - if err := env.ParseWithOptions(cfg, env.Options{ - PrefixTagName: "GSN_PREDICTOR_GRIB_UPDATER_", - }); err != nil { - return nil, errcodes.Wrap(err, "failed to parse GRIB updater config") - } - return cfg, nil -} diff --git a/internal/jobs/grib/updater/deps.go b/internal/jobs/grib/updater/deps.go deleted file mode 100644 index f2e3ced..0000000 --- a/internal/jobs/grib/updater/deps.go +++ /dev/null @@ -1,8 +0,0 @@ -package updater - -import "context" - -// GribService defines the interface for GRIB operations needed by the updater job -type GribService interface { - Update(ctx context.Context) error -} diff --git a/internal/jobs/grib/updater/updater.go b/internal/jobs/grib/updater/updater.go deleted file mode 100644 index ce28d02..0000000 --- a/internal/jobs/grib/updater/updater.go +++ /dev/null @@ -1,51 +0,0 @@ -package updater - -import ( - "context" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "go.uber.org/zap" -) - -type Job struct { - service 
GribService - config *Config -} - -func New(service GribService, config *Config) *Job { - return &Job{ - service: service, - config: config, - } -} - -func (j *Job) GetInterval() time.Duration { - return j.config.Interval -} - -func (j *Job) GetTimeout() time.Duration { - return j.config.Timeout -} - -func (j *Job) GetCount() int { - return 1 -} - -func (j *Job) GetAsync() bool { - return false -} - -func (j *Job) Execute(ctx context.Context) error { - log := log.Ctx(ctx) - log.Info("executing GRIB update job") - - if err := j.service.Update(ctx); err != nil { - log.Error("GRIB update failed", zap.Error(err)) - return errcodes.Wrap(err, "failed to update GRIB data") - } - - log.Info("GRIB update completed successfully") - return nil -} diff --git a/internal/pkg/ds/predictor.go b/internal/pkg/ds/predictor.go deleted file mode 100644 index c7c33c8..0000000 --- a/internal/pkg/ds/predictor.go +++ /dev/null @@ -1,96 +0,0 @@ -package ds - -import ( - "time" - - api "git.intra.yksa.space/gsn/predictor/pkg/rest" -) - -type PredictionParameters struct { - LaunchLatitude *float64 - LaunchLongitude *float64 - LaunchDatetime *time.Time - LaunchAltitude *float64 - Profile *string - AscentRate *float64 - BurstAltitude *float64 - DescentRate *float64 - FloatAltitude *float64 - StopDatetime *time.Time - AscentCurve *string // base64 - DescentCurve *string // base64 - SimulateStages []string - Interpolate *bool - Format *string - Dataset *time.Time - // Add other parameters as needed -} - -type PredicitonResult struct { - Latitude *float64 - Longitude *float64 - Altitude *float64 - Timestamp *time.Time - WindU *float64 - WindV *float64 - // Add other result fields as needed -} - -// Converts flat ogen params to internal pointer-based model -func ConvertFlatPredictionParams(params api.PerformPredictionParams) *PredictionParameters { - out := &PredictionParameters{} - if v, ok := params.LaunchLatitude.Get(); ok { - out.LaunchLatitude = &v - } - if v, ok := 
params.LaunchLongitude.Get(); ok { - out.LaunchLongitude = &v - } - if v, ok := params.LaunchDatetime.Get(); ok { - out.LaunchDatetime = &v - } - if v, ok := params.LaunchAltitude.Get(); ok { - out.LaunchAltitude = &v - } - if v, ok := params.Profile.Get(); ok { - s := string(v) - out.Profile = &s - } - if v, ok := params.AscentRate.Get(); ok { - out.AscentRate = &v - } - if v, ok := params.BurstAltitude.Get(); ok { - out.BurstAltitude = &v - } - if v, ok := params.DescentRate.Get(); ok { - out.DescentRate = &v - } - if v, ok := params.FloatAltitude.Get(); ok { - out.FloatAltitude = &v - } - if v, ok := params.StopDatetime.Get(); ok { - out.StopDatetime = &v - } - if v, ok := params.AscentCurve.Get(); ok { - out.AscentCurve = &v - } - if v, ok := params.DescentCurve.Get(); ok { - out.DescentCurve = &v - } - if v, ok := params.Interpolate.Get(); ok { - out.Interpolate = &v - } - if v, ok := params.Format.Get(); ok { - s := string(v) - out.Format = &s - } - if v, ok := params.Dataset.Get(); ok { - out.Dataset = &v - } - if len(params.SimulateStages) > 0 { - out.SimulateStages = make([]string, len(params.SimulateStages)) - for i, stage := range params.SimulateStages { - out.SimulateStages[i] = string(stage) - } - } - return out -} diff --git a/internal/pkg/errcodes/errcodes.go b/internal/pkg/errcodes/errcodes.go deleted file mode 100644 index fca56f9..0000000 --- a/internal/pkg/errcodes/errcodes.go +++ /dev/null @@ -1,102 +0,0 @@ -package errcodes - -import ( - "net/http" - "strings" -) - -type ErrorCode struct { - StatusCode int - Message string - Details string -} - -func New(statusCode int, message string, details ...string) *ErrorCode { - return &ErrorCode{ - StatusCode: statusCode, - Message: message, - Details: strings.Join(details, " "), - } -} - -func (e *ErrorCode) Error() string { - return e.Message -} - -func IsErr(err error) bool { - _, ok := err.(*ErrorCode) - return ok -} - -func AsErr(err error) (*ErrorCode, bool) { - if err == nil { - return nil, false 
- } - errcode, ok := err.(*ErrorCode) - return errcode, ok -} - -func Join(errs ...error) error { - if len(errs) == 0 { - return nil - } - - var messages []string - var details []string - - for _, err := range errs { - if err == nil { - continue - } - - if errcode, ok := AsErr(err); ok { - messages = append(messages, errcode.Message) - if errcode.Details != "" { - details = append(details, errcode.Details) - } - } else { - messages = append(messages, err.Error()) - } - } - - if len(messages) == 0 { - return nil - } - - statusCode := http.StatusInternalServerError - if len(errs) > 0 { - if errcode, ok := AsErr(errs[0]); ok { - statusCode = errcode.StatusCode - } - } - - return New(statusCode, strings.Join(messages, "; "), details...) -} - -func Wrap(err error, message string) error { - if err == nil { - return nil - } - - if errcode, ok := AsErr(err); ok { - return New(errcode.StatusCode, message, errcode.Message, errcode.Details) - } - - return New(http.StatusInternalServerError, message, err.Error()) -} - -var ( - ErrNoDataset = New(http.StatusNotFound, "no grib dataset found") - ErrOutOfBounds = New(http.StatusBadRequest, "requested time is out of bounds") - ErrConfig = New(http.StatusInternalServerError, "configuration error") - ErrConfigInvalidEnv = New(http.StatusInternalServerError, "invalid environment configuration") - ErrConfigMissingRequired = New(http.StatusInternalServerError, "missing required configuration") - ErrDownload = New(http.StatusInternalServerError, "download error") - ErrProcessing = New(http.StatusInternalServerError, "data processing error") - ErrNoCubeFilesFound = New(http.StatusNotFound, "no cube files found") - ErrNoValidCubeFilesFound = New(http.StatusNotFound, "no valid cube files found") - ErrLatestCubeFileIsTooOld = New(http.StatusNotFound, "latest cube file is too old") - ErrScheduler = New(http.StatusInternalServerError, "scheduler error") - ErrSchedulerInvalidJob = New(http.StatusBadRequest, "invalid job configuration") - 
ErrSchedulerTimeoutTooLong = New(http.StatusBadRequest, "job timeout too long", "timeout cannot exceed interval") -) diff --git a/internal/pkg/grib/README.md b/internal/pkg/grib/README.md deleted file mode 100644 index bab933f..0000000 --- a/internal/pkg/grib/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# GRIB Module - -Этот модуль реализует функциональность для работы с GRIB-файлами, аналогичную tawhiri-downloader и tawhiri, но на Go. - -## Основные возможности - -- **Скачивание GRIB-файлов** с NOMADS (GFS прогнозы) -- **Сборка 5D-куба** (время, давление, широта, долгота, переменные u/v) -- **Эффективное хранение** с использованием mmap -- **Интерполяция** ветровых данных для произвольных координат и времени -- **Кэширование** результатов (in-memory) -- **Распределенные блокировки** для предотвращения дублирования загрузок - -## Архитектура - -### Основные компоненты - -- **Downloader** - скачивает GRIB-файлы с NOMADS -- **Cube** - управляет 5D-массивом данных через mmap -- **Extractor** - выполняет интерполяцию данных -- **Cache** - кэширует результаты запросов -- **Service** - основной интерфейс для работы с модулем - -### Структура данных - -5D-куб содержит: -- **Время**: 17 временных срезов (0, 3, 6, ..., 48 часов) -- **Давление**: 34 уровня давления (1000, 975, 950, ..., 2 hPa) -- **Широта**: 361 точка (-90° до +90°) -- **Долгота**: 720 точек (0° до 359.5°) -- **Переменные**: u-ветер и v-ветер - -## Использование - -```go -// Создание сервиса -cfg := grib.ServiceConfig{ - Dir: "/tmp/grib", - TTL: 24 * time.Hour, - CacheTTL: 1 * time.Hour, - Parallel: 4, - Client: &http.Client{Timeout: 30 * time.Second}, -} - -service, err := grib.New(cfg) -if err != nil { - log.Fatal(err) -} -defer service.Close() - -// Обновление данных -err = service.Update(ctx) - -// Извлечение ветровых данных -wind, err := service.Extract(ctx, lat, lon, alt, timestamp) -// wind[0] - u-компонента ветра -// wind[1] - v-компонента ветра -``` - -## Интерполяция - -Модуль выполняет 16-точечную 
интерполяцию: -1. **Временная интерполяция** между двумя ближайшими срезами -2. **Интерполяция по давлению** между двумя ближайшими уровнями -3. **Билинейная интерполяция** по широте и долготе - -## Кэширование - -- **In-memory кэш**: быстрый доступ к недавно запрошенным данным - -## Расписание обновлений - -Рекомендуемая частота вызова `Update()`: -- **Каждые 6 часов** - для получения свежих GFS прогнозов -- **При запуске** - для загрузки начальных данных -- **По требованию** - при отсутствии данных для запрашиваемого времени - -## Отличия от tawhiri - -### Преимущества Go-реализации: -- **Высокая производительность** (mmap, конкурентные загрузки) -- **Эффективное использование памяти** (не загружает весь массив в RAM) -- **Горизонтальное масштабирование** (stateless, множество реплик) -- **Встроенное кэширование** (in-memory) - -### Особенности: -- Использует `github.com/nilsmagnus/grib` вместо pygrib -- Реализует собственную логику интерполяции - -## Конфигурация - -### Переменные окружения: -- `PREDICTOR_GRIB_DATASET_URL` - URL источника данных (опционально) - -### Параметры ServiceConfig: -- `Dir` - директория для хранения файлов -- `TTL` - время жизни данных (по умолчанию 24 часа) -- `CacheTTL` - время жизни кэша (по умолчанию 1 час) -- `Parallel` - количество параллельных загрузок -- `Client` - HTTP клиент для загрузок \ No newline at end of file diff --git a/internal/pkg/grib/assemble_test.go b/internal/pkg/grib/assemble_test.go deleted file mode 100644 index 8d9d7ce..0000000 --- a/internal/pkg/grib/assemble_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package grib - -import ( - "testing" - "time" -) - -func TestAssembleCubeFromExisting(t *testing.T) { - dir := "C:/tmp/grib" - run := time.Date(2026, 1, 16, 6, 0, 0, 0, time.UTC) - cubePath := dir + "/" + run.Format("20060102_15") + ".cube" - - t.Logf("Assembling cube from existing GRIB files...") - t.Logf("Directory: %s", dir) - t.Logf("Run: %s", run.Format("2006-01-02 15:04 MST")) - t.Logf("Output: %s", 
cubePath) - - err := assembleCube(dir, run, cubePath) - if err != nil { - t.Fatalf("Failed to assemble cube: %v", err) - } - - t.Logf("✓ Cube assembled successfully!") - t.Logf("Cube file: %s", cubePath) -} diff --git a/internal/pkg/grib/cache.go b/internal/pkg/grib/cache.go deleted file mode 100644 index 7d40f43..0000000 --- a/internal/pkg/grib/cache.go +++ /dev/null @@ -1,36 +0,0 @@ -package grib - -import ( - "sync" - "time" -) - -type vec [2]float64 - -type item struct { - v vec - exp time.Time -} - -type memCache struct { - ttl time.Duration - m sync.Map -} - -func (c *memCache) get(k uint64) (vec, bool) { - if v, ok := c.m.Load(k); ok { - it := v.(item) - - if time.Now().Before(it.exp) { - return it.v, true - } - - c.m.Delete(k) - } - - return vec{}, false -} - -func (c *memCache) set(k uint64, v vec) { - c.m.Store(k, item{v, time.Now().Add(c.ttl)}) -} diff --git a/internal/pkg/grib/config.go b/internal/pkg/grib/config.go deleted file mode 100644 index c80462e..0000000 --- a/internal/pkg/grib/config.go +++ /dev/null @@ -1,139 +0,0 @@ -package grib - -import ( - "fmt" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - env "github.com/caarlos0/env/v11" -) - -// DatasetConfig описывает параметры GFS-датасета: сетку, временные шаги, -// уровни давления и URL для загрузки. 
-type DatasetConfig struct { - // Сетка - Resolution float64 // шаг сетки в градусах (0.25 или 0.5) - NLat int // точек по широте (721 для 0.25°, 361 для 0.5°) - NLon int // точек по долготе (1440 для 0.25°, 720 для 0.5°) - - // Время - NT int // кол-во временных шагов (97 для 0–96 ч с шагом 1) - MaxHour int // последний час прогноза (96) - TimeStep int // интервал между шагами, часы (1 или 3) - - // Вертикаль - NP int // кол-во уровней давления - Levels []float64 // уровни давления в гПа, по убыванию (1000 … 1) - - // Переменные в кубе (порядок важен: индексы 0, 1, 2, …) - NVar int // кол-во переменных - Variables []string // GRIB-имена для фильтрации idx (HGT, UGRD, VGRD) - - // URL загрузки (fmt-шаблоны: date, hour, hour, step) - URLMask string // основной pgrb2 - URLMaskB string // дополнительный pgrb2b - - // Имена файлов - FileSuffix string // токен разрешения в именах файлов ("0p25", "0p50") -} - -// SizePerVar возвращает размер одной переменной в кубе, байт. -func (dc *DatasetConfig) SizePerVar() int64 { - return int64(dc.NT) * int64(dc.NP) * int64(dc.NLat) * int64(dc.NLon) * 4 -} - -// CubeSize возвращает полный размер куба, байт. -func (dc *DatasetConfig) CubeSize() int64 { - return dc.SizePerVar() * int64(dc.NVar) -} - -// GridSize возвращает NLat * NLon. -func (dc *DatasetConfig) GridSize() int { - return dc.NLat * dc.NLon -} - -// InvResolution возвращает 1/Resolution — множитель для перевода координат в индексы. -func (dc *DatasetConfig) InvResolution() float64 { - return 1.0 / dc.Resolution -} - -// Steps возвращает список часов прогноза [0, TimeStep, 2*TimeStep, …, MaxHour]. -func (dc *DatasetConfig) Steps() []int { - out := make([]int, 0, dc.NT) - for h := 0; h <= dc.MaxHour; h += dc.TimeStep { - out = append(out, h) - } - return out -} - -// FileName возвращает имя основного GRIB-файла (pgrb2). 
-func (dc *DatasetConfig) FileName(run time.Time, step int) string { - return fmt.Sprintf("gfs.t%02dz.pgrb2.%s.f%03d", run.Hour(), dc.FileSuffix, step) -} - -// FileNameB возвращает имя вторичного GRIB-файла (pgrb2b). -func (dc *DatasetConfig) FileNameB(run time.Time, step int) string { - return fmt.Sprintf("gfs.t%02dz.pgrb2b.%s.f%03d", run.Hour(), dc.FileSuffix, step) -} - -// GribURL возвращает URL основного GRIB-файла. -func (dc *DatasetConfig) GribURL(run time.Time, step int) string { - return fmt.Sprintf(dc.URLMask, run.Format("20060102"), run.Hour(), run.Hour(), step) -} - -// GribURLB возвращает URL вторичного GRIB-файла. -func (dc *DatasetConfig) GribURLB(run time.Time, step int) string { - return fmt.Sprintf(dc.URLMaskB, run.Format("20060102"), run.Hour(), run.Hour(), step) -} - -// DefaultDatasetConfig возвращает конфиг GFS 0.25° / 1 час / 47 уровней. -func DefaultDatasetConfig() DatasetConfig { - return DatasetConfig{ - Resolution: 0.25, - NLat: 721, - NLon: 1440, - - NT: 97, - MaxHour: 96, - TimeStep: 1, - - NP: 47, - Levels: []float64{ - 1000, 975, 950, 925, 900, 875, 850, 825, 800, 775, - 750, 725, 700, 675, 650, 625, 600, 575, 550, 525, - 500, 475, 450, 425, 400, 375, 350, 325, 300, 275, - 250, 225, 200, 175, 150, 125, 100, 70, 50, 30, - 20, 10, 7, 5, 3, 2, 1, - }, - - NVar: 3, - Variables: []string{"HGT", "UGRD", "VGRD"}, - - URLMask: "https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2.0p25.f%03d", - URLMaskB: "https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2b.0p25.f%03d", - - FileSuffix: "0p25", - } -} - -// --------------------------------------------------------------------------- - -type Config struct { - Dir string `env:"DIR" envDefault:"C:/tmp/grib"` - TTL time.Duration `env:"TTL" envDefault:"48h"` - CacheTTL time.Duration `env:"CACHE_TTL" envDefault:"1h"` - Parallel int `env:"PARALLEL" envDefault:"8"` - - Dataset DatasetConfig -} - -func NewConfig() (*Config, error) { - cfg := &Config{} - 
if err := env.ParseWithOptions(cfg, env.Options{ - PrefixTagName: "GSN_PREDICTOR_GRIB_", - }); err != nil { - return nil, errcodes.Wrap(err, "failed to parse GRIB config") - } - cfg.Dataset = DefaultDatasetConfig() - return cfg, nil -} diff --git a/internal/pkg/grib/create_dataset.go b/internal/pkg/grib/create_dataset.go deleted file mode 100644 index e69de29..0000000 diff --git a/internal/pkg/grib/cube.go b/internal/pkg/grib/cube.go deleted file mode 100644 index dfc3606..0000000 --- a/internal/pkg/grib/cube.go +++ /dev/null @@ -1,56 +0,0 @@ -package grib - -import ( - "encoding/binary" - "math" - "os" - - mmap "github.com/edsrzf/mmap-go" -) - -type cube struct { - mm mmap.MMap - t, p, lat, lon int - bytesPerVar int64 - file *os.File -} - -func openCube(path string, dc *DatasetConfig) (*cube, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - f.Close() - return nil, err - } - - return &cube{ - mm: mm, - t: dc.NT, - p: dc.NP, - lat: dc.NLat, - lon: dc.NLon, - bytesPerVar: dc.SizePerVar(), - file: f, - }, nil -} - -func (c *cube) val(varIdx, ti, pi, y, x int) float32 { - idx := (((ti*c.p+pi)*c.lat + y) * c.lon) + x - off := int64(varIdx)*c.bytesPerVar + int64(idx)*4 - bits := binary.LittleEndian.Uint32(c.mm[off : off+4]) - return math.Float32frombits(bits) -} - -func (c *cube) Close() error { - if c.mm != nil { - c.mm.Unmap() - } - if c.file != nil { - return c.file.Close() - } - return nil -} diff --git a/internal/pkg/grib/dataset.go b/internal/pkg/grib/dataset.go deleted file mode 100644 index f994dc9..0000000 --- a/internal/pkg/grib/dataset.go +++ /dev/null @@ -1,14 +0,0 @@ -package grib - -type dataset struct { - cube *cube - ds *DatasetConfig - runUTC int64 // unix seconds -} - -func (d *dataset) Close() error { - if d.cube != nil { - return d.cube.Close() - } - return nil -} diff --git a/internal/pkg/grib/extractor.go b/internal/pkg/grib/extractor.go deleted file mode 100644 
index d27f3a4..0000000 --- a/internal/pkg/grib/extractor.go +++ /dev/null @@ -1,126 +0,0 @@ -package grib - -import "math" - -func lerp(a, b, t float64) float64 { return a + t*(b-a) } - -// ghInterp returns interpolated geopotential height at given time/pressure/lat/lon -func (d *dataset) ghInterp(ti, pi int, y0, y1, x0, x1 int, wy, wx float64) float64 { - g00 := d.cube.val(0, ti, pi, y0, x0) - g10 := d.cube.val(0, ti, pi, y0, x1) - g01 := d.cube.val(0, ti, pi, y1, x0) - g11 := d.cube.val(0, ti, pi, y1, x1) - return (1-wy)*((1-wx)*float64(g00)+wx*float64(g10)) + wy*((1-wx)*float64(g01)+wx*float64(g11)) -} - -// searchAltLevel uses geopotential height to find pressure level bracket for target altitude. -func (d *dataset) searchAltLevel(alt float64, ti, y0, y1, x0, x1 int, wy, wx float64) (int, float64) { - levels := d.ds.Levels - nLevels := len(levels) - - lo, hi := 0, nLevels-1 - for lo < hi-1 { - mid := (lo + hi) / 2 - ghMid := d.ghInterp(ti, mid, y0, y1, x0, x1, wy, wx) - if ghMid < alt { - lo = mid - } else { - hi = mid - } - } - - ghLo := d.ghInterp(ti, lo, y0, y1, x0, x1, wy, wx) - ghHi := d.ghInterp(ti, hi, y0, y1, x0, x1, wy, wx) - - wp := 0.0 - if ghHi != ghLo { - wp = (alt - ghLo) / (ghHi - ghLo) - } - if wp < 0 { - wp = 0 - } - if wp > 1 { - wp = 1 - } - - return lo, wp -} - -// uv выполняет интерполяцию ветра по 4 измерениям (time, pressure, lat, lon). 
-func (d *dataset) uv(lat, lon, alt float64, tHours float64) (float64, float64) { - if lon < 0 { - lon += 360 - } - - inv := d.ds.InvResolution() - - // GRIB scan north→south: index 0 = 90°N - iy := (90 - lat) * inv - y0 := int(math.Floor(iy)) - if y0 < 0 { - y0 = 0 - } - if y0 >= d.cube.lat-1 { - y0 = d.cube.lat - 2 - } - y1 := y0 + 1 - wy := iy - float64(y0) - - ix := lon * inv - x0 := int(math.Floor(ix)) % d.cube.lon - x1 := (x0 + 1) % d.cube.lon - wx := ix - float64(x0) - - // Время: tHours делим на шаг, чтобы получить индекс в кубе - tIdx := tHours / float64(d.ds.TimeStep) - it0 := int(math.Floor(tIdx)) - if it0 < 0 { - it0 = 0 - } - if it0 >= d.cube.t-1 { - it0 = d.cube.t - 2 - } - wt := tIdx - float64(it0) - - // ISA: высота → давление → индекс уровня - levels := d.ds.Levels - p := pressureFromAlt(alt) - ip0 := 0 - for ip0+1 < len(levels) && levels[ip0+1] > p { - ip0++ - } - ip1 := ip0 + 1 - if ip1 >= len(levels) { - ip1 = len(levels) - 1 - } - wp := 0.0 - if levels[ip0] != levels[ip1] { - wp = (levels[ip0] - p) / (levels[ip0] - levels[ip1]) - } - - fetch := func(ti, pi int) (float64, float64) { - u00 := d.cube.val(1, ti, pi, y0, x0) - u10 := d.cube.val(1, ti, pi, y0, x1) - u01 := d.cube.val(1, ti, pi, y1, x0) - u11 := d.cube.val(1, ti, pi, y1, x1) - v00 := d.cube.val(2, ti, pi, y0, x0) - v10 := d.cube.val(2, ti, pi, y0, x1) - v01 := d.cube.val(2, ti, pi, y1, x0) - v11 := d.cube.val(2, ti, pi, y1, x1) - uxy := (1-wy)*((1-wx)*float64(u00)+wx*float64(u10)) + wy*((1-wx)*float64(u01)+wx*float64(u11)) - vxy := (1-wy)*((1-wx)*float64(v00)+wx*float64(v10)) + wy*((1-wx)*float64(v01)+wx*float64(v11)) - return uxy, vxy - } - - u0p0, v0p0 := fetch(it0, ip0) - u0p1, v0p1 := fetch(it0, ip1) - u1p0, v1p0 := fetch(it0+1, ip0) - u1p1, v1p1 := fetch(it0+1, ip1) - uLow := lerp(u0p0, u0p1, wp) - vLow := lerp(v0p0, v0p1, wp) - uHig := lerp(u1p0, u1p1, wp) - vHig := lerp(v1p0, v1p1, wp) - u := lerp(uLow, uHig, wt) - v := lerp(vLow, vHig, wt) - return u, v -} diff --git 
a/internal/pkg/grib/grib.go b/internal/pkg/grib/grib.go deleted file mode 100644 index 7bf6bfc..0000000 --- a/internal/pkg/grib/grib.go +++ /dev/null @@ -1,291 +0,0 @@ -package grib - -import ( - "context" - "encoding/binary" - "math" - "os" - "path/filepath" - "strings" - "sync/atomic" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "github.com/edsrzf/mmap-go" - "github.com/nilsmagnus/grib/griblib" -) - -type Service interface { - Update(ctx context.Context) error - Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) - Close() error - GetStatus() (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) -} - -type service struct { - cfg *Config - cache memCache - data atomic.Pointer[dataset] -} - -func New(cfg *Config) (Service, error) { - if cfg.TTL == 0 { - cfg.TTL = 24 * time.Hour - } - if err := os.MkdirAll(cfg.Dir, 0o755); err != nil { - return nil, err - } - s := &service{cfg: cfg, cache: memCache{ttl: cfg.CacheTTL}} - - // Try to load existing dataset on startup - if err := s.loadExistingDataset(); err != nil { - // Log error but don't fail startup - dataset will be loaded on first Update() - } - - return s, nil -} - -func (s *service) loadExistingDataset() error { - pattern := filepath.Join(s.cfg.Dir, "*.cube") - matches, err := filepath.Glob(pattern) - if err != nil { - return err - } - - if len(matches) == 0 { - return errcodes.ErrNoCubeFilesFound - } - - var latestFile string - var latestTime time.Time - - for _, match := range matches { - info, err := os.Stat(match) - if err != nil { - continue - } - if info.ModTime().After(latestTime) { - latestTime = info.ModTime() - latestFile = match - } - } - - if latestFile == "" { - return errcodes.ErrNoValidCubeFilesFound - } - - if time.Since(latestTime) > s.cfg.TTL { - return errcodes.Wrap(errcodes.ErrLatestCubeFileIsTooOld, "latest cube file is too old") - } - - dc := &s.cfg.Dataset - c, err := openCube(latestFile, dc) - if err != nil { - 
return err - } - - base := filepath.Base(latestFile) - runStr := strings.TrimSuffix(base, ".cube") - run, err := time.Parse("20060102_15", runStr) - if err != nil { - c.Close() - return err - } - - s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()}) - return nil -} - -func (s *service) Update(ctx context.Context) error { - if d := s.data.Load(); d != nil { - runTime := time.Unix(d.runUTC, 0) - if time.Since(runTime) < s.cfg.TTL { - return nil - } - } - - if d := s.data.Load(); d != nil { - runTime := time.Unix(d.runUTC, 0) - if time.Since(runTime) < s.cfg.TTL { - return nil - } - } - - dc := &s.cfg.Dataset - run := nearestRun(time.Now().UTC().Add(-6 * time.Hour)) - - cubePath := filepath.Join(s.cfg.Dir, run.Format("20060102_15")) + ".cube" - if _, err := os.Stat(cubePath); err == nil { - info, err := os.Stat(cubePath) - if err == nil && time.Since(info.ModTime()) < s.cfg.TTL { - c, err := openCube(cubePath, dc) - if err != nil { - return err - } - s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()}) - s.cache = memCache{ttl: s.cfg.CacheTTL} - return nil - } - } - - downloadCtx, cancel := context.WithTimeout(ctx, 60*time.Minute) - defer cancel() - - dl := NewPartialDownloader(s.cfg.Dir, s.cfg.Parallel, dc) - if err := dl.Run(downloadCtx, run); err != nil { - return err - } - - if _, err := os.Stat(cubePath); err != nil { - if err := assembleCube(s.cfg.Dir, run, cubePath, dc); err != nil { - return err - } - } - - c, err := openCube(cubePath, dc) - if err != nil { - return err - } - s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()}) - s.cache = memCache{ttl: s.cfg.CacheTTL} - return nil -} - -func assembleCube(dir string, run time.Time, cubePath string, dc *DatasetConfig) error { - sizePerVar := dc.SizePerVar() - total := dc.CubeSize() - gridBytes := int64(dc.GridSize()) * 4 - - f, err := os.Create(cubePath) - if err != nil { - return err - } - if err := f.Truncate(total); err != nil { - return err - } - mm, err := mmap.MapRegion(f, 
int(total), mmap.RDWR, 0, 0) - if err != nil { - return err - } - defer mm.Unmap() - defer f.Close() - - pIndex := make(map[int]int) - for i, p := range dc.Levels { - pIndex[int(math.Round(p))] = i - } - - processFile := func(fn string, ti int) error { - file, err := os.Open(fn) - if err != nil { - return err - } - - messages, err := griblib.ReadMessages(file) - file.Close() - if err != nil { - return err - } - - for _, m := range messages { - if m.Section4.ProductDefinitionTemplateNumber != 0 { - continue - } - - product := m.Section4.ProductDefinitionTemplate - - var varIdx int - if product.ParameterCategory == 2 { - switch product.ParameterNumber { - case 2: // u-wind - varIdx = 1 - case 3: // v-wind - varIdx = 2 - default: - continue - } - } else if product.ParameterCategory == 3 && product.ParameterNumber == 5 { - varIdx = 0 // geopotential height - } else { - continue - } - - if product.FirstSurface.Type != 100 { - continue - } - - pressure := float64(product.FirstSurface.Value) / 100.0 - pIdx, ok := pIndex[int(math.Round(pressure))] - if !ok { - continue - } - - vals := m.Data() - raw := make([]byte, len(vals)*4) - for i, v := range vals { - binary.LittleEndian.PutUint32(raw[i*4:], math.Float32bits(float32(v))) - } - base := int64(varIdx)*sizePerVar + (int64(ti)*int64(dc.NP)+int64(pIdx))*gridBytes - copy(mm[base:base+int64(len(raw))], raw) - } - return nil - } - - steps := dc.Steps() - for ti, step := range steps { - fn := filepath.Join(dir, dc.FileName(run, step)) - if err := processFile(fn, ti); err != nil { - return err - } - - fnB := filepath.Join(dir, dc.FileNameB(run, step)) - if err := processFile(fnB, ti); err != nil { - return err - } - } - return mm.Flush() -} - -func (s *service) Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) { - var zero [2]float64 - d := s.data.Load() - if d == nil { - return zero, errcodes.ErrNoDataset - } - maxDur := time.Duration(s.cfg.Dataset.MaxHour) * time.Hour - if 
ts.Before(time.Unix(d.runUTC, 0)) || ts.After(time.Unix(d.runUTC, 0).Add(maxDur)) { - return zero, errcodes.ErrOutOfBounds - } - - key := encodeKey(lat, lon, alt, ts) - if v, ok := s.cache.get(key); ok { - return [2]float64(v), nil - } - - td := ts.Sub(time.Unix(d.runUTC, 0)).Hours() - u, v := d.uv(lat, lon, alt, td) - out := [2]float64{u, v} - - s.cache.set(key, vec(out)) - return out, nil -} - -func (s *service) Close() error { - if d := s.data.Load(); d != nil { - return d.Close() - } - return nil -} - -func (s *service) GetStatus() (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) { - d := s.data.Load() - if d == nil { - return false, time.Time{}, false, "no dataset loaded" - } - runTime := time.Unix(d.runUTC, 0) - fresh := time.Since(runTime) < s.cfg.TTL - if !fresh { - return false, runTime, false, "dataset is too old" - } - return true, runTime, true, "" -} diff --git a/internal/pkg/grib/partial_downloader.go b/internal/pkg/grib/partial_downloader.go deleted file mode 100644 index 3752504..0000000 --- a/internal/pkg/grib/partial_downloader.go +++ /dev/null @@ -1,350 +0,0 @@ -package grib - -import ( - "bufio" - "context" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -// PartialDownloader загружает только необходимые поля из GRIB файлов -// используя HTTP Range requests и .idx индексные файлы -type PartialDownloader struct { - Dir string - Parallel int - Client *http.Client - Variables []string - ds *DatasetConfig -} - -// NewPartialDownloader создаёт новый partial downloader -func NewPartialDownloader(dir string, parallel int, dc *DatasetConfig) *PartialDownloader { - return &PartialDownloader{ - Dir: dir, - Parallel: parallel, - Client: &http.Client{ - Timeout: 60 * time.Second, - }, - Variables: dc.Variables, - ds: dc, - } -} - -// 
idxEntry представляет запись из .idx файла -type idxEntry struct { - Index int - ByteStart int64 - Date string - Variable string - Level string - Forecast string -} - -type ProgressWriter struct { - Total int64 - Downloaded int64 - OnProgress func(percent float64) -} - -func (pw *ProgressWriter) Write(p []byte) (int, error) { - n := len(p) - pw.Downloaded += int64(n) - if pw.Total > 0 && pw.OnProgress != nil { - percent := float64(pw.Downloaded) / float64(pw.Total) * 100 - pw.OnProgress(percent) - } - return n, nil -} - -// parseIdx парсит .idx файл и возвращает записи -func (d *PartialDownloader) parseIdx(body []byte) []idxEntry { - var entries []idxEntry - lines := strings.Split(string(body), "\n") - - for _, line := range lines { - if line == "" { - continue - } - parts := strings.Split(line, ":") - if len(parts) < 7 { - continue - } - - byteStart, _ := strconv.ParseInt(parts[1], 10, 64) - entries = append(entries, idxEntry{ - Index: len(entries), - ByteStart: byteStart, - Date: parts[2], - Variable: parts[3], - Level: parts[4], - Forecast: parts[5], - }) - } - return entries -} - -// filterEntries фильтрует записи по нужным переменным и уровням давления -func (d *PartialDownloader) filterEntries(entries []idxEntry) []idxEntry { - var filtered []idxEntry - - for _, e := range entries { - isNeededVar := false - for _, v := range d.Variables { - if v == e.Variable { - isNeededVar = true - break - } - } - - isPressureLevel := strings.HasSuffix(e.Level, " mb") - - if isNeededVar && isPressureLevel { - filtered = append(filtered, e) - } - } - - return filtered -} - -// Вспомогательная функция для выполнения запроса с повторами -func (d *PartialDownloader) doWithRetry(ctx context.Context, req *http.Request) (*http.Response, error) { - var resp *http.Response - var err error - - backoff := 1 * time.Second - maxRetries := 3 - - for i := 0; i < maxRetries; i++ { - resp, err = d.Client.Do(req) - if err == nil && resp.StatusCode < 500 { - return resp, nil - } - - if resp 
!= nil { - resp.Body.Close() - } - - log.Ctx(ctx).Warn("retry download", zap.Int("attempt", i+1), zap.Error(err)) - - select { - case <-time.After(backoff): - backoff *= 2 - case <-ctx.Done(): - return nil, ctx.Err() - } - } - return nil, err -} - -// downloadRange загружает диапазон байтов из URL -func (d *PartialDownloader) downloadRange(ctx context.Context, url string, start, end int64, out io.Writer) error { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return err - } - - rangeHeader := fmt.Sprintf("bytes=%d-", start) - if end > 0 { - rangeHeader = fmt.Sprintf("bytes=%d-%d", start, end) - } - req.Header.Set("Range", rangeHeader) - - resp, err := d.Client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { - return errcodes.Wrap(errcodes.ErrDownload, "bad status: "+resp.Status) - } - - _, err = io.Copy(out, resp.Body) - return err -} - -func (d *PartialDownloader) downloadFieldsFromURL(ctx context.Context, url string, dst string, step int) (err error) { - idxURL := url + ".idx" - tmp := dst + ".part" - - if info, err := os.Stat(dst); err == nil && info.Size() > 0 { - return nil - } - - reqIdx, _ := http.NewRequestWithContext(ctx, http.MethodGet, idxURL, nil) - respIdx, err := d.doWithRetry(ctx, reqIdx) - if err != nil { - return errcodes.Wrap(err, "failed to get idx") - } - defer respIdx.Body.Close() - - idxBody, _ := io.ReadAll(respIdx.Body) - entries := d.parseIdx(idxBody) - filtered := d.filterEntries(entries) - if len(filtered) == 0 { - return nil - } - - var totalBytes int64 - type chunk struct{ start, end int64 } - chunks := make([]chunk, 0, len(filtered)) - - for _, entry := range filtered { - var endByte int64 = -1 - for j, e := range entries { - if e.ByteStart == entry.ByteStart && j+1 < len(entries) { - endByte = entries[j+1].ByteStart - 1 - break - } - } - chunks = append(chunks, 
chunk{entry.ByteStart, endByte}) - if endByte > 0 { - totalBytes += (endByte - entry.ByteStart + 1) - } - } - - f, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return err - } - - var downloaded int64 - - err = func() error { - defer f.Close() - bufWriter := bufio.NewWriterSize(f, 1024*1024) - - for i, c := range chunks { - countingWriter := &proxyWriter{ - Writer: bufWriter, - OnWrite: func(n int) { - downloaded += int64(n) - if totalBytes > 0 && i%20 == 0 { - pct := float64(downloaded) / float64(totalBytes) * 100 - log.Ctx(ctx).Debug("download progress", - zap.Int("step", step), - zap.String("pct", fmt.Sprintf("%.1f%%", pct))) - } - }, - } - - if err := d.downloadRange(ctx, url, c.start, c.end, countingWriter); err != nil { - return err - } - } - return bufWriter.Flush() - }() - - if err != nil { - f.Close() - os.Remove(tmp) - return err - } - - return d.safeRename(tmp, dst) -} - -type proxyWriter struct { - io.Writer - OnWrite func(int) -} - -func (p *proxyWriter) Write(data []byte) (int, error) { - n, err := p.Writer.Write(data) - if n > 0 && p.OnWrite != nil { - p.OnWrite(n) - } - return n, err -} - -func (d *PartialDownloader) safeRename(src, dst string) error { - var lastErr error - for i := 0; i < 5; i++ { - if err := os.Rename(src, dst); err == nil { - return nil - } else { - lastErr = err - } - time.Sleep(150 * time.Millisecond) - } - return fmt.Errorf("rename failed: %w", lastErr) -} - -// Run запускает загрузку всех необходимых файлов (pgrb2 + pgrb2b) -func (d *PartialDownloader) Run(ctx context.Context, run time.Time) error { - log.Ctx(ctx).Info("starting partial download", - zap.Time("run", run), - zap.Strings("variables", d.Variables)) - - g, ctx := errgroup.WithContext(ctx) - sem := make(chan struct{}, d.Parallel) - steps := d.ds.Steps() - - for _, step := range steps { - step := step - - // Download primary pgrb2 - sem <- struct{}{} - g.Go(func() error { - defer func() { <-sem }() - url := d.ds.GribURL(run, 
step) - dst := filepath.Join(d.Dir, d.ds.FileName(run, step)) - return d.downloadFieldsFromURL(ctx, url, dst, step) - }) - - // Download secondary pgrb2b - sem <- struct{}{} - g.Go(func() error { - defer func() { <-sem }() - url := d.ds.GribURLB(run, step) - dst := filepath.Join(d.Dir, d.ds.FileNameB(run, step)) - return d.downloadFieldsFromURL(ctx, url, dst, step) - }) - } - - return g.Wait() -} - -// GetLatestModelRun находит последний доступный прогноз GFS -func GetLatestModelRun(ctx context.Context, dc *DatasetConfig) (time.Time, error) { - now := time.Now().UTC() - hour := now.Hour() - (now.Hour() % 6) - current := time.Date(now.Year(), now.Month(), now.Day(), hour, 0, 0, 0, time.UTC) - - client := &http.Client{Timeout: 10 * time.Second} - - for i := 0; i < 8; i++ { - url := dc.GribURL(current, dc.MaxHour) - - req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) - if err != nil { - current = current.Add(-6 * time.Hour) - continue - } - - resp, err := client.Do(req) - if err == nil && resp.StatusCode == http.StatusOK { - resp.Body.Close() - log.Ctx(ctx).Info("found latest model run", zap.Time("run", current)) - return current, nil - } - if resp != nil { - resp.Body.Close() - } - - current = current.Add(-6 * time.Hour) - } - - return time.Time{}, errcodes.Wrap(errcodes.ErrDownload, "no recent GFS forecast found") -} diff --git a/internal/pkg/grib/pressure.go b/internal/pkg/grib/pressure.go deleted file mode 100644 index 1b6dec0..0000000 --- a/internal/pkg/grib/pressure.go +++ /dev/null @@ -1,7 +0,0 @@ -package grib - -import "math" - -func pressureFromAlt(alt float64) float64 { // ICAO ISA - return 1013.25 * math.Pow(1-alt/44307.69396, 5.255877) -} diff --git a/internal/pkg/grib/util.go b/internal/pkg/grib/util.go deleted file mode 100644 index 1145a61..0000000 --- a/internal/pkg/grib/util.go +++ /dev/null @@ -1,20 +0,0 @@ -package grib - -import ( - "fmt" - "hash/fnv" - "time" -) - -func nearestRun(t time.Time) time.Time { - h := 
t.UTC().Hour() - t.UTC().Hour()%6 - return time.Date(t.Year(), t.Month(), t.Day(), h, 0, 0, 0, time.UTC) -} - -func encodeKey(a ...any) uint64 { - h := fnv.New64a() - for _, v := range a { - fmt.Fprint(h, v) - } - return h.Sum64() -} diff --git a/internal/pkg/log/log.go b/internal/pkg/log/log.go deleted file mode 100644 index 58db255..0000000 --- a/internal/pkg/log/log.go +++ /dev/null @@ -1,23 +0,0 @@ -package log - -import ( - "context" - - "go.uber.org/zap" -) - -type ctxLogKey struct{} - -func ToCtx(ctx context.Context, lg *zap.Logger) context.Context { - return context.WithValue(ctx, ctxLogKey{}, lg) -} - -func Ctx(ctx context.Context) *zap.Logger { - lg, ok := ctx.Value(ctxLogKey{}).(*zap.Logger) - if !ok || lg == nil { - zap.L().Error("no logger in context, using global") - return zap.L() - } - - return lg -} diff --git a/internal/service/deps.go b/internal/service/deps.go deleted file mode 100644 index 80a945c..0000000 --- a/internal/service/deps.go +++ /dev/null @@ -1,12 +0,0 @@ -package service - -import ( - "context" - "time" -) - -type Grib interface { - Update(ctx context.Context) error - Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) - Close() error -} diff --git a/internal/service/predictor.go b/internal/service/predictor.go deleted file mode 100644 index 9138c5e..0000000 --- a/internal/service/predictor.go +++ /dev/null @@ -1,684 +0,0 @@ -package service - -import ( - "context" - "encoding/base64" - "encoding/json" - "math" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/ds" - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "go.uber.org/zap" -) - -var ErrInvalidParameters = errcodes.New(400, "missing required prediction parameters") - -// Stage represents a prediction stage (ascent, descent, float) -type Stage struct { - Name string - Results []ds.PredicitonResult - StartTime time.Time - EndTime time.Time -} - -// 
shouldSimulateStage checks if a given stage should be simulated based on the SimulateStages filter -func shouldSimulateStage(params ds.PredictionParameters, stage string) bool { - // If no filter is specified, simulate all stages - if len(params.SimulateStages) == 0 { - return true - } - - // Check if the stage is in the filter list - for _, s := range params.SimulateStages { - if s == stage { - return true - } - } - return false -} - -// CustomCurve represents a custom ascent/descent curve -type CustomCurve struct { - Altitude []float64 `json:"altitude"` - Time []float64 `json:"time"` // seconds from start -} - -func (s *Service) PerformPrediction(ctx context.Context, params ds.PredictionParameters) ([]ds.PredicitonResult, error) { - // Validate required parameters - if params.LaunchLatitude == nil || params.LaunchLongitude == nil || params.LaunchAltitude == nil || params.LaunchDatetime == nil { - return nil, ErrInvalidParameters - } - - // Get default values - profile := "standard_profile" - if params.Profile != nil { - profile = *params.Profile - } - - ascentRate := 5.0 - if params.AscentRate != nil { - ascentRate = *params.AscentRate - } - - burstAltitude := 30000.0 - if params.BurstAltitude != nil { - burstAltitude = *params.BurstAltitude - } - - descentRate := 5.0 - if params.DescentRate != nil { - descentRate = *params.DescentRate - } - - floatAltitude := 0.0 - if params.FloatAltitude != nil { - floatAltitude = *params.FloatAltitude - } - - // Parse custom curves if provided - var ascentCurve, descentCurve *CustomCurve - if params.AscentCurve != nil && *params.AscentCurve != "" { - if curve, err := parseCustomCurve(*params.AscentCurve); err == nil { - ascentCurve = curve - } - } - if params.DescentCurve != nil && *params.DescentCurve != "" { - if curve, err := parseCustomCurve(*params.DescentCurve); err == nil { - descentCurve = curve - } - } - - log.Ctx(ctx).Warn("🚀 PREDICTION STARTING", - zap.String("profile", profile), - zap.Float64("lat", 
*params.LaunchLatitude), - zap.Float64("lon", *params.LaunchLongitude), - zap.Float64("alt", *params.LaunchAltitude), - zap.Time("time", *params.LaunchDatetime), - ) - - var allResults []ds.PredicitonResult - - switch profile { - case "standard_profile": - allResults = s.standardProfile(ctx, params, ascentRate, burstAltitude, descentRate, ascentCurve, descentCurve) - case "float_profile": - allResults = s.floatProfile(ctx, params, ascentRate, burstAltitude, floatAltitude, descentRate, ascentCurve, descentCurve) - case "reverse_profile": - allResults = s.reverseProfile(ctx, params, ascentRate, burstAltitude, descentRate, ascentCurve, descentCurve) - case "custom_profile": - allResults = s.customProfile(ctx, params, ascentCurve, descentCurve) - default: - return nil, errcodes.New(400, "unsupported profile: "+profile) - } - - log.Ctx(ctx).Info("Prediction complete", zap.Int("total_steps", len(allResults))) - return allResults, nil -} - -func (s *Service) standardProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { - var results []ds.PredicitonResult - var lastResult ds.PredicitonResult - - // Stage 1: Ascent - if shouldSimulateStage(params, "ascent") { - ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve) - results = append(results, ascentResults...) 
- if len(ascentResults) > 0 { - lastResult = ascentResults[len(ascentResults)-1] - } - } else { - // If ascent is skipped, use initial position as starting point - lastResult = ds.PredicitonResult{ - Latitude: params.LaunchLatitude, - Longitude: params.LaunchLongitude, - Altitude: &burstAltitude, - Timestamp: params.LaunchDatetime, - } - } - - // Stage 2: Descent - if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil { - descentParams := ds.PredictionParameters{ - LaunchLatitude: lastResult.Latitude, - LaunchLongitude: lastResult.Longitude, - LaunchAltitude: lastResult.Altitude, - LaunchDatetime: lastResult.Timestamp, - } - - descentResults := s.simulateDescent(ctx, descentParams, descentRate, 0, descentCurve) - results = append(results, descentResults...) - } - - return results -} - -func (s *Service) floatProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, floatAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { - var results []ds.PredicitonResult - var lastResult ds.PredicitonResult - - // Stage 1: Ascent to float altitude - if shouldSimulateStage(params, "ascent") { - ascentResults := s.simulateAscent(ctx, params, ascentRate, floatAltitude, ascentCurve) - results = append(results, ascentResults...) - if len(ascentResults) > 0 { - lastResult = ascentResults[len(ascentResults)-1] - } - } else { - // If ascent is skipped, use initial position at float altitude as starting point - lastResult = ds.PredicitonResult{ - Latitude: params.LaunchLatitude, - Longitude: params.LaunchLongitude, - Altitude: &floatAltitude, - Timestamp: params.LaunchDatetime, - } - } - - // Stage 2: Float (simulate for some time) - if shouldSimulateStage(params, "float") && lastResult.Latitude != nil { - floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute) // Float for 30 minutes - results = append(results, floatResults...) 
- if len(floatResults) > 0 { - lastResult = floatResults[len(floatResults)-1] - } - } - - // Stage 3: Descent - if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil { - descentParams := ds.PredictionParameters{ - LaunchLatitude: lastResult.Latitude, - LaunchLongitude: lastResult.Longitude, - LaunchAltitude: lastResult.Altitude, - LaunchDatetime: lastResult.Timestamp, - } - - descentResults := s.simulateDescent(ctx, descentParams, descentRate, 0, descentCurve) - results = append(results, descentResults...) - } - - return results -} - -func (s *Service) reverseProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { - var results []ds.PredicitonResult - var lastResult ds.PredicitonResult - - // Stage 1: Ascent - if shouldSimulateStage(params, "ascent") { - ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve) - results = append(results, ascentResults...) - if len(ascentResults) > 0 { - lastResult = ascentResults[len(ascentResults)-1] - } - } else { - // If ascent is skipped, use initial position at burst altitude as starting point - lastResult = ds.PredicitonResult{ - Latitude: params.LaunchLatitude, - Longitude: params.LaunchLongitude, - Altitude: &burstAltitude, - Timestamp: params.LaunchDatetime, - } - } - - // Stage 2: Descent to float altitude - floatAlt := 0.0 - if params.FloatAltitude != nil { - floatAlt = *params.FloatAltitude - } - - if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil { - descentParams := ds.PredictionParameters{ - LaunchLatitude: lastResult.Latitude, - LaunchLongitude: lastResult.Longitude, - LaunchAltitude: lastResult.Altitude, - LaunchDatetime: lastResult.Timestamp, - } - - descentResults := s.simulateDescent(ctx, descentParams, descentRate, floatAlt, descentCurve) - results = append(results, descentResults...) 
- if len(descentResults) > 0 { - lastResult = descentResults[len(descentResults)-1] - } - } else if floatAlt > 0 { - // If descent is skipped but we need to float, position at float altitude - lastResult = ds.PredicitonResult{ - Latitude: lastResult.Latitude, - Longitude: lastResult.Longitude, - Altitude: &floatAlt, - Timestamp: lastResult.Timestamp, - } - } - - // Stage 3: Float - if shouldSimulateStage(params, "float") && floatAlt > 0 && lastResult.Latitude != nil { - floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute) - results = append(results, floatResults...) - } - - return results -} - -func (s *Service) customProfile(ctx context.Context, params ds.PredictionParameters, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult { - var results []ds.PredicitonResult - var lastResult ds.PredicitonResult - - // Custom ascent - if shouldSimulateStage(params, "ascent") && ascentCurve != nil { - ascentResults := s.simulateCustomAscent(ctx, params, ascentCurve) - results = append(results, ascentResults...) - if len(ascentResults) > 0 { - lastResult = ascentResults[len(ascentResults)-1] - } - } else if len(results) == 0 { - // If ascent is skipped, use initial position - lastResult = ds.PredicitonResult{ - Latitude: params.LaunchLatitude, - Longitude: params.LaunchLongitude, - Altitude: params.LaunchAltitude, - Timestamp: params.LaunchDatetime, - } - } - - // Custom descent - if shouldSimulateStage(params, "descent") && descentCurve != nil && lastResult.Latitude != nil { - descentParams := ds.PredictionParameters{ - LaunchLatitude: lastResult.Latitude, - LaunchLongitude: lastResult.Longitude, - LaunchAltitude: lastResult.Altitude, - LaunchDatetime: lastResult.Timestamp, - } - - descentResults := s.simulateCustomDescent(ctx, descentParams, descentCurve) - results = append(results, descentResults...) 
- } - - return results -} - -func rk4Step(lat, lon, alt float64, t time.Time, dt float64, windFunc func(lat, lon, alt float64, t time.Time) (float64, float64), altRate float64) (float64, float64, float64) { - // Helper for RK4 integration step - toRad := math.Pi / 180.0 - toDeg := 180.0 / math.Pi - R := func(alt float64) float64 { return 6371009.0 + alt } - - f := func(lat, lon, alt float64, t time.Time) (float64, float64, float64) { - windU, windV := windFunc(lat, lon, alt, t) - Rnow := R(alt) - dlat := toDeg * windV / Rnow - dlon := toDeg * windU / (Rnow * math.Cos(lat*toRad)) - return dlat, dlon, altRate - } - - k1_lat, k1_lon, k1_alt := f(lat, lon, alt, t) - k2_lat, k2_lon, k2_alt := f(lat+0.5*k1_lat*dt, lon+0.5*k1_lon*dt, alt+0.5*k1_alt*dt, t.Add(time.Duration(0.5*dt)*time.Second)) - k3_lat, k3_lon, k3_alt := f(lat+0.5*k2_lat*dt, lon+0.5*k2_lon*dt, alt+0.5*k2_alt*dt, t.Add(time.Duration(0.5*dt)*time.Second)) - k4_lat, k4_lon, k4_alt := f(lat+k3_lat*dt, lon+k3_lon*dt, alt+k3_alt*dt, t.Add(time.Duration(dt)*time.Second)) - - latNew := lat + (dt/6.0)*(k1_lat+2*k2_lat+2*k3_lat+k4_lat) - lonNew := lon + (dt/6.0)*(k1_lon+2*k2_lon+2*k3_lon+k4_lon) - altNew := alt + (dt/6.0)*(k1_alt+2*k2_alt+2*k3_alt+k4_alt) - return latNew, lonNew, altNew -} - -func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParameters, ascentRate, targetAltitude float64, customCurve *CustomCurve) []ds.PredicitonResult { - const dt = 10.0 // simulation step in seconds - const outputInterval = 60.0 // output every 60 seconds - - log.Ctx(ctx).Warn("⬆️ ASCENT SIMULATION STARTING", - zap.Float64("ascentRate", ascentRate), - zap.Float64("targetAlt", targetAltitude)) - - lat := *params.LaunchLatitude - lon := *params.LaunchLongitude - alt := *params.LaunchAltitude - timeCur := *params.LaunchDatetime - - results := make([]ds.PredicitonResult, 0, 1000) - - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - wind := [2]float64{0, 0} - windU := wind[0] - windV := 
wind[1] - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - - nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second) - firstExtraction := true - windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) { - w, err := s.ExtractWind(ctx, lat, lon, alt, t) - if err != nil { - log.Ctx(ctx).Error("Wind extraction FAILED during ascent", - zap.Error(err), - zap.Float64("lat", lat), - zap.Float64("lon", lon), - zap.Float64("alt", alt), - zap.Time("time", t)) - return 0, 0 - } - // Log only first extraction and when wind is zero - if firstExtraction || (w[0] == 0 && w[1] == 0) { - log.Ctx(ctx).Warn("Wind data check", - zap.Bool("first", firstExtraction), - zap.Float64("lat", lat), - zap.Float64("lon", lon), - zap.Float64("alt", alt), - zap.Float64("u", w[0]), - zap.Float64("v", w[1])) - firstExtraction = false - } - return w[0], w[1] - } - - for alt < targetAltitude { - altRate := ascentRate - if customCurve != nil { - altRate = s.getCustomAltitudeRate(customCurve, alt, ascentRate) - } - latNew, lonNew, altNew := rk4Step(lat, lon, alt, timeCur, dt, windFunc, altRate) - timeCur = timeCur.Add(time.Duration(dt) * time.Second) - lat = latNew - lon = lonNew - alt = altNew - - if alt >= targetAltitude { - alt = targetAltitude - // Record burst point - wU, wV := windFunc(lat, lon, alt, timeCur) - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - windU := wU - windV := wV - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - break - } - - if !timeCur.Before(nextOutputTime) { - wU, wV := windFunc(lat, lon, alt, timeCur) - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - windU := wU - windV := wV - results = append(results, ds.PredicitonResult{ - 
Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - nextOutputTime = nextOutputTime.Add(time.Duration(outputInterval) * time.Second) - } - } - - return results -} - -func (s *Service) simulateDescent(ctx context.Context, params ds.PredictionParameters, descentRate, targetAltitude float64, customCurve *CustomCurve) []ds.PredicitonResult { - const dt = 10.0 // simulation step in seconds - const outputInterval = 60.0 // output every 60 seconds - - lat := *params.LaunchLatitude - lon := *params.LaunchLongitude - alt := *params.LaunchAltitude - timeCur := *params.LaunchDatetime - - results := make([]ds.PredicitonResult, 0, 1000) - - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - wind := [2]float64{0, 0} - windU := wind[0] - windV := wind[1] - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - - nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second) - windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) { - w, err := s.ExtractWind(ctx, lat, lon, alt, t) - if err != nil { - log.Ctx(ctx).Error("Wind extraction FAILED during descent", - zap.Error(err), - zap.Float64("lat", lat), - zap.Float64("lon", lon), - zap.Float64("alt", alt), - zap.Time("time", t)) - return 0, 0 - } - return w[0], w[1] - } - - for alt > targetAltitude { - altRate := -descentRateAtAlt(descentRate, alt) - if customCurve != nil { - altRate = -s.getCustomAltitudeRate(customCurve, alt, descentRate) - } - latNew, lonNew, altNew := rk4Step(lat, lon, alt, timeCur, dt, windFunc, altRate) - timeCur = timeCur.Add(time.Duration(dt) * time.Second) - lat = latNew - lon = lonNew - alt = altNew - - if alt <= targetAltitude { - alt = targetAltitude - // Record landing point - wU, wV := windFunc(lat, lon, alt, timeCur) - latCopy := lat - lonCopy := lon 
- altCopy := alt - timeCopy := timeCur - windU := wU - windV := wV - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - break - } - - if !timeCur.Before(nextOutputTime) { - wU, wV := windFunc(lat, lon, alt, timeCur) - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - windU := wU - windV := wV - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - nextOutputTime = nextOutputTime.Add(time.Duration(outputInterval) * time.Second) - } - } - - return results -} - -func (s *Service) simulateFloat(ctx context.Context, startResult ds.PredicitonResult, duration time.Duration) []ds.PredicitonResult { - const dt = 10.0 // simulation step in seconds - const outputInterval = 60.0 // output every 60 seconds - - lat := *startResult.Latitude - lon := *startResult.Longitude - alt := *startResult.Altitude - timeCur := *startResult.Timestamp - endTime := timeCur.Add(duration) - - results := make([]ds.PredicitonResult, 0, 1000) - - // Always include the initial float point - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - wind := [2]float64{0, 0} - windU := wind[0] - windV := wind[1] - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - - var nextOutputTime = timeCur.Add(time.Duration(outputInterval) * time.Second) - - for timeCur.Before(endTime) { - wind, err := s.ExtractWind(ctx, lat, lon, alt, timeCur) - if err != nil { - log.Ctx(ctx).Warn("Wind extraction failed during float", zap.Error(err)) - break - } - - latDot := (wind[1] / 111320.0) - lonDot := (wind[0] / (40075000.0 * math.Cos(lat*math.Pi/180) / 360.0)) - - lat += latDot * dt - lon += lonDot * dt - // 
alt remains constant during float - timeCur = timeCur.Add(time.Duration(dt) * time.Second) - - if !timeCur.Before(nextOutputTime) { - latCopy := lat - lonCopy := lon - altCopy := alt - timeCopy := timeCur - windU := wind[0] - windV := wind[1] - results = append(results, ds.PredicitonResult{ - Latitude: &latCopy, - Longitude: &lonCopy, - Altitude: &altCopy, - Timestamp: &timeCopy, - WindU: &windU, - WindV: &windV, - }) - nextOutputTime = nextOutputTime.Add(time.Duration(outputInterval) * time.Second) - } - } - - return results -} - -// airDensity returns ISA air density in kg/m³ at given altitude in meters -func airDensity(h float64) float64 { - var T, p float64 - switch { - case h < 11000: - T = 288.15 - 0.0065*h - p = 101325 * math.Pow(T/288.15, 5.2561) - case h < 20000: - T = 216.65 - p = 22632.1 * math.Exp(-0.00015769*(h-11000)) - case h < 32000: - T = 216.65 + 0.001*(h-20000) - p = 5474.89 * math.Pow(T/216.65, -34.1632) - default: - T = 228.65 + 0.0028*(h-32000) - p = 868.019 * math.Pow(T/228.65, -12.2009) - } - return p / (287.05 * T) -} - -// descentRateAtAlt returns descent rate adjusted for air density at altitude. -// descent_rate parameter is the sea-level rate. At altitude, thinner air means faster descent. 
-func descentRateAtAlt(seaLevelRate, alt float64) float64 { - rho0 := airDensity(0) - rhoH := airDensity(alt) - if rhoH <= 0 { - return seaLevelRate - } - return seaLevelRate * math.Sqrt(rho0/rhoH) -} - -func (s *Service) simulateCustomAscent(ctx context.Context, params ds.PredictionParameters, curve *CustomCurve) []ds.PredicitonResult { - // Implementation for custom ascent curve - // This would interpolate the altitude rate from the custom curve - return s.simulateAscent(ctx, params, 5.0, 30000.0, curve) -} - -func (s *Service) simulateCustomDescent(ctx context.Context, params ds.PredictionParameters, curve *CustomCurve) []ds.PredicitonResult { - // Implementation for custom descent curve - // This would interpolate the altitude rate from the custom curve - return s.simulateDescent(ctx, params, 5.0, 0.0, curve) -} - -func (s *Service) getCustomAltitudeRate(curve *CustomCurve, currentAltitude, defaultRate float64) float64 { - if curve == nil || len(curve.Altitude) < 2 { - return defaultRate - } - - // Find the two points in the curve that bracket the current altitude - for i := 0; i < len(curve.Altitude)-1; i++ { - if curve.Altitude[i] <= currentAltitude && currentAltitude <= curve.Altitude[i+1] { - // Linear interpolation - alt1, alt2 := curve.Altitude[i], curve.Altitude[i+1] - time1, time2 := curve.Time[i], curve.Time[i+1] - - if alt2 == alt1 { - return defaultRate - } - - // Calculate rate (change in altitude per second) - if time2 > time1 { - return (alt2 - alt1) / (time2 - time1) - } - return defaultRate - } - } - - return defaultRate -} - -func parseCustomCurve(base64Data string) (*CustomCurve, error) { - data, err := base64.StdEncoding.DecodeString(base64Data) - if err != nil { - return nil, err - } - - var curve CustomCurve - if err := json.Unmarshal(data, &curve); err != nil { - return nil, err - } - - return &curve, nil -} diff --git a/internal/service/predictor_test.go b/internal/service/predictor_test.go deleted file mode 100644 index 66ab76e..0000000 
--- a/internal/service/predictor_test.go +++ /dev/null @@ -1,492 +0,0 @@ -package service - -import ( - "context" - "testing" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/ds" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// MockGrib is a mock implementation of the Grib interface -type MockGrib struct { - mock.Mock -} - -func (m *MockGrib) Update(ctx context.Context) error { - args := m.Called(ctx) - return args.Error(0) -} - -func (m *MockGrib) Extract(ctx context.Context, lat, lon, alt float64, t time.Time) ([2]float64, error) { - args := m.Called(ctx, lat, lon, alt, t) - return args.Get(0).([2]float64), args.Error(1) -} - -func (m *MockGrib) Close() error { - args := m.Called() - return args.Error(0) -} - -// Helper function to create a test service with mocked GRIB -func createTestService() (*Service, *MockGrib) { - mockGrib := new(MockGrib) - - // Default mock behavior: return constant wind (5 m/s east, 3 m/s north) - mockGrib.On("Extract", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
- Return([2]float64{5.0, 3.0}, nil) - - service := &Service{ - grib: mockGrib, - } - - return service, mockGrib -} - -// Helper function to create basic prediction parameters -func createBasicParams() ds.PredictionParameters { - lat := 40.0 - lon := -105.0 - alt := 1000.0 - launchTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC) - profile := "standard_profile" - ascentRate := 5.0 - burstAltitude := 10000.0 - descentRate := 5.0 - - return ds.PredictionParameters{ - LaunchLatitude: &lat, - LaunchLongitude: &lon, - LaunchAltitude: &alt, - LaunchDatetime: &launchTime, - Profile: &profile, - AscentRate: &ascentRate, - BurstAltitude: &burstAltitude, - DescentRate: &descentRate, - } -} - -func TestRestrictedPrediction_OnlyAscent(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - // Restrict to ascent only - params.SimulateStages = []string{"ascent"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // Verify all results are during ascent phase (altitude increasing) - for i := 1; i < len(results); i++ { - assert.GreaterOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, - "Altitude should be increasing or equal during ascent") - } - - // Last altitude should be near burst altitude - lastAlt := *results[len(results)-1].Altitude - burstAlt := *params.BurstAltitude - assert.InDelta(t, burstAlt, lastAlt, 500.0, "Last altitude should be near burst altitude") -} - -func TestRestrictedPrediction_OnlyDescent(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - // Restrict to descent only - params.SimulateStages = []string{"descent"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // First result should be at burst altitude (since ascent was skipped) - firstAlt := *results[0].Altitude - burstAlt := *params.BurstAltitude - 
assert.Equal(t, burstAlt, firstAlt, "Should start at burst altitude when ascent is skipped") - - // Verify all results are during descent phase (altitude decreasing) - for i := 1; i < len(results); i++ { - assert.LessOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, - "Altitude should be decreasing or equal during descent") - } - - // Last altitude should be near ground - lastAlt := *results[len(results)-1].Altitude - assert.Less(t, lastAlt, 1000.0, "Last altitude should be near ground") -} - -func TestRestrictedPrediction_AscentAndDescent(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - // Include both ascent and descent - params.SimulateStages = []string{"ascent", "descent"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // Find the peak altitude (transition point) - maxAlt := 0.0 - maxIdx := 0 - for i, result := range results { - if *result.Altitude > maxAlt { - maxAlt = *result.Altitude - maxIdx = i - } - } - - // Verify ascent phase - for i := 1; i <= maxIdx; i++ { - assert.GreaterOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, - "Altitude should increase during ascent phase") - } - - // Verify descent phase - for i := maxIdx + 1; i < len(results); i++ { - assert.LessOrEqual(t, *results[i].Altitude, *results[i-1].Altitude, - "Altitude should decrease during descent phase") - } -} - -func TestRestrictedPrediction_FloatProfile_OnlyFloat(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - profile := "float_profile" - floatAlt := 15000.0 - params.Profile = &profile - params.FloatAltitude = &floatAlt - - // Restrict to float only - params.SimulateStages = []string{"float"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // All results should be at the float altitude - for _, result := range results { 
- assert.Equal(t, floatAlt, *result.Altitude, - "Altitude should remain constant at float altitude") - } - - // Verify horizontal movement (lat/lon changes due to wind) - firstLat := *results[0].Latitude - lastLat := *results[len(results)-1].Latitude - assert.NotEqual(t, firstLat, lastLat, "Latitude should change during float due to wind") -} - -func TestRestrictedPrediction_FloatProfile_AllStages(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - profile := "float_profile" - floatAlt := 15000.0 - params.Profile = &profile - params.FloatAltitude = &floatAlt - - // Include all stages - params.SimulateStages = []string{"ascent", "float", "descent"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // Verify we have ascending, constant, and descending altitude patterns - hasAscent := false - hasFloat := false - hasDescent := false - - const altTolerance = 50.0 // Tolerance for altitude comparison - - for i := 1; i < len(results); i++ { - altDiff := *results[i].Altitude - *results[i-1].Altitude - - if altDiff > altTolerance { - hasAscent = true - } else if altDiff < -altTolerance { - hasDescent = true - } else if *results[i].Altitude > 10000 { // Float happens at high altitude - hasFloat = true - } - } - - assert.True(t, hasAscent, "Should have ascent phase") - assert.True(t, hasFloat, "Should have float phase") - assert.True(t, hasDescent, "Should have descent phase") - - // Verify maximum altitude is near float altitude - maxAlt := 0.0 - for _, result := range results { - if *result.Altitude > maxAlt { - maxAlt = *result.Altitude - } - } - assert.InDelta(t, floatAlt, maxAlt, 1000.0, "Max altitude should be near float altitude") -} - -func TestRestrictedPrediction_ReverseProfile_OnlyFloat(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - profile := "reverse_profile" - floatAlt := 5000.0 - params.Profile = 
&profile - params.FloatAltitude = &floatAlt - - // Restrict to float only - params.SimulateStages = []string{"float"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // All results should be at the float altitude - for _, result := range results { - assert.InDelta(t, floatAlt, *result.Altitude, 10.0, - "Altitude should remain near float altitude") - } -} - -func TestRestrictedPrediction_EmptyStages_SimulatesAll(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - // Empty SimulateStages should simulate all stages - params.SimulateStages = []string{} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // Should have both ascent and descent - // Find the peak - maxAlt := 0.0 - hasAscent := false - hasDescent := false - - for i := 1; i < len(results); i++ { - if *results[i].Altitude > *results[i-1].Altitude { - hasAscent = true - } - if *results[i].Altitude < *results[i-1].Altitude { - hasDescent = true - } - if *results[i].Altitude > maxAlt { - maxAlt = *results[i].Altitude - } - } - - assert.True(t, hasAscent, "Should have ascent phase") - assert.True(t, hasDescent, "Should have descent phase") -} - -func TestRestrictedPrediction_NilStages_SimulatesAll(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - // Nil SimulateStages should simulate all stages - params.SimulateStages = nil - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // Should have both ascent and descent - maxAlt := 0.0 - minAltAfterMax := 1000000.0 - - for _, result := range results { - if *result.Altitude > maxAlt { - maxAlt = *result.Altitude - } - } - - foundMax := false - for _, result := range results { - if *result.Altitude == maxAlt { - foundMax = true - } - if 
foundMax && *result.Altitude < minAltAfterMax { - minAltAfterMax = *result.Altitude - } - } - - // Should reach high altitude and come back down - assert.Greater(t, maxAlt, 5000.0, "Should reach high altitude") - assert.Less(t, minAltAfterMax, maxAlt, "Should descend after reaching max altitude") -} - -func TestRestrictedPrediction_InvalidStage_IgnoresInvalid(t *testing.T) { - service, _ := createTestService() - params := createBasicParams() - - // Include invalid stage name (should be ignored) - params.SimulateStages = []string{"ascent", "invalid_stage", "descent"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - // Should still simulate ascent and descent, ignoring the invalid stage -} - -func TestRestrictedPrediction_WindImpact(t *testing.T) { - service, mockGrib := createTestService() - - // Override mock to return strong eastward wind - mockGrib.ExpectedCalls = nil - mockGrib.On("Extract", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
- Return([2]float64{20.0, 0.0}, nil) // Strong eastward wind - - params := createBasicParams() - params.SimulateStages = []string{"ascent"} - - results, err := service.PerformPrediction(context.Background(), params) - - assert.NoError(t, err) - assert.NotEmpty(t, results) - - // Longitude should increase significantly due to eastward wind - firstLon := *results[0].Longitude - lastLon := *results[len(results)-1].Longitude - assert.Greater(t, lastLon, firstLon, "Longitude should increase with eastward wind") - - // Verify wind values are captured in results - for _, result := range results { - if result.WindU != nil { - // Wind values should be present in results - assert.NotNil(t, result.WindV, "WindV should be present if WindU is present") - } - } -} - -func TestRestrictedPrediction_MissingRequiredParams(t *testing.T) { - service, _ := createTestService() - - testCases := []struct { - name string - params ds.PredictionParameters - }{ - { - name: "Missing latitude", - params: ds.PredictionParameters{ - LaunchLongitude: floatPtr(-105.0), - LaunchAltitude: floatPtr(1000.0), - LaunchDatetime: timePtr(time.Now()), - }, - }, - { - name: "Missing longitude", - params: ds.PredictionParameters{ - LaunchLatitude: floatPtr(40.0), - LaunchAltitude: floatPtr(1000.0), - LaunchDatetime: timePtr(time.Now()), - }, - }, - { - name: "Missing altitude", - params: ds.PredictionParameters{ - LaunchLatitude: floatPtr(40.0), - LaunchLongitude: floatPtr(-105.0), - LaunchDatetime: timePtr(time.Now()), - }, - }, - { - name: "Missing datetime", - params: ds.PredictionParameters{ - LaunchLatitude: floatPtr(40.0), - LaunchLongitude: floatPtr(-105.0), - LaunchAltitude: floatPtr(1000.0), - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - tc.params.SimulateStages = []string{"ascent"} - results, err := service.PerformPrediction(context.Background(), tc.params) - - assert.Error(t, err) - assert.Equal(t, ErrInvalidParameters, err) - assert.Nil(t, results) - }) - 
} -} - -func TestShouldSimulateStage(t *testing.T) { - testCases := []struct { - name string - stages []string - queryStage string - shouldSimulate bool - }{ - { - name: "Empty filter simulates all", - stages: []string{}, - queryStage: "ascent", - shouldSimulate: true, - }, - { - name: "Nil filter simulates all", - stages: nil, - queryStage: "descent", - shouldSimulate: true, - }, - { - name: "Stage in filter", - stages: []string{"ascent", "descent"}, - queryStage: "ascent", - shouldSimulate: true, - }, - { - name: "Stage not in filter", - stages: []string{"ascent"}, - queryStage: "descent", - shouldSimulate: false, - }, - { - name: "Float stage in filter", - stages: []string{"float"}, - queryStage: "float", - shouldSimulate: true, - }, - { - name: "Multiple stages excluding one", - stages: []string{"ascent", "float"}, - queryStage: "descent", - shouldSimulate: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - params := ds.PredictionParameters{ - SimulateStages: tc.stages, - } - result := shouldSimulateStage(params, tc.queryStage) - assert.Equal(t, tc.shouldSimulate, result) - }) - } -} - -// Helper functions -func floatPtr(f float64) *float64 { - return &f -} - -func timePtr(t time.Time) *time.Time { - return &t -} diff --git a/internal/service/service.go b/internal/service/service.go deleted file mode 100644 index 3496f96..0000000 --- a/internal/service/service.go +++ /dev/null @@ -1,60 +0,0 @@ -package service - -import ( - "context" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" -) - -type Service struct { - grib Grib -} - -func New(gribService Grib) (*Service, error) { - svc := &Service{ - grib: gribService, - } - - return svc, nil -} - -// UpdateWeatherData updates weather forecast data using the configured grib service -func (s *Service) UpdateWeatherData(ctx context.Context) error { - return s.grib.Update(ctx) -} - -// ExtractWind extracts wind data for given coordinates and time -func (s 
*Service) ExtractWind(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) { - return s.grib.Extract(ctx, lat, lon, alt, ts) -} - -// Update updates the GRIB data (implements updater.GribService) -func (s *Service) Update(ctx context.Context) error { - return s.UpdateWeatherData(ctx) -} - -// Start starts the service -func (s *Service) Start() { - log.Ctx(context.Background()).Info("service started") -} - -// Stop stops the service -func (s *Service) Stop() { - log.Ctx(context.Background()).Info("service stopped") -} - -// Close closes the service and releases resources -func (s *Service) Close() error { - s.Stop() - return nil -} - -func (s *Service) GetGribStatus(ctx context.Context) (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) { - if gribStatus, ok := s.grib.(interface { - GetStatus() (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) - }); ok { - return gribStatus.GetStatus() - } - return false, time.Time{}, false, "grib service does not implement GetStatus" -} diff --git a/internal/transport/middleware/log.go b/internal/transport/middleware/log.go deleted file mode 100644 index ebf2003..0000000 --- a/internal/transport/middleware/log.go +++ /dev/null @@ -1,44 +0,0 @@ -package middleware - -import ( - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "github.com/ogen-go/ogen/middleware" - "go.uber.org/zap" -) - -func Logging() middleware.Middleware { - return func(req middleware.Request, next func(req middleware.Request) (middleware.Response, error)) (middleware.Response, error) { - lg := log.Ctx(req.Context).With( - zap.String("operationId", req.OperationID), - ) - - lg.Info("started request") - - req.Context = log.ToCtx(req.Context, lg) - - start := time.Now() - resp, err := next(req) - dur := time.Since(start).Microseconds() - - if err != nil { - if errcode, ok := err.(*errcodes.ErrorCode); ok { - lg.Error("request error", - 
zap.Int("status_code", errcode.StatusCode), - zap.String("message", errcode.Message), - zap.String("details", errcode.Details), - ) - } else { - lg.Error("request internal error", - zap.Error(err), - ) - } - } - - lg.Info("done request", zap.Float64("duration_ms", float64(dur)/float64(1000))) - - return resp, err - } -} diff --git a/internal/transport/rest/config.go b/internal/transport/rest/config.go deleted file mode 100644 index 1b55959..0000000 --- a/internal/transport/rest/config.go +++ /dev/null @@ -1,24 +0,0 @@ -package rest - -import ( - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - env "github.com/caarlos0/env/v11" -) - -type Config struct { - Host string `env:"HOST" envDefault:"0.0.0.0"` - Port int `env:"PORT" envDefault:"8080"` - ReadTimeout string `env:"READ_TIMEOUT" envDefault:"30s"` - WriteTimeout string `env:"WRITE_TIMEOUT" envDefault:"30s"` - IdleTimeout string `env:"IDLE_TIMEOUT" envDefault:"60s"` -} - -func NewConfig() (*Config, error) { - cfg := &Config{} - if err := env.ParseWithOptions(cfg, env.Options{ - PrefixTagName: "GSN_PREDICTOR_REST_", - }); err != nil { - return nil, errcodes.Wrap(err, "failed to parse REST config") - } - return cfg, nil -} diff --git a/internal/transport/rest/handler/deps.go b/internal/transport/rest/handler/deps.go deleted file mode 100644 index 2d930cb..0000000 --- a/internal/transport/rest/handler/deps.go +++ /dev/null @@ -1,14 +0,0 @@ -package handler - -import ( - "context" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/ds" -) - -type Service interface { - UpdateWeatherData(ctx context.Context) error - ExtractWind(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) - PerformPrediction(ctx context.Context, params ds.PredictionParameters) ([]ds.PredicitonResult, error) -} diff --git a/internal/transport/rest/handler/handler.go b/internal/transport/rest/handler/handler.go deleted file mode 100644 index 5b7141d..0000000 --- 
a/internal/transport/rest/handler/handler.go +++ /dev/null @@ -1,194 +0,0 @@ -package handler - -import ( - "context" - "net/http" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/ds" - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - api "git.intra.yksa.space/gsn/predictor/pkg/rest" -) - -var ( - _ api.Handler = (*Handler)(nil) -) - -type Handler struct { - svc Service -} - -func New(svc Service) *Handler { - return &Handler{ - svc: svc, - } -} - -func (h *Handler) PerformPrediction(ctx context.Context, params api.PerformPredictionParams) (*api.PredictionResult, error) { - internalParams := ds.ConvertFlatPredictionParams(params) - if internalParams == nil { - return nil, errcodes.New(http.StatusBadRequest, "invalid or missing parameters") - } - results, err := h.svc.PerformPrediction(ctx, *internalParams) - if err != nil { - return nil, err - } - if len(results) == 0 { - return nil, errcodes.New(http.StatusInternalServerError, "no prediction results") - } - - // Group results into stages (ascent and descent) - stages := h.groupResultsIntoStages(results) - - // Map to OpenAPI schema - var predictionItems []api.PredictionResultPredictionItem - - for _, stage := range stages { - var trajectory []api.PredictionResultPredictionItemTrajectoryItem - - for _, result := range stage.Results { - traj := api.PredictionResultPredictionItemTrajectoryItem{ - Datetime: *result.Timestamp, - Latitude: *result.Latitude, - Longitude: *result.Longitude, - Altitude: *result.Altitude, - } - trajectory = append(trajectory, traj) - } - - item := api.PredictionResultPredictionItem{ - Stage: stage.Stage, - Trajectory: trajectory, - } - predictionItems = append(predictionItems, item) - } - - metadata := api.PredictionResultMetadata{ - StartDatetime: *results[0].Timestamp, - CompleteDatetime: *results[len(results)-1].Timestamp, - } - - resp := &api.PredictionResult{ - Metadata: metadata, - Prediction: predictionItems, - } - return resp, nil -} - -// StageResult 
represents a stage with its results -type StageResult struct { - Stage api.PredictionResultPredictionItemStage - Results []ds.PredicitonResult -} - -// groupResultsIntoStages groups the prediction results into ascent and descent stages -func (h *Handler) groupResultsIntoStages(results []ds.PredicitonResult) []StageResult { - if len(results) == 0 { - return nil - } - - var stages []StageResult - var currentStage []ds.PredicitonResult - var currentStageType api.PredictionResultPredictionItemStage - - // Determine if we're in ascent or descent based on altitude changes - prevAlt := *results[0].Altitude - currentStage = append(currentStage, results[0]) - currentStageType = api.PredictionResultPredictionItemStageAscent - - for i := 1; i < len(results); i++ { - result := results[i] - currentAlt := *result.Altitude - - // Determine if we're still in the same stage - var stageType api.PredictionResultPredictionItemStage - if currentAlt > prevAlt { - stageType = api.PredictionResultPredictionItemStageAscent - } else if currentAlt < prevAlt { - stageType = api.PredictionResultPredictionItemStageDescent - } else { - // Same altitude - continue with current stage - stageType = currentStageType - } - - // If stage type changed, finalize current stage and start new one - if stageType != currentStageType && len(currentStage) > 0 { - stages = append(stages, StageResult{ - Stage: currentStageType, - Results: currentStage, - }) - currentStage = nil - currentStageType = stageType - } - - currentStage = append(currentStage, result) - prevAlt = currentAlt - } - - // Add the final stage - if len(currentStage) > 0 { - stages = append(stages, StageResult{ - Stage: currentStageType, - Results: currentStage, - }) - } - - return stages -} - -func (h *Handler) NewError(ctx context.Context, err error) *api.ErrorStatusCode { - if errcode, ok := err.(*errcodes.ErrorCode); ok { - resp := api.Error{ - Message: errcode.Message, - } - - if errcode.Details != "" { - resp.Details = 
api.NewOptString(errcode.Details) - } - - return &api.ErrorStatusCode{ - StatusCode: errcode.StatusCode, - Response: resp, - } - } - - return &api.ErrorStatusCode{ - StatusCode: http.StatusInternalServerError, - Response: api.Error{ - Message: "undefined internal error", - Details: api.NewOptString(err.Error()), - }, - } -} - -func (h *Handler) ReadinessCheck(ctx context.Context) (*api.ReadinessResponse, error) { - status := api.ReadinessResponseStatusNotReady - var lastUpdate time.Time - var isFresh bool - var errMsg string - - if s, ok := h.svc.(interface { - GetGribStatus(ctx context.Context) (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) - }); ok { - ready, lu, fresh, em := s.GetGribStatus(ctx) - lastUpdate = lu - isFresh = fresh - errMsg = em - if ready { - status = api.ReadinessResponseStatusOk - } else if em != "" { - status = api.ReadinessResponseStatusError - } - } else { - errMsg = "service does not implement GetGribStatus" - status = api.ReadinessResponseStatusError - } - - resp := &api.ReadinessResponse{ - Status: status, - IsFresh: api.NewOptBool(isFresh), - LastUpdate: api.NewOptDateTime(lastUpdate), - ErrorMessage: api.NewOptString(errMsg), - } - return resp, nil -} diff --git a/internal/transport/rest/transport.go b/internal/transport/rest/transport.go deleted file mode 100644 index c89cf8f..0000000 --- a/internal/transport/rest/transport.go +++ /dev/null @@ -1,47 +0,0 @@ -package rest - -import ( - "context" - "fmt" - "net/http" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "git.intra.yksa.space/gsn/predictor/internal/transport/middleware" - handler "git.intra.yksa.space/gsn/predictor/internal/transport/rest/handler" - api "git.intra.yksa.space/gsn/predictor/pkg/rest" - "github.com/rs/cors" -) - -type Transport struct { - cfg *Config - srv *api.Server - handler *handler.Handler -} - -func New(handler *handler.Handler, cfg *Config) (*Transport, error) { - srv, err := api.NewServer( - handler, - 
api.WithMiddleware(middleware.Logging()), - ) - if err != nil { - return nil, err - } - - return &Transport{ - srv: srv, - cfg: cfg, - handler: handler, - }, nil -} - -func (t *Transport) Run() { - log.Ctx(context.Background()).Info("started") - - mux := http.NewServeMux() - mux.Handle("/", t.srv) - cors.AllowAll().Handler(mux) - - if err := http.ListenAndServe(fmt.Sprintf(":%d", t.cfg.Port), t.srv); err != nil { - panic(err) - } -} diff --git a/pkg/rest/oas_cfg_gen.go b/pkg/rest/oas_cfg_gen.go deleted file mode 100644 index 6b8f72d..0000000 --- a/pkg/rest/oas_cfg_gen.go +++ /dev/null @@ -1,291 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "net/http" - - ht "github.com/ogen-go/ogen/http" - "github.com/ogen-go/ogen/middleware" - "github.com/ogen-go/ogen/ogenerrors" - "github.com/ogen-go/ogen/otelogen" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -var ( - // Allocate option closure once. - clientSpanKind = trace.WithSpanKind(trace.SpanKindClient) - // Allocate option closure once. - serverSpanKind = trace.WithSpanKind(trace.SpanKindServer) -) - -type ( - optionFunc[C any] func(*C) - otelOptionFunc func(*otelConfig) -) - -type otelConfig struct { - TracerProvider trace.TracerProvider - Tracer trace.Tracer - MeterProvider metric.MeterProvider - Meter metric.Meter - Attributes []attribute.KeyValue -} - -func (cfg *otelConfig) initOTEL() { - if cfg.TracerProvider == nil { - cfg.TracerProvider = otel.GetTracerProvider() - } - if cfg.MeterProvider == nil { - cfg.MeterProvider = otel.GetMeterProvider() - } - cfg.Tracer = cfg.TracerProvider.Tracer(otelogen.Name, - trace.WithInstrumentationVersion(otelogen.SemVersion()), - ) - cfg.Meter = cfg.MeterProvider.Meter(otelogen.Name, - metric.WithInstrumentationVersion(otelogen.SemVersion()), - ) -} - -// ErrorHandler is error handler. 
-type ErrorHandler = ogenerrors.ErrorHandler - -type serverConfig struct { - otelConfig - NotFound http.HandlerFunc - MethodNotAllowed func(w http.ResponseWriter, r *http.Request, allowed string) - ErrorHandler ErrorHandler - Prefix string - Middleware Middleware - MaxMultipartMemory int64 -} - -// ServerOption is server config option. -type ServerOption interface { - applyServer(*serverConfig) -} - -var _ ServerOption = (optionFunc[serverConfig])(nil) - -func (o optionFunc[C]) applyServer(c *C) { - o(c) -} - -var _ ServerOption = (otelOptionFunc)(nil) - -func (o otelOptionFunc) applyServer(c *serverConfig) { - o(&c.otelConfig) -} - -func newServerConfig(opts ...ServerOption) serverConfig { - cfg := serverConfig{ - NotFound: http.NotFound, - MethodNotAllowed: func(w http.ResponseWriter, r *http.Request, allowed string) { - status := http.StatusMethodNotAllowed - if r.Method == "OPTIONS" { - w.Header().Set("Access-Control-Allow-Methods", allowed) - w.Header().Set("Access-Control-Allow-Headers", "Content-Type") - status = http.StatusNoContent - } else { - w.Header().Set("Allow", allowed) - } - w.WriteHeader(status) - }, - ErrorHandler: ogenerrors.DefaultErrorHandler, - Middleware: nil, - MaxMultipartMemory: 32 << 20, // 32 MB - } - for _, opt := range opts { - opt.applyServer(&cfg) - } - cfg.initOTEL() - return cfg -} - -type baseServer struct { - cfg serverConfig - requests metric.Int64Counter - errors metric.Int64Counter - duration metric.Float64Histogram -} - -func (s baseServer) notFound(w http.ResponseWriter, r *http.Request) { - s.cfg.NotFound(w, r) -} - -func (s baseServer) notAllowed(w http.ResponseWriter, r *http.Request, allowed string) { - s.cfg.MethodNotAllowed(w, r, allowed) -} - -func (cfg serverConfig) baseServer() (s baseServer, err error) { - s = baseServer{cfg: cfg} - if s.requests, err = otelogen.ServerRequestCountCounter(s.cfg.Meter); err != nil { - return s, err - } - if s.errors, err = otelogen.ServerErrorsCountCounter(s.cfg.Meter); err != nil { 
- return s, err - } - if s.duration, err = otelogen.ServerDurationHistogram(s.cfg.Meter); err != nil { - return s, err - } - return s, nil -} - -type clientConfig struct { - otelConfig - Client ht.Client -} - -// ClientOption is client config option. -type ClientOption interface { - applyClient(*clientConfig) -} - -var _ ClientOption = (optionFunc[clientConfig])(nil) - -func (o optionFunc[C]) applyClient(c *C) { - o(c) -} - -var _ ClientOption = (otelOptionFunc)(nil) - -func (o otelOptionFunc) applyClient(c *clientConfig) { - o(&c.otelConfig) -} - -func newClientConfig(opts ...ClientOption) clientConfig { - cfg := clientConfig{ - Client: http.DefaultClient, - } - for _, opt := range opts { - opt.applyClient(&cfg) - } - cfg.initOTEL() - return cfg -} - -type baseClient struct { - cfg clientConfig - requests metric.Int64Counter - errors metric.Int64Counter - duration metric.Float64Histogram -} - -func (cfg clientConfig) baseClient() (c baseClient, err error) { - c = baseClient{cfg: cfg} - if c.requests, err = otelogen.ClientRequestCountCounter(c.cfg.Meter); err != nil { - return c, err - } - if c.errors, err = otelogen.ClientErrorsCountCounter(c.cfg.Meter); err != nil { - return c, err - } - if c.duration, err = otelogen.ClientDurationHistogram(c.cfg.Meter); err != nil { - return c, err - } - return c, nil -} - -// Option is config option. -type Option interface { - ServerOption - ClientOption -} - -// WithTracerProvider specifies a tracer provider to use for creating a tracer. -// -// If none is specified, the global provider is used. -func WithTracerProvider(provider trace.TracerProvider) Option { - return otelOptionFunc(func(cfg *otelConfig) { - if provider != nil { - cfg.TracerProvider = provider - } - }) -} - -// WithMeterProvider specifies a meter provider to use for creating a meter. -// -// If none is specified, the otel.GetMeterProvider() is used. 
-func WithMeterProvider(provider metric.MeterProvider) Option { - return otelOptionFunc(func(cfg *otelConfig) { - if provider != nil { - cfg.MeterProvider = provider - } - }) -} - -// WithAttributes specifies default otel attributes. -func WithAttributes(attributes ...attribute.KeyValue) Option { - return otelOptionFunc(func(cfg *otelConfig) { - cfg.Attributes = attributes - }) -} - -// WithClient specifies http client to use. -func WithClient(client ht.Client) ClientOption { - return optionFunc[clientConfig](func(cfg *clientConfig) { - if client != nil { - cfg.Client = client - } - }) -} - -// WithNotFound specifies Not Found handler to use. -func WithNotFound(notFound http.HandlerFunc) ServerOption { - return optionFunc[serverConfig](func(cfg *serverConfig) { - if notFound != nil { - cfg.NotFound = notFound - } - }) -} - -// WithMethodNotAllowed specifies Method Not Allowed handler to use. -func WithMethodNotAllowed(methodNotAllowed func(w http.ResponseWriter, r *http.Request, allowed string)) ServerOption { - return optionFunc[serverConfig](func(cfg *serverConfig) { - if methodNotAllowed != nil { - cfg.MethodNotAllowed = methodNotAllowed - } - }) -} - -// WithErrorHandler specifies error handler to use. -func WithErrorHandler(h ErrorHandler) ServerOption { - return optionFunc[serverConfig](func(cfg *serverConfig) { - if h != nil { - cfg.ErrorHandler = h - } - }) -} - -// WithPathPrefix specifies server path prefix. -func WithPathPrefix(prefix string) ServerOption { - return optionFunc[serverConfig](func(cfg *serverConfig) { - cfg.Prefix = prefix - }) -} - -// WithMiddleware specifies middlewares to use. -func WithMiddleware(m ...Middleware) ServerOption { - return optionFunc[serverConfig](func(cfg *serverConfig) { - switch len(m) { - case 0: - cfg.Middleware = nil - case 1: - cfg.Middleware = m[0] - default: - cfg.Middleware = middleware.ChainMiddlewares(m...) - } - }) -} - -// WithMaxMultipartMemory specifies limit of memory for storing file parts. 
-// File parts which can't be stored in memory will be stored on disk in temporary files. -func WithMaxMultipartMemory(max int64) ServerOption { - return optionFunc[serverConfig](func(cfg *serverConfig) { - if max > 0 { - cfg.MaxMultipartMemory = max - } - }) -} diff --git a/pkg/rest/oas_client_gen.go b/pkg/rest/oas_client_gen.go deleted file mode 100644 index 0484f94..0000000 --- a/pkg/rest/oas_client_gen.go +++ /dev/null @@ -1,520 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "context" - "net/url" - "strings" - "time" - - "github.com/go-faster/errors" - "github.com/ogen-go/ogen/conv" - ht "github.com/ogen-go/ogen/http" - "github.com/ogen-go/ogen/otelogen" - "github.com/ogen-go/ogen/uri" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/trace" -) - -func trimTrailingSlashes(u *url.URL) { - u.Path = strings.TrimRight(u.Path, "/") - u.RawPath = strings.TrimRight(u.RawPath, "/") -} - -// Invoker invokes operations described by OpenAPI v3 specification. -type Invoker interface { - // PerformPrediction invokes performPrediction operation. - // - // Perform prediction. - // - // GET /api/v1/prediction - PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResult, error) - // ReadinessCheck invokes readinessCheck operation. - // - // Readiness check. - // - // GET /ready - ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) -} - -// Client implements OAS client. -type Client struct { - serverURL *url.URL - baseClient -} -type errorHandler interface { - NewError(ctx context.Context, err error) *ErrorStatusCode -} - -var _ Handler = struct { - errorHandler - *Client -}{} - -// NewClient initializes new Client defined by OAS. 
-func NewClient(serverURL string, opts ...ClientOption) (*Client, error) { - u, err := url.Parse(serverURL) - if err != nil { - return nil, err - } - trimTrailingSlashes(u) - - c, err := newClientConfig(opts...).baseClient() - if err != nil { - return nil, err - } - return &Client{ - serverURL: u, - baseClient: c, - }, nil -} - -type serverURLKey struct{} - -// WithServerURL sets context key to override server URL. -func WithServerURL(ctx context.Context, u *url.URL) context.Context { - return context.WithValue(ctx, serverURLKey{}, u) -} - -func (c *Client) requestURL(ctx context.Context) *url.URL { - u, ok := ctx.Value(serverURLKey{}).(*url.URL) - if !ok { - return c.serverURL - } - return u -} - -// PerformPrediction invokes performPrediction operation. -// -// Perform prediction. -// -// GET /api/v1/prediction -func (c *Client) PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResult, error) { - res, err := c.sendPerformPrediction(ctx, params) - return res, err -} - -func (c *Client) sendPerformPrediction(ctx context.Context, params PerformPredictionParams) (res *PredictionResult, err error) { - otelAttrs := []attribute.KeyValue{ - otelogen.OperationID("performPrediction"), - semconv.HTTPRequestMethodKey.String("GET"), - semconv.URLTemplateKey.String("/api/v1/prediction"), - } - otelAttrs = append(otelAttrs, c.cfg.Attributes...) - - // Run stopwatch. - startTime := time.Now() - defer func() { - // Use floating point division here for higher precision (instead of Millisecond method). - elapsedDuration := time.Since(startTime) - c.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), metric.WithAttributes(otelAttrs...)) - }() - - // Increment request counter. - c.requests.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) - - // Start a span for this request. 
- ctx, span := c.cfg.Tracer.Start(ctx, PerformPredictionOperation, - trace.WithAttributes(otelAttrs...), - clientSpanKind, - ) - // Track stage for error reporting. - var stage string - defer func() { - if err != nil { - span.RecordError(err) - span.SetStatus(codes.Error, stage) - c.errors.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) - } - span.End() - }() - - stage = "BuildURL" - u := uri.Clone(c.requestURL(ctx)) - var pathParts [1]string - pathParts[0] = "/api/v1/prediction" - uri.AddPathParts(u, pathParts[:]...) - - stage = "EncodeQueryParams" - q := uri.NewQueryEncoder() - { - // Encode "launch_latitude" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "launch_latitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.LaunchLatitude.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "launch_longitude" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "launch_longitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.LaunchLongitude.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "launch_datetime" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "launch_datetime", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.LaunchDatetime.Get(); ok { - return e.EncodeValue(conv.DateTimeToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "launch_altitude" parameter. 
- cfg := uri.QueryParameterEncodingConfig{ - Name: "launch_altitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.LaunchAltitude.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "profile" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "profile", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.Profile.Get(); ok { - return e.EncodeValue(conv.StringToString(string(val))) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "ascent_rate" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "ascent_rate", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.AscentRate.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "burst_altitude" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "burst_altitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.BurstAltitude.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "descent_rate" parameter. 
- cfg := uri.QueryParameterEncodingConfig{ - Name: "descent_rate", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.DescentRate.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "float_altitude" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "float_altitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.FloatAltitude.Get(); ok { - return e.EncodeValue(conv.Float64ToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "stop_datetime" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "stop_datetime", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.StopDatetime.Get(); ok { - return e.EncodeValue(conv.DateTimeToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "ascent_curve" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "ascent_curve", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.AscentCurve.Get(); ok { - return e.EncodeValue(conv.StringToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "descent_curve" parameter. 
- cfg := uri.QueryParameterEncodingConfig{ - Name: "descent_curve", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.DescentCurve.Get(); ok { - return e.EncodeValue(conv.StringToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "simulate_stages" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "simulate_stages", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if params.SimulateStages != nil { - return e.EncodeArray(func(e uri.Encoder) error { - for i, item := range params.SimulateStages { - if err := func() error { - return e.EncodeValue(conv.StringToString(string(item))) - }(); err != nil { - return errors.Wrapf(err, "[%d]", i) - } - } - return nil - }) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "interpolate" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "interpolate", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.Interpolate.Get(); ok { - return e.EncodeValue(conv.BoolToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "format" parameter. - cfg := uri.QueryParameterEncodingConfig{ - Name: "format", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.Format.Get(); ok { - return e.EncodeValue(conv.StringToString(string(val))) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - { - // Encode "dataset" parameter. 
- cfg := uri.QueryParameterEncodingConfig{ - Name: "dataset", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.EncodeParam(cfg, func(e uri.Encoder) error { - if val, ok := params.Dataset.Get(); ok { - return e.EncodeValue(conv.DateTimeToString(val)) - } - return nil - }); err != nil { - return res, errors.Wrap(err, "encode query") - } - } - u.RawQuery = q.Values().Encode() - - stage = "EncodeRequest" - r, err := ht.NewRequest(ctx, "GET", u) - if err != nil { - return res, errors.Wrap(err, "create request") - } - - stage = "SendRequest" - resp, err := c.cfg.Client.Do(r) - if err != nil { - return res, errors.Wrap(err, "do request") - } - defer resp.Body.Close() - - stage = "DecodeResponse" - result, err := decodePerformPredictionResponse(resp) - if err != nil { - return res, errors.Wrap(err, "decode response") - } - - return result, nil -} - -// ReadinessCheck invokes readinessCheck operation. -// -// Readiness check. -// -// GET /ready -func (c *Client) ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) { - res, err := c.sendReadinessCheck(ctx) - return res, err -} - -func (c *Client) sendReadinessCheck(ctx context.Context) (res *ReadinessResponse, err error) { - otelAttrs := []attribute.KeyValue{ - otelogen.OperationID("readinessCheck"), - semconv.HTTPRequestMethodKey.String("GET"), - semconv.URLTemplateKey.String("/ready"), - } - otelAttrs = append(otelAttrs, c.cfg.Attributes...) - - // Run stopwatch. - startTime := time.Now() - defer func() { - // Use floating point division here for higher precision (instead of Millisecond method). - elapsedDuration := time.Since(startTime) - c.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), metric.WithAttributes(otelAttrs...)) - }() - - // Increment request counter. - c.requests.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) - - // Start a span for this request. 
- ctx, span := c.cfg.Tracer.Start(ctx, ReadinessCheckOperation, - trace.WithAttributes(otelAttrs...), - clientSpanKind, - ) - // Track stage for error reporting. - var stage string - defer func() { - if err != nil { - span.RecordError(err) - span.SetStatus(codes.Error, stage) - c.errors.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) - } - span.End() - }() - - stage = "BuildURL" - u := uri.Clone(c.requestURL(ctx)) - var pathParts [1]string - pathParts[0] = "/ready" - uri.AddPathParts(u, pathParts[:]...) - - stage = "EncodeRequest" - r, err := ht.NewRequest(ctx, "GET", u) - if err != nil { - return res, errors.Wrap(err, "create request") - } - - stage = "SendRequest" - resp, err := c.cfg.Client.Do(r) - if err != nil { - return res, errors.Wrap(err, "do request") - } - defer resp.Body.Close() - - stage = "DecodeResponse" - result, err := decodeReadinessCheckResponse(resp) - if err != nil { - return res, errors.Wrap(err, "decode response") - } - - return result, nil -} diff --git a/pkg/rest/oas_handlers_gen.go b/pkg/rest/oas_handlers_gen.go deleted file mode 100644 index b7e53e4..0000000 --- a/pkg/rest/oas_handlers_gen.go +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "context" - "net/http" - "time" - - "github.com/go-faster/errors" - ht "github.com/ogen-go/ogen/http" - "github.com/ogen-go/ogen/middleware" - "github.com/ogen-go/ogen/ogenerrors" - "github.com/ogen-go/ogen/otelogen" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/trace" -) - -type codeRecorder struct { - http.ResponseWriter - status int -} - -func (c *codeRecorder) WriteHeader(status int) { - c.status = status - c.ResponseWriter.WriteHeader(status) -} - -func (c *codeRecorder) Unwrap() http.ResponseWriter { - return c.ResponseWriter -} - -// handlePerformPredictionRequest handles performPrediction operation. 
-// -// Perform prediction. -// -// GET /api/v1/prediction -func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool, w http.ResponseWriter, r *http.Request) { - statusWriter := &codeRecorder{ResponseWriter: w} - w = statusWriter - otelAttrs := []attribute.KeyValue{ - otelogen.OperationID("performPrediction"), - semconv.HTTPRequestMethodKey.String("GET"), - semconv.HTTPRouteKey.String("/api/v1/prediction"), - } - - // Start a span for this request. - ctx, span := s.cfg.Tracer.Start(r.Context(), PerformPredictionOperation, - trace.WithAttributes(otelAttrs...), - serverSpanKind, - ) - defer span.End() - - // Add Labeler to context. - labeler := &Labeler{attrs: otelAttrs} - ctx = contextWithLabeler(ctx, labeler) - - // Run stopwatch. - startTime := time.Now() - defer func() { - elapsedDuration := time.Since(startTime) - - attrSet := labeler.AttributeSet() - attrs := attrSet.ToSlice() - code := statusWriter.status - if code != 0 { - codeAttr := semconv.HTTPResponseStatusCode(code) - attrs = append(attrs, codeAttr) - span.SetAttributes(codeAttr) - } - attrOpt := metric.WithAttributes(attrs...) - - // Increment request counter. - s.requests.Add(ctx, 1, attrOpt) - - // Use floating point division here for higher precision (instead of Millisecond method). - s.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), attrOpt) - }() - - var ( - recordError = func(stage string, err error) { - span.RecordError(err) - - // https://opentelemetry.io/docs/specs/semconv/http/http-spans/#status - // Span Status MUST be left unset if HTTP status code was in the 1xx, 2xx or 3xx ranges, - // unless there was another error (e.g., network error receiving the response body; or 3xx codes with - // max redirects exceeded), in which case status MUST be set to Error. 
- code := statusWriter.status - if code < 100 || code >= 500 { - span.SetStatus(codes.Error, stage) - } - - attrSet := labeler.AttributeSet() - attrs := attrSet.ToSlice() - if code != 0 { - attrs = append(attrs, semconv.HTTPResponseStatusCode(code)) - } - - s.errors.Add(ctx, 1, metric.WithAttributes(attrs...)) - } - err error - opErrContext = ogenerrors.OperationContext{ - Name: PerformPredictionOperation, - ID: "performPrediction", - } - ) - params, err := decodePerformPredictionParams(args, argsEscaped, r) - if err != nil { - err = &ogenerrors.DecodeParamsError{ - OperationContext: opErrContext, - Err: err, - } - defer recordError("DecodeParams", err) - s.cfg.ErrorHandler(ctx, w, r, err) - return - } - - var rawBody []byte - - var response *PredictionResult - if m := s.cfg.Middleware; m != nil { - mreq := middleware.Request{ - Context: ctx, - OperationName: PerformPredictionOperation, - OperationSummary: "Perform prediction", - OperationID: "performPrediction", - Body: nil, - RawBody: rawBody, - Params: middleware.Parameters{ - { - Name: "launch_latitude", - In: "query", - }: params.LaunchLatitude, - { - Name: "launch_longitude", - In: "query", - }: params.LaunchLongitude, - { - Name: "launch_datetime", - In: "query", - }: params.LaunchDatetime, - { - Name: "launch_altitude", - In: "query", - }: params.LaunchAltitude, - { - Name: "profile", - In: "query", - }: params.Profile, - { - Name: "ascent_rate", - In: "query", - }: params.AscentRate, - { - Name: "burst_altitude", - In: "query", - }: params.BurstAltitude, - { - Name: "descent_rate", - In: "query", - }: params.DescentRate, - { - Name: "float_altitude", - In: "query", - }: params.FloatAltitude, - { - Name: "stop_datetime", - In: "query", - }: params.StopDatetime, - { - Name: "ascent_curve", - In: "query", - }: params.AscentCurve, - { - Name: "descent_curve", - In: "query", - }: params.DescentCurve, - { - Name: "simulate_stages", - In: "query", - }: params.SimulateStages, - { - Name: "interpolate", - In: 
"query", - }: params.Interpolate, - { - Name: "format", - In: "query", - }: params.Format, - { - Name: "dataset", - In: "query", - }: params.Dataset, - }, - Raw: r, - } - - type ( - Request = struct{} - Params = PerformPredictionParams - Response = *PredictionResult - ) - response, err = middleware.HookMiddleware[ - Request, - Params, - Response, - ]( - m, - mreq, - unpackPerformPredictionParams, - func(ctx context.Context, request Request, params Params) (response Response, err error) { - response, err = s.h.PerformPrediction(ctx, params) - return response, err - }, - ) - } else { - response, err = s.h.PerformPrediction(ctx, params) - } - if err != nil { - if errRes, ok := errors.Into[*ErrorStatusCode](err); ok { - if err := encodeErrorResponse(errRes, w, span); err != nil { - defer recordError("Internal", err) - } - return - } - if errors.Is(err, ht.ErrNotImplemented) { - s.cfg.ErrorHandler(ctx, w, r, err) - return - } - if err := encodeErrorResponse(s.h.NewError(ctx, err), w, span); err != nil { - defer recordError("Internal", err) - } - return - } - - if err := encodePerformPredictionResponse(response, w, span); err != nil { - defer recordError("EncodeResponse", err) - if !errors.Is(err, ht.ErrInternalServerErrorResponse) { - s.cfg.ErrorHandler(ctx, w, r, err) - } - return - } -} - -// handleReadinessCheckRequest handles readinessCheck operation. -// -// Readiness check. -// -// GET /ready -func (s *Server) handleReadinessCheckRequest(args [0]string, argsEscaped bool, w http.ResponseWriter, r *http.Request) { - statusWriter := &codeRecorder{ResponseWriter: w} - w = statusWriter - otelAttrs := []attribute.KeyValue{ - otelogen.OperationID("readinessCheck"), - semconv.HTTPRequestMethodKey.String("GET"), - semconv.HTTPRouteKey.String("/ready"), - } - - // Start a span for this request. - ctx, span := s.cfg.Tracer.Start(r.Context(), ReadinessCheckOperation, - trace.WithAttributes(otelAttrs...), - serverSpanKind, - ) - defer span.End() - - // Add Labeler to context. 
- labeler := &Labeler{attrs: otelAttrs} - ctx = contextWithLabeler(ctx, labeler) - - // Run stopwatch. - startTime := time.Now() - defer func() { - elapsedDuration := time.Since(startTime) - - attrSet := labeler.AttributeSet() - attrs := attrSet.ToSlice() - code := statusWriter.status - if code != 0 { - codeAttr := semconv.HTTPResponseStatusCode(code) - attrs = append(attrs, codeAttr) - span.SetAttributes(codeAttr) - } - attrOpt := metric.WithAttributes(attrs...) - - // Increment request counter. - s.requests.Add(ctx, 1, attrOpt) - - // Use floating point division here for higher precision (instead of Millisecond method). - s.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), attrOpt) - }() - - var ( - recordError = func(stage string, err error) { - span.RecordError(err) - - // https://opentelemetry.io/docs/specs/semconv/http/http-spans/#status - // Span Status MUST be left unset if HTTP status code was in the 1xx, 2xx or 3xx ranges, - // unless there was another error (e.g., network error receiving the response body; or 3xx codes with - // max redirects exceeded), in which case status MUST be set to Error. 
- code := statusWriter.status - if code < 100 || code >= 500 { - span.SetStatus(codes.Error, stage) - } - - attrSet := labeler.AttributeSet() - attrs := attrSet.ToSlice() - if code != 0 { - attrs = append(attrs, semconv.HTTPResponseStatusCode(code)) - } - - s.errors.Add(ctx, 1, metric.WithAttributes(attrs...)) - } - err error - ) - - var rawBody []byte - - var response *ReadinessResponse - if m := s.cfg.Middleware; m != nil { - mreq := middleware.Request{ - Context: ctx, - OperationName: ReadinessCheckOperation, - OperationSummary: "Readiness check", - OperationID: "readinessCheck", - Body: nil, - RawBody: rawBody, - Params: middleware.Parameters{}, - Raw: r, - } - - type ( - Request = struct{} - Params = struct{} - Response = *ReadinessResponse - ) - response, err = middleware.HookMiddleware[ - Request, - Params, - Response, - ]( - m, - mreq, - nil, - func(ctx context.Context, request Request, params Params) (response Response, err error) { - response, err = s.h.ReadinessCheck(ctx) - return response, err - }, - ) - } else { - response, err = s.h.ReadinessCheck(ctx) - } - if err != nil { - if errRes, ok := errors.Into[*ErrorStatusCode](err); ok { - if err := encodeErrorResponse(errRes, w, span); err != nil { - defer recordError("Internal", err) - } - return - } - if errors.Is(err, ht.ErrNotImplemented) { - s.cfg.ErrorHandler(ctx, w, r, err) - return - } - if err := encodeErrorResponse(s.h.NewError(ctx, err), w, span); err != nil { - defer recordError("Internal", err) - } - return - } - - if err := encodeReadinessCheckResponse(response, w, span); err != nil { - defer recordError("EncodeResponse", err) - if !errors.Is(err, ht.ErrInternalServerErrorResponse) { - s.cfg.ErrorHandler(ctx, w, r, err) - } - return - } -} diff --git a/pkg/rest/oas_json_gen.go b/pkg/rest/oas_json_gen.go deleted file mode 100644 index 9707b6f..0000000 --- a/pkg/rest/oas_json_gen.go +++ /dev/null @@ -1,963 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. 
- -package gsn - -import ( - "math/bits" - "strconv" - "time" - - "github.com/go-faster/errors" - "github.com/go-faster/jx" - "github.com/ogen-go/ogen/json" - "github.com/ogen-go/ogen/validate" -) - -// Encode implements json.Marshaler. -func (s *Error) Encode(e *jx.Encoder) { - e.ObjStart() - s.encodeFields(e) - e.ObjEnd() -} - -// encodeFields encodes fields. -func (s *Error) encodeFields(e *jx.Encoder) { - { - e.FieldStart("message") - e.Str(s.Message) - } - { - if s.Details.Set { - e.FieldStart("details") - s.Details.Encode(e) - } - } -} - -var jsonFieldsNameOfError = [2]string{ - 0: "message", - 1: "details", -} - -// Decode decodes Error from json. -func (s *Error) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode Error to nil") - } - var requiredBitSet [1]uint8 - - if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { - switch string(k) { - case "message": - requiredBitSet[0] |= 1 << 0 - if err := func() error { - v, err := d.Str() - s.Message = string(v) - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"message\"") - } - case "details": - if err := func() error { - s.Details.Reset() - if err := s.Details.Decode(d); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"details\"") - } - default: - return d.Skip() - } - return nil - }); err != nil { - return errors.Wrap(err, "decode Error") - } - // Validate required fields. - var failures []validate.FieldError - for i, mask := range [1]uint8{ - 0b00000001, - } { - if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { - // Mask only required fields and check equality to mask using XOR. - // - // If XOR result is not zero, result is not equal to expected, so some fields are missed. - // Bits of fields which would be set are actually bits of missed fields. 
- missed := bits.OnesCount8(result) - for bitN := 0; bitN < missed; bitN++ { - bitIdx := bits.TrailingZeros8(result) - fieldIdx := i*8 + bitIdx - var name string - if fieldIdx < len(jsonFieldsNameOfError) { - name = jsonFieldsNameOfError[fieldIdx] - } else { - name = strconv.Itoa(fieldIdx) - } - failures = append(failures, validate.FieldError{ - Name: name, - Error: validate.ErrFieldRequired, - }) - // Reset bit. - result &^= 1 << bitIdx - } - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s *Error) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *Error) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode encodes bool as json. -func (o OptBool) Encode(e *jx.Encoder) { - if !o.Set { - return - } - e.Bool(bool(o.Value)) -} - -// Decode decodes bool from json. -func (o *OptBool) Decode(d *jx.Decoder) error { - if o == nil { - return errors.New("invalid: unable to decode OptBool to nil") - } - o.Set = true - v, err := d.Bool() - if err != nil { - return err - } - o.Value = bool(v) - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s OptBool) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *OptBool) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode encodes time.Time as json. -func (o OptDateTime) Encode(e *jx.Encoder, format func(*jx.Encoder, time.Time)) { - if !o.Set { - return - } - format(e, o.Value) -} - -// Decode decodes time.Time from json. 
-func (o *OptDateTime) Decode(d *jx.Decoder, format func(*jx.Decoder) (time.Time, error)) error { - if o == nil { - return errors.New("invalid: unable to decode OptDateTime to nil") - } - o.Set = true - v, err := format(d) - if err != nil { - return err - } - o.Value = v - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s OptDateTime) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e, json.EncodeDateTime) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *OptDateTime) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d, json.DecodeDateTime) -} - -// Encode encodes string as json. -func (o OptString) Encode(e *jx.Encoder) { - if !o.Set { - return - } - e.Str(string(o.Value)) -} - -// Decode decodes string from json. -func (o *OptString) Decode(d *jx.Decoder) error { - if o == nil { - return errors.New("invalid: unable to decode OptString to nil") - } - o.Set = true - v, err := d.Str() - if err != nil { - return err - } - o.Value = string(v) - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s OptString) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *OptString) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode implements json.Marshaler. -func (s *PredictionResult) Encode(e *jx.Encoder) { - e.ObjStart() - s.encodeFields(e) - e.ObjEnd() -} - -// encodeFields encodes fields. -func (s *PredictionResult) encodeFields(e *jx.Encoder) { - { - e.FieldStart("metadata") - s.Metadata.Encode(e) - } - { - e.FieldStart("prediction") - e.ArrStart() - for _, elem := range s.Prediction { - elem.Encode(e) - } - e.ArrEnd() - } -} - -var jsonFieldsNameOfPredictionResult = [2]string{ - 0: "metadata", - 1: "prediction", -} - -// Decode decodes PredictionResult from json. 
-func (s *PredictionResult) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode PredictionResult to nil") - } - var requiredBitSet [1]uint8 - - if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { - switch string(k) { - case "metadata": - requiredBitSet[0] |= 1 << 0 - if err := func() error { - if err := s.Metadata.Decode(d); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"metadata\"") - } - case "prediction": - requiredBitSet[0] |= 1 << 1 - if err := func() error { - s.Prediction = make([]PredictionResultPredictionItem, 0) - if err := d.Arr(func(d *jx.Decoder) error { - var elem PredictionResultPredictionItem - if err := elem.Decode(d); err != nil { - return err - } - s.Prediction = append(s.Prediction, elem) - return nil - }); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"prediction\"") - } - default: - return d.Skip() - } - return nil - }); err != nil { - return errors.Wrap(err, "decode PredictionResult") - } - // Validate required fields. - var failures []validate.FieldError - for i, mask := range [1]uint8{ - 0b00000011, - } { - if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { - // Mask only required fields and check equality to mask using XOR. - // - // If XOR result is not zero, result is not equal to expected, so some fields are missed. - // Bits of fields which would be set are actually bits of missed fields. - missed := bits.OnesCount8(result) - for bitN := 0; bitN < missed; bitN++ { - bitIdx := bits.TrailingZeros8(result) - fieldIdx := i*8 + bitIdx - var name string - if fieldIdx < len(jsonFieldsNameOfPredictionResult) { - name = jsonFieldsNameOfPredictionResult[fieldIdx] - } else { - name = strconv.Itoa(fieldIdx) - } - failures = append(failures, validate.FieldError{ - Name: name, - Error: validate.ErrFieldRequired, - }) - // Reset bit. 
- result &^= 1 << bitIdx - } - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s *PredictionResult) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *PredictionResult) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode implements json.Marshaler. -func (s *PredictionResultMetadata) Encode(e *jx.Encoder) { - e.ObjStart() - s.encodeFields(e) - e.ObjEnd() -} - -// encodeFields encodes fields. -func (s *PredictionResultMetadata) encodeFields(e *jx.Encoder) { - { - e.FieldStart("complete_datetime") - json.EncodeDateTime(e, s.CompleteDatetime) - } - { - e.FieldStart("start_datetime") - json.EncodeDateTime(e, s.StartDatetime) - } -} - -var jsonFieldsNameOfPredictionResultMetadata = [2]string{ - 0: "complete_datetime", - 1: "start_datetime", -} - -// Decode decodes PredictionResultMetadata from json. 
-func (s *PredictionResultMetadata) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode PredictionResultMetadata to nil") - } - var requiredBitSet [1]uint8 - - if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { - switch string(k) { - case "complete_datetime": - requiredBitSet[0] |= 1 << 0 - if err := func() error { - v, err := json.DecodeDateTime(d) - s.CompleteDatetime = v - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"complete_datetime\"") - } - case "start_datetime": - requiredBitSet[0] |= 1 << 1 - if err := func() error { - v, err := json.DecodeDateTime(d) - s.StartDatetime = v - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"start_datetime\"") - } - default: - return d.Skip() - } - return nil - }); err != nil { - return errors.Wrap(err, "decode PredictionResultMetadata") - } - // Validate required fields. - var failures []validate.FieldError - for i, mask := range [1]uint8{ - 0b00000011, - } { - if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { - // Mask only required fields and check equality to mask using XOR. - // - // If XOR result is not zero, result is not equal to expected, so some fields are missed. - // Bits of fields which would be set are actually bits of missed fields. - missed := bits.OnesCount8(result) - for bitN := 0; bitN < missed; bitN++ { - bitIdx := bits.TrailingZeros8(result) - fieldIdx := i*8 + bitIdx - var name string - if fieldIdx < len(jsonFieldsNameOfPredictionResultMetadata) { - name = jsonFieldsNameOfPredictionResultMetadata[fieldIdx] - } else { - name = strconv.Itoa(fieldIdx) - } - failures = append(failures, validate.FieldError{ - Name: name, - Error: validate.ErrFieldRequired, - }) - // Reset bit. 
- result &^= 1 << bitIdx - } - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s *PredictionResultMetadata) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *PredictionResultMetadata) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode implements json.Marshaler. -func (s *PredictionResultPredictionItem) Encode(e *jx.Encoder) { - e.ObjStart() - s.encodeFields(e) - e.ObjEnd() -} - -// encodeFields encodes fields. -func (s *PredictionResultPredictionItem) encodeFields(e *jx.Encoder) { - { - e.FieldStart("stage") - s.Stage.Encode(e) - } - { - e.FieldStart("trajectory") - e.ArrStart() - for _, elem := range s.Trajectory { - elem.Encode(e) - } - e.ArrEnd() - } -} - -var jsonFieldsNameOfPredictionResultPredictionItem = [2]string{ - 0: "stage", - 1: "trajectory", -} - -// Decode decodes PredictionResultPredictionItem from json. 
-func (s *PredictionResultPredictionItem) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode PredictionResultPredictionItem to nil") - } - var requiredBitSet [1]uint8 - - if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { - switch string(k) { - case "stage": - requiredBitSet[0] |= 1 << 0 - if err := func() error { - if err := s.Stage.Decode(d); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"stage\"") - } - case "trajectory": - requiredBitSet[0] |= 1 << 1 - if err := func() error { - s.Trajectory = make([]PredictionResultPredictionItemTrajectoryItem, 0) - if err := d.Arr(func(d *jx.Decoder) error { - var elem PredictionResultPredictionItemTrajectoryItem - if err := elem.Decode(d); err != nil { - return err - } - s.Trajectory = append(s.Trajectory, elem) - return nil - }); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"trajectory\"") - } - default: - return d.Skip() - } - return nil - }); err != nil { - return errors.Wrap(err, "decode PredictionResultPredictionItem") - } - // Validate required fields. - var failures []validate.FieldError - for i, mask := range [1]uint8{ - 0b00000011, - } { - if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { - // Mask only required fields and check equality to mask using XOR. - // - // If XOR result is not zero, result is not equal to expected, so some fields are missed. - // Bits of fields which would be set are actually bits of missed fields. 
- missed := bits.OnesCount8(result) - for bitN := 0; bitN < missed; bitN++ { - bitIdx := bits.TrailingZeros8(result) - fieldIdx := i*8 + bitIdx - var name string - if fieldIdx < len(jsonFieldsNameOfPredictionResultPredictionItem) { - name = jsonFieldsNameOfPredictionResultPredictionItem[fieldIdx] - } else { - name = strconv.Itoa(fieldIdx) - } - failures = append(failures, validate.FieldError{ - Name: name, - Error: validate.ErrFieldRequired, - }) - // Reset bit. - result &^= 1 << bitIdx - } - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s *PredictionResultPredictionItem) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *PredictionResultPredictionItem) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode encodes PredictionResultPredictionItemStage as json. -func (s PredictionResultPredictionItemStage) Encode(e *jx.Encoder) { - e.Str(string(s)) -} - -// Decode decodes PredictionResultPredictionItemStage from json. -func (s *PredictionResultPredictionItemStage) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode PredictionResultPredictionItemStage to nil") - } - v, err := d.StrBytes() - if err != nil { - return err - } - // Try to use constant string. - switch PredictionResultPredictionItemStage(v) { - case PredictionResultPredictionItemStageAscent: - *s = PredictionResultPredictionItemStageAscent - case PredictionResultPredictionItemStageDescent: - *s = PredictionResultPredictionItemStageDescent - case PredictionResultPredictionItemStageFloat: - *s = PredictionResultPredictionItemStageFloat - default: - *s = PredictionResultPredictionItemStage(v) - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. 
-func (s PredictionResultPredictionItemStage) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *PredictionResultPredictionItemStage) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode implements json.Marshaler. -func (s *PredictionResultPredictionItemTrajectoryItem) Encode(e *jx.Encoder) { - e.ObjStart() - s.encodeFields(e) - e.ObjEnd() -} - -// encodeFields encodes fields. -func (s *PredictionResultPredictionItemTrajectoryItem) encodeFields(e *jx.Encoder) { - { - e.FieldStart("datetime") - json.EncodeDateTime(e, s.Datetime) - } - { - e.FieldStart("latitude") - e.Float64(s.Latitude) - } - { - e.FieldStart("longitude") - e.Float64(s.Longitude) - } - { - e.FieldStart("altitude") - e.Float64(s.Altitude) - } -} - -var jsonFieldsNameOfPredictionResultPredictionItemTrajectoryItem = [4]string{ - 0: "datetime", - 1: "latitude", - 2: "longitude", - 3: "altitude", -} - -// Decode decodes PredictionResultPredictionItemTrajectoryItem from json. 
-func (s *PredictionResultPredictionItemTrajectoryItem) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode PredictionResultPredictionItemTrajectoryItem to nil") - } - var requiredBitSet [1]uint8 - - if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { - switch string(k) { - case "datetime": - requiredBitSet[0] |= 1 << 0 - if err := func() error { - v, err := json.DecodeDateTime(d) - s.Datetime = v - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"datetime\"") - } - case "latitude": - requiredBitSet[0] |= 1 << 1 - if err := func() error { - v, err := d.Float64() - s.Latitude = float64(v) - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"latitude\"") - } - case "longitude": - requiredBitSet[0] |= 1 << 2 - if err := func() error { - v, err := d.Float64() - s.Longitude = float64(v) - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"longitude\"") - } - case "altitude": - requiredBitSet[0] |= 1 << 3 - if err := func() error { - v, err := d.Float64() - s.Altitude = float64(v) - if err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"altitude\"") - } - default: - return d.Skip() - } - return nil - }); err != nil { - return errors.Wrap(err, "decode PredictionResultPredictionItemTrajectoryItem") - } - // Validate required fields. - var failures []validate.FieldError - for i, mask := range [1]uint8{ - 0b00001111, - } { - if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { - // Mask only required fields and check equality to mask using XOR. - // - // If XOR result is not zero, result is not equal to expected, so some fields are missed. - // Bits of fields which would be set are actually bits of missed fields. 
- missed := bits.OnesCount8(result) - for bitN := 0; bitN < missed; bitN++ { - bitIdx := bits.TrailingZeros8(result) - fieldIdx := i*8 + bitIdx - var name string - if fieldIdx < len(jsonFieldsNameOfPredictionResultPredictionItemTrajectoryItem) { - name = jsonFieldsNameOfPredictionResultPredictionItemTrajectoryItem[fieldIdx] - } else { - name = strconv.Itoa(fieldIdx) - } - failures = append(failures, validate.FieldError{ - Name: name, - Error: validate.ErrFieldRequired, - }) - // Reset bit. - result &^= 1 << bitIdx - } - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s *PredictionResultPredictionItemTrajectoryItem) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *PredictionResultPredictionItemTrajectoryItem) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode implements json.Marshaler. -func (s *ReadinessResponse) Encode(e *jx.Encoder) { - e.ObjStart() - s.encodeFields(e) - e.ObjEnd() -} - -// encodeFields encodes fields. -func (s *ReadinessResponse) encodeFields(e *jx.Encoder) { - { - e.FieldStart("status") - s.Status.Encode(e) - } - { - if s.LastUpdate.Set { - e.FieldStart("last_update") - s.LastUpdate.Encode(e, json.EncodeDateTime) - } - } - { - if s.IsFresh.Set { - e.FieldStart("is_fresh") - s.IsFresh.Encode(e) - } - } - { - if s.ErrorMessage.Set { - e.FieldStart("error_message") - s.ErrorMessage.Encode(e) - } - } -} - -var jsonFieldsNameOfReadinessResponse = [4]string{ - 0: "status", - 1: "last_update", - 2: "is_fresh", - 3: "error_message", -} - -// Decode decodes ReadinessResponse from json. 
-func (s *ReadinessResponse) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode ReadinessResponse to nil") - } - var requiredBitSet [1]uint8 - - if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { - switch string(k) { - case "status": - requiredBitSet[0] |= 1 << 0 - if err := func() error { - if err := s.Status.Decode(d); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"status\"") - } - case "last_update": - if err := func() error { - s.LastUpdate.Reset() - if err := s.LastUpdate.Decode(d, json.DecodeDateTime); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"last_update\"") - } - case "is_fresh": - if err := func() error { - s.IsFresh.Reset() - if err := s.IsFresh.Decode(d); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"is_fresh\"") - } - case "error_message": - if err := func() error { - s.ErrorMessage.Reset() - if err := s.ErrorMessage.Decode(d); err != nil { - return err - } - return nil - }(); err != nil { - return errors.Wrap(err, "decode field \"error_message\"") - } - default: - return d.Skip() - } - return nil - }); err != nil { - return errors.Wrap(err, "decode ReadinessResponse") - } - // Validate required fields. - var failures []validate.FieldError - for i, mask := range [1]uint8{ - 0b00000001, - } { - if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { - // Mask only required fields and check equality to mask using XOR. - // - // If XOR result is not zero, result is not equal to expected, so some fields are missed. - // Bits of fields which would be set are actually bits of missed fields. 
- missed := bits.OnesCount8(result) - for bitN := 0; bitN < missed; bitN++ { - bitIdx := bits.TrailingZeros8(result) - fieldIdx := i*8 + bitIdx - var name string - if fieldIdx < len(jsonFieldsNameOfReadinessResponse) { - name = jsonFieldsNameOfReadinessResponse[fieldIdx] - } else { - name = strconv.Itoa(fieldIdx) - } - failures = append(failures, validate.FieldError{ - Name: name, - Error: validate.ErrFieldRequired, - }) - // Reset bit. - result &^= 1 << bitIdx - } - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s *ReadinessResponse) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. -func (s *ReadinessResponse) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} - -// Encode encodes ReadinessResponseStatus as json. -func (s ReadinessResponseStatus) Encode(e *jx.Encoder) { - e.Str(string(s)) -} - -// Decode decodes ReadinessResponseStatus from json. -func (s *ReadinessResponseStatus) Decode(d *jx.Decoder) error { - if s == nil { - return errors.New("invalid: unable to decode ReadinessResponseStatus to nil") - } - v, err := d.StrBytes() - if err != nil { - return err - } - // Try to use constant string. - switch ReadinessResponseStatus(v) { - case ReadinessResponseStatusOk: - *s = ReadinessResponseStatusOk - case ReadinessResponseStatusNotReady: - *s = ReadinessResponseStatusNotReady - case ReadinessResponseStatusError: - *s = ReadinessResponseStatusError - default: - *s = ReadinessResponseStatus(v) - } - - return nil -} - -// MarshalJSON implements stdjson.Marshaler. -func (s ReadinessResponseStatus) MarshalJSON() ([]byte, error) { - e := jx.Encoder{} - s.Encode(&e) - return e.Bytes(), nil -} - -// UnmarshalJSON implements stdjson.Unmarshaler. 
-func (s *ReadinessResponseStatus) UnmarshalJSON(data []byte) error { - d := jx.DecodeBytes(data) - return s.Decode(d) -} diff --git a/pkg/rest/oas_labeler_gen.go b/pkg/rest/oas_labeler_gen.go deleted file mode 100644 index 47bce6a..0000000 --- a/pkg/rest/oas_labeler_gen.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" -) - -// Labeler is used to allow adding custom attributes to the server request metrics. -type Labeler struct { - attrs []attribute.KeyValue -} - -// Add attributes to the Labeler. -func (l *Labeler) Add(attrs ...attribute.KeyValue) { - l.attrs = append(l.attrs, attrs...) -} - -// AttributeSet returns the attributes added to the Labeler as an attribute.Set. -func (l *Labeler) AttributeSet() attribute.Set { - return attribute.NewSet(l.attrs...) -} - -type labelerContextKey struct{} - -// LabelerFromContext retrieves the Labeler from the provided context, if present. -// -// If no Labeler was found in the provided context a new, empty Labeler is returned and the second -// return value is false. In this case it is safe to use the Labeler but any attributes added to -// it will not be used. -func LabelerFromContext(ctx context.Context) (*Labeler, bool) { - if l, ok := ctx.Value(labelerContextKey{}).(*Labeler); ok { - return l, true - } - return &Labeler{}, false -} - -func contextWithLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, labelerContextKey{}, l) -} diff --git a/pkg/rest/oas_middleware_gen.go b/pkg/rest/oas_middleware_gen.go deleted file mode 100644 index 9d62f34..0000000 --- a/pkg/rest/oas_middleware_gen.go +++ /dev/null @@ -1,10 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "github.com/ogen-go/ogen/middleware" -) - -// Middleware is middleware type. 
-type Middleware = middleware.Middleware diff --git a/pkg/rest/oas_operations_gen.go b/pkg/rest/oas_operations_gen.go deleted file mode 100644 index 873f44a..0000000 --- a/pkg/rest/oas_operations_gen.go +++ /dev/null @@ -1,11 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -// OperationName is the ogen operation name -type OperationName = string - -const ( - PerformPredictionOperation OperationName = "PerformPrediction" - ReadinessCheckOperation OperationName = "ReadinessCheck" -) diff --git a/pkg/rest/oas_parameters_gen.go b/pkg/rest/oas_parameters_gen.go deleted file mode 100644 index da12715..0000000 --- a/pkg/rest/oas_parameters_gen.go +++ /dev/null @@ -1,1004 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "fmt" - "net/http" - "time" - - "github.com/go-faster/errors" - "github.com/ogen-go/ogen/conv" - "github.com/ogen-go/ogen/middleware" - "github.com/ogen-go/ogen/ogenerrors" - "github.com/ogen-go/ogen/uri" - "github.com/ogen-go/ogen/validate" -) - -// PerformPredictionParams is parameters of performPrediction operation. 
-type PerformPredictionParams struct { - LaunchLatitude OptFloat64 `json:",omitempty,omitzero"` - LaunchLongitude OptFloat64 `json:",omitempty,omitzero"` - LaunchDatetime OptDateTime `json:",omitempty,omitzero"` - LaunchAltitude OptFloat64 `json:",omitempty,omitzero"` - Profile OptPerformPredictionProfile `json:",omitempty,omitzero"` - AscentRate OptFloat64 `json:",omitempty,omitzero"` - BurstAltitude OptFloat64 `json:",omitempty,omitzero"` - DescentRate OptFloat64 `json:",omitempty,omitzero"` - FloatAltitude OptFloat64 `json:",omitempty,omitzero"` - StopDatetime OptDateTime `json:",omitempty,omitzero"` - AscentCurve OptString `json:",omitempty,omitzero"` - DescentCurve OptString `json:",omitempty,omitzero"` - SimulateStages []PerformPredictionSimulateStagesItem `json:",omitempty"` - Interpolate OptBool `json:",omitempty,omitzero"` - Format OptPerformPredictionFormat `json:",omitempty,omitzero"` - Dataset OptDateTime `json:",omitempty,omitzero"` -} - -func unpackPerformPredictionParams(packed middleware.Parameters) (params PerformPredictionParams) { - { - key := middleware.ParameterKey{ - Name: "launch_latitude", - In: "query", - } - if v, ok := packed[key]; ok { - params.LaunchLatitude = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "launch_longitude", - In: "query", - } - if v, ok := packed[key]; ok { - params.LaunchLongitude = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "launch_datetime", - In: "query", - } - if v, ok := packed[key]; ok { - params.LaunchDatetime = v.(OptDateTime) - } - } - { - key := middleware.ParameterKey{ - Name: "launch_altitude", - In: "query", - } - if v, ok := packed[key]; ok { - params.LaunchAltitude = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "profile", - In: "query", - } - if v, ok := packed[key]; ok { - params.Profile = v.(OptPerformPredictionProfile) - } - } - { - key := middleware.ParameterKey{ - Name: "ascent_rate", - In: "query", - } - if v, ok := 
packed[key]; ok { - params.AscentRate = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "burst_altitude", - In: "query", - } - if v, ok := packed[key]; ok { - params.BurstAltitude = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "descent_rate", - In: "query", - } - if v, ok := packed[key]; ok { - params.DescentRate = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "float_altitude", - In: "query", - } - if v, ok := packed[key]; ok { - params.FloatAltitude = v.(OptFloat64) - } - } - { - key := middleware.ParameterKey{ - Name: "stop_datetime", - In: "query", - } - if v, ok := packed[key]; ok { - params.StopDatetime = v.(OptDateTime) - } - } - { - key := middleware.ParameterKey{ - Name: "ascent_curve", - In: "query", - } - if v, ok := packed[key]; ok { - params.AscentCurve = v.(OptString) - } - } - { - key := middleware.ParameterKey{ - Name: "descent_curve", - In: "query", - } - if v, ok := packed[key]; ok { - params.DescentCurve = v.(OptString) - } - } - { - key := middleware.ParameterKey{ - Name: "simulate_stages", - In: "query", - } - if v, ok := packed[key]; ok { - params.SimulateStages = v.([]PerformPredictionSimulateStagesItem) - } - } - { - key := middleware.ParameterKey{ - Name: "interpolate", - In: "query", - } - if v, ok := packed[key]; ok { - params.Interpolate = v.(OptBool) - } - } - { - key := middleware.ParameterKey{ - Name: "format", - In: "query", - } - if v, ok := packed[key]; ok { - params.Format = v.(OptPerformPredictionFormat) - } - } - { - key := middleware.ParameterKey{ - Name: "dataset", - In: "query", - } - if v, ok := packed[key]; ok { - params.Dataset = v.(OptDateTime) - } - } - return params -} - -func decodePerformPredictionParams(args [0]string, argsEscaped bool, r *http.Request) (params PerformPredictionParams, _ error) { - q := uri.NewQueryDecoder(r.URL.Query()) - // Decode query: launch_latitude. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "launch_latitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotLaunchLatitudeVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotLaunchLatitudeVal = c - return nil - }(); err != nil { - return err - } - params.LaunchLatitude.SetTo(paramsDotLaunchLatitudeVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.LaunchLatitude.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "launch_latitude", - In: "query", - Err: err, - } - } - // Decode query: launch_longitude. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "launch_longitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotLaunchLongitudeVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotLaunchLongitudeVal = c - return nil - }(); err != nil { - return err - } - params.LaunchLongitude.SetTo(paramsDotLaunchLongitudeVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.LaunchLongitude.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "launch_longitude", - In: "query", - Err: err, - } - } - // Decode query: launch_datetime. - if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "launch_datetime", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotLaunchDatetimeVal time.Time - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToDateTime(val) - if err != nil { - return err - } - - paramsDotLaunchDatetimeVal = c - return nil - }(); err != nil { - return err - } - params.LaunchDatetime.SetTo(paramsDotLaunchDatetimeVal) - return nil - }); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "launch_datetime", - In: "query", - Err: err, - } - } - // Decode query: launch_altitude. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "launch_altitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotLaunchAltitudeVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotLaunchAltitudeVal = c - return nil - }(); err != nil { - return err - } - params.LaunchAltitude.SetTo(paramsDotLaunchAltitudeVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.LaunchAltitude.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "launch_altitude", - In: "query", - Err: err, - } - } - // Decode query: profile. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "profile", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotProfileVal PerformPredictionProfile - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToString(val) - if err != nil { - return err - } - - paramsDotProfileVal = PerformPredictionProfile(c) - return nil - }(); err != nil { - return err - } - params.Profile.SetTo(paramsDotProfileVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.Profile.Get(); ok { - if err := func() error { - if err := value.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "profile", - In: "query", - Err: err, - } - } - // Decode query: ascent_rate. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "ascent_rate", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotAscentRateVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotAscentRateVal = c - return nil - }(); err != nil { - return err - } - params.AscentRate.SetTo(paramsDotAscentRateVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.AscentRate.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "ascent_rate", - In: "query", - Err: err, - } - } - // Decode query: burst_altitude. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "burst_altitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotBurstAltitudeVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotBurstAltitudeVal = c - return nil - }(); err != nil { - return err - } - params.BurstAltitude.SetTo(paramsDotBurstAltitudeVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.BurstAltitude.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "burst_altitude", - In: "query", - Err: err, - } - } - // Decode query: descent_rate. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "descent_rate", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotDescentRateVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotDescentRateVal = c - return nil - }(); err != nil { - return err - } - params.DescentRate.SetTo(paramsDotDescentRateVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.DescentRate.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "descent_rate", - In: "query", - Err: err, - } - } - // Decode query: float_altitude. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "float_altitude", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotFloatAltitudeVal float64 - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToFloat64(val) - if err != nil { - return err - } - - paramsDotFloatAltitudeVal = c - return nil - }(); err != nil { - return err - } - params.FloatAltitude.SetTo(paramsDotFloatAltitudeVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.FloatAltitude.Get(); ok { - if err := func() error { - if err := (validate.Float{}).Validate(float64(value)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "float_altitude", - In: "query", - Err: err, - } - } - // Decode query: stop_datetime. - if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "stop_datetime", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotStopDatetimeVal time.Time - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToDateTime(val) - if err != nil { - return err - } - - paramsDotStopDatetimeVal = c - return nil - }(); err != nil { - return err - } - params.StopDatetime.SetTo(paramsDotStopDatetimeVal) - return nil - }); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "stop_datetime", - In: "query", - Err: err, - } - } - // Decode query: ascent_curve. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "ascent_curve", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotAscentCurveVal string - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToString(val) - if err != nil { - return err - } - - paramsDotAscentCurveVal = c - return nil - }(); err != nil { - return err - } - params.AscentCurve.SetTo(paramsDotAscentCurveVal) - return nil - }); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "ascent_curve", - In: "query", - Err: err, - } - } - // Decode query: descent_curve. - if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "descent_curve", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotDescentCurveVal string - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToString(val) - if err != nil { - return err - } - - paramsDotDescentCurveVal = c - return nil - }(); err != nil { - return err - } - params.DescentCurve.SetTo(paramsDotDescentCurveVal) - return nil - }); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "descent_curve", - In: "query", - Err: err, - } - } - // Decode query: simulate_stages. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "simulate_stages", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - return d.DecodeArray(func(d uri.Decoder) error { - var paramsDotSimulateStagesVal PerformPredictionSimulateStagesItem - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToString(val) - if err != nil { - return err - } - - paramsDotSimulateStagesVal = PerformPredictionSimulateStagesItem(c) - return nil - }(); err != nil { - return err - } - params.SimulateStages = append(params.SimulateStages, paramsDotSimulateStagesVal) - return nil - }) - }); err != nil { - return err - } - if err := func() error { - var failures []validate.FieldError - for i, elem := range params.SimulateStages { - if err := func() error { - if err := elem.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: fmt.Sprintf("[%d]", i), - Error: err, - }) - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "simulate_stages", - In: "query", - Err: err, - } - } - // Decode query: interpolate. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "interpolate", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotInterpolateVal bool - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToBool(val) - if err != nil { - return err - } - - paramsDotInterpolateVal = c - return nil - }(); err != nil { - return err - } - params.Interpolate.SetTo(paramsDotInterpolateVal) - return nil - }); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "interpolate", - In: "query", - Err: err, - } - } - // Decode query: format. - if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "format", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotFormatVal PerformPredictionFormat - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToString(val) - if err != nil { - return err - } - - paramsDotFormatVal = PerformPredictionFormat(c) - return nil - }(); err != nil { - return err - } - params.Format.SetTo(paramsDotFormatVal) - return nil - }); err != nil { - return err - } - if err := func() error { - if value, ok := params.Format.Get(); ok { - if err := func() error { - if err := value.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "format", - In: "query", - Err: err, - } - } - // Decode query: dataset. 
- if err := func() error { - cfg := uri.QueryParameterDecodingConfig{ - Name: "dataset", - Style: uri.QueryStyleForm, - Explode: true, - } - - if err := q.HasParam(cfg); err == nil { - if err := q.DecodeParam(cfg, func(d uri.Decoder) error { - var paramsDotDatasetVal time.Time - if err := func() error { - val, err := d.DecodeValue() - if err != nil { - return err - } - - c, err := conv.ToDateTime(val) - if err != nil { - return err - } - - paramsDotDatasetVal = c - return nil - }(); err != nil { - return err - } - params.Dataset.SetTo(paramsDotDatasetVal) - return nil - }); err != nil { - return err - } - } - return nil - }(); err != nil { - return params, &ogenerrors.DecodeParamError{ - Name: "dataset", - In: "query", - Err: err, - } - } - return params, nil -} diff --git a/pkg/rest/oas_request_decoders_gen.go b/pkg/rest/oas_request_decoders_gen.go deleted file mode 100644 index d99e2f0..0000000 --- a/pkg/rest/oas_request_decoders_gen.go +++ /dev/null @@ -1,3 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn diff --git a/pkg/rest/oas_request_encoders_gen.go b/pkg/rest/oas_request_encoders_gen.go deleted file mode 100644 index d99e2f0..0000000 --- a/pkg/rest/oas_request_encoders_gen.go +++ /dev/null @@ -1,3 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn diff --git a/pkg/rest/oas_response_decoders_gen.go b/pkg/rest/oas_response_decoders_gen.go deleted file mode 100644 index 352068c..0000000 --- a/pkg/rest/oas_response_decoders_gen.go +++ /dev/null @@ -1,198 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "io" - "mime" - "net/http" - - "github.com/go-faster/errors" - "github.com/go-faster/jx" - "github.com/ogen-go/ogen/ogenerrors" - "github.com/ogen-go/ogen/validate" -) - -func decodePerformPredictionResponse(resp *http.Response) (res *PredictionResult, _ error) { - switch resp.StatusCode { - case 200: - // Code 200. 
- ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return res, errors.Wrap(err, "parse media type") - } - switch { - case ct == "application/json": - buf, err := io.ReadAll(resp.Body) - if err != nil { - return res, err - } - d := jx.DecodeBytes(buf) - - var response PredictionResult - if err := func() error { - if err := response.Decode(d); err != nil { - return err - } - if err := d.Skip(); err != io.EOF { - return errors.New("unexpected trailing data") - } - return nil - }(); err != nil { - err = &ogenerrors.DecodeBodyError{ - ContentType: ct, - Body: buf, - Err: err, - } - return res, err - } - // Validate response. - if err := func() error { - if err := response.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - return res, errors.Wrap(err, "validate") - } - return &response, nil - default: - return res, validate.InvalidContentType(ct) - } - } - // Convenient error response. - defRes, err := func() (res *ErrorStatusCode, err error) { - ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return res, errors.Wrap(err, "parse media type") - } - switch { - case ct == "application/json": - buf, err := io.ReadAll(resp.Body) - if err != nil { - return res, err - } - d := jx.DecodeBytes(buf) - - var response Error - if err := func() error { - if err := response.Decode(d); err != nil { - return err - } - if err := d.Skip(); err != io.EOF { - return errors.New("unexpected trailing data") - } - return nil - }(); err != nil { - err = &ogenerrors.DecodeBodyError{ - ContentType: ct, - Body: buf, - Err: err, - } - return res, err - } - return &ErrorStatusCode{ - StatusCode: resp.StatusCode, - Response: response, - }, nil - default: - return res, validate.InvalidContentType(ct) - } - }() - if err != nil { - return res, errors.Wrapf(err, "default (code %d)", resp.StatusCode) - } - return res, errors.Wrap(defRes, "error") -} - -func decodeReadinessCheckResponse(resp *http.Response) 
(res *ReadinessResponse, _ error) { - switch resp.StatusCode { - case 200: - // Code 200. - ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return res, errors.Wrap(err, "parse media type") - } - switch { - case ct == "application/json": - buf, err := io.ReadAll(resp.Body) - if err != nil { - return res, err - } - d := jx.DecodeBytes(buf) - - var response ReadinessResponse - if err := func() error { - if err := response.Decode(d); err != nil { - return err - } - if err := d.Skip(); err != io.EOF { - return errors.New("unexpected trailing data") - } - return nil - }(); err != nil { - err = &ogenerrors.DecodeBodyError{ - ContentType: ct, - Body: buf, - Err: err, - } - return res, err - } - // Validate response. - if err := func() error { - if err := response.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - return res, errors.Wrap(err, "validate") - } - return &response, nil - default: - return res, validate.InvalidContentType(ct) - } - } - // Convenient error response. 
- defRes, err := func() (res *ErrorStatusCode, err error) { - ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return res, errors.Wrap(err, "parse media type") - } - switch { - case ct == "application/json": - buf, err := io.ReadAll(resp.Body) - if err != nil { - return res, err - } - d := jx.DecodeBytes(buf) - - var response Error - if err := func() error { - if err := response.Decode(d); err != nil { - return err - } - if err := d.Skip(); err != io.EOF { - return errors.New("unexpected trailing data") - } - return nil - }(); err != nil { - err = &ogenerrors.DecodeBodyError{ - ContentType: ct, - Body: buf, - Err: err, - } - return res, err - } - return &ErrorStatusCode{ - StatusCode: resp.StatusCode, - Response: response, - }, nil - default: - return res, validate.InvalidContentType(ct) - } - }() - if err != nil { - return res, errors.Wrapf(err, "default (code %d)", resp.StatusCode) - } - return res, errors.Wrap(defRes, "error") -} diff --git a/pkg/rest/oas_response_encoders_gen.go b/pkg/rest/oas_response_encoders_gen.go deleted file mode 100644 index a95167d..0000000 --- a/pkg/rest/oas_response_encoders_gen.go +++ /dev/null @@ -1,68 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. 
- -package gsn - -import ( - "net/http" - - "github.com/go-faster/errors" - "github.com/go-faster/jx" - ht "github.com/ogen-go/ogen/http" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -func encodePerformPredictionResponse(response *PredictionResult, w http.ResponseWriter, span trace.Span) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(200) - span.SetStatus(codes.Ok, http.StatusText(200)) - - e := new(jx.Encoder) - response.Encode(e) - if _, err := e.WriteTo(w); err != nil { - return errors.Wrap(err, "write") - } - - return nil -} - -func encodeReadinessCheckResponse(response *ReadinessResponse, w http.ResponseWriter, span trace.Span) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(200) - span.SetStatus(codes.Ok, http.StatusText(200)) - - e := new(jx.Encoder) - response.Encode(e) - if _, err := e.WriteTo(w); err != nil { - return errors.Wrap(err, "write") - } - - return nil -} - -func encodeErrorResponse(response *ErrorStatusCode, w http.ResponseWriter, span trace.Span) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - code := response.StatusCode - if code == 0 { - // Set default status code. - code = http.StatusOK - } - w.WriteHeader(code) - if st := http.StatusText(code); code >= http.StatusBadRequest { - span.SetStatus(codes.Error, st) - } else { - span.SetStatus(codes.Ok, st) - } - - e := new(jx.Encoder) - response.Response.Encode(e) - if _, err := e.WriteTo(w); err != nil { - return errors.Wrap(err, "write") - } - - if code >= http.StatusInternalServerError { - return errors.Wrapf(ht.ErrInternalServerErrorResponse, "code: %d, message: %s", code, http.StatusText(code)) - } - return nil - -} diff --git a/pkg/rest/oas_router_gen.go b/pkg/rest/oas_router_gen.go deleted file mode 100644 index 5f7617a..0000000 --- a/pkg/rest/oas_router_gen.go +++ /dev/null @@ -1,258 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. 
- -package gsn - -import ( - "net/http" - "net/url" - "strings" - - "github.com/ogen-go/ogen/uri" -) - -func (s *Server) cutPrefix(path string) (string, bool) { - prefix := s.cfg.Prefix - if prefix == "" { - return path, true - } - if !strings.HasPrefix(path, prefix) { - // Prefix doesn't match. - return "", false - } - // Cut prefix from the path. - return strings.TrimPrefix(path, prefix), true -} - -// ServeHTTP serves http request as defined by OpenAPI v3 specification, -// calling handler that matches the path or returning not found error. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - elem := r.URL.Path - elemIsEscaped := false - if rawPath := r.URL.RawPath; rawPath != "" { - if normalized, ok := uri.NormalizeEscapedPath(rawPath); ok { - elem = normalized - elemIsEscaped = strings.ContainsRune(elem, '%') - } - } - - elem, ok := s.cutPrefix(elem) - if !ok || len(elem) == 0 { - s.notFound(w, r) - return - } - - // Static code generated router with unwrapped path search. - switch { - default: - if len(elem) == 0 { - break - } - switch elem[0] { - case '/': // Prefix: "/" - - if l := len("/"); len(elem) >= l && elem[0:l] == "/" { - elem = elem[l:] - } else { - break - } - - if len(elem) == 0 { - break - } - switch elem[0] { - case 'a': // Prefix: "api/v1/prediction" - - if l := len("api/v1/prediction"); len(elem) >= l && elem[0:l] == "api/v1/prediction" { - elem = elem[l:] - } else { - break - } - - if len(elem) == 0 { - // Leaf node. - switch r.Method { - case "GET": - s.handlePerformPredictionRequest([0]string{}, elemIsEscaped, w, r) - default: - s.notAllowed(w, r, "GET") - } - - return - } - - case 'r': // Prefix: "ready" - - if l := len("ready"); len(elem) >= l && elem[0:l] == "ready" { - elem = elem[l:] - } else { - break - } - - if len(elem) == 0 { - // Leaf node. 
- switch r.Method { - case "GET": - s.handleReadinessCheckRequest([0]string{}, elemIsEscaped, w, r) - default: - s.notAllowed(w, r, "GET") - } - - return - } - - } - - } - } - s.notFound(w, r) -} - -// Route is route object. -type Route struct { - name string - summary string - operationID string - operationGroup string - pathPattern string - count int - args [0]string -} - -// Name returns ogen operation name. -// -// It is guaranteed to be unique and not empty. -func (r Route) Name() string { - return r.name -} - -// Summary returns OpenAPI summary. -func (r Route) Summary() string { - return r.summary -} - -// OperationID returns OpenAPI operationId. -func (r Route) OperationID() string { - return r.operationID -} - -// OperationGroup returns the x-ogen-operation-group value. -func (r Route) OperationGroup() string { - return r.operationGroup -} - -// PathPattern returns OpenAPI path. -func (r Route) PathPattern() string { - return r.pathPattern -} - -// Args returns parsed arguments. -func (r Route) Args() []string { - return r.args[:r.count] -} - -// FindRoute finds Route for given method and path. -// -// Note: this method does not unescape path or handle reserved characters in path properly. Use FindPath instead. -func (s *Server) FindRoute(method, path string) (Route, bool) { - return s.FindPath(method, &url.URL{Path: path}) -} - -// FindPath finds Route for given method and URL. -func (s *Server) FindPath(method string, u *url.URL) (r Route, _ bool) { - var ( - elem = u.Path - args = r.args - ) - if rawPath := u.RawPath; rawPath != "" { - if normalized, ok := uri.NormalizeEscapedPath(rawPath); ok { - elem = normalized - } - defer func() { - for i, arg := range r.args[:r.count] { - if unescaped, err := url.PathUnescape(arg); err == nil { - r.args[i] = unescaped - } - } - }() - } - - elem, ok := s.cutPrefix(elem) - if !ok { - return r, false - } - - // Static code generated router with unwrapped path search. 
- switch { - default: - if len(elem) == 0 { - break - } - switch elem[0] { - case '/': // Prefix: "/" - - if l := len("/"); len(elem) >= l && elem[0:l] == "/" { - elem = elem[l:] - } else { - break - } - - if len(elem) == 0 { - break - } - switch elem[0] { - case 'a': // Prefix: "api/v1/prediction" - - if l := len("api/v1/prediction"); len(elem) >= l && elem[0:l] == "api/v1/prediction" { - elem = elem[l:] - } else { - break - } - - if len(elem) == 0 { - // Leaf node. - switch method { - case "GET": - r.name = PerformPredictionOperation - r.summary = "Perform prediction" - r.operationID = "performPrediction" - r.operationGroup = "" - r.pathPattern = "/api/v1/prediction" - r.args = args - r.count = 0 - return r, true - default: - return - } - } - - case 'r': // Prefix: "ready" - - if l := len("ready"); len(elem) >= l && elem[0:l] == "ready" { - elem = elem[l:] - } else { - break - } - - if len(elem) == 0 { - // Leaf node. - switch method { - case "GET": - r.name = ReadinessCheckOperation - r.summary = "Readiness check" - r.operationID = "readinessCheck" - r.operationGroup = "" - r.pathPattern = "/ready" - r.args = args - r.count = 0 - return r, true - default: - return - } - } - - } - - } - } - return r, false -} diff --git a/pkg/rest/oas_schemas_gen.go b/pkg/rest/oas_schemas_gen.go deleted file mode 100644 index b484e26..0000000 --- a/pkg/rest/oas_schemas_gen.go +++ /dev/null @@ -1,746 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "fmt" - "time" - - "github.com/go-faster/errors" -) - -func (s *ErrorStatusCode) Error() string { - return fmt.Sprintf("code %d: %+v", s.StatusCode, s.Response) -} - -// Ref: #/components/schemas/Error -type Error struct { - Message string `json:"message"` - Details OptString `json:"details"` -} - -// GetMessage returns the value of Message. -func (s *Error) GetMessage() string { - return s.Message -} - -// GetDetails returns the value of Details. 
-func (s *Error) GetDetails() OptString { - return s.Details -} - -// SetMessage sets the value of Message. -func (s *Error) SetMessage(val string) { - s.Message = val -} - -// SetDetails sets the value of Details. -func (s *Error) SetDetails(val OptString) { - s.Details = val -} - -// ErrorStatusCode wraps Error with StatusCode. -type ErrorStatusCode struct { - StatusCode int - Response Error -} - -// GetStatusCode returns the value of StatusCode. -func (s *ErrorStatusCode) GetStatusCode() int { - return s.StatusCode -} - -// GetResponse returns the value of Response. -func (s *ErrorStatusCode) GetResponse() Error { - return s.Response -} - -// SetStatusCode sets the value of StatusCode. -func (s *ErrorStatusCode) SetStatusCode(val int) { - s.StatusCode = val -} - -// SetResponse sets the value of Response. -func (s *ErrorStatusCode) SetResponse(val Error) { - s.Response = val -} - -// NewOptBool returns new OptBool with value set to v. -func NewOptBool(v bool) OptBool { - return OptBool{ - Value: v, - Set: true, - } -} - -// OptBool is optional bool. -type OptBool struct { - Value bool - Set bool -} - -// IsSet returns true if OptBool was set. -func (o OptBool) IsSet() bool { return o.Set } - -// Reset unsets value. -func (o *OptBool) Reset() { - var v bool - o.Value = v - o.Set = false -} - -// SetTo sets value to v. -func (o *OptBool) SetTo(v bool) { - o.Set = true - o.Value = v -} - -// Get returns value and boolean that denotes whether value was set. -func (o OptBool) Get() (v bool, ok bool) { - if !o.Set { - return v, false - } - return o.Value, true -} - -// Or returns value if set, or given parameter if does not. -func (o OptBool) Or(d bool) bool { - if v, ok := o.Get(); ok { - return v - } - return d -} - -// NewOptDateTime returns new OptDateTime with value set to v. -func NewOptDateTime(v time.Time) OptDateTime { - return OptDateTime{ - Value: v, - Set: true, - } -} - -// OptDateTime is optional time.Time. 
-type OptDateTime struct { - Value time.Time - Set bool -} - -// IsSet returns true if OptDateTime was set. -func (o OptDateTime) IsSet() bool { return o.Set } - -// Reset unsets value. -func (o *OptDateTime) Reset() { - var v time.Time - o.Value = v - o.Set = false -} - -// SetTo sets value to v. -func (o *OptDateTime) SetTo(v time.Time) { - o.Set = true - o.Value = v -} - -// Get returns value and boolean that denotes whether value was set. -func (o OptDateTime) Get() (v time.Time, ok bool) { - if !o.Set { - return v, false - } - return o.Value, true -} - -// Or returns value if set, or given parameter if does not. -func (o OptDateTime) Or(d time.Time) time.Time { - if v, ok := o.Get(); ok { - return v - } - return d -} - -// NewOptFloat64 returns new OptFloat64 with value set to v. -func NewOptFloat64(v float64) OptFloat64 { - return OptFloat64{ - Value: v, - Set: true, - } -} - -// OptFloat64 is optional float64. -type OptFloat64 struct { - Value float64 - Set bool -} - -// IsSet returns true if OptFloat64 was set. -func (o OptFloat64) IsSet() bool { return o.Set } - -// Reset unsets value. -func (o *OptFloat64) Reset() { - var v float64 - o.Value = v - o.Set = false -} - -// SetTo sets value to v. -func (o *OptFloat64) SetTo(v float64) { - o.Set = true - o.Value = v -} - -// Get returns value and boolean that denotes whether value was set. -func (o OptFloat64) Get() (v float64, ok bool) { - if !o.Set { - return v, false - } - return o.Value, true -} - -// Or returns value if set, or given parameter if does not. -func (o OptFloat64) Or(d float64) float64 { - if v, ok := o.Get(); ok { - return v - } - return d -} - -// NewOptPerformPredictionFormat returns new OptPerformPredictionFormat with value set to v. -func NewOptPerformPredictionFormat(v PerformPredictionFormat) OptPerformPredictionFormat { - return OptPerformPredictionFormat{ - Value: v, - Set: true, - } -} - -// OptPerformPredictionFormat is optional PerformPredictionFormat. 
-type OptPerformPredictionFormat struct { - Value PerformPredictionFormat - Set bool -} - -// IsSet returns true if OptPerformPredictionFormat was set. -func (o OptPerformPredictionFormat) IsSet() bool { return o.Set } - -// Reset unsets value. -func (o *OptPerformPredictionFormat) Reset() { - var v PerformPredictionFormat - o.Value = v - o.Set = false -} - -// SetTo sets value to v. -func (o *OptPerformPredictionFormat) SetTo(v PerformPredictionFormat) { - o.Set = true - o.Value = v -} - -// Get returns value and boolean that denotes whether value was set. -func (o OptPerformPredictionFormat) Get() (v PerformPredictionFormat, ok bool) { - if !o.Set { - return v, false - } - return o.Value, true -} - -// Or returns value if set, or given parameter if does not. -func (o OptPerformPredictionFormat) Or(d PerformPredictionFormat) PerformPredictionFormat { - if v, ok := o.Get(); ok { - return v - } - return d -} - -// NewOptPerformPredictionProfile returns new OptPerformPredictionProfile with value set to v. -func NewOptPerformPredictionProfile(v PerformPredictionProfile) OptPerformPredictionProfile { - return OptPerformPredictionProfile{ - Value: v, - Set: true, - } -} - -// OptPerformPredictionProfile is optional PerformPredictionProfile. -type OptPerformPredictionProfile struct { - Value PerformPredictionProfile - Set bool -} - -// IsSet returns true if OptPerformPredictionProfile was set. -func (o OptPerformPredictionProfile) IsSet() bool { return o.Set } - -// Reset unsets value. -func (o *OptPerformPredictionProfile) Reset() { - var v PerformPredictionProfile - o.Value = v - o.Set = false -} - -// SetTo sets value to v. -func (o *OptPerformPredictionProfile) SetTo(v PerformPredictionProfile) { - o.Set = true - o.Value = v -} - -// Get returns value and boolean that denotes whether value was set. 
-func (o OptPerformPredictionProfile) Get() (v PerformPredictionProfile, ok bool) { - if !o.Set { - return v, false - } - return o.Value, true -} - -// Or returns value if set, or given parameter if does not. -func (o OptPerformPredictionProfile) Or(d PerformPredictionProfile) PerformPredictionProfile { - if v, ok := o.Get(); ok { - return v - } - return d -} - -// NewOptString returns new OptString with value set to v. -func NewOptString(v string) OptString { - return OptString{ - Value: v, - Set: true, - } -} - -// OptString is optional string. -type OptString struct { - Value string - Set bool -} - -// IsSet returns true if OptString was set. -func (o OptString) IsSet() bool { return o.Set } - -// Reset unsets value. -func (o *OptString) Reset() { - var v string - o.Value = v - o.Set = false -} - -// SetTo sets value to v. -func (o *OptString) SetTo(v string) { - o.Set = true - o.Value = v -} - -// Get returns value and boolean that denotes whether value was set. -func (o OptString) Get() (v string, ok bool) { - if !o.Set { - return v, false - } - return o.Value, true -} - -// Or returns value if set, or given parameter if does not. -func (o OptString) Or(d string) string { - if v, ok := o.Get(); ok { - return v - } - return d -} - -type PerformPredictionFormat string - -const ( - PerformPredictionFormatJSON PerformPredictionFormat = "json" -) - -// AllValues returns all PerformPredictionFormat values. -func (PerformPredictionFormat) AllValues() []PerformPredictionFormat { - return []PerformPredictionFormat{ - PerformPredictionFormatJSON, - } -} - -// MarshalText implements encoding.TextMarshaler. -func (s PerformPredictionFormat) MarshalText() ([]byte, error) { - switch s { - case PerformPredictionFormatJSON: - return []byte(s), nil - default: - return nil, errors.Errorf("invalid value: %q", s) - } -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (s *PerformPredictionFormat) UnmarshalText(data []byte) error { - switch PerformPredictionFormat(data) { - case PerformPredictionFormatJSON: - *s = PerformPredictionFormatJSON - return nil - default: - return errors.Errorf("invalid value: %q", data) - } -} - -type PerformPredictionProfile string - -const ( - PerformPredictionProfileStandardProfile PerformPredictionProfile = "standard_profile" - PerformPredictionProfileFloatProfile PerformPredictionProfile = "float_profile" - PerformPredictionProfileReverseProfile PerformPredictionProfile = "reverse_profile" - PerformPredictionProfileCustomProfile PerformPredictionProfile = "custom_profile" -) - -// AllValues returns all PerformPredictionProfile values. -func (PerformPredictionProfile) AllValues() []PerformPredictionProfile { - return []PerformPredictionProfile{ - PerformPredictionProfileStandardProfile, - PerformPredictionProfileFloatProfile, - PerformPredictionProfileReverseProfile, - PerformPredictionProfileCustomProfile, - } -} - -// MarshalText implements encoding.TextMarshaler. -func (s PerformPredictionProfile) MarshalText() ([]byte, error) { - switch s { - case PerformPredictionProfileStandardProfile: - return []byte(s), nil - case PerformPredictionProfileFloatProfile: - return []byte(s), nil - case PerformPredictionProfileReverseProfile: - return []byte(s), nil - case PerformPredictionProfileCustomProfile: - return []byte(s), nil - default: - return nil, errors.Errorf("invalid value: %q", s) - } -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (s *PerformPredictionProfile) UnmarshalText(data []byte) error { - switch PerformPredictionProfile(data) { - case PerformPredictionProfileStandardProfile: - *s = PerformPredictionProfileStandardProfile - return nil - case PerformPredictionProfileFloatProfile: - *s = PerformPredictionProfileFloatProfile - return nil - case PerformPredictionProfileReverseProfile: - *s = PerformPredictionProfileReverseProfile - return nil - case PerformPredictionProfileCustomProfile: - *s = PerformPredictionProfileCustomProfile - return nil - default: - return errors.Errorf("invalid value: %q", data) - } -} - -type PerformPredictionSimulateStagesItem string - -const ( - PerformPredictionSimulateStagesItemAscent PerformPredictionSimulateStagesItem = "ascent" - PerformPredictionSimulateStagesItemDescent PerformPredictionSimulateStagesItem = "descent" - PerformPredictionSimulateStagesItemFloat PerformPredictionSimulateStagesItem = "float" -) - -// AllValues returns all PerformPredictionSimulateStagesItem values. -func (PerformPredictionSimulateStagesItem) AllValues() []PerformPredictionSimulateStagesItem { - return []PerformPredictionSimulateStagesItem{ - PerformPredictionSimulateStagesItemAscent, - PerformPredictionSimulateStagesItemDescent, - PerformPredictionSimulateStagesItemFloat, - } -} - -// MarshalText implements encoding.TextMarshaler. -func (s PerformPredictionSimulateStagesItem) MarshalText() ([]byte, error) { - switch s { - case PerformPredictionSimulateStagesItemAscent: - return []byte(s), nil - case PerformPredictionSimulateStagesItemDescent: - return []byte(s), nil - case PerformPredictionSimulateStagesItemFloat: - return []byte(s), nil - default: - return nil, errors.Errorf("invalid value: %q", s) - } -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (s *PerformPredictionSimulateStagesItem) UnmarshalText(data []byte) error { - switch PerformPredictionSimulateStagesItem(data) { - case PerformPredictionSimulateStagesItemAscent: - *s = PerformPredictionSimulateStagesItemAscent - return nil - case PerformPredictionSimulateStagesItemDescent: - *s = PerformPredictionSimulateStagesItemDescent - return nil - case PerformPredictionSimulateStagesItemFloat: - *s = PerformPredictionSimulateStagesItemFloat - return nil - default: - return errors.Errorf("invalid value: %q", data) - } -} - -// Ref: #/components/schemas/PredictionResult -type PredictionResult struct { - Metadata PredictionResultMetadata `json:"metadata"` - Prediction []PredictionResultPredictionItem `json:"prediction"` -} - -// GetMetadata returns the value of Metadata. -func (s *PredictionResult) GetMetadata() PredictionResultMetadata { - return s.Metadata -} - -// GetPrediction returns the value of Prediction. -func (s *PredictionResult) GetPrediction() []PredictionResultPredictionItem { - return s.Prediction -} - -// SetMetadata sets the value of Metadata. -func (s *PredictionResult) SetMetadata(val PredictionResultMetadata) { - s.Metadata = val -} - -// SetPrediction sets the value of Prediction. -func (s *PredictionResult) SetPrediction(val []PredictionResultPredictionItem) { - s.Prediction = val -} - -type PredictionResultMetadata struct { - CompleteDatetime time.Time `json:"complete_datetime"` - StartDatetime time.Time `json:"start_datetime"` -} - -// GetCompleteDatetime returns the value of CompleteDatetime. -func (s *PredictionResultMetadata) GetCompleteDatetime() time.Time { - return s.CompleteDatetime -} - -// GetStartDatetime returns the value of StartDatetime. -func (s *PredictionResultMetadata) GetStartDatetime() time.Time { - return s.StartDatetime -} - -// SetCompleteDatetime sets the value of CompleteDatetime. 
-func (s *PredictionResultMetadata) SetCompleteDatetime(val time.Time) { - s.CompleteDatetime = val -} - -// SetStartDatetime sets the value of StartDatetime. -func (s *PredictionResultMetadata) SetStartDatetime(val time.Time) { - s.StartDatetime = val -} - -type PredictionResultPredictionItem struct { - Stage PredictionResultPredictionItemStage `json:"stage"` - Trajectory []PredictionResultPredictionItemTrajectoryItem `json:"trajectory"` -} - -// GetStage returns the value of Stage. -func (s *PredictionResultPredictionItem) GetStage() PredictionResultPredictionItemStage { - return s.Stage -} - -// GetTrajectory returns the value of Trajectory. -func (s *PredictionResultPredictionItem) GetTrajectory() []PredictionResultPredictionItemTrajectoryItem { - return s.Trajectory -} - -// SetStage sets the value of Stage. -func (s *PredictionResultPredictionItem) SetStage(val PredictionResultPredictionItemStage) { - s.Stage = val -} - -// SetTrajectory sets the value of Trajectory. -func (s *PredictionResultPredictionItem) SetTrajectory(val []PredictionResultPredictionItemTrajectoryItem) { - s.Trajectory = val -} - -type PredictionResultPredictionItemStage string - -const ( - PredictionResultPredictionItemStageAscent PredictionResultPredictionItemStage = "ascent" - PredictionResultPredictionItemStageDescent PredictionResultPredictionItemStage = "descent" - PredictionResultPredictionItemStageFloat PredictionResultPredictionItemStage = "float" -) - -// AllValues returns all PredictionResultPredictionItemStage values. -func (PredictionResultPredictionItemStage) AllValues() []PredictionResultPredictionItemStage { - return []PredictionResultPredictionItemStage{ - PredictionResultPredictionItemStageAscent, - PredictionResultPredictionItemStageDescent, - PredictionResultPredictionItemStageFloat, - } -} - -// MarshalText implements encoding.TextMarshaler. 
-func (s PredictionResultPredictionItemStage) MarshalText() ([]byte, error) { - switch s { - case PredictionResultPredictionItemStageAscent: - return []byte(s), nil - case PredictionResultPredictionItemStageDescent: - return []byte(s), nil - case PredictionResultPredictionItemStageFloat: - return []byte(s), nil - default: - return nil, errors.Errorf("invalid value: %q", s) - } -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (s *PredictionResultPredictionItemStage) UnmarshalText(data []byte) error { - switch PredictionResultPredictionItemStage(data) { - case PredictionResultPredictionItemStageAscent: - *s = PredictionResultPredictionItemStageAscent - return nil - case PredictionResultPredictionItemStageDescent: - *s = PredictionResultPredictionItemStageDescent - return nil - case PredictionResultPredictionItemStageFloat: - *s = PredictionResultPredictionItemStageFloat - return nil - default: - return errors.Errorf("invalid value: %q", data) - } -} - -type PredictionResultPredictionItemTrajectoryItem struct { - Datetime time.Time `json:"datetime"` - Latitude float64 `json:"latitude"` - Longitude float64 `json:"longitude"` - Altitude float64 `json:"altitude"` -} - -// GetDatetime returns the value of Datetime. -func (s *PredictionResultPredictionItemTrajectoryItem) GetDatetime() time.Time { - return s.Datetime -} - -// GetLatitude returns the value of Latitude. -func (s *PredictionResultPredictionItemTrajectoryItem) GetLatitude() float64 { - return s.Latitude -} - -// GetLongitude returns the value of Longitude. -func (s *PredictionResultPredictionItemTrajectoryItem) GetLongitude() float64 { - return s.Longitude -} - -// GetAltitude returns the value of Altitude. -func (s *PredictionResultPredictionItemTrajectoryItem) GetAltitude() float64 { - return s.Altitude -} - -// SetDatetime sets the value of Datetime. 
-func (s *PredictionResultPredictionItemTrajectoryItem) SetDatetime(val time.Time) { - s.Datetime = val -} - -// SetLatitude sets the value of Latitude. -func (s *PredictionResultPredictionItemTrajectoryItem) SetLatitude(val float64) { - s.Latitude = val -} - -// SetLongitude sets the value of Longitude. -func (s *PredictionResultPredictionItemTrajectoryItem) SetLongitude(val float64) { - s.Longitude = val -} - -// SetAltitude sets the value of Altitude. -func (s *PredictionResultPredictionItemTrajectoryItem) SetAltitude(val float64) { - s.Altitude = val -} - -// Ref: #/components/schemas/ReadinessResponse -type ReadinessResponse struct { - Status ReadinessResponseStatus `json:"status"` - LastUpdate OptDateTime `json:"last_update"` - IsFresh OptBool `json:"is_fresh"` - ErrorMessage OptString `json:"error_message"` -} - -// GetStatus returns the value of Status. -func (s *ReadinessResponse) GetStatus() ReadinessResponseStatus { - return s.Status -} - -// GetLastUpdate returns the value of LastUpdate. -func (s *ReadinessResponse) GetLastUpdate() OptDateTime { - return s.LastUpdate -} - -// GetIsFresh returns the value of IsFresh. -func (s *ReadinessResponse) GetIsFresh() OptBool { - return s.IsFresh -} - -// GetErrorMessage returns the value of ErrorMessage. -func (s *ReadinessResponse) GetErrorMessage() OptString { - return s.ErrorMessage -} - -// SetStatus sets the value of Status. -func (s *ReadinessResponse) SetStatus(val ReadinessResponseStatus) { - s.Status = val -} - -// SetLastUpdate sets the value of LastUpdate. -func (s *ReadinessResponse) SetLastUpdate(val OptDateTime) { - s.LastUpdate = val -} - -// SetIsFresh sets the value of IsFresh. -func (s *ReadinessResponse) SetIsFresh(val OptBool) { - s.IsFresh = val -} - -// SetErrorMessage sets the value of ErrorMessage. 
-func (s *ReadinessResponse) SetErrorMessage(val OptString) { - s.ErrorMessage = val -} - -type ReadinessResponseStatus string - -const ( - ReadinessResponseStatusOk ReadinessResponseStatus = "ok" - ReadinessResponseStatusNotReady ReadinessResponseStatus = "not_ready" - ReadinessResponseStatusError ReadinessResponseStatus = "error" -) - -// AllValues returns all ReadinessResponseStatus values. -func (ReadinessResponseStatus) AllValues() []ReadinessResponseStatus { - return []ReadinessResponseStatus{ - ReadinessResponseStatusOk, - ReadinessResponseStatusNotReady, - ReadinessResponseStatusError, - } -} - -// MarshalText implements encoding.TextMarshaler. -func (s ReadinessResponseStatus) MarshalText() ([]byte, error) { - switch s { - case ReadinessResponseStatusOk: - return []byte(s), nil - case ReadinessResponseStatusNotReady: - return []byte(s), nil - case ReadinessResponseStatusError: - return []byte(s), nil - default: - return nil, errors.Errorf("invalid value: %q", s) - } -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (s *ReadinessResponseStatus) UnmarshalText(data []byte) error { - switch ReadinessResponseStatus(data) { - case ReadinessResponseStatusOk: - *s = ReadinessResponseStatusOk - return nil - case ReadinessResponseStatusNotReady: - *s = ReadinessResponseStatusNotReady - return nil - case ReadinessResponseStatusError: - *s = ReadinessResponseStatusError - return nil - default: - return errors.Errorf("invalid value: %q", data) - } -} diff --git a/pkg/rest/oas_server_gen.go b/pkg/rest/oas_server_gen.go deleted file mode 100644 index 9ef451e..0000000 --- a/pkg/rest/oas_server_gen.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "context" -) - -// Handler handles operations described by OpenAPI v3 specification. -type Handler interface { - // PerformPrediction implements performPrediction operation. - // - // Perform prediction. 
- // - // GET /api/v1/prediction - PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResult, error) - // ReadinessCheck implements readinessCheck operation. - // - // Readiness check. - // - // GET /ready - ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) - // NewError creates *ErrorStatusCode from error returned by handler. - // - // Used for common default response. - NewError(ctx context.Context, err error) *ErrorStatusCode -} - -// Server implements http server based on OpenAPI v3 specification and -// calls Handler to handle requests. -type Server struct { - h Handler - baseServer -} - -// NewServer creates new Server. -func NewServer(h Handler, opts ...ServerOption) (*Server, error) { - s, err := newServerConfig(opts...).baseServer() - if err != nil { - return nil, err - } - return &Server{ - h: h, - baseServer: s, - }, nil -} diff --git a/pkg/rest/oas_unimplemented_gen.go b/pkg/rest/oas_unimplemented_gen.go deleted file mode 100644 index 9662197..0000000 --- a/pkg/rest/oas_unimplemented_gen.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "context" - - ht "github.com/ogen-go/ogen/http" -) - -// UnimplementedHandler is no-op Handler which returns http.ErrNotImplemented. -type UnimplementedHandler struct{} - -var _ Handler = UnimplementedHandler{} - -// PerformPrediction implements performPrediction operation. -// -// Perform prediction. -// -// GET /api/v1/prediction -func (UnimplementedHandler) PerformPrediction(ctx context.Context, params PerformPredictionParams) (r *PredictionResult, _ error) { - return r, ht.ErrNotImplemented -} - -// ReadinessCheck implements readinessCheck operation. -// -// Readiness check. -// -// GET /ready -func (UnimplementedHandler) ReadinessCheck(ctx context.Context) (r *ReadinessResponse, _ error) { - return r, ht.ErrNotImplemented -} - -// NewError creates *ErrorStatusCode from error returned by handler. 
-// -// Used for common default response. -func (UnimplementedHandler) NewError(ctx context.Context, err error) (r *ErrorStatusCode) { - r = new(ErrorStatusCode) - return r -} diff --git a/pkg/rest/oas_validators_gen.go b/pkg/rest/oas_validators_gen.go deleted file mode 100644 index 1207d8f..0000000 --- a/pkg/rest/oas_validators_gen.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by ogen, DO NOT EDIT. - -package gsn - -import ( - "fmt" - - "github.com/go-faster/errors" - "github.com/ogen-go/ogen/validate" -) - -func (s PerformPredictionFormat) Validate() error { - switch s { - case "json": - return nil - default: - return errors.Errorf("invalid value: %v", s) - } -} - -func (s PerformPredictionProfile) Validate() error { - switch s { - case "standard_profile": - return nil - case "float_profile": - return nil - case "reverse_profile": - return nil - case "custom_profile": - return nil - default: - return errors.Errorf("invalid value: %v", s) - } -} - -func (s PerformPredictionSimulateStagesItem) Validate() error { - switch s { - case "ascent": - return nil - case "descent": - return nil - case "float": - return nil - default: - return errors.Errorf("invalid value: %v", s) - } -} - -func (s *PredictionResult) Validate() error { - if s == nil { - return validate.ErrNilPointer - } - - var failures []validate.FieldError - if err := func() error { - if s.Prediction == nil { - return errors.New("nil is invalid value") - } - var failures []validate.FieldError - for i, elem := range s.Prediction { - if err := func() error { - if err := elem.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: fmt.Sprintf("[%d]", i), - Error: err, - }) - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: "prediction", - Error: err, - }) - } - if len(failures) > 0 { - return 
&validate.Error{Fields: failures} - } - return nil -} - -func (s *PredictionResultPredictionItem) Validate() error { - if s == nil { - return validate.ErrNilPointer - } - - var failures []validate.FieldError - if err := func() error { - if err := s.Stage.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: "stage", - Error: err, - }) - } - if err := func() error { - if s.Trajectory == nil { - return errors.New("nil is invalid value") - } - var failures []validate.FieldError - for i, elem := range s.Trajectory { - if err := func() error { - if err := elem.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: fmt.Sprintf("[%d]", i), - Error: err, - }) - } - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: "trajectory", - Error: err, - }) - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - return nil -} - -func (s PredictionResultPredictionItemStage) Validate() error { - switch s { - case "ascent": - return nil - case "descent": - return nil - case "float": - return nil - default: - return errors.Errorf("invalid value: %v", s) - } -} - -func (s *PredictionResultPredictionItemTrajectoryItem) Validate() error { - if s == nil { - return validate.ErrNilPointer - } - - var failures []validate.FieldError - if err := func() error { - if err := (validate.Float{}).Validate(float64(s.Latitude)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: "latitude", - Error: err, - }) - } - if err := func() error { - if err := (validate.Float{}).Validate(float64(s.Longitude)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - failures = append(failures, 
validate.FieldError{ - Name: "longitude", - Error: err, - }) - } - if err := func() error { - if err := (validate.Float{}).Validate(float64(s.Altitude)); err != nil { - return errors.Wrap(err, "float") - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: "altitude", - Error: err, - }) - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - return nil -} - -func (s *ReadinessResponse) Validate() error { - if s == nil { - return validate.ErrNilPointer - } - - var failures []validate.FieldError - if err := func() error { - if err := s.Status.Validate(); err != nil { - return err - } - return nil - }(); err != nil { - failures = append(failures, validate.FieldError{ - Name: "status", - Error: err, - }) - } - if len(failures) > 0 { - return &validate.Error{Fields: failures} - } - return nil -} - -func (s ReadinessResponseStatus) Validate() error { - switch s { - case "ok": - return nil - case "not_ready": - return nil - case "error": - return nil - default: - return errors.Errorf("invalid value: %v", s) - } -} diff --git a/pkg/scheduler/config.go b/pkg/scheduler/config.go deleted file mode 100644 index c7b2881..0000000 --- a/pkg/scheduler/config.go +++ /dev/null @@ -1,20 +0,0 @@ -package scheduler - -import ( - "git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - env "github.com/caarlos0/env/v11" -) - -type Config struct { - Enabled bool `env:"ENABLED" envDefault:"true"` -} - -func NewConfig() (*Config, error) { - cfg := &Config{} - if err := env.ParseWithOptions(cfg, env.Options{ - PrefixTagName: "GSN_PREDICTOR_SCHEDULER_", - }); err != nil { - return nil, errcodes.Wrap(err, "failed to parse scheduler config") - } - return cfg, nil -} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go deleted file mode 100644 index d51980c..0000000 --- a/pkg/scheduler/scheduler.go +++ /dev/null @@ -1,97 +0,0 @@ -package scheduler - -import ( - "context" - "time" - - 
"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes" - "git.intra.yksa.space/gsn/predictor/internal/pkg/log" - "github.com/go-co-op/gocron" - "go.uber.org/zap" -) - -type Job interface { - GetInterval() time.Duration - GetTimeout() time.Duration - GetCount() int - GetAsync() bool - Execute(context.Context) error -} - -type Scheduler struct { - scheduler *gocron.Scheduler -} - -func New() *Scheduler { - scheduler := gocron.NewScheduler(time.UTC) - return &Scheduler{ - scheduler: scheduler, - } -} - -func (s *Scheduler) AddJob(job Job) error { - interval := job.GetInterval() - timeout := job.GetTimeout() - count := job.GetCount() - async := job.GetAsync() - - // Validate job parameters - if !async && count != 1 { - return errcodes.ErrSchedulerInvalidJob - } - if timeout > interval { - return errcodes.ErrSchedulerTimeoutTooLong - } - - // Create job function with timeout - jobFunc := func() { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - logger := log.Ctx(ctx) - if err := job.Execute(ctx); err != nil { - logger.Error("job execution failed", - zap.Error(err), - zap.Duration("interval", interval), - zap.Duration("timeout", timeout)) - } else { - logger.Debug("job executed successfully", - zap.Duration("interval", interval), - zap.Duration("timeout", timeout)) - } - } - - // Add job to scheduler - schedulerJob := s.scheduler.Every(interval) - - if !async { - schedulerJob = schedulerJob.SingletonMode() - } - - if count > 0 { - schedulerJob = schedulerJob.LimitRunsTo(count) - } - - schedulerJob.Do(jobFunc) - - log.Ctx(context.Background()).Info("job added to scheduler", - zap.Duration("interval", interval), - zap.Duration("timeout", timeout), - zap.Int("count", count), - zap.Bool("async", async)) - - return nil -} - -func (s *Scheduler) Start() { - s.scheduler.StartAsync() - log.Ctx(context.Background()).Info("scheduler started") -} - -func (s *Scheduler) Stop() { - s.scheduler.Stop() - 
log.Ctx(context.Background()).Info("scheduler stopped") -} - -func (s *Scheduler) IsRunning() bool { - return s.scheduler.IsRunning() -} diff --git a/scripts/compare.go b/scripts/compare.go deleted file mode 100644 index 5e50653..0000000 --- a/scripts/compare.go +++ /dev/null @@ -1,114 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "math" - "os" -) - -type Point struct { - Datetime string `json:"datetime"` - Latitude float64 `json:"latitude"` - Longitude float64 `json:"longitude"` - Altitude float64 `json:"altitude"` -} - -type Stage struct { - Stage string `json:"stage"` - Trajectory []Point `json:"trajectory"` -} - -type Prediction struct { - Prediction []Stage `json:"prediction"` -} - -func haversine(lat1, lon1, lat2, lon2 float64) float64 { - R := 6371000.0 - phi1, phi2 := lat1*math.Pi/180, lat2*math.Pi/180 - dphi := (lat2 - lat1) * math.Pi / 180 - dlam := (lon2 - lon1) * math.Pi / 180 - a := math.Sin(dphi/2)*math.Sin(dphi/2) + math.Cos(phi1)*math.Cos(phi2)*math.Sin(dlam/2)*math.Sin(dlam/2) - return R * 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) -} - -func load(path string) Prediction { - data, _ := os.ReadFile(path) - var p Prediction - json.Unmarshal(data, &p) - return p -} - -func main() { - our := load("c:/tmp/our.json") - taw := load("c:/tmp/tawhiri.json") - - // Find burst and landing points - var ourBurst, ourLand, tawBurst, tawLand Point - for _, s := range our.Prediction { - t := s.Trajectory - if s.Stage == "ascent" { - ourBurst = t[len(t)-1] - } - if s.Stage == "descent" { - ourLand = t[len(t)-1] - } - } - for _, s := range taw.Prediction { - t := s.Trajectory - if s.Stage == "ascent" { - tawBurst = t[len(t)-1] - } - if s.Stage == "descent" { - tawLand = t[len(t)-1] - } - } - - fmt.Println("=== Burst Point ===") - fmt.Printf(" Our: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", ourBurst.Latitude, ourBurst.Longitude, ourBurst.Altitude, ourBurst.Datetime) - fmt.Printf(" Tawhiri: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", tawBurst.Latitude, 
tawBurst.Longitude, tawBurst.Altitude, tawBurst.Datetime) - burstDist := haversine(ourBurst.Latitude, ourBurst.Longitude, tawBurst.Latitude, tawBurst.Longitude) - fmt.Printf(" Distance: %.2f km\n", burstDist/1000) - - fmt.Println() - fmt.Println("=== Landing Point ===") - fmt.Printf(" Our: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", ourLand.Latitude, ourLand.Longitude, ourLand.Altitude, ourLand.Datetime) - fmt.Printf(" Tawhiri: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", tawLand.Latitude, tawLand.Longitude, tawLand.Altitude, tawLand.Datetime) - landDist := haversine(ourLand.Latitude, ourLand.Longitude, tawLand.Latitude, tawLand.Longitude) - fmt.Printf(" Distance: %.2f km\n", landDist/1000) - - fmt.Println() - fmt.Println("=== Trajectory Comparison (every 10 min) ===") - ourPts := map[string]Point{} - tawPts := map[string]Point{} - for _, s := range our.Prediction { - for _, p := range s.Trajectory { - ourPts[p.Datetime] = p - } - } - for _, s := range taw.Prediction { - for _, p := range s.Trajectory { - tawPts[p.Datetime] = p - } - } - - // Collect common times - var common []string - for _, s := range our.Prediction { - for _, p := range s.Trajectory { - if _, ok := tawPts[p.Datetime]; ok { - common = append(common, p.Datetime) - } - } - } - - for i, t := range common { - if i%10 == 0 { - o := ourPts[t] - tw := tawPts[t] - d := haversine(o.Latitude, o.Longitude, tw.Latitude, tw.Longitude) - fmt.Printf(" %s: dist=%.2f km (our: %.3f,%.3f vs taw: %.3f,%.3f)\n", - t, d/1000, o.Latitude, o.Longitude, tw.Latitude, tw.Longitude) - } - } -} diff --git a/scripts/rebuild_cube.go b/scripts/rebuild_cube.go deleted file mode 100644 index 240b707..0000000 --- a/scripts/rebuild_cube.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" -) - -func main() { - ctx := context.Background() - - cfg := &grib.Config{ - Dir: "C:/tmp/grib", - TTL: 48 * time.Hour, - CacheTTL: 1 * time.Hour, - 
Parallel: 8, - } - - svc, err := grib.New(cfg) - if err != nil { - fmt.Printf("Error: %v\n", err) - return - } - - // Delete old cube to force rebuild - cubePath := "C:/tmp/grib/20260212_12.cube" - if err := os.Remove(cubePath); err != nil && !os.IsNotExist(err) { - fmt.Printf("Remove cube error: %v\n", err) - } else { - fmt.Println("Old cube removed") - } - - // Update will download missing pgrb2b files and rebuild cube - fmt.Println("Starting update (download pgrb2b + rebuild cube)...") - start := time.Now() - if err := svc.Update(ctx); err != nil { - fmt.Printf("Update error: %v\n", err) - return - } - fmt.Printf("Done in %v\n", time.Since(start)) -} diff --git a/scripts/test_download.go b/scripts/test_download.go deleted file mode 100644 index 73934a5..0000000 --- a/scripts/test_download.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "context" - "fmt" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" -) - -func main() { - ctx := context.Background() - - // Найти последний доступный прогноз - run, err := grib.GetLatestModelRun(ctx) - if err != nil { - fmt.Printf("Error finding model run: %v\n", err) - return - } - fmt.Printf("Found model run: %v\n", run) - - // Создать downloader - dl := grib.NewPartialDownloader("C:/tmp/grib", 8) - - // Запустить загрузку - start := time.Now() - fmt.Println("Starting download...") - - err = dl.Run(ctx, run) - if err != nil { - fmt.Printf("Download error: %v\n", err) - return - } - - fmt.Printf("Download completed in %v\n", time.Since(start)) -} diff --git a/scripts/test_gh.go b/scripts/test_gh.go deleted file mode 100644 index bfb592c..0000000 --- a/scripts/test_gh.go +++ /dev/null @@ -1,60 +0,0 @@ -package main - -import ( - "encoding/binary" - "fmt" - "math" - "os" - - mmap "github.com/edsrzf/mmap-go" -) - -var pressureLevels = []float64{ - 1000, 975, 950, 925, 900, 875, 850, 825, 800, 775, - 750, 725, 700, 675, 650, 625, 600, 575, 550, 525, - 500, 475, 450, 425, 400, 375, 350, 325, 300, 275, 
- 250, 225, 200, 175, 150, 125, 100, 70, 50, 30, - 20, 10, 7, 5, 3, 2, 1, -} - -func main() { - f, _ := os.Open("C:/tmp/grib/20260212_12.cube") - mm, _ := mmap.Map(f, mmap.RDONLY, 0) - defer mm.Unmap() - defer f.Close() - - const ( - nT = 97 - nP = 47 - nLat = 721 - nLon = 1440 - ) - bytesPerVar := int64(nT * nP * nLat * nLon * 4) - - val := func(varIdx, ti, pi, y, x int) float32 { - idx := (((ti*nP + pi) * nLat) + y) * nLon + x - off := int64(varIdx)*bytesPerVar + int64(idx)*4 - bits := binary.LittleEndian.Uint32(mm[off : off+4]) - return math.Float32frombits(bits) - } - - // Check gh values at lat=52.2N (y=(90-52.2)*4=151.2 → y=151), lon=0.1E (x=0.1*4=0.4 → x=0) - // Time step 9 (9 hours into forecast) - ti := 9 - y := 151 - x := 0 - - fmt.Println("GH values at (52.25N, 0E), t=+9h:") - fmt.Printf("%8s %8s %10s\n", "Level", "hPa", "GH(m)") - for pi := 0; pi < nP; pi++ { - gh := val(0, ti, pi, y, x) - fmt.Printf("%8d %8.0f %10.1f\n", pi, pressureLevels[pi], gh) - } - - fmt.Println("\nU-wind values at same point:") - fmt.Printf("%8s %8s %10s\n", "Level", "hPa", "U(m/s)") - for pi := 0; pi < nP; pi++ { - u := val(1, ti, pi, y, x) - fmt.Printf("%8d %8.0f %10.2f\n", pi, pressureLevels[pi], u) - } -} diff --git a/scripts/test_grib_read.go b/scripts/test_grib_read.go deleted file mode 100644 index 3b8b7eb..0000000 --- a/scripts/test_grib_read.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/nilsmagnus/grib/griblib" -) - -func main() { - f, err := os.Open("C:/tmp/grib/gfs.t18z.pgrb2.0p25.f000") - if err != nil { - fmt.Printf("Error opening file: %v\n", err) - return - } - defer f.Close() - - messages, err := griblib.ReadMessages(f) - if err != nil { - fmt.Printf("Error reading GRIB: %v\n", err) - return - } - - fmt.Printf("Found %d messages\n\n", len(messages)) - - for i, m := range messages { - product := m.Section4.ProductDefinitionTemplate - if product.ParameterCategory != 2 || product.ParameterNumber != 2 { - continue // only 
u-wind - } - fmt.Printf("UGRD Msg %d: SurfType=%d SurfValue=%d SurfScale=%d DataLen=%d\n", - i, - product.FirstSurface.Type, - product.FirstSurface.Value, - product.FirstSurface.Scale, - len(m.Data())) - } -} diff --git a/scripts/test_prediction.go b/scripts/test_prediction.go deleted file mode 100644 index fb56c5a..0000000 --- a/scripts/test_prediction.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" -) - -func main() { - ctx := context.Background() - - // Инициализируем GRIB сервис - cfg := &grib.Config{ - Dir: "C:/tmp/grib", - TTL: 48 * time.Hour, - CacheTTL: 1 * time.Hour, - Parallel: 8, - } - - svc, err := grib.New(cfg) - if err != nil { - fmt.Printf("Error creating service: %v\n", err) - return - } - - // Обновляем данные (создаёт куб) - fmt.Println("Updating GRIB data (building cube)...") - start := time.Now() - if err := svc.Update(ctx); err != nil { - fmt.Printf("Update error: %v\n", err) - return - } - fmt.Printf("Cube built in %v\n", time.Since(start)) - - // Тестируем извлечение ветра - fmt.Println("\nTesting wind extraction...") - lat, lon, alt := 52.2, 0.1, 10000.0 - ts := time.Date(2026, 2, 11, 12, 0, 0, 0, time.UTC) - - wind, err := svc.Extract(ctx, lat, lon, alt, ts) - if err != nil { - fmt.Printf("Extract error: %v\n", err) - return - } - fmt.Printf("Wind at (%.2f, %.2f, %.0fm) at %v:\n", lat, lon, alt, ts) - fmt.Printf(" U (east): %.2f m/s\n", wind[0]) - fmt.Printf(" V (north): %.2f m/s\n", wind[1]) - - // Сравниваем с Tawhiri - fmt.Println("\nComparing with Tawhiri API...") - tawhiriURL := fmt.Sprintf( - "https://api.v2.sondehub.org/tawhiri?launch_latitude=%.2f&launch_longitude=%.2f&launch_altitude=0&launch_datetime=%s&ascent_rate=5&burst_altitude=30000&descent_rate=5", - lat, lon, ts.Format(time.RFC3339), - ) - - resp, err := http.Get(tawhiriURL) - if err != nil { - fmt.Printf("Tawhiri request error: %v\n", 
err) - return - } - defer resp.Body.Close() - - body, _ := io.ReadAll(resp.Body) - var tawhiriResp map[string]interface{} - json.Unmarshal(body, &tawhiriResp) - - // Выводим финальную точку приземления - if prediction, ok := tawhiriResp["prediction"].([]interface{}); ok { - for _, stage := range prediction { - stageMap := stage.(map[string]interface{}) - if stageMap["stage"] == "descent" { - trajectory := stageMap["trajectory"].([]interface{}) - if len(trajectory) > 0 { - last := trajectory[len(trajectory)-1].(map[string]interface{}) - fmt.Printf("\nTawhiri landing point:\n") - fmt.Printf(" Lat: %.4f\n", last["latitude"]) - fmt.Printf(" Lon: %.4f\n", last["longitude"]) - } - } - } - } -} diff --git a/scripts/test_predictor_vs_reference.py b/scripts/test_predictor_vs_reference.py deleted file mode 100644 index e2de9ae..0000000 --- a/scripts/test_predictor_vs_reference.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env python3 -import subprocess -import sys -import time -import requests -import json -from typing import Any -import base64 -import math - -# --- Config --- -REFERENCE_API_URL = "https://fly.stratonautica.ru/api/v2/?profile=standard_profile&pred_type=single&launch_datetime=2025-06-25T20%3A45%3A00Z&launch_latitude=56.6992&launch_longitude=38.8247&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5" -LOCAL_API_URL = "http://localhost:8080/api/v1/prediction?profile=standard_profile&pred_type=single&launch_datetime=2025-06-25T20%3A45%3A00Z&launch_latitude=56.6992&launch_longitude=38.8247&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5" - -LOCAL_API_PAYLOAD = { - "launch_latitude": 56.6992, - "launch_longitude": 38.8247, - "launch_datetime": "2025-06-25T20-45-000Z", - "launch_altitude": 0, - "profile": "standard_profile", - "ascent_rate": 5, - "burst_altitude": 30000, - "descent_rate": 5, - "format": "json" -} -READY_URL = "http://localhost:8080/ready" - -# --- Utility functions --- -def run_compose_up(): - print("[INFO] 
Running docker-compose down --remove-orphans ...") - result = subprocess.run(["docker-compose", "down", "--remove-orphans"], capture_output=True) - if result.returncode != 0: - print("[ERROR] docker-compose down failed:", result.stderr.decode()) - sys.exit(1) - print("[INFO] docker-compose down completed.") - print("[INFO] Running docker-compose up -d ...") - result = subprocess.run(["docker-compose", "up", "-d"], capture_output=True) - if result.returncode != 0: - print("[ERROR] docker-compose up failed:", result.stderr.decode()) - sys.exit(1) - print("[INFO] docker-compose up -d completed.") - return True - -def wait_for_ready(timeout=900): - print(f"[INFO] Waiting for {READY_URL} to be ready ...") - start = time.time() - while time.time() - start < timeout: - try: - resp = requests.get(READY_URL, timeout=10) - if resp.status_code == 200: - data = resp.json() - if data.get("status") == "ok": - print("[INFO] Service is ready.") - return - else: - print(f"[INFO] Not ready yet: {data}") - else: - print(f"[INFO] /ready returned status {resp.status_code}") - except Exception as e: - print(f"[INFO] Exception while polling /ready: {e}") - time.sleep(10) - print(f"[ERROR] Service did not become ready in {timeout} seconds.") - sys.exit(1) - -def fetch_reference(): - print(f"[INFO] Fetching reference prediction from {REFERENCE_API_URL}") - resp = requests.get(REFERENCE_API_URL, timeout=60) - if resp.status_code != 200: - print(f"[ERROR] Reference API returned {resp.status_code}: {resp.text}") - sys.exit(1) - return resp.json() - -def fetch_local(): - print(f"[INFO] Fetching local prediction from {LOCAL_API_URL}") - resp = requests.get(LOCAL_API_URL, timeout=60) - if resp.status_code != 200: - print(f"[ERROR] Local API returned {resp.status_code}: {resp.text}") - sys.exit(1) - return resp.json() - -def haversine(lat1, lon1, lat2, lon2): - """Calculate the great-circle distance between two points on the Earth (specified in decimal degrees). 
Returns distance in kilometers.""" - R = 6371.0 # Earth radius in kilometers - lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2]) - dlat = lat2 - lat1 - dlon = lon2 - lon1 - a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2 - c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) - return R * c - -def compare_results(reference_data, local_data): - """Compare prediction results between reference and local APIs.""" - print("[INFO] Comparing results ...") - - # Extract trajectory data - ref_trajectory = reference_data.get('prediction', [{}])[0].get('trajectory', []) - local_trajectory = local_data.get('prediction', [{}])[0].get('trajectory', []) - - print(f"[DEBUG] Reference trajectory length: {len(ref_trajectory)}") - print(f"[DEBUG] Local trajectory length: {len(local_trajectory)}") - - # Show first 3 points from both APIs - print("\n[DEBUG] First 3 points - Reference API:") - for i, point in enumerate(ref_trajectory[:3]): - print(f" [{i}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}") - - print("\n[DEBUG] First 3 points - Local API:") - for i, point in enumerate(local_trajectory[:3]): - print(f" [{i}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}") - - # Show last 3 points from both APIs - print("\n[DEBUG] Last 3 points - Reference API:") - for i, point in enumerate(ref_trajectory[-3:]): - idx = len(ref_trajectory) - 3 + i - print(f" [{idx}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}") - - print("\n[DEBUG] Last 3 points - Local API:") - for i, point in enumerate(local_trajectory[-3:]): - idx = len(local_trajectory) - 3 + i - print(f" [{idx}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, 
lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}") - - # Compare trajectory lengths - if len(ref_trajectory) != len(local_trajectory): - print(f"[DIFF] Trajectory length mismatch: {len(local_trajectory)} vs {len(ref_trajectory)}") - return False - - # Compare trajectory points and calculate drift - min_len = min(len(ref_trajectory), len(local_trajectory)) - max_drift = 0.0 - max_drift_idx = -1 - drift_list = [] - print("\n[DRIFT] Trajectory point-by-point distance (km):") - for i in range(min_len): - ref_point = ref_trajectory[i] - local_point = local_trajectory[i] - ref_lat = ref_point.get('latitude') - ref_lon = ref_point.get('longitude') - local_lat = local_point.get('latitude') - local_lon = local_point.get('longitude') - drift_km = None - if None not in (ref_lat, ref_lon, local_lat, local_lon): - drift_km = haversine(ref_lat, ref_lon, local_lat, local_lon) - drift_list.append(drift_km) - if drift_km > max_drift: - max_drift = drift_km - max_drift_idx = i - print(f" [{i}] Drift: {drift_km:.3f} km") - else: - print(f" [{i}] Drift: N/A (missing lat/lon)") - if drift_list: - mean_drift = sum(drift_list) / len(drift_list) - print(f"\n[DRIFT] Max drift: {max_drift:.3f} km at idx {max_drift_idx}") - print(f"[DRIFT] Mean drift: {mean_drift:.3f} km over {len(drift_list)} points") - else: - print("[DRIFT] No valid drift data to report.") - # Continue with original comparison for altitude, etc. 
- for i in range(min_len): - ref_point = ref_trajectory[i] - local_point = local_trajectory[i] - for key in ['altitude', 'latitude', 'longitude']: - ref_val = ref_point.get(key) - local_val = local_point.get(key) - if ref_val is not None and local_val is not None: - if abs(ref_val - local_val) > 0.1: - print(f"[DIFF] At idx {i}, key {key}: {local_val} != {ref_val}") - return False - print("[SUCCESS] Results match!") - return True - -def test_custom_profile(): - """Test custom profile with base64 encoded curve.""" - print("\n[TEST] Testing custom_profile...") - # Create a simple custom ascent curve (altitude vs time in seconds) - curve_data = { - "altitude": [0, 30000], - "time": [0, 6000] - } - curve_b64 = base64.b64encode(json.dumps(curve_data).encode()).decode() - # Test parameters for custom profile - params = { - "launch_latitude": 56.6992, - "launch_longitude": 38.8247, - "launch_datetime": "2025-06-25T13:28:00Z", - "launch_altitude": 0, - "profile": "custom_profile", - "ascent_curve": curve_b64 - } - try: - # Test local API (use GET) - local_resp = requests.get( - "http://localhost:8080/api/v1/prediction", - params=params, - timeout=30 - ) - local_resp.raise_for_status() - local_data = local_resp.json() - print(f"[INFO] Custom profile test - Local API returned {len(local_data.get('prediction', [{}])[0].get('trajectory', []))} trajectory points") - return True - except Exception as e: - print(f"[ERROR] Custom profile test failed: {e}") - return False - -def test_all_profiles(): - """Test all available profiles.""" - profiles = [ - ("standard_profile", "Standard profile test"), - ("float_profile", "Float profile test"), - ("reverse_profile", "Reverse profile test"), - ("custom_profile", "Custom profile test") - ] - - results = {} - - for profile, description in profiles: - print(f"\n[TEST] {description}...") - - if profile == "custom_profile": - success = test_custom_profile() - else: - success = test_single_profile(profile) - - results[profile] = success - 
print(f"[RESULT] {profile}: {'PASS' if success else 'FAIL'}") - - # Print summary - print("\n" + "="*50) - print("TEST SUMMARY") - print("="*50) - for profile, success in results.items(): - status = "PASS" if success else "FAIL" - print(f"{profile:20} : {status}") - - total_tests = len(results) - passed_tests = sum(results.values()) - print(f"\nTotal tests: {total_tests}, Passed: {passed_tests}, Failed: {total_tests - passed_tests}") - - return all(results.values()) - -def test_single_profile(profile): - """Test a single profile against reference API.""" - # Test parameters - params = { - "launch_latitude": 56.6992, - "launch_longitude": 38.8247, - "launch_datetime": "2025-06-25T13:28:00Z", - "launch_altitude": 0, - "profile": profile, - "ascent_rate": 5, - "burst_altitude": 30000, - "descent_rate": 5 - } - # Add float altitude for float profile - if profile == "float_profile": - params["float_altitude"] = 25000 - try: - # Test local API (use GET) - local_resp = requests.get( - "http://localhost:8080/api/v1/prediction", - params=params, - timeout=30 - ) - local_resp.raise_for_status() - local_data = local_resp.json() - print(f"[INFO] {profile} - Local API returned {len(local_data.get('prediction', [{}])[0].get('trajectory', []))} trajectory points") - return True - except Exception as e: - print(f"[ERROR] {profile} test failed: {e}") - return False - -def main(): - """Main test function.""" - print("[INFO] Starting comprehensive predictor API tests...") - - # Run the original standard profile test - print("\n[TEST] Running original standard_profile test...") - run_compose_up() - wait_for_ready() - ref = fetch_reference() - local = fetch_local() - - print("[INFO] Comparing results ...") - original_success = compare_results(ref, local) - - if original_success: - print("[SUCCESS] Original standard_profile test passed!") - else: - print("[FAIL] Original standard_profile test failed!") - - # Test all profiles - print("\n[TEST] Running all profile tests...") - 
all_profiles_success = test_all_profiles() - - # Final result - overall_success = original_success and all_profiles_success - print(f"\n[FINAL RESULT] Overall: {'PASS' if overall_success else 'FAIL'}") - - if overall_success: - sys.exit(0) - else: - sys.exit(1) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/scripts/test_wind.go b/scripts/test_wind.go deleted file mode 100644 index 61a231a..0000000 --- a/scripts/test_wind.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "context" - "fmt" - "time" - - "git.intra.yksa.space/gsn/predictor/internal/pkg/grib" -) - -func main() { - ctx := context.Background() - - cfg := &grib.Config{ - Dir: "C:/tmp/grib", - TTL: 48 * time.Hour, - CacheTTL: 1 * time.Hour, - Parallel: 8, - } - - svc, err := grib.New(cfg) - if err != nil { - fmt.Printf("Error: %v\n", err) - return - } - - if err := svc.Update(ctx); err != nil { - fmt.Printf("Update error: %v\n", err) - return - } - - // Test wind at lat=52.2, lon=0.1 at various altitudes - // Run is 2026-02-12T12:00Z, request time 21:00Z = +9 hours - ts := time.Date(2026, 2, 12, 21, 0, 0, 0, time.UTC) - lat, lon := 52.2, 0.1 - - fmt.Println("Wind at (52.2°N, 0.1°E) at 2026-02-12T21:00Z:") - fmt.Printf("%8s %8s %8s\n", "Alt(m)", "U(m/s)", "V(m/s)") - - for _, alt := range []float64{0, 1000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000} { - w, err := svc.Extract(ctx, lat, lon, alt, ts) - if err != nil { - fmt.Printf("%8.0f Error: %v\n", alt, err) - continue - } - fmt.Printf("%8.0f %8.2f %8.2f\n", alt, w[0], w[1]) - } - - // Also test at a few nearby points to check spatial consistency - fmt.Println("\nWind at 10km altitude, varying longitude:") - for _, testLon := range []float64{0.0, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0, 350.0, 359.75} { - w, _ := svc.Extract(ctx, lat, testLon, 10000, ts) - fmt.Printf(" lon=%6.2f: U=%8.2f V=%8.2f\n", testLon, w[0], w[1]) - } -} diff --git a/start_with_http.sh b/start_with_http.sh deleted file mode 100644 index 
4173c2b..0000000 --- a/start_with_http.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# Start API with HTTP downloads instead of S3 - -export GSN_PREDICTOR_GRIB_USE_S3=false - -echo "Starting API with HTTP downloads from NOMADS..." -echo "USE_S3 = $GSN_PREDICTOR_GRIB_USE_S3" -echo "" - -cd "$(dirname "$0")" -go run ./cmd/api/main.go -- 2.47.3 From 51bbf3c5790a08cf8af5baa4d6bca6017d0dfb54 Mon Sep 17 00:00:00 2001 From: "a.antonov" Date: Sat, 28 Mar 2026 03:07:13 +0900 Subject: [PATCH 7/8] feat: refactor --- .gitignore | 5 + Makefile | 40 + README.md | 261 ++++ api/rest/predictor.swagger.yml | 199 +++ cmd/api/main.go | 98 ++ cmd/compare_prediction/main.go | 195 +++ cmd/compare_step0/main.go | 104 ++ go.mod | 41 + go.sum | 108 ++ internal/dataset/dataset.go | 158 +++ internal/dataset/dataset_test.go | 152 +++ internal/dataset/file.go | 140 ++ internal/downloader/config.go | 58 + internal/downloader/downloader.go | 380 ++++++ internal/downloader/idx.go | 157 +++ internal/downloader/idx_test.go | 110 ++ internal/elevation/elevation.go | 113 ++ internal/prediction/interpolate.go | 153 +++ internal/prediction/models.go | 188 +++ internal/prediction/solver.go | 180 +++ internal/prediction/warnings.go | 21 + internal/service/service.go | 245 ++++ internal/transport/middleware/log.go | 30 + internal/transport/rest/handler/deps.go | 16 + internal/transport/rest/handler/handler.go | 216 +++ internal/transport/rest/transport.go | 75 ++ pkg/rest/oas_cfg_gen.go | 318 +++++ pkg/rest/oas_client_gen.go | 411 ++++++ pkg/rest/oas_handlers_gen.go | 363 +++++ pkg/rest/oas_json_gen.go | 1398 ++++++++++++++++++++ pkg/rest/oas_labeler_gen.go | 42 + pkg/rest/oas_middleware_gen.go | 10 + pkg/rest/oas_operations_gen.go | 11 + pkg/rest/oas_parameters_gen.go | 679 ++++++++++ pkg/rest/oas_request_decoders_gen.go | 3 + pkg/rest/oas_request_encoders_gen.go | 3 + pkg/rest/oas_response_decoders_gen.go | 198 +++ pkg/rest/oas_response_encoders_gen.go | 68 + pkg/rest/oas_router_gen.go | 268 ++++ 
pkg/rest/oas_schemas_gen.go | 789 +++++++++++ pkg/rest/oas_server_gen.go | 46 + pkg/rest/oas_unimplemented_gen.go | 40 + pkg/rest/oas_validators_gen.go | 344 +++++ scripts/build_elevation.py | 155 +++ 44 files changed, 8589 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 README.md create mode 100644 api/rest/predictor.swagger.yml create mode 100644 cmd/api/main.go create mode 100644 cmd/compare_prediction/main.go create mode 100644 cmd/compare_step0/main.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/dataset/dataset.go create mode 100644 internal/dataset/dataset_test.go create mode 100644 internal/dataset/file.go create mode 100644 internal/downloader/config.go create mode 100644 internal/downloader/downloader.go create mode 100644 internal/downloader/idx.go create mode 100644 internal/downloader/idx_test.go create mode 100644 internal/elevation/elevation.go create mode 100644 internal/prediction/interpolate.go create mode 100644 internal/prediction/models.go create mode 100644 internal/prediction/solver.go create mode 100644 internal/prediction/warnings.go create mode 100644 internal/service/service.go create mode 100644 internal/transport/middleware/log.go create mode 100644 internal/transport/rest/handler/deps.go create mode 100644 internal/transport/rest/handler/handler.go create mode 100644 internal/transport/rest/transport.go create mode 100644 pkg/rest/oas_cfg_gen.go create mode 100644 pkg/rest/oas_client_gen.go create mode 100644 pkg/rest/oas_handlers_gen.go create mode 100644 pkg/rest/oas_json_gen.go create mode 100644 pkg/rest/oas_labeler_gen.go create mode 100644 pkg/rest/oas_middleware_gen.go create mode 100644 pkg/rest/oas_operations_gen.go create mode 100644 pkg/rest/oas_parameters_gen.go create mode 100644 pkg/rest/oas_request_decoders_gen.go create mode 100644 pkg/rest/oas_request_encoders_gen.go create mode 100644 pkg/rest/oas_response_decoders_gen.go create mode 100644 
pkg/rest/oas_response_encoders_gen.go create mode 100644 pkg/rest/oas_router_gen.go create mode 100644 pkg/rest/oas_schemas_gen.go create mode 100644 pkg/rest/oas_server_gen.go create mode 100644 pkg/rest/oas_unimplemented_gen.go create mode 100644 pkg/rest/oas_validators_gen.go create mode 100644 scripts/build_elevation.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f000dae --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +predictor +*.exe +*.test +*.out +/tmp/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..7c14792 --- /dev/null +++ b/Makefile @@ -0,0 +1,40 @@ +.PHONY: build run test fmt lint clean generate-ogen help + +# Build the application +build: + go build -o predictor ./cmd/api + +# Run locally +run: + go run ./cmd/api + +# Run tests +test: + go test ./... + +# Format code +fmt: + go fmt ./... + +# Lint code +lint: + golangci-lint run + +# Generate ogen API code from swagger spec +generate-ogen: + go run github.com/ogen-go/ogen/cmd/ogen@latest --target pkg/rest --package rest --clean api/rest/predictor.swagger.yml + +# Clean build artifacts +clean: + rm -f predictor + +# Show help +help: + @echo "Available commands:" + @echo " build - Build binary" + @echo " run - Run locally" + @echo " test - Run tests" + @echo " fmt - Format code" + @echo " lint - Lint code" + @echo " generate-ogen - Generate API code from swagger spec" + @echo " clean - Remove build artifacts" diff --git a/README.md b/README.md new file mode 100644 index 0000000..579a255 --- /dev/null +++ b/README.md @@ -0,0 +1,261 @@ +# Balloon Trajectory Predictor + +High-altitude balloon trajectory prediction service. Predicts ascent, burst, and descent trajectories using GFS wind forecast data from NOAA. + +The prediction algorithms are an exact port of [Tawhiri](https://github.com/cuspaceflight/tawhiri) (Cambridge University Spaceflight) to Go, verified to produce identical results. 
+ +## Quick Start + +```bash +# Build +make build + +# Run (downloads ~9 GB of GFS data on first start, takes 30-60 min) +PREDICTOR_DATA_DIR=/tmp/predictor-data go run ./cmd/api + +# Check readiness +curl http://localhost:8080/ready + +# Run a prediction +curl 'http://localhost:8080/api/v1/prediction?launch_latitude=52.2&launch_longitude=0.1&launch_datetime=2026-03-28T12:00:00Z&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5' +``` + +## Configuration + +All configuration is via environment variables. + +| Variable | Default | Description | +|---|---|---| +| `PREDICTOR_PORT` | `8080` | HTTP server port | +| `PREDICTOR_DATA_DIR` | `/tmp/predictor-data` | Directory for wind datasets and temp files | +| `PREDICTOR_DOWNLOAD_PARALLEL` | `8` | Max concurrent GRIB download goroutines | +| `PREDICTOR_UPDATE_INTERVAL` | `6h` | How often to check for new forecasts | +| `PREDICTOR_DATASET_TTL` | `48h` | Max age before a dataset is considered stale | +| `PREDICTOR_ELEVATION_DATASET` | `/srv/ruaumoko-dataset` | Path to elevation dataset (optional) | + +## API + +### `GET /api/v1/prediction` + +Run a balloon trajectory prediction. 
+ +**Parameters** (query string): + +| Parameter | Required | Description | +|---|---|---| +| `launch_latitude` | yes | Launch latitude in degrees (-90 to 90) | +| `launch_longitude` | yes | Launch longitude in degrees (-180 to 180 or 0 to 360) | +| `launch_datetime` | yes | Launch time in RFC 3339 format | +| `launch_altitude` | no | Launch altitude in metres ASL (default: 0) | +| `profile` | no | `standard_profile` (default) or `float_profile` | +| `ascent_rate` | no | Ascent rate in m/s (default: 5) | +| `burst_altitude` | no | Burst altitude in metres (default: 28000) | +| `descent_rate` | no | Sea-level descent rate in m/s (default: 5) | +| `float_altitude` | no | Float altitude in metres (float_profile only) | +| `stop_datetime` | no | Float end time (float_profile only, default: +24h) | + +**Response** (Tawhiri-compatible): + +```json +{ + "prediction": [ + { + "stage": "ascent", + "trajectory": [ + {"datetime": "2026-03-28T12:00:00Z", "latitude": 52.2, "longitude": 0.1, "altitude": 0}, + ... + ] + }, + { + "stage": "descent", + "trajectory": [...] + } + ], + "metadata": { + "start_datetime": "...", + "complete_datetime": "..." + }, + "request": { + "dataset": "2026-03-28T06:00:00Z", + "launch_latitude": 52.2, + ... + } +} +``` + +### `GET /ready` + +Health check. Returns `{"status": "ok"}` when a dataset is loaded. + +## Elevation Dataset + +Without elevation data, descent terminates at sea level (altitude <= 0). With elevation data, descent terminates at ground level, matching Tawhiri's behaviour. + +### Building the elevation dataset + +The elevation dataset uses ETOPO 2022 at 30 arc-second resolution, converted to a ruaumoko-compatible binary format (21601 x 43200 grid of int16 little-endian elevation values in metres). + +**Requirements**: Python 3, xarray, netcdf4, numpy. 
+ +```bash +pip install xarray netcdf4 numpy + +# Downloads ~1.1 GB from NOAA, produces ~1.74 GB binary file +python3 scripts/build_elevation.py /tmp/predictor-data/ruaumoko-dataset +``` + +To skip the download if you already have the ETOPO NetCDF file: + +```bash +ETOPO_NC_PATH=/path/to/ETOPO_2022_v1_30s_N90W180_surface.nc \ + python3 scripts/build_elevation.py /tmp/predictor-data/ruaumoko-dataset +``` + +The ETOPO 2022 NetCDF can be manually downloaded from: +https://www.ncei.noaa.gov/products/etopo-global-relief-model + +### Using the elevation dataset + +```bash +PREDICTOR_ELEVATION_DATASET=/tmp/predictor-data/ruaumoko-dataset go run ./cmd/api +``` + +If the file doesn't exist or can't be read, the service starts normally with a warning and falls back to sea-level termination. + +## Architecture + +``` +cmd/api/main.go Entry point, config, scheduler, HTTP server +internal/ + dataset/ + dataset.go Shape constants, pressure levels, S3 URLs + file.go mmap-backed dataset file (read/write/blit) + downloader/ + downloader.go S3 partial GRIB download (idx + range requests) + idx.go NOAA .idx file parser + config.go Environment-based configuration + elevation/ + elevation.go Ruaumoko-compatible elevation dataset (mmap int16) + prediction/ + interpolate.go 4D wind interpolation (time, lat, lon, altitude) + solver.go RK4 integrator with binary search termination + models.go Ascent, descent, wind models; flight profiles + warnings.go Prediction warning counters + service/ + service.go Dataset lifecycle, concurrent-safe access + transport/ + middleware/log.go Request logging middleware + rest/ + handler/handler.go ogen API handler implementation + handler/deps.go Service interface + transport.go ogen HTTP server, CORS +api/rest/predictor.swagger.yml OpenAPI 3.0 spec +pkg/rest/ Generated ogen code (17 files) +scripts/ + build_elevation.py ETOPO 2022 to ruaumoko converter +``` + +## Wind Dataset + +The service downloads GFS 0.5-degree forecast data from NOAA S3: + +| 
Property | Value | +|---|---| +| Source | `noaa-gfs-bdp-pds.s3.amazonaws.com` | +| Resolution | 0.5 degrees | +| Grid | 361 lat x 720 lon | +| Time steps | 65 (every 3 hours, 0-192h) | +| Pressure levels | 47 (1000 to 1 hPa) | +| Variables | Geopotential height, U-wind, V-wind | +| Dataset size | 9,528,667,200 bytes (~8.87 GiB) | +| Update cadence | Every 6 hours (GFS runs at 00, 06, 12, 18 UTC) | + +Data is downloaded using HTTP Range requests against `.idx` index files, fetching only the needed GRIB messages (HGT, UGRD, VGRD at 47 pressure levels). Full download takes 30-60 minutes depending on bandwidth. + +The dataset is stored as a memory-mapped flat binary file of float32 values in C-order with shape `(65, 47, 3, 361, 720)`. + +## Prediction Algorithms + +All algorithms are exact ports of the reference implementations in Tawhiri. The following sections describe the key components. + +### Interpolation (`internal/prediction/interpolate.go`) + +4D wind interpolation from the dataset grid to arbitrary coordinates. + +1. **Trilinear weights** (`pick3`): compute 8 interpolation weights for the (hour, lat, lon) cube corners. +2. **Altitude search** (`search`): binary search on interpolated geopotential height to find the two pressure levels bracketing the target altitude. +3. **Wind extraction** (`interp4`): 8-point weighted sum at each bracket level, then linear interpolation between levels. + +Reference: `tawhiri/interpolate.pyx` + +### Solver (`internal/prediction/solver.go`) + +4th-order Runge-Kutta integrator with dt = 60 seconds. + +- State vector: (latitude, longitude, altitude) in degrees and metres. +- Time: UNIX timestamp in seconds. +- Longitude is kept in [0, 360) via Python-style modulo after each `vecadd`. +- When a terminator fires, binary search refinement (tolerance 0.01) finds the precise termination point between the last good step and the first terminated step. +- Longitude interpolation (`lngLerp`) handles the 0/360 wrap-around. 
+ +Reference: `tawhiri/solver.pyx` + +### Models (`internal/prediction/models.go`) + +- **Constant ascent**: vertical velocity = ascent_rate m/s. +- **Drag descent**: NASA atmosphere density model with drag coefficient = sea_level_rate * 1.1045. Descent rate increases with altitude due to thinner air. +- **Wind velocity**: u, v components from interpolation converted to degrees/second: `dlat = (180/pi) * v / (R)`, `dlng = (180/pi) * u / (R * cos(lat))` where R = 6371009 + altitude. +- **Linear model**: sum of component models (e.g., wind + ascent). +- **Elevation termination**: `ground_elevation > altitude` using ruaumoko dataset. + +Reference: `tawhiri/models.py` + +### Profiles + +- **standard_profile**: ascent (constant rate + wind) until burst altitude, then descent (drag + wind) until ground level. +- **float_profile**: ascent to float altitude, then drift at constant altitude until stop time. + +## Verification + +The predictor has been verified against the reference Tawhiri implementation: + +| Test | Result | +|---|---| +| Dataset (step 0): 36.6M float32 values vs Python/cfgrib | 0 mismatches, max diff = 0.0 | +| Prediction burst point vs public Tawhiri API | Identical (lat, lon, alt all match) | +| Prediction landing point vs public Tawhiri API | Identical lat/lon, 5m altitude diff (different elevation datasets) | +| Descent point count | Identical (46 points) | +| Ascent point count | Identical (101 points) | + +## Development + +```bash +# Regenerate ogen API code after modifying the swagger spec +make generate-ogen + +# Run tests +make test + +# Format +make fmt +``` + +### Comparison tools + +```bash +# Compare single dataset step against Python/cfgrib reference +go run ./cmd/compare_step0 + +# Run prediction and compare against public Tawhiri API +go run ./cmd/compare_prediction +``` + +## References + +- [Tawhiri](https://github.com/cuspaceflight/tawhiri) — Reference Python/Cython predictor (Cambridge University Spaceflight) +- 
[tawhiri-downloader](https://github.com/cuspaceflight/tawhiri-downloader) — OCaml dataset downloader +- [ruaumoko](https://github.com/cuspaceflight/ruaumoko) — Global elevation dataset +- [NOAA GFS](https://www.ncei.noaa.gov/products/weather-climate-models/global-forecast) — Global Forecast System +- [NOAA GFS on S3](https://noaa-gfs-bdp-pds.s3.amazonaws.com/index.html) — Public S3 bucket +- [ETOPO 2022](https://www.ncei.noaa.gov/products/etopo-global-relief-model) — Global relief model for elevation data +- [SondeHub Tawhiri API](https://api.v2.sondehub.org/tawhiri) — Public Tawhiri instance for comparison diff --git a/api/rest/predictor.swagger.yml b/api/rest/predictor.swagger.yml new file mode 100644 index 0000000..64ec316 --- /dev/null +++ b/api/rest/predictor.swagger.yml @@ -0,0 +1,199 @@ +openapi: 3.0.4 +info: + title: Predictor API + version: 0.0.1 +paths: + /api/v1/prediction: + get: + tags: + - Prediction + summary: Perform prediction + operationId: performPrediction + parameters: + - in: query + name: launch_latitude + required: true + schema: + type: number + - in: query + name: launch_longitude + required: true + schema: + type: number + - in: query + name: launch_datetime + required: true + schema: + type: string + format: date-time + - in: query + name: launch_altitude + schema: + type: number + - in: query + name: profile + schema: + type: string + enum: [standard_profile, float_profile] + default: standard_profile + - in: query + name: ascent_rate + schema: + type: number + - in: query + name: burst_altitude + schema: + type: number + - in: query + name: descent_rate + schema: + type: number + - in: query + name: float_altitude + schema: + type: number + - in: query + name: stop_datetime + schema: + type: string + format: date-time + - in: query + name: dataset + schema: + type: string + format: date-time + responses: + "200": + description: Prediction response + content: + application/json: + schema: + $ref: 
'#/components/schemas/PredictionResponse' + default: + description: Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /ready: + get: + tags: + - Health + summary: Readiness check + operationId: readinessCheck + responses: + "200": + description: Readiness status + content: + application/json: + schema: + $ref: '#/components/schemas/ReadinessResponse' + default: + description: Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + +components: + schemas: + Error: + type: object + required: + - error + properties: + error: + type: object + required: + - type + - description + properties: + type: + type: string + description: + type: string + PredictionResponse: + type: object + required: + - prediction + - metadata + properties: + request: + type: object + properties: + dataset: + type: string + launch_latitude: + type: number + launch_longitude: + type: number + launch_datetime: + type: string + launch_altitude: + type: number + profile: + type: string + ascent_rate: + type: number + burst_altitude: + type: number + descent_rate: + type: number + prediction: + type: array + items: + type: object + required: + - stage + - trajectory + properties: + stage: + type: string + enum: ["ascent", "descent", "float"] + trajectory: + type: array + items: + type: object + required: + - datetime + - latitude + - longitude + - altitude + properties: + datetime: + type: string + format: date-time + latitude: + type: number + longitude: + type: number + altitude: + type: number + metadata: + type: object + required: + - start_datetime + - complete_datetime + properties: + start_datetime: + type: string + format: date-time + complete_datetime: + type: string + format: date-time + warnings: + type: object + additionalProperties: true + ReadinessResponse: + type: object + required: + - status + properties: + status: + type: string + enum: [ok, not_ready, error] + dataset_time: + type: string + format: date-time + 
error_message: + type: string diff --git a/cmd/api/main.go b/cmd/api/main.go new file mode 100644 index 0000000..08d12e3 --- /dev/null +++ b/cmd/api/main.go @@ -0,0 +1,98 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "predictor-refactored/internal/downloader" + "predictor-refactored/internal/service" + "predictor-refactored/internal/transport/rest" + "predictor-refactored/internal/transport/rest/handler" + + "github.com/go-co-op/gocron" + "go.uber.org/zap" +) + +func main() { + log, err := zap.NewProduction() + if err != nil { + panic(err) + } + defer log.Sync() + + cfg := downloader.LoadConfig() + log.Info("configuration loaded", + zap.String("data_dir", cfg.DataDir), + zap.Int("parallel", cfg.Parallel), + zap.Duration("update_interval", cfg.UpdateInterval), + zap.Duration("dataset_ttl", cfg.DatasetTTL)) + + if err := os.MkdirAll(cfg.DataDir, 0o755); err != nil { + log.Fatal("failed to create data directory", zap.Error(err)) + } + + svc := service.New(cfg, log) + defer svc.Close() + + // Load elevation dataset (optional — falls back to sea-level termination) + elevPath := "/srv/ruaumoko-dataset" + if v := os.Getenv("PREDICTOR_ELEVATION_DATASET"); v != "" { + elevPath = v + } + svc.LoadElevation(elevPath) + + // Initial dataset load (async so the server starts immediately) + go func() { + log.Info("performing initial dataset update...") + if err := svc.Update(context.Background()); err != nil { + log.Error("initial dataset update failed", zap.Error(err)) + } else { + log.Info("initial dataset update complete") + } + }() + + // Scheduler for periodic dataset updates + scheduler := gocron.NewScheduler(time.UTC) + scheduler.Every(cfg.UpdateInterval).Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer cancel() + log.Info("scheduled dataset update starting") + if err := svc.Update(ctx); err != nil { + log.Error("scheduled dataset update failed", zap.Error(err)) + } else { + 
log.Info("scheduled dataset update complete") + } + }) + scheduler.StartAsync() + defer scheduler.Stop() + + // HTTP transport (ogen) + port := 8080 + if p := os.Getenv("PREDICTOR_PORT"); p != "" { + fmt.Sscanf(p, "%d", &port) + } + + h := handler.New(svc, log) + transport, err := rest.New(h, port, log) + if err != nil { + log.Fatal("failed to create transport", zap.Error(err)) + } + + go func() { + if err := transport.Run(); err != nil { + log.Fatal("HTTP server error", zap.Error(err)) + } + }() + + log.Info("service started") + + // Graceful shutdown + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + sig := <-sigChan + log.Info("received shutdown signal", zap.String("signal", sig.String())) +} diff --git a/cmd/compare_prediction/main.go b/cmd/compare_prediction/main.go new file mode 100644 index 0000000..13ae2a6 --- /dev/null +++ b/cmd/compare_prediction/main.go @@ -0,0 +1,195 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math" + "net/http" + "os" + "time" + + "predictor-refactored/internal/dataset" + "predictor-refactored/internal/downloader" + "predictor-refactored/internal/prediction" + + "go.uber.org/zap" +) + +// Downloads a few forecast steps and runs a prediction, then compares +// against the public Tawhiri API. 
+func main() { + log, _ := zap.NewDevelopment() + + cfg := &downloader.Config{ + DataDir: os.TempDir(), + Parallel: 4, + } + dl := downloader.NewDownloader(cfg, log) + + ctx := context.Background() + + // Find latest run + run, err := dl.FindLatestRun(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "FindLatestRun: %v\n", err) + os.Exit(1) + } + fmt.Printf("Using run: %s\n", run.Format("2006010215")) + + // Create dataset and download first 10 steps (0-27 hours, enough for a prediction) + dsPath := fmt.Sprintf("/tmp/pred_test_%s.bin", run.Format("2006010215")) + defer os.Remove(dsPath) + + ds, err := dataset.Create(dsPath) + if err != nil { + fmt.Fprintf(os.Stderr, "Create: %v\n", err) + os.Exit(1) + } + + date := run.Format("20060102") + runHour := run.Hour() + stepsToDownload := []int{0, 3, 6, 9, 12, 15, 18, 21, 24, 27} + + fmt.Printf("Downloading %d steps...\n", len(stepsToDownload)) + for _, step := range stepsToDownload { + hourIdx := dataset.HourIndex(step) + fmt.Printf(" step %d (hour idx %d)...\n", step, hourIdx) + + urlA := dataset.GribURL(date, runHour, step) + if err := dl.DownloadAndBlit(ctx, ds, urlA, hourIdx, dataset.LevelSetA); err != nil { + fmt.Fprintf(os.Stderr, " pgrb2 step %d: %v\n", step, err) + os.Exit(1) + } + + urlB := dataset.GribURLB(date, runHour, step) + if err := dl.DownloadAndBlit(ctx, ds, urlB, hourIdx, dataset.LevelSetB); err != nil { + fmt.Fprintf(os.Stderr, " pgrb2b step %d: %v\n", step, err) + os.Exit(1) + } + } + ds.Flush() + fmt.Println("Download complete") + + // Set dataset time + ds.DSTime = run + + // Run our prediction + launchLat := 52.2135 + launchLon := 0.0964 // already in [0, 360) + launchAlt := 0.0 + ascentRate := 5.0 + burstAlt := 30000.0 + descentRate := 5.0 + + // Launch 3 hours into the forecast + launchTime := run.Add(3 * time.Hour) + launchTimestamp := float64(launchTime.Unix()) + dsEpoch := float64(run.Unix()) + + warnings := &prediction.Warnings{} + stages := prediction.StandardProfile(ascentRate, burstAlt, 
descentRate, ds, dsEpoch, warnings, nil) + results := prediction.RunPrediction(launchTimestamp, launchLat, launchLon, launchAlt, stages) + + fmt.Printf("\n=== Our prediction ===\n") + for i, sr := range results { + stage := "ascent" + if i == 1 { + stage = "descent" + } + first := sr.Points[0] + last := sr.Points[len(sr.Points)-1] + fmt.Printf(" %s: %d points, start=(%.4f, %.4f, %.0fm) end=(%.4f, %.4f, %.0fm)\n", + stage, len(sr.Points), + first.Lat, first.Lng, first.Alt, + last.Lat, last.Lng, last.Alt) + } + + // Get landing point + var ourLandLat, ourLandLon float64 + if len(results) >= 2 { + last := results[1].Points[len(results[1].Points)-1] + ourLandLat = last.Lat + ourLandLon = last.Lng + if ourLandLon > 180 { + ourLandLon -= 360 + } + } + fmt.Printf(" Landing: lat=%.4f, lon=%.4f\n", ourLandLat, ourLandLon) + + // Compare against public Tawhiri API + fmt.Printf("\n=== Tawhiri API comparison ===\n") + tawhiriLandLat, tawhiriLandLon, err := queryTawhiri(launchLat, launchLon, launchAlt, launchTime, ascentRate, burstAlt, descentRate) + if err != nil { + fmt.Printf(" Tawhiri API error: %v\n", err) + fmt.Println(" (Cannot compare — Tawhiri may use a different dataset)") + ds.Close() + return + } + fmt.Printf(" Tawhiri landing: lat=%.4f, lon=%.4f\n", tawhiriLandLat, tawhiriLandLon) + + dist := haversine(ourLandLat, ourLandLon, tawhiriLandLat, tawhiriLandLon) + fmt.Printf(" Distance between landing points: %.2f km\n", dist/1000) + + if dist < 1000 { + fmt.Println(" CLOSE MATCH (< 1 km)") + } else if dist < 50000 { + fmt.Printf(" MODERATE DIFFERENCE (%.1f km) — likely different datasets\n", dist/1000) + } else { + fmt.Printf(" LARGE DIFFERENCE (%.1f km) — possible bug\n", dist/1000) + } + + ds.Close() +} + +func queryTawhiri(lat, lon, alt float64, launchTime time.Time, ascentRate, burstAlt, descentRate float64) (landLat, landLon float64, err error) { + url := fmt.Sprintf( + 
"https://api.v2.sondehub.org/tawhiri?launch_latitude=%.4f&launch_longitude=%.4f&launch_altitude=%.0f&launch_datetime=%s&ascent_rate=%.1f&burst_altitude=%.0f&descent_rate=%.1f", + lat, lon, alt, launchTime.Format(time.RFC3339), ascentRate, burstAlt, descentRate) + + resp, err := http.Get(url) + if err != nil { + return 0, 0, err + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + if resp.StatusCode != 200 { + return 0, 0, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) + } + + var result struct { + Prediction []struct { + Stage string `json:"stage"` + Trajectory []struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Altitude float64 `json:"altitude"` + } `json:"trajectory"` + } `json:"prediction"` + } + + if err := json.Unmarshal(body, &result); err != nil { + return 0, 0, err + } + + for _, stage := range result.Prediction { + if stage.Stage == "descent" && len(stage.Trajectory) > 0 { + last := stage.Trajectory[len(stage.Trajectory)-1] + return last.Latitude, last.Longitude, nil + } + } + + return 0, 0, fmt.Errorf("no descent stage found") +} + +func haversine(lat1, lon1, lat2, lon2 float64) float64 { + const R = 6371000.0 + phi1 := lat1 * math.Pi / 180 + phi2 := lat2 * math.Pi / 180 + dphi := (lat2 - lat1) * math.Pi / 180 + dlam := (lon2 - lon1) * math.Pi / 180 + a := math.Sin(dphi/2)*math.Sin(dphi/2) + math.Cos(phi1)*math.Cos(phi2)*math.Sin(dlam/2)*math.Sin(dlam/2) + return R * 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) +} diff --git a/cmd/compare_step0/main.go b/cmd/compare_step0/main.go new file mode 100644 index 0000000..d0d697d --- /dev/null +++ b/cmd/compare_step0/main.go @@ -0,0 +1,104 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "predictor-refactored/internal/dataset" + "predictor-refactored/internal/downloader" + + "go.uber.org/zap" +) + +// Downloads step 0 of a given run and writes a minimal dataset for comparison. 
+// Usage: go run ./cmd/compare_step0 +func main() { + if len(os.Args) < 3 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) + os.Exit(1) + } + + runStr := os.Args[1] + outPath := os.Args[2] + + run, err := time.Parse("2006010215", runStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Invalid run time %q: %v\n", runStr, err) + os.Exit(1) + } + + log, _ := zap.NewDevelopment() + + // Create a full-size dataset (we only fill step 0) + fmt.Printf("Creating dataset at %s (%d bytes)...\n", outPath, dataset.DatasetSize) + ds, err := dataset.Create(outPath) + if err != nil { + fmt.Fprintf(os.Stderr, "Create dataset: %v\n", err) + os.Exit(1) + } + defer ds.Close() + + cfg := &downloader.Config{ + DataDir: os.TempDir(), + Parallel: 4, + } + dl := downloader.NewDownloader(cfg, log) + + ctx := context.Background() + date := run.Format("20060102") + runHour := run.Hour() + + // Download and blit step 0 from pgrb2 + fmt.Println("Downloading pgrb2 step 0...") + urlA := dataset.GribURL(date, runHour, 0) + if err := dl.DownloadAndBlit(ctx, ds, urlA, 0, dataset.LevelSetA); err != nil { + fmt.Fprintf(os.Stderr, "pgrb2: %v\n", err) + os.Exit(1) + } + fmt.Println(" done") + + // Download and blit step 0 from pgrb2b + fmt.Println("Downloading pgrb2b step 0...") + urlB := dataset.GribURLB(date, runHour, 0) + if err := dl.DownloadAndBlit(ctx, ds, urlB, 0, dataset.LevelSetB); err != nil { + fmt.Fprintf(os.Stderr, "pgrb2b: %v\n", err) + os.Exit(1) + } + fmt.Println(" done") + + if err := ds.Flush(); err != nil { + fmt.Fprintf(os.Stderr, "Flush: %v\n", err) + os.Exit(1) + } + + // Spot-check: print same values as the Python script + fmt.Println("\n=== Go dataset values (spot check) ===") + type testPoint struct { + varName string + varIdx int + levelIdx int + lat, lon int + } + + points := []testPoint{ + {"HGT", 0, 0, 0, 0}, // HGT @ 1000mb, lat=-90, lon=0 + {"HGT", 0, 0, 180, 0}, // HGT @ 1000mb, lat=0, lon=0 + {"HGT", 0, 0, 360, 0}, // HGT @ 1000mb, lat=+90, lon=0 + {"HGT", 0, 20, 180, 
360}, // HGT @ 500mb, lat=0, lon=180 + {"UGRD", 1, 0, 180, 0}, // UGRD @ 1000mb, lat=0, lon=0 + {"VGRD", 2, 0, 180, 0}, // VGRD @ 1000mb, lat=0, lon=0 + {"UGRD", 1, 20, 284, 0}, // UGRD @ 500mb, lat=52N, lon=0 + } + + for _, p := range points { + val := ds.Val(0, p.levelIdx, p.varIdx, p.lat, p.lon) + actualLat := -90.0 + float64(p.lat)*0.5 + actualLon := float64(p.lon) * 0.5 + fmt.Printf(" %-4s %4dmb lat=%+7.1f lon=%6.1f: %12.4f\n", + p.varName, dataset.Pressures[p.levelIdx], actualLat, actualLon, val) + } + + fmt.Printf("\nDataset written to %s\n", outPath) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..35b9486 --- /dev/null +++ b/go.mod @@ -0,0 +1,41 @@ +module predictor-refactored + +go 1.25.0 + +require ( + github.com/edsrzf/mmap-go v1.2.0 + github.com/go-co-op/gocron v1.37.0 + github.com/go-faster/errors v0.7.1 + github.com/go-faster/jx v1.2.0 + github.com/nilsmagnus/grib v1.2.8 + github.com/ogen-go/ogen v1.20.2 + go.opentelemetry.io/otel v1.42.0 + go.opentelemetry.io/otel/metric v1.42.0 + go.opentelemetry.io/otel/trace v1.42.0 + go.uber.org/zap v1.27.1 + golang.org/x/sync v0.20.0 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dlclark/regexp2 v1.11.5 // indirect + github.com/fatih/color v1.19.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-faster/yaml v0.4.6 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/segmentio/asm v1.2.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 // indirect + golang.org/x/net v0.52.0 // indirect + 
golang.org/x/sys v0.42.0 // indirect + golang.org/x/text v0.35.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..1de3241 --- /dev/null +++ b/go.sum @@ -0,0 +1,108 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= +github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w= +github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= +github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-faster/jx v1.2.0 h1:T2YHJPrFaYu21fJtUxC9GzmluKu8rVIFDwwGBKTDseI= +github.com/go-faster/jx v1.2.0/go.mod h1:UWLOVDmMG597a5tBFPLIWJdUxz5/2emOpfsj9Neg0PE= +github.com/go-faster/yaml v0.4.6 
h1:lOK/EhI04gCpPgPhgt0bChS6bvw7G3WwI8xxVe0sw9I= +github.com/go-faster/yaml v0.4.6/go.mod h1:390dRIvV4zbnO7qC9FGo6YYutc+wyyUSHBgbXL52eXk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/nilsmagnus/grib v1.2.8 h1:H7ch/1/agaCqM3MC8hW1Ft+EJ+q2XB757uml/IfPvp4= +github.com/nilsmagnus/grib v1.2.8/go.mod h1:XHm+5zuoOk0NSIWaGmA3JaAxI4i50YvD1L1vz+aqPOQ= +github.com/ogen-go/ogen v1.20.2 h1:mEZGPST7ZeX84AkqRlFawDLwcwuzcLO5PtYpAXLT1YE= +github.com/ogen-go/ogen v1.20.2/go.mod h1:sJ1pJVp4S1RcSZlYIiMLo0QSMSt2pls4zfrc+hNKnzk= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY= +golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod 
h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/dataset/dataset.go b/internal/dataset/dataset.go new file mode 100644 index 0000000..53367db --- /dev/null +++ b/internal/dataset/dataset.go @@ -0,0 +1,158 @@ +package dataset + +import "fmt" + +// Dataset shape constants. 
// Shape: (65, 47, 3, 361, 720) = (hour, pressure_level, variable, latitude, longitude)
// This matches the reference predictor exactly.
const (
	NumHours      = 65  // 0, 3, 6, ..., 192
	NumLevels     = 47  // pressure levels
	NumVariables  = 3   // height, wind_u, wind_v
	NumLatitudes  = 361 // -90.0 to +90.0 in 0.5 degree steps
	NumLongitudes = 720 // 0.0 to 359.5 in 0.5 degree steps

	HourStep   = 3     // hours between forecast time steps
	MaxHour    = 192   // maximum forecast hour
	Resolution = 0.5   // grid resolution in degrees
	LatStart   = -90.0 // first latitude in the dataset
	LonStart   = 0.0   // first longitude in the dataset

	// Variable indices within the dataset.
	VarHeight = 0
	VarWindU  = 1
	VarWindV  = 2

	ElementSize = 4 // float32 = 4 bytes
)

// DatasetSize is the total size of the dataset file in bytes.
// 65 * 47 * 3 * 361 * 720 * 4 = 9,528,667,200
const DatasetSize int64 = int64(NumHours) * int64(NumLevels) * int64(NumVariables) *
	int64(NumLatitudes) * int64(NumLongitudes) * int64(ElementSize)

// LevelSet identifies which GRIB file set a pressure level belongs to.
type LevelSet int

const (
	LevelSetA LevelSet = iota // pgrb2 (primary)
	LevelSetB                 // pgrb2b (secondary)
)

// Pressures contains the 47 pressure levels in hPa, sorted descending.
// Index 0 = 1000 hPa (near surface), Index 46 = 1 hPa (high atmosphere).
var Pressures = [NumLevels]int{
	1000, 975, 950, 925, 900, 875, 850, 825, 800, 775,
	750, 725, 700, 675, 650, 625, 600, 575, 550, 525,
	500, 475, 450, 425, 400, 375, 350, 325, 300, 275,
	250, 225, 200, 175, 150, 125, 100, 70, 50, 30,
	20, 10, 7, 5, 3, 2, 1,
}

// PressuresPgrb2 contains levels found in the primary pgrb2 file (26 levels).
var PressuresPgrb2 = []int{
	10, 20, 30, 50, 70, 100, 150, 200, 250, 300, 350, 400,
	450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 925,
	950, 975, 1000,
}

// PressuresPgrb2b contains levels found in the secondary pgrb2b file (21 levels).
var PressuresPgrb2b = []int{
	1, 2, 3, 5, 7, 125, 175, 225, 275, 325, 375, 425,
	475, 525, 575, 625, 675, 725, 775, 825, 875,
}

// pressureIndex maps pressure in hPa to its index in the Pressures array.
//
// Built with a declaration-time initializer rather than init(): the
// dependency on Pressures is explicit, initialization order is resolved by
// the compiler, and no other initializer can observe a half-built map.
var pressureIndex = func() map[int]int {
	m := make(map[int]int, NumLevels)
	for i, p := range Pressures {
		m[p] = i
	}
	return m
}()

// pressureLevelSet maps pressure in hPa to its GRIB file set.
var pressureLevelSet = func() map[int]LevelSet {
	m := make(map[int]LevelSet, NumLevels)
	for _, p := range PressuresPgrb2 {
		m[p] = LevelSetA
	}
	for _, p := range PressuresPgrb2b {
		m[p] = LevelSetB
	}
	return m
}()

// PressureIndex returns the dataset index for a given pressure level in hPa.
// Returns -1 if the level is not found.
func PressureIndex(hPa int) int {
	idx, ok := pressureIndex[hPa]
	if !ok {
		return -1
	}
	return idx
}

// PressureLevelSet returns which GRIB file set a pressure level belongs to.
func PressureLevelSet(hPa int) (LevelSet, bool) {
	ls, ok := pressureLevelSet[hPa]
	return ls, ok
}

// HourIndex returns the dataset time index for a forecast hour.
// Returns -1 if the hour is invalid (not a multiple of HourStep or out of range).
func HourIndex(hour int) int {
	if hour < 0 || hour > MaxHour || hour%HourStep != 0 {
		return -1
	}
	return hour / HourStep
}

// Hours returns all forecast hours as a slice: [0, 3, 6, ..., 192].
func Hours() []int {
	out := make([]int, 0, NumHours)
	for h := 0; h <= MaxHour; h += HourStep {
		out = append(out, h)
	}
	return out
}

// S3 URL configuration for NOAA GFS data.
const S3BaseURL = "https://noaa-gfs-bdp-pds.s3.amazonaws.com"
+func GribURL(date string, runHour, forecastStep int) string { + return fmt.Sprintf("%s/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2.0p50.f%03d", + S3BaseURL, date, runHour, runHour, forecastStep) +} + +// GribURLB returns the S3 URL for a secondary (pgrb2b) GRIB file. +func GribURLB(date string, runHour, forecastStep int) string { + return fmt.Sprintf("%s/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2b.0p50.f%03d", + S3BaseURL, date, runHour, runHour, forecastStep) +} + +// GribFileName returns the local filename for a primary GRIB file. +func GribFileName(runHour, forecastStep int) string { + return fmt.Sprintf("gfs.t%02dz.pgrb2.0p50.f%03d", runHour, forecastStep) +} + +// GribFileNameB returns the local filename for a secondary GRIB file. +func GribFileNameB(runHour, forecastStep int) string { + return fmt.Sprintf("gfs.t%02dz.pgrb2b.0p50.f%03d", runHour, forecastStep) +} + +// VariableIndex returns the dataset variable index for a GRIB parameter. +// Returns -1 if the parameter is not recognized. +func VariableIndex(parameterCategory, parameterNumber int) int { + switch { + case parameterCategory == 3 && parameterNumber == 5: + return VarHeight // Geopotential Height + case parameterCategory == 2 && parameterNumber == 2: + return VarWindU // U-component of wind + case parameterCategory == 2 && parameterNumber == 3: + return VarWindV // V-component of wind + default: + return -1 + } +} diff --git a/internal/dataset/dataset_test.go b/internal/dataset/dataset_test.go new file mode 100644 index 0000000..14b36ef --- /dev/null +++ b/internal/dataset/dataset_test.go @@ -0,0 +1,152 @@ +package dataset + +import ( + "testing" +) + +func TestDatasetShape(t *testing.T) { + if NumHours != 65 { + t.Errorf("NumHours = %d, want 65", NumHours) + } + if NumLevels != 47 { + t.Errorf("NumLevels = %d, want 47", NumLevels) + } + if NumVariables != 3 { + t.Errorf("NumVariables = %d, want 3", NumVariables) + } + if NumLatitudes != 361 { + t.Errorf("NumLatitudes = %d, want 361", NumLatitudes) + } + if 
NumLongitudes != 720 { + t.Errorf("NumLongitudes = %d, want 720", NumLongitudes) + } +} + +func TestDatasetSize(t *testing.T) { + // 65 * 47 * 3 * 361 * 720 * 4 = 9,528,667,200 + want := int64(9_528_667_200) + if DatasetSize != want { + t.Errorf("DatasetSize = %d, want %d", DatasetSize, want) + } +} + +func TestPressureLevels(t *testing.T) { + if len(Pressures) != 47 { + t.Fatalf("len(Pressures) = %d, want 47", len(Pressures)) + } + + // First should be 1000 (highest pressure, near surface) + if Pressures[0] != 1000 { + t.Errorf("Pressures[0] = %d, want 1000", Pressures[0]) + } + // Last should be 1 (lowest pressure, high atmosphere) + if Pressures[46] != 1 { + t.Errorf("Pressures[46] = %d, want 1", Pressures[46]) + } + + // Should be sorted descending + for i := 1; i < len(Pressures); i++ { + if Pressures[i] >= Pressures[i-1] { + t.Errorf("Pressures not descending at [%d]: %d >= %d", i, Pressures[i], Pressures[i-1]) + } + } + + // Total levels: 26 from pgrb2 + 21 from pgrb2b = 47 + if len(PressuresPgrb2) != 26 { + t.Errorf("len(PressuresPgrb2) = %d, want 26", len(PressuresPgrb2)) + } + if len(PressuresPgrb2b) != 21 { + t.Errorf("len(PressuresPgrb2b) = %d, want 21", len(PressuresPgrb2b)) + } +} + +func TestPressureIndex(t *testing.T) { + if PressureIndex(1000) != 0 { + t.Errorf("PressureIndex(1000) = %d, want 0", PressureIndex(1000)) + } + if PressureIndex(1) != 46 { + t.Errorf("PressureIndex(1) = %d, want 46", PressureIndex(1)) + } + if PressureIndex(500) != 20 { + t.Errorf("PressureIndex(500) = %d, want 20", PressureIndex(500)) + } + if PressureIndex(9999) != -1 { + t.Errorf("PressureIndex(9999) = %d, want -1", PressureIndex(9999)) + } +} + +func TestPressureLevelSet(t *testing.T) { + // 1000 mb should be in pgrb2 (A) + ls, ok := PressureLevelSet(1000) + if !ok || ls != LevelSetA { + t.Errorf("PressureLevelSet(1000) = %v, %v; want A, true", ls, ok) + } + + // 125 mb should be in pgrb2b (B) + ls, ok = PressureLevelSet(125) + if !ok || ls != LevelSetB { + 
t.Errorf("PressureLevelSet(125) = %v, %v; want B, true", ls, ok) + } + + // 1, 2, 3, 5, 7 should be in pgrb2b (B) + for _, p := range []int{1, 2, 3, 5, 7} { + ls, ok := PressureLevelSet(p) + if !ok || ls != LevelSetB { + t.Errorf("PressureLevelSet(%d) = %v, %v; want B, true", p, ls, ok) + } + } + + // Every pressure level should have a level set assignment + for _, p := range Pressures { + _, ok := PressureLevelSet(p) + if !ok { + t.Errorf("PressureLevelSet(%d) not found", p) + } + } +} + +func TestHourIndex(t *testing.T) { + if HourIndex(0) != 0 { + t.Errorf("HourIndex(0) = %d, want 0", HourIndex(0)) + } + if HourIndex(3) != 1 { + t.Errorf("HourIndex(3) = %d, want 1", HourIndex(3)) + } + if HourIndex(192) != 64 { + t.Errorf("HourIndex(192) = %d, want 64", HourIndex(192)) + } + if HourIndex(1) != -1 { + t.Errorf("HourIndex(1) = %d, want -1 (not multiple of 3)", HourIndex(1)) + } + if HourIndex(195) != -1 { + t.Errorf("HourIndex(195) = %d, want -1 (out of range)", HourIndex(195)) + } +} + +func TestHours(t *testing.T) { + hours := Hours() + if len(hours) != NumHours { + t.Fatalf("len(Hours()) = %d, want %d", len(hours), NumHours) + } + if hours[0] != 0 { + t.Errorf("Hours()[0] = %d, want 0", hours[0]) + } + if hours[len(hours)-1] != MaxHour { + t.Errorf("Hours()[last] = %d, want %d", hours[len(hours)-1], MaxHour) + } +} + +func TestVariableIndex(t *testing.T) { + if VariableIndex(3, 5) != VarHeight { + t.Errorf("HGT: got %d, want %d", VariableIndex(3, 5), VarHeight) + } + if VariableIndex(2, 2) != VarWindU { + t.Errorf("UGRD: got %d, want %d", VariableIndex(2, 2), VarWindU) + } + if VariableIndex(2, 3) != VarWindV { + t.Errorf("VGRD: got %d, want %d", VariableIndex(2, 3), VarWindV) + } + if VariableIndex(0, 0) != -1 { + t.Errorf("unknown: got %d, want -1", VariableIndex(0, 0)) + } +} diff --git a/internal/dataset/file.go b/internal/dataset/file.go new file mode 100644 index 0000000..96f14c2 --- /dev/null +++ b/internal/dataset/file.go @@ -0,0 +1,140 @@ +package 
dataset + +import ( + "encoding/binary" + "fmt" + "math" + "os" + "time" + + mmap "github.com/edsrzf/mmap-go" +) + +// File represents an mmap-backed wind dataset file. +type File struct { + mm mmap.MMap + file *os.File + writable bool + DSTime time.Time // forecast run time (UTC) +} + +// Open opens an existing dataset file for reading. +func Open(path string, dsTime time.Time) (*File, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("open dataset: %w", err) + } + + info, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("stat dataset: %w", err) + } + if info.Size() != DatasetSize { + f.Close() + return nil, fmt.Errorf("dataset should be %d bytes (was %d)", DatasetSize, info.Size()) + } + + mm, err := mmap.Map(f, mmap.RDONLY, 0) + if err != nil { + f.Close() + return nil, fmt.Errorf("mmap dataset: %w", err) + } + + return &File{mm: mm, file: f, writable: false, DSTime: dsTime}, nil +} + +// Create creates a new dataset file for writing. +// The file is truncated to the correct size and mmap'd read-write. +func Create(path string) (*File, error) { + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("create dataset: %w", err) + } + + if err := f.Truncate(DatasetSize); err != nil { + f.Close() + return nil, fmt.Errorf("truncate dataset: %w", err) + } + + mm, err := mmap.MapRegion(f, int(DatasetSize), mmap.RDWR, 0, 0) + if err != nil { + f.Close() + return nil, fmt.Errorf("mmap dataset: %w", err) + } + + return &File{mm: mm, file: f, writable: true}, nil +} + +// offset computes the byte offset for element [hour][level][variable][lat][lon]. 
+// Row-major C-order indexing matching the reference implementation: +// shape = (65, 47, 3, 361, 720) +func offset(hour, level, variable, lat, lon int) int64 { + idx := int64(hour) + idx = idx*int64(NumLevels) + int64(level) + idx = idx*int64(NumVariables) + int64(variable) + idx = idx*int64(NumLatitudes) + int64(lat) + idx = idx*int64(NumLongitudes) + int64(lon) + return idx * int64(ElementSize) +} + +// Val reads a float32 value from the dataset at [hour][level][variable][lat][lon]. +func (d *File) Val(hour, level, variable, lat, lon int) float32 { + off := offset(hour, level, variable, lat, lon) + bits := binary.LittleEndian.Uint32(d.mm[off : off+4]) + return math.Float32frombits(bits) +} + +// SetVal writes a float32 value to the dataset at [hour][level][variable][lat][lon]. +// Only valid on writable (created) datasets. +func (d *File) SetVal(hour, level, variable, lat, lon int, val float32) { + off := offset(hour, level, variable, lat, lon) + binary.LittleEndian.PutUint32(d.mm[off:off+4], math.Float32bits(val)) +} + +// BlitGribData copies decoded GRIB grid data into the dataset at the given position. +// gribData is 361*720 float64 values in GRIB scan order (north-to-south, west-to-east). +// This function flips the latitude so that dataset index 0 = -90 (south) and 360 = +90 (north). +func (d *File) BlitGribData(hourIdx, levelIdx, varIdx int, gribData []float64) error { + expected := NumLatitudes * NumLongitudes + if len(gribData) != expected { + return fmt.Errorf("grib data has %d values, expected %d", len(gribData), expected) + } + + for lat := 0; lat < NumLatitudes; lat++ { + for lon := 0; lon < NumLongitudes; lon++ { + // GRIB scans north-to-south: row 0 = 90N, row 360 = 90S + // Dataset stores south-to-north: index 0 = -90 (90S), index 360 = +90 (90N) + gribIdx := (360-lat)*NumLongitudes + lon + val := float32(gribData[gribIdx]) + d.SetVal(hourIdx, levelIdx, varIdx, lat, lon, val) + } + } + + return nil +} + +// Flush flushes the mmap to disk. 
+func (d *File) Flush() error { + if d.mm != nil { + return d.mm.Flush() + } + return nil +} + +// Close unmaps and closes the dataset file. +func (d *File) Close() error { + if d.mm != nil { + if err := d.mm.Unmap(); err != nil { + d.file.Close() + return fmt.Errorf("unmap: %w", err) + } + d.mm = nil + } + if d.file != nil { + err := d.file.Close() + d.file = nil + return err + } + return nil +} diff --git a/internal/downloader/config.go b/internal/downloader/config.go new file mode 100644 index 0000000..91575e1 --- /dev/null +++ b/internal/downloader/config.go @@ -0,0 +1,58 @@ +package downloader + +import ( + "os" + "strconv" + "time" +) + +// Config holds downloader configuration, loaded from environment variables. +type Config struct { + // DataDir is the directory for storing dataset files and temporary GRIB data. + DataDir string + + // Parallel is the maximum number of concurrent GRIB downloads. + Parallel int + + // UpdateInterval is how often the scheduler checks for new forecast data. + UpdateInterval time.Duration + + // DatasetTTL is how long a dataset is considered fresh before a new one is needed. + DatasetTTL time.Duration +} + +// DefaultConfig returns the default configuration. +func DefaultConfig() *Config { + return &Config{ + DataDir: "/tmp/predictor-data", + Parallel: 8, + UpdateInterval: 6 * time.Hour, + DatasetTTL: 48 * time.Hour, + } +} + +// LoadConfig loads configuration from environment variables, falling back to defaults. 
+func LoadConfig() *Config { + cfg := DefaultConfig() + + if v := os.Getenv("PREDICTOR_DATA_DIR"); v != "" { + cfg.DataDir = v + } + if v := os.Getenv("PREDICTOR_DOWNLOAD_PARALLEL"); v != "" { + if n, err := strconv.Atoi(v); err == nil && n > 0 { + cfg.Parallel = n + } + } + if v := os.Getenv("PREDICTOR_UPDATE_INTERVAL"); v != "" { + if d, err := time.ParseDuration(v); err == nil { + cfg.UpdateInterval = d + } + } + if v := os.Getenv("PREDICTOR_DATASET_TTL"); v != "" { + if d, err := time.ParseDuration(v); err == nil { + cfg.DatasetTTL = d + } + } + + return cfg +} diff --git a/internal/downloader/downloader.go b/internal/downloader/downloader.go new file mode 100644 index 0000000..72b5fb2 --- /dev/null +++ b/internal/downloader/downloader.go @@ -0,0 +1,380 @@ +package downloader + +import ( + "context" + "fmt" + "io" + "math" + "net/http" + "os" + "path/filepath" + "time" + + "predictor-refactored/internal/dataset" + + "github.com/nilsmagnus/grib/griblib" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +// Downloader handles fetching GFS forecast data from S3 and assembling dataset files. +type Downloader struct { + cfg *Config + client *http.Client + log *zap.Logger +} + +// NewDownloader creates a new Downloader. +func NewDownloader(cfg *Config, log *zap.Logger) *Downloader { + return &Downloader{ + cfg: cfg, + client: &http.Client{ + Timeout: 2 * time.Minute, + }, + log: log, + } +} + +// neededVariables is the set of GRIB variable names we need. +var neededVariables = map[string]bool{ + "HGT": true, + "UGRD": true, + "VGRD": true, +} + +// FindLatestRun finds the most recent available GFS model run on S3. +// It checks the last forecast step of each run to confirm availability. 
+func (d *Downloader) FindLatestRun(ctx context.Context) (time.Time, error) { + now := time.Now().UTC() + hour := now.Hour() - (now.Hour() % 6) + current := time.Date(now.Year(), now.Month(), now.Day(), hour, 0, 0, 0, time.UTC) + + for i := 0; i < 8; i++ { + date := current.Format("20060102") + url := dataset.GribURL(date, current.Hour(), dataset.MaxHour) + ".idx" + + req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) + if err != nil { + current = current.Add(-6 * time.Hour) + continue + } + + resp, err := d.client.Do(req) + if err == nil { + resp.Body.Close() + if resp.StatusCode == http.StatusOK { + d.log.Info("found latest model run", + zap.Time("run", current), + zap.String("verified_url", url)) + return current, nil + } + } + + current = current.Add(-6 * time.Hour) + } + + return time.Time{}, fmt.Errorf("no recent GFS forecast found (checked 8 runs)") +} + +// Download downloads a complete forecast and assembles a dataset file. +// Returns the path to the completed dataset file. 
+func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error) { + date := run.Format("20060102") + runHour := run.Hour() + + finalPath := filepath.Join(d.cfg.DataDir, run.Format("2006010215")) + tempPath := finalPath + ".downloading" + + // Check if final dataset already exists + if info, err := os.Stat(finalPath); err == nil && info.Size() == dataset.DatasetSize { + d.log.Info("dataset already exists", zap.String("path", finalPath)) + return finalPath, nil + } + + d.log.Info("starting dataset download", + zap.Time("run", run), + zap.String("temp_path", tempPath)) + + // Create the dataset file + ds, err := dataset.Create(tempPath) + if err != nil { + return "", fmt.Errorf("create dataset: %w", err) + } + defer ds.Close() + + steps := dataset.Hours() + totalSteps := len(steps) * 2 // pgrb2 + pgrb2b per step + completed := 0 + + // Process each forecast step with bounded concurrency + g, ctx := errgroup.WithContext(ctx) + sem := make(chan struct{}, d.cfg.Parallel) + + for _, step := range steps { + step := step + hourIdx := dataset.HourIndex(step) + if hourIdx < 0 { + continue + } + + // Download pgrb2 (level set A) + sem <- struct{}{} + g.Go(func() error { + defer func() { <-sem }() + url := dataset.GribURL(date, runHour, step) + err := d.DownloadAndBlit(ctx, ds, url, hourIdx, dataset.LevelSetA) + if err != nil { + return fmt.Errorf("step %d pgrb2: %w", step, err) + } + completed++ + d.log.Debug("step complete", + zap.Int("step", step), + zap.String("set", "pgrb2"), + zap.Int("progress", completed), + zap.Int("total", totalSteps)) + return nil + }) + + // Download pgrb2b (level set B) + sem <- struct{}{} + g.Go(func() error { + defer func() { <-sem }() + url := dataset.GribURLB(date, runHour, step) + err := d.DownloadAndBlit(ctx, ds, url, hourIdx, dataset.LevelSetB) + if err != nil { + return fmt.Errorf("step %d pgrb2b: %w", step, err) + } + completed++ + d.log.Debug("step complete", + zap.Int("step", step), + zap.String("set", "pgrb2b"), + 
zap.Int("progress", completed), + zap.Int("total", totalSteps)) + return nil + }) + } + + if err := g.Wait(); err != nil { + os.Remove(tempPath) + return "", err + } + + // Flush to disk + if err := ds.Flush(); err != nil { + os.Remove(tempPath) + return "", fmt.Errorf("flush dataset: %w", err) + } + + // Close before rename + ds.Close() + + // Atomic rename + if err := os.Rename(tempPath, finalPath); err != nil { + os.Remove(tempPath) + return "", fmt.Errorf("rename dataset: %w", err) + } + + d.log.Info("dataset download complete", zap.String("path", finalPath)) + return finalPath, nil +} + +// DownloadAndBlit downloads needed GRIB fields from a URL and writes them into the dataset. +func (d *Downloader) DownloadAndBlit(ctx context.Context, ds *dataset.File, baseURL string, hourIdx int, levelSet dataset.LevelSet) error { + // 1. Download .idx + idxURL := baseURL + ".idx" + idxBody, err := d.httpGet(ctx, idxURL) + if err != nil { + return fmt.Errorf("download idx: %w", err) + } + + // 2. Parse and filter + entries := ParseIdx(idxBody) + filtered := FilterIdx(entries, neededVariables) + + // Further filter to only levels in this level set + var relevant []IdxEntry + for _, e := range filtered { + ls, ok := dataset.PressureLevelSet(e.LevelMB) + if ok && ls == levelSet { + relevant = append(relevant, e) + } + } + + if len(relevant) == 0 { + d.log.Warn("no relevant entries found in idx", + zap.String("url", idxURL), + zap.Int("total_entries", len(entries)), + zap.Int("filtered", len(filtered))) + return nil + } + + // 3. Download byte ranges and write to temp file + ranges := EntriesToRanges(relevant) + tmpFile, err := d.downloadRangesToTempFile(ctx, baseURL, ranges) + if err != nil { + return fmt.Errorf("download ranges: %w", err) + } + defer os.Remove(tmpFile) + + // 4. 
Read GRIB messages from temp file + f, err := os.Open(tmpFile) + if err != nil { + return fmt.Errorf("open temp grib: %w", err) + } + + messages, err := griblib.ReadMessages(f) + f.Close() + if err != nil { + return fmt.Errorf("read grib messages: %w", err) + } + + // 5. Decode and blit each message into the dataset + for _, msg := range messages { + if msg.Section4.ProductDefinitionTemplateNumber != 0 { + continue + } + + product := msg.Section4.ProductDefinitionTemplate + + varIdx := dataset.VariableIndex(int(product.ParameterCategory), int(product.ParameterNumber)) + if varIdx < 0 { + continue + } + + if product.FirstSurface.Type != 100 { // isobaric surface + continue + } + + pressurePa := float64(product.FirstSurface.Value) + pressureMB := int(math.Round(pressurePa / 100.0)) + levelIdx := dataset.PressureIndex(pressureMB) + if levelIdx < 0 { + continue + } + + data := msg.Data() + if err := ds.BlitGribData(hourIdx, levelIdx, varIdx, data); err != nil { + d.log.Warn("blit failed", + zap.Int("var", varIdx), + zap.Int("level_mb", pressureMB), + zap.Error(err)) + continue + } + } + + return nil +} + +// downloadRangesToTempFile downloads multiple byte ranges from a URL, +// concatenating them into a single temp file (valid concatenated GRIB messages). 
+func (d *Downloader) downloadRangesToTempFile(ctx context.Context, baseURL string, ranges []ByteRange) (string, error) { + tmpFile, err := os.CreateTemp(d.cfg.DataDir, "grib-*.tmp") + if err != nil { + return "", fmt.Errorf("create temp file: %w", err) + } + tmpPath := tmpFile.Name() + + for _, r := range ranges { + data, err := d.httpGetRange(ctx, baseURL, r.Start, r.End) + if err != nil { + tmpFile.Close() + os.Remove(tmpPath) + return "", fmt.Errorf("download range %d-%d: %w", r.Start, r.End, err) + } + if _, err := tmpFile.Write(data); err != nil { + tmpFile.Close() + os.Remove(tmpPath) + return "", fmt.Errorf("write temp: %w", err) + } + } + + if err := tmpFile.Close(); err != nil { + os.Remove(tmpPath) + return "", err + } + + return tmpPath, nil +} + +// httpGet downloads a URL and returns the body bytes. +func (d *Downloader) httpGet(ctx context.Context, url string) ([]byte, error) { + var lastErr error + for attempt := 0; attempt < 3; attempt++ { + if attempt > 0 { + select { + case <-time.After(time.Duration(attempt*2) * time.Second): + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + resp, err := d.client.Do(req) + if err != nil { + lastErr = err + continue + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + lastErr = fmt.Errorf("HTTP %d for %s", resp.StatusCode, url) + continue + } + if err != nil { + lastErr = err + continue + } + + return body, nil + } + + return nil, fmt.Errorf("after 3 attempts: %w", lastErr) +} + +// httpGetRange downloads a byte range from a URL. 
+func (d *Downloader) httpGetRange(ctx context.Context, url string, start, end int64) ([]byte, error) { + var lastErr error + for attempt := 0; attempt < 3; attempt++ { + if attempt > 0 { + select { + case <-time.After(time.Duration(attempt*2) * time.Second): + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + + resp, err := d.client.Do(req) + if err != nil { + lastErr = err + continue + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + + if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { + lastErr = fmt.Errorf("HTTP %d for range %d-%d of %s", resp.StatusCode, start, end, url) + continue + } + if err != nil { + lastErr = err + continue + } + + return body, nil + } + + return nil, fmt.Errorf("after 3 attempts: %w", lastErr) +} diff --git a/internal/downloader/idx.go b/internal/downloader/idx.go new file mode 100644 index 0000000..2e09bc4 --- /dev/null +++ b/internal/downloader/idx.go @@ -0,0 +1,157 @@ +package downloader + +import ( + "fmt" + "strconv" + "strings" +) + +// IdxEntry represents a single parsed line from a GRIB .idx file. +// Example line: "15:1207405:d=2024010100:HGT:1000 mb:0 hour fcst:" +type IdxEntry struct { + Index int + Offset int64 + Variable string // "HGT", "UGRD", "VGRD", etc. + LevelMB int // pressure level in mb (0 if not a pressure level) + Hour int // forecast hour + EndOffset int64 // byte after this message (from next entry's offset, or -1 if last) +} + +// Length returns the byte length of this GRIB message, or -1 if unknown. +func (e *IdxEntry) Length() int64 { + if e.EndOffset <= 0 { + return -1 + } + return e.EndOffset - e.Offset +} + +// ParseIdx parses a .idx file body and returns all entries. +// Lines that can't be parsed are silently skipped. 
+func ParseIdx(body []byte) []IdxEntry { + lines := strings.Split(string(body), "\n") + var entries []IdxEntry + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + parts := strings.Split(line, ":") + if len(parts) < 7 { + continue + } + + idx, err := strconv.Atoi(parts[0]) + if err != nil { + continue + } + + offset, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + continue + } + + variable := parts[3] + levelStr := parts[4] + hourStr := parts[5] + + levelMB := parseLevelMB(levelStr) + hour := parseHour(hourStr) + + entries = append(entries, IdxEntry{ + Index: idx, + Offset: offset, + Variable: variable, + LevelMB: levelMB, + Hour: hour, + EndOffset: -1, // filled in below + }) + } + + // Fill in EndOffset from the next entry's Offset. + for i := 0; i < len(entries)-1; i++ { + entries[i].EndOffset = entries[i+1].Offset + } + + return entries +} + +// FilterIdx returns entries matching the given variables at pressure levels. +// Only entries with a recognized pressure level (levelMB > 0) are returned. +func FilterIdx(entries []IdxEntry, variables map[string]bool) []IdxEntry { + var filtered []IdxEntry + for _, e := range entries { + if !variables[e.Variable] { + continue + } + if e.LevelMB <= 0 { + continue + } + // Must have a known length (not the last entry) or be handled specially + if e.Length() <= 0 { + continue + } + filtered = append(filtered, e) + } + return filtered +} + +// parseLevelMB parses a level string like "1000 mb" and returns the pressure in mb. +// Returns 0 if not a pressure level. +func parseLevelMB(s string) int { + s = strings.TrimSpace(s) + if !strings.HasSuffix(s, " mb") { + return 0 + } + numStr := strings.TrimSuffix(s, " mb") + n, err := strconv.Atoi(numStr) + if err != nil { + return 0 + } + return n +} + +// parseHour parses a forecast hour string like "0 hour fcst" or "anl". +// Returns -1 if it can't be parsed. 
+func parseHour(s string) int { + s = strings.TrimSpace(s) + if s == "anl" { + return 0 + } + s = strings.TrimSuffix(s, " hour fcst") + n, err := strconv.Atoi(s) + if err != nil { + return -1 + } + return n +} + +// GroupByRange groups idx entries into byte ranges suitable for HTTP Range downloads. +// Each range covers one contiguous GRIB message. +type ByteRange struct { + Start int64 + End int64 // inclusive + Entry IdxEntry +} + +// EntriesToRanges converts filtered idx entries to byte ranges. +func EntriesToRanges(entries []IdxEntry) []ByteRange { + ranges := make([]ByteRange, 0, len(entries)) + for _, e := range entries { + if e.Length() <= 0 { + continue + } + ranges = append(ranges, ByteRange{ + Start: e.Offset, + End: e.EndOffset - 1, // inclusive + Entry: e, + }) + } + return ranges +} + +// FormatRange returns an HTTP Range header value for a byte range. +func (r ByteRange) FormatRange() string { + return fmt.Sprintf("bytes=%d-%d", r.Start, r.End) +} diff --git a/internal/downloader/idx_test.go b/internal/downloader/idx_test.go new file mode 100644 index 0000000..71a7224 --- /dev/null +++ b/internal/downloader/idx_test.go @@ -0,0 +1,110 @@ +package downloader + +import ( + "testing" +) + +const sampleIdx = `1:0:d=2024010100:HGT:1000 mb:0 hour fcst: +2:289012:d=2024010100:HGT:975 mb:0 hour fcst: +3:541876:d=2024010100:TMP:1000 mb:0 hour fcst: +4:789012:d=2024010100:UGRD:1000 mb:0 hour fcst: +5:1045678:d=2024010100:VGRD:1000 mb:0 hour fcst: +6:1298765:d=2024010100:UGRD:975 mb:0 hour fcst: +7:1567890:d=2024010100:UGRD:2 m above ground:0 hour fcst: +8:1812345:d=2024010100:VGRD:975 mb:0 hour fcst: +9:2098765:d=2024010100:HGT:500 mb:3 hour fcst: +` + +func TestParseIdx(t *testing.T) { + entries := ParseIdx([]byte(sampleIdx)) + if len(entries) != 9 { + t.Fatalf("expected 9 entries, got %d", len(entries)) + } + + // Check first entry + e := entries[0] + if e.Index != 1 || e.Offset != 0 || e.Variable != "HGT" || e.LevelMB != 1000 || e.Hour != 0 { + 
t.Errorf("entry 0: got %+v", e) + } + if e.EndOffset != 289012 { + t.Errorf("entry 0 EndOffset: got %d, want 289012", e.EndOffset) + } + + // Check "2 m above ground" is not a pressure level + e = entries[6] // UGRD at "2 m above ground" + if e.LevelMB != 0 { + t.Errorf("non-pressure level should have LevelMB=0, got %d", e.LevelMB) + } + + // Last entry should have EndOffset = -1 + last := entries[len(entries)-1] + if last.EndOffset != -1 { + t.Errorf("last entry EndOffset: got %d, want -1", last.EndOffset) + } +} + +func TestFilterIdx(t *testing.T) { + entries := ParseIdx([]byte(sampleIdx)) + filtered := FilterIdx(entries, neededVariables) + + // Should include HGT/UGRD/VGRD at pressure levels, exclude TMP and "above ground" + // And exclude last entry (no EndOffset) + for _, e := range filtered { + if !neededVariables[e.Variable] { + t.Errorf("unexpected variable %s", e.Variable) + } + if e.LevelMB <= 0 { + t.Errorf("non-pressure level included: %+v", e) + } + if e.Length() <= 0 { + t.Errorf("entry with unknown length included: %+v", e) + } + } + + // Count expected: HGT@1000, HGT@975, UGRD@1000, VGRD@1000, UGRD@975, VGRD@975 = 6 + // But HGT@500 at 3hr fcst is the last entry (no EndOffset), so excluded + if len(filtered) != 6 { + t.Errorf("expected 6 filtered entries, got %d", len(filtered)) + for _, e := range filtered { + t.Logf(" %s %d mb (offset %d, len %d)", e.Variable, e.LevelMB, e.Offset, e.Length()) + } + } +} + +func TestParseLevelMB(t *testing.T) { + tests := []struct { + input string + want int + }{ + {"1000 mb", 1000}, + {"975 mb", 975}, + {"1 mb", 1}, + {"2 m above ground", 0}, + {"surface", 0}, + {"tropopause", 0}, + } + for _, tt := range tests { + got := parseLevelMB(tt.input) + if got != tt.want { + t.Errorf("parseLevelMB(%q) = %d, want %d", tt.input, got, tt.want) + } + } +} + +func TestParseHour(t *testing.T) { + tests := []struct { + input string + want int + }{ + {"0 hour fcst", 0}, + {"3 hour fcst", 3}, + {"192 hour fcst", 192}, + {"anl", 
0}, + } + for _, tt := range tests { + got := parseHour(tt.input) + if got != tt.want { + t.Errorf("parseHour(%q) = %d, want %d", tt.input, got, tt.want) + } + } +} diff --git a/internal/elevation/elevation.go b/internal/elevation/elevation.go new file mode 100644 index 0000000..9fde295 --- /dev/null +++ b/internal/elevation/elevation.go @@ -0,0 +1,113 @@ +package elevation + +import ( + "encoding/binary" + "fmt" + "math" + "os" + + mmap "github.com/edsrzf/mmap-go" +) + +// Dataset provides global elevation lookup, compatible with ruaumoko. +// Binary format: int16 little-endian elevation values in metres, row-major (lat, lon). +// Latitude axis: -90 to +90 (south to north), Longitude axis: 0 to 360 (wraps). +// Resolution: 120 cells per degree (30 arc-seconds). +const ( + CellsPerDegree = 120 + NumLats = 180*CellsPerDegree + 1 // 21601 + NumLons = 360 * CellsPerDegree // 43200 + DataSize = NumLats * NumLons * 2 // 1,866,326,400 bytes (~1.74 GiB) +) + +// Dataset is a memory-mapped global elevation grid. +type Dataset struct { + mm mmap.MMap + file *os.File +} + +// Open opens an existing elevation dataset file. +func Open(path string) (*Dataset, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("open elevation: %w", err) + } + + info, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("stat elevation: %w", err) + } + if info.Size() != DataSize { + f.Close() + return nil, fmt.Errorf("elevation dataset should be %d bytes (was %d)", DataSize, info.Size()) + } + + mm, err := mmap.Map(f, mmap.RDONLY, 0) + if err != nil { + f.Close() + return nil, fmt.Errorf("mmap elevation: %w", err) + } + + return &Dataset{mm: mm, file: f}, nil +} + +// getCell reads the int16 elevation at grid indices (latIdx, lngIdx). 
+func (d *Dataset) getCell(latIdx, lngIdx int) int16 { + // Clamp latitude + if latIdx < 0 { + latIdx = 0 + } + if latIdx >= NumLats { + latIdx = NumLats - 1 + } + // Wrap longitude + lngIdx = lngIdx % NumLons + if lngIdx < 0 { + lngIdx += NumLons + } + + off := (latIdx*NumLons + lngIdx) * 2 + return int16(binary.LittleEndian.Uint16(d.mm[off : off+2])) +} + +// Get returns the interpolated elevation in metres at the given coordinates. +// lat: -90 to +90, lng: 0 to 360 (or -180 to 180, will be normalised). +func (d *Dataset) Get(lat, lng float64) float64 { + // Normalise longitude to [0, 360) + if lng < 0 { + lng += 360 + } + + // Convert to cell coordinates + latCell := (lat + 90.0) * CellsPerDegree + lngCell := lng * CellsPerDegree + + lat0 := int(math.Floor(latCell)) + lng0 := int(math.Floor(lngCell)) + latFrac := latCell - float64(lat0) + lngFrac := lngCell - float64(lng0) + + // Bilinear interpolation + v00 := float64(d.getCell(lat0, lng0)) + v10 := float64(d.getCell(lat0+1, lng0)) + v01 := float64(d.getCell(lat0, lng0+1)) + v11 := float64(d.getCell(lat0+1, lng0+1)) + + return (1-latFrac)*((1-lngFrac)*v00+lngFrac*v01) + + latFrac*((1-lngFrac)*v10+lngFrac*v11) +} + +// Close unmaps and closes the dataset. +func (d *Dataset) Close() error { + if d.mm != nil { + d.mm.Unmap() + d.mm = nil + } + if d.file != nil { + err := d.file.Close() + d.file = nil + return err + } + return nil +} diff --git a/internal/prediction/interpolate.go b/internal/prediction/interpolate.go new file mode 100644 index 0000000..5ef0d14 --- /dev/null +++ b/internal/prediction/interpolate.go @@ -0,0 +1,153 @@ +package prediction + +import ( + "fmt" + + "predictor-refactored/internal/dataset" +) + +// Exact port of the reference interpolation logic (interpolate.pyx). +// 4D interpolation: time, latitude, longitude, altitude (via geopotential height). + +// lerp1 holds an index and interpolation weight for one axis. 
+type lerp1 struct { + index int + lerp float64 +} + +// lerp3 holds indices and a combined weight for the (hour, lat, lon) axes. +type lerp3 struct { + hour, lat, lng int + lerp float64 +} + +// RangeError indicates a coordinate is outside the dataset bounds. +type RangeError struct { + Variable string + Value float64 +} + +func (e *RangeError) Error() string { + return fmt.Sprintf("%s=%f out of range", e.Variable, e.Value) +} + +// pick computes interpolation indices and weights for a single axis. +// left: axis start, step: axis spacing, n: number of points, value: query value. +// Returns two lerp1 values (lower and upper bracket). +func pick(left, step float64, n int, value float64, variableName string) ([2]lerp1, error) { + a := (value - left) / step + b := int(a) // truncation toward zero, same as Cython cast + if b < 0 || b >= n-1 { + return [2]lerp1{}, &RangeError{Variable: variableName, Value: value} + } + l := a - float64(b) + return [2]lerp1{ + {index: b, lerp: 1 - l}, + {index: b + 1, lerp: l}, + }, nil +} + +// pick3 computes 8 trilinear interpolation weights for (hour, lat, lng). +func pick3(hour, lat, lng float64) ([8]lerp3, error) { + lhour, err := pick(0, 3, 65, hour, "hour") + if err != nil { + return [8]lerp3{}, err + } + llat, err := pick(-90, 0.5, 361, lat, "lat") + if err != nil { + return [8]lerp3{}, err + } + // Longitude wraps: tell pick the axis is one larger, then wrap index 720 → 0 + llng, err := pick(0, 0.5, 720+1, lng, "lng") + if err != nil { + return [8]lerp3{}, err + } + if llng[1].index == 720 { + llng[1].index = 0 + } + + var out [8]lerp3 + i := 0 + for _, a := range lhour { + for _, b := range llat { + for _, c := range llng { + out[i] = lerp3{ + hour: a.index, + lat: b.index, + lng: c.index, + lerp: a.lerp * b.lerp * c.lerp, + } + i++ + } + } + } + return out, nil +} + +// interp3 performs 8-point weighted interpolation at a given variable and pressure level. 
+func interp3(ds *dataset.File, lerps [8]lerp3, variable, level int) float64 { + var r float64 + for i := 0; i < 8; i++ { + v := ds.Val(lerps[i].hour, level, variable, lerps[i].lat, lerps[i].lng) + r += float64(v) * lerps[i].lerp + } + return r +} + +// search finds the largest pressure level index where interpolated geopotential +// height is less than the target altitude. Searches levels 0..45 (excludes topmost). +func search(ds *dataset.File, lerps [8]lerp3, target float64) int { + lower, upper := 0, 45 + + for lower < upper { + mid := (lower + upper + 1) / 2 + test := interp3(ds, lerps, dataset.VarHeight, mid) + if target <= test { + upper = mid - 1 + } else { + lower = mid + } + } + + return lower +} + +// interp4 performs altitude-interpolated wind lookup using two bracketing levels. +func interp4(ds *dataset.File, lerps [8]lerp3, altLerp lerp1, variable int) float64 { + lower := interp3(ds, lerps, variable, altLerp.index) + upper := interp3(ds, lerps, variable, altLerp.index+1) + return lower*altLerp.lerp + upper*(1-altLerp.lerp) +} + +// GetWind returns interpolated (u, v) wind components for the given position. +// hour: fractional hours since dataset start. +// lat: latitude in degrees (-90 to +90). +// lng: longitude in degrees (0 to 360). +// alt: altitude in metres above sea level. 
+func GetWind(ds *dataset.File, warnings *Warnings, hour, lat, lng, alt float64) (u, v float64, err error) { + lerps, err := pick3(hour, lat, lng) + if err != nil { + return 0, 0, err + } + + altidx := search(ds, lerps, alt) + lower := interp3(ds, lerps, dataset.VarHeight, altidx) + upper := interp3(ds, lerps, dataset.VarHeight, altidx+1) + + var altLerp float64 + if lower != upper { + altLerp = (upper - alt) / (upper - lower) + } else { + altLerp = 0.5 + } + + if altLerp < 0 { + warnings.AltitudeTooHigh.Add(1) + } + + alt1 := lerp1{index: altidx, lerp: altLerp} + u = interp4(ds, lerps, alt1, dataset.VarWindU) + v = interp4(ds, lerps, alt1, dataset.VarWindV) + + return u, v, nil +} diff --git a/internal/prediction/models.go b/internal/prediction/models.go new file mode 100644 index 0000000..8048c46 --- /dev/null +++ b/internal/prediction/models.go @@ -0,0 +1,188 @@ +package prediction + +import ( + "math" + "time" + + "predictor-refactored/internal/dataset" + "predictor-refactored/internal/elevation" +) + +// Exact port of the reference flight models (models.py). + +const ( + pi180 = math.Pi / 180.0 + _180pi = 180.0 / math.Pi +) + +// --- Up/Down Models --- + +// ConstantAscent returns a model with constant vertical velocity (m/s). +func ConstantAscent(ascentRate float64) Model { + return func(t, lat, lng, alt float64) (dlat, dlng, dalt float64) { + return 0, 0, ascentRate + } +} + +// DragDescent returns a descent-under-parachute model. +// seaLevelDescentRate is the descent rate at sea level (m/s, positive value). +// Uses the NASA atmosphere model for density at altitude. +func DragDescent(seaLevelDescentRate float64) Model { + dragCoefficient := seaLevelDescentRate * 1.1045 + + return func(t, lat, lng, alt float64) (dlat, dlng, dalt float64) { + return 0, 0, -dragCoefficient / math.Sqrt(nasaDensity(alt)) + } +} + +// nasaDensity computes air density using the NASA atmosphere model. 
+// Reference: http://www.grc.nasa.gov/WWW/K-12/airplane/atmosmet.html +func nasaDensity(alt float64) float64 { + var temp, pressure float64 + + switch { + case alt > 25000: + temp = -131.21 + 0.00299*alt + pressure = 2.488 * math.Pow((temp+273.1)/216.6, -11.388) + case alt > 11000: + temp = -56.46 + pressure = 22.65 * math.Exp(1.73-0.000157*alt) + default: + temp = 15.04 - 0.00649*alt + pressure = 101.29 * math.Pow((temp+273.1)/288.08, 5.256) + } + + return pressure / (0.2869 * (temp + 273.1)) +} + +// --- Sideways Models --- + +// WindVelocity returns a model that gives lateral movement at the wind velocity. +// ds is the wind dataset, dsEpoch is the dataset start time as UNIX timestamp. +func WindVelocity(ds *dataset.File, dsEpoch float64, warnings *Warnings) Model { + return func(t, lat, lng, alt float64) (dlat, dlng, dalt float64) { + tHours := (t - dsEpoch) / 3600.0 + u, v, err := GetWind(ds, warnings, tHours, lat, lng, alt) + if err != nil { + return 0, 0, 0 + } + + R := 6371009.0 + alt + dlat = _180pi * v / R + dlng = _180pi * u / (R * math.Cos(lat*pi180)) + return dlat, dlng, 0 + } +} + +// --- Model Combinations --- + +// LinearModel returns a model that sums all component models. +func LinearModel(models ...Model) Model { + return func(t, lat, lng, alt float64) (dlat, dlng, dalt float64) { + for _, m := range models { + d1, d2, d3 := m(t, lat, lng, alt) + dlat += d1 + dlng += d2 + dalt += d3 + } + return + } +} + +// --- Termination Criteria --- + +// BurstTermination returns a terminator that fires when altitude >= burstAltitude. +func BurstTermination(burstAltitude float64) Terminator { + return func(t, lat, lng, alt float64) bool { + return alt >= burstAltitude + } +} + +// SeaLevelTermination fires when altitude <= 0. +func SeaLevelTermination(t, lat, lng, alt float64) bool { + return alt <= 0 +} + +// TimeTermination returns a terminator that fires when t > maxTime. 
+func TimeTermination(maxTime float64) Terminator { + return func(t, lat, lng, alt float64) bool { + return t > maxTime + } +} + +// ElevationTermination returns a terminator that fires when alt < ground level. +// Uses ruaumoko-compatible elevation data. Longitude is normalised internally. +func ElevationTermination(elev *elevation.Dataset) Terminator { + return func(t, lat, lng, alt float64) bool { + return elev.Get(lat, lng) > alt + } +} + +// --- Pre-Defined Profiles --- + +// Stage pairs a model with its termination criterion. +type Stage struct { + Model Model + Terminator Terminator +} + +// StandardProfile creates the chain for a standard high-altitude balloon flight: +// ascent at constant rate → burst → descent under parachute. +// If elev is non-nil, descent terminates at ground level; otherwise at sea level. +func StandardProfile(ascentRate, burstAltitude, descentRate float64, + ds *dataset.File, dsEpoch float64, warnings *Warnings, + elev *elevation.Dataset) []Stage { + + wind := WindVelocity(ds, dsEpoch, warnings) + + modelUp := LinearModel(ConstantAscent(ascentRate), wind) + termUp := BurstTermination(burstAltitude) + + modelDown := LinearModel(DragDescent(descentRate), wind) + var termDown Terminator + if elev != nil { + termDown = ElevationTermination(elev) + } else { + termDown = Terminator(SeaLevelTermination) + } + + return []Stage{ + {Model: modelUp, Terminator: termUp}, + {Model: modelDown, Terminator: termDown}, + } +} + +// FloatProfile creates the chain for a floating balloon flight: +// ascent to float altitude → float until stop time. 
+func FloatProfile(ascentRate, floatAltitude float64, stopTime time.Time, + ds *dataset.File, dsEpoch float64, warnings *Warnings) []Stage { + + wind := WindVelocity(ds, dsEpoch, warnings) + + modelUp := LinearModel(ConstantAscent(ascentRate), wind) + termUp := BurstTermination(floatAltitude) + + modelFloat := wind + termFloat := TimeTermination(float64(stopTime.Unix())) + + return []Stage{ + {Model: modelUp, Terminator: termUp}, + {Model: modelFloat, Terminator: termFloat}, + } +} + +// RunPrediction runs a prediction with the given profile stages. +// launchTime is a UNIX timestamp. +func RunPrediction(launchTime float64, lat, lng, alt float64, stages []Stage) []StageResult { + chain := make([]struct { + Model Model + Terminator Terminator + }, len(stages)) + + for i, s := range stages { + chain[i].Model = s.Model + chain[i].Terminator = s.Terminator + } + + return Solve(launchTime, lat, lng, alt, chain) +} diff --git a/internal/prediction/solver.go b/internal/prediction/solver.go new file mode 100644 index 0000000..62e29a7 --- /dev/null +++ b/internal/prediction/solver.go @@ -0,0 +1,180 @@ +package prediction + +import "math" + +// Exact port of the reference RK4 solver (solver.pyx). +// Integrates balloon state using RK4 with dt=60 seconds. +// Termination uses binary search refinement (tolerance 0.01). + +// Vec holds the balloon state: latitude, longitude, altitude. +type Vec struct { + Lat float64 + Lng float64 + Alt float64 +} + +// Model is a function that returns (dlat/dt, dlng/dt, dalt/dt) given state. +// t is UNIX timestamp, lat/lng in degrees, alt in metres. +type Model func(t float64, lat, lng, alt float64) (dlat, dlng, dalt float64) + +// Terminator returns true when integration should stop. +type Terminator func(t float64, lat, lng, alt float64) bool + +// StageResult holds the trajectory points for one flight stage. +type StageResult struct { + Points []TrajectoryPoint +} + +// TrajectoryPoint is a single point in a trajectory (used by solver). 
+type TrajectoryPoint struct { + T float64 // UNIX timestamp + Lat float64 + Lng float64 + Alt float64 +} + +// pymod returns a % b with Python semantics (always non-negative when b > 0). +func pymod(a, b float64) float64 { + r := math.Mod(a, b) + if r < 0 { + r += b + } + return r +} + +// vecadd returns a + k*b, with lng wrapped to [0, 360). +func vecadd(a Vec, k float64, b Vec) Vec { + return Vec{ + Lat: a.Lat + k*b.Lat, + Lng: pymod(a.Lng+k*b.Lng, 360.0), + Alt: a.Alt + k*b.Alt, + } +} + +// scalarLerp returns (1-l)*a + l*b. +func scalarLerp(a, b, l float64) float64 { + return (1-l)*a + l*b +} + +// lngLerp interpolates longitude handling the 0/360 wrap-around. +func lngLerp(a, b, l float64) float64 { + l2 := 1 - l + + if a > b { + a, b = b, a + l, l2 = l2, l + } + + // distance round one way: b - a + // distance around other: (a + 360) - b + if b-a < 180.0 { + return l2*a + l*b + } + return pymod(l2*(a+360)+l*b, 360.0) +} + +// vecLerp returns (1-l)*a + l*b with proper longitude wrapping. +func vecLerp(a, b Vec, l float64) Vec { + return Vec{ + Lat: scalarLerp(a.Lat, b.Lat, l), + Lng: lngLerp(a.Lng, b.Lng, l), + Alt: scalarLerp(a.Alt, b.Alt, l), + } +} + +// rk4 integrates from initial conditions using RK4. +// dt=60.0 seconds, terminationTolerance=0.01. 
+func rk4(t float64, lat, lng, alt float64, model Model, terminator Terminator) []TrajectoryPoint { + const dt = 60.0 + const terminationTolerance = 0.01 + + y := Vec{Lat: lat, Lng: lng, Alt: alt} + result := []TrajectoryPoint{{T: t, Lat: y.Lat, Lng: y.Lng, Alt: y.Alt}} + + for { + // Evaluate model at 4 points (standard RK4) + k1lat, k1lng, k1alt := model(t, y.Lat, y.Lng, y.Alt) + k1 := Vec{Lat: k1lat, Lng: k1lng, Alt: k1alt} + + mid1 := vecadd(y, dt/2, k1) + k2lat, k2lng, k2alt := model(t+dt/2, mid1.Lat, mid1.Lng, mid1.Alt) + k2 := Vec{Lat: k2lat, Lng: k2lng, Alt: k2alt} + + mid2 := vecadd(y, dt/2, k2) + k3lat, k3lng, k3alt := model(t+dt/2, mid2.Lat, mid2.Lng, mid2.Alt) + k3 := Vec{Lat: k3lat, Lng: k3lng, Alt: k3alt} + + end := vecadd(y, dt, k3) + k4lat, k4lng, k4alt := model(t+dt, end.Lat, end.Lng, end.Alt) + k4 := Vec{Lat: k4lat, Lng: k4lng, Alt: k4alt} + + // y2 = y + dt/6*k1 + dt/3*k2 + dt/3*k3 + dt/6*k4 + y2 := y + y2 = vecadd(y2, dt/6, k1) + y2 = vecadd(y2, dt/3, k2) + y2 = vecadd(y2, dt/3, k3) + y2 = vecadd(y2, dt/6, k4) + + t2 := t + dt + + if terminator(t2, y2.Lat, y2.Lng, y2.Alt) { + // Binary search to refine the termination point. + // Find l in [0, 1] such that (t3, y3) = lerp((t, y), (t2, y2), l) + // is near where the terminator fires. + left := 0.0 + right := 1.0 + + var t3 float64 + var y3 Vec + t3 = t2 + y3 = y2 + + for right-left > terminationTolerance { + mid := (left + right) / 2 + t3 = scalarLerp(t, t2, mid) + y3 = vecLerp(y, y2, mid) + + if terminator(t3, y3.Lat, y3.Lng, y3.Alt) { + right = mid + } else { + left = mid + } + } + + result = append(result, TrajectoryPoint{T: t3, Lat: y3.Lat, Lng: y3.Lng, Alt: y3.Alt}) + break + } + + // Update current state + t = t2 + y = y2 + result = append(result, TrajectoryPoint{T: t, Lat: y.Lat, Lng: y.Lng, Alt: y.Alt}) + } + + return result +} + +// Solve runs through a chain of (model, terminator) stages. +// Returns one StageResult per stage. 
+func Solve(t, lat, lng, alt float64, chain []struct { + Model Model + Terminator Terminator +}) []StageResult { + var results []StageResult + + for _, stage := range chain { + points := rk4(t, lat, lng, alt, stage.Model, stage.Terminator) + results = append(results, StageResult{Points: points}) + + // Next stage starts where this one ended + if len(points) > 0 { + last := points[len(points)-1] + t = last.T + lat = last.Lat + lng = last.Lng + alt = last.Alt + } + } + + return results +} diff --git a/internal/prediction/warnings.go b/internal/prediction/warnings.go new file mode 100644 index 0000000..1beeb1a --- /dev/null +++ b/internal/prediction/warnings.go @@ -0,0 +1,21 @@ +package prediction + +import "sync/atomic" + +// Warnings tracks warning conditions during a prediction run. +type Warnings struct { + AltitudeTooHigh atomic.Int64 +} + +// ToMap returns warnings as a map suitable for JSON serialization. +// Only includes warnings that have fired. +func (w *Warnings) ToMap() map[string]any { + result := make(map[string]any) + if n := w.AltitudeTooHigh.Load(); n > 0 { + result["altitude_too_high"] = map[string]any{ + "count": n, + "description": "The altitude went too high, above the max forecast wind. Wind data will be unreliable", + } + } + return result +} diff --git a/internal/service/service.go b/internal/service/service.go new file mode 100644 index 0000000..4ccd1d4 --- /dev/null +++ b/internal/service/service.go @@ -0,0 +1,245 @@ +package service + +import ( + "context" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "predictor-refactored/internal/dataset" + "predictor-refactored/internal/downloader" + "predictor-refactored/internal/elevation" + + "go.uber.org/zap" +) + +// Service orchestrates the dataset lifecycle and provides prediction capabilities. 
+type Service struct { + mu sync.RWMutex + ds *dataset.File + elev *elevation.Dataset + cfg *downloader.Config + dl *downloader.Downloader + log *zap.Logger + updating sync.Mutex // prevents concurrent downloads +} + +// New creates a new Service. +func New(cfg *downloader.Config, log *zap.Logger) *Service { + return &Service{ + cfg: cfg, + dl: downloader.NewDownloader(cfg, log), + log: log, + } +} + +// LoadElevation loads the ruaumoko-compatible elevation dataset from path. +// If the file doesn't exist, elevation termination is disabled (falls back to sea level). +func (s *Service) LoadElevation(path string) { + ds, err := elevation.Open(path) + if err != nil { + s.log.Warn("elevation dataset not available, using sea-level termination", + zap.String("path", path), zap.Error(err)) + return + } + s.elev = ds + s.log.Info("elevation dataset loaded", zap.String("path", path)) +} + +// Elevation returns the elevation dataset (may be nil). +func (s *Service) Elevation() *elevation.Dataset { + return s.elev +} + +// Ready returns true if the service has a loaded dataset. +func (s *Service) Ready() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ds != nil +} + +// DatasetTime returns the forecast time of the currently loaded dataset. +func (s *Service) DatasetTime() (time.Time, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + if s.ds == nil { + return time.Time{}, false + } + return s.ds.DSTime, true +} + +// Dataset returns the current dataset for reading. +func (s *Service) Dataset() *dataset.File { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ds +} + +// Update checks for and downloads new forecast data if needed. 
+func (s *Service) Update(ctx context.Context) error { + if !s.updating.TryLock() { + s.log.Info("update already in progress, skipping") + return nil + } + defer s.updating.Unlock() + + // Check if current dataset is still fresh + if dsTime, ok := s.DatasetTime(); ok { + if time.Since(dsTime) < s.cfg.DatasetTTL { + s.log.Info("dataset still fresh, skipping update", + zap.Time("dataset_time", dsTime), + zap.Duration("age", time.Since(dsTime))) + return nil + } + } + + // Try loading an existing dataset from disk first + if err := s.loadExistingDataset(); err == nil { + return nil + } + + // Find latest available model run + run, err := s.dl.FindLatestRun(ctx) + if err != nil { + return err + } + + // Download and assemble + path, err := s.dl.Download(ctx, run) + if err != nil { + return err + } + + // Open the new dataset + ds, err := dataset.Open(path, run) + if err != nil { + return err + } + + // Swap in the new dataset + s.setDataset(ds) + s.log.Info("dataset loaded", zap.Time("run", run), zap.String("path", path)) + + // Clean old datasets + s.cleanOldDatasets(path) + + return nil +} + +// loadExistingDataset tries to find and load an existing dataset from the data directory. 
+func (s *Service) loadExistingDataset() error { + entries, err := os.ReadDir(s.cfg.DataDir) + if err != nil { + return err + } + + // Collect valid dataset files (name is YYYYMMDDHH, no extension, correct size) + type candidate struct { + name string + path string + run time.Time + } + var candidates []candidate + + for _, e := range entries { + if e.IsDir() || strings.Contains(e.Name(), ".") { + continue + } + if len(e.Name()) != 10 { + continue + } + + run, err := time.Parse("2006010215", e.Name()) + if err != nil { + continue + } + + path := filepath.Join(s.cfg.DataDir, e.Name()) + info, err := os.Stat(path) + if err != nil || info.Size() != dataset.DatasetSize { + continue + } + + if time.Since(run) > s.cfg.DatasetTTL { + continue + } + + candidates = append(candidates, candidate{name: e.Name(), path: path, run: run}) + } + + if len(candidates) == 0 { + return os.ErrNotExist + } + + // Pick the newest + sort.Slice(candidates, func(i, j int) bool { + return candidates[i].run.After(candidates[j].run) + }) + + best := candidates[0] + ds, err := dataset.Open(best.path, best.run) + if err != nil { + return err + } + + s.setDataset(ds) + s.log.Info("loaded existing dataset", + zap.Time("run", best.run), + zap.String("path", best.path)) + return nil +} + +// setDataset swaps the current dataset with a new one, closing the old one. +func (s *Service) setDataset(ds *dataset.File) { + s.mu.Lock() + old := s.ds + s.ds = ds + s.mu.Unlock() + + if old != nil { + if err := old.Close(); err != nil { + s.log.Error("failed to close old dataset", zap.Error(err)) + } + } +} + +// cleanOldDatasets removes dataset files other than the one at keepPath. 
+func (s *Service) cleanOldDatasets(keepPath string) { + entries, err := os.ReadDir(s.cfg.DataDir) + if err != nil { + return + } + + for _, e := range entries { + if e.IsDir() { + continue + } + path := filepath.Join(s.cfg.DataDir, e.Name()) + if path == keepPath { + continue + } + // Remove old datasets and temp files + if len(e.Name()) == 10 || strings.HasSuffix(e.Name(), ".downloading") { + if err := os.Remove(path); err != nil { + s.log.Warn("failed to remove old file", zap.String("path", path), zap.Error(err)) + } else { + s.log.Info("removed old dataset", zap.String("path", path)) + } + } + } +} + +// Close releases all resources. +func (s *Service) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + if s.ds != nil { + err := s.ds.Close() + s.ds = nil + return err + } + return nil +} diff --git a/internal/transport/middleware/log.go b/internal/transport/middleware/log.go new file mode 100644 index 0000000..fbbbbc1 --- /dev/null +++ b/internal/transport/middleware/log.go @@ -0,0 +1,30 @@ +package middleware + +import ( + "time" + + "github.com/ogen-go/ogen/middleware" + "go.uber.org/zap" +) + +// Logging returns an ogen middleware that logs request duration. 
+func Logging(log *zap.Logger) middleware.Middleware { + return func(req middleware.Request, next func(req middleware.Request) (middleware.Response, error)) (middleware.Response, error) { + lg := log.With(zap.String("operation", req.OperationID)) + + start := time.Now() + resp, err := next(req) + dur := time.Since(start) + + if err != nil { + lg.Error("request failed", + zap.Duration("duration", dur), + zap.Error(err)) + } else { + lg.Info("request completed", + zap.Duration("duration", dur)) + } + + return resp, err + } +} diff --git a/internal/transport/rest/handler/deps.go b/internal/transport/rest/handler/deps.go new file mode 100644 index 0000000..f81a3b8 --- /dev/null +++ b/internal/transport/rest/handler/deps.go @@ -0,0 +1,16 @@ +package handler + +import ( + "time" + + "predictor-refactored/internal/dataset" + "predictor-refactored/internal/elevation" +) + +// Service defines the interface the handler needs from the service layer. +type Service interface { + Ready() bool + DatasetTime() (time.Time, bool) + Dataset() *dataset.File + Elevation() *elevation.Dataset +} diff --git a/internal/transport/rest/handler/handler.go b/internal/transport/rest/handler/handler.go new file mode 100644 index 0000000..fc1f693 --- /dev/null +++ b/internal/transport/rest/handler/handler.go @@ -0,0 +1,216 @@ +package handler + +import ( + "context" + "net/http" + "time" + + "predictor-refactored/internal/prediction" + api "predictor-refactored/pkg/rest" + + "go.uber.org/zap" +) + +var _ api.Handler = (*Handler)(nil) + +// Handler implements the ogen-generated api.Handler interface. +type Handler struct { + svc Service + log *zap.Logger +} + +// New creates a new Handler. +func New(svc Service, log *zap.Logger) *Handler { + return &Handler{svc: svc, log: log} +} + +// PerformPrediction implements the prediction endpoint. 
+func (h *Handler) PerformPrediction(ctx context.Context, params api.PerformPredictionParams) (*api.PredictionResponse, error) { + if !h.svc.Ready() { + return nil, newError(http.StatusServiceUnavailable, "no dataset loaded, service is starting up") + } + + ds := h.svc.Dataset() + if ds == nil { + return nil, newError(http.StatusServiceUnavailable, "dataset unavailable") + } + + dsEpoch := float64(ds.DSTime.Unix()) + + // Parse parameters with defaults + profile := "standard_profile" + if p, ok := params.Profile.Get(); ok { + profile = string(p) + } + + ascentRate := 5.0 + if v, ok := params.AscentRate.Get(); ok { + ascentRate = v + } + + burstAltitude := 28000.0 + if v, ok := params.BurstAltitude.Get(); ok { + burstAltitude = v + } + + descentRate := 5.0 + if v, ok := params.DescentRate.Get(); ok { + descentRate = v + } + + launchAlt := 0.0 + if v, ok := params.LaunchAltitude.Get(); ok { + launchAlt = v + } + + // Normalize longitude to [0, 360) + lng := params.LaunchLongitude + if lng < 0 { + lng += 360.0 + } + + launchTime := float64(params.LaunchDatetime.Unix()) + + warnings := &prediction.Warnings{} + + // Build profile chain + elev := h.svc.Elevation() + var stages []prediction.Stage + switch profile { + case "standard_profile": + stages = prediction.StandardProfile( + ascentRate, burstAltitude, descentRate, + ds, dsEpoch, warnings, elev) + case "float_profile": + floatAlt := 25000.0 + if v, ok := params.FloatAltitude.Get(); ok { + floatAlt = v + } + stopTime := params.LaunchDatetime.Add(24 * time.Hour) + if v, ok := params.StopDatetime.Get(); ok { + stopTime = v + } + stages = prediction.FloatProfile( + ascentRate, floatAlt, stopTime, + ds, dsEpoch, warnings) + default: + return nil, newError(http.StatusBadRequest, "unknown profile: "+profile) + } + + // Run prediction + startTime := time.Now().UTC() + results := prediction.RunPrediction(launchTime, params.LaunchLatitude, lng, launchAlt, stages) + completeTime := time.Now().UTC() + + // Build response + 
stageNames := []string{"ascent", "descent"} + if profile == "float_profile" { + stageNames = []string{"ascent", "float"} + } + + var predItems []api.PredictionResponsePredictionItem + for i, sr := range results { + stageName := "ascent" + if i < len(stageNames) { + stageName = stageNames[i] + } + + var stageEnum api.PredictionResponsePredictionItemStage + switch stageName { + case "ascent": + stageEnum = api.PredictionResponsePredictionItemStageAscent + case "descent": + stageEnum = api.PredictionResponsePredictionItemStageDescent + case "float": + stageEnum = api.PredictionResponsePredictionItemStageFloat + } + + var traj []api.PredictionResponsePredictionItemTrajectoryItem + for _, pt := range sr.Points { + ptLng := pt.Lng + if ptLng > 180 { + ptLng -= 360 + } + traj = append(traj, api.PredictionResponsePredictionItemTrajectoryItem{ + Datetime: time.Unix(int64(pt.T), 0).UTC(), + Latitude: pt.Lat, + Longitude: ptLng, + Altitude: pt.Alt, + }) + } + + predItems = append(predItems, api.PredictionResponsePredictionItem{ + Stage: stageEnum, + Trajectory: traj, + }) + } + + resp := &api.PredictionResponse{ + Prediction: predItems, + Metadata: api.PredictionResponseMetadata{ + StartDatetime: startTime, + CompleteDatetime: completeTime, + }, + } + + // Echo request + resp.Request = api.NewOptPredictionResponseRequest(api.PredictionResponseRequest{ + Dataset: api.NewOptString(ds.DSTime.Format("2006-01-02T15:04:05Z")), + LaunchLatitude: api.NewOptFloat64(params.LaunchLatitude), + LaunchLongitude: api.NewOptFloat64(params.LaunchLongitude), + LaunchDatetime: api.NewOptString(params.LaunchDatetime.Format(time.RFC3339)), + LaunchAltitude: params.LaunchAltitude, + }) + + // Warnings + warnMap := warnings.ToMap() + if len(warnMap) > 0 { + resp.Warnings = api.NewOptPredictionResponseWarnings(api.PredictionResponseWarnings{}) + } + + h.log.Info("prediction complete", + zap.String("profile", profile), + zap.Int("stages", len(results)), + zap.Duration("elapsed", 
completeTime.Sub(startTime))) + + return resp, nil +} + +// ReadinessCheck implements the health check endpoint. +func (h *Handler) ReadinessCheck(ctx context.Context) (*api.ReadinessResponse, error) { + resp := &api.ReadinessResponse{} + + if h.svc.Ready() { + resp.Status = api.ReadinessResponseStatusOk + if dsTime, ok := h.svc.DatasetTime(); ok { + resp.DatasetTime = api.NewOptDateTime(dsTime) + } + } else { + resp.Status = api.ReadinessResponseStatusNotReady + resp.ErrorMessage = api.NewOptString("no dataset loaded") + } + + return resp, nil +} + +// NewError creates an ErrorStatusCode from an error returned by a handler. +func (h *Handler) NewError(ctx context.Context, err error) *api.ErrorStatusCode { + if statusErr, ok := err.(*api.ErrorStatusCode); ok { + return statusErr + } + + h.log.Error("unhandled error", zap.Error(err)) + return newError(http.StatusInternalServerError, err.Error()) +} + +func newError(status int, description string) *api.ErrorStatusCode { + return &api.ErrorStatusCode{ + StatusCode: status, + Response: api.Error{ + Error: api.ErrorError{ + Type: http.StatusText(status), + Description: description, + }, + }, + } +} diff --git a/internal/transport/rest/transport.go b/internal/transport/rest/transport.go new file mode 100644 index 0000000..3744270 --- /dev/null +++ b/internal/transport/rest/transport.go @@ -0,0 +1,75 @@ +package rest + +import ( + "context" + "fmt" + "net/http" + + "predictor-refactored/internal/transport/middleware" + "predictor-refactored/internal/transport/rest/handler" + api "predictor-refactored/pkg/rest" + + "go.uber.org/zap" +) + +// Transport wraps the ogen HTTP server. +type Transport struct { + srv *api.Server + handler *handler.Handler + port int + log *zap.Logger +} + +// New creates a new REST transport. 
+func New(h *handler.Handler, port int, log *zap.Logger) (*Transport, error) { + srv, err := api.NewServer( + h, + api.WithMiddleware(middleware.Logging(log)), + ) + if err != nil { + return nil, fmt.Errorf("create ogen server: %w", err) + } + + return &Transport{ + srv: srv, + handler: h, + port: port, + log: log, + }, nil +} + +// Run starts the HTTP server. Blocks until the server stops. +func (t *Transport) Run() error { + mux := http.NewServeMux() + mux.Handle("/", t.srv) + + httpSrv := &http.Server{ + Addr: fmt.Sprintf(":%d", t.port), + Handler: corsMiddleware(mux), + } + + t.log.Info("starting HTTP server", zap.Int("port", t.port)) + return httpSrv.ListenAndServe() +} + +// Shutdown gracefully stops the HTTP server. +func (t *Transport) Shutdown(ctx context.Context) error { + // The ogen server doesn't have a shutdown method; + // shutdown is handled by the http.Server in main.go + return nil +} + +func corsMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type") + + if r.Method == http.MethodOptions { + w.WriteHeader(http.StatusNoContent) + return + } + + next.ServeHTTP(w, r) + }) +} diff --git a/pkg/rest/oas_cfg_gen.go b/pkg/rest/oas_cfg_gen.go new file mode 100644 index 0000000..9d5a059 --- /dev/null +++ b/pkg/rest/oas_cfg_gen.go @@ -0,0 +1,318 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "net/http" + "strings" + + ht "github.com/ogen-go/ogen/http" + "github.com/ogen-go/ogen/middleware" + "github.com/ogen-go/ogen/ogenerrors" + "github.com/ogen-go/ogen/otelogen" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +var ( + // Allocate option closure once. 
+ clientSpanKind = trace.WithSpanKind(trace.SpanKindClient) + // Allocate option closure once. + serverSpanKind = trace.WithSpanKind(trace.SpanKindServer) +) + +type ( + optionFunc[C any] func(*C) + otelOptionFunc func(*otelConfig) +) + +type otelConfig struct { + TracerProvider trace.TracerProvider + Tracer trace.Tracer + MeterProvider metric.MeterProvider + Meter metric.Meter + Attributes []attribute.KeyValue +} + +func (cfg *otelConfig) initOTEL() { + if cfg.TracerProvider == nil { + cfg.TracerProvider = otel.GetTracerProvider() + } + if cfg.MeterProvider == nil { + cfg.MeterProvider = otel.GetMeterProvider() + } + cfg.Tracer = cfg.TracerProvider.Tracer(otelogen.Name, + trace.WithInstrumentationVersion(otelogen.SemVersion()), + ) + cfg.Meter = cfg.MeterProvider.Meter(otelogen.Name, + metric.WithInstrumentationVersion(otelogen.SemVersion()), + ) +} + +// ErrorHandler is error handler. +type ErrorHandler = ogenerrors.ErrorHandler + +type serverConfig struct { + otelConfig + NotFound http.HandlerFunc + MethodNotAllowed func(w http.ResponseWriter, r *http.Request, allowed string) + ErrorHandler ErrorHandler + Prefix string + Middleware Middleware + MaxMultipartMemory int64 +} + +// ServerOption is server config option. 
+type ServerOption interface { + applyServer(*serverConfig) +} + +var _ ServerOption = (optionFunc[serverConfig])(nil) + +func (o optionFunc[C]) applyServer(c *C) { + o(c) +} + +var _ ServerOption = (otelOptionFunc)(nil) + +func (o otelOptionFunc) applyServer(c *serverConfig) { + o(&c.otelConfig) +} + +func newServerConfig(opts ...ServerOption) serverConfig { + cfg := serverConfig{ + NotFound: http.NotFound, + MethodNotAllowed: nil, + ErrorHandler: ogenerrors.DefaultErrorHandler, + Middleware: nil, + MaxMultipartMemory: 32 << 20, // 32 MB + } + for _, opt := range opts { + opt.applyServer(&cfg) + } + cfg.initOTEL() + return cfg +} + +type baseServer struct { + cfg serverConfig + requests metric.Int64Counter + errors metric.Int64Counter + duration metric.Float64Histogram +} + +func (s baseServer) notFound(w http.ResponseWriter, r *http.Request) { + s.cfg.NotFound(w, r) +} + +type notAllowedParams struct { + allowedMethods string + allowedHeaders map[string]string + acceptPost string + acceptPatch string +} + +func (s baseServer) notAllowed(w http.ResponseWriter, r *http.Request, params notAllowedParams) { + h := w.Header() + isOptions := r.Method == "OPTIONS" + if isOptions { + h.Set("Access-Control-Allow-Methods", params.allowedMethods) + if params.allowedHeaders != nil { + m := r.Header.Get("Access-Control-Request-Method") + if m != "" { + allowedHeaders, ok := params.allowedHeaders[strings.ToUpper(m)] + if ok { + h.Set("Access-Control-Allow-Headers", allowedHeaders) + } + } + } + if params.acceptPost != "" { + h.Set("Accept-Post", params.acceptPost) + } + if params.acceptPatch != "" { + h.Set("Accept-Patch", params.acceptPatch) + } + } + if s.cfg.MethodNotAllowed != nil { + s.cfg.MethodNotAllowed(w, r, params.allowedMethods) + return + } + status := http.StatusNoContent + if !isOptions { + h.Set("Allow", params.allowedMethods) + status = http.StatusMethodNotAllowed + } + w.WriteHeader(status) +} + +func (cfg serverConfig) baseServer() (s baseServer, err error) { 
+ s = baseServer{cfg: cfg} + if s.requests, err = otelogen.ServerRequestCountCounter(s.cfg.Meter); err != nil { + return s, err + } + if s.errors, err = otelogen.ServerErrorsCountCounter(s.cfg.Meter); err != nil { + return s, err + } + if s.duration, err = otelogen.ServerDurationHistogram(s.cfg.Meter); err != nil { + return s, err + } + return s, nil +} + +type clientConfig struct { + otelConfig + Client ht.Client +} + +// ClientOption is client config option. +type ClientOption interface { + applyClient(*clientConfig) +} + +var _ ClientOption = (optionFunc[clientConfig])(nil) + +func (o optionFunc[C]) applyClient(c *C) { + o(c) +} + +var _ ClientOption = (otelOptionFunc)(nil) + +func (o otelOptionFunc) applyClient(c *clientConfig) { + o(&c.otelConfig) +} + +func newClientConfig(opts ...ClientOption) clientConfig { + cfg := clientConfig{ + Client: http.DefaultClient, + } + for _, opt := range opts { + opt.applyClient(&cfg) + } + cfg.initOTEL() + return cfg +} + +type baseClient struct { + cfg clientConfig + requests metric.Int64Counter + errors metric.Int64Counter + duration metric.Float64Histogram +} + +func (cfg clientConfig) baseClient() (c baseClient, err error) { + c = baseClient{cfg: cfg} + if c.requests, err = otelogen.ClientRequestCountCounter(c.cfg.Meter); err != nil { + return c, err + } + if c.errors, err = otelogen.ClientErrorsCountCounter(c.cfg.Meter); err != nil { + return c, err + } + if c.duration, err = otelogen.ClientDurationHistogram(c.cfg.Meter); err != nil { + return c, err + } + return c, nil +} + +// Option is config option. +type Option interface { + ServerOption + ClientOption +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// +// If none is specified, the global provider is used. 
+func WithTracerProvider(provider trace.TracerProvider) Option { + return otelOptionFunc(func(cfg *otelConfig) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// +// If none is specified, the otel.GetMeterProvider() is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return otelOptionFunc(func(cfg *otelConfig) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithAttributes specifies default otel attributes. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return otelOptionFunc(func(cfg *otelConfig) { + cfg.Attributes = attributes + }) +} + +// WithClient specifies http client to use. +func WithClient(client ht.Client) ClientOption { + return optionFunc[clientConfig](func(cfg *clientConfig) { + if client != nil { + cfg.Client = client + } + }) +} + +// WithNotFound specifies Not Found handler to use. +func WithNotFound(notFound http.HandlerFunc) ServerOption { + return optionFunc[serverConfig](func(cfg *serverConfig) { + if notFound != nil { + cfg.NotFound = notFound + } + }) +} + +// WithMethodNotAllowed specifies Method Not Allowed handler to use. +func WithMethodNotAllowed(methodNotAllowed func(w http.ResponseWriter, r *http.Request, allowed string)) ServerOption { + return optionFunc[serverConfig](func(cfg *serverConfig) { + if methodNotAllowed != nil { + cfg.MethodNotAllowed = methodNotAllowed + } + }) +} + +// WithErrorHandler specifies error handler to use. +func WithErrorHandler(h ErrorHandler) ServerOption { + return optionFunc[serverConfig](func(cfg *serverConfig) { + if h != nil { + cfg.ErrorHandler = h + } + }) +} + +// WithPathPrefix specifies server path prefix. +func WithPathPrefix(prefix string) ServerOption { + return optionFunc[serverConfig](func(cfg *serverConfig) { + cfg.Prefix = prefix + }) +} + +// WithMiddleware specifies middlewares to use. 
+func WithMiddleware(m ...Middleware) ServerOption { + return optionFunc[serverConfig](func(cfg *serverConfig) { + switch len(m) { + case 0: + cfg.Middleware = nil + case 1: + cfg.Middleware = m[0] + default: + cfg.Middleware = middleware.ChainMiddlewares(m...) + } + }) +} + +// WithMaxMultipartMemory specifies limit of memory for storing file parts. +// File parts which can't be stored in memory will be stored on disk in temporary files. +func WithMaxMultipartMemory(max int64) ServerOption { + return optionFunc[serverConfig](func(cfg *serverConfig) { + if max > 0 { + cfg.MaxMultipartMemory = max + } + }) +} diff --git a/pkg/rest/oas_client_gen.go b/pkg/rest/oas_client_gen.go new file mode 100644 index 0000000..02e95c3 --- /dev/null +++ b/pkg/rest/oas_client_gen.go @@ -0,0 +1,411 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "context" + "net/url" + "strings" + "time" + + "github.com/go-faster/errors" + "github.com/ogen-go/ogen/conv" + ht "github.com/ogen-go/ogen/http" + "github.com/ogen-go/ogen/otelogen" + "github.com/ogen-go/ogen/uri" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + "go.opentelemetry.io/otel/trace" +) + +func trimTrailingSlashes(u *url.URL) { + u.Path = strings.TrimRight(u.Path, "/") + u.RawPath = strings.TrimRight(u.RawPath, "/") +} + +// Invoker invokes operations described by OpenAPI v3 specification. +type Invoker interface { + // PerformPrediction invokes performPrediction operation. + // + // Perform prediction. + // + // GET /api/v1/prediction + PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResponse, error) + // ReadinessCheck invokes readinessCheck operation. + // + // Readiness check. + // + // GET /ready + ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) +} + +// Client implements OAS client. 
+type Client struct { + serverURL *url.URL + baseClient +} + +// NewClient initializes new Client defined by OAS. +func NewClient(serverURL string, opts ...ClientOption) (*Client, error) { + u, err := url.Parse(serverURL) + if err != nil { + return nil, err + } + trimTrailingSlashes(u) + + c, err := newClientConfig(opts...).baseClient() + if err != nil { + return nil, err + } + return &Client{ + serverURL: u, + baseClient: c, + }, nil +} + +type serverURLKey struct{} + +// WithServerURL sets context key to override server URL. +func WithServerURL(ctx context.Context, u *url.URL) context.Context { + return context.WithValue(ctx, serverURLKey{}, u) +} + +func (c *Client) requestURL(ctx context.Context) *url.URL { + u, ok := ctx.Value(serverURLKey{}).(*url.URL) + if !ok { + return c.serverURL + } + return u +} + +// PerformPrediction invokes performPrediction operation. +// +// Perform prediction. +// +// GET /api/v1/prediction +func (c *Client) PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResponse, error) { + res, err := c.sendPerformPrediction(ctx, params) + return res, err +} + +func (c *Client) sendPerformPrediction(ctx context.Context, params PerformPredictionParams) (res *PredictionResponse, err error) { + otelAttrs := []attribute.KeyValue{ + otelogen.OperationID("performPrediction"), + semconv.HTTPRequestMethodKey.String("GET"), + semconv.URLTemplateKey.String("/api/v1/prediction"), + } + otelAttrs = append(otelAttrs, c.cfg.Attributes...) + + // Run stopwatch. + startTime := time.Now() + defer func() { + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedDuration := time.Since(startTime) + c.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), metric.WithAttributes(otelAttrs...)) + }() + + // Increment request counter. + c.requests.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) + + // Start a span for this request. 
+ ctx, span := c.cfg.Tracer.Start(ctx, PerformPredictionOperation, + trace.WithAttributes(otelAttrs...), + clientSpanKind, + ) + // Track stage for error reporting. + var stage string + defer func() { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, stage) + c.errors.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) + } + span.End() + }() + + stage = "BuildURL" + u := uri.Clone(c.requestURL(ctx)) + var pathParts [1]string + pathParts[0] = "/api/v1/prediction" + uri.AddPathParts(u, pathParts[:]...) + + stage = "EncodeQueryParams" + q := uri.NewQueryEncoder() + { + // Encode "launch_latitude" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "launch_latitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + return e.EncodeValue(conv.Float64ToString(params.LaunchLatitude)) + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "launch_longitude" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "launch_longitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + return e.EncodeValue(conv.Float64ToString(params.LaunchLongitude)) + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "launch_datetime" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "launch_datetime", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + return e.EncodeValue(conv.DateTimeToString(params.LaunchDatetime)) + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "launch_altitude" parameter. 
+ cfg := uri.QueryParameterEncodingConfig{ + Name: "launch_altitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.LaunchAltitude.Get(); ok { + return e.EncodeValue(conv.Float64ToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "profile" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "profile", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.Profile.Get(); ok { + return e.EncodeValue(conv.StringToString(string(val))) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "ascent_rate" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "ascent_rate", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.AscentRate.Get(); ok { + return e.EncodeValue(conv.Float64ToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "burst_altitude" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "burst_altitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.BurstAltitude.Get(); ok { + return e.EncodeValue(conv.Float64ToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "descent_rate" parameter. 
+ cfg := uri.QueryParameterEncodingConfig{ + Name: "descent_rate", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.DescentRate.Get(); ok { + return e.EncodeValue(conv.Float64ToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "float_altitude" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "float_altitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.FloatAltitude.Get(); ok { + return e.EncodeValue(conv.Float64ToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "stop_datetime" parameter. + cfg := uri.QueryParameterEncodingConfig{ + Name: "stop_datetime", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.StopDatetime.Get(); ok { + return e.EncodeValue(conv.DateTimeToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + { + // Encode "dataset" parameter. 
+ cfg := uri.QueryParameterEncodingConfig{ + Name: "dataset", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.EncodeParam(cfg, func(e uri.Encoder) error { + if val, ok := params.Dataset.Get(); ok { + return e.EncodeValue(conv.DateTimeToString(val)) + } + return nil + }); err != nil { + return res, errors.Wrap(err, "encode query") + } + } + u.RawQuery = q.Values().Encode() + + stage = "EncodeRequest" + r, err := ht.NewRequest(ctx, "GET", u) + if err != nil { + return res, errors.Wrap(err, "create request") + } + + stage = "SendRequest" + resp, err := c.cfg.Client.Do(r) + if err != nil { + return res, errors.Wrap(err, "do request") + } + body := resp.Body + defer body.Close() + + stage = "DecodeResponse" + result, err := decodePerformPredictionResponse(resp) + if err != nil { + return res, errors.Wrap(err, "decode response") + } + + return result, nil +} + +// ReadinessCheck invokes readinessCheck operation. +// +// Readiness check. +// +// GET /ready +func (c *Client) ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) { + res, err := c.sendReadinessCheck(ctx) + return res, err +} + +func (c *Client) sendReadinessCheck(ctx context.Context) (res *ReadinessResponse, err error) { + otelAttrs := []attribute.KeyValue{ + otelogen.OperationID("readinessCheck"), + semconv.HTTPRequestMethodKey.String("GET"), + semconv.URLTemplateKey.String("/ready"), + } + otelAttrs = append(otelAttrs, c.cfg.Attributes...) + + // Run stopwatch. + startTime := time.Now() + defer func() { + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedDuration := time.Since(startTime) + c.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), metric.WithAttributes(otelAttrs...)) + }() + + // Increment request counter. + c.requests.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) + + // Start a span for this request. 
+ ctx, span := c.cfg.Tracer.Start(ctx, ReadinessCheckOperation, + trace.WithAttributes(otelAttrs...), + clientSpanKind, + ) + // Track stage for error reporting. + var stage string + defer func() { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, stage) + c.errors.Add(ctx, 1, metric.WithAttributes(otelAttrs...)) + } + span.End() + }() + + stage = "BuildURL" + u := uri.Clone(c.requestURL(ctx)) + var pathParts [1]string + pathParts[0] = "/ready" + uri.AddPathParts(u, pathParts[:]...) + + stage = "EncodeRequest" + r, err := ht.NewRequest(ctx, "GET", u) + if err != nil { + return res, errors.Wrap(err, "create request") + } + + stage = "SendRequest" + resp, err := c.cfg.Client.Do(r) + if err != nil { + return res, errors.Wrap(err, "do request") + } + body := resp.Body + defer body.Close() + + stage = "DecodeResponse" + result, err := decodeReadinessCheckResponse(resp) + if err != nil { + return res, errors.Wrap(err, "decode response") + } + + return result, nil +} diff --git a/pkg/rest/oas_handlers_gen.go b/pkg/rest/oas_handlers_gen.go new file mode 100644 index 0000000..d41771a --- /dev/null +++ b/pkg/rest/oas_handlers_gen.go @@ -0,0 +1,363 @@ +// Code generated by ogen, DO NOT EDIT. 
+ +package rest + +import ( + "context" + "net/http" + "time" + + "github.com/go-faster/errors" + ht "github.com/ogen-go/ogen/http" + "github.com/ogen-go/ogen/middleware" + "github.com/ogen-go/ogen/ogenerrors" + "github.com/ogen-go/ogen/otelogen" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + "go.opentelemetry.io/otel/trace" +) + +type codeRecorder struct { + http.ResponseWriter + status int +} + +func (c *codeRecorder) WriteHeader(status int) { + c.status = status + c.ResponseWriter.WriteHeader(status) +} + +func (c *codeRecorder) Unwrap() http.ResponseWriter { + return c.ResponseWriter +} + +// handlePerformPredictionRequest handles performPrediction operation. +// +// Perform prediction. +// +// GET /api/v1/prediction +func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool, w http.ResponseWriter, r *http.Request) { + statusWriter := &codeRecorder{ResponseWriter: w} + w = statusWriter + otelAttrs := []attribute.KeyValue{ + otelogen.OperationID("performPrediction"), + semconv.HTTPRequestMethodKey.String("GET"), + semconv.HTTPRouteKey.String("/api/v1/prediction"), + } + // Add attributes from config. + otelAttrs = append(otelAttrs, s.cfg.Attributes...) + + // Start a span for this request. + ctx, span := s.cfg.Tracer.Start(r.Context(), PerformPredictionOperation, + trace.WithAttributes(otelAttrs...), + serverSpanKind, + ) + defer span.End() + + // Add Labeler to context. + labeler := &Labeler{attrs: otelAttrs} + ctx = contextWithLabeler(ctx, labeler) + + // Run stopwatch. 
+ startTime := time.Now() + defer func() { + elapsedDuration := time.Since(startTime) + + attrSet := labeler.AttributeSet() + attrs := attrSet.ToSlice() + code := statusWriter.status + if code != 0 { + codeAttr := semconv.HTTPResponseStatusCode(code) + attrs = append(attrs, codeAttr) + span.SetAttributes(codeAttr) + } + attrOpt := metric.WithAttributes(attrs...) + + // Increment request counter. + s.requests.Add(ctx, 1, attrOpt) + + // Use floating point division here for higher precision (instead of Millisecond method). + s.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), attrOpt) + }() + + var ( + recordError = func(stage string, err error) { + span.RecordError(err) + + // https://opentelemetry.io/docs/specs/semconv/http/http-spans/#status + // Span Status MUST be left unset if HTTP status code was in the 1xx, 2xx or 3xx ranges, + // unless there was another error (e.g., network error receiving the response body; or 3xx codes with + // max redirects exceeded), in which case status MUST be set to Error. 
+ code := statusWriter.status + if code < 100 || code >= 500 { + span.SetStatus(codes.Error, stage) + } + + attrSet := labeler.AttributeSet() + attrs := attrSet.ToSlice() + if code != 0 { + attrs = append(attrs, semconv.HTTPResponseStatusCode(code)) + } + + s.errors.Add(ctx, 1, metric.WithAttributes(attrs...)) + } + err error + opErrContext = ogenerrors.OperationContext{ + Name: PerformPredictionOperation, + ID: "performPrediction", + } + ) + params, err := decodePerformPredictionParams(args, argsEscaped, r) + if err != nil { + err = &ogenerrors.DecodeParamsError{ + OperationContext: opErrContext, + Err: err, + } + defer recordError("DecodeParams", err) + s.cfg.ErrorHandler(ctx, w, r, err) + return + } + + var rawBody []byte + + var response *PredictionResponse + if m := s.cfg.Middleware; m != nil { + mreq := middleware.Request{ + Context: ctx, + OperationName: PerformPredictionOperation, + OperationSummary: "Perform prediction", + OperationID: "performPrediction", + Body: nil, + RawBody: rawBody, + Params: middleware.Parameters{ + { + Name: "launch_latitude", + In: "query", + }: params.LaunchLatitude, + { + Name: "launch_longitude", + In: "query", + }: params.LaunchLongitude, + { + Name: "launch_datetime", + In: "query", + }: params.LaunchDatetime, + { + Name: "launch_altitude", + In: "query", + }: params.LaunchAltitude, + { + Name: "profile", + In: "query", + }: params.Profile, + { + Name: "ascent_rate", + In: "query", + }: params.AscentRate, + { + Name: "burst_altitude", + In: "query", + }: params.BurstAltitude, + { + Name: "descent_rate", + In: "query", + }: params.DescentRate, + { + Name: "float_altitude", + In: "query", + }: params.FloatAltitude, + { + Name: "stop_datetime", + In: "query", + }: params.StopDatetime, + { + Name: "dataset", + In: "query", + }: params.Dataset, + }, + Raw: r, + } + + type ( + Request = struct{} + Params = PerformPredictionParams + Response = *PredictionResponse + ) + response, err = middleware.HookMiddleware[ + Request, + Params, 
+ Response, + ]( + m, + mreq, + unpackPerformPredictionParams, + func(ctx context.Context, request Request, params Params) (response Response, err error) { + response, err = s.h.PerformPrediction(ctx, params) + return response, err + }, + ) + } else { + response, err = s.h.PerformPrediction(ctx, params) + } + if err != nil { + if errRes, ok := errors.Into[*ErrorStatusCode](err); ok { + if err := encodeErrorResponse(errRes, w, span); err != nil { + defer recordError("Internal", err) + } + return + } + if errors.Is(err, ht.ErrNotImplemented) { + s.cfg.ErrorHandler(ctx, w, r, err) + return + } + if err := encodeErrorResponse(s.h.NewError(ctx, err), w, span); err != nil { + defer recordError("Internal", err) + } + return + } + + if err := encodePerformPredictionResponse(response, w, span); err != nil { + defer recordError("EncodeResponse", err) + if !errors.Is(err, ht.ErrInternalServerErrorResponse) { + s.cfg.ErrorHandler(ctx, w, r, err) + } + return + } +} + +// handleReadinessCheckRequest handles readinessCheck operation. +// +// Readiness check. +// +// GET /ready +func (s *Server) handleReadinessCheckRequest(args [0]string, argsEscaped bool, w http.ResponseWriter, r *http.Request) { + statusWriter := &codeRecorder{ResponseWriter: w} + w = statusWriter + otelAttrs := []attribute.KeyValue{ + otelogen.OperationID("readinessCheck"), + semconv.HTTPRequestMethodKey.String("GET"), + semconv.HTTPRouteKey.String("/ready"), + } + // Add attributes from config. + otelAttrs = append(otelAttrs, s.cfg.Attributes...) + + // Start a span for this request. + ctx, span := s.cfg.Tracer.Start(r.Context(), ReadinessCheckOperation, + trace.WithAttributes(otelAttrs...), + serverSpanKind, + ) + defer span.End() + + // Add Labeler to context. + labeler := &Labeler{attrs: otelAttrs} + ctx = contextWithLabeler(ctx, labeler) + + // Run stopwatch. 
+ startTime := time.Now() + defer func() { + elapsedDuration := time.Since(startTime) + + attrSet := labeler.AttributeSet() + attrs := attrSet.ToSlice() + code := statusWriter.status + if code != 0 { + codeAttr := semconv.HTTPResponseStatusCode(code) + attrs = append(attrs, codeAttr) + span.SetAttributes(codeAttr) + } + attrOpt := metric.WithAttributes(attrs...) + + // Increment request counter. + s.requests.Add(ctx, 1, attrOpt) + + // Use floating point division here for higher precision (instead of Millisecond method). + s.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), attrOpt) + }() + + var ( + recordError = func(stage string, err error) { + span.RecordError(err) + + // https://opentelemetry.io/docs/specs/semconv/http/http-spans/#status + // Span Status MUST be left unset if HTTP status code was in the 1xx, 2xx or 3xx ranges, + // unless there was another error (e.g., network error receiving the response body; or 3xx codes with + // max redirects exceeded), in which case status MUST be set to Error. 
+ code := statusWriter.status + if code < 100 || code >= 500 { + span.SetStatus(codes.Error, stage) + } + + attrSet := labeler.AttributeSet() + attrs := attrSet.ToSlice() + if code != 0 { + attrs = append(attrs, semconv.HTTPResponseStatusCode(code)) + } + + s.errors.Add(ctx, 1, metric.WithAttributes(attrs...)) + } + err error + ) + + var rawBody []byte + + var response *ReadinessResponse + if m := s.cfg.Middleware; m != nil { + mreq := middleware.Request{ + Context: ctx, + OperationName: ReadinessCheckOperation, + OperationSummary: "Readiness check", + OperationID: "readinessCheck", + Body: nil, + RawBody: rawBody, + Params: middleware.Parameters{}, + Raw: r, + } + + type ( + Request = struct{} + Params = struct{} + Response = *ReadinessResponse + ) + response, err = middleware.HookMiddleware[ + Request, + Params, + Response, + ]( + m, + mreq, + nil, + func(ctx context.Context, request Request, params Params) (response Response, err error) { + response, err = s.h.ReadinessCheck(ctx) + return response, err + }, + ) + } else { + response, err = s.h.ReadinessCheck(ctx) + } + if err != nil { + if errRes, ok := errors.Into[*ErrorStatusCode](err); ok { + if err := encodeErrorResponse(errRes, w, span); err != nil { + defer recordError("Internal", err) + } + return + } + if errors.Is(err, ht.ErrNotImplemented) { + s.cfg.ErrorHandler(ctx, w, r, err) + return + } + if err := encodeErrorResponse(s.h.NewError(ctx, err), w, span); err != nil { + defer recordError("Internal", err) + } + return + } + + if err := encodeReadinessCheckResponse(response, w, span); err != nil { + defer recordError("EncodeResponse", err) + if !errors.Is(err, ht.ErrInternalServerErrorResponse) { + s.cfg.ErrorHandler(ctx, w, r, err) + } + return + } +} diff --git a/pkg/rest/oas_json_gen.go b/pkg/rest/oas_json_gen.go new file mode 100644 index 0000000..8fa8634 --- /dev/null +++ b/pkg/rest/oas_json_gen.go @@ -0,0 +1,1398 @@ +// Code generated by ogen, DO NOT EDIT. 
+ +package rest + +import ( + "math/bits" + "strconv" + "time" + + "github.com/go-faster/errors" + "github.com/go-faster/jx" + "github.com/ogen-go/ogen/json" + "github.com/ogen-go/ogen/validate" +) + +// Encode implements json.Marshaler. +func (s *Error) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. +func (s *Error) encodeFields(e *jx.Encoder) { + { + e.FieldStart("error") + s.Error.Encode(e) + } +} + +var jsonFieldsNameOfError = [1]string{ + 0: "error", +} + +// Decode decodes Error from json. +func (s *Error) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode Error to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "error": + requiredBitSet[0] |= 1 << 0 + if err := func() error { + if err := s.Error.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"error\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode Error") + } + // Validate required fields. + var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00000001, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. + missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfError) { + name = jsonFieldsNameOfError[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. 
+ result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *Error) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *Error) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *ErrorError) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. +func (s *ErrorError) encodeFields(e *jx.Encoder) { + { + e.FieldStart("type") + e.Str(s.Type) + } + { + e.FieldStart("description") + e.Str(s.Description) + } +} + +var jsonFieldsNameOfErrorError = [2]string{ + 0: "type", + 1: "description", +} + +// Decode decodes ErrorError from json. +func (s *ErrorError) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode ErrorError to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "type": + requiredBitSet[0] |= 1 << 0 + if err := func() error { + v, err := d.Str() + s.Type = string(v) + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"type\"") + } + case "description": + requiredBitSet[0] |= 1 << 1 + if err := func() error { + v, err := d.Str() + s.Description = string(v) + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"description\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode ErrorError") + } + // Validate required fields. 
+ var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00000011, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. + missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfErrorError) { + name = jsonFieldsNameOfErrorError[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. + result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *ErrorError) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *ErrorError) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode encodes time.Time as json. +func (o OptDateTime) Encode(e *jx.Encoder, format func(*jx.Encoder, time.Time)) { + if !o.Set { + return + } + format(e, o.Value) +} + +// Decode decodes time.Time from json. +func (o *OptDateTime) Decode(d *jx.Decoder, format func(*jx.Decoder) (time.Time, error)) error { + if o == nil { + return errors.New("invalid: unable to decode OptDateTime to nil") + } + o.Set = true + v, err := format(d) + if err != nil { + return err + } + o.Value = v + return nil +} + +// MarshalJSON implements stdjson.Marshaler. 
+func (s OptDateTime) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e, json.EncodeDateTime) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *OptDateTime) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d, json.DecodeDateTime) +} + +// Encode encodes float64 as json. +func (o OptFloat64) Encode(e *jx.Encoder) { + if !o.Set { + return + } + e.Float64(float64(o.Value)) +} + +// Decode decodes float64 from json. +func (o *OptFloat64) Decode(d *jx.Decoder) error { + if o == nil { + return errors.New("invalid: unable to decode OptFloat64 to nil") + } + o.Set = true + v, err := d.Float64() + if err != nil { + return err + } + o.Value = float64(v) + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s OptFloat64) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *OptFloat64) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode encodes PredictionResponseRequest as json. +func (o OptPredictionResponseRequest) Encode(e *jx.Encoder) { + if !o.Set { + return + } + o.Value.Encode(e) +} + +// Decode decodes PredictionResponseRequest from json. +func (o *OptPredictionResponseRequest) Decode(d *jx.Decoder) error { + if o == nil { + return errors.New("invalid: unable to decode OptPredictionResponseRequest to nil") + } + o.Set = true + if err := o.Value.Decode(d); err != nil { + return err + } + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s OptPredictionResponseRequest) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *OptPredictionResponseRequest) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode encodes PredictionResponseWarnings as json. 
+func (o OptPredictionResponseWarnings) Encode(e *jx.Encoder) { + if !o.Set { + return + } + o.Value.Encode(e) +} + +// Decode decodes PredictionResponseWarnings from json. +func (o *OptPredictionResponseWarnings) Decode(d *jx.Decoder) error { + if o == nil { + return errors.New("invalid: unable to decode OptPredictionResponseWarnings to nil") + } + o.Set = true + o.Value = make(PredictionResponseWarnings) + if err := o.Value.Decode(d); err != nil { + return err + } + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s OptPredictionResponseWarnings) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *OptPredictionResponseWarnings) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode encodes string as json. +func (o OptString) Encode(e *jx.Encoder) { + if !o.Set { + return + } + e.Str(string(o.Value)) +} + +// Decode decodes string from json. +func (o *OptString) Decode(d *jx.Decoder) error { + if o == nil { + return errors.New("invalid: unable to decode OptString to nil") + } + o.Set = true + v, err := d.Str() + if err != nil { + return err + } + o.Value = string(v) + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s OptString) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *OptString) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *PredictionResponse) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. 
+func (s *PredictionResponse) encodeFields(e *jx.Encoder) { + { + if s.Request.Set { + e.FieldStart("request") + s.Request.Encode(e) + } + } + { + e.FieldStart("prediction") + e.ArrStart() + for _, elem := range s.Prediction { + elem.Encode(e) + } + e.ArrEnd() + } + { + e.FieldStart("metadata") + s.Metadata.Encode(e) + } + { + if s.Warnings.Set { + e.FieldStart("warnings") + s.Warnings.Encode(e) + } + } +} + +var jsonFieldsNameOfPredictionResponse = [4]string{ + 0: "request", + 1: "prediction", + 2: "metadata", + 3: "warnings", +} + +// Decode decodes PredictionResponse from json. +func (s *PredictionResponse) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponse to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "request": + if err := func() error { + s.Request.Reset() + if err := s.Request.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"request\"") + } + case "prediction": + requiredBitSet[0] |= 1 << 1 + if err := func() error { + s.Prediction = make([]PredictionResponsePredictionItem, 0) + if err := d.Arr(func(d *jx.Decoder) error { + var elem PredictionResponsePredictionItem + if err := elem.Decode(d); err != nil { + return err + } + s.Prediction = append(s.Prediction, elem) + return nil + }); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"prediction\"") + } + case "metadata": + requiredBitSet[0] |= 1 << 2 + if err := func() error { + if err := s.Metadata.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"metadata\"") + } + case "warnings": + if err := func() error { + s.Warnings.Reset() + if err := s.Warnings.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field 
\"warnings\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode PredictionResponse") + } + // Validate required fields. + var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00000110, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. + missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfPredictionResponse) { + name = jsonFieldsNameOfPredictionResponse[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. + result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *PredictionResponse) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponse) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *PredictionResponseMetadata) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. 
+func (s *PredictionResponseMetadata) encodeFields(e *jx.Encoder) { + { + e.FieldStart("start_datetime") + json.EncodeDateTime(e, s.StartDatetime) + } + { + e.FieldStart("complete_datetime") + json.EncodeDateTime(e, s.CompleteDatetime) + } +} + +var jsonFieldsNameOfPredictionResponseMetadata = [2]string{ + 0: "start_datetime", + 1: "complete_datetime", +} + +// Decode decodes PredictionResponseMetadata from json. +func (s *PredictionResponseMetadata) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponseMetadata to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "start_datetime": + requiredBitSet[0] |= 1 << 0 + if err := func() error { + v, err := json.DecodeDateTime(d) + s.StartDatetime = v + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"start_datetime\"") + } + case "complete_datetime": + requiredBitSet[0] |= 1 << 1 + if err := func() error { + v, err := json.DecodeDateTime(d) + s.CompleteDatetime = v + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"complete_datetime\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode PredictionResponseMetadata") + } + // Validate required fields. + var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00000011, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. 
+ missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfPredictionResponseMetadata) { + name = jsonFieldsNameOfPredictionResponseMetadata[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. + result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *PredictionResponseMetadata) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponseMetadata) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *PredictionResponsePredictionItem) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. +func (s *PredictionResponsePredictionItem) encodeFields(e *jx.Encoder) { + { + e.FieldStart("stage") + s.Stage.Encode(e) + } + { + e.FieldStart("trajectory") + e.ArrStart() + for _, elem := range s.Trajectory { + elem.Encode(e) + } + e.ArrEnd() + } +} + +var jsonFieldsNameOfPredictionResponsePredictionItem = [2]string{ + 0: "stage", + 1: "trajectory", +} + +// Decode decodes PredictionResponsePredictionItem from json. 
+func (s *PredictionResponsePredictionItem) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponsePredictionItem to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "stage": + requiredBitSet[0] |= 1 << 0 + if err := func() error { + if err := s.Stage.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"stage\"") + } + case "trajectory": + requiredBitSet[0] |= 1 << 1 + if err := func() error { + s.Trajectory = make([]PredictionResponsePredictionItemTrajectoryItem, 0) + if err := d.Arr(func(d *jx.Decoder) error { + var elem PredictionResponsePredictionItemTrajectoryItem + if err := elem.Decode(d); err != nil { + return err + } + s.Trajectory = append(s.Trajectory, elem) + return nil + }); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"trajectory\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode PredictionResponsePredictionItem") + } + // Validate required fields. + var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00000011, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. 
+ missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfPredictionResponsePredictionItem) { + name = jsonFieldsNameOfPredictionResponsePredictionItem[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. + result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *PredictionResponsePredictionItem) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponsePredictionItem) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode encodes PredictionResponsePredictionItemStage as json. +func (s PredictionResponsePredictionItemStage) Encode(e *jx.Encoder) { + e.Str(string(s)) +} + +// Decode decodes PredictionResponsePredictionItemStage from json. +func (s *PredictionResponsePredictionItemStage) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponsePredictionItemStage to nil") + } + v, err := d.StrBytes() + if err != nil { + return err + } + // Try to use constant string. + switch PredictionResponsePredictionItemStage(v) { + case PredictionResponsePredictionItemStageAscent: + *s = PredictionResponsePredictionItemStageAscent + case PredictionResponsePredictionItemStageDescent: + *s = PredictionResponsePredictionItemStageDescent + case PredictionResponsePredictionItemStageFloat: + *s = PredictionResponsePredictionItemStageFloat + default: + *s = PredictionResponsePredictionItemStage(v) + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. 
+func (s PredictionResponsePredictionItemStage) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponsePredictionItemStage) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *PredictionResponsePredictionItemTrajectoryItem) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. +func (s *PredictionResponsePredictionItemTrajectoryItem) encodeFields(e *jx.Encoder) { + { + e.FieldStart("datetime") + json.EncodeDateTime(e, s.Datetime) + } + { + e.FieldStart("latitude") + e.Float64(s.Latitude) + } + { + e.FieldStart("longitude") + e.Float64(s.Longitude) + } + { + e.FieldStart("altitude") + e.Float64(s.Altitude) + } +} + +var jsonFieldsNameOfPredictionResponsePredictionItemTrajectoryItem = [4]string{ + 0: "datetime", + 1: "latitude", + 2: "longitude", + 3: "altitude", +} + +// Decode decodes PredictionResponsePredictionItemTrajectoryItem from json. 
+func (s *PredictionResponsePredictionItemTrajectoryItem) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponsePredictionItemTrajectoryItem to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "datetime": + requiredBitSet[0] |= 1 << 0 + if err := func() error { + v, err := json.DecodeDateTime(d) + s.Datetime = v + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"datetime\"") + } + case "latitude": + requiredBitSet[0] |= 1 << 1 + if err := func() error { + v, err := d.Float64() + s.Latitude = float64(v) + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"latitude\"") + } + case "longitude": + requiredBitSet[0] |= 1 << 2 + if err := func() error { + v, err := d.Float64() + s.Longitude = float64(v) + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"longitude\"") + } + case "altitude": + requiredBitSet[0] |= 1 << 3 + if err := func() error { + v, err := d.Float64() + s.Altitude = float64(v) + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"altitude\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode PredictionResponsePredictionItemTrajectoryItem") + } + // Validate required fields. + var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00001111, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. 
+ missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfPredictionResponsePredictionItemTrajectoryItem) { + name = jsonFieldsNameOfPredictionResponsePredictionItemTrajectoryItem[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. + result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *PredictionResponsePredictionItemTrajectoryItem) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponsePredictionItemTrajectoryItem) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *PredictionResponseRequest) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. 
+func (s *PredictionResponseRequest) encodeFields(e *jx.Encoder) { + { + if s.Dataset.Set { + e.FieldStart("dataset") + s.Dataset.Encode(e) + } + } + { + if s.LaunchLatitude.Set { + e.FieldStart("launch_latitude") + s.LaunchLatitude.Encode(e) + } + } + { + if s.LaunchLongitude.Set { + e.FieldStart("launch_longitude") + s.LaunchLongitude.Encode(e) + } + } + { + if s.LaunchDatetime.Set { + e.FieldStart("launch_datetime") + s.LaunchDatetime.Encode(e) + } + } + { + if s.LaunchAltitude.Set { + e.FieldStart("launch_altitude") + s.LaunchAltitude.Encode(e) + } + } + { + if s.Profile.Set { + e.FieldStart("profile") + s.Profile.Encode(e) + } + } + { + if s.AscentRate.Set { + e.FieldStart("ascent_rate") + s.AscentRate.Encode(e) + } + } + { + if s.BurstAltitude.Set { + e.FieldStart("burst_altitude") + s.BurstAltitude.Encode(e) + } + } + { + if s.DescentRate.Set { + e.FieldStart("descent_rate") + s.DescentRate.Encode(e) + } + } +} + +var jsonFieldsNameOfPredictionResponseRequest = [9]string{ + 0: "dataset", + 1: "launch_latitude", + 2: "launch_longitude", + 3: "launch_datetime", + 4: "launch_altitude", + 5: "profile", + 6: "ascent_rate", + 7: "burst_altitude", + 8: "descent_rate", +} + +// Decode decodes PredictionResponseRequest from json. 
+func (s *PredictionResponseRequest) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponseRequest to nil") + } + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "dataset": + if err := func() error { + s.Dataset.Reset() + if err := s.Dataset.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"dataset\"") + } + case "launch_latitude": + if err := func() error { + s.LaunchLatitude.Reset() + if err := s.LaunchLatitude.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"launch_latitude\"") + } + case "launch_longitude": + if err := func() error { + s.LaunchLongitude.Reset() + if err := s.LaunchLongitude.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"launch_longitude\"") + } + case "launch_datetime": + if err := func() error { + s.LaunchDatetime.Reset() + if err := s.LaunchDatetime.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"launch_datetime\"") + } + case "launch_altitude": + if err := func() error { + s.LaunchAltitude.Reset() + if err := s.LaunchAltitude.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"launch_altitude\"") + } + case "profile": + if err := func() error { + s.Profile.Reset() + if err := s.Profile.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"profile\"") + } + case "ascent_rate": + if err := func() error { + s.AscentRate.Reset() + if err := s.AscentRate.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"ascent_rate\"") + } + case "burst_altitude": + if err := func() error { + 
s.BurstAltitude.Reset() + if err := s.BurstAltitude.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"burst_altitude\"") + } + case "descent_rate": + if err := func() error { + s.DescentRate.Reset() + if err := s.DescentRate.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"descent_rate\"") + } + default: + return d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode PredictionResponseRequest") + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *PredictionResponseRequest) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponseRequest) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s PredictionResponseWarnings) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields implements json.Marshaler. +func (s PredictionResponseWarnings) encodeFields(e *jx.Encoder) { + for k, elem := range s { + e.FieldStart(k) + + if len(elem) != 0 { + e.Raw(elem) + } + } +} + +// Decode decodes PredictionResponseWarnings from json. +func (s *PredictionResponseWarnings) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode PredictionResponseWarnings to nil") + } + m := s.init() + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + var elem jx.Raw + if err := func() error { + v, err := d.RawAppend(nil) + elem = jx.Raw(v) + if err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrapf(err, "decode field %q", k) + } + m[string(k)] = elem + return nil + }); err != nil { + return errors.Wrap(err, "decode PredictionResponseWarnings") + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. 
+func (s PredictionResponseWarnings) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *PredictionResponseWarnings) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode implements json.Marshaler. +func (s *ReadinessResponse) Encode(e *jx.Encoder) { + e.ObjStart() + s.encodeFields(e) + e.ObjEnd() +} + +// encodeFields encodes fields. +func (s *ReadinessResponse) encodeFields(e *jx.Encoder) { + { + e.FieldStart("status") + s.Status.Encode(e) + } + { + if s.DatasetTime.Set { + e.FieldStart("dataset_time") + s.DatasetTime.Encode(e, json.EncodeDateTime) + } + } + { + if s.ErrorMessage.Set { + e.FieldStart("error_message") + s.ErrorMessage.Encode(e) + } + } +} + +var jsonFieldsNameOfReadinessResponse = [3]string{ + 0: "status", + 1: "dataset_time", + 2: "error_message", +} + +// Decode decodes ReadinessResponse from json. +func (s *ReadinessResponse) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode ReadinessResponse to nil") + } + var requiredBitSet [1]uint8 + + if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error { + switch string(k) { + case "status": + requiredBitSet[0] |= 1 << 0 + if err := func() error { + if err := s.Status.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"status\"") + } + case "dataset_time": + if err := func() error { + s.DatasetTime.Reset() + if err := s.DatasetTime.Decode(d, json.DecodeDateTime); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"dataset_time\"") + } + case "error_message": + if err := func() error { + s.ErrorMessage.Reset() + if err := s.ErrorMessage.Decode(d); err != nil { + return err + } + return nil + }(); err != nil { + return errors.Wrap(err, "decode field \"error_message\"") + } + default: + return 
d.Skip() + } + return nil + }); err != nil { + return errors.Wrap(err, "decode ReadinessResponse") + } + // Validate required fields. + var failures []validate.FieldError + for i, mask := range [1]uint8{ + 0b00000001, + } { + if result := (requiredBitSet[i] & mask) ^ mask; result != 0 { + // Mask only required fields and check equality to mask using XOR. + // + // If XOR result is not zero, result is not equal to expected, so some fields are missed. + // Bits of fields which would be set are actually bits of missed fields. + missed := bits.OnesCount8(result) + for bitN := 0; bitN < missed; bitN++ { + bitIdx := bits.TrailingZeros8(result) + fieldIdx := i*8 + bitIdx + var name string + if fieldIdx < len(jsonFieldsNameOfReadinessResponse) { + name = jsonFieldsNameOfReadinessResponse[fieldIdx] + } else { + name = strconv.Itoa(fieldIdx) + } + failures = append(failures, validate.FieldError{ + Name: name, + Error: validate.ErrFieldRequired, + }) + // Reset bit. + result &^= 1 << bitIdx + } + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s *ReadinessResponse) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *ReadinessResponse) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} + +// Encode encodes ReadinessResponseStatus as json. +func (s ReadinessResponseStatus) Encode(e *jx.Encoder) { + e.Str(string(s)) +} + +// Decode decodes ReadinessResponseStatus from json. +func (s *ReadinessResponseStatus) Decode(d *jx.Decoder) error { + if s == nil { + return errors.New("invalid: unable to decode ReadinessResponseStatus to nil") + } + v, err := d.StrBytes() + if err != nil { + return err + } + // Try to use constant string. 
+ switch ReadinessResponseStatus(v) { + case ReadinessResponseStatusOk: + *s = ReadinessResponseStatusOk + case ReadinessResponseStatusNotReady: + *s = ReadinessResponseStatusNotReady + case ReadinessResponseStatusError: + *s = ReadinessResponseStatusError + default: + *s = ReadinessResponseStatus(v) + } + + return nil +} + +// MarshalJSON implements stdjson.Marshaler. +func (s ReadinessResponseStatus) MarshalJSON() ([]byte, error) { + e := jx.Encoder{} + s.Encode(&e) + return e.Bytes(), nil +} + +// UnmarshalJSON implements stdjson.Unmarshaler. +func (s *ReadinessResponseStatus) UnmarshalJSON(data []byte) error { + d := jx.DecodeBytes(data) + return s.Decode(d) +} diff --git a/pkg/rest/oas_labeler_gen.go b/pkg/rest/oas_labeler_gen.go new file mode 100644 index 0000000..b726eef --- /dev/null +++ b/pkg/rest/oas_labeler_gen.go @@ -0,0 +1,42 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Labeler is used to allow adding custom attributes to the server request metrics. +type Labeler struct { + attrs []attribute.KeyValue +} + +// Add attributes to the Labeler. +func (l *Labeler) Add(attrs ...attribute.KeyValue) { + l.attrs = append(l.attrs, attrs...) +} + +// AttributeSet returns the attributes added to the Labeler as an attribute.Set. +func (l *Labeler) AttributeSet() attribute.Set { + return attribute.NewSet(l.attrs...) +} + +type labelerContextKey struct{} + +// LabelerFromContext retrieves the Labeler from the provided context, if present. +// +// If no Labeler was found in the provided context a new, empty Labeler is returned and the second +// return value is false. In this case it is safe to use the Labeler but any attributes added to +// it will not be used. 
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) { + if l, ok := ctx.Value(labelerContextKey{}).(*Labeler); ok { + return l, true + } + return &Labeler{}, false +} + +func contextWithLabeler(ctx context.Context, l *Labeler) context.Context { + return context.WithValue(ctx, labelerContextKey{}, l) +} diff --git a/pkg/rest/oas_middleware_gen.go b/pkg/rest/oas_middleware_gen.go new file mode 100644 index 0000000..57ea1b4 --- /dev/null +++ b/pkg/rest/oas_middleware_gen.go @@ -0,0 +1,10 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "github.com/ogen-go/ogen/middleware" +) + +// Middleware is middleware type. +type Middleware = middleware.Middleware diff --git a/pkg/rest/oas_operations_gen.go b/pkg/rest/oas_operations_gen.go new file mode 100644 index 0000000..68097b0 --- /dev/null +++ b/pkg/rest/oas_operations_gen.go @@ -0,0 +1,11 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +// OperationName is the ogen operation name +type OperationName = string + +const ( + PerformPredictionOperation OperationName = "PerformPrediction" + ReadinessCheckOperation OperationName = "ReadinessCheck" +) diff --git a/pkg/rest/oas_parameters_gen.go b/pkg/rest/oas_parameters_gen.go new file mode 100644 index 0000000..c3be508 --- /dev/null +++ b/pkg/rest/oas_parameters_gen.go @@ -0,0 +1,679 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "net/http" + "time" + + "github.com/go-faster/errors" + "github.com/ogen-go/ogen/conv" + "github.com/ogen-go/ogen/middleware" + "github.com/ogen-go/ogen/ogenerrors" + "github.com/ogen-go/ogen/uri" + "github.com/ogen-go/ogen/validate" +) + +// PerformPredictionParams is parameters of performPrediction operation. 
+type PerformPredictionParams struct { + LaunchLatitude float64 + LaunchLongitude float64 + LaunchDatetime time.Time + LaunchAltitude OptFloat64 `json:",omitempty,omitzero"` + Profile OptPerformPredictionProfile `json:",omitempty,omitzero"` + AscentRate OptFloat64 `json:",omitempty,omitzero"` + BurstAltitude OptFloat64 `json:",omitempty,omitzero"` + DescentRate OptFloat64 `json:",omitempty,omitzero"` + FloatAltitude OptFloat64 `json:",omitempty,omitzero"` + StopDatetime OptDateTime `json:",omitempty,omitzero"` + Dataset OptDateTime `json:",omitempty,omitzero"` +} + +func unpackPerformPredictionParams(packed middleware.Parameters) (params PerformPredictionParams) { + { + key := middleware.ParameterKey{ + Name: "launch_latitude", + In: "query", + } + params.LaunchLatitude = packed[key].(float64) + } + { + key := middleware.ParameterKey{ + Name: "launch_longitude", + In: "query", + } + params.LaunchLongitude = packed[key].(float64) + } + { + key := middleware.ParameterKey{ + Name: "launch_datetime", + In: "query", + } + params.LaunchDatetime = packed[key].(time.Time) + } + { + key := middleware.ParameterKey{ + Name: "launch_altitude", + In: "query", + } + if v, ok := packed[key]; ok { + params.LaunchAltitude = v.(OptFloat64) + } + } + { + key := middleware.ParameterKey{ + Name: "profile", + In: "query", + } + if v, ok := packed[key]; ok { + params.Profile = v.(OptPerformPredictionProfile) + } + } + { + key := middleware.ParameterKey{ + Name: "ascent_rate", + In: "query", + } + if v, ok := packed[key]; ok { + params.AscentRate = v.(OptFloat64) + } + } + { + key := middleware.ParameterKey{ + Name: "burst_altitude", + In: "query", + } + if v, ok := packed[key]; ok { + params.BurstAltitude = v.(OptFloat64) + } + } + { + key := middleware.ParameterKey{ + Name: "descent_rate", + In: "query", + } + if v, ok := packed[key]; ok { + params.DescentRate = v.(OptFloat64) + } + } + { + key := middleware.ParameterKey{ + Name: "float_altitude", + In: "query", + } + if v, ok := 
packed[key]; ok { + params.FloatAltitude = v.(OptFloat64) + } + } + { + key := middleware.ParameterKey{ + Name: "stop_datetime", + In: "query", + } + if v, ok := packed[key]; ok { + params.StopDatetime = v.(OptDateTime) + } + } + { + key := middleware.ParameterKey{ + Name: "dataset", + In: "query", + } + if v, ok := packed[key]; ok { + params.Dataset = v.(OptDateTime) + } + } + return params +} + +func decodePerformPredictionParams(args [0]string, argsEscaped bool, r *http.Request) (params PerformPredictionParams, _ error) { + q := uri.NewQueryDecoder(r.URL.Query()) + // Decode query: launch_latitude. + if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "launch_latitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + params.LaunchLatitude = c + return nil + }); err != nil { + return err + } + if err := func() error { + if err := (validate.Float{}).Validate(float64(params.LaunchLatitude)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } else { + return err + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "launch_latitude", + In: "query", + Err: err, + } + } + // Decode query: launch_longitude. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "launch_longitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + params.LaunchLongitude = c + return nil + }); err != nil { + return err + } + if err := func() error { + if err := (validate.Float{}).Validate(float64(params.LaunchLongitude)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } else { + return err + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "launch_longitude", + In: "query", + Err: err, + } + } + // Decode query: launch_datetime. + if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "launch_datetime", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToDateTime(val) + if err != nil { + return err + } + + params.LaunchDatetime = c + return nil + }); err != nil { + return err + } + } else { + return err + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "launch_datetime", + In: "query", + Err: err, + } + } + // Decode query: launch_altitude. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "launch_altitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotLaunchAltitudeVal float64 + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + paramsDotLaunchAltitudeVal = c + return nil + }(); err != nil { + return err + } + params.LaunchAltitude.SetTo(paramsDotLaunchAltitudeVal) + return nil + }); err != nil { + return err + } + if err := func() error { + if value, ok := params.LaunchAltitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "launch_altitude", + In: "query", + Err: err, + } + } + // Set default value for query: profile. + { + val := PerformPredictionProfile("standard_profile") + params.Profile.SetTo(val) + } + // Decode query: profile. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "profile", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotProfileVal PerformPredictionProfile + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToString(val) + if err != nil { + return err + } + + paramsDotProfileVal = PerformPredictionProfile(c) + return nil + }(); err != nil { + return err + } + params.Profile.SetTo(paramsDotProfileVal) + return nil + }); err != nil { + return err + } + if err := func() error { + if value, ok := params.Profile.Get(); ok { + if err := func() error { + if err := value.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "profile", + In: "query", + Err: err, + } + } + // Decode query: ascent_rate. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "ascent_rate", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotAscentRateVal float64 + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + paramsDotAscentRateVal = c + return nil + }(); err != nil { + return err + } + params.AscentRate.SetTo(paramsDotAscentRateVal) + return nil + }); err != nil { + return err + } + if err := func() error { + if value, ok := params.AscentRate.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "ascent_rate", + In: "query", + Err: err, + } + } + // Decode query: burst_altitude. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "burst_altitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotBurstAltitudeVal float64 + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + paramsDotBurstAltitudeVal = c + return nil + }(); err != nil { + return err + } + params.BurstAltitude.SetTo(paramsDotBurstAltitudeVal) + return nil + }); err != nil { + return err + } + if err := func() error { + if value, ok := params.BurstAltitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "burst_altitude", + In: "query", + Err: err, + } + } + // Decode query: descent_rate. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "descent_rate", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotDescentRateVal float64 + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + paramsDotDescentRateVal = c + return nil + }(); err != nil { + return err + } + params.DescentRate.SetTo(paramsDotDescentRateVal) + return nil + }); err != nil { + return err + } + if err := func() error { + if value, ok := params.DescentRate.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "descent_rate", + In: "query", + Err: err, + } + } + // Decode query: float_altitude. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "float_altitude", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotFloatAltitudeVal float64 + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToFloat64(val) + if err != nil { + return err + } + + paramsDotFloatAltitudeVal = c + return nil + }(); err != nil { + return err + } + params.FloatAltitude.SetTo(paramsDotFloatAltitudeVal) + return nil + }); err != nil { + return err + } + if err := func() error { + if value, ok := params.FloatAltitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "float_altitude", + In: "query", + Err: err, + } + } + // Decode query: stop_datetime. + if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "stop_datetime", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotStopDatetimeVal time.Time + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToDateTime(val) + if err != nil { + return err + } + + paramsDotStopDatetimeVal = c + return nil + }(); err != nil { + return err + } + params.StopDatetime.SetTo(paramsDotStopDatetimeVal) + return nil + }); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "stop_datetime", + In: "query", + Err: err, + } + } + // Decode query: dataset. 
+ if err := func() error { + cfg := uri.QueryParameterDecodingConfig{ + Name: "dataset", + Style: uri.QueryStyleForm, + Explode: true, + } + + if err := q.HasParam(cfg); err == nil { + if err := q.DecodeParam(cfg, func(d uri.Decoder) error { + var paramsDotDatasetVal time.Time + if err := func() error { + val, err := d.DecodeValue() + if err != nil { + return err + } + + c, err := conv.ToDateTime(val) + if err != nil { + return err + } + + paramsDotDatasetVal = c + return nil + }(); err != nil { + return err + } + params.Dataset.SetTo(paramsDotDatasetVal) + return nil + }); err != nil { + return err + } + } + return nil + }(); err != nil { + return params, &ogenerrors.DecodeParamError{ + Name: "dataset", + In: "query", + Err: err, + } + } + return params, nil +} diff --git a/pkg/rest/oas_request_decoders_gen.go b/pkg/rest/oas_request_decoders_gen.go new file mode 100644 index 0000000..1ad6008 --- /dev/null +++ b/pkg/rest/oas_request_decoders_gen.go @@ -0,0 +1,3 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest diff --git a/pkg/rest/oas_request_encoders_gen.go b/pkg/rest/oas_request_encoders_gen.go new file mode 100644 index 0000000..1ad6008 --- /dev/null +++ b/pkg/rest/oas_request_encoders_gen.go @@ -0,0 +1,3 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest diff --git a/pkg/rest/oas_response_decoders_gen.go b/pkg/rest/oas_response_decoders_gen.go new file mode 100644 index 0000000..842583d --- /dev/null +++ b/pkg/rest/oas_response_decoders_gen.go @@ -0,0 +1,198 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "io" + "mime" + "net/http" + + "github.com/go-faster/errors" + "github.com/go-faster/jx" + "github.com/ogen-go/ogen/ogenerrors" + "github.com/ogen-go/ogen/validate" +) + +func decodePerformPredictionResponse(resp *http.Response) (res *PredictionResponse, _ error) { + switch resp.StatusCode { + case 200: + // Code 200. 
+ ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return res, errors.Wrap(err, "parse media type") + } + switch { + case ct == "application/json": + buf, err := io.ReadAll(resp.Body) + if err != nil { + return res, err + } + d := jx.DecodeBytes(buf) + + var response PredictionResponse + if err := func() error { + if err := response.Decode(d); err != nil { + return err + } + if err := d.Skip(); err != io.EOF { + return errors.New("unexpected trailing data") + } + return nil + }(); err != nil { + err = &ogenerrors.DecodeBodyError{ + ContentType: ct, + Body: buf, + Err: err, + } + return res, err + } + // Validate response. + if err := func() error { + if err := response.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + return res, errors.Wrap(err, "validate") + } + return &response, nil + default: + return res, validate.InvalidContentType(ct) + } + } + // Convenient error response. + defRes, err := func() (res *ErrorStatusCode, err error) { + ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return res, errors.Wrap(err, "parse media type") + } + switch { + case ct == "application/json": + buf, err := io.ReadAll(resp.Body) + if err != nil { + return res, err + } + d := jx.DecodeBytes(buf) + + var response Error + if err := func() error { + if err := response.Decode(d); err != nil { + return err + } + if err := d.Skip(); err != io.EOF { + return errors.New("unexpected trailing data") + } + return nil + }(); err != nil { + err = &ogenerrors.DecodeBodyError{ + ContentType: ct, + Body: buf, + Err: err, + } + return res, err + } + return &ErrorStatusCode{ + StatusCode: resp.StatusCode, + Response: response, + }, nil + default: + return res, validate.InvalidContentType(ct) + } + }() + if err != nil { + return res, errors.Wrapf(err, "default (code %d)", resp.StatusCode) + } + return res, errors.Wrap(defRes, "error") +} + +func decodeReadinessCheckResponse(resp 
*http.Response) (res *ReadinessResponse, _ error) { + switch resp.StatusCode { + case 200: + // Code 200. + ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return res, errors.Wrap(err, "parse media type") + } + switch { + case ct == "application/json": + buf, err := io.ReadAll(resp.Body) + if err != nil { + return res, err + } + d := jx.DecodeBytes(buf) + + var response ReadinessResponse + if err := func() error { + if err := response.Decode(d); err != nil { + return err + } + if err := d.Skip(); err != io.EOF { + return errors.New("unexpected trailing data") + } + return nil + }(); err != nil { + err = &ogenerrors.DecodeBodyError{ + ContentType: ct, + Body: buf, + Err: err, + } + return res, err + } + // Validate response. + if err := func() error { + if err := response.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + return res, errors.Wrap(err, "validate") + } + return &response, nil + default: + return res, validate.InvalidContentType(ct) + } + } + // Convenient error response. 
+ defRes, err := func() (res *ErrorStatusCode, err error) { + ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return res, errors.Wrap(err, "parse media type") + } + switch { + case ct == "application/json": + buf, err := io.ReadAll(resp.Body) + if err != nil { + return res, err + } + d := jx.DecodeBytes(buf) + + var response Error + if err := func() error { + if err := response.Decode(d); err != nil { + return err + } + if err := d.Skip(); err != io.EOF { + return errors.New("unexpected trailing data") + } + return nil + }(); err != nil { + err = &ogenerrors.DecodeBodyError{ + ContentType: ct, + Body: buf, + Err: err, + } + return res, err + } + return &ErrorStatusCode{ + StatusCode: resp.StatusCode, + Response: response, + }, nil + default: + return res, validate.InvalidContentType(ct) + } + }() + if err != nil { + return res, errors.Wrapf(err, "default (code %d)", resp.StatusCode) + } + return res, errors.Wrap(defRes, "error") +} diff --git a/pkg/rest/oas_response_encoders_gen.go b/pkg/rest/oas_response_encoders_gen.go new file mode 100644 index 0000000..37892a3 --- /dev/null +++ b/pkg/rest/oas_response_encoders_gen.go @@ -0,0 +1,68 @@ +// Code generated by ogen, DO NOT EDIT. 
+ +package rest + +import ( + "net/http" + + "github.com/go-faster/errors" + "github.com/go-faster/jx" + ht "github.com/ogen-go/ogen/http" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +func encodePerformPredictionResponse(response *PredictionResponse, w http.ResponseWriter, span trace.Span) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(200) + span.SetStatus(codes.Ok, http.StatusText(200)) + + e := new(jx.Encoder) + response.Encode(e) + if _, err := e.WriteTo(w); err != nil { + return errors.Wrap(err, "write") + } + + return nil +} + +func encodeReadinessCheckResponse(response *ReadinessResponse, w http.ResponseWriter, span trace.Span) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(200) + span.SetStatus(codes.Ok, http.StatusText(200)) + + e := new(jx.Encoder) + response.Encode(e) + if _, err := e.WriteTo(w); err != nil { + return errors.Wrap(err, "write") + } + + return nil +} + +func encodeErrorResponse(response *ErrorStatusCode, w http.ResponseWriter, span trace.Span) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + code := response.StatusCode + if code == 0 { + // Set default status code. + code = http.StatusOK + } + w.WriteHeader(code) + if st := http.StatusText(code); code >= http.StatusBadRequest { + span.SetStatus(codes.Error, st) + } else { + span.SetStatus(codes.Ok, st) + } + + e := new(jx.Encoder) + response.Response.Encode(e) + if _, err := e.WriteTo(w); err != nil { + return errors.Wrap(err, "write") + } + + if code >= http.StatusInternalServerError { + return errors.Wrapf(ht.ErrInternalServerErrorResponse, "code: %d, message: %s", code, http.StatusText(code)) + } + return nil + +} diff --git a/pkg/rest/oas_router_gen.go b/pkg/rest/oas_router_gen.go new file mode 100644 index 0000000..ac8879a --- /dev/null +++ b/pkg/rest/oas_router_gen.go @@ -0,0 +1,268 @@ +// Code generated by ogen, DO NOT EDIT. 
+ +package rest + +import ( + "net/http" + "net/url" + "strings" + + "github.com/ogen-go/ogen/uri" +) + +func (s *Server) cutPrefix(path string) (string, bool) { + prefix := s.cfg.Prefix + if prefix == "" { + return path, true + } + if !strings.HasPrefix(path, prefix) { + // Prefix doesn't match. + return "", false + } + // Cut prefix from the path. + return strings.TrimPrefix(path, prefix), true +} + +// ServeHTTP serves http request as defined by OpenAPI v3 specification, +// calling handler that matches the path or returning not found error. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + elem := r.URL.Path + elemIsEscaped := false + if rawPath := r.URL.RawPath; rawPath != "" { + if normalized, ok := uri.NormalizeEscapedPath(rawPath); ok { + elem = normalized + elemIsEscaped = strings.ContainsRune(elem, '%') + } + } + + elem, ok := s.cutPrefix(elem) + if !ok || len(elem) == 0 { + s.notFound(w, r) + return + } + + // Static code generated router with unwrapped path search. + switch { + default: + if len(elem) == 0 { + break + } + switch elem[0] { + case '/': // Prefix: "/" + + if l := len("/"); len(elem) >= l && elem[0:l] == "/" { + elem = elem[l:] + } else { + break + } + + if len(elem) == 0 { + break + } + switch elem[0] { + case 'a': // Prefix: "api/v1/prediction" + + if l := len("api/v1/prediction"); len(elem) >= l && elem[0:l] == "api/v1/prediction" { + elem = elem[l:] + } else { + break + } + + if len(elem) == 0 { + // Leaf node. + switch r.Method { + case "GET": + s.handlePerformPredictionRequest([0]string{}, elemIsEscaped, w, r) + default: + s.notAllowed(w, r, notAllowedParams{ + allowedMethods: "GET", + allowedHeaders: nil, + acceptPost: "", + acceptPatch: "", + }) + } + + return + } + + case 'r': // Prefix: "ready" + + if l := len("ready"); len(elem) >= l && elem[0:l] == "ready" { + elem = elem[l:] + } else { + break + } + + if len(elem) == 0 { + // Leaf node. 
+ switch r.Method { + case "GET": + s.handleReadinessCheckRequest([0]string{}, elemIsEscaped, w, r) + default: + s.notAllowed(w, r, notAllowedParams{ + allowedMethods: "GET", + allowedHeaders: nil, + acceptPost: "", + acceptPatch: "", + }) + } + + return + } + + } + + } + } + s.notFound(w, r) +} + +// Route is route object. +type Route struct { + name string + summary string + operationID string + operationGroup string + pathPattern string + count int + args [0]string +} + +// Name returns ogen operation name. +// +// It is guaranteed to be unique and not empty. +func (r Route) Name() string { + return r.name +} + +// Summary returns OpenAPI summary. +func (r Route) Summary() string { + return r.summary +} + +// OperationID returns OpenAPI operationId. +func (r Route) OperationID() string { + return r.operationID +} + +// OperationGroup returns the x-ogen-operation-group value. +func (r Route) OperationGroup() string { + return r.operationGroup +} + +// PathPattern returns OpenAPI path. +func (r Route) PathPattern() string { + return r.pathPattern +} + +// Args returns parsed arguments. +func (r Route) Args() []string { + return r.args[:r.count] +} + +// FindRoute finds Route for given method and path. +// +// Note: this method does not unescape path or handle reserved characters in path properly. Use FindPath instead. +func (s *Server) FindRoute(method, path string) (Route, bool) { + return s.FindPath(method, &url.URL{Path: path}) +} + +// FindPath finds Route for given method and URL. 
+func (s *Server) FindPath(method string, u *url.URL) (r Route, _ bool) { + var ( + elem = u.Path + args = r.args + ) + if rawPath := u.RawPath; rawPath != "" { + if normalized, ok := uri.NormalizeEscapedPath(rawPath); ok { + elem = normalized + } + defer func() { + for i, arg := range r.args[:r.count] { + if unescaped, err := url.PathUnescape(arg); err == nil { + r.args[i] = unescaped + } + } + }() + } + + elem, ok := s.cutPrefix(elem) + if !ok { + return r, false + } + + // Static code generated router with unwrapped path search. + switch { + default: + if len(elem) == 0 { + break + } + switch elem[0] { + case '/': // Prefix: "/" + + if l := len("/"); len(elem) >= l && elem[0:l] == "/" { + elem = elem[l:] + } else { + break + } + + if len(elem) == 0 { + break + } + switch elem[0] { + case 'a': // Prefix: "api/v1/prediction" + + if l := len("api/v1/prediction"); len(elem) >= l && elem[0:l] == "api/v1/prediction" { + elem = elem[l:] + } else { + break + } + + if len(elem) == 0 { + // Leaf node. + switch method { + case "GET": + r.name = PerformPredictionOperation + r.summary = "Perform prediction" + r.operationID = "performPrediction" + r.operationGroup = "" + r.pathPattern = "/api/v1/prediction" + r.args = args + r.count = 0 + return r, true + default: + return + } + } + + case 'r': // Prefix: "ready" + + if l := len("ready"); len(elem) >= l && elem[0:l] == "ready" { + elem = elem[l:] + } else { + break + } + + if len(elem) == 0 { + // Leaf node. + switch method { + case "GET": + r.name = ReadinessCheckOperation + r.summary = "Readiness check" + r.operationID = "readinessCheck" + r.operationGroup = "" + r.pathPattern = "/ready" + r.args = args + r.count = 0 + return r, true + default: + return + } + } + + } + + } + } + return r, false +} diff --git a/pkg/rest/oas_schemas_gen.go b/pkg/rest/oas_schemas_gen.go new file mode 100644 index 0000000..28faae8 --- /dev/null +++ b/pkg/rest/oas_schemas_gen.go @@ -0,0 +1,789 @@ +// Code generated by ogen, DO NOT EDIT. 
+ +package rest + +import ( + "fmt" + "time" + + "github.com/go-faster/errors" + "github.com/go-faster/jx" +) + +func (s *ErrorStatusCode) Error() string { + return fmt.Sprintf("code %d: %+v", s.StatusCode, s.Response) +} + +// Ref: #/components/schemas/Error +type Error struct { + Error ErrorError `json:"error"` +} + +// GetError returns the value of Error. +func (s *Error) GetError() ErrorError { + return s.Error +} + +// SetError sets the value of Error. +func (s *Error) SetError(val ErrorError) { + s.Error = val +} + +type ErrorError struct { + Type string `json:"type"` + Description string `json:"description"` +} + +// GetType returns the value of Type. +func (s *ErrorError) GetType() string { + return s.Type +} + +// GetDescription returns the value of Description. +func (s *ErrorError) GetDescription() string { + return s.Description +} + +// SetType sets the value of Type. +func (s *ErrorError) SetType(val string) { + s.Type = val +} + +// SetDescription sets the value of Description. +func (s *ErrorError) SetDescription(val string) { + s.Description = val +} + +// ErrorStatusCode wraps Error with StatusCode. +type ErrorStatusCode struct { + StatusCode int + Response Error +} + +// GetStatusCode returns the value of StatusCode. +func (s *ErrorStatusCode) GetStatusCode() int { + return s.StatusCode +} + +// GetResponse returns the value of Response. +func (s *ErrorStatusCode) GetResponse() Error { + return s.Response +} + +// SetStatusCode sets the value of StatusCode. +func (s *ErrorStatusCode) SetStatusCode(val int) { + s.StatusCode = val +} + +// SetResponse sets the value of Response. +func (s *ErrorStatusCode) SetResponse(val Error) { + s.Response = val +} + +// NewOptDateTime returns new OptDateTime with value set to v. +func NewOptDateTime(v time.Time) OptDateTime { + return OptDateTime{ + Value: v, + Set: true, + } +} + +// OptDateTime is optional time.Time. 
+type OptDateTime struct { + Value time.Time + Set bool +} + +// IsSet returns true if OptDateTime was set. +func (o OptDateTime) IsSet() bool { return o.Set } + +// Reset unsets value. +func (o *OptDateTime) Reset() { + var v time.Time + o.Value = v + o.Set = false +} + +// SetTo sets value to v. +func (o *OptDateTime) SetTo(v time.Time) { + o.Set = true + o.Value = v +} + +// Get returns value and boolean that denotes whether value was set. +func (o OptDateTime) Get() (v time.Time, ok bool) { + if !o.Set { + return v, false + } + return o.Value, true +} + +// Or returns value if set, or given parameter if does not. +func (o OptDateTime) Or(d time.Time) time.Time { + if v, ok := o.Get(); ok { + return v + } + return d +} + +// NewOptFloat64 returns new OptFloat64 with value set to v. +func NewOptFloat64(v float64) OptFloat64 { + return OptFloat64{ + Value: v, + Set: true, + } +} + +// OptFloat64 is optional float64. +type OptFloat64 struct { + Value float64 + Set bool +} + +// IsSet returns true if OptFloat64 was set. +func (o OptFloat64) IsSet() bool { return o.Set } + +// Reset unsets value. +func (o *OptFloat64) Reset() { + var v float64 + o.Value = v + o.Set = false +} + +// SetTo sets value to v. +func (o *OptFloat64) SetTo(v float64) { + o.Set = true + o.Value = v +} + +// Get returns value and boolean that denotes whether value was set. +func (o OptFloat64) Get() (v float64, ok bool) { + if !o.Set { + return v, false + } + return o.Value, true +} + +// Or returns value if set, or given parameter if does not. +func (o OptFloat64) Or(d float64) float64 { + if v, ok := o.Get(); ok { + return v + } + return d +} + +// NewOptPerformPredictionProfile returns new OptPerformPredictionProfile with value set to v. +func NewOptPerformPredictionProfile(v PerformPredictionProfile) OptPerformPredictionProfile { + return OptPerformPredictionProfile{ + Value: v, + Set: true, + } +} + +// OptPerformPredictionProfile is optional PerformPredictionProfile. 
+type OptPerformPredictionProfile struct { + Value PerformPredictionProfile + Set bool +} + +// IsSet returns true if OptPerformPredictionProfile was set. +func (o OptPerformPredictionProfile) IsSet() bool { return o.Set } + +// Reset unsets value. +func (o *OptPerformPredictionProfile) Reset() { + var v PerformPredictionProfile + o.Value = v + o.Set = false +} + +// SetTo sets value to v. +func (o *OptPerformPredictionProfile) SetTo(v PerformPredictionProfile) { + o.Set = true + o.Value = v +} + +// Get returns value and boolean that denotes whether value was set. +func (o OptPerformPredictionProfile) Get() (v PerformPredictionProfile, ok bool) { + if !o.Set { + return v, false + } + return o.Value, true +} + +// Or returns value if set, or given parameter if does not. +func (o OptPerformPredictionProfile) Or(d PerformPredictionProfile) PerformPredictionProfile { + if v, ok := o.Get(); ok { + return v + } + return d +} + +// NewOptPredictionResponseRequest returns new OptPredictionResponseRequest with value set to v. +func NewOptPredictionResponseRequest(v PredictionResponseRequest) OptPredictionResponseRequest { + return OptPredictionResponseRequest{ + Value: v, + Set: true, + } +} + +// OptPredictionResponseRequest is optional PredictionResponseRequest. +type OptPredictionResponseRequest struct { + Value PredictionResponseRequest + Set bool +} + +// IsSet returns true if OptPredictionResponseRequest was set. +func (o OptPredictionResponseRequest) IsSet() bool { return o.Set } + +// Reset unsets value. +func (o *OptPredictionResponseRequest) Reset() { + var v PredictionResponseRequest + o.Value = v + o.Set = false +} + +// SetTo sets value to v. +func (o *OptPredictionResponseRequest) SetTo(v PredictionResponseRequest) { + o.Set = true + o.Value = v +} + +// Get returns value and boolean that denotes whether value was set. 
+func (o OptPredictionResponseRequest) Get() (v PredictionResponseRequest, ok bool) { + if !o.Set { + return v, false + } + return o.Value, true +} + +// Or returns value if set, or given parameter if does not. +func (o OptPredictionResponseRequest) Or(d PredictionResponseRequest) PredictionResponseRequest { + if v, ok := o.Get(); ok { + return v + } + return d +} + +// NewOptPredictionResponseWarnings returns new OptPredictionResponseWarnings with value set to v. +func NewOptPredictionResponseWarnings(v PredictionResponseWarnings) OptPredictionResponseWarnings { + return OptPredictionResponseWarnings{ + Value: v, + Set: true, + } +} + +// OptPredictionResponseWarnings is optional PredictionResponseWarnings. +type OptPredictionResponseWarnings struct { + Value PredictionResponseWarnings + Set bool +} + +// IsSet returns true if OptPredictionResponseWarnings was set. +func (o OptPredictionResponseWarnings) IsSet() bool { return o.Set } + +// Reset unsets value. +func (o *OptPredictionResponseWarnings) Reset() { + var v PredictionResponseWarnings + o.Value = v + o.Set = false +} + +// SetTo sets value to v. +func (o *OptPredictionResponseWarnings) SetTo(v PredictionResponseWarnings) { + o.Set = true + o.Value = v +} + +// Get returns value and boolean that denotes whether value was set. +func (o OptPredictionResponseWarnings) Get() (v PredictionResponseWarnings, ok bool) { + if !o.Set { + return v, false + } + return o.Value, true +} + +// Or returns value if set, or given parameter if does not. +func (o OptPredictionResponseWarnings) Or(d PredictionResponseWarnings) PredictionResponseWarnings { + if v, ok := o.Get(); ok { + return v + } + return d +} + +// NewOptString returns new OptString with value set to v. +func NewOptString(v string) OptString { + return OptString{ + Value: v, + Set: true, + } +} + +// OptString is optional string. +type OptString struct { + Value string + Set bool +} + +// IsSet returns true if OptString was set. 
+func (o OptString) IsSet() bool { return o.Set } + +// Reset unsets value. +func (o *OptString) Reset() { + var v string + o.Value = v + o.Set = false +} + +// SetTo sets value to v. +func (o *OptString) SetTo(v string) { + o.Set = true + o.Value = v +} + +// Get returns value and boolean that denotes whether value was set. +func (o OptString) Get() (v string, ok bool) { + if !o.Set { + return v, false + } + return o.Value, true +} + +// Or returns value if set, or given parameter if does not. +func (o OptString) Or(d string) string { + if v, ok := o.Get(); ok { + return v + } + return d +} + +type PerformPredictionProfile string + +const ( + PerformPredictionProfileStandardProfile PerformPredictionProfile = "standard_profile" + PerformPredictionProfileFloatProfile PerformPredictionProfile = "float_profile" +) + +// AllValues returns all PerformPredictionProfile values. +func (PerformPredictionProfile) AllValues() []PerformPredictionProfile { + return []PerformPredictionProfile{ + PerformPredictionProfileStandardProfile, + PerformPredictionProfileFloatProfile, + } +} + +// MarshalText implements encoding.TextMarshaler. +func (s PerformPredictionProfile) MarshalText() ([]byte, error) { + switch s { + case PerformPredictionProfileStandardProfile: + return []byte(s), nil + case PerformPredictionProfileFloatProfile: + return []byte(s), nil + default: + return nil, errors.Errorf("invalid value: %q", s) + } +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (s *PerformPredictionProfile) UnmarshalText(data []byte) error { + switch PerformPredictionProfile(data) { + case PerformPredictionProfileStandardProfile: + *s = PerformPredictionProfileStandardProfile + return nil + case PerformPredictionProfileFloatProfile: + *s = PerformPredictionProfileFloatProfile + return nil + default: + return errors.Errorf("invalid value: %q", data) + } +} + +// Ref: #/components/schemas/PredictionResponse +type PredictionResponse struct { + Request OptPredictionResponseRequest `json:"request"` + Prediction []PredictionResponsePredictionItem `json:"prediction"` + Metadata PredictionResponseMetadata `json:"metadata"` + Warnings OptPredictionResponseWarnings `json:"warnings"` +} + +// GetRequest returns the value of Request. +func (s *PredictionResponse) GetRequest() OptPredictionResponseRequest { + return s.Request +} + +// GetPrediction returns the value of Prediction. +func (s *PredictionResponse) GetPrediction() []PredictionResponsePredictionItem { + return s.Prediction +} + +// GetMetadata returns the value of Metadata. +func (s *PredictionResponse) GetMetadata() PredictionResponseMetadata { + return s.Metadata +} + +// GetWarnings returns the value of Warnings. +func (s *PredictionResponse) GetWarnings() OptPredictionResponseWarnings { + return s.Warnings +} + +// SetRequest sets the value of Request. +func (s *PredictionResponse) SetRequest(val OptPredictionResponseRequest) { + s.Request = val +} + +// SetPrediction sets the value of Prediction. +func (s *PredictionResponse) SetPrediction(val []PredictionResponsePredictionItem) { + s.Prediction = val +} + +// SetMetadata sets the value of Metadata. +func (s *PredictionResponse) SetMetadata(val PredictionResponseMetadata) { + s.Metadata = val +} + +// SetWarnings sets the value of Warnings. 
+func (s *PredictionResponse) SetWarnings(val OptPredictionResponseWarnings) { + s.Warnings = val +} + +type PredictionResponseMetadata struct { + StartDatetime time.Time `json:"start_datetime"` + CompleteDatetime time.Time `json:"complete_datetime"` +} + +// GetStartDatetime returns the value of StartDatetime. +func (s *PredictionResponseMetadata) GetStartDatetime() time.Time { + return s.StartDatetime +} + +// GetCompleteDatetime returns the value of CompleteDatetime. +func (s *PredictionResponseMetadata) GetCompleteDatetime() time.Time { + return s.CompleteDatetime +} + +// SetStartDatetime sets the value of StartDatetime. +func (s *PredictionResponseMetadata) SetStartDatetime(val time.Time) { + s.StartDatetime = val +} + +// SetCompleteDatetime sets the value of CompleteDatetime. +func (s *PredictionResponseMetadata) SetCompleteDatetime(val time.Time) { + s.CompleteDatetime = val +} + +type PredictionResponsePredictionItem struct { + Stage PredictionResponsePredictionItemStage `json:"stage"` + Trajectory []PredictionResponsePredictionItemTrajectoryItem `json:"trajectory"` +} + +// GetStage returns the value of Stage. +func (s *PredictionResponsePredictionItem) GetStage() PredictionResponsePredictionItemStage { + return s.Stage +} + +// GetTrajectory returns the value of Trajectory. +func (s *PredictionResponsePredictionItem) GetTrajectory() []PredictionResponsePredictionItemTrajectoryItem { + return s.Trajectory +} + +// SetStage sets the value of Stage. +func (s *PredictionResponsePredictionItem) SetStage(val PredictionResponsePredictionItemStage) { + s.Stage = val +} + +// SetTrajectory sets the value of Trajectory. 
+func (s *PredictionResponsePredictionItem) SetTrajectory(val []PredictionResponsePredictionItemTrajectoryItem) { + s.Trajectory = val +} + +type PredictionResponsePredictionItemStage string + +const ( + PredictionResponsePredictionItemStageAscent PredictionResponsePredictionItemStage = "ascent" + PredictionResponsePredictionItemStageDescent PredictionResponsePredictionItemStage = "descent" + PredictionResponsePredictionItemStageFloat PredictionResponsePredictionItemStage = "float" +) + +// AllValues returns all PredictionResponsePredictionItemStage values. +func (PredictionResponsePredictionItemStage) AllValues() []PredictionResponsePredictionItemStage { + return []PredictionResponsePredictionItemStage{ + PredictionResponsePredictionItemStageAscent, + PredictionResponsePredictionItemStageDescent, + PredictionResponsePredictionItemStageFloat, + } +} + +// MarshalText implements encoding.TextMarshaler. +func (s PredictionResponsePredictionItemStage) MarshalText() ([]byte, error) { + switch s { + case PredictionResponsePredictionItemStageAscent: + return []byte(s), nil + case PredictionResponsePredictionItemStageDescent: + return []byte(s), nil + case PredictionResponsePredictionItemStageFloat: + return []byte(s), nil + default: + return nil, errors.Errorf("invalid value: %q", s) + } +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (s *PredictionResponsePredictionItemStage) UnmarshalText(data []byte) error { + switch PredictionResponsePredictionItemStage(data) { + case PredictionResponsePredictionItemStageAscent: + *s = PredictionResponsePredictionItemStageAscent + return nil + case PredictionResponsePredictionItemStageDescent: + *s = PredictionResponsePredictionItemStageDescent + return nil + case PredictionResponsePredictionItemStageFloat: + *s = PredictionResponsePredictionItemStageFloat + return nil + default: + return errors.Errorf("invalid value: %q", data) + } +} + +type PredictionResponsePredictionItemTrajectoryItem struct { + Datetime time.Time `json:"datetime"` + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Altitude float64 `json:"altitude"` +} + +// GetDatetime returns the value of Datetime. +func (s *PredictionResponsePredictionItemTrajectoryItem) GetDatetime() time.Time { + return s.Datetime +} + +// GetLatitude returns the value of Latitude. +func (s *PredictionResponsePredictionItemTrajectoryItem) GetLatitude() float64 { + return s.Latitude +} + +// GetLongitude returns the value of Longitude. +func (s *PredictionResponsePredictionItemTrajectoryItem) GetLongitude() float64 { + return s.Longitude +} + +// GetAltitude returns the value of Altitude. +func (s *PredictionResponsePredictionItemTrajectoryItem) GetAltitude() float64 { + return s.Altitude +} + +// SetDatetime sets the value of Datetime. +func (s *PredictionResponsePredictionItemTrajectoryItem) SetDatetime(val time.Time) { + s.Datetime = val +} + +// SetLatitude sets the value of Latitude. +func (s *PredictionResponsePredictionItemTrajectoryItem) SetLatitude(val float64) { + s.Latitude = val +} + +// SetLongitude sets the value of Longitude. +func (s *PredictionResponsePredictionItemTrajectoryItem) SetLongitude(val float64) { + s.Longitude = val +} + +// SetAltitude sets the value of Altitude. 
+func (s *PredictionResponsePredictionItemTrajectoryItem) SetAltitude(val float64) { + s.Altitude = val +} + +type PredictionResponseRequest struct { + Dataset OptString `json:"dataset"` + LaunchLatitude OptFloat64 `json:"launch_latitude"` + LaunchLongitude OptFloat64 `json:"launch_longitude"` + LaunchDatetime OptString `json:"launch_datetime"` + LaunchAltitude OptFloat64 `json:"launch_altitude"` + Profile OptString `json:"profile"` + AscentRate OptFloat64 `json:"ascent_rate"` + BurstAltitude OptFloat64 `json:"burst_altitude"` + DescentRate OptFloat64 `json:"descent_rate"` +} + +// GetDataset returns the value of Dataset. +func (s *PredictionResponseRequest) GetDataset() OptString { + return s.Dataset +} + +// GetLaunchLatitude returns the value of LaunchLatitude. +func (s *PredictionResponseRequest) GetLaunchLatitude() OptFloat64 { + return s.LaunchLatitude +} + +// GetLaunchLongitude returns the value of LaunchLongitude. +func (s *PredictionResponseRequest) GetLaunchLongitude() OptFloat64 { + return s.LaunchLongitude +} + +// GetLaunchDatetime returns the value of LaunchDatetime. +func (s *PredictionResponseRequest) GetLaunchDatetime() OptString { + return s.LaunchDatetime +} + +// GetLaunchAltitude returns the value of LaunchAltitude. +func (s *PredictionResponseRequest) GetLaunchAltitude() OptFloat64 { + return s.LaunchAltitude +} + +// GetProfile returns the value of Profile. +func (s *PredictionResponseRequest) GetProfile() OptString { + return s.Profile +} + +// GetAscentRate returns the value of AscentRate. +func (s *PredictionResponseRequest) GetAscentRate() OptFloat64 { + return s.AscentRate +} + +// GetBurstAltitude returns the value of BurstAltitude. +func (s *PredictionResponseRequest) GetBurstAltitude() OptFloat64 { + return s.BurstAltitude +} + +// GetDescentRate returns the value of DescentRate. +func (s *PredictionResponseRequest) GetDescentRate() OptFloat64 { + return s.DescentRate +} + +// SetDataset sets the value of Dataset. 
+func (s *PredictionResponseRequest) SetDataset(val OptString) { + s.Dataset = val +} + +// SetLaunchLatitude sets the value of LaunchLatitude. +func (s *PredictionResponseRequest) SetLaunchLatitude(val OptFloat64) { + s.LaunchLatitude = val +} + +// SetLaunchLongitude sets the value of LaunchLongitude. +func (s *PredictionResponseRequest) SetLaunchLongitude(val OptFloat64) { + s.LaunchLongitude = val +} + +// SetLaunchDatetime sets the value of LaunchDatetime. +func (s *PredictionResponseRequest) SetLaunchDatetime(val OptString) { + s.LaunchDatetime = val +} + +// SetLaunchAltitude sets the value of LaunchAltitude. +func (s *PredictionResponseRequest) SetLaunchAltitude(val OptFloat64) { + s.LaunchAltitude = val +} + +// SetProfile sets the value of Profile. +func (s *PredictionResponseRequest) SetProfile(val OptString) { + s.Profile = val +} + +// SetAscentRate sets the value of AscentRate. +func (s *PredictionResponseRequest) SetAscentRate(val OptFloat64) { + s.AscentRate = val +} + +// SetBurstAltitude sets the value of BurstAltitude. +func (s *PredictionResponseRequest) SetBurstAltitude(val OptFloat64) { + s.BurstAltitude = val +} + +// SetDescentRate sets the value of DescentRate. +func (s *PredictionResponseRequest) SetDescentRate(val OptFloat64) { + s.DescentRate = val +} + +type PredictionResponseWarnings map[string]jx.Raw + +func (s *PredictionResponseWarnings) init() PredictionResponseWarnings { + m := *s + if m == nil { + m = map[string]jx.Raw{} + *s = m + } + return m +} + +// Ref: #/components/schemas/ReadinessResponse +type ReadinessResponse struct { + Status ReadinessResponseStatus `json:"status"` + DatasetTime OptDateTime `json:"dataset_time"` + ErrorMessage OptString `json:"error_message"` +} + +// GetStatus returns the value of Status. +func (s *ReadinessResponse) GetStatus() ReadinessResponseStatus { + return s.Status +} + +// GetDatasetTime returns the value of DatasetTime. 
+func (s *ReadinessResponse) GetDatasetTime() OptDateTime { + return s.DatasetTime +} + +// GetErrorMessage returns the value of ErrorMessage. +func (s *ReadinessResponse) GetErrorMessage() OptString { + return s.ErrorMessage +} + +// SetStatus sets the value of Status. +func (s *ReadinessResponse) SetStatus(val ReadinessResponseStatus) { + s.Status = val +} + +// SetDatasetTime sets the value of DatasetTime. +func (s *ReadinessResponse) SetDatasetTime(val OptDateTime) { + s.DatasetTime = val +} + +// SetErrorMessage sets the value of ErrorMessage. +func (s *ReadinessResponse) SetErrorMessage(val OptString) { + s.ErrorMessage = val +} + +type ReadinessResponseStatus string + +const ( + ReadinessResponseStatusOk ReadinessResponseStatus = "ok" + ReadinessResponseStatusNotReady ReadinessResponseStatus = "not_ready" + ReadinessResponseStatusError ReadinessResponseStatus = "error" +) + +// AllValues returns all ReadinessResponseStatus values. +func (ReadinessResponseStatus) AllValues() []ReadinessResponseStatus { + return []ReadinessResponseStatus{ + ReadinessResponseStatusOk, + ReadinessResponseStatusNotReady, + ReadinessResponseStatusError, + } +} + +// MarshalText implements encoding.TextMarshaler. +func (s ReadinessResponseStatus) MarshalText() ([]byte, error) { + switch s { + case ReadinessResponseStatusOk: + return []byte(s), nil + case ReadinessResponseStatusNotReady: + return []byte(s), nil + case ReadinessResponseStatusError: + return []byte(s), nil + default: + return nil, errors.Errorf("invalid value: %q", s) + } +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (s *ReadinessResponseStatus) UnmarshalText(data []byte) error { + switch ReadinessResponseStatus(data) { + case ReadinessResponseStatusOk: + *s = ReadinessResponseStatusOk + return nil + case ReadinessResponseStatusNotReady: + *s = ReadinessResponseStatusNotReady + return nil + case ReadinessResponseStatusError: + *s = ReadinessResponseStatusError + return nil + default: + return errors.Errorf("invalid value: %q", data) + } +} diff --git a/pkg/rest/oas_server_gen.go b/pkg/rest/oas_server_gen.go new file mode 100644 index 0000000..7b6c592 --- /dev/null +++ b/pkg/rest/oas_server_gen.go @@ -0,0 +1,46 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "context" +) + +// Handler handles operations described by OpenAPI v3 specification. +type Handler interface { + // PerformPrediction implements performPrediction operation. + // + // Perform prediction. + // + // GET /api/v1/prediction + PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResponse, error) + // ReadinessCheck implements readinessCheck operation. + // + // Readiness check. + // + // GET /ready + ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) + // NewError creates *ErrorStatusCode from error returned by handler. + // + // Used for common default response. + NewError(ctx context.Context, err error) *ErrorStatusCode +} + +// Server implements http server based on OpenAPI v3 specification and +// calls Handler to handle requests. +type Server struct { + h Handler + baseServer +} + +// NewServer creates new Server. 
+func NewServer(h Handler, opts ...ServerOption) (*Server, error) { + s, err := newServerConfig(opts...).baseServer() + if err != nil { + return nil, err + } + return &Server{ + h: h, + baseServer: s, + }, nil +} diff --git a/pkg/rest/oas_unimplemented_gen.go b/pkg/rest/oas_unimplemented_gen.go new file mode 100644 index 0000000..8c3d8be --- /dev/null +++ b/pkg/rest/oas_unimplemented_gen.go @@ -0,0 +1,40 @@ +// Code generated by ogen, DO NOT EDIT. + +package rest + +import ( + "context" + + ht "github.com/ogen-go/ogen/http" +) + +// UnimplementedHandler is no-op Handler which returns http.ErrNotImplemented. +type UnimplementedHandler struct{} + +var _ Handler = UnimplementedHandler{} + +// PerformPrediction implements performPrediction operation. +// +// Perform prediction. +// +// GET /api/v1/prediction +func (UnimplementedHandler) PerformPrediction(ctx context.Context, params PerformPredictionParams) (r *PredictionResponse, _ error) { + return r, ht.ErrNotImplemented +} + +// ReadinessCheck implements readinessCheck operation. +// +// Readiness check. +// +// GET /ready +func (UnimplementedHandler) ReadinessCheck(ctx context.Context) (r *ReadinessResponse, _ error) { + return r, ht.ErrNotImplemented +} + +// NewError creates *ErrorStatusCode from error returned by handler. +// +// Used for common default response. +func (UnimplementedHandler) NewError(ctx context.Context, err error) (r *ErrorStatusCode) { + r = new(ErrorStatusCode) + return r +} diff --git a/pkg/rest/oas_validators_gen.go b/pkg/rest/oas_validators_gen.go new file mode 100644 index 0000000..33b3d41 --- /dev/null +++ b/pkg/rest/oas_validators_gen.go @@ -0,0 +1,344 @@ +// Code generated by ogen, DO NOT EDIT. 
+ +package rest + +import ( + "fmt" + + "github.com/go-faster/errors" + "github.com/ogen-go/ogen/validate" +) + +func (s PerformPredictionProfile) Validate() error { + switch s { + case "standard_profile": + return nil + case "float_profile": + return nil + default: + return errors.Errorf("invalid value: %v", s) + } +} + +func (s *PredictionResponse) Validate() error { + if s == nil { + return validate.ErrNilPointer + } + + var failures []validate.FieldError + if err := func() error { + if value, ok := s.Request.Get(); ok { + if err := func() error { + if err := value.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "request", + Error: err, + }) + } + if err := func() error { + if s.Prediction == nil { + return errors.New("nil is invalid value") + } + var failures []validate.FieldError + for i, elem := range s.Prediction { + if err := func() error { + if err := elem.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: fmt.Sprintf("[%d]", i), + Error: err, + }) + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "prediction", + Error: err, + }) + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil +} + +func (s *PredictionResponsePredictionItem) Validate() error { + if s == nil { + return validate.ErrNilPointer + } + + var failures []validate.FieldError + if err := func() error { + if err := s.Stage.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "stage", + Error: err, + }) + } + if err := func() error { + if s.Trajectory == nil { + return errors.New("nil is invalid value") + } + var failures []validate.FieldError 
+ for i, elem := range s.Trajectory { + if err := func() error { + if err := elem.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: fmt.Sprintf("[%d]", i), + Error: err, + }) + } + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "trajectory", + Error: err, + }) + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil +} + +func (s PredictionResponsePredictionItemStage) Validate() error { + switch s { + case "ascent": + return nil + case "descent": + return nil + case "float": + return nil + default: + return errors.Errorf("invalid value: %v", s) + } +} + +func (s *PredictionResponsePredictionItemTrajectoryItem) Validate() error { + if s == nil { + return validate.ErrNilPointer + } + + var failures []validate.FieldError + if err := func() error { + if err := (validate.Float{}).Validate(float64(s.Latitude)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "latitude", + Error: err, + }) + } + if err := func() error { + if err := (validate.Float{}).Validate(float64(s.Longitude)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "longitude", + Error: err, + }) + } + if err := func() error { + if err := (validate.Float{}).Validate(float64(s.Altitude)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "altitude", + Error: err, + }) + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil +} + +func (s *PredictionResponseRequest) Validate() error { + if s == nil { + return validate.ErrNilPointer + } + + var failures 
[]validate.FieldError + if err := func() error { + if value, ok := s.LaunchLatitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "launch_latitude", + Error: err, + }) + } + if err := func() error { + if value, ok := s.LaunchLongitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "launch_longitude", + Error: err, + }) + } + if err := func() error { + if value, ok := s.LaunchAltitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "launch_altitude", + Error: err, + }) + } + if err := func() error { + if value, ok := s.AscentRate.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "ascent_rate", + Error: err, + }) + } + if err := func() error { + if value, ok := s.BurstAltitude.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "burst_altitude", + Error: err, + }) 
+ } + if err := func() error { + if value, ok := s.DescentRate.Get(); ok { + if err := func() error { + if err := (validate.Float{}).Validate(float64(value)); err != nil { + return errors.Wrap(err, "float") + } + return nil + }(); err != nil { + return err + } + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "descent_rate", + Error: err, + }) + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil +} + +func (s *ReadinessResponse) Validate() error { + if s == nil { + return validate.ErrNilPointer + } + + var failures []validate.FieldError + if err := func() error { + if err := s.Status.Validate(); err != nil { + return err + } + return nil + }(); err != nil { + failures = append(failures, validate.FieldError{ + Name: "status", + Error: err, + }) + } + if len(failures) > 0 { + return &validate.Error{Fields: failures} + } + return nil +} + +func (s ReadinessResponseStatus) Validate() error { + switch s { + case "ok": + return nil + case "not_ready": + return nil + case "error": + return nil + default: + return errors.Errorf("invalid value: %v", s) + } +} diff --git a/scripts/build_elevation.py b/scripts/build_elevation.py new file mode 100644 index 0000000..8f4244a --- /dev/null +++ b/scripts/build_elevation.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 +""" +Download ETOPO 2022 30-arc-second elevation data and convert to ruaumoko-compatible +binary format (int16 little-endian, 21601 lat x 43200 lon, south-to-north). + +Output: ~1.74 GiB binary file. 
+ +Usage: python3 build_elevation.py [output_path] + Default output: /srv/ruaumoko-dataset +""" + +import sys +import os +import struct +import tempfile +import numpy as np + +CELLS_PER_DEGREE = 120 +NUM_LATS = 180 * CELLS_PER_DEGREE + 1 # 21601 +NUM_LONS = 360 * CELLS_PER_DEGREE # 43200 +EXPECTED_SIZE = NUM_LATS * NUM_LONS * 2 # 1,866,326,400 + +ETOPO_URL = "https://www.ngdc.noaa.gov/thredds/fileServer/global/ETOPO2022/30s/30s_surface_elev_netcdf/ETOPO_2022_v1_30s_N90W180_surface.nc" + + +def download_etopo(output_path): + """Download ETOPO 2022 NetCDF and convert to ruaumoko binary format.""" + try: + import xarray as xr + except ImportError: + print("ERROR: xarray is required. Install with: pip install xarray netcdf4") + sys.exit(1) + + # Check if we can download directly or need a local file + nc_path = os.environ.get("ETOPO_NC_PATH") + if nc_path and os.path.exists(nc_path): + print(f"Using local ETOPO file: {nc_path}") + else: + print(f"Downloading ETOPO 2022 30-second data (~1.1 GB)...") + print(f" URL: {ETOPO_URL}") + print(f" (Set ETOPO_NC_PATH env var to use a pre-downloaded file)") + + import urllib.request + with tempfile.NamedTemporaryFile(suffix=".nc", delete=False) as f: + nc_path = f.name + + try: + urllib.request.urlretrieve(ETOPO_URL, nc_path, _progress) + print() + except Exception as e: + os.unlink(nc_path) + print(f"\nDownload failed: {e}") + print("\nAlternative: manually download ETOPO 2022 30s NetCDF from:") + print(" https://www.ncei.noaa.gov/products/etopo-global-relief-model") + print(f" Then set ETOPO_NC_PATH=/path/to/file.nc and re-run") + sys.exit(1) + + print(f"Opening NetCDF dataset...") + ds = xr.open_dataset(nc_path) + + # ETOPO 2022 30s has: + # - lat: -90 to +90, 21601 points (south to north) + # - lon: -180 to +180, 43201 points + # We need: + # - lat: -90 to +90, 21601 points (south to north) ← same + # - lon: 0 to 360 (exclusive), 43200 points ← need to shift and drop last + + z = ds["z"] # elevation variable + print(f" Shape: 
{z.shape}") + print(f" Lat range: {float(z.lat.min())} to {float(z.lat.max())}") + print(f" Lon range: {float(z.lon.min())} to {float(z.lon.max())}") + + # Sort latitude south-to-north (should already be, but ensure) + z = z.sortby("lat") + + # Shift longitude from [-180, 180] to [0, 360) + print("Shifting longitude to [0, 360)...") + z = z.assign_coords(lon=(z.lon % 360)) + z = z.sortby("lon") + + data = z.values + print(f" Raw shape after sort: {data.shape}") + + # Handle longitude dimension: drop last col if it wraps (43201 → 43200) + if data.shape[1] == NUM_LONS + 1: + data = data[:, :NUM_LONS] + elif data.shape[1] != NUM_LONS: + print(f"ERROR: unexpected lon dimension: {data.shape[1]}, expected {NUM_LONS} or {NUM_LONS+1}") + sys.exit(1) + + # Handle latitude dimension: ETOPO 2022 is cell-centered (21600 rows), + # ruaumoko expects grid-registered (21601 rows including both poles). + # Pad by duplicating edge rows for the poles. + if data.shape[0] == NUM_LATS - 1: + print(f" Padding latitude from {data.shape[0]} to {NUM_LATS} (adding north pole row)") + north_pole = data[-1:, :] # duplicate +89.99... 
as +90 + data = np.concatenate([data, north_pole], axis=0) + elif data.shape[0] != NUM_LATS: + print(f"ERROR: unexpected lat dimension: {data.shape[0]}, expected {NUM_LATS} or {NUM_LATS-1}") + sys.exit(1) + + print(f"Final grid shape: {data.shape}") + print(f"Elevation range: {data.min():.1f} to {data.max():.1f} metres") + + # Write as int16 little-endian + print(f"Writing to {output_path}...") + elev_int16 = np.clip(data, -32768, 32767).astype(np.dtype(" 0: + pct = int(block_num * block_size * 100 / total_size) + if pct != _last_pct and pct % 5 == 0: + _last_pct = pct + print(f" {pct}%...", end="", flush=True) + + +if __name__ == "__main__": + output = sys.argv[1] if len(sys.argv) > 1 else "/srv/ruaumoko-dataset" + download_etopo(output) -- 2.47.3 From e14a00287ca4140f5e119bbc1d38a3d350b8625e Mon Sep 17 00:00:00 2001 From: "a.antonov" Date: Sun, 29 Mar 2026 17:11:22 +0900 Subject: [PATCH 8/8] feat: dataset dl progress logs --- internal/downloader/downloader.go | 103 ++++++++++++++++++++++++------ 1 file changed, 82 insertions(+), 21 deletions(-) diff --git a/internal/downloader/downloader.go b/internal/downloader/downloader.go index 72b5fb2..32208a0 100644 --- a/internal/downloader/downloader.go +++ b/internal/downloader/downloader.go @@ -8,6 +8,7 @@ import ( "net/http" "os" "path/filepath" + "sync/atomic" "time" "predictor-refactored/internal/dataset" @@ -76,6 +77,60 @@ func (d *Downloader) FindLatestRun(ctx context.Context) (time.Time, error) { return time.Time{}, fmt.Errorf("no recent GFS forecast found (checked 8 runs)") } +// progress tracks download progress across concurrent goroutines. 
+type progress struct {
+	bytesDownloaded atomic.Int64
+	stepsCompleted  atomic.Int64
+	totalSteps      int64
+	startTime       time.Time
+	log             *zap.Logger
+}
+
+func newProgress(totalSteps int, log *zap.Logger) *progress {
+	return &progress{
+		totalSteps: int64(totalSteps),
+		startTime:  time.Now(),
+		log:        log,
+	}
+}
+
+func (p *progress) addBytes(n int64) {
+	p.bytesDownloaded.Add(n)
+}
+
+func (p *progress) completeStep() {
+	done := p.stepsCompleted.Add(1)
+	total := p.totalSteps
+	bytes := p.bytesDownloaded.Load()
+	elapsed := time.Since(p.startTime).Seconds()
+
+	pct := float64(done) / float64(total) * 100
+	mbDownloaded := float64(bytes) / (1024 * 1024)
+	mbPerSec := 0.0
+	if elapsed > 0 {
+		mbPerSec = mbDownloaded / elapsed
+	}
+
+	// Estimate remaining
+	eta := ""
+	if done > 0 && done < total {
+		secsPerStep := elapsed / float64(done)
+		remaining := secsPerStep * float64(total-done)
+		if remaining > 60 {
+			eta = fmt.Sprintf("%dm%02ds", int(remaining)/60, int(remaining)%60)
+		} else {
+			eta = fmt.Sprintf("%.0fs", remaining)
+		}
+	}
+
+	p.log.Info("download progress",
+		zap.String("progress", fmt.Sprintf("%d/%d", done, total)),
+		zap.String("percent", fmt.Sprintf("%.1f%%", pct)),
+		zap.String("downloaded", fmt.Sprintf("%.1f MB", mbDownloaded)),
+		zap.String("speed", fmt.Sprintf("%.1f MB/s", mbPerSec)),
+		zap.String("eta", eta))
+}
+
 // Download downloads a complete forecast and assembles a dataset file.
 // Returns the path to the completed dataset file.
func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error) { @@ -91,8 +146,13 @@ func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error return finalPath, nil } + steps := dataset.Hours() + totalSteps := len(steps) * 2 // pgrb2 + pgrb2b per step + prog := newProgress(totalSteps, d.log) + d.log.Info("starting dataset download", zap.Time("run", run), + zap.Int("total_steps", totalSteps), zap.String("temp_path", tempPath)) // Create the dataset file @@ -102,10 +162,6 @@ func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error } defer ds.Close() - steps := dataset.Hours() - totalSteps := len(steps) * 2 // pgrb2 + pgrb2b per step - completed := 0 - // Process each forecast step with bounded concurrency g, ctx := errgroup.WithContext(ctx) sem := make(chan struct{}, d.cfg.Parallel) @@ -122,16 +178,11 @@ func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error g.Go(func() error { defer func() { <-sem }() url := dataset.GribURL(date, runHour, step) - err := d.DownloadAndBlit(ctx, ds, url, hourIdx, dataset.LevelSetA) + err := d.downloadAndBlit(ctx, ds, url, hourIdx, dataset.LevelSetA, prog) if err != nil { return fmt.Errorf("step %d pgrb2: %w", step, err) } - completed++ - d.log.Debug("step complete", - zap.Int("step", step), - zap.String("set", "pgrb2"), - zap.Int("progress", completed), - zap.Int("total", totalSteps)) + prog.completeStep() return nil }) @@ -140,16 +191,11 @@ func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error g.Go(func() error { defer func() { <-sem }() url := dataset.GribURLB(date, runHour, step) - err := d.DownloadAndBlit(ctx, ds, url, hourIdx, dataset.LevelSetB) + err := d.downloadAndBlit(ctx, ds, url, hourIdx, dataset.LevelSetB, prog) if err != nil { return fmt.Errorf("step %d pgrb2b: %w", step, err) } - completed++ - d.log.Debug("step complete", - zap.Int("step", step), - zap.String("set", "pgrb2b"), - zap.Int("progress", 
completed), - zap.Int("total", totalSteps)) + prog.completeStep() return nil }) } @@ -159,6 +205,13 @@ func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error return "", err } + elapsed := time.Since(prog.startTime) + totalMB := float64(prog.bytesDownloaded.Load()) / (1024 * 1024) + d.log.Info("download complete, flushing to disk", + zap.String("downloaded", fmt.Sprintf("%.1f MB", totalMB)), + zap.Duration("elapsed", elapsed), + zap.String("avg_speed", fmt.Sprintf("%.1f MB/s", totalMB/elapsed.Seconds()))) + // Flush to disk if err := ds.Flush(); err != nil { os.Remove(tempPath) @@ -174,12 +227,17 @@ func (d *Downloader) Download(ctx context.Context, run time.Time) (string, error return "", fmt.Errorf("rename dataset: %w", err) } - d.log.Info("dataset download complete", zap.String("path", finalPath)) + d.log.Info("dataset ready", zap.String("path", finalPath)) return finalPath, nil } // DownloadAndBlit downloads needed GRIB fields from a URL and writes them into the dataset. func (d *Downloader) DownloadAndBlit(ctx context.Context, ds *dataset.File, baseURL string, hourIdx int, levelSet dataset.LevelSet) error { + return d.downloadAndBlit(ctx, ds, baseURL, hourIdx, levelSet, nil) +} + +// downloadAndBlit is the internal implementation with optional progress tracking. +func (d *Downloader) downloadAndBlit(ctx context.Context, ds *dataset.File, baseURL string, hourIdx int, levelSet dataset.LevelSet, prog *progress) error { // 1. Download .idx idxURL := baseURL + ".idx" idxBody, err := d.httpGet(ctx, idxURL) @@ -210,7 +268,7 @@ func (d *Downloader) DownloadAndBlit(ctx context.Context, ds *dataset.File, base // 3. 
Download byte ranges and write to temp file ranges := EntriesToRanges(relevant) - tmpFile, err := d.downloadRangesToTempFile(ctx, baseURL, ranges) + tmpFile, err := d.downloadRangesToTempFile(ctx, baseURL, ranges, prog) if err != nil { return fmt.Errorf("download ranges: %w", err) } @@ -267,7 +325,7 @@ func (d *Downloader) DownloadAndBlit(ctx context.Context, ds *dataset.File, base // downloadRangesToTempFile downloads multiple byte ranges from a URL, // concatenating them into a single temp file (valid concatenated GRIB messages). -func (d *Downloader) downloadRangesToTempFile(ctx context.Context, baseURL string, ranges []ByteRange) (string, error) { +func (d *Downloader) downloadRangesToTempFile(ctx context.Context, baseURL string, ranges []ByteRange, prog *progress) (string, error) { tmpFile, err := os.CreateTemp(d.cfg.DataDir, "grib-*.tmp") if err != nil { return "", fmt.Errorf("create temp file: %w", err) @@ -286,6 +344,9 @@ func (d *Downloader) downloadRangesToTempFile(ctx context.Context, baseURL strin os.Remove(tmpPath) return "", fmt.Errorf("write temp: %w", err) } + if prog != nil { + prog.addBytes(int64(len(data))) + } } if err := tmpFile.Close(); err != nil { -- 2.47.3