forked from gsn/predictor
feat: cleanup
parent 8e9f117799, commit 82ef1cb3b8
66 changed files with 0 additions and 9521 deletions
@@ -1,17 +0,0 @@
{
  "permissions": {
    "allow": [
      "Bash(cat:*)",
      "Bash(xargs:*)",
      "Bash(ls:*)",
      "Bash(done)",
      "Bash(curl:*)",
      "WebFetch(domain:raw.githubusercontent.com)",
      "WebFetch(domain:github.com)",
      "Bash(go run:*)",
      "Bash(pkill:*)"
    ],
    "deny": [],
    "ask": []
  }
}
@@ -1,55 +0,0 @@
# Git
.git
.gitignore

# Docker
Dockerfile
docker-compose.yml
.dockerignore

# Documentation
README.md
*.md

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Environment files
.env
.env.local
.env.*.local

# Build artifacts
predictor
*.exe
*.exe~
*.dll
*.so
*.dylib
*.test

# Logs
*.log

# Temporary files
/tmp/
/temp/
*.py
# Test coverage
*.out

# Go workspace
go.work
.gitignore (vendored, 68 lines)
@@ -1,68 +0,0 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
*.ps1

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/

# Go workspace file
go.work

# Environment variables
.env
.env.local
.env.*.local

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
*.bak
*.py
*.json

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Logs
*.log

# Temporary files
/tmp/
/temp/

# Build artifacts
/build/
/dist/

# GRIB files
/grib_data/
/grib_data/*

# Leaflet WebUI
/leaflet_predictor
/leaflet_predictor/*

# Tawhiri
/tawhiri
/tawhiri/*

*.md
.vscode/settings.json (vendored, 3 lines)
@@ -1,3 +0,0 @@
{
  "makefile.configureOnOpen": false
}
Dockerfile (57 lines)
@@ -1,57 +0,0 @@
# Build stage
FROM golang:1.24.4-alpine AS builder

# Install build dependencies
RUN apk add --no-cache git ca-certificates tzdata

# Set working directory
WORKDIR /app

# Copy go mod files
COPY go.mod go.sum ./

# Download dependencies
RUN go mod download

# Copy source code
COPY . .

# Build the application
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
    -ldflags="-w -s" \
    -o predictor \
    ./cmd/api

# Runtime stage
FROM alpine:3.19

# Install runtime dependencies
RUN apk add --no-cache ca-certificates tzdata

# Create non-root user
RUN addgroup -g 1001 -S appgroup && \
    adduser -u 1001 -S appuser -G appgroup

# Set working directory
WORKDIR /app

# Copy binary from builder stage
COPY --from=builder /app/predictor .

# Create necessary directories
RUN mkdir -p /tmp/grib && \
    chown -R appuser:appgroup /app && \
    chmod -R 777 /tmp/grib

# Switch to non-root user
USER appuser

# Expose port
EXPOSE 8080

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:8080/ready || exit 1

# Run the application
CMD ["./predictor"]
Makefile (113 lines)
@@ -1,113 +0,0 @@
# Variables
IMAGE_NAME = predictor
TAG = latest
COMPOSE_FILE = docker-compose.yml

# Validate Docker configuration
.PHONY: validate-docker
validate-docker:
	./scripts/validate-docker.sh

# Build the Docker image
.PHONY: build
build:
	docker build -t $(IMAGE_NAME):$(TAG) .

# Run the application with docker-compose
.PHONY: up
up:
	docker-compose -f $(COMPOSE_FILE) up -d

# Run the application with docker-compose and rebuild
.PHONY: up-build
up-build:
	docker-compose -f $(COMPOSE_FILE) up -d --build

# Stop the application
.PHONY: down
down:
	docker-compose -f $(COMPOSE_FILE) down

# Stop the application and remove volumes
.PHONY: down-volumes
down-volumes:
	docker-compose -f $(COMPOSE_FILE) down -v

# View logs
.PHONY: logs
logs:
	docker-compose -f $(COMPOSE_FILE) logs -f

# View logs for specific service
.PHONY: logs-predictor
logs-predictor:
	docker-compose -f $(COMPOSE_FILE) logs -f predictor

# Check service status
.PHONY: ps
ps:
	docker-compose -f $(COMPOSE_FILE) ps

# Execute command in predictor container
.PHONY: exec
exec:
	docker-compose -f $(COMPOSE_FILE) exec predictor sh

# Clean up Docker resources
.PHONY: clean
clean:
	docker-compose -f $(COMPOSE_FILE) down -v --rmi all
	docker system prune -f

# Run tests
.PHONY: test
test:
	go test ./...

# Build locally
.PHONY: build-local
build-local:
	go build -o predictor ./cmd/api

# Run locally
.PHONY: run-local
run-local:
	cd cmd/api && go run .

# Format code
.PHONY: fmt
fmt:
	go fmt ./...

# Lint code
.PHONY: lint
lint:
	golangci-lint run

# Show help
.PHONY: help
help:
	@echo "Available commands:"
	@echo "  validate-docker - Validate Docker configuration"
	@echo "  build           - Build Docker image"
	@echo "  up              - Start services with docker-compose"
	@echo "  up-build        - Start services and rebuild images"
	@echo "  down            - Stop services"
	@echo "  down-volumes    - Stop services and remove volumes"
	@echo "  logs            - View all logs"
	@echo "  logs-predictor  - View predictor logs"
	@echo "  ps              - Show service status"
	@echo "  exec            - Execute shell in predictor container"
	@echo "  clean           - Clean up Docker resources"
	@echo "  test            - Run tests"
	@echo "  build-local     - Build locally"
	@echo "  run-local       - Run locally"
	@echo "  fmt             - Format code"
	@echo "  lint            - Lint code"
	@echo "  generate-ogen   - Generate OpenAPI code from swagger spec"
	@echo "  help            - Show this help"

.PHONY: generate-ogen
generate-ogen:
	go run github.com/ogen-go/ogen/cmd/ogen@latest --target pkg/rest -package gsn --clean api/rest/predictor.swagger.yml
@@ -1,191 +0,0 @@
openapi: 3.0.4
info:
  title: GSN Predictor - OpenAPI 3.0
  version: 0.0.1
paths:
  /api/v1/prediction:
    get:
      tags:
        - Prediction
      summary: Perform prediction
      operationId: performPrediction
      parameters:
        - in: query
          name: launch_latitude
          schema:
            type: number
        - in: query
          name: launch_longitude
          schema:
            type: number
        - in: query
          name: launch_datetime
          schema:
            type: string
            format: date-time
        - in: query
          name: launch_altitude
          schema:
            type: number
        - in: query
          name: profile
          schema:
            type: string
            enum: [standard_profile, float_profile, reverse_profile, custom_profile]
        - in: query
          name: ascent_rate
          schema:
            type: number
        - in: query
          name: burst_altitude
          schema:
            type: number
        - in: query
          name: descent_rate
          schema:
            type: number
        - in: query
          name: float_altitude
          schema:
            type: number
        - in: query
          name: stop_datetime
          schema:
            type: string
            format: date-time
        - in: query
          name: ascent_curve
          schema:
            type: string
        - in: query
          name: descent_curve
          schema:
            type: string
        - in: query
          name: simulate_stages
          schema:
            type: array
            items:
              type: string
              enum: [ascent, descent, float]
        - in: query
          name: interpolate
          schema:
            type: boolean
        - in: query
          name: format
          schema:
            type: string
            enum: [json]
        - in: query
          name: dataset
          schema:
            type: string
            format: date-time
      responses:
        "200":
          description: "Prediction response"
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/PredictionResult'
        default:
          description: Error
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Error"
  /ready:
    get:
      tags:
        - Health
      summary: Readiness check
      operationId: readinessCheck
      responses:
        "200":
          description: Readiness status
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ReadinessResponse'
        default:
          description: Error
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Error"

components:
  schemas:
    Error:
      type: object
      required:
        - message
      properties:
        message:
          type: string
        details:
          type: string
    PredictionResult:
      type: object
      required:
        - metadata
        - prediction
      properties:
        metadata:
          type: object
          required:
            - complete_datetime
            - start_datetime
          properties:
            complete_datetime:
              type: string
              format: date-time
            start_datetime:
              type: string
              format: date-time
        prediction:
          type: array
          items:
            type: object
            required:
              - stage
              - trajectory
            properties:
              stage:
                type: string
                enum: ["ascent", "descent", "float"]
              trajectory:
                type: array
                items:
                  type: object
                  required:
                    - datetime
                    - latitude
                    - longitude
                    - altitude
                  properties:
                    datetime:
                      type: string
                      format: date-time
                    latitude:
                      type: number
                    longitude:
                      type: number
                    altitude:
                      type: number
    ReadinessResponse:
      type: object
      properties:
        status:
          type: string
          enum: [ok, not_ready, error]
        last_update:
          type: string
          format: date-time
        is_fresh:
          type: boolean
        error_message:
          type: string
      required:
        - status
@@ -1,26 +0,0 @@
package main

import (
	"fmt"
	"time"
)

// This will be a simple wrapper that calls the internal assembleCube function
// We'll compile it as part of the grib package

func main() {
	dir := "C:/tmp/grib"
	run := time.Date(2025, 12, 6, 0, 0, 0, 0, time.UTC)
	cubePath := fmt.Sprintf("%s/%s.cube", dir, run.Format("20060102_15"))

	fmt.Printf("Assembling cube from existing GRIB files...\n")
	fmt.Printf("Directory: %s\n", dir)
	fmt.Printf("Run: %s\n", run.Format("2006-01-02 15:04 MST"))
	fmt.Printf("Output: %s\n", cubePath)
	fmt.Println()

	// Just print instructions - we'll do it directly
	fmt.Println("Run this Go code to assemble:")
	fmt.Printf("cd internal/pkg/grib && go test -run TestAssemble\n")
}
cmd/api/main.go (114 lines)
@@ -1,114 +0,0 @@
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"

	"git.intra.yksa.space/gsn/predictor/internal/jobs/grib/updater"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/grib"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"git.intra.yksa.space/gsn/predictor/internal/service"
	"git.intra.yksa.space/gsn/predictor/internal/transport/rest"
	"git.intra.yksa.space/gsn/predictor/internal/transport/rest/handler"
	"git.intra.yksa.space/gsn/predictor/pkg/scheduler"
	"go.uber.org/zap"
)

const servicePrefix = "GSN_PREDICTOR"

func main() {
	lg, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()
	zap.ReplaceGlobals(lg)
	ctx := log.ToCtx(context.Background(), lg)

	schedulerConfig, err := scheduler.NewConfig()
	if err != nil {
		log.Ctx(ctx).Fatal("failed to load scheduler configuration", zap.Error(err))
	}

	gribUpdaterConfig, err := updater.NewConfig()
	if err != nil {
		log.Ctx(ctx).Fatal("failed to load GRIB updater configuration", zap.Error(err))
	}

	gribCfg, err := grib.NewConfig()
	if err != nil {
		log.Ctx(ctx).Fatal("failed to load GRIB configuration", zap.Error(err))
	}

	gribService, err := grib.New(gribCfg)
	if err != nil {
		log.Ctx(ctx).Fatal("failed to initialize GRIB service", zap.Error(err))
	}
	defer gribService.Close()

	// Force GRIB update on startup in a goroutine
	go func() {
		log.Ctx(ctx).Info("Performing initial GRIB update (async)...")
		if err := gribService.Update(ctx); err != nil {
			log.Ctx(ctx).Error("initial GRIB update failed", zap.Error(err))
		} else {
			log.Ctx(ctx).Info("initial GRIB update complete")
		}
	}()

	svc, err := service.New(gribService)
	if err != nil {
		log.Ctx(ctx).Fatal("failed to initialize service", zap.Error(err))
	}
	defer svc.Close()

	var sched *scheduler.Scheduler
	if schedulerConfig.Enabled {
		sched = scheduler.New()

		gribJob := updater.New(gribService, gribUpdaterConfig)
		if err := sched.AddJob(gribJob); err != nil {
			log.Ctx(ctx).Error("failed to add GRIB update job to scheduler", zap.Error(err))
		}

		log.Ctx(ctx).Info("scheduler initialized with jobs")
	}

	handler := handler.New(svc)

	restConfig, err := rest.NewConfig()
	if err != nil {
		lg.Fatal("failed to init transport config", zap.Error(err))
	}

	transport, err := rest.New(handler, restConfig)
	if err != nil {
		lg.Fatal("failed to init transport", zap.Error(err))
	}

	svc.Start()
	if sched != nil {
		sched.Start()
		lg.Info("scheduler started")
	}

	lg.Info("service started successfully")

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		lg.Info("starting HTTP server on port", zap.Int("port", restConfig.Port))
		transport.Run()
	}()

	<-sigChan
	lg.Info("received shutdown signal, stopping service")

	if sched != nil {
		sched.Stop()
		lg.Info("scheduler stopped")
	}
}
go.mod (47 lines)
@@ -1,47 +0,0 @@
module git.intra.yksa.space/gsn/predictor

go 1.24.4

require (
	github.com/caarlos0/env/v11 v11.3.1
	github.com/edsrzf/mmap-go v1.2.0
	github.com/go-co-op/gocron v1.37.0
	github.com/go-faster/errors v0.7.1
	github.com/go-faster/jx v1.1.0
	github.com/nilsmagnus/grib v1.2.8
	github.com/ogen-go/ogen v1.16.0
	github.com/rs/cors v1.11.1
	github.com/stretchr/testify v1.11.1
	go.opentelemetry.io/otel v1.38.0
	go.opentelemetry.io/otel/metric v1.38.0
	go.opentelemetry.io/otel/trace v1.38.0
	go.uber.org/zap v1.27.0
	golang.org/x/sync v0.17.0
)

require (
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/dlclark/regexp2 v1.11.5 // indirect
	github.com/fatih/color v1.18.0 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/go-faster/yaml v0.4.6 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/robfig/cron/v3 v3.0.1 // indirect
	github.com/segmentio/asm v1.2.1 // indirect
	github.com/shopspring/decimal v1.4.0 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	golang.org/x/exp v0.0.0-20251017212417-90e834f514db // indirect
	golang.org/x/net v0.46.0 // indirect
	golang.org/x/sys v0.37.0 // indirect
	golang.org/x/text v0.30.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
go.sum (113 lines)
@@ -1,113 +0,0 @@
github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA=
github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0=
github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg=
github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo=
github.com/go-faster/jx v1.1.0 h1:ZsW3wD+snOdmTDy9eIVgQdjUpXRRV4rqW8NS3t+20bg=
github.com/go-faster/jx v1.1.0/go.mod h1:vKDNikrKoyUmpzaJ0OkIkRQClNHFX/nF3dnTJZb3skg=
github.com/go-faster/yaml v0.4.6 h1:lOK/EhI04gCpPgPhgt0bChS6bvw7G3WwI8xxVe0sw9I=
github.com/go-faster/yaml v0.4.6/go.mod h1:390dRIvV4zbnO7qC9FGo6YYutc+wyyUSHBgbXL52eXk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/nilsmagnus/grib v1.2.8 h1:H7ch/1/agaCqM3MC8hW1Ft+EJ+q2XB757uml/IfPvp4=
github.com/nilsmagnus/grib v1.2.8/go.mod h1:XHm+5zuoOk0NSIWaGmA3JaAxI4i50YvD1L1vz+aqPOQ=
github.com/ogen-go/ogen v1.16.0 h1:fKHEYokW/QrMzVNXId74/6RObRIUs9T2oroGKtR25Iw=
github.com/ogen-go/ogen v1.16.0/go.mod h1:s3nWiMzybSf8fhxckyO+wtto92+QHpEL8FmkPnhL3jI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/exp v0.0.0-20251017212417-90e834f514db h1:by6IehL4BH5k3e3SJmcoNbOobMey2SLpAF79iPOEBvw=
golang.org/x/exp v0.0.0-20251017212417-90e834f514db/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1,23 +0,0 @@
package updater

import (
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	env "github.com/caarlos0/env/v11"
)

type Config struct {
	Interval time.Duration `env:"INTERVAL" envDefault:"6h"`
	Timeout  time.Duration `env:"TIMEOUT" envDefault:"45m"`
}

func NewConfig() (*Config, error) {
	cfg := &Config{}
	if err := env.ParseWithOptions(cfg, env.Options{
		PrefixTagName: "GSN_PREDICTOR_GRIB_UPDATER_",
	}); err != nil {
		return nil, errcodes.Wrap(err, "failed to parse GRIB updater config")
	}
	return cfg, nil
}
@@ -1,8 +0,0 @@
package updater

import "context"

// GribService defines the interface for GRIB operations needed by the updater job
type GribService interface {
	Update(ctx context.Context) error
}
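
Keeping this consumer-side interface to a single method means the job below can be exercised without a real GRIB service. A sketch of a test double (`fakeGrib` is illustrative, not part of the repository):

```go
// fakeGrib is a hypothetical stub satisfying GribService for tests.
type fakeGrib struct {
	calls int   // number of Update invocations observed
	err   error // error to return, if any
}

func (f *fakeGrib) Update(ctx context.Context) error {
	f.calls++
	return f.err
}
```

A test can then build the job with `updater.New(&fakeGrib{}, cfg)` and assert on `calls` after invoking `Execute`.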
@@ -1,51 +0,0 @@
package updater

import (
	"context"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"go.uber.org/zap"
)

type Job struct {
	service GribService
	config  *Config
}

func New(service GribService, config *Config) *Job {
	return &Job{
		service: service,
		config:  config,
	}
}

func (j *Job) GetInterval() time.Duration {
	return j.config.Interval
}

func (j *Job) GetTimeout() time.Duration {
	return j.config.Timeout
}

func (j *Job) GetCount() int {
	return 1
}

func (j *Job) GetAsync() bool {
	return false
}

func (j *Job) Execute(ctx context.Context) error {
	log := log.Ctx(ctx)
	log.Info("executing GRIB update job")

	if err := j.service.Update(ctx); err != nil {
		log.Error("GRIB update failed", zap.Error(err))
		return errcodes.Wrap(err, "failed to update GRIB data")
	}

	log.Info("GRIB update completed successfully")
	return nil
}
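
pkg/scheduler itself is not part of this diff; conceptually it drives a Job roughly like this (a hypothetical sketch that ignores `GetCount`/`GetAsync`, not the actual scheduler code):

```go
// drive runs j.Execute every GetInterval, bounding each run by GetTimeout.
func drive(ctx context.Context, j *Job) {
	ticker := time.NewTicker(j.GetInterval())
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			jobCtx, cancel := context.WithTimeout(ctx, j.GetTimeout())
			_ = j.Execute(jobCtx) // Execute already logs its own failures
			cancel()
		}
	}
}
```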
@@ -1,96 +0,0 @@
package ds

import (
	"time"

	api "git.intra.yksa.space/gsn/predictor/pkg/rest"
)

type PredictionParameters struct {
	LaunchLatitude  *float64
	LaunchLongitude *float64
	LaunchDatetime  *time.Time
	LaunchAltitude  *float64
	Profile         *string
	AscentRate      *float64
	BurstAltitude   *float64
	DescentRate     *float64
	FloatAltitude   *float64
	StopDatetime    *time.Time
	AscentCurve     *string // base64
	DescentCurve    *string // base64
	SimulateStages  []string
	Interpolate     *bool
	Format          *string
	Dataset         *time.Time
	// Add other parameters as needed
}

type PredicitonResult struct {
	Latitude  *float64
	Longitude *float64
	Altitude  *float64
	Timestamp *time.Time
	WindU     *float64
	WindV     *float64
	// Add other result fields as needed
}

// Converts flat ogen params to internal pointer-based model
func ConvertFlatPredictionParams(params api.PerformPredictionParams) *PredictionParameters {
	out := &PredictionParameters{}
	if v, ok := params.LaunchLatitude.Get(); ok {
		out.LaunchLatitude = &v
	}
	if v, ok := params.LaunchLongitude.Get(); ok {
		out.LaunchLongitude = &v
	}
	if v, ok := params.LaunchDatetime.Get(); ok {
		out.LaunchDatetime = &v
	}
	if v, ok := params.LaunchAltitude.Get(); ok {
		out.LaunchAltitude = &v
	}
	if v, ok := params.Profile.Get(); ok {
		s := string(v)
		out.Profile = &s
	}
	if v, ok := params.AscentRate.Get(); ok {
		out.AscentRate = &v
	}
	if v, ok := params.BurstAltitude.Get(); ok {
		out.BurstAltitude = &v
	}
	if v, ok := params.DescentRate.Get(); ok {
		out.DescentRate = &v
	}
	if v, ok := params.FloatAltitude.Get(); ok {
		out.FloatAltitude = &v
	}
	if v, ok := params.StopDatetime.Get(); ok {
		out.StopDatetime = &v
	}
	if v, ok := params.AscentCurve.Get(); ok {
		out.AscentCurve = &v
	}
	if v, ok := params.DescentCurve.Get(); ok {
		out.DescentCurve = &v
	}
	if v, ok := params.Interpolate.Get(); ok {
		out.Interpolate = &v
	}
	if v, ok := params.Format.Get(); ok {
		s := string(v)
		out.Format = &s
	}
	if v, ok := params.Dataset.Get(); ok {
		out.Dataset = &v
	}
	if len(params.SimulateStages) > 0 {
		out.SimulateStages = make([]string, len(params.SimulateStages))
		for i, stage := range params.SimulateStages {
			out.SimulateStages[i] = string(stage)
		}
	}
	return out
}
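
The converter repeats one "Get, then take the address" pattern many times over; with generics it could collapse into a helper. A possible refactor sketch (`optPtr` is illustrative, not in the repository; the enum-typed fields such as Profile and Format would still need their string conversion):

```go
// optPtr turns an ogen-style optional accessor into a nil-able pointer.
func optPtr[T any](get func() (T, bool)) *T {
	if v, ok := get(); ok {
		return &v
	}
	return nil
}

// Usage: out.LaunchLatitude = optPtr(params.LaunchLatitude.Get)
```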
@@ -1,102 +0,0 @@
package errcodes

import (
	"net/http"
	"strings"
)

type ErrorCode struct {
	StatusCode int
	Message    string
	Details    string
}

func New(statusCode int, message string, details ...string) *ErrorCode {
	return &ErrorCode{
		StatusCode: statusCode,
		Message:    message,
		Details:    strings.Join(details, " "),
	}
}

func (e *ErrorCode) Error() string {
	return e.Message
}

func IsErr(err error) bool {
	_, ok := err.(*ErrorCode)
	return ok
}

func AsErr(err error) (*ErrorCode, bool) {
	if err == nil {
		return nil, false
	}
	errcode, ok := err.(*ErrorCode)
	return errcode, ok
}

func Join(errs ...error) error {
	if len(errs) == 0 {
		return nil
	}

	var messages []string
	var details []string

	for _, err := range errs {
		if err == nil {
			continue
		}

		if errcode, ok := AsErr(err); ok {
			messages = append(messages, errcode.Message)
			if errcode.Details != "" {
				details = append(details, errcode.Details)
			}
		} else {
			messages = append(messages, err.Error())
		}
	}

	if len(messages) == 0 {
		return nil
	}

	statusCode := http.StatusInternalServerError
	if len(errs) > 0 {
		if errcode, ok := AsErr(errs[0]); ok {
			statusCode = errcode.StatusCode
		}
	}

	return New(statusCode, strings.Join(messages, "; "), details...)
}

func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}

	if errcode, ok := AsErr(err); ok {
		return New(errcode.StatusCode, message, errcode.Message, errcode.Details)
	}

	return New(http.StatusInternalServerError, message, err.Error())
}

var (
	ErrNoDataset               = New(http.StatusNotFound, "no grib dataset found")
	ErrOutOfBounds             = New(http.StatusBadRequest, "requested time is out of bounds")
	ErrConfig                  = New(http.StatusInternalServerError, "configuration error")
	ErrConfigInvalidEnv        = New(http.StatusInternalServerError, "invalid environment configuration")
	ErrConfigMissingRequired   = New(http.StatusInternalServerError, "missing required configuration")
	ErrDownload                = New(http.StatusInternalServerError, "download error")
	ErrProcessing              = New(http.StatusInternalServerError, "data processing error")
	ErrNoCubeFilesFound        = New(http.StatusNotFound, "no cube files found")
	ErrNoValidCubeFilesFound   = New(http.StatusNotFound, "no valid cube files found")
	ErrLatestCubeFileIsTooOld  = New(http.StatusNotFound, "latest cube file is too old")
	ErrScheduler               = New(http.StatusInternalServerError, "scheduler error")
	ErrSchedulerInvalidJob     = New(http.StatusBadRequest, "invalid job configuration")
	ErrSchedulerTimeoutTooLong = New(http.StatusBadRequest, "job timeout too long", "timeout cannot exceed interval")
)
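
Errors built this way carry their HTTP status with them, so a transport layer can map any error to a response without per-error switch statements. A minimal usage sketch, based directly on the functions above:

```go
import (
	"net/http"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
)

// statusFor maps an error to an HTTP status: *ErrorCode values carry their
// own status; anything else defaults to 500.
func statusFor(err error) int {
	if errcode, ok := errcodes.AsErr(err); ok {
		return errcode.StatusCode
	}
	return http.StatusInternalServerError
}

// Wrap preserves the original status while re-phrasing the message:
// statusFor(errcodes.Wrap(errcodes.ErrNoDataset, "prediction unavailable"))
// returns 404.
```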
@@ -1,100 +0,0 @@
# GRIB Module

This module implements GRIB-file handling similar to tawhiri-downloader and tawhiri, but in Go.

## Key features

- **Downloading GRIB files** from NOMADS (GFS forecasts)
- **Assembling a 5D cube** (time, pressure, latitude, longitude, u/v variables)
- **Efficient storage** via mmap
- **Interpolation** of wind data for arbitrary coordinates and times
- **Caching** of results (in-memory)
- **Distributed locks** to prevent duplicate downloads

## Architecture

### Main components

- **Downloader** - fetches GRIB files from NOMADS
- **Cube** - manages the 5D data array through mmap
- **Extractor** - performs data interpolation
- **Cache** - caches query results
- **Service** - the main interface to the module

### Data layout

The 5D cube contains (see the index sketch below):
- **Time**: 17 time slices (0, 3, 6, ..., 48 hours)
- **Pressure**: 34 pressure levels (1000, 975, 950, ..., 2 hPa)
- **Latitude**: 361 points (-90° to +90°)
- **Longitude**: 720 points (0° to 359.5°)
- **Variables**: u-wind and v-wind

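A minimal sketch of how one (variable, time, pressure, lat, lon) sample maps to a byte offset in the mmap'd file, mirroring `cube.val` in cube.go. Dimension sizes are parameters here, since the figures above describe a coarser layout than the 0.25° `DefaultDatasetConfig` in config.go:

```go
// Byte offset of one float32 sample: variables are stored as consecutive
// blocks; within a block the order is time, then pressure, then lat, then lon.
func sampleOffset(varIdx, ti, pi, y, x, nT, nP, nLat, nLon int) int64 {
	idx := ((ti*nP+pi)*nLat+y)*nLon + x
	bytesPerVar := int64(nT) * int64(nP) * int64(nLat) * int64(nLon) * 4
	return int64(varIdx)*bytesPerVar + int64(idx)*4
}
```
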
## Usage

```go
// Create the service
cfg := grib.ServiceConfig{
	Dir:      "/tmp/grib",
	TTL:      24 * time.Hour,
	CacheTTL: 1 * time.Hour,
	Parallel: 4,
	Client:   &http.Client{Timeout: 30 * time.Second},
}

service, err := grib.New(cfg)
if err != nil {
	log.Fatal(err)
}
defer service.Close()

// Update the data
err = service.Update(ctx)

// Extract wind data
wind, err := service.Extract(ctx, lat, lon, alt, timestamp)
// wind[0] - u component of the wind
// wind[1] - v component of the wind
```

## Interpolation

The module performs 16-point interpolation (sketched below):
1. **Temporal interpolation** between the two nearest time slices
2. **Pressure interpolation** between the two nearest levels
3. **Bilinear interpolation** over latitude and longitude
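
Condensed, the scheme in extractor.go's `uv` combines the 16 corner samples of one wind component like this:

```go
func lerp(a, b, t float64) float64 { return a + t*(b-a) }

// interp16 blends the 2×2×2×2 corner samples c[t][p][y][x] of one wind
// component: bilinear in lat/lon (wy, wx), then linear in pressure (wp)
// and time (wt).
func interp16(c [2][2][2][2]float64, wy, wx, wp, wt float64) float64 {
	bilin := func(q [2][2]float64) float64 {
		return (1-wy)*((1-wx)*q[0][0]+wx*q[0][1]) + wy*((1-wx)*q[1][0]+wx*q[1][1])
	}
	lo := lerp(bilin(c[0][0]), bilin(c[0][1]), wp)
	hi := lerp(bilin(c[1][0]), bilin(c[1][1]), wp)
	return lerp(lo, hi, wt)
}
```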

## Caching

- **In-memory cache**: fast access to recently requested data

## Update schedule

Recommended cadence for calling `Update()`:
- **Every 6 hours** - to pick up fresh GFS forecasts
- **On startup** - to load the initial data
- **On demand** - when no data exists for the requested time

## Differences from tawhiri

### Advantages of the Go implementation:
- **High performance** (mmap, concurrent downloads)
- **Memory efficiency** (does not load the whole array into RAM)
- **Horizontal scaling** (stateless, many replicas)
- **Built-in caching** (in-memory)

### Notes:
- Uses `github.com/nilsmagnus/grib` instead of pygrib
- Implements its own interpolation logic

## Configuration

### Environment variables:
- `PREDICTOR_GRIB_DATASET_URL` - data source URL (optional)

### ServiceConfig parameters:
- `Dir` - directory for stored files
- `TTL` - data lifetime (default 24 hours)
- `CacheTTL` - cache lifetime (default 1 hour)
- `Parallel` - number of parallel downloads
- `Client` - HTTP client used for downloads
@@ -1,25 +0,0 @@
package grib

import (
	"testing"
	"time"
)

func TestAssembleCubeFromExisting(t *testing.T) {
	dir := "C:/tmp/grib"
	run := time.Date(2026, 1, 16, 6, 0, 0, 0, time.UTC)
	cubePath := dir + "/" + run.Format("20060102_15") + ".cube"

	t.Logf("Assembling cube from existing GRIB files...")
	t.Logf("Directory: %s", dir)
	t.Logf("Run: %s", run.Format("2006-01-02 15:04 MST"))
	t.Logf("Output: %s", cubePath)

	dc := DefaultDatasetConfig()
	err := assembleCube(dir, run, cubePath, &dc)
	if err != nil {
		t.Fatalf("Failed to assemble cube: %v", err)
	}

	t.Logf("✓ Cube assembled successfully!")
	t.Logf("Cube file: %s", cubePath)
}
@@ -1,36 +0,0 @@
package grib

import (
	"sync"
	"time"
)

type vec [2]float64

type item struct {
	v   vec
	exp time.Time
}

type memCache struct {
	ttl time.Duration
	m   sync.Map
}

func (c *memCache) get(k uint64) (vec, bool) {
	if v, ok := c.m.Load(k); ok {
		it := v.(item)

		if time.Now().Before(it.exp) {
			return it.v, true
		}

		c.m.Delete(k)
	}

	return vec{}, false
}

func (c *memCache) set(k uint64, v vec) {
	c.m.Store(k, item{v, time.Now().Add(c.ttl)})
}
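
`sync.Map` suits this read-mostly workload (many concurrent lookups, comparatively few writes), and expired entries are evicted lazily on the next read rather than by a background sweeper. Within the package, usage is simply:

```go
c := &memCache{ttl: time.Hour}
c.set(42, vec{3.5, -1.2}) // keys are produced by encodeKey (see service.go)
if v, ok := c.get(42); ok {
	_ = v[0] // u component
	_ = v[1] // v component
}
```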
@@ -1,139 +0,0 @@
package grib

import (
	"fmt"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	env "github.com/caarlos0/env/v11"
)

// DatasetConfig describes the parameters of a GFS dataset: the grid, time
// steps, pressure levels, and download URLs.
type DatasetConfig struct {
	// Grid
	Resolution float64 // grid step in degrees (0.25 or 0.5)
	NLat       int     // points along latitude (721 for 0.25°, 361 for 0.5°)
	NLon       int     // points along longitude (1440 for 0.25°, 720 for 0.5°)

	// Time
	NT       int // number of time steps (97 for 0-96 h at a 1 h step)
	MaxHour  int // last forecast hour (96)
	TimeStep int // interval between steps, in hours (1 or 3)

	// Vertical
	NP     int       // number of pressure levels
	Levels []float64 // pressure levels in hPa, descending (1000 … 1)

	// Variables in the cube (order matters: indices 0, 1, 2, …)
	NVar      int      // number of variables
	Variables []string // GRIB names used to filter the idx (HGT, UGRD, VGRD)

	// Download URLs (fmt templates: date, hour, hour, step)
	URLMask  string // primary pgrb2
	URLMaskB string // secondary pgrb2b

	// File names
	FileSuffix string // resolution token in file names ("0p25", "0p50")
}

// SizePerVar returns the size of one variable in the cube, in bytes.
func (dc *DatasetConfig) SizePerVar() int64 {
	return int64(dc.NT) * int64(dc.NP) * int64(dc.NLat) * int64(dc.NLon) * 4
}

// CubeSize returns the full cube size, in bytes.
func (dc *DatasetConfig) CubeSize() int64 {
	return dc.SizePerVar() * int64(dc.NVar)
}

// GridSize returns NLat * NLon.
func (dc *DatasetConfig) GridSize() int {
	return dc.NLat * dc.NLon
}

// InvResolution returns 1/Resolution, the factor that converts coordinates to indices.
func (dc *DatasetConfig) InvResolution() float64 {
	return 1.0 / dc.Resolution
}

// Steps returns the list of forecast hours [0, TimeStep, 2*TimeStep, …, MaxHour].
func (dc *DatasetConfig) Steps() []int {
	out := make([]int, 0, dc.NT)
	for h := 0; h <= dc.MaxHour; h += dc.TimeStep {
		out = append(out, h)
	}
	return out
}

// FileName returns the name of the primary GRIB file (pgrb2).
func (dc *DatasetConfig) FileName(run time.Time, step int) string {
	return fmt.Sprintf("gfs.t%02dz.pgrb2.%s.f%03d", run.Hour(), dc.FileSuffix, step)
}

// FileNameB returns the name of the secondary GRIB file (pgrb2b).
func (dc *DatasetConfig) FileNameB(run time.Time, step int) string {
	return fmt.Sprintf("gfs.t%02dz.pgrb2b.%s.f%03d", run.Hour(), dc.FileSuffix, step)
}

// GribURL returns the URL of the primary GRIB file.
func (dc *DatasetConfig) GribURL(run time.Time, step int) string {
	return fmt.Sprintf(dc.URLMask, run.Format("20060102"), run.Hour(), run.Hour(), step)
}

// GribURLB returns the URL of the secondary GRIB file.
func (dc *DatasetConfig) GribURLB(run time.Time, step int) string {
	return fmt.Sprintf(dc.URLMaskB, run.Format("20060102"), run.Hour(), run.Hour(), step)
}

// DefaultDatasetConfig returns the GFS 0.25° / 1 hour / 47 levels config.
func DefaultDatasetConfig() DatasetConfig {
	return DatasetConfig{
		Resolution: 0.25,
		NLat:       721,
		NLon:       1440,

		NT:       97,
		MaxHour:  96,
		TimeStep: 1,

		NP: 47,
		Levels: []float64{
			1000, 975, 950, 925, 900, 875, 850, 825, 800, 775,
			750, 725, 700, 675, 650, 625, 600, 575, 550, 525,
			500, 475, 450, 425, 400, 375, 350, 325, 300, 275,
			250, 225, 200, 175, 150, 125, 100, 70, 50, 30,
			20, 10, 7, 5, 3, 2, 1,
		},

		NVar:      3,
		Variables: []string{"HGT", "UGRD", "VGRD"},

		URLMask:  "https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2.0p25.f%03d",
		URLMaskB: "https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.%s/%02d/atmos/gfs.t%02dz.pgrb2b.0p25.f%03d",

		FileSuffix: "0p25",
	}
}

// ---------------------------------------------------------------------------

type Config struct {
	Dir      string        `env:"DIR" envDefault:"C:/tmp/grib"`
	TTL      time.Duration `env:"TTL" envDefault:"48h"`
	CacheTTL time.Duration `env:"CACHE_TTL" envDefault:"1h"`
	Parallel int           `env:"PARALLEL" envDefault:"8"`

	Dataset DatasetConfig
}

func NewConfig() (*Config, error) {
	cfg := &Config{}
	if err := env.ParseWithOptions(cfg, env.Options{
		PrefixTagName: "GSN_PREDICTOR_GRIB_",
	}); err != nil {
		return nil, errcodes.Wrap(err, "failed to parse GRIB config")
	}
	cfg.Dataset = DefaultDatasetConfig()
	return cfg, nil
}
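
For a concrete sense of what the file-name and URL templates expand to, evaluated directly from the format strings above:

```go
dc := DefaultDatasetConfig()
run := time.Date(2025, 12, 6, 6, 0, 0, 0, time.UTC)

fmt.Println(dc.FileName(run, 3))
// gfs.t06z.pgrb2.0p25.f003
fmt.Println(dc.GribURL(run, 3))
// https://noaa-gfs-bdp-pds.s3.amazonaws.com/gfs.20251206/06/atmos/gfs.t06z.pgrb2.0p25.f003
```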
@@ -1,56 +0,0 @@
package grib

import (
	"encoding/binary"
	"math"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

type cube struct {
	mm             mmap.MMap
	t, p, lat, lon int
	bytesPerVar    int64
	file           *os.File
}

func openCube(path string, dc *DatasetConfig) (*cube, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	mm, err := mmap.Map(f, mmap.RDONLY, 0)
	if err != nil {
		f.Close()
		return nil, err
	}

	return &cube{
		mm:          mm,
		t:           dc.NT,
		p:           dc.NP,
		lat:         dc.NLat,
		lon:         dc.NLon,
		bytesPerVar: dc.SizePerVar(),
		file:        f,
	}, nil
}

func (c *cube) val(varIdx, ti, pi, y, x int) float32 {
	idx := (((ti*c.p+pi)*c.lat + y) * c.lon) + x
	off := int64(varIdx)*c.bytesPerVar + int64(idx)*4
	bits := binary.LittleEndian.Uint32(c.mm[off : off+4])
	return math.Float32frombits(bits)
}

func (c *cube) Close() error {
	if c.mm != nil {
		c.mm.Unmap()
	}
	if c.file != nil {
		return c.file.Close()
	}
	return nil
}
@@ -1,14 +0,0 @@
package grib

type dataset struct {
	cube   *cube
	ds     *DatasetConfig
	runUTC int64 // unix seconds
}

func (d *dataset) Close() error {
	if d.cube != nil {
		return d.cube.Close()
	}
	return nil
}
@@ -1,126 +0,0 @@
package grib

import "math"

func lerp(a, b, t float64) float64 { return a + t*(b-a) }

// ghInterp returns interpolated geopotential height at given time/pressure/lat/lon
func (d *dataset) ghInterp(ti, pi int, y0, y1, x0, x1 int, wy, wx float64) float64 {
	g00 := d.cube.val(0, ti, pi, y0, x0)
	g10 := d.cube.val(0, ti, pi, y0, x1)
	g01 := d.cube.val(0, ti, pi, y1, x0)
	g11 := d.cube.val(0, ti, pi, y1, x1)
	return (1-wy)*((1-wx)*float64(g00)+wx*float64(g10)) + wy*((1-wx)*float64(g01)+wx*float64(g11))
}

// searchAltLevel uses geopotential height to find pressure level bracket for target altitude.
func (d *dataset) searchAltLevel(alt float64, ti, y0, y1, x0, x1 int, wy, wx float64) (int, float64) {
	levels := d.ds.Levels
	nLevels := len(levels)

	lo, hi := 0, nLevels-1
	for lo < hi-1 {
		mid := (lo + hi) / 2
		ghMid := d.ghInterp(ti, mid, y0, y1, x0, x1, wy, wx)
		if ghMid < alt {
			lo = mid
		} else {
			hi = mid
		}
	}

	ghLo := d.ghInterp(ti, lo, y0, y1, x0, x1, wy, wx)
	ghHi := d.ghInterp(ti, hi, y0, y1, x0, x1, wy, wx)

	wp := 0.0
	if ghHi != ghLo {
		wp = (alt - ghLo) / (ghHi - ghLo)
	}
	if wp < 0 {
		wp = 0
	}
	if wp > 1 {
		wp = 1
	}

	return lo, wp
}

// uv interpolates the wind across 4 dimensions (time, pressure, lat, lon).
func (d *dataset) uv(lat, lon, alt float64, tHours float64) (float64, float64) {
	if lon < 0 {
		lon += 360
	}

	inv := d.ds.InvResolution()

	// GRIB scan north→south: index 0 = 90°N
	iy := (90 - lat) * inv
	y0 := int(math.Floor(iy))
	if y0 < 0 {
		y0 = 0
	}
	if y0 >= d.cube.lat-1 {
		y0 = d.cube.lat - 2
	}
	y1 := y0 + 1
	wy := iy - float64(y0)

	ix := lon * inv
	x0 := int(math.Floor(ix)) % d.cube.lon
	x1 := (x0 + 1) % d.cube.lon
	wx := ix - float64(x0)

	// Time: divide tHours by the step to get the cube index
	tIdx := tHours / float64(d.ds.TimeStep)
	it0 := int(math.Floor(tIdx))
	if it0 < 0 {
		it0 = 0
	}
	if it0 >= d.cube.t-1 {
		it0 = d.cube.t - 2
	}
	wt := tIdx - float64(it0)

	// ISA: altitude → pressure → level index
	levels := d.ds.Levels
	p := pressureFromAlt(alt)
	ip0 := 0
	for ip0+1 < len(levels) && levels[ip0+1] > p {
		ip0++
	}
	ip1 := ip0 + 1
	if ip1 >= len(levels) {
		ip1 = len(levels) - 1
	}
	wp := 0.0
	if levels[ip0] != levels[ip1] {
		wp = (levels[ip0] - p) / (levels[ip0] - levels[ip1])
	}

	fetch := func(ti, pi int) (float64, float64) {
		u00 := d.cube.val(1, ti, pi, y0, x0)
		u10 := d.cube.val(1, ti, pi, y0, x1)
		u01 := d.cube.val(1, ti, pi, y1, x0)
		u11 := d.cube.val(1, ti, pi, y1, x1)
		v00 := d.cube.val(2, ti, pi, y0, x0)
		v10 := d.cube.val(2, ti, pi, y0, x1)
		v01 := d.cube.val(2, ti, pi, y1, x0)
		v11 := d.cube.val(2, ti, pi, y1, x1)
		uxy := (1-wy)*((1-wx)*float64(u00)+wx*float64(u10)) + wy*((1-wx)*float64(u01)+wx*float64(u11))
		vxy := (1-wy)*((1-wx)*float64(v00)+wx*float64(v10)) + wy*((1-wx)*float64(v01)+wx*float64(v11))
		return uxy, vxy
	}

	u0p0, v0p0 := fetch(it0, ip0)
	u0p1, v0p1 := fetch(it0, ip1)
	u1p0, v1p0 := fetch(it0+1, ip0)
	u1p1, v1p1 := fetch(it0+1, ip1)
	uLow := lerp(u0p0, u0p1, wp)
	vLow := lerp(v0p0, v0p1, wp)
	uHig := lerp(u1p0, u1p1, wp)
	vHig := lerp(v1p0, v1p1, wp)
	u := lerp(uLow, uHig, wt)
	v := lerp(vLow, vHig, wt)
	return u, v
}
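
`pressureFromAlt` is defined elsewhere in the package and is not part of this diff. The "ISA" comment above suggests it applies the International Standard Atmosphere model, which would look roughly like this (a sketch under that assumption, not the repository's actual code):

```go
// Hypothetical ISA sketch: geometric altitude (m) to pressure (hPa).
func pressureFromAlt(alt float64) float64 {
	const p0 = 1013.25 // sea-level standard pressure, hPa
	if alt < 11000 {
		// Troposphere: constant lapse rate of 6.5 K/km.
		return p0 * math.Pow(1-2.25577e-5*alt, 5.25588)
	}
	// Isothermal layer above the 11 km tropopause (~226.32 hPa).
	return 226.32 * math.Exp(-(alt-11000)/6341.62)
}
```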
@@ -1,291 +0,0 @@
package grib

import (
	"context"
	"encoding/binary"
	"math"
	"os"
	"path/filepath"
	"strings"
	"sync/atomic"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	"github.com/edsrzf/mmap-go"
	"github.com/nilsmagnus/grib/griblib"
)

type Service interface {
	Update(ctx context.Context) error
	Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error)
	Close() error
	GetStatus() (ready bool, lastUpdate time.Time, isFresh bool, errMsg string)
}

type service struct {
	cfg   *Config
	cache memCache
	data  atomic.Pointer[dataset]
}

func New(cfg *Config) (Service, error) {
	if cfg.TTL == 0 {
		cfg.TTL = 24 * time.Hour
	}
	if err := os.MkdirAll(cfg.Dir, 0o755); err != nil {
		return nil, err
	}
	s := &service{cfg: cfg, cache: memCache{ttl: cfg.CacheTTL}}

	// Try to load existing dataset on startup
	if err := s.loadExistingDataset(); err != nil {
		// Log error but don't fail startup - dataset will be loaded on first Update()
	}

	return s, nil
}

func (s *service) loadExistingDataset() error {
	pattern := filepath.Join(s.cfg.Dir, "*.cube")
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return err
	}

	if len(matches) == 0 {
		return errcodes.ErrNoCubeFilesFound
	}

	var latestFile string
	var latestTime time.Time

	for _, match := range matches {
		info, err := os.Stat(match)
		if err != nil {
			continue
		}
		if info.ModTime().After(latestTime) {
			latestTime = info.ModTime()
			latestFile = match
		}
	}

	if latestFile == "" {
		return errcodes.ErrNoValidCubeFilesFound
	}

	if time.Since(latestTime) > s.cfg.TTL {
		return errcodes.Wrap(errcodes.ErrLatestCubeFileIsTooOld, "latest cube file is too old")
	}

	dc := &s.cfg.Dataset
	c, err := openCube(latestFile, dc)
	if err != nil {
		return err
	}

	base := filepath.Base(latestFile)
	runStr := strings.TrimSuffix(base, ".cube")
	run, err := time.Parse("20060102_15", runStr)
	if err != nil {
		c.Close()
		return err
	}

	s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()})
	return nil
}
func (s *service) Update(ctx context.Context) error {
	if d := s.data.Load(); d != nil {
		runTime := time.Unix(d.runUTC, 0)
		if time.Since(runTime) < s.cfg.TTL {
			return nil
		}
	}

	dc := &s.cfg.Dataset
	run := nearestRun(time.Now().UTC().Add(-6 * time.Hour))

	cubePath := filepath.Join(s.cfg.Dir, run.Format("20060102_15")) + ".cube"
	if _, err := os.Stat(cubePath); err == nil {
		info, err := os.Stat(cubePath)
		if err == nil && time.Since(info.ModTime()) < s.cfg.TTL {
			c, err := openCube(cubePath, dc)
			if err != nil {
				return err
			}
			s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()})
			s.cache = memCache{ttl: s.cfg.CacheTTL}
			return nil
		}
	}

	downloadCtx, cancel := context.WithTimeout(ctx, 60*time.Minute)
	defer cancel()

	dl := NewPartialDownloader(s.cfg.Dir, s.cfg.Parallel, dc)
	if err := dl.Run(downloadCtx, run); err != nil {
		return err
	}

	if _, err := os.Stat(cubePath); err != nil {
		if err := assembleCube(s.cfg.Dir, run, cubePath, dc); err != nil {
			return err
		}
	}

	c, err := openCube(cubePath, dc)
	if err != nil {
		return err
	}
	s.data.Store(&dataset{cube: c, ds: dc, runUTC: run.Unix()})
	s.cache = memCache{ttl: s.cfg.CacheTTL}
	return nil
}
|
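// Sketch: a typical caller refreshes the dataset on a timer; svc and ctx
// are assumed to be in scope, and the interval is illustrative.
//
//	go func() {
//		tick := time.NewTicker(time.Hour)
//		defer tick.Stop()
//		for range tick.C {
//			if err := svc.Update(ctx); err != nil {
//				log.Ctx(ctx).Warn("grib update failed", zap.Error(err))
//			}
//		}
//	}()
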
func assembleCube(dir string, run time.Time, cubePath string, dc *DatasetConfig) error {
	sizePerVar := dc.SizePerVar()
	total := dc.CubeSize()
	gridBytes := int64(dc.GridSize()) * 4 // one float32 per grid point

	f, err := os.Create(cubePath)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := f.Truncate(total); err != nil {
		return err
	}
	mm, err := mmap.MapRegion(f, int(total), mmap.RDWR, 0, 0)
	if err != nil {
		return err
	}
	defer mm.Unmap()

	// Map pressure level (hPa) -> level index within the cube.
	pIndex := make(map[int]int)
	for i, p := range dc.Levels {
		pIndex[int(math.Round(p))] = i
	}

	processFile := func(fn string, ti int) error {
		file, err := os.Open(fn)
		if err != nil {
			return err
		}

		messages, err := griblib.ReadMessages(file)
		file.Close()
		if err != nil {
			return err
		}

		for _, m := range messages {
			if m.Section4.ProductDefinitionTemplateNumber != 0 {
				continue
			}

			product := m.Section4.ProductDefinitionTemplate

			// GRIB2 parameter codes: category 2 (momentum) numbers 2/3 are
			// the u/v wind components; category 3 (mass) number 5 is
			// geopotential height.
			var varIdx int
			if product.ParameterCategory == 2 {
				switch product.ParameterNumber {
				case 2: // u-wind
					varIdx = 1
				case 3: // v-wind
					varIdx = 2
				default:
					continue
				}
			} else if product.ParameterCategory == 3 && product.ParameterNumber == 5 {
				varIdx = 0 // geopotential height
			} else {
				continue
			}

			// Surface type 100 = isobaric level; its value is in Pa.
			if product.FirstSurface.Type != 100 {
				continue
			}

			pressure := float64(product.FirstSurface.Value) / 100.0 // Pa -> hPa
			pIdx, ok := pIndex[int(math.Round(pressure))]
			if !ok {
				continue
			}

			vals := m.Data()
			raw := make([]byte, len(vals)*4)
			for i, v := range vals {
				binary.LittleEndian.PutUint32(raw[i*4:], math.Float32bits(float32(v)))
			}
			// Cube layout: variable-major, then time step, then pressure
			// level, then the flattened lat/lon grid.
			base := int64(varIdx)*sizePerVar + (int64(ti)*int64(dc.NP)+int64(pIdx))*gridBytes
			copy(mm[base:base+int64(len(raw))], raw)
		}
		return nil
	}

	steps := dc.Steps()
	for ti, step := range steps {
		fn := filepath.Join(dir, dc.FileName(run, step))
		if err := processFile(fn, ti); err != nil {
			return err
		}

		fnB := filepath.Join(dir, dc.FileNameB(run, step))
		if err := processFile(fnB, ti); err != nil {
			return err
		}
	}
	return mm.Flush()
}

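// Worked example of the layout above (dimensions illustrative): with
// NP = 31 levels and gridBytes = 181*360*4 for a 1° grid, the v-wind field
// (varIdx = 2) at time step ti = 4, level pIdx = 10 starts at
//
//	base = 2*sizePerVar + (4*31 + 10)*gridBytes
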
func (s *service) Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) {
	var zero [2]float64
	d := s.data.Load()
	if d == nil {
		return zero, errcodes.ErrNoDataset
	}
	maxDur := time.Duration(s.cfg.Dataset.MaxHour) * time.Hour
	if ts.Before(time.Unix(d.runUTC, 0)) || ts.After(time.Unix(d.runUTC, 0).Add(maxDur)) {
		return zero, errcodes.ErrOutOfBounds
	}

	key := encodeKey(lat, lon, alt, ts)
	if v, ok := s.cache.get(key); ok {
		return [2]float64(v), nil
	}

	td := ts.Sub(time.Unix(d.runUTC, 0)).Hours()
	u, v := d.uv(lat, lon, alt, td)
	out := [2]float64{u, v}

	s.cache.set(key, vec(out))
	return out, nil
}

func (s *service) Close() error {
	if d := s.data.Load(); d != nil {
		return d.Close()
	}
	return nil
}

func (s *service) GetStatus() (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) {
	d := s.data.Load()
	if d == nil {
		return false, time.Time{}, false, "no dataset loaded"
	}
	runTime := time.Unix(d.runUTC, 0)
	fresh := time.Since(runTime) < s.cfg.TTL
	if !fresh {
		return false, runTime, false, "dataset is too old"
	}
	return true, runTime, true, ""
}

@ -1,350 +0,0 @@
package grib

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
)

// PartialDownloader fetches only the required fields from GRIB files,
// using HTTP Range requests driven by the .idx index files.
type PartialDownloader struct {
	Dir       string
	Parallel  int
	Client    *http.Client
	Variables []string
	ds        *DatasetConfig
}

// NewPartialDownloader creates a new partial downloader.
func NewPartialDownloader(dir string, parallel int, dc *DatasetConfig) *PartialDownloader {
	return &PartialDownloader{
		Dir:      dir,
		Parallel: parallel,
		Client: &http.Client{
			Timeout: 60 * time.Second,
		},
		Variables: dc.Variables,
		ds:        dc,
	}
}

// idxEntry represents one record from an .idx index file.
type idxEntry struct {
	Index     int
	ByteStart int64
	Date      string
	Variable  string
	Level     string
	Forecast  string
}

type ProgressWriter struct {
	Total      int64
	Downloaded int64
	OnProgress func(percent float64)
}

func (pw *ProgressWriter) Write(p []byte) (int, error) {
	n := len(p)
	pw.Downloaded += int64(n)
	if pw.Total > 0 && pw.OnProgress != nil {
		percent := float64(pw.Downloaded) / float64(pw.Total) * 100
		pw.OnProgress(percent)
	}
	return n, nil
}

// parseIdx parses an .idx file and returns its entries.
func (d *PartialDownloader) parseIdx(body []byte) []idxEntry {
	var entries []idxEntry
	lines := strings.Split(string(body), "\n")

	for _, line := range lines {
		if line == "" {
			continue
		}
		parts := strings.Split(line, ":")
		if len(parts) < 7 {
			continue
		}

		byteStart, _ := strconv.ParseInt(parts[1], 10, 64)
		entries = append(entries, idxEntry{
			Index:     len(entries),
			ByteStart: byteStart,
			Date:      parts[2],
			Variable:  parts[3],
			Level:     parts[4],
			Forecast:  parts[5],
		})
	}
	return entries
}

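// For reference, a GFS .idx line is colon-separated, e.g.
//
//	347:198886692:d=2025010100:UGRD:500 mb:6 hour fcst:
//
// i.e. message number, starting byte offset, date, variable, level and
// forecast horizon; the concrete values here are illustrative.
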
// filterEntries keeps only the entries for the required variables on
// pressure levels.
func (d *PartialDownloader) filterEntries(entries []idxEntry) []idxEntry {
	var filtered []idxEntry

	for _, e := range entries {
		isNeededVar := false
		for _, v := range d.Variables {
			if v == e.Variable {
				isNeededVar = true
				break
			}
		}

		isPressureLevel := strings.HasSuffix(e.Level, " mb")

		if isNeededVar && isPressureLevel {
			filtered = append(filtered, e)
		}
	}

	return filtered
}

// doWithRetry performs the request with up to three attempts and
// exponential backoff; 5xx responses are treated as retryable.
func (d *PartialDownloader) doWithRetry(ctx context.Context, req *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error

	backoff := 1 * time.Second
	maxRetries := 3

	for i := 0; i < maxRetries; i++ {
		resp, err = d.Client.Do(req)
		if err == nil && resp.StatusCode < 500 {
			return resp, nil
		}

		if resp != nil {
			if err == nil {
				// Keep a meaningful error around in case this was the last attempt.
				err = errcodes.Wrap(errcodes.ErrDownload, "server error: "+resp.Status)
			}
			resp.Body.Close()
		}

		log.Ctx(ctx).Warn("retry download", zap.Int("attempt", i+1), zap.Error(err))

		select {
		case <-time.After(backoff):
			backoff *= 2
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	return nil, err
}

// downloadRange downloads a byte range from url into out.
func (d *PartialDownloader) downloadRange(ctx context.Context, url string, start, end int64, out io.Writer) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}

	rangeHeader := fmt.Sprintf("bytes=%d-", start)
	if end > 0 {
		rangeHeader = fmt.Sprintf("bytes=%d-%d", start, end)
	}
	req.Header.Set("Range", rangeHeader)

	resp, err := d.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// 200 is accepted alongside 206: servers that ignore Range return the
	// whole object.
	if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
		return errcodes.Wrap(errcodes.ErrDownload, "bad status: "+resp.Status)
	}

	_, err = io.Copy(out, resp.Body)
	return err
}

func (d *PartialDownloader) downloadFieldsFromURL(ctx context.Context, url string, dst string, step int) (err error) {
	idxURL := url + ".idx"
	tmp := dst + ".part"

	// Skip files that were already downloaded.
	if info, err := os.Stat(dst); err == nil && info.Size() > 0 {
		return nil
	}

	reqIdx, err := http.NewRequestWithContext(ctx, http.MethodGet, idxURL, nil)
	if err != nil {
		return err
	}
	respIdx, err := d.doWithRetry(ctx, reqIdx)
	if err != nil {
		return errcodes.Wrap(err, "failed to get idx")
	}
	defer respIdx.Body.Close()

	idxBody, err := io.ReadAll(respIdx.Body)
	if err != nil {
		return errcodes.Wrap(err, "failed to read idx")
	}
	entries := d.parseIdx(idxBody)
	filtered := d.filterEntries(entries)
	if len(filtered) == 0 {
		return nil
	}

	var totalBytes int64
	type chunk struct{ start, end int64 }
	chunks := make([]chunk, 0, len(filtered))

	// A message ends where the next .idx entry begins; the final message has
	// no known end (-1) and is fetched with an open-ended Range.
	for _, entry := range filtered {
		var endByte int64 = -1
		for j, e := range entries {
			if e.ByteStart == entry.ByteStart && j+1 < len(entries) {
				endByte = entries[j+1].ByteStart - 1
				break
			}
		}
		chunks = append(chunks, chunk{entry.ByteStart, endByte})
		if endByte > 0 {
			totalBytes += (endByte - entry.ByteStart + 1)
		}
	}

	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}

	var downloaded int64

	err = func() error {
		defer f.Close()
		bufWriter := bufio.NewWriterSize(f, 1024*1024)

		for i, c := range chunks {
			countingWriter := &proxyWriter{
				Writer: bufWriter,
				OnWrite: func(n int) {
					downloaded += int64(n)
					if totalBytes > 0 && i%20 == 0 {
						pct := float64(downloaded) / float64(totalBytes) * 100
						log.Ctx(ctx).Debug("download progress",
							zap.Int("step", step),
							zap.String("pct", fmt.Sprintf("%.1f%%", pct)))
					}
				},
			}

			if err := d.downloadRange(ctx, url, c.start, c.end, countingWriter); err != nil {
				return err
			}
		}
		return bufWriter.Flush()
	}()

	if err != nil {
		os.Remove(tmp)
		return err
	}

	return d.safeRename(tmp, dst)
}

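// The .part + rename sequence above keeps readers from ever observing a
// half-written GRIB file: the final name only appears once the contents
// are complete.
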
// proxyWriter forwards writes to the wrapped Writer and reports the number
// of bytes written via OnWrite.
type proxyWriter struct {
	io.Writer
	OnWrite func(int)
}

func (p *proxyWriter) Write(data []byte) (int, error) {
	n, err := p.Writer.Write(data)
	if n > 0 && p.OnWrite != nil {
		p.OnWrite(n)
	}
	return n, err
}

// safeRename retries the rename a few times to ride out transient errors
// (e.g. the destination being briefly held open on some platforms).
func (d *PartialDownloader) safeRename(src, dst string) error {
	var lastErr error
	for i := 0; i < 5; i++ {
		err := os.Rename(src, dst)
		if err == nil {
			return nil
		}
		lastErr = err
		time.Sleep(150 * time.Millisecond)
	}
	return fmt.Errorf("rename failed: %w", lastErr)
}

// Run downloads every required file (pgrb2 + pgrb2b) for the given model
// run, bounded to at most Parallel concurrent transfers; the first failure
// cancels the remaining downloads via the errgroup context.
func (d *PartialDownloader) Run(ctx context.Context, run time.Time) error {
	log.Ctx(ctx).Info("starting partial download",
		zap.Time("run", run),
		zap.Strings("variables", d.Variables))

	g, ctx := errgroup.WithContext(ctx)
	sem := make(chan struct{}, d.Parallel)
	steps := d.ds.Steps()

	for _, step := range steps {
		step := step

		// Download primary pgrb2
		sem <- struct{}{}
		g.Go(func() error {
			defer func() { <-sem }()
			url := d.ds.GribURL(run, step)
			dst := filepath.Join(d.Dir, d.ds.FileName(run, step))
			return d.downloadFieldsFromURL(ctx, url, dst, step)
		})

		// Download secondary pgrb2b
		sem <- struct{}{}
		g.Go(func() error {
			defer func() { <-sem }()
			url := d.ds.GribURLB(run, step)
			dst := filepath.Join(d.Dir, d.ds.FileNameB(run, step))
			return d.downloadFieldsFromURL(ctx, url, dst, step)
		})
	}

	return g.Wait()
}

// GetLatestModelRun finds the most recent available GFS forecast by probing
// runs backwards in 6-hour steps until a HEAD request for the final
// forecast hour succeeds.
func GetLatestModelRun(ctx context.Context, dc *DatasetConfig) (time.Time, error) {
	now := time.Now().UTC()
	hour := now.Hour() - (now.Hour() % 6)
	current := time.Date(now.Year(), now.Month(), now.Day(), hour, 0, 0, 0, time.UTC)

	client := &http.Client{Timeout: 10 * time.Second}

	for i := 0; i < 8; i++ {
		url := dc.GribURL(current, dc.MaxHour)

		req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
		if err != nil {
			current = current.Add(-6 * time.Hour)
			continue
		}

		resp, err := client.Do(req)
		if err == nil && resp.StatusCode == http.StatusOK {
			resp.Body.Close()
			log.Ctx(ctx).Info("found latest model run", zap.Time("run", current))
			return current, nil
		}
		if resp != nil {
			resp.Body.Close()
		}

		current = current.Add(-6 * time.Hour)
	}

	return time.Time{}, errcodes.Wrap(errcodes.ErrDownload, "no recent GFS forecast found")
}

@ -1,7 +0,0 @@
package grib

import "math"

// pressureFromAlt converts a geometric altitude in metres to pressure in
// hPa using the ICAO ISA barometric formula for the troposphere.
func pressureFromAlt(alt float64) float64 {
	return 1013.25 * math.Pow(1-alt/44307.69396, 5.255877)
}

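// Worked check: pressureFromAlt(5574) ≈ 500 hPa, matching the ISA standard
// height of the 500 hPa level (~5,570 m).
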
@ -1,20 +0,0 @@
package grib

import (
	"fmt"
	"hash/fnv"
	"time"
)

// nearestRun rounds t down to the nearest 6-hourly GFS run (00/06/12/18 UTC).
func nearestRun(t time.Time) time.Time {
	u := t.UTC()
	h := u.Hour() - u.Hour()%6
	return time.Date(u.Year(), u.Month(), u.Day(), h, 0, 0, 0, time.UTC)
}

// encodeKey hashes its arguments into a single FNV-1a cache key.
func encodeKey(a ...any) uint64 {
	h := fnv.New64a()
	for _, v := range a {
		fmt.Fprint(h, v)
	}
	return h.Sum64()
}

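// Example: nearestRun(2025-01-01 14:37 UTC) returns 2025-01-01 12:00 UTC.
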
@ -1,23 +0,0 @@
package log

import (
	"context"

	"go.uber.org/zap"
)

type ctxLogKey struct{}

// ToCtx attaches the logger to the context.
func ToCtx(ctx context.Context, lg *zap.Logger) context.Context {
	return context.WithValue(ctx, ctxLogKey{}, lg)
}

// Ctx returns the logger stored in the context, falling back to the global
// zap logger when none is present.
func Ctx(ctx context.Context) *zap.Logger {
	lg, ok := ctx.Value(ctxLogKey{}).(*zap.Logger)
	if !ok || lg == nil {
		zap.L().Error("no logger in context, using global")
		return zap.L()
	}

	return lg
}

@ -1,12 +0,0 @@
package service

import (
	"context"
	"time"
)

type Grib interface {
	Update(ctx context.Context) error
	Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error)
	Close() error
}

@ -1,684 +0,0 @@
package service

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"math"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/ds"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"go.uber.org/zap"
)

var ErrInvalidParameters = errcodes.New(400, "missing required prediction parameters")

// Stage represents a prediction stage (ascent, descent, float)
type Stage struct {
	Name      string
	Results   []ds.PredicitonResult
	StartTime time.Time
	EndTime   time.Time
}

// shouldSimulateStage checks if a given stage should be simulated based on the SimulateStages filter
func shouldSimulateStage(params ds.PredictionParameters, stage string) bool {
	// If no filter is specified, simulate all stages
	if len(params.SimulateStages) == 0 {
		return true
	}

	// Check if the stage is in the filter list
	for _, s := range params.SimulateStages {
		if s == stage {
			return true
		}
	}
	return false
}

// CustomCurve represents a custom ascent/descent curve as paired samples of
// altitude and elapsed time.
type CustomCurve struct {
	Altitude []float64 `json:"altitude"`
	Time     []float64 `json:"time"` // seconds from start
}

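// A custom curve arrives as base64-encoded JSON; for example the payload
//
//	{"altitude":[0,10000,30000],"time":[0,1800,5400]}
//
// would be base64-encoded before being passed to parseCustomCurve below
// (the numbers are illustrative).
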
func (s *Service) PerformPrediction(ctx context.Context, params ds.PredictionParameters) ([]ds.PredicitonResult, error) {
	// Validate required parameters
	if params.LaunchLatitude == nil || params.LaunchLongitude == nil || params.LaunchAltitude == nil || params.LaunchDatetime == nil {
		return nil, ErrInvalidParameters
	}

	// Apply defaults for optional parameters
	profile := "standard_profile"
	if params.Profile != nil {
		profile = *params.Profile
	}

	ascentRate := 5.0
	if params.AscentRate != nil {
		ascentRate = *params.AscentRate
	}

	burstAltitude := 30000.0
	if params.BurstAltitude != nil {
		burstAltitude = *params.BurstAltitude
	}

	descentRate := 5.0
	if params.DescentRate != nil {
		descentRate = *params.DescentRate
	}

	floatAltitude := 0.0
	if params.FloatAltitude != nil {
		floatAltitude = *params.FloatAltitude
	}

	// Parse custom curves if provided; malformed curves are ignored and the
	// constant rates above are used instead.
	var ascentCurve, descentCurve *CustomCurve
	if params.AscentCurve != nil && *params.AscentCurve != "" {
		if curve, err := parseCustomCurve(*params.AscentCurve); err == nil {
			ascentCurve = curve
		}
	}
	if params.DescentCurve != nil && *params.DescentCurve != "" {
		if curve, err := parseCustomCurve(*params.DescentCurve); err == nil {
			descentCurve = curve
		}
	}

	log.Ctx(ctx).Info("prediction starting",
		zap.String("profile", profile),
		zap.Float64("lat", *params.LaunchLatitude),
		zap.Float64("lon", *params.LaunchLongitude),
		zap.Float64("alt", *params.LaunchAltitude),
		zap.Time("time", *params.LaunchDatetime),
	)

	var allResults []ds.PredicitonResult

	switch profile {
	case "standard_profile":
		allResults = s.standardProfile(ctx, params, ascentRate, burstAltitude, descentRate, ascentCurve, descentCurve)
	case "float_profile":
		allResults = s.floatProfile(ctx, params, ascentRate, burstAltitude, floatAltitude, descentRate, ascentCurve, descentCurve)
	case "reverse_profile":
		allResults = s.reverseProfile(ctx, params, ascentRate, burstAltitude, descentRate, ascentCurve, descentCurve)
	case "custom_profile":
		allResults = s.customProfile(ctx, params, ascentCurve, descentCurve)
	default:
		return nil, errcodes.New(400, "unsupported profile: "+profile)
	}

	log.Ctx(ctx).Info("prediction complete", zap.Int("total_steps", len(allResults)))
	return allResults, nil
}

func (s *Service) standardProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult {
	var results []ds.PredicitonResult
	var lastResult ds.PredicitonResult

	// Stage 1: Ascent
	if shouldSimulateStage(params, "ascent") {
		ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve)
		results = append(results, ascentResults...)
		if len(ascentResults) > 0 {
			lastResult = ascentResults[len(ascentResults)-1]
		}
	} else {
		// If ascent is skipped, use initial position as starting point
		lastResult = ds.PredicitonResult{
			Latitude:  params.LaunchLatitude,
			Longitude: params.LaunchLongitude,
			Altitude:  &burstAltitude,
			Timestamp: params.LaunchDatetime,
		}
	}

	// Stage 2: Descent
	if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil {
		descentParams := ds.PredictionParameters{
			LaunchLatitude:  lastResult.Latitude,
			LaunchLongitude: lastResult.Longitude,
			LaunchAltitude:  lastResult.Altitude,
			LaunchDatetime:  lastResult.Timestamp,
		}

		descentResults := s.simulateDescent(ctx, descentParams, descentRate, 0, descentCurve)
		results = append(results, descentResults...)
	}

	return results
}

func (s *Service) floatProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, floatAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult {
	var results []ds.PredicitonResult
	var lastResult ds.PredicitonResult

	// Stage 1: Ascent to float altitude
	if shouldSimulateStage(params, "ascent") {
		ascentResults := s.simulateAscent(ctx, params, ascentRate, floatAltitude, ascentCurve)
		results = append(results, ascentResults...)
		if len(ascentResults) > 0 {
			lastResult = ascentResults[len(ascentResults)-1]
		}
	} else {
		// If ascent is skipped, use initial position at float altitude as starting point
		lastResult = ds.PredicitonResult{
			Latitude:  params.LaunchLatitude,
			Longitude: params.LaunchLongitude,
			Altitude:  &floatAltitude,
			Timestamp: params.LaunchDatetime,
		}
	}

	// Stage 2: Float (simulate for some time)
	if shouldSimulateStage(params, "float") && lastResult.Latitude != nil {
		floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute) // Float for 30 minutes
		results = append(results, floatResults...)
		if len(floatResults) > 0 {
			lastResult = floatResults[len(floatResults)-1]
		}
	}

	// Stage 3: Descent
	if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil {
		descentParams := ds.PredictionParameters{
			LaunchLatitude:  lastResult.Latitude,
			LaunchLongitude: lastResult.Longitude,
			LaunchAltitude:  lastResult.Altitude,
			LaunchDatetime:  lastResult.Timestamp,
		}

		descentResults := s.simulateDescent(ctx, descentParams, descentRate, 0, descentCurve)
		results = append(results, descentResults...)
	}

	return results
}

func (s *Service) reverseProfile(ctx context.Context, params ds.PredictionParameters, ascentRate, burstAltitude, descentRate float64, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult {
	var results []ds.PredicitonResult
	var lastResult ds.PredicitonResult

	// Stage 1: Ascent
	if shouldSimulateStage(params, "ascent") {
		ascentResults := s.simulateAscent(ctx, params, ascentRate, burstAltitude, ascentCurve)
		results = append(results, ascentResults...)
		if len(ascentResults) > 0 {
			lastResult = ascentResults[len(ascentResults)-1]
		}
	} else {
		// If ascent is skipped, use initial position at burst altitude as starting point
		lastResult = ds.PredicitonResult{
			Latitude:  params.LaunchLatitude,
			Longitude: params.LaunchLongitude,
			Altitude:  &burstAltitude,
			Timestamp: params.LaunchDatetime,
		}
	}

	// Stage 2: Descent to float altitude
	floatAlt := 0.0
	if params.FloatAltitude != nil {
		floatAlt = *params.FloatAltitude
	}

	if shouldSimulateStage(params, "descent") && lastResult.Latitude != nil {
		descentParams := ds.PredictionParameters{
			LaunchLatitude:  lastResult.Latitude,
			LaunchLongitude: lastResult.Longitude,
			LaunchAltitude:  lastResult.Altitude,
			LaunchDatetime:  lastResult.Timestamp,
		}

		descentResults := s.simulateDescent(ctx, descentParams, descentRate, floatAlt, descentCurve)
		results = append(results, descentResults...)
		if len(descentResults) > 0 {
			lastResult = descentResults[len(descentResults)-1]
		}
	} else if floatAlt > 0 {
		// If descent is skipped but we need to float, position at float altitude
		lastResult = ds.PredicitonResult{
			Latitude:  lastResult.Latitude,
			Longitude: lastResult.Longitude,
			Altitude:  &floatAlt,
			Timestamp: lastResult.Timestamp,
		}
	}

	// Stage 3: Float
	if shouldSimulateStage(params, "float") && floatAlt > 0 && lastResult.Latitude != nil {
		floatResults := s.simulateFloat(ctx, lastResult, 30*time.Minute)
		results = append(results, floatResults...)
	}

	return results
}

func (s *Service) customProfile(ctx context.Context, params ds.PredictionParameters, ascentCurve, descentCurve *CustomCurve) []ds.PredicitonResult {
	var results []ds.PredicitonResult
	var lastResult ds.PredicitonResult

	// Custom ascent
	if shouldSimulateStage(params, "ascent") && ascentCurve != nil {
		ascentResults := s.simulateCustomAscent(ctx, params, ascentCurve)
		results = append(results, ascentResults...)
		if len(ascentResults) > 0 {
			lastResult = ascentResults[len(ascentResults)-1]
		}
	} else if len(results) == 0 {
		// If ascent is skipped, use initial position
		lastResult = ds.PredicitonResult{
			Latitude:  params.LaunchLatitude,
			Longitude: params.LaunchLongitude,
			Altitude:  params.LaunchAltitude,
			Timestamp: params.LaunchDatetime,
		}
	}

	// Custom descent
	if shouldSimulateStage(params, "descent") && descentCurve != nil && lastResult.Latitude != nil {
		descentParams := ds.PredictionParameters{
			LaunchLatitude:  lastResult.Latitude,
			LaunchLongitude: lastResult.Longitude,
			LaunchAltitude:  lastResult.Altitude,
			LaunchDatetime:  lastResult.Timestamp,
		}

		descentResults := s.simulateCustomDescent(ctx, descentParams, descentCurve)
		results = append(results, descentResults...)
	}

	return results
}

// rk4Step advances the balloon state (lat, lon in degrees, alt in metres)
// by one time step dt (seconds) using classical 4th-order Runge-Kutta.
// windFunc supplies the horizontal wind (u, v) in m/s; altRate is the
// vertical rate in m/s.
func rk4Step(lat, lon, alt float64, t time.Time, dt float64, windFunc func(lat, lon, alt float64, t time.Time) (float64, float64), altRate float64) (float64, float64, float64) {
	toRad := math.Pi / 180.0
	toDeg := 180.0 / math.Pi
	R := func(alt float64) float64 { return 6371009.0 + alt } // Earth radius + altitude

	// Derivatives of (lat, lon, alt) at a given state: wind speed divided by
	// the local radius converts m/s into deg/s.
	f := func(lat, lon, alt float64, t time.Time) (float64, float64, float64) {
		windU, windV := windFunc(lat, lon, alt, t)
		Rnow := R(alt)
		dlat := toDeg * windV / Rnow
		dlon := toDeg * windU / (Rnow * math.Cos(lat*toRad))
		return dlat, dlon, altRate
	}

	k1Lat, k1Lon, k1Alt := f(lat, lon, alt, t)
	k2Lat, k2Lon, k2Alt := f(lat+0.5*k1Lat*dt, lon+0.5*k1Lon*dt, alt+0.5*k1Alt*dt, t.Add(time.Duration(0.5*dt)*time.Second))
	k3Lat, k3Lon, k3Alt := f(lat+0.5*k2Lat*dt, lon+0.5*k2Lon*dt, alt+0.5*k2Alt*dt, t.Add(time.Duration(0.5*dt)*time.Second))
	k4Lat, k4Lon, k4Alt := f(lat+k3Lat*dt, lon+k3Lon*dt, alt+k3Alt*dt, t.Add(time.Duration(dt)*time.Second))

	latNew := lat + (dt/6.0)*(k1Lat+2*k2Lat+2*k3Lat+k4Lat)
	lonNew := lon + (dt/6.0)*(k1Lon+2*k2Lon+2*k3Lon+k4Lon)
	altNew := alt + (dt/6.0)*(k1Alt+2*k2Alt+2*k3Alt+k4Alt)
	return latNew, lonNew, altNew
}

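// For reference, the classical RK4 update for y' = f(t, y) is
//
//	y(t+dt) = y(t) + dt/6 * (k1 + 2*k2 + 2*k3 + k4)
//
// with k1 = f(t, y), k2 = f(t + dt/2, y + dt/2*k1), k3 = f(t + dt/2,
// y + dt/2*k2), k4 = f(t + dt, y + dt*k3); rk4Step applies it
// componentwise to (lat, lon, alt).
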
func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParameters, ascentRate, targetAltitude float64, customCurve *CustomCurve) []ds.PredicitonResult {
	const dt = 10.0             // simulation step in seconds
	const outputInterval = 60.0 // output every 60 seconds

	log.Ctx(ctx).Info("ascent simulation starting",
		zap.Float64("ascentRate", ascentRate),
		zap.Float64("targetAlt", targetAltitude))

	lat := *params.LaunchLatitude
	lon := *params.LaunchLongitude
	alt := *params.LaunchAltitude
	timeCur := *params.LaunchDatetime

	results := make([]ds.PredicitonResult, 0, 1000)

	// Record the launch point; wind is reported as zero for this first sample.
	latCopy := lat
	lonCopy := lon
	altCopy := alt
	timeCopy := timeCur
	wind := [2]float64{0, 0}
	windU := wind[0]
	windV := wind[1]
	results = append(results, ds.PredicitonResult{
		Latitude:  &latCopy,
		Longitude: &lonCopy,
		Altitude:  &altCopy,
		Timestamp: &timeCopy,
		WindU:     &windU,
		WindV:     &windV,
	})

	nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second)
	firstExtraction := true
	windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) {
		w, err := s.ExtractWind(ctx, lat, lon, alt, t)
		if err != nil {
			log.Ctx(ctx).Error("wind extraction failed during ascent",
				zap.Error(err),
				zap.Float64("lat", lat),
				zap.Float64("lon", lon),
				zap.Float64("alt", alt),
				zap.Time("time", t))
			return 0, 0
		}
		// Log only the first extraction and any suspicious zero-wind samples.
		if firstExtraction || (w[0] == 0 && w[1] == 0) {
			log.Ctx(ctx).Warn("wind data check",
				zap.Bool("first", firstExtraction),
				zap.Float64("lat", lat),
				zap.Float64("lon", lon),
				zap.Float64("alt", alt),
				zap.Float64("u", w[0]),
				zap.Float64("v", w[1]))
			firstExtraction = false
		}
		return w[0], w[1]
	}

	for alt < targetAltitude {
		altRate := ascentRate
		if customCurve != nil {
			altRate = s.getCustomAltitudeRate(customCurve, alt, ascentRate)
		}
		latNew, lonNew, altNew := rk4Step(lat, lon, alt, timeCur, dt, windFunc, altRate)
		timeCur = timeCur.Add(time.Duration(dt) * time.Second)
		lat = latNew
		lon = lonNew
		alt = altNew

		if alt >= targetAltitude {
			alt = targetAltitude
			// Record burst point
			wU, wV := windFunc(lat, lon, alt, timeCur)
			latCopy := lat
			lonCopy := lon
			altCopy := alt
			timeCopy := timeCur
			windU := wU
			windV := wV
			results = append(results, ds.PredicitonResult{
				Latitude:  &latCopy,
				Longitude: &lonCopy,
				Altitude:  &altCopy,
				Timestamp: &timeCopy,
				WindU:     &windU,
				WindV:     &windV,
			})
			break
		}

		if !timeCur.Before(nextOutputTime) {
			wU, wV := windFunc(lat, lon, alt, timeCur)
			latCopy := lat
			lonCopy := lon
			altCopy := alt
			timeCopy := timeCur
			windU := wU
			windV := wV
			results = append(results, ds.PredicitonResult{
				Latitude:  &latCopy,
				Longitude: &lonCopy,
				Altitude:  &altCopy,
				Timestamp: &timeCopy,
				WindU:     &windU,
				WindV:     &windV,
			})
			nextOutputTime = nextOutputTime.Add(time.Duration(outputInterval) * time.Second)
		}
	}

	return results
}

func (s *Service) simulateDescent(ctx context.Context, params ds.PredictionParameters, descentRate, targetAltitude float64, customCurve *CustomCurve) []ds.PredicitonResult {
	const dt = 10.0             // simulation step in seconds
	const outputInterval = 60.0 // output every 60 seconds

	lat := *params.LaunchLatitude
	lon := *params.LaunchLongitude
	alt := *params.LaunchAltitude
	timeCur := *params.LaunchDatetime

	results := make([]ds.PredicitonResult, 0, 1000)

	latCopy := lat
	lonCopy := lon
	altCopy := alt
	timeCopy := timeCur
	wind := [2]float64{0, 0}
	windU := wind[0]
	windV := wind[1]
	results = append(results, ds.PredicitonResult{
		Latitude:  &latCopy,
		Longitude: &lonCopy,
		Altitude:  &altCopy,
		Timestamp: &timeCopy,
		WindU:     &windU,
		WindV:     &windV,
	})

	nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second)
	windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) {
		w, err := s.ExtractWind(ctx, lat, lon, alt, t)
		if err != nil {
			log.Ctx(ctx).Error("wind extraction failed during descent",
				zap.Error(err),
				zap.Float64("lat", lat),
				zap.Float64("lon", lon),
				zap.Float64("alt", alt),
				zap.Time("time", t))
			return 0, 0
		}
		return w[0], w[1]
	}

	for alt > targetAltitude {
		// Descent speeds up with altitude as air density drops.
		altRate := -descentRateAtAlt(descentRate, alt)
		if customCurve != nil {
			altRate = -s.getCustomAltitudeRate(customCurve, alt, descentRate)
		}
		latNew, lonNew, altNew := rk4Step(lat, lon, alt, timeCur, dt, windFunc, altRate)
		timeCur = timeCur.Add(time.Duration(dt) * time.Second)
		lat = latNew
		lon = lonNew
		alt = altNew

		if alt <= targetAltitude {
			alt = targetAltitude
			// Record landing point
			wU, wV := windFunc(lat, lon, alt, timeCur)
			latCopy := lat
			lonCopy := lon
			altCopy := alt
			timeCopy := timeCur
			windU := wU
			windV := wV
			results = append(results, ds.PredicitonResult{
				Latitude:  &latCopy,
				Longitude: &lonCopy,
				Altitude:  &altCopy,
				Timestamp: &timeCopy,
				WindU:     &windU,
				WindV:     &windV,
			})
			break
		}

		if !timeCur.Before(nextOutputTime) {
			wU, wV := windFunc(lat, lon, alt, timeCur)
			latCopy := lat
			lonCopy := lon
			altCopy := alt
			timeCopy := timeCur
			windU := wU
			windV := wV
			results = append(results, ds.PredicitonResult{
				Latitude:  &latCopy,
				Longitude: &lonCopy,
				Altitude:  &altCopy,
				Timestamp: &timeCopy,
				WindU:     &windU,
				WindV:     &windV,
			})
			nextOutputTime = nextOutputTime.Add(time.Duration(outputInterval) * time.Second)
		}
	}

	return results
}

func (s *Service) simulateFloat(ctx context.Context, startResult ds.PredicitonResult, duration time.Duration) []ds.PredicitonResult {
	const dt = 10.0             // simulation step in seconds
	const outputInterval = 60.0 // output every 60 seconds

	lat := *startResult.Latitude
	lon := *startResult.Longitude
	alt := *startResult.Altitude
	timeCur := *startResult.Timestamp
	endTime := timeCur.Add(duration)

	results := make([]ds.PredicitonResult, 0, 1000)

	// Always include the initial float point
	latCopy := lat
	lonCopy := lon
	altCopy := alt
	timeCopy := timeCur
	wind := [2]float64{0, 0}
	windU := wind[0]
	windV := wind[1]
	results = append(results, ds.PredicitonResult{
		Latitude:  &latCopy,
		Longitude: &lonCopy,
		Altitude:  &altCopy,
		Timestamp: &timeCopy,
		WindU:     &windU,
		WindV:     &windV,
	})

	nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second)

	for timeCur.Before(endTime) {
		wind, err := s.ExtractWind(ctx, lat, lon, alt, timeCur)
		if err != nil {
			log.Ctx(ctx).Warn("wind extraction failed during float", zap.Error(err))
			break
		}

		// Euler step: ~111,320 m per degree of latitude; one degree of
		// longitude is the equatorial circumference (~40,075 km) over 360,
		// shrunk by cos(lat).
		latDot := (wind[1] / 111320.0)
		lonDot := (wind[0] / (40075000.0 * math.Cos(lat*math.Pi/180) / 360.0))

		lat += latDot * dt
		lon += lonDot * dt
		// alt remains constant during float
		timeCur = timeCur.Add(time.Duration(dt) * time.Second)

		if !timeCur.Before(nextOutputTime) {
			latCopy := lat
			lonCopy := lon
			altCopy := alt
			timeCopy := timeCur
			windU := wind[0]
			windV := wind[1]
			results = append(results, ds.PredicitonResult{
				Latitude:  &latCopy,
				Longitude: &lonCopy,
				Altitude:  &altCopy,
				Timestamp: &timeCopy,
				WindU:     &windU,
				WindV:     &windV,
			})
			nextOutputTime = nextOutputTime.Add(time.Duration(outputInterval) * time.Second)
		}
	}

	return results
}

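// Unlike ascent and descent, float uses plain Euler steps: altitude is
// constant, so the flat approximations (111,320 m per degree of latitude;
// the ~40,075 km equatorial circumference scaled by cos(lat) for longitude)
// are adequate over a 30-minute drift.
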
// airDensity returns ISA air density in kg/m³ at the given altitude in
// metres, using the standard-atmosphere layers up to ~47 km.
func airDensity(h float64) float64 {
	var T, p float64
	switch {
	case h < 11000: // troposphere: -6.5 K/km lapse rate
		T = 288.15 - 0.0065*h
		p = 101325 * math.Pow(T/288.15, 5.2561)
	case h < 20000: // tropopause: isothermal
		T = 216.65
		p = 22632.1 * math.Exp(-0.00015769*(h-11000))
	case h < 32000: // lower stratosphere: +1.0 K/km
		T = 216.65 + 0.001*(h-20000)
		p = 5474.89 * math.Pow(T/216.65, -34.1632)
	default: // upper stratosphere: +2.8 K/km
		T = 228.65 + 0.0028*(h-32000)
		p = 868.019 * math.Pow(T/228.65, -12.2009)
	}
	return p / (287.05 * T) // ideal gas law with R = 287.05 J/(kg·K)
}

// descentRateAtAlt returns the descent rate adjusted for air density at
// altitude. The descent_rate parameter is the sea-level rate; at altitude,
// thinner air means a faster terminal velocity, scaling with sqrt(rho0/rho).
func descentRateAtAlt(seaLevelRate, alt float64) float64 {
	rho0 := airDensity(0)
	rhoH := airDensity(alt)
	if rhoH <= 0 {
		return seaLevelRate
	}
	return seaLevelRate * math.Sqrt(rho0/rhoH)
}

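// Worked example: airDensity(30000) ≈ 0.018 kg/m³ versus ≈ 1.225 kg/m³ at
// sea level, so a payload descending at 5 m/s near the ground falls at
// roughly 5 * sqrt(1.225/0.018) ≈ 41 m/s just after a 30 km burst.
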
// simulateCustomAscent runs the ascent integrator with rates interpolated
// from the custom curve; the 5 m/s rate and 30 km ceiling are fallbacks.
func (s *Service) simulateCustomAscent(ctx context.Context, params ds.PredictionParameters, curve *CustomCurve) []ds.PredicitonResult {
	return s.simulateAscent(ctx, params, 5.0, 30000.0, curve)
}

// simulateCustomDescent runs the descent integrator with rates interpolated
// from the custom curve; the 5 m/s rate is the fallback.
func (s *Service) simulateCustomDescent(ctx context.Context, params ds.PredictionParameters, curve *CustomCurve) []ds.PredicitonResult {
	return s.simulateDescent(ctx, params, 5.0, 0.0, curve)
}

func (s *Service) getCustomAltitudeRate(curve *CustomCurve, currentAltitude, defaultRate float64) float64 {
	if curve == nil || len(curve.Altitude) < 2 || len(curve.Time) != len(curve.Altitude) {
		return defaultRate
	}

	// Find the two points in the curve that bracket the current altitude
	for i := 0; i < len(curve.Altitude)-1; i++ {
		if curve.Altitude[i] <= currentAltitude && currentAltitude <= curve.Altitude[i+1] {
			// Linear interpolation
			alt1, alt2 := curve.Altitude[i], curve.Altitude[i+1]
			time1, time2 := curve.Time[i], curve.Time[i+1]

			if alt2 == alt1 {
				return defaultRate
			}

			// Calculate rate (change in altitude per second)
			if time2 > time1 {
				return (alt2 - alt1) / (time2 - time1)
			}
			return defaultRate
		}
	}

	return defaultRate
}

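// Worked example: for Altitude [0, 10000, 30000] and Time [0, 1800, 5400],
// a payload at 15,000 m falls in the second segment, so the interpolated
// rate is (30000-10000)/(5400-1800) ≈ 5.56 m/s.
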
// parseCustomCurve decodes a base64-encoded JSON curve payload.
func parseCustomCurve(base64Data string) (*CustomCurve, error) {
	data, err := base64.StdEncoding.DecodeString(base64Data)
	if err != nil {
		return nil, err
	}

	var curve CustomCurve
	if err := json.Unmarshal(data, &curve); err != nil {
		return nil, err
	}

	return &curve, nil
}

@ -1,492 +0,0 @@
package service

import (
	"context"
	"testing"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/ds"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// MockGrib is a mock implementation of the Grib interface
type MockGrib struct {
	mock.Mock
}

func (m *MockGrib) Update(ctx context.Context) error {
	args := m.Called(ctx)
	return args.Error(0)
}

func (m *MockGrib) Extract(ctx context.Context, lat, lon, alt float64, t time.Time) ([2]float64, error) {
	args := m.Called(ctx, lat, lon, alt, t)
	return args.Get(0).([2]float64), args.Error(1)
}

func (m *MockGrib) Close() error {
	args := m.Called()
	return args.Error(0)
}

// Helper function to create a test service with mocked GRIB
func createTestService() (*Service, *MockGrib) {
	mockGrib := new(MockGrib)

	// Default mock behavior: return constant wind (5 m/s east, 3 m/s north)
	mockGrib.On("Extract", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return([2]float64{5.0, 3.0}, nil)

	service := &Service{
		grib: mockGrib,
	}

	return service, mockGrib
}

// Helper function to create basic prediction parameters
func createBasicParams() ds.PredictionParameters {
	lat := 40.0
	lon := -105.0
	alt := 1000.0
	launchTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
	profile := "standard_profile"
	ascentRate := 5.0
	burstAltitude := 10000.0
	descentRate := 5.0

	return ds.PredictionParameters{
		LaunchLatitude:  &lat,
		LaunchLongitude: &lon,
		LaunchAltitude:  &alt,
		LaunchDatetime:  &launchTime,
		Profile:         &profile,
		AscentRate:      &ascentRate,
		BurstAltitude:   &burstAltitude,
		DescentRate:     &descentRate,
	}
}

func TestRestrictedPrediction_OnlyAscent(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	// Restrict to ascent only
	params.SimulateStages = []string{"ascent"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// Verify all results are during ascent phase (altitude increasing)
	for i := 1; i < len(results); i++ {
		assert.GreaterOrEqual(t, *results[i].Altitude, *results[i-1].Altitude,
			"Altitude should be increasing or equal during ascent")
	}

	// Last altitude should be near burst altitude
	lastAlt := *results[len(results)-1].Altitude
	burstAlt := *params.BurstAltitude
	assert.InDelta(t, burstAlt, lastAlt, 500.0, "Last altitude should be near burst altitude")
}

func TestRestrictedPrediction_OnlyDescent(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	// Restrict to descent only
	params.SimulateStages = []string{"descent"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// First result should be at burst altitude (since ascent was skipped)
	firstAlt := *results[0].Altitude
	burstAlt := *params.BurstAltitude
	assert.Equal(t, burstAlt, firstAlt, "Should start at burst altitude when ascent is skipped")

	// Verify all results are during descent phase (altitude decreasing)
	for i := 1; i < len(results); i++ {
		assert.LessOrEqual(t, *results[i].Altitude, *results[i-1].Altitude,
			"Altitude should be decreasing or equal during descent")
	}

	// Last altitude should be near ground
	lastAlt := *results[len(results)-1].Altitude
	assert.Less(t, lastAlt, 1000.0, "Last altitude should be near ground")
}

func TestRestrictedPrediction_AscentAndDescent(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	// Include both ascent and descent
	params.SimulateStages = []string{"ascent", "descent"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// Find the peak altitude (transition point)
	maxAlt := 0.0
	maxIdx := 0
	for i, result := range results {
		if *result.Altitude > maxAlt {
			maxAlt = *result.Altitude
			maxIdx = i
		}
	}

	// Verify ascent phase
	for i := 1; i <= maxIdx; i++ {
		assert.GreaterOrEqual(t, *results[i].Altitude, *results[i-1].Altitude,
			"Altitude should increase during ascent phase")
	}

	// Verify descent phase
	for i := maxIdx + 1; i < len(results); i++ {
		assert.LessOrEqual(t, *results[i].Altitude, *results[i-1].Altitude,
			"Altitude should decrease during descent phase")
	}
}

func TestRestrictedPrediction_FloatProfile_OnlyFloat(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	profile := "float_profile"
	floatAlt := 15000.0
	params.Profile = &profile
	params.FloatAltitude = &floatAlt

	// Restrict to float only
	params.SimulateStages = []string{"float"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// All results should be at the float altitude
	for _, result := range results {
		assert.Equal(t, floatAlt, *result.Altitude,
			"Altitude should remain constant at float altitude")
	}

	// Verify horizontal movement (lat/lon changes due to wind)
	firstLat := *results[0].Latitude
	lastLat := *results[len(results)-1].Latitude
	assert.NotEqual(t, firstLat, lastLat, "Latitude should change during float due to wind")
}

func TestRestrictedPrediction_FloatProfile_AllStages(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	profile := "float_profile"
	floatAlt := 15000.0
	params.Profile = &profile
	params.FloatAltitude = &floatAlt

	// Include all stages
	params.SimulateStages = []string{"ascent", "float", "descent"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// Verify we have ascending, constant, and descending altitude patterns
	hasAscent := false
	hasFloat := false
	hasDescent := false

	const altTolerance = 50.0 // Tolerance for altitude comparison

	for i := 1; i < len(results); i++ {
		altDiff := *results[i].Altitude - *results[i-1].Altitude

		if altDiff > altTolerance {
			hasAscent = true
		} else if altDiff < -altTolerance {
			hasDescent = true
		} else if *results[i].Altitude > 10000 { // Float happens at high altitude
			hasFloat = true
		}
	}

	assert.True(t, hasAscent, "Should have ascent phase")
	assert.True(t, hasFloat, "Should have float phase")
	assert.True(t, hasDescent, "Should have descent phase")

	// Verify maximum altitude is near float altitude
	maxAlt := 0.0
	for _, result := range results {
		if *result.Altitude > maxAlt {
			maxAlt = *result.Altitude
		}
	}
	assert.InDelta(t, floatAlt, maxAlt, 1000.0, "Max altitude should be near float altitude")
}

func TestRestrictedPrediction_ReverseProfile_OnlyFloat(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	profile := "reverse_profile"
	floatAlt := 5000.0
	params.Profile = &profile
	params.FloatAltitude = &floatAlt

	// Restrict to float only
	params.SimulateStages = []string{"float"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// All results should be at the float altitude
	for _, result := range results {
		assert.InDelta(t, floatAlt, *result.Altitude, 10.0,
			"Altitude should remain near float altitude")
	}
}

func TestRestrictedPrediction_EmptyStages_SimulatesAll(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	// Empty SimulateStages should simulate all stages
	params.SimulateStages = []string{}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// Should have both ascent and descent
	maxAlt := 0.0
	hasAscent := false
	hasDescent := false

	for i := 1; i < len(results); i++ {
		if *results[i].Altitude > *results[i-1].Altitude {
			hasAscent = true
		}
		if *results[i].Altitude < *results[i-1].Altitude {
			hasDescent = true
		}
		if *results[i].Altitude > maxAlt {
			maxAlt = *results[i].Altitude
		}
	}

	assert.True(t, hasAscent, "Should have ascent phase")
	assert.True(t, hasDescent, "Should have descent phase")
}

func TestRestrictedPrediction_NilStages_SimulatesAll(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	// Nil SimulateStages should simulate all stages
	params.SimulateStages = nil

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// Should have both ascent and descent
	maxAlt := 0.0
	minAltAfterMax := 1000000.0

	for _, result := range results {
		if *result.Altitude > maxAlt {
			maxAlt = *result.Altitude
		}
	}

	foundMax := false
	for _, result := range results {
		if *result.Altitude == maxAlt {
			foundMax = true
		}
		if foundMax && *result.Altitude < minAltAfterMax {
			minAltAfterMax = *result.Altitude
		}
	}

	// Should reach high altitude and come back down
	assert.Greater(t, maxAlt, 5000.0, "Should reach high altitude")
	assert.Less(t, minAltAfterMax, maxAlt, "Should descend after reaching max altitude")
}

func TestRestrictedPrediction_InvalidStage_IgnoresInvalid(t *testing.T) {
	service, _ := createTestService()
	params := createBasicParams()

	// Include invalid stage name (should be ignored)
	params.SimulateStages = []string{"ascent", "invalid_stage", "descent"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)
	// Should still simulate ascent and descent, ignoring the invalid stage
}

func TestRestrictedPrediction_WindImpact(t *testing.T) {
	service, mockGrib := createTestService()

	// Override mock to return strong eastward wind
	mockGrib.ExpectedCalls = nil
	mockGrib.On("Extract", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return([2]float64{20.0, 0.0}, nil) // Strong eastward wind

	params := createBasicParams()
	params.SimulateStages = []string{"ascent"}

	results, err := service.PerformPrediction(context.Background(), params)

	assert.NoError(t, err)
	assert.NotEmpty(t, results)

	// Longitude should increase significantly due to eastward wind
	firstLon := *results[0].Longitude
	lastLon := *results[len(results)-1].Longitude
	assert.Greater(t, lastLon, firstLon, "Longitude should increase with eastward wind")

	// Verify wind values are captured in results
	for _, result := range results {
		if result.WindU != nil {
			assert.NotNil(t, result.WindV, "WindV should be present if WindU is present")
		}
	}
}

func TestRestrictedPrediction_MissingRequiredParams(t *testing.T) {
	service, _ := createTestService()

	testCases := []struct {
		name   string
		params ds.PredictionParameters
	}{
		{
			name: "Missing latitude",
			params: ds.PredictionParameters{
				LaunchLongitude: floatPtr(-105.0),
				LaunchAltitude:  floatPtr(1000.0),
				LaunchDatetime:  timePtr(time.Now()),
			},
		},
		{
			name: "Missing longitude",
			params: ds.PredictionParameters{
				LaunchLatitude: floatPtr(40.0),
				LaunchAltitude: floatPtr(1000.0),
				LaunchDatetime: timePtr(time.Now()),
			},
		},
		{
			name: "Missing altitude",
			params: ds.PredictionParameters{
				LaunchLatitude:  floatPtr(40.0),
				LaunchLongitude: floatPtr(-105.0),
				LaunchDatetime:  timePtr(time.Now()),
			},
		},
		{
			name: "Missing datetime",
			params: ds.PredictionParameters{
				LaunchLatitude:  floatPtr(40.0),
				LaunchLongitude: floatPtr(-105.0),
				LaunchAltitude:  floatPtr(1000.0),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tc.params.SimulateStages = []string{"ascent"}
			results, err := service.PerformPrediction(context.Background(), tc.params)

			assert.Error(t, err)
			assert.Equal(t, ErrInvalidParameters, err)
			assert.Nil(t, results)
		})
	}
}

func TestShouldSimulateStage(t *testing.T) {
	testCases := []struct {
		name           string
		stages         []string
		queryStage     string
		shouldSimulate bool
	}{
		{
			name:           "Empty filter simulates all",
			stages:         []string{},
			queryStage:     "ascent",
			shouldSimulate: true,
		},
		{
			name:           "Nil filter simulates all",
			stages:         nil,
			queryStage:     "descent",
			shouldSimulate: true,
		},
		{
			name:           "Stage in filter",
			stages:         []string{"ascent", "descent"},
			queryStage:     "ascent",
			shouldSimulate: true,
		},
		{
			name:           "Stage not in filter",
			stages:         []string{"ascent"},
			queryStage:     "descent",
			shouldSimulate: false,
		},
		{
			name:           "Float stage in filter",
			stages:         []string{"float"},
			queryStage:     "float",
			shouldSimulate: true,
		},
		{
			name:           "Multiple stages excluding one",
			stages:         []string{"ascent", "float"},
			queryStage:     "descent",
			shouldSimulate: false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			params := ds.PredictionParameters{
				SimulateStages: tc.stages,
			}
			result := shouldSimulateStage(params, tc.queryStage)
			assert.Equal(t, tc.shouldSimulate, result)
		})
	}
}

// Helper functions
func floatPtr(f float64) *float64 {
	return &f
}

func timePtr(t time.Time) *time.Time {
	return &t
}

@ -1,60 +0,0 @@
package service

import (
	"context"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
)

type Service struct {
	grib Grib
}

func New(gribService Grib) (*Service, error) {
	svc := &Service{
		grib: gribService,
	}

	return svc, nil
}

// UpdateWeatherData updates weather forecast data using the configured grib service
func (s *Service) UpdateWeatherData(ctx context.Context) error {
	return s.grib.Update(ctx)
}

// ExtractWind extracts wind data for given coordinates and time
func (s *Service) ExtractWind(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) {
	return s.grib.Extract(ctx, lat, lon, alt, ts)
}

// Update updates the GRIB data (implements updater.GribService)
func (s *Service) Update(ctx context.Context) error {
	return s.UpdateWeatherData(ctx)
}

// Start starts the service
func (s *Service) Start() {
	log.Ctx(context.Background()).Info("service started")
}

// Stop stops the service
func (s *Service) Stop() {
	log.Ctx(context.Background()).Info("service stopped")
}

// Close closes the service and releases resources
func (s *Service) Close() error {
	s.Stop()
	return nil
}

// GetGribStatus reports readiness of the underlying grib service, if the
// injected implementation exposes an optional GetStatus method.
func (s *Service) GetGribStatus(ctx context.Context) (ready bool, lastUpdate time.Time, isFresh bool, errMsg string) {
	if gribStatus, ok := s.grib.(interface {
		GetStatus() (ready bool, lastUpdate time.Time, isFresh bool, errMsg string)
	}); ok {
		return gribStatus.GetStatus()
	}
	return false, time.Time{}, false, "grib service does not implement GetStatus"
}
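GetGribStatus upgrades the injected Grib to an optional status-reporting capability via a runtime type assertion, so implementations that cannot report status still satisfy the base interface. A sketch of a conforming stub, assuming the Grib interface consists of the Update and Extract methods exercised above (its actual definition is in another file of this commit):

// stubGrib is hypothetical and exists only to illustrate the optional
// GetStatus upgrade path discovered by the type assertion above.
type stubGrib struct {
	lastUpdate time.Time
}

func (g *stubGrib) Update(ctx context.Context) error { return nil }

func (g *stubGrib) Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error) {
	return [2]float64{}, nil
}

// GetStatus is optional; Service discovers it at runtime via the assertion.
func (g *stubGrib) GetStatus() (bool, time.Time, bool, string) {
	return true, g.lastUpdate, true, ""
}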
@ -1,44 +0,0 @@
package middleware

import (
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"github.com/ogen-go/ogen/middleware"
	"go.uber.org/zap"
)

// Logging returns an ogen middleware that logs request start, outcome, and duration.
func Logging() middleware.Middleware {
	return func(req middleware.Request, next func(req middleware.Request) (middleware.Response, error)) (middleware.Response, error) {
		lg := log.Ctx(req.Context).With(
			zap.String("operationId", req.OperationID),
		)

		lg.Info("started request")

		req.Context = log.ToCtx(req.Context, lg)

		start := time.Now()
		resp, err := next(req)
		dur := time.Since(start).Microseconds()

		if err != nil {
			if errcode, ok := err.(*errcodes.ErrorCode); ok {
				lg.Error("request error",
					zap.Int("status_code", errcode.StatusCode),
					zap.String("message", errcode.Message),
					zap.String("details", errcode.Details),
				)
			} else {
				lg.Error("request internal error",
					zap.Error(err),
				)
			}
		}

		lg.Info("done request", zap.Float64("duration_ms", float64(dur)/float64(1000)))

		return resp, err
	}
}
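Logging plugs into the generated server through WithMiddleware, exactly as the Transport constructor later in this commit does. A fragment sketch, assuming the generated package alias api used there; multiple middlewares would chain in declaration order:

srv, err := api.NewServer(h,
	api.WithMiddleware(
		middleware.Logging(),
		// further middlewares, e.g. a hypothetical recovery wrapper, chain here
	),
)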
@ -1,24 +0,0 @@
package rest

import (
	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	env "github.com/caarlos0/env/v11"
)

type Config struct {
	Host         string `env:"HOST" envDefault:"0.0.0.0"`
	Port         int    `env:"PORT" envDefault:"8080"`
	ReadTimeout  string `env:"READ_TIMEOUT" envDefault:"30s"`
	WriteTimeout string `env:"WRITE_TIMEOUT" envDefault:"30s"`
	IdleTimeout  string `env:"IDLE_TIMEOUT" envDefault:"60s"`
}

func NewConfig() (*Config, error) {
	cfg := &Config{}
	// Prefix (not PrefixTagName) applies the literal prefix to every variable
	// name; PrefixTagName only renames the struct tag used for nested prefixes,
	// so the previous code silently read the unprefixed variables.
	if err := env.ParseWithOptions(cfg, env.Options{
		Prefix: "GSN_PREDICTOR_REST_",
	}); err != nil {
		return nil, errcodes.Wrap(err, "failed to parse REST config")
	}
	return cfg, nil
}
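With the prefix option set, every field name is resolved against the prefixed environment variable. A minimal sketch of the lookup, assuming this package's import path from the repository layout:

package main

import (
	"fmt"
	"os"

	rest "git.intra.yksa.space/gsn/predictor/internal/transport/rest"
)

func main() {
	// PORT resolves to GSN_PREDICTOR_REST_PORT under the configured prefix.
	os.Setenv("GSN_PREDICTOR_REST_PORT", "9090")
	cfg, err := rest.NewConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Host, cfg.Port) // 0.0.0.0 9090
}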
@ -1,14 +0,0 @@
package handler

import (
	"context"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/ds"
)

// Service describes the application-layer operations the REST handler depends on.
type Service interface {
	UpdateWeatherData(ctx context.Context) error
	ExtractWind(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error)
	PerformPrediction(ctx context.Context, params ds.PredictionParameters) ([]ds.PredicitonResult, error)
}
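The handler declares the narrow interface it consumes rather than importing the concrete service type. A compile-time assertion makes that contract explicit, in the same style as the var _ api.Handler check in the next file; that *service.Service is the intended implementation is an assumption based on the service package earlier in this commit:

// Sketch: proves *service.Service satisfies handler.Service at compile time.
var _ Service = (*service.Service)(nil)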
@ -1,194 +0,0 @@
package handler

import (
	"context"
	"net/http"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/ds"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	api "git.intra.yksa.space/gsn/predictor/pkg/rest"
)

var (
	_ api.Handler = (*Handler)(nil)
)

type Handler struct {
	svc Service
}

func New(svc Service) *Handler {
	return &Handler{
		svc: svc,
	}
}

func (h *Handler) PerformPrediction(ctx context.Context, params api.PerformPredictionParams) (*api.PredictionResult, error) {
	internalParams := ds.ConvertFlatPredictionParams(params)
	if internalParams == nil {
		return nil, errcodes.New(http.StatusBadRequest, "invalid or missing parameters")
	}
	results, err := h.svc.PerformPrediction(ctx, *internalParams)
	if err != nil {
		return nil, err
	}
	if len(results) == 0 {
		return nil, errcodes.New(http.StatusInternalServerError, "no prediction results")
	}

	// Group results into stages (ascent and descent)
	stages := h.groupResultsIntoStages(results)

	// Map to OpenAPI schema
	var predictionItems []api.PredictionResultPredictionItem

	for _, stage := range stages {
		var trajectory []api.PredictionResultPredictionItemTrajectoryItem

		for _, result := range stage.Results {
			traj := api.PredictionResultPredictionItemTrajectoryItem{
				Datetime:  *result.Timestamp,
				Latitude:  *result.Latitude,
				Longitude: *result.Longitude,
				Altitude:  *result.Altitude,
			}
			trajectory = append(trajectory, traj)
		}

		item := api.PredictionResultPredictionItem{
			Stage:      stage.Stage,
			Trajectory: trajectory,
		}
		predictionItems = append(predictionItems, item)
	}

	metadata := api.PredictionResultMetadata{
		StartDatetime:    *results[0].Timestamp,
		CompleteDatetime: *results[len(results)-1].Timestamp,
	}

	resp := &api.PredictionResult{
		Metadata:   metadata,
		Prediction: predictionItems,
	}
	return resp, nil
}

// StageResult represents a stage with its results
type StageResult struct {
	Stage   api.PredictionResultPredictionItemStage
	Results []ds.PredicitonResult
}

// groupResultsIntoStages groups the prediction results into ascent and descent stages
func (h *Handler) groupResultsIntoStages(results []ds.PredicitonResult) []StageResult {
	if len(results) == 0 {
		return nil
	}

	var stages []StageResult
	var currentStage []ds.PredicitonResult
	var currentStageType api.PredictionResultPredictionItemStage

	// Determine if we're in ascent or descent based on altitude changes
	prevAlt := *results[0].Altitude
	currentStage = append(currentStage, results[0])
	currentStageType = api.PredictionResultPredictionItemStageAscent

	for i := 1; i < len(results); i++ {
		result := results[i]
		currentAlt := *result.Altitude

		// Determine if we're still in the same stage
		var stageType api.PredictionResultPredictionItemStage
		if currentAlt > prevAlt {
			stageType = api.PredictionResultPredictionItemStageAscent
		} else if currentAlt < prevAlt {
			stageType = api.PredictionResultPredictionItemStageDescent
		} else {
			// Same altitude - continue with current stage
			stageType = currentStageType
		}

		// If stage type changed, finalize current stage and start new one
		if stageType != currentStageType && len(currentStage) > 0 {
			stages = append(stages, StageResult{
				Stage:   currentStageType,
				Results: currentStage,
			})
			currentStage = nil
			currentStageType = stageType
		}

		currentStage = append(currentStage, result)
		prevAlt = currentAlt
	}

	// Add the final stage
	if len(currentStage) > 0 {
		stages = append(stages, StageResult{
			Stage:   currentStageType,
			Results: currentStage,
		})
	}

	return stages
}

func (h *Handler) NewError(ctx context.Context, err error) *api.ErrorStatusCode {
	if errcode, ok := err.(*errcodes.ErrorCode); ok {
		resp := api.Error{
			Message: errcode.Message,
		}

		if errcode.Details != "" {
			resp.Details = api.NewOptString(errcode.Details)
		}

		return &api.ErrorStatusCode{
			StatusCode: errcode.StatusCode,
			Response:   resp,
		}
	}

	return &api.ErrorStatusCode{
		StatusCode: http.StatusInternalServerError,
		Response: api.Error{
			Message: "undefined internal error",
			Details: api.NewOptString(err.Error()),
		},
	}
}

func (h *Handler) ReadinessCheck(ctx context.Context) (*api.ReadinessResponse, error) {
	status := api.ReadinessResponseStatusNotReady
	var lastUpdate time.Time
	var isFresh bool
	var errMsg string

	if s, ok := h.svc.(interface {
		GetGribStatus(ctx context.Context) (ready bool, lastUpdate time.Time, isFresh bool, errMsg string)
	}); ok {
		ready, lu, fresh, em := s.GetGribStatus(ctx)
		lastUpdate = lu
		isFresh = fresh
		errMsg = em
		if ready {
			status = api.ReadinessResponseStatusOk
		} else if em != "" {
			status = api.ReadinessResponseStatusError
		}
	} else {
		errMsg = "service does not implement GetGribStatus"
		status = api.ReadinessResponseStatusError
	}

	resp := &api.ReadinessResponse{
		Status:       status,
		IsFresh:      api.NewOptBool(isFresh),
		LastUpdate:   api.NewOptDateTime(lastUpdate),
		ErrorMessage: api.NewOptString(errMsg),
	}
	return resp, nil
}
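One subtlety of groupResultsIntoStages: a stage switch is only detected on the point after the apex, so the highest sample stays in the ascent stage and the descent stage begins with the following point. A same-package test sketch pinning that behavior down (the ptr helpers are local to the sketch; field names follow the usage above):

func TestGroupResultsIntoStages_Sketch(t *testing.T) {
	fp := func(f float64) *float64 { return &f }
	tp := func(tm time.Time) *time.Time { return &tm }

	h := New(nil) // groupResultsIntoStages does not touch the service
	base := time.Now()
	mk := func(step int, alt float64) ds.PredicitonResult {
		return ds.PredicitonResult{
			Timestamp: tp(base.Add(time.Duration(step) * time.Minute)),
			Latitude:  fp(40.0),
			Longitude: fp(-105.0),
			Altitude:  fp(alt),
		}
	}

	results := []ds.PredicitonResult{
		mk(0, 0), mk(1, 100), mk(2, 200), // ascent, apex at 200 m
		mk(3, 150), mk(4, 50), // descent
	}

	stages := h.groupResultsIntoStages(results)
	assert.Len(t, stages, 2)
	assert.Len(t, stages[0].Results, 3) // the apex sample stays in ascent
	assert.Len(t, stages[1].Results, 2)
}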
@ -1,47 +0,0 @@
package rest

import (
	"context"
	"fmt"
	"net/http"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"git.intra.yksa.space/gsn/predictor/internal/transport/middleware"
	handler "git.intra.yksa.space/gsn/predictor/internal/transport/rest/handler"
	api "git.intra.yksa.space/gsn/predictor/pkg/rest"
	"github.com/rs/cors"
)

type Transport struct {
	cfg     *Config
	srv     *api.Server
	handler *handler.Handler
}

func New(handler *handler.Handler, cfg *Config) (*Transport, error) {
	srv, err := api.NewServer(
		handler,
		api.WithMiddleware(middleware.Logging()),
	)
	if err != nil {
		return nil, err
	}

	return &Transport{
		srv:     srv,
		cfg:     cfg,
		handler: handler,
	}, nil
}

func (t *Transport) Run() {
	log.Ctx(context.Background()).Info("started")

	mux := http.NewServeMux()
	mux.Handle("/", t.srv)
	// Serve through the CORS wrapper; previously the wrapped handler was
	// discarded and the bare server was passed to ListenAndServe, so the
	// CORS headers were never applied.
	corsHandler := cors.AllowAll().Handler(mux)

	if err := http.ListenAndServe(fmt.Sprintf(":%d", t.cfg.Port), corsHandler); err != nil {
		panic(err)
	}
}
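Run currently binds to the port only; the Host, ReadTimeout, WriteTimeout, and IdleTimeout fields on Config go unused. A sketch of a Run body that would honor them with an explicit http.Server (requires a "time" import; duration-parse error handling elided for brevity):

readTO, _ := time.ParseDuration(t.cfg.ReadTimeout)
writeTO, _ := time.ParseDuration(t.cfg.WriteTimeout)
idleTO, _ := time.ParseDuration(t.cfg.IdleTimeout)

server := &http.Server{
	Addr:         fmt.Sprintf("%s:%d", t.cfg.Host, t.cfg.Port),
	Handler:      corsHandler,
	ReadTimeout:  readTO,
	WriteTimeout: writeTO,
	IdleTimeout:  idleTO,
}
if err := server.ListenAndServe(); err != nil {
	panic(err)
}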
@ -1,291 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"net/http"

	ht "github.com/ogen-go/ogen/http"
	"github.com/ogen-go/ogen/middleware"
	"github.com/ogen-go/ogen/ogenerrors"
	"github.com/ogen-go/ogen/otelogen"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

var (
	// Allocate option closure once.
	clientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
	// Allocate option closure once.
	serverSpanKind = trace.WithSpanKind(trace.SpanKindServer)
)

type (
	optionFunc[C any] func(*C)
	otelOptionFunc    func(*otelConfig)
)

type otelConfig struct {
	TracerProvider trace.TracerProvider
	Tracer         trace.Tracer
	MeterProvider  metric.MeterProvider
	Meter          metric.Meter
	Attributes     []attribute.KeyValue
}

func (cfg *otelConfig) initOTEL() {
	if cfg.TracerProvider == nil {
		cfg.TracerProvider = otel.GetTracerProvider()
	}
	if cfg.MeterProvider == nil {
		cfg.MeterProvider = otel.GetMeterProvider()
	}
	cfg.Tracer = cfg.TracerProvider.Tracer(otelogen.Name,
		trace.WithInstrumentationVersion(otelogen.SemVersion()),
	)
	cfg.Meter = cfg.MeterProvider.Meter(otelogen.Name,
		metric.WithInstrumentationVersion(otelogen.SemVersion()),
	)
}

// ErrorHandler is error handler.
type ErrorHandler = ogenerrors.ErrorHandler

type serverConfig struct {
	otelConfig
	NotFound           http.HandlerFunc
	MethodNotAllowed   func(w http.ResponseWriter, r *http.Request, allowed string)
	ErrorHandler       ErrorHandler
	Prefix             string
	Middleware         Middleware
	MaxMultipartMemory int64
}

// ServerOption is server config option.
type ServerOption interface {
	applyServer(*serverConfig)
}

var _ ServerOption = (optionFunc[serverConfig])(nil)

func (o optionFunc[C]) applyServer(c *C) {
	o(c)
}

var _ ServerOption = (otelOptionFunc)(nil)

func (o otelOptionFunc) applyServer(c *serverConfig) {
	o(&c.otelConfig)
}

func newServerConfig(opts ...ServerOption) serverConfig {
	cfg := serverConfig{
		NotFound: http.NotFound,
		MethodNotAllowed: func(w http.ResponseWriter, r *http.Request, allowed string) {
			status := http.StatusMethodNotAllowed
			if r.Method == "OPTIONS" {
				w.Header().Set("Access-Control-Allow-Methods", allowed)
				w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
				status = http.StatusNoContent
			} else {
				w.Header().Set("Allow", allowed)
			}
			w.WriteHeader(status)
		},
		ErrorHandler:       ogenerrors.DefaultErrorHandler,
		Middleware:         nil,
		MaxMultipartMemory: 32 << 20, // 32 MB
	}
	for _, opt := range opts {
		opt.applyServer(&cfg)
	}
	cfg.initOTEL()
	return cfg
}

type baseServer struct {
	cfg      serverConfig
	requests metric.Int64Counter
	errors   metric.Int64Counter
	duration metric.Float64Histogram
}

func (s baseServer) notFound(w http.ResponseWriter, r *http.Request) {
	s.cfg.NotFound(w, r)
}

func (s baseServer) notAllowed(w http.ResponseWriter, r *http.Request, allowed string) {
	s.cfg.MethodNotAllowed(w, r, allowed)
}

func (cfg serverConfig) baseServer() (s baseServer, err error) {
	s = baseServer{cfg: cfg}
	if s.requests, err = otelogen.ServerRequestCountCounter(s.cfg.Meter); err != nil {
		return s, err
	}
	if s.errors, err = otelogen.ServerErrorsCountCounter(s.cfg.Meter); err != nil {
		return s, err
	}
	if s.duration, err = otelogen.ServerDurationHistogram(s.cfg.Meter); err != nil {
		return s, err
	}
	return s, nil
}

type clientConfig struct {
	otelConfig
	Client ht.Client
}

// ClientOption is client config option.
type ClientOption interface {
	applyClient(*clientConfig)
}

var _ ClientOption = (optionFunc[clientConfig])(nil)

func (o optionFunc[C]) applyClient(c *C) {
	o(c)
}

var _ ClientOption = (otelOptionFunc)(nil)

func (o otelOptionFunc) applyClient(c *clientConfig) {
	o(&c.otelConfig)
}

func newClientConfig(opts ...ClientOption) clientConfig {
	cfg := clientConfig{
		Client: http.DefaultClient,
	}
	for _, opt := range opts {
		opt.applyClient(&cfg)
	}
	cfg.initOTEL()
	return cfg
}

type baseClient struct {
	cfg      clientConfig
	requests metric.Int64Counter
	errors   metric.Int64Counter
	duration metric.Float64Histogram
}

func (cfg clientConfig) baseClient() (c baseClient, err error) {
	c = baseClient{cfg: cfg}
	if c.requests, err = otelogen.ClientRequestCountCounter(c.cfg.Meter); err != nil {
		return c, err
	}
	if c.errors, err = otelogen.ClientErrorsCountCounter(c.cfg.Meter); err != nil {
		return c, err
	}
	if c.duration, err = otelogen.ClientDurationHistogram(c.cfg.Meter); err != nil {
		return c, err
	}
	return c, nil
}

// Option is config option.
type Option interface {
	ServerOption
	ClientOption
}

// WithTracerProvider specifies a tracer provider to use for creating a tracer.
//
// If none is specified, the global provider is used.
func WithTracerProvider(provider trace.TracerProvider) Option {
	return otelOptionFunc(func(cfg *otelConfig) {
		if provider != nil {
			cfg.TracerProvider = provider
		}
	})
}

// WithMeterProvider specifies a meter provider to use for creating a meter.
//
// If none is specified, the otel.GetMeterProvider() is used.
func WithMeterProvider(provider metric.MeterProvider) Option {
	return otelOptionFunc(func(cfg *otelConfig) {
		if provider != nil {
			cfg.MeterProvider = provider
		}
	})
}

// WithAttributes specifies default otel attributes.
func WithAttributes(attributes ...attribute.KeyValue) Option {
	return otelOptionFunc(func(cfg *otelConfig) {
		cfg.Attributes = attributes
	})
}

// WithClient specifies http client to use.
func WithClient(client ht.Client) ClientOption {
	return optionFunc[clientConfig](func(cfg *clientConfig) {
		if client != nil {
			cfg.Client = client
		}
	})
}

// WithNotFound specifies Not Found handler to use.
func WithNotFound(notFound http.HandlerFunc) ServerOption {
	return optionFunc[serverConfig](func(cfg *serverConfig) {
		if notFound != nil {
			cfg.NotFound = notFound
		}
	})
}

// WithMethodNotAllowed specifies Method Not Allowed handler to use.
func WithMethodNotAllowed(methodNotAllowed func(w http.ResponseWriter, r *http.Request, allowed string)) ServerOption {
	return optionFunc[serverConfig](func(cfg *serverConfig) {
		if methodNotAllowed != nil {
			cfg.MethodNotAllowed = methodNotAllowed
		}
	})
}

// WithErrorHandler specifies error handler to use.
func WithErrorHandler(h ErrorHandler) ServerOption {
	return optionFunc[serverConfig](func(cfg *serverConfig) {
		if h != nil {
			cfg.ErrorHandler = h
		}
	})
}

// WithPathPrefix specifies server path prefix.
func WithPathPrefix(prefix string) ServerOption {
	return optionFunc[serverConfig](func(cfg *serverConfig) {
		cfg.Prefix = prefix
	})
}

// WithMiddleware specifies middlewares to use.
func WithMiddleware(m ...Middleware) ServerOption {
	return optionFunc[serverConfig](func(cfg *serverConfig) {
		switch len(m) {
		case 0:
			cfg.Middleware = nil
		case 1:
			cfg.Middleware = m[0]
		default:
			cfg.Middleware = middleware.ChainMiddlewares(m...)
		}
	})
}

// WithMaxMultipartMemory specifies limit of memory for storing file parts.
// File parts which can't be stored in memory will be stored on disk in temporary files.
func WithMaxMultipartMemory(max int64) ServerOption {
	return optionFunc[serverConfig](func(cfg *serverConfig) {
		if max > 0 {
			cfg.MaxMultipartMemory = max
		}
	})
}
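Server behavior is assembled from these options at construction time. A fragment sketch combining a few of them, assuming a Handler h from the non-generated handler package and any value loggingMiddleware satisfying Middleware:

srv, err := gsn.NewServer(h,
	gsn.WithPathPrefix("/predictor"),
	gsn.WithMiddleware(loggingMiddleware),
	gsn.WithMaxMultipartMemory(8<<20), // lower the default 32 MB cap
)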
@ -1,520 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"context"
	"net/url"
	"strings"
	"time"

	"github.com/go-faster/errors"
	"github.com/ogen-go/ogen/conv"
	ht "github.com/ogen-go/ogen/http"
	"github.com/ogen-go/ogen/otelogen"
	"github.com/ogen-go/ogen/uri"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
	"go.opentelemetry.io/otel/trace"
)

func trimTrailingSlashes(u *url.URL) {
	u.Path = strings.TrimRight(u.Path, "/")
	u.RawPath = strings.TrimRight(u.RawPath, "/")
}

// Invoker invokes operations described by OpenAPI v3 specification.
type Invoker interface {
	// PerformPrediction invokes performPrediction operation.
	//
	// Perform prediction.
	//
	// GET /api/v1/prediction
	PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResult, error)
	// ReadinessCheck invokes readinessCheck operation.
	//
	// Readiness check.
	//
	// GET /ready
	ReadinessCheck(ctx context.Context) (*ReadinessResponse, error)
}

// Client implements OAS client.
type Client struct {
	serverURL *url.URL
	baseClient
}
type errorHandler interface {
	NewError(ctx context.Context, err error) *ErrorStatusCode
}

var _ Handler = struct {
	errorHandler
	*Client
}{}

// NewClient initializes new Client defined by OAS.
func NewClient(serverURL string, opts ...ClientOption) (*Client, error) {
	u, err := url.Parse(serverURL)
	if err != nil {
		return nil, err
	}
	trimTrailingSlashes(u)

	c, err := newClientConfig(opts...).baseClient()
	if err != nil {
		return nil, err
	}
	return &Client{
		serverURL:  u,
		baseClient: c,
	}, nil
}

type serverURLKey struct{}

// WithServerURL sets context key to override server URL.
func WithServerURL(ctx context.Context, u *url.URL) context.Context {
	return context.WithValue(ctx, serverURLKey{}, u)
}

func (c *Client) requestURL(ctx context.Context) *url.URL {
	u, ok := ctx.Value(serverURLKey{}).(*url.URL)
	if !ok {
		return c.serverURL
	}
	return u
}

// PerformPrediction invokes performPrediction operation.
//
// Perform prediction.
//
// GET /api/v1/prediction
func (c *Client) PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResult, error) {
	res, err := c.sendPerformPrediction(ctx, params)
	return res, err
}

func (c *Client) sendPerformPrediction(ctx context.Context, params PerformPredictionParams) (res *PredictionResult, err error) {
	otelAttrs := []attribute.KeyValue{
		otelogen.OperationID("performPrediction"),
		semconv.HTTPRequestMethodKey.String("GET"),
		semconv.URLTemplateKey.String("/api/v1/prediction"),
	}
	otelAttrs = append(otelAttrs, c.cfg.Attributes...)

	// Run stopwatch.
	startTime := time.Now()
	defer func() {
		// Use floating point division here for higher precision (instead of Millisecond method).
		elapsedDuration := time.Since(startTime)
		c.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), metric.WithAttributes(otelAttrs...))
	}()

	// Increment request counter.
	c.requests.Add(ctx, 1, metric.WithAttributes(otelAttrs...))

	// Start a span for this request.
	ctx, span := c.cfg.Tracer.Start(ctx, PerformPredictionOperation,
		trace.WithAttributes(otelAttrs...),
		clientSpanKind,
	)
	// Track stage for error reporting.
	var stage string
	defer func() {
		if err != nil {
			span.RecordError(err)
			span.SetStatus(codes.Error, stage)
			c.errors.Add(ctx, 1, metric.WithAttributes(otelAttrs...))
		}
		span.End()
	}()

	stage = "BuildURL"
	u := uri.Clone(c.requestURL(ctx))
	var pathParts [1]string
	pathParts[0] = "/api/v1/prediction"
	uri.AddPathParts(u, pathParts[:]...)

	stage = "EncodeQueryParams"
	q := uri.NewQueryEncoder()
	{
		// Encode "launch_latitude" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "launch_latitude",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.LaunchLatitude.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "launch_longitude" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "launch_longitude",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.LaunchLongitude.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "launch_datetime" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "launch_datetime",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.LaunchDatetime.Get(); ok {
				return e.EncodeValue(conv.DateTimeToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "launch_altitude" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "launch_altitude",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.LaunchAltitude.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "profile" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "profile",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.Profile.Get(); ok {
				return e.EncodeValue(conv.StringToString(string(val)))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "ascent_rate" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "ascent_rate",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.AscentRate.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "burst_altitude" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "burst_altitude",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.BurstAltitude.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "descent_rate" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "descent_rate",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.DescentRate.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "float_altitude" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "float_altitude",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.FloatAltitude.Get(); ok {
				return e.EncodeValue(conv.Float64ToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "stop_datetime" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "stop_datetime",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.StopDatetime.Get(); ok {
				return e.EncodeValue(conv.DateTimeToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "ascent_curve" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "ascent_curve",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.AscentCurve.Get(); ok {
				return e.EncodeValue(conv.StringToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "descent_curve" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "descent_curve",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.DescentCurve.Get(); ok {
				return e.EncodeValue(conv.StringToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "simulate_stages" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "simulate_stages",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if params.SimulateStages != nil {
				return e.EncodeArray(func(e uri.Encoder) error {
					for i, item := range params.SimulateStages {
						if err := func() error {
							return e.EncodeValue(conv.StringToString(string(item)))
						}(); err != nil {
							return errors.Wrapf(err, "[%d]", i)
						}
					}
					return nil
				})
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "interpolate" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "interpolate",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.Interpolate.Get(); ok {
				return e.EncodeValue(conv.BoolToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "format" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "format",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.Format.Get(); ok {
				return e.EncodeValue(conv.StringToString(string(val)))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	{
		// Encode "dataset" parameter.
		cfg := uri.QueryParameterEncodingConfig{
			Name:    "dataset",
			Style:   uri.QueryStyleForm,
			Explode: true,
		}

		if err := q.EncodeParam(cfg, func(e uri.Encoder) error {
			if val, ok := params.Dataset.Get(); ok {
				return e.EncodeValue(conv.DateTimeToString(val))
			}
			return nil
		}); err != nil {
			return res, errors.Wrap(err, "encode query")
		}
	}
	u.RawQuery = q.Values().Encode()

	stage = "EncodeRequest"
	r, err := ht.NewRequest(ctx, "GET", u)
	if err != nil {
		return res, errors.Wrap(err, "create request")
	}

	stage = "SendRequest"
	resp, err := c.cfg.Client.Do(r)
	if err != nil {
		return res, errors.Wrap(err, "do request")
	}
	defer resp.Body.Close()

	stage = "DecodeResponse"
	result, err := decodePerformPredictionResponse(resp)
	if err != nil {
		return res, errors.Wrap(err, "decode response")
	}

	return result, nil
}

// ReadinessCheck invokes readinessCheck operation.
//
// Readiness check.
//
// GET /ready
func (c *Client) ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) {
	res, err := c.sendReadinessCheck(ctx)
	return res, err
}

func (c *Client) sendReadinessCheck(ctx context.Context) (res *ReadinessResponse, err error) {
	otelAttrs := []attribute.KeyValue{
		otelogen.OperationID("readinessCheck"),
		semconv.HTTPRequestMethodKey.String("GET"),
		semconv.URLTemplateKey.String("/ready"),
	}
	otelAttrs = append(otelAttrs, c.cfg.Attributes...)

	// Run stopwatch.
	startTime := time.Now()
	defer func() {
		// Use floating point division here for higher precision (instead of Millisecond method).
		elapsedDuration := time.Since(startTime)
		c.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), metric.WithAttributes(otelAttrs...))
	}()

	// Increment request counter.
	c.requests.Add(ctx, 1, metric.WithAttributes(otelAttrs...))

	// Start a span for this request.
	ctx, span := c.cfg.Tracer.Start(ctx, ReadinessCheckOperation,
		trace.WithAttributes(otelAttrs...),
		clientSpanKind,
	)
	// Track stage for error reporting.
	var stage string
	defer func() {
		if err != nil {
			span.RecordError(err)
			span.SetStatus(codes.Error, stage)
			c.errors.Add(ctx, 1, metric.WithAttributes(otelAttrs...))
		}
		span.End()
	}()

	stage = "BuildURL"
	u := uri.Clone(c.requestURL(ctx))
	var pathParts [1]string
	pathParts[0] = "/ready"
	uri.AddPathParts(u, pathParts[:]...)

	stage = "EncodeRequest"
	r, err := ht.NewRequest(ctx, "GET", u)
	if err != nil {
		return res, errors.Wrap(err, "create request")
	}

	stage = "SendRequest"
	resp, err := c.cfg.Client.Do(r)
	if err != nil {
		return res, errors.Wrap(err, "do request")
	}
	defer resp.Body.Close()

	stage = "DecodeResponse"
	result, err := decodeReadinessCheckResponse(resp)
	if err != nil {
		return res, errors.Wrap(err, "decode response")
	}

	return result, nil
}
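Client-side usage mirrors the server handler's parameter set; optional query parameters travel as Opt wrappers. A fragment sketch with a placeholder URL, assuming the NewOptFloat64/NewOptDateTime constructors that ogen generates alongside the Opt types used above:

c, err := gsn.NewClient("http://localhost:8080")
if err != nil {
	panic(err)
}
res, err := c.PerformPrediction(context.Background(), gsn.PerformPredictionParams{
	LaunchLatitude:  gsn.NewOptFloat64(40.0),
	LaunchLongitude: gsn.NewOptFloat64(-105.0),
	LaunchDatetime:  gsn.NewOptDateTime(time.Now()),
	LaunchAltitude:  gsn.NewOptFloat64(1000),
})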
@ -1,379 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"context"
	"net/http"
	"time"

	"github.com/go-faster/errors"
	ht "github.com/ogen-go/ogen/http"
	"github.com/ogen-go/ogen/middleware"
	"github.com/ogen-go/ogen/ogenerrors"
	"github.com/ogen-go/ogen/otelogen"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
	"go.opentelemetry.io/otel/trace"
)

type codeRecorder struct {
	http.ResponseWriter
	status int
}

func (c *codeRecorder) WriteHeader(status int) {
	c.status = status
	c.ResponseWriter.WriteHeader(status)
}

func (c *codeRecorder) Unwrap() http.ResponseWriter {
	return c.ResponseWriter
}

// handlePerformPredictionRequest handles performPrediction operation.
//
// Perform prediction.
//
// GET /api/v1/prediction
func (s *Server) handlePerformPredictionRequest(args [0]string, argsEscaped bool, w http.ResponseWriter, r *http.Request) {
	statusWriter := &codeRecorder{ResponseWriter: w}
	w = statusWriter
	otelAttrs := []attribute.KeyValue{
		otelogen.OperationID("performPrediction"),
		semconv.HTTPRequestMethodKey.String("GET"),
		semconv.HTTPRouteKey.String("/api/v1/prediction"),
	}

	// Start a span for this request.
	ctx, span := s.cfg.Tracer.Start(r.Context(), PerformPredictionOperation,
		trace.WithAttributes(otelAttrs...),
		serverSpanKind,
	)
	defer span.End()

	// Add Labeler to context.
	labeler := &Labeler{attrs: otelAttrs}
	ctx = contextWithLabeler(ctx, labeler)

	// Run stopwatch.
	startTime := time.Now()
	defer func() {
		elapsedDuration := time.Since(startTime)

		attrSet := labeler.AttributeSet()
		attrs := attrSet.ToSlice()
		code := statusWriter.status
		if code != 0 {
			codeAttr := semconv.HTTPResponseStatusCode(code)
			attrs = append(attrs, codeAttr)
			span.SetAttributes(codeAttr)
		}
		attrOpt := metric.WithAttributes(attrs...)

		// Increment request counter.
		s.requests.Add(ctx, 1, attrOpt)

		// Use floating point division here for higher precision (instead of Millisecond method).
		s.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), attrOpt)
	}()

	var (
		recordError = func(stage string, err error) {
			span.RecordError(err)

			// https://opentelemetry.io/docs/specs/semconv/http/http-spans/#status
			// Span Status MUST be left unset if HTTP status code was in the 1xx, 2xx or 3xx ranges,
			// unless there was another error (e.g., network error receiving the response body; or 3xx codes with
			// max redirects exceeded), in which case status MUST be set to Error.
			code := statusWriter.status
			if code < 100 || code >= 500 {
				span.SetStatus(codes.Error, stage)
			}

			attrSet := labeler.AttributeSet()
			attrs := attrSet.ToSlice()
			if code != 0 {
				attrs = append(attrs, semconv.HTTPResponseStatusCode(code))
			}

			s.errors.Add(ctx, 1, metric.WithAttributes(attrs...))
		}
		err          error
		opErrContext = ogenerrors.OperationContext{
			Name: PerformPredictionOperation,
			ID:   "performPrediction",
		}
	)
	params, err := decodePerformPredictionParams(args, argsEscaped, r)
	if err != nil {
		err = &ogenerrors.DecodeParamsError{
			OperationContext: opErrContext,
			Err:              err,
		}
		defer recordError("DecodeParams", err)
		s.cfg.ErrorHandler(ctx, w, r, err)
		return
	}

	var rawBody []byte

	var response *PredictionResult
	if m := s.cfg.Middleware; m != nil {
		mreq := middleware.Request{
			Context:          ctx,
			OperationName:    PerformPredictionOperation,
			OperationSummary: "Perform prediction",
			OperationID:      "performPrediction",
			Body:             nil,
			RawBody:          rawBody,
			Params: middleware.Parameters{
				{
					Name: "launch_latitude",
					In:   "query",
				}: params.LaunchLatitude,
				{
					Name: "launch_longitude",
					In:   "query",
				}: params.LaunchLongitude,
				{
					Name: "launch_datetime",
					In:   "query",
				}: params.LaunchDatetime,
				{
					Name: "launch_altitude",
					In:   "query",
				}: params.LaunchAltitude,
				{
					Name: "profile",
					In:   "query",
				}: params.Profile,
				{
					Name: "ascent_rate",
					In:   "query",
				}: params.AscentRate,
				{
					Name: "burst_altitude",
					In:   "query",
				}: params.BurstAltitude,
				{
					Name: "descent_rate",
					In:   "query",
				}: params.DescentRate,
				{
					Name: "float_altitude",
					In:   "query",
				}: params.FloatAltitude,
				{
					Name: "stop_datetime",
					In:   "query",
				}: params.StopDatetime,
				{
					Name: "ascent_curve",
					In:   "query",
				}: params.AscentCurve,
				{
					Name: "descent_curve",
					In:   "query",
				}: params.DescentCurve,
				{
					Name: "simulate_stages",
					In:   "query",
				}: params.SimulateStages,
				{
					Name: "interpolate",
					In:   "query",
				}: params.Interpolate,
				{
					Name: "format",
					In:   "query",
				}: params.Format,
				{
					Name: "dataset",
					In:   "query",
				}: params.Dataset,
			},
			Raw: r,
		}

		type (
			Request  = struct{}
			Params   = PerformPredictionParams
			Response = *PredictionResult
		)
		response, err = middleware.HookMiddleware[
			Request,
			Params,
			Response,
		](
			m,
			mreq,
			unpackPerformPredictionParams,
			func(ctx context.Context, request Request, params Params) (response Response, err error) {
				response, err = s.h.PerformPrediction(ctx, params)
				return response, err
			},
		)
	} else {
		response, err = s.h.PerformPrediction(ctx, params)
	}
	if err != nil {
		if errRes, ok := errors.Into[*ErrorStatusCode](err); ok {
			if err := encodeErrorResponse(errRes, w, span); err != nil {
				defer recordError("Internal", err)
			}
			return
		}
		if errors.Is(err, ht.ErrNotImplemented) {
			s.cfg.ErrorHandler(ctx, w, r, err)
			return
		}
		if err := encodeErrorResponse(s.h.NewError(ctx, err), w, span); err != nil {
			defer recordError("Internal", err)
		}
		return
	}

	if err := encodePerformPredictionResponse(response, w, span); err != nil {
		defer recordError("EncodeResponse", err)
		if !errors.Is(err, ht.ErrInternalServerErrorResponse) {
			s.cfg.ErrorHandler(ctx, w, r, err)
		}
		return
	}
}

// handleReadinessCheckRequest handles readinessCheck operation.
//
// Readiness check.
//
// GET /ready
func (s *Server) handleReadinessCheckRequest(args [0]string, argsEscaped bool, w http.ResponseWriter, r *http.Request) {
	statusWriter := &codeRecorder{ResponseWriter: w}
	w = statusWriter
	otelAttrs := []attribute.KeyValue{
		otelogen.OperationID("readinessCheck"),
		semconv.HTTPRequestMethodKey.String("GET"),
		semconv.HTTPRouteKey.String("/ready"),
	}

	// Start a span for this request.
	ctx, span := s.cfg.Tracer.Start(r.Context(), ReadinessCheckOperation,
		trace.WithAttributes(otelAttrs...),
		serverSpanKind,
	)
	defer span.End()

	// Add Labeler to context.
	labeler := &Labeler{attrs: otelAttrs}
	ctx = contextWithLabeler(ctx, labeler)

	// Run stopwatch.
	startTime := time.Now()
	defer func() {
		elapsedDuration := time.Since(startTime)

		attrSet := labeler.AttributeSet()
		attrs := attrSet.ToSlice()
		code := statusWriter.status
		if code != 0 {
			codeAttr := semconv.HTTPResponseStatusCode(code)
			attrs = append(attrs, codeAttr)
			span.SetAttributes(codeAttr)
		}
		attrOpt := metric.WithAttributes(attrs...)

		// Increment request counter.
		s.requests.Add(ctx, 1, attrOpt)

		// Use floating point division here for higher precision (instead of Millisecond method).
		s.duration.Record(ctx, float64(elapsedDuration)/float64(time.Millisecond), attrOpt)
	}()

	var (
		recordError = func(stage string, err error) {
			span.RecordError(err)

			// https://opentelemetry.io/docs/specs/semconv/http/http-spans/#status
			// Span Status MUST be left unset if HTTP status code was in the 1xx, 2xx or 3xx ranges,
			// unless there was another error (e.g., network error receiving the response body; or 3xx codes with
			// max redirects exceeded), in which case status MUST be set to Error.
			code := statusWriter.status
			if code < 100 || code >= 500 {
				span.SetStatus(codes.Error, stage)
			}

			attrSet := labeler.AttributeSet()
			attrs := attrSet.ToSlice()
			if code != 0 {
				attrs = append(attrs, semconv.HTTPResponseStatusCode(code))
			}

			s.errors.Add(ctx, 1, metric.WithAttributes(attrs...))
		}
		err error
	)

	var rawBody []byte

	var response *ReadinessResponse
	if m := s.cfg.Middleware; m != nil {
		mreq := middleware.Request{
			Context:          ctx,
			OperationName:    ReadinessCheckOperation,
			OperationSummary: "Readiness check",
			OperationID:      "readinessCheck",
			Body:             nil,
			RawBody:          rawBody,
			Params:           middleware.Parameters{},
			Raw:              r,
		}

		type (
			Request  = struct{}
			Params   = struct{}
			Response = *ReadinessResponse
		)
		response, err = middleware.HookMiddleware[
			Request,
			Params,
			Response,
		](
			m,
			mreq,
			nil,
			func(ctx context.Context, request Request, params Params) (response Response, err error) {
				response, err = s.h.ReadinessCheck(ctx)
				return response, err
			},
		)
	} else {
		response, err = s.h.ReadinessCheck(ctx)
	}
	if err != nil {
		if errRes, ok := errors.Into[*ErrorStatusCode](err); ok {
			if err := encodeErrorResponse(errRes, w, span); err != nil {
				defer recordError("Internal", err)
			}
			return
		}
		if errors.Is(err, ht.ErrNotImplemented) {
			s.cfg.ErrorHandler(ctx, w, r, err)
			return
		}
		if err := encodeErrorResponse(s.h.NewError(ctx, err), w, span); err != nil {
			defer recordError("Internal", err)
		}
		return
	}

	if err := encodeReadinessCheckResponse(response, w, span); err != nil {
		defer recordError("EncodeResponse", err)
		if !errors.Is(err, ht.ErrInternalServerErrorResponse) {
			s.cfg.ErrorHandler(ctx, w, r, err)
		}
		return
	}
}
@ -1,963 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"math/bits"
	"strconv"
	"time"

	"github.com/go-faster/errors"
	"github.com/go-faster/jx"
	"github.com/ogen-go/ogen/json"
	"github.com/ogen-go/ogen/validate"
)

// Encode implements json.Marshaler.
func (s *Error) Encode(e *jx.Encoder) {
	e.ObjStart()
	s.encodeFields(e)
	e.ObjEnd()
}

// encodeFields encodes fields.
func (s *Error) encodeFields(e *jx.Encoder) {
	{
		e.FieldStart("message")
		e.Str(s.Message)
	}
	{
		if s.Details.Set {
			e.FieldStart("details")
			s.Details.Encode(e)
		}
	}
}

var jsonFieldsNameOfError = [2]string{
	0: "message",
	1: "details",
}

// Decode decodes Error from json.
func (s *Error) Decode(d *jx.Decoder) error {
	if s == nil {
		return errors.New("invalid: unable to decode Error to nil")
	}
	var requiredBitSet [1]uint8

	if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error {
		switch string(k) {
		case "message":
			requiredBitSet[0] |= 1 << 0
			if err := func() error {
				v, err := d.Str()
				s.Message = string(v)
				if err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return errors.Wrap(err, "decode field \"message\"")
			}
		case "details":
			if err := func() error {
				s.Details.Reset()
				if err := s.Details.Decode(d); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return errors.Wrap(err, "decode field \"details\"")
			}
		default:
			return d.Skip()
		}
		return nil
	}); err != nil {
		return errors.Wrap(err, "decode Error")
	}
	// Validate required fields.
	var failures []validate.FieldError
	for i, mask := range [1]uint8{
		0b00000001,
	} {
		if result := (requiredBitSet[i] & mask) ^ mask; result != 0 {
			// Mask only required fields and check equality to mask using XOR.
			//
			// If XOR result is not zero, result is not equal to expected, so some fields are missed.
			// Bits of fields which would be set are actually bits of missed fields.
			missed := bits.OnesCount8(result)
			for bitN := 0; bitN < missed; bitN++ {
				bitIdx := bits.TrailingZeros8(result)
				fieldIdx := i*8 + bitIdx
				var name string
				if fieldIdx < len(jsonFieldsNameOfError) {
					name = jsonFieldsNameOfError[fieldIdx]
				} else {
					name = strconv.Itoa(fieldIdx)
				}
				failures = append(failures, validate.FieldError{
					Name:  name,
					Error: validate.ErrFieldRequired,
				})
				// Reset bit.
				result &^= 1 << bitIdx
			}
		}
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}

	return nil
}

// MarshalJSON implements stdjson.Marshaler.
func (s *Error) MarshalJSON() ([]byte, error) {
	e := jx.Encoder{}
	s.Encode(&e)
	return e.Bytes(), nil
}

// UnmarshalJSON implements stdjson.Unmarshaler.
func (s *Error) UnmarshalJSON(data []byte) error {
	d := jx.DecodeBytes(data)
	return s.Decode(d)
}

// Encode encodes bool as json.
func (o OptBool) Encode(e *jx.Encoder) {
	if !o.Set {
		return
	}
	e.Bool(bool(o.Value))
}

// Decode decodes bool from json.
func (o *OptBool) Decode(d *jx.Decoder) error {
	if o == nil {
		return errors.New("invalid: unable to decode OptBool to nil")
	}
	o.Set = true
	v, err := d.Bool()
	if err != nil {
		return err
	}
	o.Value = bool(v)
	return nil
}

// MarshalJSON implements stdjson.Marshaler.
func (s OptBool) MarshalJSON() ([]byte, error) {
	e := jx.Encoder{}
	s.Encode(&e)
	return e.Bytes(), nil
}

// UnmarshalJSON implements stdjson.Unmarshaler.
func (s *OptBool) UnmarshalJSON(data []byte) error {
	d := jx.DecodeBytes(data)
	return s.Decode(d)
}

// Encode encodes time.Time as json.
func (o OptDateTime) Encode(e *jx.Encoder, format func(*jx.Encoder, time.Time)) {
	if !o.Set {
		return
	}
	format(e, o.Value)
}

// Decode decodes time.Time from json.
func (o *OptDateTime) Decode(d *jx.Decoder, format func(*jx.Decoder) (time.Time, error)) error {
	if o == nil {
		return errors.New("invalid: unable to decode OptDateTime to nil")
	}
	o.Set = true
	v, err := format(d)
	if err != nil {
		return err
	}
	o.Value = v
	return nil
}

// MarshalJSON implements stdjson.Marshaler.
func (s OptDateTime) MarshalJSON() ([]byte, error) {
	e := jx.Encoder{}
	s.Encode(&e, json.EncodeDateTime)
	return e.Bytes(), nil
}

// UnmarshalJSON implements stdjson.Unmarshaler.
func (s *OptDateTime) UnmarshalJSON(data []byte) error {
	d := jx.DecodeBytes(data)
	return s.Decode(d, json.DecodeDateTime)
}

// Encode encodes string as json.
func (o OptString) Encode(e *jx.Encoder) {
	if !o.Set {
		return
	}
	e.Str(string(o.Value))
}

// Decode decodes string from json.
func (o *OptString) Decode(d *jx.Decoder) error {
	if o == nil {
		return errors.New("invalid: unable to decode OptString to nil")
	}
	o.Set = true
	v, err := d.Str()
	if err != nil {
		return err
	}
	o.Value = string(v)
	return nil
}

// MarshalJSON implements stdjson.Marshaler.
func (s OptString) MarshalJSON() ([]byte, error) {
	e := jx.Encoder{}
	s.Encode(&e)
	return e.Bytes(), nil
}

// UnmarshalJSON implements stdjson.Unmarshaler.
func (s *OptString) UnmarshalJSON(data []byte) error {
	d := jx.DecodeBytes(data)
	return s.Decode(d)
}

// Encode implements json.Marshaler.
func (s *PredictionResult) Encode(e *jx.Encoder) {
	e.ObjStart()
	s.encodeFields(e)
	e.ObjEnd()
}

// encodeFields encodes fields.
func (s *PredictionResult) encodeFields(e *jx.Encoder) {
	{
		e.FieldStart("metadata")
		s.Metadata.Encode(e)
	}
	{
		e.FieldStart("prediction")
		e.ArrStart()
		for _, elem := range s.Prediction {
			elem.Encode(e)
		}
		e.ArrEnd()
	}
}

var jsonFieldsNameOfPredictionResult = [2]string{
	0: "metadata",
	1: "prediction",
}

// Decode decodes PredictionResult from json.
func (s *PredictionResult) Decode(d *jx.Decoder) error {
	if s == nil {
		return errors.New("invalid: unable to decode PredictionResult to nil")
	}
	var requiredBitSet [1]uint8

	if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error {
		switch string(k) {
		case "metadata":
			requiredBitSet[0] |= 1 << 0
			if err := func() error {
				if err := s.Metadata.Decode(d); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return errors.Wrap(err, "decode field \"metadata\"")
			}
		case "prediction":
			requiredBitSet[0] |= 1 << 1
			if err := func() error {
				s.Prediction = make([]PredictionResultPredictionItem, 0)
				if err := d.Arr(func(d *jx.Decoder) error {
					var elem PredictionResultPredictionItem
					if err := elem.Decode(d); err != nil {
						return err
					}
					s.Prediction = append(s.Prediction, elem)
					return nil
				}); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return errors.Wrap(err, "decode field \"prediction\"")
			}
		default:
			return d.Skip()
		}
		return nil
	}); err != nil {
		return errors.Wrap(err, "decode PredictionResult")
	}
	// Validate required fields.
	var failures []validate.FieldError
	for i, mask := range [1]uint8{
		0b00000011,
	} {
		if result := (requiredBitSet[i] & mask) ^ mask; result != 0 {
			// Mask only required fields and check equality to mask using XOR.
			//
			// If XOR result is not zero, result is not equal to expected, so some fields are missed.
			// Bits of fields which would be set are actually bits of missed fields.
			missed := bits.OnesCount8(result)
			for bitN := 0; bitN < missed; bitN++ {
				bitIdx := bits.TrailingZeros8(result)
				fieldIdx := i*8 + bitIdx
				var name string
				if fieldIdx < len(jsonFieldsNameOfPredictionResult) {
					name = jsonFieldsNameOfPredictionResult[fieldIdx]
				} else {
					name = strconv.Itoa(fieldIdx)
				}
				failures = append(failures, validate.FieldError{
					Name:  name,
					Error: validate.ErrFieldRequired,
				})
				// Reset bit.
				result &^= 1 << bitIdx
			}
		}
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}

	return nil
}

// MarshalJSON implements stdjson.Marshaler.
func (s *PredictionResult) MarshalJSON() ([]byte, error) {
	e := jx.Encoder{}
	s.Encode(&e)
	return e.Bytes(), nil
}

// UnmarshalJSON implements stdjson.Unmarshaler.
func (s *PredictionResult) UnmarshalJSON(data []byte) error {
	d := jx.DecodeBytes(data)
	return s.Decode(d)
}

// Encode implements json.Marshaler.
func (s *PredictionResultMetadata) Encode(e *jx.Encoder) {
	e.ObjStart()
	s.encodeFields(e)
	e.ObjEnd()
}

// encodeFields encodes fields.
func (s *PredictionResultMetadata) encodeFields(e *jx.Encoder) {
	{
		e.FieldStart("complete_datetime")
		json.EncodeDateTime(e, s.CompleteDatetime)
	}
	{
		e.FieldStart("start_datetime")
		json.EncodeDateTime(e, s.StartDatetime)
	}
}

var jsonFieldsNameOfPredictionResultMetadata = [2]string{
	0: "complete_datetime",
	1: "start_datetime",
}

// Decode decodes PredictionResultMetadata from json.
func (s *PredictionResultMetadata) Decode(d *jx.Decoder) error {
	if s == nil {
		return errors.New("invalid: unable to decode PredictionResultMetadata to nil")
	}
	var requiredBitSet [1]uint8

	if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error {
		switch string(k) {
		case "complete_datetime":
			requiredBitSet[0] |= 1 << 0
			if err := func() error {
				v, err := json.DecodeDateTime(d)
				s.CompleteDatetime = v
				if err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return errors.Wrap(err, "decode field \"complete_datetime\"")
			}
		case "start_datetime":
			requiredBitSet[0] |= 1 << 1
			if err := func() error {
				v, err := json.DecodeDateTime(d)
				s.StartDatetime = v
				if err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return errors.Wrap(err, "decode field \"start_datetime\"")
			}
		default:
			return d.Skip()
		}
		return nil
	}); err != nil {
		return errors.Wrap(err, "decode PredictionResultMetadata")
	}
	// Validate required fields.
	var failures []validate.FieldError
	for i, mask := range [1]uint8{
		0b00000011,
	} {
		if result := (requiredBitSet[i] & mask) ^ mask; result != 0 {
			// Mask only required fields and check equality to mask using XOR.
			//
			// If XOR result is not zero, result is not equal to expected, so some fields are missed.
			// Bits of fields which would be set are actually bits of missed fields.
			missed := bits.OnesCount8(result)
			for bitN := 0; bitN < missed; bitN++ {
				bitIdx := bits.TrailingZeros8(result)
				fieldIdx := i*8 + bitIdx
				var name string
				if fieldIdx < len(jsonFieldsNameOfPredictionResultMetadata) {
					name = jsonFieldsNameOfPredictionResultMetadata[fieldIdx]
				} else {
					name = strconv.Itoa(fieldIdx)
				}
				failures = append(failures, validate.FieldError{
					Name:  name,
					Error: validate.ErrFieldRequired,
				})
				// Reset bit.
				result &^= 1 << bitIdx
			}
		}
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}

	return nil
}

// MarshalJSON implements stdjson.Marshaler.
func (s *PredictionResultMetadata) MarshalJSON() ([]byte, error) {
	e := jx.Encoder{}
	s.Encode(&e)
	return e.Bytes(), nil
}

// UnmarshalJSON implements stdjson.Unmarshaler.
func (s *PredictionResultMetadata) UnmarshalJSON(data []byte) error {
|
||||
d := jx.DecodeBytes(data)
|
||||
return s.Decode(d)
|
||||
}
|
||||
|
||||
// Encode implements json.Marshaler.
|
||||
func (s *PredictionResultPredictionItem) Encode(e *jx.Encoder) {
|
||||
e.ObjStart()
|
||||
s.encodeFields(e)
|
||||
e.ObjEnd()
|
||||
}
|
||||
|
||||
// encodeFields encodes fields.
|
||||
func (s *PredictionResultPredictionItem) encodeFields(e *jx.Encoder) {
|
||||
{
|
||||
e.FieldStart("stage")
|
||||
s.Stage.Encode(e)
|
||||
}
|
||||
{
|
||||
e.FieldStart("trajectory")
|
||||
e.ArrStart()
|
||||
for _, elem := range s.Trajectory {
|
||||
elem.Encode(e)
|
||||
}
|
||||
e.ArrEnd()
|
||||
}
|
||||
}
|
||||
|
||||
var jsonFieldsNameOfPredictionResultPredictionItem = [2]string{
|
||||
0: "stage",
|
||||
1: "trajectory",
|
||||
}
|
||||
|
||||
// Decode decodes PredictionResultPredictionItem from json.
|
||||
func (s *PredictionResultPredictionItem) Decode(d *jx.Decoder) error {
|
||||
if s == nil {
|
||||
return errors.New("invalid: unable to decode PredictionResultPredictionItem to nil")
|
||||
}
|
||||
var requiredBitSet [1]uint8
|
||||
|
||||
if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error {
|
||||
switch string(k) {
|
||||
case "stage":
|
||||
requiredBitSet[0] |= 1 << 0
|
||||
if err := func() error {
|
||||
if err := s.Stage.Decode(d); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"stage\"")
|
||||
}
|
||||
case "trajectory":
|
||||
requiredBitSet[0] |= 1 << 1
|
||||
if err := func() error {
|
||||
s.Trajectory = make([]PredictionResultPredictionItemTrajectoryItem, 0)
|
||||
if err := d.Arr(func(d *jx.Decoder) error {
|
||||
var elem PredictionResultPredictionItemTrajectoryItem
|
||||
if err := elem.Decode(d); err != nil {
|
||||
return err
|
||||
}
|
||||
s.Trajectory = append(s.Trajectory, elem)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"trajectory\"")
|
||||
}
|
||||
default:
|
||||
return d.Skip()
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "decode PredictionResultPredictionItem")
|
||||
}
|
||||
// Validate required fields.
|
||||
var failures []validate.FieldError
|
||||
for i, mask := range [1]uint8{
|
||||
0b00000011,
|
||||
} {
|
||||
if result := (requiredBitSet[i] & mask) ^ mask; result != 0 {
|
||||
// Mask only required fields and check equality to mask using XOR.
|
||||
//
|
||||
// If XOR result is not zero, result is not equal to expected, so some fields are missed.
|
||||
// Bits of fields which would be set are actually bits of missed fields.
|
||||
missed := bits.OnesCount8(result)
|
||||
for bitN := 0; bitN < missed; bitN++ {
|
||||
bitIdx := bits.TrailingZeros8(result)
|
||||
fieldIdx := i*8 + bitIdx
|
||||
var name string
|
||||
if fieldIdx < len(jsonFieldsNameOfPredictionResultPredictionItem) {
|
||||
name = jsonFieldsNameOfPredictionResultPredictionItem[fieldIdx]
|
||||
} else {
|
||||
name = strconv.Itoa(fieldIdx)
|
||||
}
|
||||
failures = append(failures, validate.FieldError{
|
||||
Name: name,
|
||||
Error: validate.ErrFieldRequired,
|
||||
})
|
||||
// Reset bit.
|
||||
result &^= 1 << bitIdx
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(failures) > 0 {
|
||||
return &validate.Error{Fields: failures}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements stdjson.Marshaler.
|
||||
func (s *PredictionResultPredictionItem) MarshalJSON() ([]byte, error) {
|
||||
e := jx.Encoder{}
|
||||
s.Encode(&e)
|
||||
return e.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements stdjson.Unmarshaler.
|
||||
func (s *PredictionResultPredictionItem) UnmarshalJSON(data []byte) error {
|
||||
d := jx.DecodeBytes(data)
|
||||
return s.Decode(d)
|
||||
}
|
||||
|
||||
// Encode encodes PredictionResultPredictionItemStage as json.
|
||||
func (s PredictionResultPredictionItemStage) Encode(e *jx.Encoder) {
|
||||
e.Str(string(s))
|
||||
}
|
||||
|
||||
// Decode decodes PredictionResultPredictionItemStage from json.
|
||||
func (s *PredictionResultPredictionItemStage) Decode(d *jx.Decoder) error {
|
||||
if s == nil {
|
||||
return errors.New("invalid: unable to decode PredictionResultPredictionItemStage to nil")
|
||||
}
|
||||
v, err := d.StrBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Try to use constant string.
|
||||
switch PredictionResultPredictionItemStage(v) {
|
||||
case PredictionResultPredictionItemStageAscent:
|
||||
*s = PredictionResultPredictionItemStageAscent
|
||||
case PredictionResultPredictionItemStageDescent:
|
||||
*s = PredictionResultPredictionItemStageDescent
|
||||
case PredictionResultPredictionItemStageFloat:
|
||||
*s = PredictionResultPredictionItemStageFloat
|
||||
default:
|
||||
*s = PredictionResultPredictionItemStage(v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements stdjson.Marshaler.
|
||||
func (s PredictionResultPredictionItemStage) MarshalJSON() ([]byte, error) {
|
||||
e := jx.Encoder{}
|
||||
s.Encode(&e)
|
||||
return e.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements stdjson.Unmarshaler.
|
||||
func (s *PredictionResultPredictionItemStage) UnmarshalJSON(data []byte) error {
|
||||
d := jx.DecodeBytes(data)
|
||||
return s.Decode(d)
|
||||
}
|
||||
|
||||
// Encode implements json.Marshaler.
|
||||
func (s *PredictionResultPredictionItemTrajectoryItem) Encode(e *jx.Encoder) {
|
||||
e.ObjStart()
|
||||
s.encodeFields(e)
|
||||
e.ObjEnd()
|
||||
}
|
||||
|
||||
// encodeFields encodes fields.
|
||||
func (s *PredictionResultPredictionItemTrajectoryItem) encodeFields(e *jx.Encoder) {
|
||||
{
|
||||
e.FieldStart("datetime")
|
||||
json.EncodeDateTime(e, s.Datetime)
|
||||
}
|
||||
{
|
||||
e.FieldStart("latitude")
|
||||
e.Float64(s.Latitude)
|
||||
}
|
||||
{
|
||||
e.FieldStart("longitude")
|
||||
e.Float64(s.Longitude)
|
||||
}
|
||||
{
|
||||
e.FieldStart("altitude")
|
||||
e.Float64(s.Altitude)
|
||||
}
|
||||
}
|
||||
|
||||
var jsonFieldsNameOfPredictionResultPredictionItemTrajectoryItem = [4]string{
|
||||
0: "datetime",
|
||||
1: "latitude",
|
||||
2: "longitude",
|
||||
3: "altitude",
|
||||
}
|
||||
|
||||
// Decode decodes PredictionResultPredictionItemTrajectoryItem from json.
|
||||
func (s *PredictionResultPredictionItemTrajectoryItem) Decode(d *jx.Decoder) error {
|
||||
if s == nil {
|
||||
return errors.New("invalid: unable to decode PredictionResultPredictionItemTrajectoryItem to nil")
|
||||
}
|
||||
var requiredBitSet [1]uint8
|
||||
|
||||
if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error {
|
||||
switch string(k) {
|
||||
case "datetime":
|
||||
requiredBitSet[0] |= 1 << 0
|
||||
if err := func() error {
|
||||
v, err := json.DecodeDateTime(d)
|
||||
s.Datetime = v
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"datetime\"")
|
||||
}
|
||||
case "latitude":
|
||||
requiredBitSet[0] |= 1 << 1
|
||||
if err := func() error {
|
||||
v, err := d.Float64()
|
||||
s.Latitude = float64(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"latitude\"")
|
||||
}
|
||||
case "longitude":
|
||||
requiredBitSet[0] |= 1 << 2
|
||||
if err := func() error {
|
||||
v, err := d.Float64()
|
||||
s.Longitude = float64(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"longitude\"")
|
||||
}
|
||||
case "altitude":
|
||||
requiredBitSet[0] |= 1 << 3
|
||||
if err := func() error {
|
||||
v, err := d.Float64()
|
||||
s.Altitude = float64(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"altitude\"")
|
||||
}
|
||||
default:
|
||||
return d.Skip()
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "decode PredictionResultPredictionItemTrajectoryItem")
|
||||
}
|
||||
// Validate required fields.
|
||||
var failures []validate.FieldError
|
||||
for i, mask := range [1]uint8{
|
||||
0b00001111,
|
||||
} {
|
||||
if result := (requiredBitSet[i] & mask) ^ mask; result != 0 {
|
||||
// Mask only required fields and check equality to mask using XOR.
|
||||
//
|
||||
// If XOR result is not zero, result is not equal to expected, so some fields are missed.
|
||||
// Bits of fields which would be set are actually bits of missed fields.
|
||||
missed := bits.OnesCount8(result)
|
||||
for bitN := 0; bitN < missed; bitN++ {
|
||||
bitIdx := bits.TrailingZeros8(result)
|
||||
fieldIdx := i*8 + bitIdx
|
||||
var name string
|
||||
if fieldIdx < len(jsonFieldsNameOfPredictionResultPredictionItemTrajectoryItem) {
|
||||
name = jsonFieldsNameOfPredictionResultPredictionItemTrajectoryItem[fieldIdx]
|
||||
} else {
|
||||
name = strconv.Itoa(fieldIdx)
|
||||
}
|
||||
failures = append(failures, validate.FieldError{
|
||||
Name: name,
|
||||
Error: validate.ErrFieldRequired,
|
||||
})
|
||||
// Reset bit.
|
||||
result &^= 1 << bitIdx
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(failures) > 0 {
|
||||
return &validate.Error{Fields: failures}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements stdjson.Marshaler.
|
||||
func (s *PredictionResultPredictionItemTrajectoryItem) MarshalJSON() ([]byte, error) {
|
||||
e := jx.Encoder{}
|
||||
s.Encode(&e)
|
||||
return e.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements stdjson.Unmarshaler.
|
||||
func (s *PredictionResultPredictionItemTrajectoryItem) UnmarshalJSON(data []byte) error {
|
||||
d := jx.DecodeBytes(data)
|
||||
return s.Decode(d)
|
||||
}
|
||||
|
||||
// Encode implements json.Marshaler.
|
||||
func (s *ReadinessResponse) Encode(e *jx.Encoder) {
|
||||
e.ObjStart()
|
||||
s.encodeFields(e)
|
||||
e.ObjEnd()
|
||||
}
|
||||
|
||||
// encodeFields encodes fields.
|
||||
func (s *ReadinessResponse) encodeFields(e *jx.Encoder) {
|
||||
{
|
||||
e.FieldStart("status")
|
||||
s.Status.Encode(e)
|
||||
}
|
||||
{
|
||||
if s.LastUpdate.Set {
|
||||
e.FieldStart("last_update")
|
||||
s.LastUpdate.Encode(e, json.EncodeDateTime)
|
||||
}
|
||||
}
|
||||
{
|
||||
if s.IsFresh.Set {
|
||||
e.FieldStart("is_fresh")
|
||||
s.IsFresh.Encode(e)
|
||||
}
|
||||
}
|
||||
{
|
||||
if s.ErrorMessage.Set {
|
||||
e.FieldStart("error_message")
|
||||
s.ErrorMessage.Encode(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var jsonFieldsNameOfReadinessResponse = [4]string{
|
||||
0: "status",
|
||||
1: "last_update",
|
||||
2: "is_fresh",
|
||||
3: "error_message",
|
||||
}
|
||||
|
||||
// Decode decodes ReadinessResponse from json.
|
||||
func (s *ReadinessResponse) Decode(d *jx.Decoder) error {
|
||||
if s == nil {
|
||||
return errors.New("invalid: unable to decode ReadinessResponse to nil")
|
||||
}
|
||||
var requiredBitSet [1]uint8
|
||||
|
||||
if err := d.ObjBytes(func(d *jx.Decoder, k []byte) error {
|
||||
switch string(k) {
|
||||
case "status":
|
||||
requiredBitSet[0] |= 1 << 0
|
||||
if err := func() error {
|
||||
if err := s.Status.Decode(d); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"status\"")
|
||||
}
|
||||
case "last_update":
|
||||
if err := func() error {
|
||||
s.LastUpdate.Reset()
|
||||
if err := s.LastUpdate.Decode(d, json.DecodeDateTime); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"last_update\"")
|
||||
}
|
||||
case "is_fresh":
|
||||
if err := func() error {
|
||||
s.IsFresh.Reset()
|
||||
if err := s.IsFresh.Decode(d); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"is_fresh\"")
|
||||
}
|
||||
case "error_message":
|
||||
if err := func() error {
|
||||
s.ErrorMessage.Reset()
|
||||
if err := s.ErrorMessage.Decode(d); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return errors.Wrap(err, "decode field \"error_message\"")
|
||||
}
|
||||
default:
|
||||
return d.Skip()
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "decode ReadinessResponse")
|
||||
}
|
||||
// Validate required fields.
|
||||
var failures []validate.FieldError
|
||||
for i, mask := range [1]uint8{
|
||||
0b00000001,
|
||||
} {
|
||||
if result := (requiredBitSet[i] & mask) ^ mask; result != 0 {
|
||||
// Mask only required fields and check equality to mask using XOR.
|
||||
//
|
||||
// If XOR result is not zero, result is not equal to expected, so some fields are missed.
|
||||
// Bits of fields which would be set are actually bits of missed fields.
|
||||
missed := bits.OnesCount8(result)
|
||||
for bitN := 0; bitN < missed; bitN++ {
|
||||
bitIdx := bits.TrailingZeros8(result)
|
||||
fieldIdx := i*8 + bitIdx
|
||||
var name string
|
||||
if fieldIdx < len(jsonFieldsNameOfReadinessResponse) {
|
||||
name = jsonFieldsNameOfReadinessResponse[fieldIdx]
|
||||
} else {
|
||||
name = strconv.Itoa(fieldIdx)
|
||||
}
|
||||
failures = append(failures, validate.FieldError{
|
||||
Name: name,
|
||||
Error: validate.ErrFieldRequired,
|
||||
})
|
||||
// Reset bit.
|
||||
result &^= 1 << bitIdx
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(failures) > 0 {
|
||||
return &validate.Error{Fields: failures}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements stdjson.Marshaler.
|
||||
func (s *ReadinessResponse) MarshalJSON() ([]byte, error) {
|
||||
e := jx.Encoder{}
|
||||
s.Encode(&e)
|
||||
return e.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements stdjson.Unmarshaler.
|
||||
func (s *ReadinessResponse) UnmarshalJSON(data []byte) error {
|
||||
d := jx.DecodeBytes(data)
|
||||
return s.Decode(d)
|
||||
}
|
||||
|
||||
// Encode encodes ReadinessResponseStatus as json.
|
||||
func (s ReadinessResponseStatus) Encode(e *jx.Encoder) {
|
||||
e.Str(string(s))
|
||||
}
|
||||
|
||||
// Decode decodes ReadinessResponseStatus from json.
|
||||
func (s *ReadinessResponseStatus) Decode(d *jx.Decoder) error {
|
||||
if s == nil {
|
||||
return errors.New("invalid: unable to decode ReadinessResponseStatus to nil")
|
||||
}
|
||||
v, err := d.StrBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Try to use constant string.
|
||||
switch ReadinessResponseStatus(v) {
|
||||
case ReadinessResponseStatusOk:
|
||||
*s = ReadinessResponseStatusOk
|
||||
case ReadinessResponseStatusNotReady:
|
||||
*s = ReadinessResponseStatusNotReady
|
||||
case ReadinessResponseStatusError:
|
||||
*s = ReadinessResponseStatusError
|
||||
default:
|
||||
*s = ReadinessResponseStatus(v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements stdjson.Marshaler.
|
||||
func (s ReadinessResponseStatus) MarshalJSON() ([]byte, error) {
|
||||
e := jx.Encoder{}
|
||||
s.Encode(&e)
|
||||
return e.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements stdjson.Unmarshaler.
|
||||
func (s *ReadinessResponseStatus) UnmarshalJSON(data []byte) error {
|
||||
d := jx.DecodeBytes(data)
|
||||
return s.Decode(d)
|
||||
}
|
||||
|
|
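Every schema type above follows the same pattern: Encode/Decode over go-faster/jx, thin MarshalJSON/UnmarshalJSON adapters, and required fields tracked in a bit set. A minimal round-trip sketch of those entry points, not part of the generated code; it assumes it lives alongside the gsn package types and all values are made up:

// Sketch: round-trip a PredictionResult through the generated codec.
// The decode path also enforces the required-field bit set.
func roundTripExample() error {
	src := PredictionResult{
		Metadata: PredictionResultMetadata{
			StartDatetime:    time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), // made-up timestamps
			CompleteDatetime: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
		},
		Prediction: []PredictionResultPredictionItem{{
			Stage: PredictionResultPredictionItemStageAscent,
			Trajectory: []PredictionResultPredictionItemTrajectoryItem{
				{Datetime: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Latitude: 52.2, Longitude: 0.1, Altitude: 100},
			},
		}},
	}
	buf, err := src.MarshalJSON()
	if err != nil {
		return err
	}
	var dst PredictionResult
	return dst.UnmarshalJSON(buf) // fails if "metadata" or "prediction" is missing
}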
@ -1,42 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
)

// Labeler is used to allow adding custom attributes to the server request metrics.
type Labeler struct {
	attrs []attribute.KeyValue
}

// Add attributes to the Labeler.
func (l *Labeler) Add(attrs ...attribute.KeyValue) {
	l.attrs = append(l.attrs, attrs...)
}

// AttributeSet returns the attributes added to the Labeler as an attribute.Set.
func (l *Labeler) AttributeSet() attribute.Set {
	return attribute.NewSet(l.attrs...)
}

type labelerContextKey struct{}

// LabelerFromContext retrieves the Labeler from the provided context, if present.
//
// If no Labeler was found in the provided context a new, empty Labeler is returned and the second
// return value is false. In this case it is safe to use the Labeler but any attributes added to
// it will not be used.
func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
	if l, ok := ctx.Value(labelerContextKey{}).(*Labeler); ok {
		return l, true
	}
	return &Labeler{}, false
}

func contextWithLabeler(ctx context.Context, l *Labeler) context.Context {
	return context.WithValue(ctx, labelerContextKey{}, l)
}
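A handler implementation can use this to tag per-request metrics; a hedged sketch, where myHandler and the attribute key are hypothetical:

// Sketch, not generated code: attach a custom attribute to this request's metrics.
func (h *myHandler) ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) {
	if l, ok := LabelerFromContext(ctx); ok {
		l.Add(attribute.String("probe.source", "kubelet")) // hypothetical attribute
	}
	return &ReadinessResponse{Status: ReadinessResponseStatusOk}, nil
}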
@ -1,10 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"github.com/ogen-go/ogen/middleware"
)

// Middleware is middleware type.
type Middleware = middleware.Middleware
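In recent ogen versions, middleware.Middleware is a function taking the typed request plus a next continuation; under that assumption (verify against the pinned ogen release), a logging middleware sketch:

// Sketch, not generated code; assumes ogen's middleware signature
// func(middleware.Request, middleware.Next) (middleware.Response, error).
func loggingMiddleware(req middleware.Request, next middleware.Next) (middleware.Response, error) {
	log.Printf("ogen operation %q", req.OperationName) // OperationName per ogen's middleware.Request
	return next(req)
}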
@ -1,11 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

// OperationName is the ogen operation name
type OperationName = string

const (
	PerformPredictionOperation OperationName = "PerformPrediction"
	ReadinessCheckOperation    OperationName = "ReadinessCheck"
)

File diff suppressed because it is too large
@ -1,3 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn
@ -1,3 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn
@ -1,198 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"io"
	"mime"
	"net/http"

	"github.com/go-faster/errors"
	"github.com/go-faster/jx"
	"github.com/ogen-go/ogen/ogenerrors"
	"github.com/ogen-go/ogen/validate"
)

func decodePerformPredictionResponse(resp *http.Response) (res *PredictionResult, _ error) {
	switch resp.StatusCode {
	case 200:
		// Code 200.
		ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
		if err != nil {
			return res, errors.Wrap(err, "parse media type")
		}
		switch {
		case ct == "application/json":
			buf, err := io.ReadAll(resp.Body)
			if err != nil {
				return res, err
			}
			d := jx.DecodeBytes(buf)

			var response PredictionResult
			if err := func() error {
				if err := response.Decode(d); err != nil {
					return err
				}
				if err := d.Skip(); err != io.EOF {
					return errors.New("unexpected trailing data")
				}
				return nil
			}(); err != nil {
				err = &ogenerrors.DecodeBodyError{
					ContentType: ct,
					Body:        buf,
					Err:         err,
				}
				return res, err
			}
			// Validate response.
			if err := func() error {
				if err := response.Validate(); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return res, errors.Wrap(err, "validate")
			}
			return &response, nil
		default:
			return res, validate.InvalidContentType(ct)
		}
	}
	// Convenient error response.
	defRes, err := func() (res *ErrorStatusCode, err error) {
		ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
		if err != nil {
			return res, errors.Wrap(err, "parse media type")
		}
		switch {
		case ct == "application/json":
			buf, err := io.ReadAll(resp.Body)
			if err != nil {
				return res, err
			}
			d := jx.DecodeBytes(buf)

			var response Error
			if err := func() error {
				if err := response.Decode(d); err != nil {
					return err
				}
				if err := d.Skip(); err != io.EOF {
					return errors.New("unexpected trailing data")
				}
				return nil
			}(); err != nil {
				err = &ogenerrors.DecodeBodyError{
					ContentType: ct,
					Body:        buf,
					Err:         err,
				}
				return res, err
			}
			return &ErrorStatusCode{
				StatusCode: resp.StatusCode,
				Response:   response,
			}, nil
		default:
			return res, validate.InvalidContentType(ct)
		}
	}()
	if err != nil {
		return res, errors.Wrapf(err, "default (code %d)", resp.StatusCode)
	}
	return res, errors.Wrap(defRes, "error")
}

func decodeReadinessCheckResponse(resp *http.Response) (res *ReadinessResponse, _ error) {
	switch resp.StatusCode {
	case 200:
		// Code 200.
		ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
		if err != nil {
			return res, errors.Wrap(err, "parse media type")
		}
		switch {
		case ct == "application/json":
			buf, err := io.ReadAll(resp.Body)
			if err != nil {
				return res, err
			}
			d := jx.DecodeBytes(buf)

			var response ReadinessResponse
			if err := func() error {
				if err := response.Decode(d); err != nil {
					return err
				}
				if err := d.Skip(); err != io.EOF {
					return errors.New("unexpected trailing data")
				}
				return nil
			}(); err != nil {
				err = &ogenerrors.DecodeBodyError{
					ContentType: ct,
					Body:        buf,
					Err:         err,
				}
				return res, err
			}
			// Validate response.
			if err := func() error {
				if err := response.Validate(); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				return res, errors.Wrap(err, "validate")
			}
			return &response, nil
		default:
			return res, validate.InvalidContentType(ct)
		}
	}
	// Convenient error response.
	defRes, err := func() (res *ErrorStatusCode, err error) {
		ct, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
		if err != nil {
			return res, errors.Wrap(err, "parse media type")
		}
		switch {
		case ct == "application/json":
			buf, err := io.ReadAll(resp.Body)
			if err != nil {
				return res, err
			}
			d := jx.DecodeBytes(buf)

			var response Error
			if err := func() error {
				if err := response.Decode(d); err != nil {
					return err
				}
				if err := d.Skip(); err != io.EOF {
					return errors.New("unexpected trailing data")
				}
				return nil
			}(); err != nil {
				err = &ogenerrors.DecodeBodyError{
					ContentType: ct,
					Body:        buf,
					Err:         err,
				}
				return res, err
			}
			return &ErrorStatusCode{
				StatusCode: resp.StatusCode,
				Response:   response,
			}, nil
		default:
			return res, validate.InvalidContentType(ct)
		}
	}()
	if err != nil {
		return res, errors.Wrapf(err, "default (code %d)", resp.StatusCode)
	}
	return res, errors.Wrap(defRes, "error")
}
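Both decoders funnel non-200 responses into *ErrorStatusCode, which implements error in the schemas file, so client code can recover the typed payload with errors.As. A sketch, assuming the generated client type and its PerformPrediction method come from the suppressed client file:

// Sketch, not generated code: surface the typed API error on the client side.
func callExample(ctx context.Context, client *Client) (*PredictionResult, error) {
	res, err := client.PerformPrediction(ctx, PerformPredictionParams{}) // Client/params assumed
	if err != nil {
		var apiErr *ErrorStatusCode
		if errors.As(err, &apiErr) { // go-faster/errors re-exports stdlib As
			log.Printf("API error %d: %s", apiErr.StatusCode, apiErr.Response.Message)
		}
		return nil, err
	}
	return res, nil
}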
@ -1,68 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"net/http"

	"github.com/go-faster/errors"
	"github.com/go-faster/jx"
	ht "github.com/ogen-go/ogen/http"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func encodePerformPredictionResponse(response *PredictionResult, w http.ResponseWriter, span trace.Span) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(200)
	span.SetStatus(codes.Ok, http.StatusText(200))

	e := new(jx.Encoder)
	response.Encode(e)
	if _, err := e.WriteTo(w); err != nil {
		return errors.Wrap(err, "write")
	}

	return nil
}

func encodeReadinessCheckResponse(response *ReadinessResponse, w http.ResponseWriter, span trace.Span) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(200)
	span.SetStatus(codes.Ok, http.StatusText(200))

	e := new(jx.Encoder)
	response.Encode(e)
	if _, err := e.WriteTo(w); err != nil {
		return errors.Wrap(err, "write")
	}

	return nil
}

func encodeErrorResponse(response *ErrorStatusCode, w http.ResponseWriter, span trace.Span) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	code := response.StatusCode
	if code == 0 {
		// Set default status code.
		code = http.StatusOK
	}
	w.WriteHeader(code)
	if st := http.StatusText(code); code >= http.StatusBadRequest {
		span.SetStatus(codes.Error, st)
	} else {
		span.SetStatus(codes.Ok, st)
	}

	e := new(jx.Encoder)
	response.Response.Encode(e)
	if _, err := e.WriteTo(w); err != nil {
		return errors.Wrap(err, "write")
	}

	if code >= http.StatusInternalServerError {
		return errors.Wrapf(ht.ErrInternalServerErrorResponse, "code: %d, message: %s", code, http.StatusText(code))
	}
	return nil
}
@ -1,258 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"net/http"
	"net/url"
	"strings"

	"github.com/ogen-go/ogen/uri"
)

func (s *Server) cutPrefix(path string) (string, bool) {
	prefix := s.cfg.Prefix
	if prefix == "" {
		return path, true
	}
	if !strings.HasPrefix(path, prefix) {
		// Prefix doesn't match.
		return "", false
	}
	// Cut prefix from the path.
	return strings.TrimPrefix(path, prefix), true
}

// ServeHTTP serves http request as defined by OpenAPI v3 specification,
// calling handler that matches the path or returning not found error.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	elem := r.URL.Path
	elemIsEscaped := false
	if rawPath := r.URL.RawPath; rawPath != "" {
		if normalized, ok := uri.NormalizeEscapedPath(rawPath); ok {
			elem = normalized
			elemIsEscaped = strings.ContainsRune(elem, '%')
		}
	}

	elem, ok := s.cutPrefix(elem)
	if !ok || len(elem) == 0 {
		s.notFound(w, r)
		return
	}

	// Static code generated router with unwrapped path search.
	switch {
	default:
		if len(elem) == 0 {
			break
		}
		switch elem[0] {
		case '/': // Prefix: "/"

			if l := len("/"); len(elem) >= l && elem[0:l] == "/" {
				elem = elem[l:]
			} else {
				break
			}

			if len(elem) == 0 {
				break
			}
			switch elem[0] {
			case 'a': // Prefix: "api/v1/prediction"

				if l := len("api/v1/prediction"); len(elem) >= l && elem[0:l] == "api/v1/prediction" {
					elem = elem[l:]
				} else {
					break
				}

				if len(elem) == 0 {
					// Leaf node.
					switch r.Method {
					case "GET":
						s.handlePerformPredictionRequest([0]string{}, elemIsEscaped, w, r)
					default:
						s.notAllowed(w, r, "GET")
					}

					return
				}

			case 'r': // Prefix: "ready"

				if l := len("ready"); len(elem) >= l && elem[0:l] == "ready" {
					elem = elem[l:]
				} else {
					break
				}

				if len(elem) == 0 {
					// Leaf node.
					switch r.Method {
					case "GET":
						s.handleReadinessCheckRequest([0]string{}, elemIsEscaped, w, r)
					default:
						s.notAllowed(w, r, "GET")
					}

					return
				}

			}

		}
	}
	s.notFound(w, r)
}

// Route is route object.
type Route struct {
	name           string
	summary        string
	operationID    string
	operationGroup string
	pathPattern    string
	count          int
	args           [0]string
}

// Name returns ogen operation name.
//
// It is guaranteed to be unique and not empty.
func (r Route) Name() string {
	return r.name
}

// Summary returns OpenAPI summary.
func (r Route) Summary() string {
	return r.summary
}

// OperationID returns OpenAPI operationId.
func (r Route) OperationID() string {
	return r.operationID
}

// OperationGroup returns the x-ogen-operation-group value.
func (r Route) OperationGroup() string {
	return r.operationGroup
}

// PathPattern returns OpenAPI path.
func (r Route) PathPattern() string {
	return r.pathPattern
}

// Args returns parsed arguments.
func (r Route) Args() []string {
	return r.args[:r.count]
}

// FindRoute finds Route for given method and path.
//
// Note: this method does not unescape path or handle reserved characters in path properly. Use FindPath instead.
func (s *Server) FindRoute(method, path string) (Route, bool) {
	return s.FindPath(method, &url.URL{Path: path})
}

// FindPath finds Route for given method and URL.
func (s *Server) FindPath(method string, u *url.URL) (r Route, _ bool) {
	var (
		elem = u.Path
		args = r.args
	)
	if rawPath := u.RawPath; rawPath != "" {
		if normalized, ok := uri.NormalizeEscapedPath(rawPath); ok {
			elem = normalized
		}
		defer func() {
			for i, arg := range r.args[:r.count] {
				if unescaped, err := url.PathUnescape(arg); err == nil {
					r.args[i] = unescaped
				}
			}
		}()
	}

	elem, ok := s.cutPrefix(elem)
	if !ok {
		return r, false
	}

	// Static code generated router with unwrapped path search.
	switch {
	default:
		if len(elem) == 0 {
			break
		}
		switch elem[0] {
		case '/': // Prefix: "/"

			if l := len("/"); len(elem) >= l && elem[0:l] == "/" {
				elem = elem[l:]
			} else {
				break
			}

			if len(elem) == 0 {
				break
			}
			switch elem[0] {
			case 'a': // Prefix: "api/v1/prediction"

				if l := len("api/v1/prediction"); len(elem) >= l && elem[0:l] == "api/v1/prediction" {
					elem = elem[l:]
				} else {
					break
				}

				if len(elem) == 0 {
					// Leaf node.
					switch method {
					case "GET":
						r.name = PerformPredictionOperation
						r.summary = "Perform prediction"
						r.operationID = "performPrediction"
						r.operationGroup = ""
						r.pathPattern = "/api/v1/prediction"
						r.args = args
						r.count = 0
						return r, true
					default:
						return
					}
				}

			case 'r': // Prefix: "ready"

				if l := len("ready"); len(elem) >= l && elem[0:l] == "ready" {
					elem = elem[l:]
				} else {
					break
				}

				if len(elem) == 0 {
					// Leaf node.
					switch method {
					case "GET":
						r.name = ReadinessCheckOperation
						r.summary = "Readiness check"
						r.operationID = "readinessCheck"
						r.operationGroup = ""
						r.pathPattern = "/ready"
						r.args = args
						r.count = 0
						return r, true
					default:
						return
					}
				}

			}

		}
	}
	return r, false
}
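Because *Server implements http.Handler through this router, mounting the API is a single call. A wiring sketch, assuming the generated constructor NewServer(Handler, ...ServerOption) that ogen conventionally emits in the server file:

// Sketch, not generated code: serve GET /api/v1/prediction and GET /ready on :8080.
func serveExample(h Handler) error {
	srv, err := NewServer(h) // NewServer assumed from the generated server file
	if err != nil {
		return err
	}
	return http.ListenAndServe(":8080", srv)
}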
@ -1,746 +0,0 @@
|
|||
// Code generated by ogen, DO NOT EDIT.
|
||||
|
||||
package gsn
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-faster/errors"
|
||||
)
|
||||
|
||||
func (s *ErrorStatusCode) Error() string {
|
||||
return fmt.Sprintf("code %d: %+v", s.StatusCode, s.Response)
|
||||
}
|
||||
|
||||
// Ref: #/components/schemas/Error
|
||||
type Error struct {
|
||||
Message string `json:"message"`
|
||||
Details OptString `json:"details"`
|
||||
}
|
||||
|
||||
// GetMessage returns the value of Message.
|
||||
func (s *Error) GetMessage() string {
|
||||
return s.Message
|
||||
}
|
||||
|
||||
// GetDetails returns the value of Details.
|
||||
func (s *Error) GetDetails() OptString {
|
||||
return s.Details
|
||||
}
|
||||
|
||||
// SetMessage sets the value of Message.
|
||||
func (s *Error) SetMessage(val string) {
|
||||
s.Message = val
|
||||
}
|
||||
|
||||
// SetDetails sets the value of Details.
|
||||
func (s *Error) SetDetails(val OptString) {
|
||||
s.Details = val
|
||||
}
|
||||
|
||||
// ErrorStatusCode wraps Error with StatusCode.
|
||||
type ErrorStatusCode struct {
|
||||
StatusCode int
|
||||
Response Error
|
||||
}
|
||||
|
||||
// GetStatusCode returns the value of StatusCode.
|
||||
func (s *ErrorStatusCode) GetStatusCode() int {
|
||||
return s.StatusCode
|
||||
}
|
||||
|
||||
// GetResponse returns the value of Response.
|
||||
func (s *ErrorStatusCode) GetResponse() Error {
|
||||
return s.Response
|
||||
}
|
||||
|
||||
// SetStatusCode sets the value of StatusCode.
|
||||
func (s *ErrorStatusCode) SetStatusCode(val int) {
|
||||
s.StatusCode = val
|
||||
}
|
||||
|
||||
// SetResponse sets the value of Response.
|
||||
func (s *ErrorStatusCode) SetResponse(val Error) {
|
||||
s.Response = val
|
||||
}
|
||||
|
||||
// NewOptBool returns new OptBool with value set to v.
|
||||
func NewOptBool(v bool) OptBool {
|
||||
return OptBool{
|
||||
Value: v,
|
||||
Set: true,
|
||||
}
|
||||
}
|
||||
|
||||
// OptBool is optional bool.
|
||||
type OptBool struct {
|
||||
Value bool
|
||||
Set bool
|
||||
}
|
||||
|
||||
// IsSet returns true if OptBool was set.
|
||||
func (o OptBool) IsSet() bool { return o.Set }
|
||||
|
||||
// Reset unsets value.
|
||||
func (o *OptBool) Reset() {
|
||||
var v bool
|
||||
o.Value = v
|
||||
o.Set = false
|
||||
}
|
||||
|
||||
// SetTo sets value to v.
|
||||
func (o *OptBool) SetTo(v bool) {
|
||||
o.Set = true
|
||||
o.Value = v
|
||||
}
|
||||
|
||||
// Get returns value and boolean that denotes whether value was set.
|
||||
func (o OptBool) Get() (v bool, ok bool) {
|
||||
if !o.Set {
|
||||
return v, false
|
||||
}
|
||||
return o.Value, true
|
||||
}
|
||||
|
||||
// Or returns value if set, or given parameter if does not.
|
||||
func (o OptBool) Or(d bool) bool {
|
||||
if v, ok := o.Get(); ok {
|
||||
return v
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// NewOptDateTime returns new OptDateTime with value set to v.
|
||||
func NewOptDateTime(v time.Time) OptDateTime {
|
||||
return OptDateTime{
|
||||
Value: v,
|
||||
Set: true,
|
||||
}
|
||||
}
|
||||
|
||||
// OptDateTime is optional time.Time.
|
||||
type OptDateTime struct {
|
||||
Value time.Time
|
||||
Set bool
|
||||
}
|
||||
|
||||
// IsSet returns true if OptDateTime was set.
|
||||
func (o OptDateTime) IsSet() bool { return o.Set }
|
||||
|
||||
// Reset unsets value.
|
||||
func (o *OptDateTime) Reset() {
|
||||
var v time.Time
|
||||
o.Value = v
|
||||
o.Set = false
|
||||
}
|
||||
|
||||
// SetTo sets value to v.
|
||||
func (o *OptDateTime) SetTo(v time.Time) {
|
||||
o.Set = true
|
||||
o.Value = v
|
||||
}
|
||||
|
||||
// Get returns value and boolean that denotes whether value was set.
|
||||
func (o OptDateTime) Get() (v time.Time, ok bool) {
|
||||
if !o.Set {
|
||||
return v, false
|
||||
}
|
||||
return o.Value, true
|
||||
}
|
||||
|
||||
// Or returns value if set, or given parameter if does not.
|
||||
func (o OptDateTime) Or(d time.Time) time.Time {
|
||||
if v, ok := o.Get(); ok {
|
||||
return v
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// NewOptFloat64 returns new OptFloat64 with value set to v.
|
||||
func NewOptFloat64(v float64) OptFloat64 {
|
||||
return OptFloat64{
|
||||
Value: v,
|
||||
Set: true,
|
||||
}
|
||||
}
|
||||
|
||||
// OptFloat64 is optional float64.
|
||||
type OptFloat64 struct {
|
||||
Value float64
|
||||
Set bool
|
||||
}
|
||||
|
||||
// IsSet returns true if OptFloat64 was set.
|
||||
func (o OptFloat64) IsSet() bool { return o.Set }
|
||||
|
||||
// Reset unsets value.
|
||||
func (o *OptFloat64) Reset() {
|
||||
var v float64
|
||||
o.Value = v
|
||||
o.Set = false
|
||||
}
|
||||
|
||||
// SetTo sets value to v.
|
||||
func (o *OptFloat64) SetTo(v float64) {
|
||||
o.Set = true
|
||||
o.Value = v
|
||||
}
|
||||
|
||||
// Get returns value and boolean that denotes whether value was set.
|
||||
func (o OptFloat64) Get() (v float64, ok bool) {
|
||||
if !o.Set {
|
||||
return v, false
|
||||
}
|
||||
return o.Value, true
|
||||
}
|
||||
|
||||
// Or returns value if set, or given parameter if does not.
|
||||
func (o OptFloat64) Or(d float64) float64 {
|
||||
if v, ok := o.Get(); ok {
|
||||
return v
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// NewOptPerformPredictionFormat returns new OptPerformPredictionFormat with value set to v.
|
||||
func NewOptPerformPredictionFormat(v PerformPredictionFormat) OptPerformPredictionFormat {
|
||||
return OptPerformPredictionFormat{
|
||||
Value: v,
|
||||
Set: true,
|
||||
}
|
||||
}
|
||||
|
||||
// OptPerformPredictionFormat is optional PerformPredictionFormat.
|
||||
type OptPerformPredictionFormat struct {
|
||||
Value PerformPredictionFormat
|
||||
Set bool
|
||||
}
|
||||
|
||||
// IsSet returns true if OptPerformPredictionFormat was set.
|
||||
func (o OptPerformPredictionFormat) IsSet() bool { return o.Set }
|
||||
|
||||
// Reset unsets value.
|
||||
func (o *OptPerformPredictionFormat) Reset() {
|
||||
var v PerformPredictionFormat
|
||||
o.Value = v
|
||||
o.Set = false
|
||||
}
|
||||
|
||||
// SetTo sets value to v.
|
||||
func (o *OptPerformPredictionFormat) SetTo(v PerformPredictionFormat) {
|
||||
o.Set = true
|
||||
o.Value = v
|
||||
}
|
||||
|
||||
// Get returns value and boolean that denotes whether value was set.
|
||||
func (o OptPerformPredictionFormat) Get() (v PerformPredictionFormat, ok bool) {
|
||||
if !o.Set {
|
||||
return v, false
|
||||
}
|
||||
return o.Value, true
|
||||
}
|
||||
|
||||
// Or returns value if set, or given parameter if does not.
|
||||
func (o OptPerformPredictionFormat) Or(d PerformPredictionFormat) PerformPredictionFormat {
|
||||
if v, ok := o.Get(); ok {
|
||||
return v
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// NewOptPerformPredictionProfile returns new OptPerformPredictionProfile with value set to v.
|
||||
func NewOptPerformPredictionProfile(v PerformPredictionProfile) OptPerformPredictionProfile {
|
||||
return OptPerformPredictionProfile{
|
||||
Value: v,
|
||||
Set: true,
|
||||
}
|
||||
}
|
||||
|
||||
// OptPerformPredictionProfile is optional PerformPredictionProfile.
|
||||
type OptPerformPredictionProfile struct {
|
||||
Value PerformPredictionProfile
|
||||
Set bool
|
||||
}
|
||||
|
||||
// IsSet returns true if OptPerformPredictionProfile was set.
|
||||
func (o OptPerformPredictionProfile) IsSet() bool { return o.Set }
|
||||
|
||||
// Reset unsets value.
|
||||
func (o *OptPerformPredictionProfile) Reset() {
|
||||
var v PerformPredictionProfile
|
||||
o.Value = v
|
||||
o.Set = false
|
||||
}
|
||||
|
||||
// SetTo sets value to v.
|
||||
func (o *OptPerformPredictionProfile) SetTo(v PerformPredictionProfile) {
|
||||
o.Set = true
|
||||
o.Value = v
|
||||
}
|
||||
|
||||
// Get returns value and boolean that denotes whether value was set.
|
||||
func (o OptPerformPredictionProfile) Get() (v PerformPredictionProfile, ok bool) {
|
||||
if !o.Set {
|
||||
return v, false
|
||||
}
|
||||
return o.Value, true
|
||||
}
|
||||
|
||||
// Or returns value if set, or given parameter if does not.
|
||||
func (o OptPerformPredictionProfile) Or(d PerformPredictionProfile) PerformPredictionProfile {
|
||||
if v, ok := o.Get(); ok {
|
||||
return v
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// NewOptString returns new OptString with value set to v.
|
||||
func NewOptString(v string) OptString {
|
||||
return OptString{
|
||||
Value: v,
|
||||
Set: true,
|
||||
}
|
||||
}
|
||||
|
||||
// OptString is optional string.
|
||||
type OptString struct {
|
||||
Value string
|
||||
Set bool
|
||||
}
|
||||
|
||||
// IsSet returns true if OptString was set.
|
||||
func (o OptString) IsSet() bool { return o.Set }
|
||||
|
||||
// Reset unsets value.
|
||||
func (o *OptString) Reset() {
|
||||
var v string
|
||||
o.Value = v
|
||||
o.Set = false
|
||||
}
|
||||
|
||||
// SetTo sets value to v.
|
||||
func (o *OptString) SetTo(v string) {
|
||||
o.Set = true
|
||||
o.Value = v
|
||||
}
|
||||
|
||||
// Get returns value and boolean that denotes whether value was set.
|
||||
func (o OptString) Get() (v string, ok bool) {
|
||||
if !o.Set {
|
||||
return v, false
|
||||
}
|
||||
return o.Value, true
|
||||
}
|
||||
|
||||
// Or returns value if set, or given parameter if does not.
|
||||
func (o OptString) Or(d string) string {
|
||||
if v, ok := o.Get(); ok {
|
||||
return v
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
type PerformPredictionFormat string
|
||||
|
||||
const (
|
||||
PerformPredictionFormatJSON PerformPredictionFormat = "json"
|
||||
)
|
||||
|
||||
// AllValues returns all PerformPredictionFormat values.
|
||||
func (PerformPredictionFormat) AllValues() []PerformPredictionFormat {
|
||||
return []PerformPredictionFormat{
|
||||
PerformPredictionFormatJSON,
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (s PerformPredictionFormat) MarshalText() ([]byte, error) {
|
||||
switch s {
|
||||
case PerformPredictionFormatJSON:
|
||||
return []byte(s), nil
|
||||
default:
|
||||
return nil, errors.Errorf("invalid value: %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (s *PerformPredictionFormat) UnmarshalText(data []byte) error {
|
||||
switch PerformPredictionFormat(data) {
|
||||
case PerformPredictionFormatJSON:
|
||||
*s = PerformPredictionFormatJSON
|
||||
return nil
|
||||
default:
|
||||
return errors.Errorf("invalid value: %q", data)
|
||||
}
|
||||
}
|
||||
|
||||
type PerformPredictionProfile string
|
||||
|
||||
const (
|
||||
PerformPredictionProfileStandardProfile PerformPredictionProfile = "standard_profile"
|
||||
PerformPredictionProfileFloatProfile PerformPredictionProfile = "float_profile"
|
||||
PerformPredictionProfileReverseProfile PerformPredictionProfile = "reverse_profile"
|
||||
PerformPredictionProfileCustomProfile PerformPredictionProfile = "custom_profile"
|
||||
)
|
||||
|
||||
// AllValues returns all PerformPredictionProfile values.
|
||||
func (PerformPredictionProfile) AllValues() []PerformPredictionProfile {
|
||||
return []PerformPredictionProfile{
|
||||
PerformPredictionProfileStandardProfile,
|
||||
PerformPredictionProfileFloatProfile,
|
||||
PerformPredictionProfileReverseProfile,
|
||||
PerformPredictionProfileCustomProfile,
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (s PerformPredictionProfile) MarshalText() ([]byte, error) {
|
||||
switch s {
|
||||
case PerformPredictionProfileStandardProfile:
|
||||
return []byte(s), nil
|
||||
case PerformPredictionProfileFloatProfile:
|
||||
return []byte(s), nil
|
||||
case PerformPredictionProfileReverseProfile:
|
||||
return []byte(s), nil
|
||||
case PerformPredictionProfileCustomProfile:
|
||||
return []byte(s), nil
|
||||
default:
|
||||
return nil, errors.Errorf("invalid value: %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (s *PerformPredictionProfile) UnmarshalText(data []byte) error {
|
||||
switch PerformPredictionProfile(data) {
|
||||
case PerformPredictionProfileStandardProfile:
|
||||
*s = PerformPredictionProfileStandardProfile
|
||||
return nil
|
||||
case PerformPredictionProfileFloatProfile:
|
||||
*s = PerformPredictionProfileFloatProfile
|
||||
return nil
|
||||
case PerformPredictionProfileReverseProfile:
|
||||
*s = PerformPredictionProfileReverseProfile
|
||||
return nil
|
||||
case PerformPredictionProfileCustomProfile:
|
||||
*s = PerformPredictionProfileCustomProfile
|
||||
return nil
|
||||
default:
|
||||
return errors.Errorf("invalid value: %q", data)
|
||||
}
|
||||
}
|
||||
|
||||
type PerformPredictionSimulateStagesItem string
|
||||
|
||||
const (
|
||||
PerformPredictionSimulateStagesItemAscent PerformPredictionSimulateStagesItem = "ascent"
|
||||
PerformPredictionSimulateStagesItemDescent PerformPredictionSimulateStagesItem = "descent"
|
||||
PerformPredictionSimulateStagesItemFloat PerformPredictionSimulateStagesItem = "float"
|
||||
)
|
||||
|
||||
// AllValues returns all PerformPredictionSimulateStagesItem values.
|
||||
func (PerformPredictionSimulateStagesItem) AllValues() []PerformPredictionSimulateStagesItem {
|
||||
return []PerformPredictionSimulateStagesItem{
|
||||
PerformPredictionSimulateStagesItemAscent,
|
||||
PerformPredictionSimulateStagesItemDescent,
|
||||
PerformPredictionSimulateStagesItemFloat,
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (s PerformPredictionSimulateStagesItem) MarshalText() ([]byte, error) {
|
||||
switch s {
|
||||
case PerformPredictionSimulateStagesItemAscent:
|
||||
return []byte(s), nil
|
||||
case PerformPredictionSimulateStagesItemDescent:
|
||||
return []byte(s), nil
|
||||
case PerformPredictionSimulateStagesItemFloat:
|
||||
return []byte(s), nil
|
||||
default:
|
||||
return nil, errors.Errorf("invalid value: %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (s *PerformPredictionSimulateStagesItem) UnmarshalText(data []byte) error {
|
||||
switch PerformPredictionSimulateStagesItem(data) {
|
||||
case PerformPredictionSimulateStagesItemAscent:
|
||||
*s = PerformPredictionSimulateStagesItemAscent
|
||||
return nil
|
||||
case PerformPredictionSimulateStagesItemDescent:
|
||||
*s = PerformPredictionSimulateStagesItemDescent
|
||||
return nil
|
||||
case PerformPredictionSimulateStagesItemFloat:
|
||||
*s = PerformPredictionSimulateStagesItemFloat
|
||||
return nil
|
||||
default:
|
||||
return errors.Errorf("invalid value: %q", data)
|
||||
}
|
||||
}
|
||||
|
||||
// Ref: #/components/schemas/PredictionResult
|
||||
type PredictionResult struct {
|
||||
Metadata PredictionResultMetadata `json:"metadata"`
|
||||
Prediction []PredictionResultPredictionItem `json:"prediction"`
|
||||
}
|
||||
|
||||
// GetMetadata returns the value of Metadata.
|
||||
func (s *PredictionResult) GetMetadata() PredictionResultMetadata {
|
||||
return s.Metadata
|
||||
}
|
||||
|
||||
// GetPrediction returns the value of Prediction.
|
||||
func (s *PredictionResult) GetPrediction() []PredictionResultPredictionItem {
|
||||
return s.Prediction
|
||||
}
|
||||
|
||||
// SetMetadata sets the value of Metadata.
|
||||
func (s *PredictionResult) SetMetadata(val PredictionResultMetadata) {
|
||||
s.Metadata = val
|
||||
}
|
||||
|
||||
// SetPrediction sets the value of Prediction.
|
||||
func (s *PredictionResult) SetPrediction(val []PredictionResultPredictionItem) {
|
||||
s.Prediction = val
|
||||
}
|
||||
|
||||
type PredictionResultMetadata struct {
|
||||
CompleteDatetime time.Time `json:"complete_datetime"`
|
||||
StartDatetime time.Time `json:"start_datetime"`
|
||||
}
|
||||
|
||||
// GetCompleteDatetime returns the value of CompleteDatetime.
|
||||
func (s *PredictionResultMetadata) GetCompleteDatetime() time.Time {
|
||||
return s.CompleteDatetime
|
||||
}
|
||||
|
||||
// GetStartDatetime returns the value of StartDatetime.
|
||||
func (s *PredictionResultMetadata) GetStartDatetime() time.Time {
|
||||
return s.StartDatetime
|
||||
}
|
||||
|
||||
// SetCompleteDatetime sets the value of CompleteDatetime.
|
||||
func (s *PredictionResultMetadata) SetCompleteDatetime(val time.Time) {
|
||||
s.CompleteDatetime = val
|
||||
}
|
||||
|
||||
// SetStartDatetime sets the value of StartDatetime.
|
||||
func (s *PredictionResultMetadata) SetStartDatetime(val time.Time) {
|
||||
s.StartDatetime = val
|
||||
}
|
||||
|
||||
type PredictionResultPredictionItem struct {
|
||||
Stage PredictionResultPredictionItemStage `json:"stage"`
|
||||
Trajectory []PredictionResultPredictionItemTrajectoryItem `json:"trajectory"`
|
||||
}
|
||||
|
||||
// GetStage returns the value of Stage.
|
||||
func (s *PredictionResultPredictionItem) GetStage() PredictionResultPredictionItemStage {
|
||||
return s.Stage
|
||||
}
|
||||
|
||||
// GetTrajectory returns the value of Trajectory.
|
||||
func (s *PredictionResultPredictionItem) GetTrajectory() []PredictionResultPredictionItemTrajectoryItem {
|
||||
return s.Trajectory
|
||||
}
|
||||
|
||||
// SetStage sets the value of Stage.
|
||||
func (s *PredictionResultPredictionItem) SetStage(val PredictionResultPredictionItemStage) {
|
||||
s.Stage = val
|
||||
}
|
||||
|
||||
// SetTrajectory sets the value of Trajectory.
|
||||
func (s *PredictionResultPredictionItem) SetTrajectory(val []PredictionResultPredictionItemTrajectoryItem) {
|
||||
s.Trajectory = val
|
||||
}
|
||||
|
||||
type PredictionResultPredictionItemStage string
|
||||
|
||||
const (
|
||||
PredictionResultPredictionItemStageAscent PredictionResultPredictionItemStage = "ascent"
|
||||
PredictionResultPredictionItemStageDescent PredictionResultPredictionItemStage = "descent"
|
||||
PredictionResultPredictionItemStageFloat PredictionResultPredictionItemStage = "float"
|
||||
)
|
||||
|
||||
// AllValues returns all PredictionResultPredictionItemStage values.
|
||||
func (PredictionResultPredictionItemStage) AllValues() []PredictionResultPredictionItemStage {
	return []PredictionResultPredictionItemStage{
		PredictionResultPredictionItemStageAscent,
		PredictionResultPredictionItemStageDescent,
		PredictionResultPredictionItemStageFloat,
	}
}

// MarshalText implements encoding.TextMarshaler.
func (s PredictionResultPredictionItemStage) MarshalText() ([]byte, error) {
	switch s {
	case PredictionResultPredictionItemStageAscent:
		return []byte(s), nil
	case PredictionResultPredictionItemStageDescent:
		return []byte(s), nil
	case PredictionResultPredictionItemStageFloat:
		return []byte(s), nil
	default:
		return nil, errors.Errorf("invalid value: %q", s)
	}
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (s *PredictionResultPredictionItemStage) UnmarshalText(data []byte) error {
	switch PredictionResultPredictionItemStage(data) {
	case PredictionResultPredictionItemStageAscent:
		*s = PredictionResultPredictionItemStageAscent
		return nil
	case PredictionResultPredictionItemStageDescent:
		*s = PredictionResultPredictionItemStageDescent
		return nil
	case PredictionResultPredictionItemStageFloat:
		*s = PredictionResultPredictionItemStageFloat
		return nil
	default:
		return errors.Errorf("invalid value: %q", data)
	}
}

type PredictionResultPredictionItemTrajectoryItem struct {
	Datetime  time.Time `json:"datetime"`
	Latitude  float64   `json:"latitude"`
	Longitude float64   `json:"longitude"`
	Altitude  float64   `json:"altitude"`
}

// GetDatetime returns the value of Datetime.
func (s *PredictionResultPredictionItemTrajectoryItem) GetDatetime() time.Time {
	return s.Datetime
}

// GetLatitude returns the value of Latitude.
func (s *PredictionResultPredictionItemTrajectoryItem) GetLatitude() float64 {
	return s.Latitude
}

// GetLongitude returns the value of Longitude.
func (s *PredictionResultPredictionItemTrajectoryItem) GetLongitude() float64 {
	return s.Longitude
}

// GetAltitude returns the value of Altitude.
func (s *PredictionResultPredictionItemTrajectoryItem) GetAltitude() float64 {
	return s.Altitude
}

// SetDatetime sets the value of Datetime.
func (s *PredictionResultPredictionItemTrajectoryItem) SetDatetime(val time.Time) {
	s.Datetime = val
}

// SetLatitude sets the value of Latitude.
func (s *PredictionResultPredictionItemTrajectoryItem) SetLatitude(val float64) {
	s.Latitude = val
}

// SetLongitude sets the value of Longitude.
func (s *PredictionResultPredictionItemTrajectoryItem) SetLongitude(val float64) {
	s.Longitude = val
}

// SetAltitude sets the value of Altitude.
func (s *PredictionResultPredictionItemTrajectoryItem) SetAltitude(val float64) {
	s.Altitude = val
}

// Ref: #/components/schemas/ReadinessResponse
type ReadinessResponse struct {
	Status       ReadinessResponseStatus `json:"status"`
	LastUpdate   OptDateTime             `json:"last_update"`
	IsFresh      OptBool                 `json:"is_fresh"`
	ErrorMessage OptString               `json:"error_message"`
}

// GetStatus returns the value of Status.
func (s *ReadinessResponse) GetStatus() ReadinessResponseStatus {
	return s.Status
}

// GetLastUpdate returns the value of LastUpdate.
func (s *ReadinessResponse) GetLastUpdate() OptDateTime {
	return s.LastUpdate
}

// GetIsFresh returns the value of IsFresh.
func (s *ReadinessResponse) GetIsFresh() OptBool {
	return s.IsFresh
}

// GetErrorMessage returns the value of ErrorMessage.
func (s *ReadinessResponse) GetErrorMessage() OptString {
	return s.ErrorMessage
}

// SetStatus sets the value of Status.
func (s *ReadinessResponse) SetStatus(val ReadinessResponseStatus) {
	s.Status = val
}

// SetLastUpdate sets the value of LastUpdate.
func (s *ReadinessResponse) SetLastUpdate(val OptDateTime) {
	s.LastUpdate = val
}

// SetIsFresh sets the value of IsFresh.
func (s *ReadinessResponse) SetIsFresh(val OptBool) {
	s.IsFresh = val
}

// SetErrorMessage sets the value of ErrorMessage.
func (s *ReadinessResponse) SetErrorMessage(val OptString) {
	s.ErrorMessage = val
}

type ReadinessResponseStatus string

const (
	ReadinessResponseStatusOk       ReadinessResponseStatus = "ok"
	ReadinessResponseStatusNotReady ReadinessResponseStatus = "not_ready"
	ReadinessResponseStatusError    ReadinessResponseStatus = "error"
)

// AllValues returns all ReadinessResponseStatus values.
func (ReadinessResponseStatus) AllValues() []ReadinessResponseStatus {
	return []ReadinessResponseStatus{
		ReadinessResponseStatusOk,
		ReadinessResponseStatusNotReady,
		ReadinessResponseStatusError,
	}
}

// MarshalText implements encoding.TextMarshaler.
func (s ReadinessResponseStatus) MarshalText() ([]byte, error) {
	switch s {
	case ReadinessResponseStatusOk:
		return []byte(s), nil
	case ReadinessResponseStatusNotReady:
		return []byte(s), nil
	case ReadinessResponseStatusError:
		return []byte(s), nil
	default:
		return nil, errors.Errorf("invalid value: %q", s)
	}
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (s *ReadinessResponseStatus) UnmarshalText(data []byte) error {
	switch ReadinessResponseStatus(data) {
	case ReadinessResponseStatusOk:
		*s = ReadinessResponseStatusOk
		return nil
	case ReadinessResponseStatusNotReady:
		*s = ReadinessResponseStatusNotReady
		return nil
	case ReadinessResponseStatusError:
		*s = ReadinessResponseStatusError
		return nil
	default:
		return errors.Errorf("invalid value: %q", data)
	}
}
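A minimal round-trip sketch for the generated enums above (not part of the ogen output; it assumes it sits in the same gsn package): every value from AllValues should survive MarshalText followed by UnmarshalText.

package gsn

import "fmt"

// stageRoundTrip checks that MarshalText/UnmarshalText agree on every
// enum value returned by AllValues.
func stageRoundTrip() {
	for _, v := range PredictionResultPredictionItemStage("").AllValues() {
		text, err := v.MarshalText()
		if err != nil {
			panic(err)
		}
		var back PredictionResultPredictionItemStage
		if err := back.UnmarshalText(text); err != nil {
			panic(err)
		}
		fmt.Println(back == v) // true for ascent, descent, float
	}
}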
@ -1,46 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"context"
)

// Handler handles operations described by OpenAPI v3 specification.
type Handler interface {
	// PerformPrediction implements performPrediction operation.
	//
	// Perform prediction.
	//
	// GET /api/v1/prediction
	PerformPrediction(ctx context.Context, params PerformPredictionParams) (*PredictionResult, error)
	// ReadinessCheck implements readinessCheck operation.
	//
	// Readiness check.
	//
	// GET /ready
	ReadinessCheck(ctx context.Context) (*ReadinessResponse, error)
	// NewError creates *ErrorStatusCode from error returned by handler.
	//
	// Used for common default response.
	NewError(ctx context.Context, err error) *ErrorStatusCode
}

// Server implements http server based on OpenAPI v3 specification and
// calls Handler to handle requests.
type Server struct {
	h Handler
	baseServer
}

// NewServer creates new Server.
func NewServer(h Handler, opts ...ServerOption) (*Server, error) {
	s, err := newServerConfig(opts...).baseServer()
	if err != nil {
		return nil, err
	}
	return &Server{
		h:          h,
		baseServer: s,
	}, nil
}
@ -1,40 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"context"

	ht "github.com/ogen-go/ogen/http"
)

// UnimplementedHandler is no-op Handler which returns http.ErrNotImplemented.
type UnimplementedHandler struct{}

var _ Handler = UnimplementedHandler{}

// PerformPrediction implements performPrediction operation.
//
// Perform prediction.
//
// GET /api/v1/prediction
func (UnimplementedHandler) PerformPrediction(ctx context.Context, params PerformPredictionParams) (r *PredictionResult, _ error) {
	return r, ht.ErrNotImplemented
}

// ReadinessCheck implements readinessCheck operation.
//
// Readiness check.
//
// GET /ready
func (UnimplementedHandler) ReadinessCheck(ctx context.Context) (r *ReadinessResponse, _ error) {
	return r, ht.ErrNotImplemented
}

// NewError creates *ErrorStatusCode from error returned by handler.
//
// Used for common default response.
func (UnimplementedHandler) NewError(ctx context.Context, err error) (r *ErrorStatusCode) {
	r = new(ErrorStatusCode)
	return r
}
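To wire this up, a handler typically embeds UnimplementedHandler so only the needed operations are overridden. A minimal sketch (not repo code; it assumes the same gsn package, and relies on ogen-generated *Server values implementing http.Handler):

package gsn

import (
	"context"
	"net/http"
)

// readyOnlyHandler overrides ReadinessCheck and inherits ErrNotImplemented
// responses for everything else from UnimplementedHandler.
type readyOnlyHandler struct {
	UnimplementedHandler
}

func (readyOnlyHandler) ReadinessCheck(ctx context.Context) (*ReadinessResponse, error) {
	return &ReadinessResponse{Status: ReadinessResponseStatusOk}, nil
}

// serveReadyOnly starts an HTTP server that only answers GET /ready.
func serveReadyOnly() error {
	srv, err := NewServer(readyOnlyHandler{})
	if err != nil {
		return err
	}
	return http.ListenAndServe(":8080", srv)
}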
@ -1,232 +0,0 @@
// Code generated by ogen, DO NOT EDIT.

package gsn

import (
	"fmt"

	"github.com/go-faster/errors"
	"github.com/ogen-go/ogen/validate"
)

func (s PerformPredictionFormat) Validate() error {
	switch s {
	case "json":
		return nil
	default:
		return errors.Errorf("invalid value: %v", s)
	}
}

func (s PerformPredictionProfile) Validate() error {
	switch s {
	case "standard_profile":
		return nil
	case "float_profile":
		return nil
	case "reverse_profile":
		return nil
	case "custom_profile":
		return nil
	default:
		return errors.Errorf("invalid value: %v", s)
	}
}

func (s PerformPredictionSimulateStagesItem) Validate() error {
	switch s {
	case "ascent":
		return nil
	case "descent":
		return nil
	case "float":
		return nil
	default:
		return errors.Errorf("invalid value: %v", s)
	}
}

func (s *PredictionResult) Validate() error {
	if s == nil {
		return validate.ErrNilPointer
	}

	var failures []validate.FieldError
	if err := func() error {
		if s.Prediction == nil {
			return errors.New("nil is invalid value")
		}
		var failures []validate.FieldError
		for i, elem := range s.Prediction {
			if err := func() error {
				if err := elem.Validate(); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				failures = append(failures, validate.FieldError{
					Name:  fmt.Sprintf("[%d]", i),
					Error: err,
				})
			}
		}
		if len(failures) > 0 {
			return &validate.Error{Fields: failures}
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "prediction",
			Error: err,
		})
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}
	return nil
}

func (s *PredictionResultPredictionItem) Validate() error {
	if s == nil {
		return validate.ErrNilPointer
	}

	var failures []validate.FieldError
	if err := func() error {
		if err := s.Stage.Validate(); err != nil {
			return err
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "stage",
			Error: err,
		})
	}
	if err := func() error {
		if s.Trajectory == nil {
			return errors.New("nil is invalid value")
		}
		var failures []validate.FieldError
		for i, elem := range s.Trajectory {
			if err := func() error {
				if err := elem.Validate(); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				failures = append(failures, validate.FieldError{
					Name:  fmt.Sprintf("[%d]", i),
					Error: err,
				})
			}
		}
		if len(failures) > 0 {
			return &validate.Error{Fields: failures}
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "trajectory",
			Error: err,
		})
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}
	return nil
}

func (s PredictionResultPredictionItemStage) Validate() error {
	switch s {
	case "ascent":
		return nil
	case "descent":
		return nil
	case "float":
		return nil
	default:
		return errors.Errorf("invalid value: %v", s)
	}
}

func (s *PredictionResultPredictionItemTrajectoryItem) Validate() error {
	if s == nil {
		return validate.ErrNilPointer
	}

	var failures []validate.FieldError
	if err := func() error {
		if err := (validate.Float{}).Validate(float64(s.Latitude)); err != nil {
			return errors.Wrap(err, "float")
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "latitude",
			Error: err,
		})
	}
	if err := func() error {
		if err := (validate.Float{}).Validate(float64(s.Longitude)); err != nil {
			return errors.Wrap(err, "float")
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "longitude",
			Error: err,
		})
	}
	if err := func() error {
		if err := (validate.Float{}).Validate(float64(s.Altitude)); err != nil {
			return errors.Wrap(err, "float")
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "altitude",
			Error: err,
		})
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}
	return nil
}

func (s *ReadinessResponse) Validate() error {
	if s == nil {
		return validate.ErrNilPointer
	}

	var failures []validate.FieldError
	if err := func() error {
		if err := s.Status.Validate(); err != nil {
			return err
		}
		return nil
	}(); err != nil {
		failures = append(failures, validate.FieldError{
			Name:  "status",
			Error: err,
		})
	}
	if len(failures) > 0 {
		return &validate.Error{Fields: failures}
	}
	return nil
}

func (s ReadinessResponseStatus) Validate() error {
	switch s {
	case "ok":
		return nil
	case "not_ready":
		return nil
	case "error":
		return nil
	default:
		return errors.Errorf("invalid value: %v", s)
	}
}
@ -1,20 +0,0 @@
package scheduler

import (
	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	env "github.com/caarlos0/env/v11"
)

type Config struct {
	Enabled bool `env:"ENABLED" envDefault:"true"`
}

func NewConfig() (*Config, error) {
	cfg := &Config{}
	// Prefix (not PrefixTagName, which only renames the struct tag used for
	// prefixes) is the env/v11 option that prepends the string to every key.
	if err := env.ParseWithOptions(cfg, env.Options{
		Prefix: "GSN_PREDICTOR_SCHEDULER_",
	}); err != nil {
		return nil, errcodes.Wrap(err, "failed to parse scheduler config")
	}
	return cfg, nil
}
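Usage sketch (hypothetical, not from the repo): with the prefix applied, the single knob is GSN_PREDICTOR_SCHEDULER_ENABLED, defaulting to true when unset.

package scheduler

import (
	"fmt"
	"os"
)

// exampleNewConfig shows the env var the Config above reads.
func exampleNewConfig() {
	os.Setenv("GSN_PREDICTOR_SCHEDULER_ENABLED", "false")
	cfg, err := NewConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Enabled) // false
}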
@ -1,97 +0,0 @@
package scheduler

import (
	"context"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
	"git.intra.yksa.space/gsn/predictor/internal/pkg/log"
	"github.com/go-co-op/gocron"
	"go.uber.org/zap"
)

type Job interface {
	GetInterval() time.Duration
	GetTimeout() time.Duration
	GetCount() int
	GetAsync() bool
	Execute(context.Context) error
}

type Scheduler struct {
	scheduler *gocron.Scheduler
}

func New() *Scheduler {
	scheduler := gocron.NewScheduler(time.UTC)
	return &Scheduler{
		scheduler: scheduler,
	}
}

func (s *Scheduler) AddJob(job Job) error {
	interval := job.GetInterval()
	timeout := job.GetTimeout()
	count := job.GetCount()
	async := job.GetAsync()

	// Validate job parameters
	if !async && count != 1 {
		return errcodes.ErrSchedulerInvalidJob
	}
	if timeout > interval {
		return errcodes.ErrSchedulerTimeoutTooLong
	}

	// Create job function with timeout
	jobFunc := func() {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		logger := log.Ctx(ctx)
		if err := job.Execute(ctx); err != nil {
			logger.Error("job execution failed",
				zap.Error(err),
				zap.Duration("interval", interval),
				zap.Duration("timeout", timeout))
		} else {
			logger.Debug("job executed successfully",
				zap.Duration("interval", interval),
				zap.Duration("timeout", timeout))
		}
	}

	// Add job to scheduler
	schedulerJob := s.scheduler.Every(interval)

	if !async {
		schedulerJob = schedulerJob.SingletonMode()
	}

	if count > 0 {
		schedulerJob = schedulerJob.LimitRunsTo(count)
	}

	// Do returns the created job and a scheduling error; don't drop it.
	if _, err := schedulerJob.Do(jobFunc); err != nil {
		return errcodes.Wrap(err, "failed to schedule job")
	}

	log.Ctx(context.Background()).Info("job added to scheduler",
		zap.Duration("interval", interval),
		zap.Duration("timeout", timeout),
		zap.Int("count", count),
		zap.Bool("async", async))

	return nil
}

func (s *Scheduler) Start() {
	s.scheduler.StartAsync()
	log.Ctx(context.Background()).Info("scheduler started")
}

func (s *Scheduler) Stop() {
	s.scheduler.Stop()
	log.Ctx(context.Background()).Info("scheduler stopped")
}

func (s *Scheduler) IsRunning() bool {
	return s.scheduler.IsRunning()
}
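A hypothetical Job implementation (sketch only, not repo code), to illustrate the contract AddJob validates: a synchronous job must set Count to 1, and Timeout must not exceed Interval.

package scheduler

import (
	"context"
	"time"
)

// heartbeatJob: a synchronous one-shot job with a 30s timeout inside a
// 1m interval, satisfying both AddJob checks above.
type heartbeatJob struct{}

func (heartbeatJob) GetInterval() time.Duration { return time.Minute }
func (heartbeatJob) GetTimeout() time.Duration  { return 30 * time.Second }
func (heartbeatJob) GetCount() int              { return 1 }
func (heartbeatJob) GetAsync() bool             { return false }
func (heartbeatJob) Execute(ctx context.Context) error {
	// Real work would honor ctx, which AddJob cancels after the timeout.
	return nil
}

It would then be registered and started with s := New(); s.AddJob(heartbeatJob{}); s.Start().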
@ -1,114 +0,0 @@
package main

import (
	"encoding/json"
	"fmt"
	"math"
	"os"
)

type Point struct {
	Datetime  string  `json:"datetime"`
	Latitude  float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
	Altitude  float64 `json:"altitude"`
}

type Stage struct {
	Stage      string  `json:"stage"`
	Trajectory []Point `json:"trajectory"`
}

type Prediction struct {
	Prediction []Stage `json:"prediction"`
}

func haversine(lat1, lon1, lat2, lon2 float64) float64 {
	R := 6371000.0
	phi1, phi2 := lat1*math.Pi/180, lat2*math.Pi/180
	dphi := (lat2 - lat1) * math.Pi / 180
	dlam := (lon2 - lon1) * math.Pi / 180
	a := math.Sin(dphi/2)*math.Sin(dphi/2) + math.Cos(phi1)*math.Cos(phi2)*math.Sin(dlam/2)*math.Sin(dlam/2)
	return R * 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
}

func load(path string) Prediction {
	// Fail loudly instead of silently comparing zero-value predictions.
	data, err := os.ReadFile(path)
	if err != nil {
		fmt.Printf("read %s: %v\n", path, err)
		os.Exit(1)
	}
	var p Prediction
	if err := json.Unmarshal(data, &p); err != nil {
		fmt.Printf("parse %s: %v\n", path, err)
		os.Exit(1)
	}
	return p
}

func main() {
	our := load("c:/tmp/our.json")
	taw := load("c:/tmp/tawhiri.json")

	// Find burst and landing points
	var ourBurst, ourLand, tawBurst, tawLand Point
	for _, s := range our.Prediction {
		t := s.Trajectory
		if s.Stage == "ascent" {
			ourBurst = t[len(t)-1]
		}
		if s.Stage == "descent" {
			ourLand = t[len(t)-1]
		}
	}
	for _, s := range taw.Prediction {
		t := s.Trajectory
		if s.Stage == "ascent" {
			tawBurst = t[len(t)-1]
		}
		if s.Stage == "descent" {
			tawLand = t[len(t)-1]
		}
	}

	fmt.Println("=== Burst Point ===")
	fmt.Printf(" Our: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", ourBurst.Latitude, ourBurst.Longitude, ourBurst.Altitude, ourBurst.Datetime)
	fmt.Printf(" Tawhiri: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", tawBurst.Latitude, tawBurst.Longitude, tawBurst.Altitude, tawBurst.Datetime)
	burstDist := haversine(ourBurst.Latitude, ourBurst.Longitude, tawBurst.Latitude, tawBurst.Longitude)
	fmt.Printf(" Distance: %.2f km\n", burstDist/1000)

	fmt.Println()
	fmt.Println("=== Landing Point ===")
	fmt.Printf(" Our: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", ourLand.Latitude, ourLand.Longitude, ourLand.Altitude, ourLand.Datetime)
	fmt.Printf(" Tawhiri: lat=%.4f, lon=%.4f, alt=%.0f, time=%s\n", tawLand.Latitude, tawLand.Longitude, tawLand.Altitude, tawLand.Datetime)
	landDist := haversine(ourLand.Latitude, ourLand.Longitude, tawLand.Latitude, tawLand.Longitude)
	fmt.Printf(" Distance: %.2f km\n", landDist/1000)

	fmt.Println()
	fmt.Println("=== Trajectory Comparison (every 10 min) ===")
	ourPts := map[string]Point{}
	tawPts := map[string]Point{}
	for _, s := range our.Prediction {
		for _, p := range s.Trajectory {
			ourPts[p.Datetime] = p
		}
	}
	for _, s := range taw.Prediction {
		for _, p := range s.Trajectory {
			tawPts[p.Datetime] = p
		}
	}

	// Collect common times
	var common []string
	for _, s := range our.Prediction {
		for _, p := range s.Trajectory {
			if _, ok := tawPts[p.Datetime]; ok {
				common = append(common, p.Datetime)
			}
		}
	}

	for i, t := range common {
		if i%10 == 0 {
			o := ourPts[t]
			tw := tawPts[t]
			d := haversine(o.Latitude, o.Longitude, tw.Latitude, tw.Longitude)
			fmt.Printf(" %s: dist=%.2f km (our: %.3f,%.3f vs taw: %.3f,%.3f)\n",
				t, d/1000, o.Latitude, o.Longitude, tw.Latitude, tw.Longitude)
		}
	}
}
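For reference, the haversine helper above (and the Python test script further down) computes the standard great-circle distance with Earth radius R = 6371 km:

$$a = \sin^2\!\frac{\Delta\varphi}{2} + \cos\varphi_1\,\cos\varphi_2\,\sin^2\!\frac{\Delta\lambda}{2}, \qquad d = 2R\,\operatorname{atan2}\!\left(\sqrt{a},\,\sqrt{1-a}\right)$$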
@ -1,44 +0,0 @@
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/grib"
)

func main() {
	ctx := context.Background()

	cfg := &grib.Config{
		Dir:      "C:/tmp/grib",
		TTL:      48 * time.Hour,
		CacheTTL: 1 * time.Hour,
		Parallel: 8,
	}

	svc, err := grib.New(cfg)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}

	// Delete old cube to force rebuild
	cubePath := "C:/tmp/grib/20260212_12.cube"
	if err := os.Remove(cubePath); err != nil && !os.IsNotExist(err) {
		fmt.Printf("Remove cube error: %v\n", err)
	} else {
		fmt.Println("Old cube removed")
	}

	// Update will download missing pgrb2b files and rebuild cube
	fmt.Println("Starting update (download pgrb2b + rebuild cube)...")
	start := time.Now()
	if err := svc.Update(ctx); err != nil {
		fmt.Printf("Update error: %v\n", err)
		return
	}
	fmt.Printf("Done in %v\n", time.Since(start))
}
@ -1,36 +0,0 @@
package main

import (
	"context"
	"fmt"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/grib"
)

func main() {
	ctx := context.Background()

	// Find the latest available model run
	run, err := grib.GetLatestModelRun(ctx)
	if err != nil {
		fmt.Printf("Error finding model run: %v\n", err)
		return
	}
	fmt.Printf("Found model run: %v\n", run)

	// Create the downloader
	dl := grib.NewPartialDownloader("C:/tmp/grib", 8)

	// Start the download
	start := time.Now()
	fmt.Println("Starting download...")

	err = dl.Run(ctx, run)
	if err != nil {
		fmt.Printf("Download error: %v\n", err)
		return
	}

	fmt.Printf("Download completed in %v\n", time.Since(start))
}
@ -1,60 +0,0 @@
package main

import (
	"encoding/binary"
	"fmt"
	"math"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

var pressureLevels = []float64{
	1000, 975, 950, 925, 900, 875, 850, 825, 800, 775,
	750, 725, 700, 675, 650, 625, 600, 575, 550, 525,
	500, 475, 450, 425, 400, 375, 350, 325, 300, 275,
	250, 225, 200, 175, 150, 125, 100, 70, 50, 30,
	20, 10, 7, 5, 3, 2, 1,
}

func main() {
	// Bail out with a message rather than panicking on a nil mmap.
	f, err := os.Open("C:/tmp/grib/20260212_12.cube")
	if err != nil {
		fmt.Printf("open cube: %v\n", err)
		return
	}
	defer f.Close()
	mm, err := mmap.Map(f, mmap.RDONLY, 0)
	if err != nil {
		fmt.Printf("mmap cube: %v\n", err)
		return
	}
	defer mm.Unmap()

	const (
		nT   = 97
		nP   = 47
		nLat = 721
		nLon = 1440
	)
	bytesPerVar := int64(nT * nP * nLat * nLon * 4)

	val := func(varIdx, ti, pi, y, x int) float32 {
		idx := (((ti*nP+pi)*nLat)+y)*nLon + x
		off := int64(varIdx)*bytesPerVar + int64(idx)*4
		bits := binary.LittleEndian.Uint32(mm[off : off+4])
		return math.Float32frombits(bits)
	}

	// Check gh values at lat=52.2N (y=(90-52.2)*4=151.2 → y=151), lon=0.1E (x=0.1*4=0.4 → x=0)
	// Time step 9 (9 hours into forecast)
	ti := 9
	y := 151
	x := 0

	fmt.Println("GH values at (52.25N, 0E), t=+9h:")
	fmt.Printf("%8s %8s %10s\n", "Level", "hPa", "GH(m)")
	for pi := 0; pi < nP; pi++ {
		gh := val(0, ti, pi, y, x)
		fmt.Printf("%8d %8.0f %10.1f\n", pi, pressureLevels[pi], gh)
	}

	fmt.Println("\nU-wind values at same point:")
	fmt.Printf("%8s %8s %10s\n", "Level", "hPa", "U(m/s)")
	for pi := 0; pi < nP; pi++ {
		u := val(1, ti, pi, y, x)
		fmt.Printf("%8d %8.0f %10.2f\n", pi, pressureLevels[pi], u)
	}
}
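The index arithmetic in val assumes a 0.25° GFS grid: latitude scans from 90°N down to 90°S (721 rows) and longitude eastward from 0°E (1440 columns). A small hypothetical helper (not repo code) making the comment's conversion explicit:

package main

// gridIndex maps lat/lon to the cube's row/column on the 0.25° grid,
// truncating like the comment above: (90-52.2)*4 = 151.2 -> y=151.
func gridIndex(lat, lon float64) (y, x int) {
	y = int((90 - lat) * 4) // 90°N..90°S -> rows 0..720
	x = int(lon*4) % 1440   // 0°E eastward -> cols 0..1439
	return y, x
}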
@ -1,38 +0,0 @@
package main

import (
	"fmt"
	"os"

	"github.com/nilsmagnus/grib/griblib"
)

func main() {
	f, err := os.Open("C:/tmp/grib/gfs.t18z.pgrb2.0p25.f000")
	if err != nil {
		fmt.Printf("Error opening file: %v\n", err)
		return
	}
	defer f.Close()

	messages, err := griblib.ReadMessages(f)
	if err != nil {
		fmt.Printf("Error reading GRIB: %v\n", err)
		return
	}

	fmt.Printf("Found %d messages\n\n", len(messages))

	for i, m := range messages {
		product := m.Section4.ProductDefinitionTemplate
		if product.ParameterCategory != 2 || product.ParameterNumber != 2 {
			continue // only u-wind
		}
		fmt.Printf("UGRD Msg %d: SurfType=%d SurfValue=%d SurfScale=%d DataLen=%d\n",
			i,
			product.FirstSurface.Type,
			product.FirstSurface.Value,
			product.FirstSurface.Scale,
			len(m.Data()))
	}
}
@ -1,87 +0,0 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/grib"
)

func main() {
	ctx := context.Background()

	// Initialize the GRIB service
	cfg := &grib.Config{
		Dir:      "C:/tmp/grib",
		TTL:      48 * time.Hour,
		CacheTTL: 1 * time.Hour,
		Parallel: 8,
	}

	svc, err := grib.New(cfg)
	if err != nil {
		fmt.Printf("Error creating service: %v\n", err)
		return
	}

	// Update the data (builds the cube)
	fmt.Println("Updating GRIB data (building cube)...")
	start := time.Now()
	if err := svc.Update(ctx); err != nil {
		fmt.Printf("Update error: %v\n", err)
		return
	}
	fmt.Printf("Cube built in %v\n", time.Since(start))

	// Test wind extraction
	fmt.Println("\nTesting wind extraction...")
	lat, lon, alt := 52.2, 0.1, 10000.0
	ts := time.Date(2026, 2, 11, 12, 0, 0, 0, time.UTC)

	wind, err := svc.Extract(ctx, lat, lon, alt, ts)
	if err != nil {
		fmt.Printf("Extract error: %v\n", err)
		return
	}
	fmt.Printf("Wind at (%.2f, %.2f, %.0fm) at %v:\n", lat, lon, alt, ts)
	fmt.Printf(" U (east): %.2f m/s\n", wind[0])
	fmt.Printf(" V (north): %.2f m/s\n", wind[1])

	// Compare with Tawhiri
	fmt.Println("\nComparing with Tawhiri API...")
	tawhiriURL := fmt.Sprintf(
		"https://api.v2.sondehub.org/tawhiri?launch_latitude=%.2f&launch_longitude=%.2f&launch_altitude=0&launch_datetime=%s&ascent_rate=5&burst_altitude=30000&descent_rate=5",
		lat, lon, ts.Format(time.RFC3339),
	)

	resp, err := http.Get(tawhiriURL)
	if err != nil {
		fmt.Printf("Tawhiri request error: %v\n", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Printf("Tawhiri read error: %v\n", err)
		return
	}
	var tawhiriResp map[string]interface{}
	if err := json.Unmarshal(body, &tawhiriResp); err != nil {
		fmt.Printf("Tawhiri parse error: %v\n", err)
		return
	}

	// Print the final landing point
	if prediction, ok := tawhiriResp["prediction"].([]interface{}); ok {
		for _, stage := range prediction {
			stageMap := stage.(map[string]interface{})
			if stageMap["stage"] == "descent" {
				trajectory := stageMap["trajectory"].([]interface{})
				if len(trajectory) > 0 {
					last := trajectory[len(trajectory)-1].(map[string]interface{})
					fmt.Printf("\nTawhiri landing point:\n")
					fmt.Printf(" Lat: %.4f\n", last["latitude"])
					fmt.Printf(" Lon: %.4f\n", last["longitude"])
				}
			}
		}
	}
}
@ -1,303 +0,0 @@
#!/usr/bin/env python3
import subprocess
import sys
import time
import requests
import json
import base64
import math

# --- Config ---
REFERENCE_API_URL = "https://fly.stratonautica.ru/api/v2/?profile=standard_profile&pred_type=single&launch_datetime=2025-06-25T20%3A45%3A00Z&launch_latitude=56.6992&launch_longitude=38.8247&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5"
LOCAL_API_URL = "http://localhost:8080/api/v1/prediction?profile=standard_profile&pred_type=single&launch_datetime=2025-06-25T20%3A45%3A00Z&launch_latitude=56.6992&launch_longitude=38.8247&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5"

# Kept for reference; the tests below use GET requests, not this payload.
LOCAL_API_PAYLOAD = {
    "launch_latitude": 56.6992,
    "launch_longitude": 38.8247,
    "launch_datetime": "2025-06-25T20:45:00Z",
    "launch_altitude": 0,
    "profile": "standard_profile",
    "ascent_rate": 5,
    "burst_altitude": 30000,
    "descent_rate": 5,
    "format": "json"
}
READY_URL = "http://localhost:8080/ready"

# --- Utility functions ---
def run_compose_up():
    print("[INFO] Running docker-compose down --remove-orphans ...")
    result = subprocess.run(["docker-compose", "down", "--remove-orphans"], capture_output=True)
    if result.returncode != 0:
        print("[ERROR] docker-compose down failed:", result.stderr.decode())
        sys.exit(1)
    print("[INFO] docker-compose down completed.")
    print("[INFO] Running docker-compose up -d ...")
    result = subprocess.run(["docker-compose", "up", "-d"], capture_output=True)
    if result.returncode != 0:
        print("[ERROR] docker-compose up failed:", result.stderr.decode())
        sys.exit(1)
    print("[INFO] docker-compose up -d completed.")
    return True


def wait_for_ready(timeout=900):
    print(f"[INFO] Waiting for {READY_URL} to be ready ...")
    start = time.time()
    while time.time() - start < timeout:
        try:
            resp = requests.get(READY_URL, timeout=10)
            if resp.status_code == 200:
                data = resp.json()
                if data.get("status") == "ok":
                    print("[INFO] Service is ready.")
                    return
                else:
                    print(f"[INFO] Not ready yet: {data}")
            else:
                print(f"[INFO] /ready returned status {resp.status_code}")
        except Exception as e:
            print(f"[INFO] Exception while polling /ready: {e}")
        time.sleep(10)
    print(f"[ERROR] Service did not become ready in {timeout} seconds.")
    sys.exit(1)


def fetch_reference():
    print(f"[INFO] Fetching reference prediction from {REFERENCE_API_URL}")
    resp = requests.get(REFERENCE_API_URL, timeout=60)
    if resp.status_code != 200:
        print(f"[ERROR] Reference API returned {resp.status_code}: {resp.text}")
        sys.exit(1)
    return resp.json()


def fetch_local():
    print(f"[INFO] Fetching local prediction from {LOCAL_API_URL}")
    resp = requests.get(LOCAL_API_URL, timeout=60)
    if resp.status_code != 200:
        print(f"[ERROR] Local API returned {resp.status_code}: {resp.text}")
        sys.exit(1)
    return resp.json()


def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance between two points (decimal degrees), in kilometers."""
    R = 6371.0  # Earth radius in kilometers
    lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return R * c


def compare_results(reference_data, local_data):
    """Compare prediction results between reference and local APIs."""
    print("[INFO] Comparing results ...")

    # Extract trajectory data
    ref_trajectory = reference_data.get('prediction', [{}])[0].get('trajectory', [])
    local_trajectory = local_data.get('prediction', [{}])[0].get('trajectory', [])

    print(f"[DEBUG] Reference trajectory length: {len(ref_trajectory)}")
    print(f"[DEBUG] Local trajectory length: {len(local_trajectory)}")

    # Show first 3 points from both APIs
    print("\n[DEBUG] First 3 points - Reference API:")
    for i, point in enumerate(ref_trajectory[:3]):
        print(f"  [{i}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}")

    print("\n[DEBUG] First 3 points - Local API:")
    for i, point in enumerate(local_trajectory[:3]):
        print(f"  [{i}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}")

    # Show last 3 points from both APIs
    print("\n[DEBUG] Last 3 points - Reference API:")
    for i, point in enumerate(ref_trajectory[-3:]):
        idx = len(ref_trajectory) - 3 + i
        print(f"  [{idx}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}")

    print("\n[DEBUG] Last 3 points - Local API:")
    for i, point in enumerate(local_trajectory[-3:]):
        idx = len(local_trajectory) - 3 + i
        print(f"  [{idx}] alt={point.get('altitude', 'N/A')}, lat={point.get('latitude', 'N/A')}, lon={point.get('longitude', 'N/A')}, time={point.get('datetime', 'N/A')}")

    # Compare trajectory lengths
    if len(ref_trajectory) != len(local_trajectory):
        print(f"[DIFF] Trajectory length mismatch: {len(local_trajectory)} vs {len(ref_trajectory)}")
        return False

    # Compare trajectory points and calculate drift
    min_len = min(len(ref_trajectory), len(local_trajectory))
    max_drift = 0.0
    max_drift_idx = -1
    drift_list = []
    print("\n[DRIFT] Trajectory point-by-point distance (km):")
    for i in range(min_len):
        ref_point = ref_trajectory[i]
        local_point = local_trajectory[i]
        ref_lat = ref_point.get('latitude')
        ref_lon = ref_point.get('longitude')
        local_lat = local_point.get('latitude')
        local_lon = local_point.get('longitude')
        drift_km = None
        if None not in (ref_lat, ref_lon, local_lat, local_lon):
            drift_km = haversine(ref_lat, ref_lon, local_lat, local_lon)
            drift_list.append(drift_km)
            if drift_km > max_drift:
                max_drift = drift_km
                max_drift_idx = i
            print(f"  [{i}] Drift: {drift_km:.3f} km")
        else:
            print(f"  [{i}] Drift: N/A (missing lat/lon)")
    if drift_list:
        mean_drift = sum(drift_list) / len(drift_list)
        print(f"\n[DRIFT] Max drift: {max_drift:.3f} km at idx {max_drift_idx}")
        print(f"[DRIFT] Mean drift: {mean_drift:.3f} km over {len(drift_list)} points")
    else:
        print("[DRIFT] No valid drift data to report.")
    # Continue with original comparison for altitude, etc.
    for i in range(min_len):
        ref_point = ref_trajectory[i]
        local_point = local_trajectory[i]
        for key in ['altitude', 'latitude', 'longitude']:
            ref_val = ref_point.get(key)
            local_val = local_point.get(key)
            if ref_val is not None and local_val is not None:
                if abs(ref_val - local_val) > 0.1:
                    print(f"[DIFF] At idx {i}, key {key}: {local_val} != {ref_val}")
                    return False
    print("[SUCCESS] Results match!")
    return True


def test_custom_profile():
    """Test custom profile with base64 encoded curve."""
    print("\n[TEST] Testing custom_profile...")
    # Create a simple custom ascent curve (altitude vs time in seconds)
    curve_data = {
        "altitude": [0, 30000],
        "time": [0, 6000]
    }
    curve_b64 = base64.b64encode(json.dumps(curve_data).encode()).decode()
    # Test parameters for custom profile
    params = {
        "launch_latitude": 56.6992,
        "launch_longitude": 38.8247,
        "launch_datetime": "2025-06-25T13:28:00Z",
        "launch_altitude": 0,
        "profile": "custom_profile",
        "ascent_curve": curve_b64
    }
    try:
        # Test local API (use GET)
        local_resp = requests.get(
            "http://localhost:8080/api/v1/prediction",
            params=params,
            timeout=30
        )
        local_resp.raise_for_status()
        local_data = local_resp.json()
        print(f"[INFO] Custom profile test - Local API returned {len(local_data.get('prediction', [{}])[0].get('trajectory', []))} trajectory points")
        return True
    except Exception as e:
        print(f"[ERROR] Custom profile test failed: {e}")
        return False


def test_all_profiles():
    """Test all available profiles."""
    profiles = [
        ("standard_profile", "Standard profile test"),
        ("float_profile", "Float profile test"),
        ("reverse_profile", "Reverse profile test"),
        ("custom_profile", "Custom profile test")
    ]

    results = {}

    for profile, description in profiles:
        print(f"\n[TEST] {description}...")

        if profile == "custom_profile":
            success = test_custom_profile()
        else:
            success = test_single_profile(profile)

        results[profile] = success
        print(f"[RESULT] {profile}: {'PASS' if success else 'FAIL'}")

    # Print summary
    print("\n" + "="*50)
    print("TEST SUMMARY")
    print("="*50)
    for profile, success in results.items():
        status = "PASS" if success else "FAIL"
        print(f"{profile:20} : {status}")

    total_tests = len(results)
    passed_tests = sum(results.values())
    print(f"\nTotal tests: {total_tests}, Passed: {passed_tests}, Failed: {total_tests - passed_tests}")

    return all(results.values())


def test_single_profile(profile):
    """Test a single profile against reference API."""
    # Test parameters
    params = {
        "launch_latitude": 56.6992,
        "launch_longitude": 38.8247,
        "launch_datetime": "2025-06-25T13:28:00Z",
        "launch_altitude": 0,
        "profile": profile,
        "ascent_rate": 5,
        "burst_altitude": 30000,
        "descent_rate": 5
    }
    # Add float altitude for float profile
    if profile == "float_profile":
        params["float_altitude"] = 25000
    try:
        # Test local API (use GET)
        local_resp = requests.get(
            "http://localhost:8080/api/v1/prediction",
            params=params,
            timeout=30
        )
        local_resp.raise_for_status()
        local_data = local_resp.json()
        print(f"[INFO] {profile} - Local API returned {len(local_data.get('prediction', [{}])[0].get('trajectory', []))} trajectory points")
        return True
    except Exception as e:
        print(f"[ERROR] {profile} test failed: {e}")
        return False


def main():
    """Main test function."""
    print("[INFO] Starting comprehensive predictor API tests...")

    # Run the original standard profile test
    print("\n[TEST] Running original standard_profile test...")
    run_compose_up()
    wait_for_ready()
    ref = fetch_reference()
    local = fetch_local()

    original_success = compare_results(ref, local)

    if original_success:
        print("[SUCCESS] Original standard_profile test passed!")
    else:
        print("[FAIL] Original standard_profile test failed!")

    # Test all profiles
    print("\n[TEST] Running all profile tests...")
    all_profiles_success = test_all_profiles()

    # Final result
    overall_success = original_success and all_profiles_success
    print(f"\n[FINAL RESULT] Overall: {'PASS' if overall_success else 'FAIL'}")

    if overall_success:
        sys.exit(0)
    else:
        sys.exit(1)


if __name__ == "__main__":
    main()
@ -1,55 +0,0 @@
package main

import (
	"context"
	"fmt"
	"time"

	"git.intra.yksa.space/gsn/predictor/internal/pkg/grib"
)

func main() {
	ctx := context.Background()

	cfg := &grib.Config{
		Dir:      "C:/tmp/grib",
		TTL:      48 * time.Hour,
		CacheTTL: 1 * time.Hour,
		Parallel: 8,
	}

	svc, err := grib.New(cfg)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}

	if err := svc.Update(ctx); err != nil {
		fmt.Printf("Update error: %v\n", err)
		return
	}

	// Test wind at lat=52.2, lon=0.1 at various altitudes
	// Run is 2026-02-12T12:00Z, request time 21:00Z = +9 hours
	ts := time.Date(2026, 2, 12, 21, 0, 0, 0, time.UTC)
	lat, lon := 52.2, 0.1

	fmt.Println("Wind at (52.2°N, 0.1°E) at 2026-02-12T21:00Z:")
	fmt.Printf("%8s %8s %8s\n", "Alt(m)", "U(m/s)", "V(m/s)")

	for _, alt := range []float64{0, 1000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000} {
		w, err := svc.Extract(ctx, lat, lon, alt, ts)
		if err != nil {
			fmt.Printf("%8.0f Error: %v\n", alt, err)
			continue
		}
		fmt.Printf("%8.0f %8.2f %8.2f\n", alt, w[0], w[1])
	}

	// Also test at a few nearby points to check spatial consistency
	fmt.Println("\nWind at 10km altitude, varying longitude:")
	for _, testLon := range []float64{0.0, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0, 350.0, 359.75} {
		w, err := svc.Extract(ctx, lat, testLon, 10000, ts)
		if err != nil {
			fmt.Printf("  lon=%6.2f: Error: %v\n", testLon, err)
			continue
		}
		fmt.Printf("  lon=%6.2f: U=%8.2f V=%8.2f\n", testLon, w[0], w[1])
	}
}
@ -1,11 +0,0 @@
#!/bin/bash
# Start API with HTTP downloads instead of S3

export GSN_PREDICTOR_GRIB_USE_S3=false

echo "Starting API with HTTP downloads from NOMADS..."
echo "USE_S3 = $GSN_PREDICTOR_GRIB_USE_S3"
echo ""

cd "$(dirname "$0")"
go run ./cmd/api/main.go