feat: remove redis

This commit is contained in:
Anatoly Antonov 2025-10-20 16:31:45 +09:00
parent 7a9f81e527
commit a850615e1f
18 changed files with 170 additions and 1142 deletions

6
.gitignore vendored
View file

@ -55,4 +55,8 @@ Thumbs.db
# Leaflet WebUI
/leaflet_predictor
/leaflet_predictor/*
/leaflet_predictor/*
# Tawhiri
/tawhiri
/tawhiri/*

View file

@ -1,501 +0,0 @@
# Deployment Guide
This guide covers deploying the Predictor Service using Docker and Docker Compose.
## Prerequisites
- Docker Engine 20.10+
- Docker Compose 2.0+
- At least 2GB RAM available
- 10GB free disk space
## Quick Deployment
### 1. Clone and Setup
```bash
git clone <repository-url>
cd predictor
```
### 2. Validate Configuration
```bash
# Validate Docker configuration
./scripts/validate-docker.sh
```
### 3. Deploy
```bash
# Build and start services
make up-build
# Check status
make ps
# View logs
make logs
```
## Production Deployment
### Environment Configuration
1. **Copy environment template:**
```bash
cp cmd/api/.env cmd/api/.env.production
```
2. **Edit production environment:**
```bash
nano cmd/api/.env.production
```
3. **Key production settings:**
```bash
# Security
GSN_PREDICTOR_REDIS_PASSWORD=your_secure_password
# Performance
GSN_PREDICTOR_GRIB_PARALLEL=8
GSN_PREDICTOR_GRIB_CACHE_TTL=2h
# Monitoring
GSN_PREDICTOR_GRIB_UPDATER_INTERVAL=3h
```
### Production Docker Compose
Create `docker-compose.prod.yml`:
```yaml
version: '3.8'
services:
predictor:
build:
context: .
dockerfile: Dockerfile
container_name: predictor-prod
ports:
- "8080:8080"
env_file:
- cmd/api/.env.production
volumes:
- grib_data:/tmp/grib
depends_on:
redis:
condition: service_healthy
networks:
- predictor-network
restart: unless-stopped
deploy:
resources:
limits:
memory: 1G
cpus: '0.5'
reservations:
memory: 512M
cpus: '0.25'
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
redis:
image: redis:7.2-alpine
container_name: predictor-redis-prod
ports:
- "6379:6379"
volumes:
- redis_data:/data
networks:
- predictor-network
restart: unless-stopped
command: redis-server --appendonly yes --maxmemory 512mb --maxmemory-policy allkeys-lru --requirepass ${GSN_PREDICTOR_REDIS_PASSWORD}
healthcheck:
test: ["CMD", "redis-cli", "-a", "${GSN_PREDICTOR_REDIS_PASSWORD}", "ping"]
interval: 10s
timeout: 3s
retries: 5
start_period: 10s
volumes:
grib_data:
driver: local
redis_data:
driver: local
networks:
predictor-network:
driver: bridge
```
### Deploy to Production
```bash
# Deploy with production config
docker-compose -f docker-compose.prod.yml up -d
# Monitor deployment
docker-compose -f docker-compose.prod.yml logs -f
# Check health
curl http://localhost:8080/health
```
## Kubernetes Deployment
### Create Namespace
```yaml
# k8s/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: predictor
```
### Redis Deployment
```yaml
# k8s/redis.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: predictor
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7.2-alpine
ports:
- containerPort: 6379
command: ["redis-server", "--appendonly", "yes", "--maxmemory", "512mb", "--maxmemory-policy", "allkeys-lru"]
volumeMounts:
- name: redis-data
mountPath: /data
resources:
limits:
memory: "512Mi"
cpu: "250m"
requests:
memory: "256Mi"
cpu: "100m"
livenessProbe:
exec:
command: ["redis-cli", "ping"]
initialDelaySeconds: 10
periodSeconds: 10
readinessProbe:
exec:
command: ["redis-cli", "ping"]
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: redis-data
persistentVolumeClaim:
claimName: redis-pvc
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: predictor
spec:
selector:
app: redis
ports:
- port: 6379
targetPort: 6379
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-pvc
namespace: predictor
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
```
### Predictor Deployment
```yaml
# k8s/predictor.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: predictor
namespace: predictor
spec:
replicas: 2
selector:
matchLabels:
app: predictor
template:
metadata:
labels:
app: predictor
spec:
containers:
- name: predictor
image: predictor:latest
ports:
- containerPort: 8080
env:
- name: GSN_PREDICTOR_REDIS_HOST
value: "redis"
- name: GSN_PREDICTOR_REDIS_PORT
value: "6379"
- name: GSN_PREDICTOR_GRIB_DIR
value: "/tmp/grib"
- name: GSN_PREDICTOR_SCHEDULER_ENABLED
value: "true"
- name: GSN_PREDICTOR_GRIB_UPDATER_INTERVAL
value: "6h"
- name: GSN_PREDICTOR_GRIB_UPDATER_TIMEOUT
value: "45m"
volumeMounts:
- name: grib-data
mountPath: /tmp/grib
resources:
limits:
memory: "1Gi"
cpu: "500m"
requests:
memory: "512Mi"
cpu: "250m"
livenessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 40
periodSeconds: 30
readinessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 10
periodSeconds: 10
volumes:
- name: grib-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: predictor
namespace: predictor
spec:
selector:
app: predictor
ports:
- port: 80
targetPort: 8080
type: LoadBalancer
```
### Deploy to Kubernetes
```bash
# Apply namespace
kubectl apply -f k8s/namespace.yaml
# Apply Redis
kubectl apply -f k8s/redis.yaml
# Wait for Redis to be ready
kubectl wait --for=condition=ready pod -l app=redis -n predictor
# Apply Predictor
kubectl apply -f k8s/predictor.yaml
# Check status
kubectl get pods -n predictor
kubectl get services -n predictor
```
## Monitoring and Logging
### Health Checks
The service includes built-in health checks:
```bash
# Application health
curl http://localhost:8080/health
# Docker health
docker inspect predictor | jq '.[0].State.Health'
# Kubernetes health
kubectl describe pod -l app=predictor -n predictor
```
### Logging
```bash
# Docker logs
docker-compose logs -f predictor
# Kubernetes logs
kubectl logs -f deployment/predictor -n predictor
```
### Metrics
Consider adding Prometheus metrics:
```yaml
# Add to docker-compose.yml
prometheus:
image: prom/prometheus:latest
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
networks:
- predictor-network
```
## Backup and Recovery
### Redis Backup
```bash
# Create backup
docker exec predictor-redis redis-cli BGSAVE
# Copy backup file
docker cp predictor-redis:/data/dump.rdb ./backup/redis-$(date +%Y%m%d).rdb
```
### GRIB Data Backup
```bash
# Backup GRIB data
docker run --rm -v predictor_grib_data:/data -v $(pwd)/backup:/backup alpine tar czf /backup/grib-$(date +%Y%m%d).tar.gz -C /data .
```
### Automated Backup Script
```bash
#!/bin/bash
# scripts/backup.sh
BACKUP_DIR="./backup/$(date +%Y%m%d)"
mkdir -p $BACKUP_DIR
# Redis backup
docker exec predictor-redis redis-cli BGSAVE
sleep 5
docker cp predictor-redis:/data/dump.rdb $BACKUP_DIR/redis.rdb
# GRIB data backup
docker run --rm -v predictor_grib_data:/data -v $(pwd)/$BACKUP_DIR:/backup alpine tar czf /backup/grib.tar.gz -C /data .
echo "Backup completed: $BACKUP_DIR"
```
## Troubleshooting
### Common Issues
1. **Redis Connection Issues:**
```bash
# Check Redis status
docker-compose exec redis redis-cli ping
# Check network connectivity (Redis does not speak HTTP — test the TCP port instead)
docker-compose exec predictor nc -z redis 6379
```
2. **GRIB Download Failures:**
```bash
# Check disk space
docker-compose exec predictor df -h /tmp/grib
# Check internet connectivity
docker-compose exec predictor wget -O- https://nomads.ncep.noaa.gov/
```
3. **Memory Issues:**
```bash
# Check memory usage
docker stats
# Check container logs
docker-compose logs predictor | grep -i memory
```
### Performance Tuning
1. **Redis Optimization:**
```bash
# Increase Redis memory
GSN_PREDICTOR_REDIS_MAXMEMORY=1gb
# Optimize Redis settings
redis-server --maxmemory 1gb --maxmemory-policy allkeys-lru
```
2. **GRIB Processing:**
```bash
# Increase parallel workers
GSN_PREDICTOR_GRIB_PARALLEL=8
# Optimize cache TTL
GSN_PREDICTOR_GRIB_CACHE_TTL=2h
```
3. **Container Resources:**
```yaml
# In docker-compose.yml
deploy:
resources:
limits:
memory: 2G
cpus: '1.0'
reservations:
memory: 1G
cpus: '0.5'
```
## Security Considerations
1. **Network Security:**
- Use internal networks for service communication
- Expose only necessary ports
- Use reverse proxy for external access
2. **Container Security:**
- Run as non-root user
- Use minimal base images
- Regular security updates
3. **Data Security:**
- Encrypt sensitive environment variables
- Use secrets management for passwords
- Regular backups
4. **Access Control:**
- Implement API authentication
- Use HTTPS in production
- Monitor access logs
```

View file

@ -43,10 +43,6 @@ logs:
logs-predictor:
docker-compose -f $(COMPOSE_FILE) logs -f predictor
# View logs for Redis
.PHONY: logs-redis
logs-redis:
docker-compose -f $(COMPOSE_FILE) logs -f redis
# Check service status
.PHONY: ps
@ -58,11 +54,6 @@ ps:
exec:
docker-compose -f $(COMPOSE_FILE) exec predictor sh
# Execute command in Redis container
.PHONY: exec-redis
exec-redis:
docker-compose -f $(COMPOSE_FILE) exec redis sh
# Clean up Docker resources
.PHONY: clean
clean:
@ -79,7 +70,7 @@ test:
build-local:
go build -o predictor ./cmd/api
# Run locally (requires Redis)
# Run locally
.PHONY: run-local
run-local:
cd cmd/api && go run .
@ -106,14 +97,12 @@ help:
@echo " down-volumes - Stop services and remove volumes"
@echo " logs - View all logs"
@echo " logs-predictor - View predictor logs"
@echo " logs-redis - View Redis logs"
@echo " ps - Show service status"
@echo " exec - Execute shell in predictor container"
@echo " exec-redis - Execute shell in Redis container"
@echo " clean - Clean up Docker resources"
@echo " test - Run tests"
@echo " build-local - Build locally"
@echo " run-local - Run locally (requires Redis)"
@echo " run-local - Run locally"
@echo " fmt - Format code"
@echo " lint - Lint code"
@echo " help - Show this help"

261
README.md
View file

@ -1,261 +0,0 @@
# Predictor Service
A Go-based weather prediction service that downloads and processes GRIB files for wind vector data extraction.
## Features
- **GRIB File Processing**: Downloads and processes GRIB weather data files
- **Wind Vector Extraction**: Extracts wind vector data for given coordinates and time
- **Redis Caching**: Caches extraction results for improved performance
- **Distributed Locking**: Uses Redis-based distributed locks for safe concurrent downloads
- **Scheduled Updates**: Automatic GRIB file updates via configurable scheduler
- **REST API**: HTTP API for data extraction and service management
- **Modular Architecture**: Clean separation of concerns with dependency injection
## Architecture
The service follows a modular architecture with clear separation of concerns:
- **Service Layer**: Business logic and orchestration
- **GRIB Package**: GRIB file processing and data extraction
- **Redis Package**: Caching and distributed locking
- **Scheduler Package**: Job scheduling and execution
- **Transport Layer**: HTTP API handling
- **Jobs**: Background tasks (GRIB updates, etc.)
## Quick Start with Docker
### Prerequisites
- Docker
- Docker Compose
### Running the Service
1. **Clone the repository:**
```bash
git clone <repository-url>
cd predictor
```
2. **Start the services:**
```bash
# Production
docker-compose up -d
# Development (with volume mounts)
docker-compose -f docker-compose.dev.yml up -d
```
3. **Check service status:**
```bash
docker-compose ps
```
4. **View logs:**
```bash
# All services
docker-compose logs -f
# Specific service
docker-compose logs -f predictor
docker-compose logs -f redis
```
### Using Make Commands
The project includes a Makefile for common operations:
```bash
# Build and start services
make up-build
# View logs
make logs
# Stop services
make down
# Clean up everything
make clean
# Show all available commands
make help
```
## Configuration
The service is configured via environment variables with the prefix `GSN_PREDICTOR_`:
### GRIB Configuration
- `GSN_PREDICTOR_GRIB_DIR`: Directory for GRIB files (default: `/tmp/grib`)
- `GSN_PREDICTOR_GRIB_TTL`: GRIB file TTL (default: `24h`)
- `GSN_PREDICTOR_GRIB_CACHE_TTL`: Cache TTL (default: `1h`)
- `GSN_PREDICTOR_GRIB_PARALLEL`: Parallel download workers (default: `4`)
- `GSN_PREDICTOR_GRIB_TIMEOUT`: Download timeout (default: `30s`)
- `GSN_PREDICTOR_GRIB_DATASET_URL`: GRIB data source URL
### Redis Configuration
- `GSN_PREDICTOR_REDIS_HOST`: Redis host (default: `localhost`)
- `GSN_PREDICTOR_REDIS_PORT`: Redis port (default: `6379`)
- `GSN_PREDICTOR_REDIS_PASSWORD`: Redis password (default: empty)
- `GSN_PREDICTOR_REDIS_DB`: Redis database (default: `0`)
### Scheduler Configuration
- `GSN_PREDICTOR_SCHEDULER_ENABLED`: Enable scheduler (default: `true`)
### GRIB Updater Job Configuration
- `GSN_PREDICTOR_GRIB_UPDATER_INTERVAL`: Update interval (default: `6h`)
- `GSN_PREDICTOR_GRIB_UPDATER_TIMEOUT`: Update timeout (default: `45m`)
### REST Transport Configuration
- `GSN_PREDICTOR_REST_HOST`: HTTP host (default: `0.0.0.0`)
- `GSN_PREDICTOR_REST_PORT`: HTTP port (default: `8080`)
- `GSN_PREDICTOR_REST_READ_TIMEOUT`: Read timeout (default: `30s`)
- `GSN_PREDICTOR_REST_WRITE_TIMEOUT`: Write timeout (default: `30s`)
- `GSN_PREDICTOR_REST_IDLE_TIMEOUT`: Idle timeout (default: `60s`)
## API Endpoints
The service exposes a REST API for wind data extraction:
- `GET /health` - Health check endpoint
- `POST /predict` - Extract wind data for given coordinates and time
### Example API Usage
```bash
# Health check
curl http://localhost:8080/health
# Extract wind data
curl -X POST http://localhost:8080/predict \
-H "Content-Type: application/json" \
-d '{
"latitude": 40.7128,
"longitude": -74.0060,
"altitude": 100,
"timestamp": "2024-01-15T12:00:00Z"
}'
```
## Development
### Local Development
1. **Install dependencies:**
```bash
go mod download
```
2. **Start Redis:**
```bash
docker run -d --name redis -p 6379:6379 redis:7.2-alpine
```
3. **Set environment variables:**
```bash
cd cmd/api
source .env
```
4. **Run the service:**
```bash
go run .
```
### Building Locally
```bash
# Build the binary
make build-local
# Run tests
make test
# Format code
make fmt
# Lint code
make lint
```
### Docker Development
For development with hot reloading:
```bash
# Start development environment
docker-compose -f docker-compose.dev.yml up -d
# View logs
docker-compose -f docker-compose.dev.yml logs -f predictor
```
## Docker Best Practices
The Dockerfile follows Go best practices:
- **Multi-stage build**: Separate builder and runtime stages
- **Non-root user**: Runs as non-root user for security
- **Minimal runtime**: Uses Alpine Linux for smaller image size
- **Health checks**: Built-in health monitoring
- **Optimized layers**: Efficient layer caching
- **Security**: No unnecessary packages or permissions
## Monitoring and Health Checks
The service includes built-in health checks:
- **Application health**: HTTP endpoint at `/health`
- **Docker health**: Container health check with wget
- **Redis health**: Redis ping health check
- **Service dependencies**: Proper startup order with health checks
## Troubleshooting
### Common Issues
1. **Redis connection refused:**
- Ensure Redis is running: `docker-compose ps`
- Check Redis logs: `docker-compose logs redis`
- Verify network connectivity
2. **GRIB download failures:**
- Check internet connectivity
- Verify GRIB data source URL
- Check disk space in `/tmp/grib`
3. **Service not starting:**
- Check logs: `docker-compose logs predictor`
- Verify environment variables
- Check port conflicts
### Debug Commands
```bash
# Execute shell in container
docker-compose exec predictor sh
# Check Redis connectivity
docker-compose exec redis redis-cli ping
# View container resources
docker stats
# Check network connectivity (Redis does not speak HTTP — test the TCP port instead)
docker-compose exec predictor nc -z redis 6379
```
## Contributing
1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests
5. Run tests and linting
6. Submit a pull request
## License
[Add your license information here]

View file

@ -14,7 +14,6 @@ import (
"git.intra.yksa.space/gsn/predictor/internal/service"
"git.intra.yksa.space/gsn/predictor/internal/transport/rest"
"git.intra.yksa.space/gsn/predictor/internal/transport/rest/handler"
"git.intra.yksa.space/gsn/predictor/pkg/redis"
"git.intra.yksa.space/gsn/predictor/pkg/scheduler"
env "github.com/caarlos0/env/v11"
"go.uber.org/zap"
@ -45,23 +44,10 @@ func main() {
log.Ctx(ctx).Fatal("failed to load GRIB updater configuration", zap.Error(err))
}
log.Ctx(ctx).Info("Connecting to Redis", zap.String("host", cfg.RedisHost), zap.Int("port", cfg.RedisPort))
redisService, err := redis.New(redis.Config{
Host: cfg.RedisHost,
Port: cfg.RedisPort,
Password: cfg.RedisPassword,
DB: cfg.RedisDB,
})
if err != nil {
log.Ctx(ctx).Fatal("failed to initialize Redis service", zap.Error(err), zap.String("host", cfg.RedisHost), zap.Int("port", cfg.RedisPort))
}
defer redisService.Close()
gribService, err := grib.New(grib.ServiceConfig{
Dir: cfg.GribDir,
TTL: cfg.GribTTL,
CacheTTL: cfg.GribCacheTTL,
Redis: redisService,
Parallel: cfg.GribParallel,
Client: cfg.CreateHTTPClient(),
DatasetURL: cfg.GribDatasetURL,
@ -81,7 +67,7 @@ func main() {
}
}()
svc, err := service.New(cfg, gribService, redisService)
svc, err := service.New(cfg, gribService)
if err != nil {
log.Ctx(ctx).Fatal("failed to initialize service", zap.Error(err))
}

View file

@ -1,78 +0,0 @@
version: '3.8'
services:
predictor:
build:
context: .
dockerfile: Dockerfile
container_name: predictor
ports:
- "8080:8080"
environment:
# --- GRIB Configuration ---
- GSN_PREDICTOR_GRIB_DIR=/tmp/grib
- GSN_PREDICTOR_GRIB_TTL=24h
- GSN_PREDICTOR_GRIB_CACHE_TTL=1h
- GSN_PREDICTOR_GRIB_PARALLEL=4
- GSN_PREDICTOR_GRIB_TIMEOUT=30s
- GSN_PREDICTOR_GRIB_DATASET_URL=https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod
# --- Redis Configuration ---
- GSN_PREDICTOR_REDIS_HOST=redis
- GSN_PREDICTOR_REDIS_PORT=6379
- GSN_PREDICTOR_REDIS_PASSWORD=
- GSN_PREDICTOR_REDIS_DB=0
# --- Scheduler Configuration ---
- GSN_PREDICTOR_SCHEDULER_ENABLED=true
# --- GRIB Updater Job Configuration ---
- GSN_PREDICTOR_GRIB_UPDATER_INTERVAL=6h
- GSN_PREDICTOR_GRIB_UPDATER_TIMEOUT=45m
# --- REST Transport Configuration ---
- GSN_PREDICTOR_REST_HOST=0.0.0.0
- GSN_PREDICTOR_REST_PORT=8080
- GSN_PREDICTOR_REST_READ_TIMEOUT=30s
- GSN_PREDICTOR_REST_WRITE_TIMEOUT=30s
- GSN_PREDICTOR_REST_IDLE_TIMEOUT=60s
volumes:
- ./grib_data:/tmp/grib
depends_on:
redis:
condition: service_healthy
networks:
- predictor-network
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/ready"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
redis:
image: redis:7.2-alpine
container_name: predictor-redis
ports:
- "6379:6379"
volumes:
- redis_data:/data
networks:
- predictor-network
restart: unless-stopped
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 3s
retries: 5
start_period: 10s
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
volumes:
redis_data:
driver: local
networks:
predictor-network:
driver: bridge

37
go.mod
View file

@ -5,41 +5,38 @@ go 1.24.4
require (
github.com/caarlos0/env/v11 v11.3.1
github.com/edsrzf/mmap-go v1.2.0
github.com/go-co-op/gocron v1.37.0
github.com/go-faster/errors v0.7.1
github.com/go-faster/jx v1.1.0
github.com/nilsmagnus/grib v1.2.8
github.com/ogen-go/ogen v1.14.0
go.opentelemetry.io/otel v1.36.0
go.opentelemetry.io/otel/metric v1.36.0
go.opentelemetry.io/otel/trace v1.36.0
github.com/ogen-go/ogen v1.16.0
github.com/rs/cors v1.11.1
go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel/metric v1.38.0
go.opentelemetry.io/otel/trace v1.38.0
go.uber.org/zap v1.27.0
golang.org/x/sync v0.14.0
golang.org/x/sync v0.17.0
)
require (
github.com/bsm/redislock v0.9.4 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dlclark/regexp2 v1.11.5 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-co-op/gocron v1.37.0 // indirect
github.com/go-faster/yaml v0.4.6 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/redis/go-redis/v9 v9.10.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/segmentio/asm v1.2.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
github.com/segmentio/asm v1.2.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/exp v0.0.0-20251017212417-90e834f514db // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/text v0.30.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

40
go.sum
View file

@ -1,15 +1,9 @@
github.com/bsm/redislock v0.9.4 h1:X/Wse1DPpiQgHbVYRE9zv6m070UcKoOGekgvpNhiSvw=
github.com/bsm/redislock v0.9.4/go.mod h1:Epf7AJLiSFwLCiZcfi6pWFO/8eAYrYpQXFxEDPoDeAk=
github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA=
github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
@ -29,6 +23,8 @@ github.com/go-faster/yaml v0.4.6/go.mod h1:390dRIvV4zbnO7qC9FGo6YYutc+wyyUSHBgbX
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@ -47,6 +43,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@ -54,21 +52,26 @@ github.com/nilsmagnus/grib v1.2.8 h1:H7ch/1/agaCqM3MC8hW1Ft+EJ+q2XB757uml/IfPvp4
github.com/nilsmagnus/grib v1.2.8/go.mod h1:XHm+5zuoOk0NSIWaGmA3JaAxI4i50YvD1L1vz+aqPOQ=
github.com/ogen-go/ogen v1.14.0 h1:TU1Nj4z9UBsAfTkf+IhuNNp7igdFQKqkk9+6/y4XuWg=
github.com/ogen-go/ogen v1.14.0/go.mod h1:Iw1vkqkx6SU7I9th5ceP+fVPJ6Wge4e3kAVzAxJEpPE=
github.com/ogen-go/ogen v1.16.0 h1:fKHEYokW/QrMzVNXId74/6RObRIUs9T2oroGKtR25Iw=
github.com/ogen-go/ogen v1.16.0/go.mod h1:s3nWiMzybSf8fhxckyO+wtto92+QHpEL8FmkPnhL3jI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs=
github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@ -78,16 +81,27 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@ -96,16 +110,26 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20251017212417-90e834f514db h1:by6IehL4BH5k3e3SJmcoNbOobMey2SLpAF79iPOEBvw=
golang.org/x/exp v0.0.0-20251017212417-90e834f514db/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=

View file

@ -91,10 +91,6 @@ var (
ErrConfig = New(http.StatusInternalServerError, "configuration error")
ErrConfigInvalidEnv = New(http.StatusInternalServerError, "invalid environment configuration")
ErrConfigMissingRequired = New(http.StatusInternalServerError, "missing required configuration")
ErrRedis = New(http.StatusInternalServerError, "redis error")
ErrRedisLockAlreadyLocked = New(http.StatusConflict, "could not perform redis lock", "already locked")
ErrRedisCacheMiss = New(http.StatusNotFound, "cache miss", "key not found")
ErrRedisCacheCorrupted = New(http.StatusInternalServerError, "cache data corrupted", "invalid format")
ErrDownload = New(http.StatusInternalServerError, "download error")
ErrProcessing = New(http.StatusInternalServerError, "data processing error")
ErrNoCubeFilesFound = New(http.StatusNotFound, "no cube files found")

View file

@ -8,7 +8,7 @@
- **Сборка 5D-куба** (время, давление, широта, долгота, переменные u/v)
- **Эффективное хранение** с использованием mmap
- **Интерполяция** ветровых данных для произвольных координат и времени
- **Кэширование** результатов (in-memory + Redis)
- **Кэширование** результатов (in-memory)
- **Распределенные блокировки** для предотвращения дублирования загрузок
## Архитектура
@ -38,7 +38,6 @@ cfg := grib.ServiceConfig{
Dir: "/tmp/grib",
TTL: 24 * time.Hour,
CacheTTL: 1 * time.Hour,
Redis: redisClient,
Parallel: 4,
Client: &http.Client{Timeout: 30 * time.Second},
}
@ -68,7 +67,6 @@ wind, err := service.Extract(ctx, lat, lon, alt, timestamp)
## Кэширование
- **In-memory кэш**: быстрый доступ к недавно запрошенным данным
- **Redis кэш**: распределенное кэширование для множественных реплик
## Расписание обновлений
@ -83,12 +81,11 @@ wind, err := service.Extract(ctx, lat, lon, alt, timestamp)
- **Высокая производительность** (mmap, конкурентные загрузки)
- **Эффективное использование памяти** (не загружает весь массив в RAM)
- **Горизонтальное масштабирование** (stateless, множество реплик)
- **Встроенное кэширование** (in-memory + Redis)
- **Встроенное кэширование** (in-memory)
### Особенности:
- Использует `github.com/nilsmagnus/grib` вместо pygrib
- Реализует собственную логику интерполяции
- Поддерживает распределенные блокировки через Redis
## Конфигурация
@ -99,6 +96,5 @@ wind, err := service.Extract(ctx, lat, lon, alt, timestamp)
- `Dir` - директория для хранения файлов
- `TTL` - время жизни данных (по умолчанию 24 часа)
- `CacheTTL` - время жизни кэша (по умолчанию 1 час)
- `Redis` - Redis клиент для блокировок и кэша
- `Parallel` - количество параллельных загрузок
- `Client` - HTTP клиент для загрузок

View file

@ -3,7 +3,6 @@ package grib
import (
"context"
"encoding/binary"
"fmt"
"math"
"net/http"
"os"
@ -17,12 +16,6 @@ import (
"github.com/nilsmagnus/grib/griblib"
)
type RedisIface interface {
Lock(ctx context.Context, key string, ttl time.Duration) (func(context.Context), error)
Set(key string, value []byte, ttl time.Duration) error
Get(key string) ([]byte, error)
}
type Service interface {
Update(ctx context.Context) error
Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error)
@ -34,7 +27,6 @@ type ServiceConfig struct {
Dir string
TTL time.Duration
CacheTTL time.Duration
Redis RedisIface
Parallel int
Client *http.Client
DatasetURL string
@ -134,12 +126,6 @@ func (s *service) Update(ctx context.Context) error {
}
}
unlock, err := s.cfg.Redis.Lock(ctx, "grib-dl", 45*time.Minute)
if err != nil {
return err
}
defer unlock(ctx)
// Check again after acquiring lock (double-checked locking pattern)
if d := s.data.Load(); d != nil {
runTime := time.Unix(d.runUTC, 0)
@ -290,29 +276,6 @@ func (s *service) Extract(ctx context.Context, lat, lon, alt float64, ts time.Ti
return [2]float64(v), nil
}
// Try Redis cache
redisKey := fmt.Sprintf("grib:extract:%d", key)
if cached, err := s.cfg.Redis.Get(redisKey); err == nil {
var result [2]float64
if len(cached) == 16 {
result[0] = math.Float64frombits(binary.LittleEndian.Uint64(cached[:8]))
result[1] = math.Float64frombits(binary.LittleEndian.Uint64(cached[8:]))
s.cache.set(key, vec(result))
return result, nil
} else {
// Cache data is corrupted (wrong length)
return zero, errcodes.ErrRedisCacheCorrupted
}
} else {
// Check if it's a cache miss (expected error)
if errcode, ok := errcodes.AsErr(err); ok && errcode == errcodes.ErrRedisCacheMiss {
// Cache miss is expected, continue with calculation
} else {
// Unexpected error, return it
return zero, err
}
}
// Calculate result
td := ts.Sub(time.Unix(d.runUTC, 0)).Hours()
u, v := d.uv(lat, lon, alt, td)
@ -321,12 +284,6 @@ func (s *service) Extract(ctx context.Context, lat, lon, alt float64, ts time.Ti
// Cache in memory
s.cache.set(key, vec(out))
// Cache in Redis
encoded := make([]byte, 16)
binary.LittleEndian.PutUint64(encoded[:8], math.Float64bits(out[0]))
binary.LittleEndian.PutUint64(encoded[8:], math.Float64bits(out[1]))
s.cfg.Redis.Set(redisKey, encoded, s.cfg.CacheTTL)
return out, nil
}

View file

@ -13,12 +13,6 @@ type Config struct {
GribParallel int `env:"GSN_PREDICTOR_GRIB_PARALLEL" envDefault:"4"`
GribTimeout time.Duration `env:"GSN_PREDICTOR_GRIB_TIMEOUT" envDefault:"30s"`
GribDatasetURL string `env:"GSN_PREDICTOR_GRIB_DATASET_URL" envDefault:"https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod"`
// --- Redis Configuration ---
RedisHost string `env:"GSN_PREDICTOR_REDIS_HOST"`
RedisPort int `env:"GSN_PREDICTOR_REDIS_PORT"`
RedisPassword string `env:"GSN_PREDICTOR_REDIS_PASSWORD"`
RedisDB int `env:"GSN_PREDICTOR_REDIS_DB"`
}
func (c *Config) CreateHTTPClient() *http.Client {

View file

@ -10,10 +10,3 @@ type Grib interface {
Extract(ctx context.Context, lat, lon, alt float64, ts time.Time) ([2]float64, error)
Close() error
}
type Redis interface {
Lock(ctx context.Context, key string, ttl time.Duration) (func(context.Context), error)
Set(key string, value []byte, ttl time.Duration) error
Get(key string) ([]byte, error)
Close() error
}

View file

@ -219,6 +219,31 @@ func (s *Service) customProfile(ctx context.Context, params ds.PredictionParamet
return results
}
func rk4Step(lat, lon, alt float64, t time.Time, dt float64, windFunc func(lat, lon, alt float64, t time.Time) (float64, float64), altRate float64) (float64, float64, float64) {
// Helper for RK4 integration step
toRad := math.Pi / 180.0
toDeg := 180.0 / math.Pi
R := func(alt float64) float64 { return 6371009.0 + alt }
f := func(lat, lon, alt float64, t time.Time) (float64, float64, float64) {
windU, windV := windFunc(lat, lon, alt, t)
Rnow := R(alt)
dlat := toDeg * windV / Rnow
dlon := toDeg * windU / (Rnow * math.Cos(lat*toRad))
return dlat, dlon, altRate
}
k1_lat, k1_lon, k1_alt := f(lat, lon, alt, t)
k2_lat, k2_lon, k2_alt := f(lat+0.5*k1_lat*dt, lon+0.5*k1_lon*dt, alt+0.5*k1_alt*dt, t.Add(time.Duration(0.5*dt)*time.Second))
k3_lat, k3_lon, k3_alt := f(lat+0.5*k2_lat*dt, lon+0.5*k2_lon*dt, alt+0.5*k2_alt*dt, t.Add(time.Duration(0.5*dt)*time.Second))
k4_lat, k4_lon, k4_alt := f(lat+k3_lat*dt, lon+k3_lon*dt, alt+k3_alt*dt, t.Add(time.Duration(dt)*time.Second))
latNew := lat + (dt/6.0)*(k1_lat+2*k2_lat+2*k3_lat+k4_lat)
lonNew := lon + (dt/6.0)*(k1_lon+2*k2_lon+2*k3_lon+k4_lon)
altNew := alt + (dt/6.0)*(k1_alt+2*k2_alt+2*k3_alt+k4_alt)
return latNew, lonNew, altNew
}
func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParameters, ascentRate, targetAltitude float64, customCurve *CustomCurve) []ds.PredicitonResult {
const dt = 10.0 // simulation step in seconds
const outputInterval = 60.0 // output every 60 seconds
@ -230,7 +255,6 @@ func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParame
results := make([]ds.PredicitonResult, 0, 1000)
// Always include the initial launch point
latCopy := lat
lonCopy := lon
altCopy := alt
@ -247,40 +271,39 @@ func (s *Service) simulateAscent(ctx context.Context, params ds.PredictionParame
WindV: &windV,
})
var nextOutputTime = timeCur.Add(time.Duration(outputInterval) * time.Second)
for alt < targetAltitude {
wind, err := s.ExtractWind(ctx, lat, lon, alt, timeCur)
nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second)
windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) {
w, err := s.ExtractWind(ctx, lat, lon, alt, t)
if err != nil {
log.Ctx(ctx).Warn("Wind extraction failed during ascent", zap.Error(err))
break
return 0, 0
}
return w[0], w[1]
}
for alt < targetAltitude {
altRate := ascentRate
if customCurve != nil {
altRate = s.getCustomAltitudeRate(customCurve, alt, ascentRate)
}
latDot := (wind[1] / 111320.0)
lonDot := (wind[0] / (40075000.0 * math.Cos(lat*math.Pi/180) / 360.0))
lat += latDot * dt
lon += lonDot * dt
alt += altRate * dt
latNew, lonNew, altNew := rk4Step(lat, lon, alt, timeCur, dt, windFunc, altRate)
timeCur = timeCur.Add(time.Duration(dt) * time.Second)
lat = latNew
lon = lonNew
alt = altNew
// Don't add a point if we've reached or exceeded target altitude
if alt >= targetAltitude {
break
}
if !timeCur.Before(nextOutputTime) {
wU, wV := windFunc(lat, lon, alt, timeCur)
latCopy := lat
lonCopy := lon
altCopy := alt
timeCopy := timeCur
windU := wind[0]
windV := wind[1]
windU := wU
windV := wV
results = append(results, ds.PredicitonResult{
Latitude: &latCopy,
Longitude: &lonCopy,
@ -307,7 +330,6 @@ func (s *Service) simulateDescent(ctx context.Context, params ds.PredictionParam
results := make([]ds.PredicitonResult, 0, 1000)
// Always include the initial descent point
latCopy := lat
lonCopy := lon
altCopy := alt
@ -324,40 +346,39 @@ func (s *Service) simulateDescent(ctx context.Context, params ds.PredictionParam
WindV: &windV,
})
var nextOutputTime = timeCur.Add(time.Duration(outputInterval) * time.Second)
for alt > targetAltitude {
wind, err := s.ExtractWind(ctx, lat, lon, alt, timeCur)
nextOutputTime := timeCur.Add(time.Duration(outputInterval) * time.Second)
windFunc := func(lat, lon, alt float64, t time.Time) (float64, float64) {
w, err := s.ExtractWind(ctx, lat, lon, alt, t)
if err != nil {
log.Ctx(ctx).Warn("Wind extraction failed during descent", zap.Error(err))
break
return 0, 0
}
return w[0], w[1]
}
for alt > targetAltitude {
altRate := -descentRate
if customCurve != nil {
altRate = -s.getCustomAltitudeRate(customCurve, alt, descentRate)
}
latDot := (wind[1] / 111320.0)
lonDot := (wind[0] / (40075000.0 * math.Cos(lat*math.Pi/180) / 360.0))
lat += latDot * dt
lon += lonDot * dt
alt += altRate * dt
latNew, lonNew, altNew := rk4Step(lat, lon, alt, timeCur, dt, windFunc, altRate)
timeCur = timeCur.Add(time.Duration(dt) * time.Second)
lat = latNew
lon = lonNew
alt = altNew
// Don't add a point if we've reached or gone below target altitude
if alt <= targetAltitude {
break
}
if !timeCur.Before(nextOutputTime) {
wU, wV := windFunc(lat, lon, alt, timeCur)
latCopy := lat
lonCopy := lon
altCopy := alt
timeCopy := timeCur
windU := wind[0]
windV := wind[1]
windU := wU
windV := wV
results = append(results, ds.PredicitonResult{
Latitude: &latCopy,
Longitude: &lonCopy,

View file

@ -8,16 +8,14 @@ import (
)
type Service struct {
cfg *Config
redis Redis
grib Grib
cfg *Config
grib Grib
}
func New(cfg *Config, gribService Grib, redisService Redis) (*Service, error) {
func New(cfg *Config, gribService Grib) (*Service, error) {
svc := &Service{
cfg: cfg,
redis: redisService,
grib: gribService,
cfg: cfg,
grib: gribService,
}
return svc, nil

View file

@ -1,21 +0,0 @@
package redis
import (
"context"
"time"
)
// Service defines the interface for Redis operations: distributed locking,
// byte-value caching with a TTL, and connection teardown.
type Service interface {
// Lock acquires a distributed lock named key for ttl and returns a
// callback that releases it.
Lock(ctx context.Context, key string, ttl time.Duration) (func(context.Context), error)
// Set sets a key with value and TTL
Set(key string, value []byte, ttl time.Duration) error
// Get retrieves a value by key
Get(key string) ([]byte, error)
// Close closes the Redis connection
Close() error
}

View file

@ -1,89 +0,0 @@
package redis
import (
"context"
"fmt"
"time"
"git.intra.yksa.space/gsn/predictor/internal/pkg/errcodes"
"github.com/bsm/redislock"
"github.com/redis/go-redis/v9"
)
// Client wraps a go-redis connection together with a redislock locker.
type Client struct {
client *redis.Client
locker *redislock.Client
}
// Ensure Client implements Service interface
var _ Service = (*Client)(nil)
// Config holds Redis connection settings, populated from environment
// variables via the struct tags.
type Config struct {
Host string `env:"HOST"`
Port int `env:"PORT"`
Password string `env:"PASSWORD"`
DB int `env:"DB"`
}
// New builds a Client from cfg, verifies connectivity with a ping
// (5-second timeout), and wires up a redislock-based locker.
func New(cfg Config) (*Client, error) {
	opts := &redis.Options{
		Addr:     fmt.Sprintf("%s:%d", cfg.Host, cfg.Port),
		Password: cfg.Password,
		DB:       cfg.DB,
	}
	rdb := redis.NewClient(opts)

	// Fail fast if the server is unreachable.
	pingCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := rdb.Ping(pingCtx).Err(); err != nil {
		return nil, fmt.Errorf("failed to connect to redis: %w", err)
	}

	return &Client{
		client: rdb,
		locker: redislock.New(rdb),
	}, nil
}
// Lock acquires a distributed lock named key for ttl. On success it returns
// a callback that releases the lock; when the lock is already held it
// returns errcodes.ErrRedisLockAlreadyLocked.
func (c *Client) Lock(ctx context.Context, key string, ttl time.Duration) (func(context.Context), error) {
	lock, err := c.locker.Obtain(ctx, key, ttl, nil)
	switch {
	case err == redislock.ErrNotObtained:
		return nil, errcodes.ErrRedisLockAlreadyLocked
	case err != nil:
		return nil, errcodes.Wrap(err, "failed to obtain redis lock")
	}
	return func(releaseCtx context.Context) {
		lock.Release(releaseCtx)
	}, nil
}
// Set stores value under key with the given TTL.
func (c *Client) Set(key string, value []byte, ttl time.Duration) error {
	err := c.client.Set(context.Background(), key, value, ttl).Err()
	if err == nil {
		return nil
	}
	return errcodes.Wrap(err, "failed to set redis key")
}
// Get fetches the raw bytes stored under key. A missing key is reported as
// errcodes.ErrRedisCacheMiss; any other failure is wrapped.
func (c *Client) Get(key string) ([]byte, error) {
	res := c.client.Get(context.Background(), key)
	switch err := res.Err(); {
	case err == redis.Nil:
		return nil, errcodes.ErrRedisCacheMiss
	case err != nil:
		return nil, errcodes.Wrap(err, "failed to get redis key")
	}
	return res.Bytes()
}
// Close releases the underlying Redis connection.
func (c *Client) Close() error {
return c.client.Close()
}

View file

@ -6,18 +6,16 @@ import requests
import json
from typing import Any
import base64
import math
# --- Config ---
LOCAL_API_URL = "http://localhost:8080/api/v1/prediction"
REFERENCE_API_URL = (
"https://fly.stratonautica.ru/api/v2/?profile=standard_profile&pred_type=single"
"&launch_datetime=2025-06-25T13:28:00Z&launch_latitude=56.6992&launch_longitude=38.8247"
"&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5"
)
REFERENCE_API_URL = "https://fly.stratonautica.ru/api/v2/?profile=standard_profile&pred_type=single&launch_datetime=2025-06-25T20%3A45%3A00Z&launch_latitude=56.6992&launch_longitude=38.8247&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5"
LOCAL_API_URL = "http://localhost:8080/api/v1/prediction?profile=standard_profile&pred_type=single&launch_datetime=2025-06-25T20%3A45%3A00Z&launch_latitude=56.6992&launch_longitude=38.8247&launch_altitude=0&ascent_rate=5&burst_altitude=30000&descent_rate=5"
LOCAL_API_PAYLOAD = {
"launch_latitude": 56.6992,
"launch_longitude": 38.8247,
"launch_datetime": "2025-06-25T13:28:00Z",
"launch_datetime": "2025-06-25T20-45-000Z",
"launch_altitude": 0,
"profile": "standard_profile",
"ascent_rate": 5,
@ -68,18 +66,28 @@ def fetch_reference():
print(f"[INFO] Fetching reference prediction from {REFERENCE_API_URL}")
resp = requests.get(REFERENCE_API_URL, timeout=60)
if resp.status_code != 200:
print(f"[ERROR] Reference API returned {resp.status_code}")
print(f"[ERROR] Reference API returned {resp.status_code}: {resp.text}")
sys.exit(1)
return resp.json()
def fetch_local():
print(f"[INFO] Fetching local prediction from {LOCAL_API_URL}")
resp = requests.post(LOCAL_API_URL, json=LOCAL_API_PAYLOAD, timeout=120)
resp = requests.get(LOCAL_API_URL, timeout=60)
if resp.status_code != 200:
print(f"[ERROR] Local API returned {resp.status_code}: {resp.text}")
sys.exit(1)
return resp.json()
def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometers between two points given in decimal degrees."""
    earth_radius_km = 6371.0
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    half_dphi = math.radians(lat2 - lat1) / 2.0
    half_dlam = math.radians(lon2 - lon1) / 2.0
    # Haversine of the central angle between the two points.
    chord = (math.sin(half_dphi) ** 2
             + math.cos(phi1) * math.cos(phi2) * math.sin(half_dlam) ** 2)
    central_angle = 2.0 * math.atan2(math.sqrt(chord), math.sqrt(1.0 - chord))
    return earth_radius_km * central_angle
def compare_results(reference_data, local_data):
"""Compare prediction results between reference and local APIs."""
print("[INFO] Comparing results ...")
@ -116,38 +124,58 @@ def compare_results(reference_data, local_data):
print(f"[DIFF] Trajectory length mismatch: {len(local_trajectory)} vs {len(ref_trajectory)}")
return False
# Compare trajectory points
# Compare trajectory points and calculate drift
min_len = min(len(ref_trajectory), len(local_trajectory))
max_drift = 0.0
max_drift_idx = -1
drift_list = []
print("\n[DRIFT] Trajectory point-by-point distance (km):")
for i in range(min_len):
ref_point = ref_trajectory[i]
local_point = local_trajectory[i]
ref_lat = ref_point.get('latitude')
ref_lon = ref_point.get('longitude')
local_lat = local_point.get('latitude')
local_lon = local_point.get('longitude')
drift_km = None
if None not in (ref_lat, ref_lon, local_lat, local_lon):
drift_km = haversine(ref_lat, ref_lon, local_lat, local_lon)
drift_list.append(drift_km)
if drift_km > max_drift:
max_drift = drift_km
max_drift_idx = i
print(f" [{i}] Drift: {drift_km:.3f} km")
else:
print(f" [{i}] Drift: N/A (missing lat/lon)")
if drift_list:
mean_drift = sum(drift_list) / len(drift_list)
print(f"\n[DRIFT] Max drift: {max_drift:.3f} km at idx {max_drift_idx}")
print(f"[DRIFT] Mean drift: {mean_drift:.3f} km over {len(drift_list)} points")
else:
print("[DRIFT] No valid drift data to report.")
# Continue with original comparison for altitude, etc.
for i in range(min_len):
ref_point = ref_trajectory[i]
local_point = local_trajectory[i]
# Compare key fields
for key in ['altitude', 'latitude', 'longitude']:
ref_val = ref_point.get(key)
local_val = local_point.get(key)
if ref_val is not None and local_val is not None:
# Use relative tolerance for floating point comparison
if abs(ref_val - local_val) > 0.1: # 0.1 degree/meter tolerance
if abs(ref_val - local_val) > 0.1:
print(f"[DIFF] At idx {i}, key {key}: {local_val} != {ref_val}")
return False
print("[SUCCESS] Results match!")
return True
def test_custom_profile():
"""Test custom profile with base64 encoded curve."""
print("\n[TEST] Testing custom_profile...")
# Create a simple custom ascent curve (altitude vs time in seconds)
curve_data = {
"altitude": [0, 30000],
"time": [0, 6000]
}
curve_b64 = base64.b64encode(json.dumps(curve_data).encode()).decode()
# Test parameters for custom profile
params = {
"launch_latitude": 56.6992,
@ -157,17 +185,15 @@ def test_custom_profile():
"profile": "custom_profile",
"ascent_curve": curve_b64
}
try:
# Test local API
local_resp = requests.post(
# Test local API (use GET)
local_resp = requests.get(
"http://localhost:8080/api/v1/prediction",
json=params,
params=params,
timeout=30
)
local_resp.raise_for_status()
local_data = local_resp.json()
print(f"[INFO] Custom profile test - Local API returned {len(local_data.get('prediction', [{}])[0].get('trajectory', []))} trajectory points")
return True
except Exception as e:
@ -223,21 +249,18 @@ def test_single_profile(profile):
"burst_altitude": 30000,
"descent_rate": 5
}
# Add float altitude for float profile
if profile == "float_profile":
params["float_altitude"] = 25000
try:
# Test local API
local_resp = requests.post(
# Test local API (use GET)
local_resp = requests.get(
"http://localhost:8080/api/v1/prediction",
json=params,
params=params,
timeout=30
)
local_resp.raise_for_status()
local_data = local_resp.json()
print(f"[INFO] {profile} - Local API returned {len(local_data.get('prediction', [{}])[0].get('trajectory', []))} trajectory points")
return True
except Exception as e: