Compare commits

..

2 commits

30 changed files with 421 additions and 1197 deletions

View file

@ -1,36 +0,0 @@
# Build stage
FROM golang:1.24-alpine AS builder
WORKDIR /app
# Copy go mod files
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Build server
RUN CGO_ENABLED=0 go build -o hash-of-wisdom ./cmd/server
# Runtime stage
FROM alpine:3.19
# Create non-root user
RUN addgroup -g 1001 -S hash-of-wisdom && \
adduser -u 1001 -S hash-of-wisdom -G hash-of-wisdom
WORKDIR /app
# Copy binary and config from builder stage with correct ownership
COPY --from=builder --chown=hash-of-wisdom:hash-of-wisdom /app/hash-of-wisdom .
COPY --from=builder --chown=hash-of-wisdom:hash-of-wisdom /app/config.yaml .
# Switch to non-root user
USER hash-of-wisdom
# Expose ports
EXPOSE 8080 8081
# Run server with config file
CMD ["./hash-of-wisdom", "-config", "config.yaml"]

View file

@ -1,86 +0,0 @@
# Hash of Wisdom
A TCP server implementing the "Word of Wisdom" concept with proof-of-work challenges to protect against DDoS attacks.
## Overview
The Hash of Wisdom server requires clients to solve computational puzzles (proof-of-work) before receiving wise quotes. This approach prevents spam and DDoS attacks by requiring clients to invest CPU time for each request.
## Quick Start
### Prerequisites
- Go 1.24.3+
- Docker (optional)
- [Task](https://taskfile.dev/) (optional, but recommended)
### Building
```bash
# Build server
go build -o hash-of-wisdom ./cmd/server
# Build client
go build -o client ./cmd/client
```
### Running
#### Using Task (Recommended)
```bash
# Most useful command - run all checks
task check
# Start server
task server -- -config config.yaml
# Connect client
task client -- --addr=localhost:8080
# Run tests
task test
# See coverage
task test-coverage
# See all available commands
task --list
```
#### Manual Commands
```bash
# Start server (uses config.yaml by default)
./hash-of-wisdom
# Or with custom config
./hash-of-wisdom -config /path/to/config.yaml
# Connect with client
./client -addr localhost:8080
# Run tests
go test ./...
```
### Using Docker
```bash
# Build image
docker build -t hash-of-wisdom .
# Run container
docker run -p 8080:8080 -p 8081:8081 hash-of-wisdom
```
### Monitoring
- Metrics: http://localhost:8081/metrics (Prometheus format with Go runtime stats)
- Profiling: http://localhost:8081/debug/pprof/
## Documentation
### Protocol & Implementation
- [Protocol Specification](docs/PROTOCOL.md) - Binary protocol definition
- [PoW Algorithm Analysis](docs/POW_ANALYSIS.md) - Algorithm selection rationale and comparison
- [Implementation Plan](docs/IMPLEMENTATION.md) - Development phases and progress
- [Package Structure](docs/PACKAGES.md) - Code organization and package responsibilities
- [Architecture Choices](docs/ARCHITECTURE.md) - Design decisions and patterns
### Production Readiness
- [Production Readiness Guide](docs/PRODUCTION_READINESS.md) - Requirements for production deployment

View file

@@ -93,41 +93,3 @@ tasks:
     desc: Alias for cpu-burner
     cmds:
       - task: cpu-burner
-
-  server:
-    desc: Build and run the server
-    cmds:
-      - go build -o hash-of-wisdom ./cmd/server
-      - ./hash-of-wisdom {{.CLI_ARGS}}
-
-  server-config:
-    desc: Build and run the server with custom config
-    cmds:
-      - go build -o hash-of-wisdom ./cmd/server
-      - ./hash-of-wisdom -config {{.CONFIG | default "config.yaml"}}
-
-  client:
-    desc: Build and run the client
-    cmds:
-      - go build -o client ./cmd/client
-      - ./client {{.CLI_ARGS | default "-addr localhost:8080"}}
-
-  docker-build:
-    desc: Build Docker image
-    cmds:
-      - docker build -t hash-of-wisdom .
-
-  docker-run:
-    desc: Run Docker container
-    cmds:
-      - docker run -p 8080:8080 -p 8081:8081 hash-of-wisdom
-
-  metrics:
-    desc: Check metrics endpoint
-    cmds:
-      - curl -s http://localhost:8081/metrics
-
-  integration:
-    desc: Run integration tests only
-    cmds:
-      - go test -v ./test/integration/...

View file

@@ -2,47 +2,32 @@ package main

 import (
 	"context"
-	"flag"
 	"log/slog"
-	"net/http"
 	"os"
 	"os/signal"
 	"syscall"
+	"time"

-	"hash-of-wisdom/internal/config"
 	"hash-of-wisdom/internal/lib/sl"
 	"hash-of-wisdom/internal/pow/challenge"
 	"hash-of-wisdom/internal/quotes"
 	"hash-of-wisdom/internal/server"
 	"hash-of-wisdom/internal/service"
-
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-	_ "net/http/pprof"
 )

 func main() {
-	configPath := flag.String("config", "", "path to configuration file")
-	flag.Parse()
-
-	// Load configuration
-	cfg, err := config.Load(*configPath)
-	if err != nil {
-		slog.Error("failed to load config", sl.Err(err))
-		os.Exit(1)
+	addr := ":8080"
+	if len(os.Args) > 1 {
+		addr = os.Args[1]
 	}

 	logger := slog.Default()
-	logger.Info("starting word of wisdom server", "address", cfg.Server.Address)
+	logger.Info("starting word of wisdom server", "address", addr)

-	// Create components using config
-	challengeConfig, err := challenge.NewConfig(
-		challenge.WithDefaultDifficulty(cfg.PoW.Difficulty),
-		challenge.WithMaxDifficulty(cfg.PoW.MaxDifficulty),
-		challenge.WithChallengeTTL(cfg.PoW.TTL),
-		challenge.WithHMACSecret([]byte(cfg.PoW.HMACSecret)),
-	)
+	// Create components
+	challengeConfig, err := challenge.NewConfig()
 	if err != nil {
-		logger.Error("failed to create challenge config", sl.Err(err))
+		logger.Error("failed to create config", sl.Err(err))
 		os.Exit(1)
 	}
 	generator := challenge.NewGenerator(challengeConfig)

@@ -54,51 +39,35 @@ func main() {
 	wisdomService := service.NewWisdomService(genAdapter, verifier, quoteService)

 	// Create server configuration
-	serverConfig := &server.Config{
-		Address: cfg.Server.Address,
-		Timeouts: server.TimeoutConfig{
-			Read:       cfg.Server.Timeouts.Read,
-			Write:      cfg.Server.Timeouts.Write,
-			Connection: cfg.Server.Timeouts.Connection,
-		},
-	}
-
-	// Go runtime metrics are automatically registered by default registry
-
-	// Start metrics and pprof HTTP server
-	go func() {
-		http.Handle("/metrics", promhttp.Handler())
-		logger.Info("starting metrics server", "address", cfg.Metrics.Address)
-		if err := http.ListenAndServe(cfg.Metrics.Address, nil); err != nil {
-			logger.Error("metrics server failed", sl.Err(err))
-		}
-	}()
-
-	// Create context that cancels on interrupt signals
-	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
-	defer cancel()
+	serverConfig := server.DefaultConfig()
+	serverConfig.Address = addr

 	// Create server
-	srv := server.NewTCPServer(wisdomService, serverConfig,
+	srv := server.NewTCPServer(wisdomService,
+		server.WithConfig(serverConfig),
 		server.WithLogger(logger))

 	// Start server
+	ctx := context.Background()
 	if err := srv.Start(ctx); err != nil {
 		logger.Error("failed to start server", sl.Err(err))
 		os.Exit(1)
 	}

-	logger.Info("server ready - press ctrl+c to stop")
-
-	// Wait for context cancellation (signal received)
-	<-ctx.Done()
+	// Wait for interrupt
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+
+	logger.Info("server ready - press ctrl+c to stop")
+	<-sigChan

 	// Graceful shutdown
 	logger.Info("shutting down server")
 	if err := srv.Stop(); err != nil {
 		logger.Error("error during shutdown", sl.Err(err))
-	} else {
-		logger.Info("server stopped gracefully")
 	}
+
+	// Give connections time to close
+	time.Sleep(100 * time.Millisecond)
+	logger.Info("server stopped")
 }

View file

@ -1,22 +0,0 @@
server:
address: ":8080"
timeouts:
read: 5s
write: 5s
connection: 15s
pow:
difficulty: 25
max_difficulty: 30
ttl: 5m
hmac_secret: "development-secret-change-in-production"
quotes:
timeout: 10s
metrics:
address: ":8081"
logging:
level: "info"
format: "text"

View file

@ -1,330 +0,0 @@
# Architecture Choices
This document explains the key architectural decisions made in the Hash of Wisdom project and the reasoning behind them.
## Overall Architecture
### Clean Architecture
We follow Clean Architecture principles with clear layer separation:
```
┌─────────────────────────────────────┐
│ Infrastructure Layer │ ← cmd/, internal/server, internal/protocol
├─────────────────────────────────────┤
│ Application Layer │ ← internal/application (message handling)
├─────────────────────────────────────┤
│ Domain Layer │ ← internal/service, internal/pow (business logic)
├─────────────────────────────────────┤
│ External Layer │ ← internal/quotes (external APIs)
└─────────────────────────────────────┘
```
**Benefits**:
- **Testability**: Each layer can be unit tested independently
- **Maintainability**: Changes in one layer don't cascade
- **Flexibility**: Easy to swap implementations (e.g., different quote sources)
- **Domain Focus**: Core business rules are isolated and protected
## Protocol Design
### Binary Protocol with JSON Payloads
Choice: Custom binary protocol with JSON-encoded message bodies
**Why Binary Protocol**:
- **Performance**: Efficient framing and length prefixes
- **Reliability**: Clear message boundaries prevent parsing issues
- **Extensibility**: Easy to add message types and versions
**Why JSON Payloads**:
- **Simplicity**: Standard library support, easy debugging
- **Flexibility**: Schema evolution without breaking compatibility
- **Tooling**: Excellent tooling and human readability
**Alternative Considered**: Pure binary (Protocol Buffers)
- **Rejected Because**: Added complexity without significant benefit for our use case
- **Trade-off**: Slightly larger payload size for much simpler implementation
### Stateless Challenge Design
Choice: HMAC-signed challenges with all state embedded
```go
type Challenge struct {
Target string `json:"target"` // "quotes"
Timestamp int64 `json:"timestamp"` // Unix timestamp
Difficulty int `json:"difficulty"` // Leading zero bits
Random string `json:"random"` // Entropy
Signature string `json:"signature"` // HMAC-SHA256
}
```
**Benefits**:
- **Scalability**: No server-side session storage required
- **Reliability**: Challenges survive server restarts
- **Security**: HMAC prevents tampering and replay attacks
- **Simplicity**: No cache management or cleanup needed
**Alternative Considered**: Session-based challenges
- **Rejected Because**: Requires distributed session management for horizontal scaling
## Proof-of-Work Algorithm
### SHA-256 with Leading Zero Bits
Choice: SHA-256 hashing with difficulty measured as leading zero bits
**Why SHA-256**:
- **Security**: Cryptographically secure, extensively tested
- **Performance**: Hardware-optimized on most platforms
- **Standardization**: Well-known algorithm with predictable properties
**Why Leading Zero Bits**:
- **Linear Scaling**: Each bit doubles the difficulty (2^n complexity)
- **Simplicity**: Easy to verify and understand
- **Flexibility**: Fine-grained difficulty adjustment
**Alternative Considered**: Scrypt/Argon2 (memory-hard functions)
- **Rejected Because**: Excessive complexity for DDoS protection use case
- **Trade-off**: ASIC resistance not needed for temporary challenges
### Difficulty Range: 4-30 Bits
Choice: Configurable difficulty with reasonable bounds
- **Minimum (4 bits)**: ~16 attempts average, sub-second solve time
- **Maximum (30 bits)**: ~1 billion attempts, several seconds on modern CPU
- **Default (4 bits)**: Balance between protection and user experience
## Server Architecture
### TCP Server with Per-Connection Goroutines
Choice: Custom TCP server with one goroutine per connection
```go
func (s *TCPServer) Start(ctx context.Context) error {
// Start listener
listener, err := net.Listen("tcp", s.config.Address)
if err != nil {
return err
}
// Start accept loop in goroutine
go s.acceptLoop(ctx)
return nil // Returns immediately
}
func (s *TCPServer) acceptLoop(ctx context.Context) {
for {
conn, err := s.listener.Accept()
if err != nil || ctx.Err() != nil {
return
}
// Launch handler in goroutine with WaitGroup tracking
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.handleConnection(ctx, conn)
}()
}
}
```
**Benefits**:
- **Concurrency**: Each connection handled in separate goroutine
- **Non-blocking Start**: Server starts in background, returns immediately
- **Graceful Shutdown**: WaitGroup ensures all connections finish before stop
- **Context Cancellation**: Proper cleanup when context is cancelled
- **Resource Control**: Connection timeouts prevent resource exhaustion
**Alternative Considered**: HTTP/REST API
- **Rejected Because**: Test task requirements
### Connection Security: Multi-Level Timeouts
Choice: Layered timeout protection against various attacks
1. **Connection Timeout (15s)**: Maximum total connection lifetime
2. **Read Timeout (5s)**: Maximum time between incoming bytes
3. **Write Timeout (5s)**: Maximum time to send response
**Protects Against**:
- **Slowloris**: Slow read timeout prevents slow header attacks
- **Slow POST**: Connection timeout limits total request time
- **Resource Exhaustion**: Automatic cleanup of stale connections
## Configuration Management
### cleanenv with YAML + Environment Variables
Choice: File-based configuration with environment variable overrides
```yaml
# config.yaml
server:
address: ":8080"
pow:
difficulty: 4
```
```bash
# Environment override
export POW_DIFFICULTY=8
```
**Benefits**:
- **Development**: Easy configuration files for local development
- **Production**: Environment variables for containerized deployments
- **Validation**: Built-in validation and type conversion
- **Documentation**: Self-documenting with struct tags
**Alternative Considered**: Pure environment variables
- **Rejected Because**: Harder to manage complex configurations
## Observability Architecture
### Prometheus Metrics
Choice: Prometheus format metrics with essential measurements
**Application Metrics**:
- `wisdom_requests_total` - All incoming requests
- `wisdom_request_errors_total{error_type}` - Errors by type
- `wisdom_request_duration_seconds` - Request processing time
- `wisdom_quotes_served_total` - Successfully served quotes
**Go Runtime Metrics** (automatically exported):
- `go_memstats_*` - Memory allocation and GC statistics
- `go_goroutines` - Current number of goroutines
- `go_gc_duration_seconds` - Garbage collection duration
- `process_*` - Process-level CPU, memory, and file descriptor stats
**Design Principle**: Simple metrics that provide actionable insights
- **Avoided**: Complex multi-dimensional metrics
- **Focus**: Essential health and performance indicators
- **Runtime Visibility**: Go collector provides deep runtime observability
### Metrics at Infrastructure Layer
Choice: Collect metrics in TCP server, not business logic
```go
// In TCP server (infrastructure)
metrics.RequestsTotal.Inc()
start := time.Now()
response, err := s.wisdomApplication.HandleMessage(ctx, msg)
metrics.RequestDuration.Observe(time.Since(start).Seconds())
```
**Benefits**:
- **Separation of Concerns**: Business logic stays pure
- **Consistency**: All requests measured the same way
- **Performance**: Minimal overhead in critical path
## Design Patterns
### Dependency Injection
All major components use constructor injection:
```go
server := server.NewTCPServer(wisdomApplication, config, options...)
service := service.NewWisdomService(generator, verifier, quoteService)
```
**Benefits**:
- **Testing**: Easy to inject mocks and stubs
- **Configuration**: Runtime assembly of components
- **Decoupling**: Components don't know about concrete implementations
### Interface Segregation
Small, focused interfaces for easy testing:
```go
type ChallengeGenerator interface {
GenerateChallenge(ctx context.Context) (*Challenge, error)
}
type QuoteService interface {
GetQuote(ctx context.Context) (string, error)
}
```
### Functional Options
Flexible configuration with sensible defaults:
```go
server := NewTCPServer(application, config,
WithLogger(logger),
)
```
### Clean Architecture Implementation
See the layer diagram in the Overall Architecture section above for package organization.
## Testing Architecture
### Layered Testing Strategy
1. **Unit Tests**: Each package tested independently with mocks
2. **Integration Tests**: End-to-end tests with real TCP connections
3. **Benchmark Tests**: Performance validation for PoW algorithms
```go
// Unit test with mocks
func TestWisdomService_HandleMessage(t *testing.T) {
mockGenerator := &MockGenerator{}
mockVerifier := &MockVerifier{}
mockQuotes := &MockQuoteService{}
service := NewWisdomService(mockGenerator, mockVerifier, mockQuotes)
// Test business logic in isolation
}
// Integration test with real components
func TestTCPServer_SlowlorisProtection(t *testing.T) {
// Start real server, make slow connection
// Verify server doesn't hang
}
```
## Security Architecture
### Defense in Depth
Multiple security layers working together:
1. **HMAC Authentication**: Prevents challenge tampering
2. **Timestamp Validation**: Prevents replay attacks (5-minute TTL)
3. **Connection Timeouts**: Prevents resource exhaustion
4. **Proof-of-Work**: Rate limiting through computational cost
5. **Input Validation**: All protocol messages validated
### Threat Model
**Primary Threats Addressed**:
- **DDoS Attacks**: PoW makes attacks expensive
- **Resource Exhaustion**: Connection timeouts and limits
- **Protocol Attacks**: Binary framing prevents confusion
- **Replay Attacks**: Timestamp validation in challenges
**Threats NOT Addressed** (by design):
- **Authentication**: Public service, no user accounts
- **Authorization**: All valid solutions get quotes
- **Data Confidentiality**: Quotes are public information
## Trade-offs Made
### Simplicity vs Performance
- **Chose**: Simple JSON payloads over binary serialization
- **Trade-off**: ~30% larger messages for easier debugging and maintenance
### Memory vs CPU
- **Chose**: Stateless challenges requiring CPU verification
- **Trade-off**: More CPU per request for better scalability
### Flexibility vs Optimization
- **Chose**: Interface-based design with dependency injection
- **Trade-off**: Small runtime overhead for much better testability
### Features vs Complexity
- **Chose**: Essential features only (no rate limiting, user accounts, etc.)
- **Benefit**: Clean, focused implementation that does one thing well
## Future Architecture Considerations
For production scaling, consider:
1. **Quote Service Enhancement**: Caching, fallback quotes, multiple API sources
2. **Load Balancing**: Multiple server instances behind load balancer
3. **Rate Limiting**: Per-IP request limiting for additional protection
4. **Monitoring**: Full observability stack (Prometheus, Grafana, alerting)
5. **Security**: TLS encryption for sensitive deployments
The current architecture provides a solid foundation for these enhancements while maintaining simplicity and focus.

View file

@@ -102,19 +102,90 @@
 - [X] Verify server successfully handles slow writer attacks
 - [X] Test end-to-end client-server communication flow

-## Phase 8: Server Instrumentation & Configuration
-- [X] Add `/metrics` HTTP endpoint for Prometheus collection
-- [X] Add `/debug/pprof` endpoint for performance profiling
-- [X] Create Dockerfile to build server image
-- [X] Implement configuration management using cleanenv library
-- [X] Read configuration from file with environment variable support
+## Phase 8: Basic Server Architecture
+- [ ] Set up metrics collection (prometheus)
+- [ ] Create configuration management
+- [ ] Integrate all components into server architecture

-## Phase 9: Documentation
-- [X] Create comprehensive README.md with project overview and quick start
-- [X] Document package structure and responsibilities
-- [X] Document architecture choices and design decisions
-- [X] Update production readiness assessment
+## Phase 9: Advanced Server Features
+- [ ] Add connection pooling and advanced connection management
+- [ ] Implement graceful shutdown mechanism
+- [ ] Add health check endpoints
+- [ ] Add request/response logging middleware
+- [ ] Create health check endpoints
+- [ ] Write integration tests for server core
+
+## Phase 10: DDOS Protection & Rate Limiting
+- [ ] Implement IP-based connection limiting
+- [ ] Create rate limiting service with time windows
+- [ ] Add automatic difficulty adjustment based on load
+- [ ] Implement temporary IP blacklisting
+- [ ] Create circuit breaker for overload protection
+- [ ] Add monitoring for attack detection
+- [ ] Write tests for protection mechanisms
+
+## Phase 11: Observability & Monitoring
+- [ ] Add structured logging throughout application
+- [ ] Implement metrics for key performance indicators:
+  - [ ] Active connections count
+  - [ ] Challenge generation rate
+  - [ ] Solution verification rate
+  - [ ] Success/failure ratios
+  - [ ] Response time histograms
+- [ ] Create logging middleware for request tracing
+- [ ] Add error categorization and reporting
+- [ ] Implement health check endpoints
+
+## Phase 12: Configuration & Environment Setup
+- [ ] Create configuration structure with validation
+- [ ] Support environment variables and config files
+- [ ] Add configuration for different environments (dev/prod)
+- [ ] Implement feature flags for protection levels
+- [ ] Create deployment configuration templates
+- [ ] Add configuration validation and defaults
+
+## Phase 13: Docker & Deployment
+- [ ] Create multi-stage Dockerfile for server
+- [ ] Create Dockerfile for client
+- [ ] Create docker-compose.yml for local development
+- [ ] Add docker-compose for production deployment
+- [ ] Create health check scripts for containers
+- [ ] Add environment-specific configurations
+- [ ] Create deployment documentation
+
+## Phase 14: Testing & Quality Assurance
+- [ ] Write comprehensive unit tests (>80% coverage):
+  - [ ] PoW algorithm tests
+  - [ ] Protocol handler tests
+  - [ ] Rate limiting tests
+  - [ ] Quote service tests
+  - [ ] Configuration tests
+- [ ] Create integration tests:
+  - [ ] End-to-end client-server communication
+  - [ ] Load testing scenarios
+  - [ ] Failure recovery tests
+  - [ ] DDOS protection validation
+- [ ] Add benchmark tests for performance validation
+- [ ] Create stress testing scenarios
+
+## Phase 15: Documentation & Final Polish
+- [ ] Write comprehensive README with setup instructions
+- [ ] Create API documentation for all interfaces
+- [ ] Add inline code documentation
+- [ ] Create deployment guide
+- [ ] Write troubleshooting guide
+- [ ] Add performance tuning recommendations
+- [ ] Create monitoring and alerting guide
+
+## Phase 16: Production Readiness Checklist
+- [ ] Security audit of all components
+- [ ] Performance benchmarking and optimization
+- [ ] Memory leak detection and prevention
+- [ ] Resource cleanup validation
+- [ ] Error handling coverage review
+- [ ] Logging security (no sensitive data exposure)
+- [ ] Configuration security (secrets management)
+- [ ] Container security hardening

 ## Directory Structure
 ```
@@ -137,3 +208,13 @@
 ├── deployments/          # Deployment configurations
 └── docs/                 # Additional documentation
 ```
+
+## Success Criteria
+- [ ] Server handles 1000+ concurrent connections
+- [ ] PoW protection prevents DDOS attacks effectively
+- [ ] All tests pass with >80% code coverage
+- [ ] Docker containers build and run successfully
+- [ ] Client successfully solves challenges and receives quotes
+- [ ] Comprehensive logging and metrics in place
+- [ ] Production-ready error handling and recovery
+- [ ] Clear documentation for deployment and operation

View file

@ -1,147 +0,0 @@
# Package Structure
This document explains the organization and responsibilities of all packages in the Hash of Wisdom project.
## Directory Structure
```
/
├── cmd/ # Application entry points
│ ├── server/ # Server application
│ └── client/ # Client application
├── internal/ # Private application packages
│ ├── application/ # Application layer (message handling)
│ ├── config/ # Configuration management
│ ├── lib/ # Shared utilities
│ ├── metrics/ # Prometheus metrics
│ ├── pow/ # Proof-of-Work implementation
│ ├── protocol/ # Binary protocol codec
│ ├── quotes/ # Quote service
│ ├── server/ # TCP server implementation
│ └── service/ # Business logic layer
├── test/ # Integration tests
└── docs/ # Documentation
```
## Package Responsibilities
### `cmd/server`
**Entry point for the TCP server application**
- Parses command-line flags and configuration
- Initializes all components with dependency injection
- Starts TCP server and metrics endpoints
- Handles graceful shutdown signals
### `cmd/client`
**Entry point for the client application**
- Command-line interface for connecting to server
- Handles proof-of-work solving on client side
- Manages TCP connection lifecycle
### `internal/config`
**Configuration management with cleanenv**
- Defines configuration structures with YAML/env tags
- Loads configuration from files and environment variables
- Provides sensible defaults for all settings
- Supports both development and production configurations
### `internal/lib/sl`
**Shared logging utilities**
- Structured logging helpers for consistent log formatting
- Error attribute helpers for slog integration
### `internal/metrics`
**Prometheus metrics collection**
- Defines application-specific metrics (requests, errors, duration)
- Provides simple counters and histograms for monitoring
- Integrated at the infrastructure layer (TCP server)
### `internal/pow`
**Proof-of-Work implementation**
#### `internal/pow/challenge`
- **Challenge Generation**: Creates HMAC-signed stateless challenges
- **Verification**: Validates solutions against original challenges
- **Security**: HMAC authentication prevents tampering
- **Configuration**: Difficulty scaling, TTL management, secrets
#### `internal/pow/solver`
- **Solution Finding**: Brute-force nonce search with SHA-256
- **Optimization**: Efficient bit counting for difficulty verification
- **Client-side**: Used by client to solve server challenges
### `internal/protocol`
**Binary protocol codec**
- **Message Types**: Challenge requests/responses, solution requests/responses, errors
- **Encoding/Decoding**: JSON-based message serialization
- **Streaming**: MessageDecoder for reading from TCP connections
- **Validation**: Message structure and field validation
- See [Protocol Specification](PROTOCOL.md) for detailed message flow and format
### `internal/quotes`
**Quote service implementation**
- **HTTP Client**: Fetches quotes from external APIs using resty
- **Interface**: Clean abstraction for quote retrieval
- **Error Handling**: Graceful degradation for network issues
- **Timeout Management**: Configurable request timeouts
### `internal/server`
**TCP server implementation**
#### `internal/server/tcp.go`
- **Connection Management**: Accept, handle, cleanup TCP connections
- **Protocol Integration**: Uses protocol package for message handling
- **Security**: Connection timeouts, slowloris protection
- **Metrics**: Request tracking at infrastructure layer
- **Lifecycle**: Graceful startup/shutdown with context
#### `internal/server/config.go`
- **Server Configuration**: Network settings, timeouts
- **Functional Options**: Builder pattern for server customization
### `internal/application`
**Application layer (message handling and coordination)**
- **WisdomApplication**: Protocol message handler and coordinator
- **Message Processing**: Handles challenge and solution requests from protocol layer
- **Response Generation**: Creates appropriate protocol responses
- **Service Coordination**: Orchestrates calls to business logic layer
- **Error Handling**: Converts service errors to protocol error responses
### `internal/service`
**Business logic layer (core domain services)**
- **WisdomService**: Main business logic coordinator
- **Challenge Workflow**: Manages challenge generation and validation
- **Solution Workflow**: Handles solution verification and quote retrieval
- **Clean Architecture**: Pure business logic, no I/O dependencies
- **Testing**: Easily mockable interfaces for unit testing
**Service Dependencies**:
- `ChallengeGenerator` - Creates new challenges
- `ChallengeVerifier` - Validates submitted solutions
- `QuoteService` - Retrieves quotes after successful validation
### `test/integration`
**End-to-end integration tests**
- **Slowloris Protection**: Tests server resilience against slow attacks
- **Connection Timeouts**: Validates timeout configurations
- **Full Workflow**: Tests complete client-server interaction
- **Real Components**: Uses actual TCP connections and protocol
## Dependency Flow
```
cmd/server
internal/config → internal/server → internal/application → internal/service
↓ ↓ ↓
internal/protocol internal/protocol internal/pow
internal/metrics internal/quotes
```
## Architecture Benefits
This package structure provides:
- **Clear Separation**: Each package has a single, well-defined responsibility
- **Testability**: Dependencies are injected, making testing straightforward
- **Maintainability**: Changes are isolated to specific layers
- **Scalability**: Clean interfaces allow for easy implementation swapping

View file

@ -1,70 +0,0 @@
# Production Readiness Assessment
## Current Implementation Status
### ✅ Core Functionality (Complete)
- **Proof of Work System**: SHA-256 hashcash with HMAC-signed stateless challenges
- **Binary Protocol**: Custom TCP protocol with JSON payloads and proper framing
- **TCP Server**: Connection handling with timeout protection against slowloris attacks
- **Client Application**: CLI tool with challenge solving and solution submission
- **Service Layer**: Clean architecture with dependency injection
- **Quote System**: External API integration for inspirational quotes
- **Security**: HMAC authentication, replay protection, input validation
- **Testing**: Comprehensive unit tests and slowloris protection integration tests
### ✅ Observability & Configuration (Complete)
- **Metrics Endpoint**: Prometheus metrics at `/metrics` with application and Go runtime KPIs
- **Application Metrics**: Request tracking, error categorization, duration histograms, quotes served
- **Go Runtime Metrics**: Memory stats, GC metrics, goroutine counts, process stats (auto-registered)
- **Profiler Endpoint**: Go pprof integration at `/debug/pprof/` for performance debugging
- **Structured Logging**: slog integration throughout server components with consistent formatting
- **Configuration**: cleanenv-based config management with YAML files and environment variables
- **Containerization**: Production-ready Dockerfile with security best practices
- **Error Handling**: Proper error propagation and categorization
- **Graceful Shutdown**: Context-based shutdown with connection draining
## Remaining Components for Production
### Critical for Production
1. **Connection Pooling & Resource Management** (worker pools, connection limits)
2. **Rate Limiting & DDoS Protection**
3. **Secret Management** (HMAC keys, external API credentials)
4. **Advanced Monitoring & Alerting**
5. **Advanced Configuration Management**
6. **Health Checks** (graceful shutdown already implemented)
### Important for Scale
7. **Security Hardening**
8. **Quote Service Enhancement** (caching, fallback quotes, multiple sources)
9. **Load Testing & Performance**
10. **Documentation & Runbooks**
### Nice to Have
11. **Advanced Observability**
12. **Chaos Engineering**
13. **Automated Deployment**
## Risk Assessment
### High Risk Areas
- **No rate limiting**: Vulnerable to sophisticated DDoS attacks
- **Hardcoded secrets**: HMAC keys in configuration files (not properly secured)
- **Limited monitoring**: Basic metrics but no alerting or attack detection
- **Single point of failure**: No redundancy or failover
### Medium Risk Areas
- **Memory management**: Potential leaks under high load
- **External dependencies**: Quote API could become bottleneck
- **Configuration drift**: Manual configuration prone to errors
## Current Architecture Strengths
The existing implementation provides an excellent foundation:
- **Clean Architecture**: Proper separation of concerns with dependency injection
- **Security-First Design**: HMAC authentication, replay protection, and timeout protection
- **Stateless Operation**: HMAC-signed challenges enable horizontal scaling
- **Graceful Shutdown**: Proper context handling and connection draining
- **Comprehensive Testing**: Proven slowloris protection and unit test coverage
- **Observability Ready**: Prometheus metrics, pprof profiling, structured logging
- **Standard Protocols**: Industry-standard approaches (TCP, JSON, SHA-256)
- **Container Ready**: Production Dockerfile with security best practices

17
go.mod
View file

@ -4,28 +4,17 @@ go 1.24.3
require ( require (
github.com/go-resty/resty/v2 v2.16.5 github.com/go-resty/resty/v2 v2.16.5
github.com/ilyakaznacheev/cleanenv v1.5.0
github.com/prometheus/client_golang v1.23.0
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
) )
require ( require (
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/joho/godotenv v1.5.1 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.65.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/objx v0.5.2 // indirect
golang.org/x/net v0.40.0 // indirect golang.org/x/net v0.35.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/time v0.8.0 // indirect golang.org/x/time v0.8.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 // indirect
) )

50
go.sum
View file

@ -1,60 +1,26 @@
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/ilyakaznacheev/cleanenv v1.5.0 h1:0VNZXggJE2OYdXE87bfSSwGxeiGt9moSR2lOrsHHvr4=
github.com/ilyakaznacheev/cleanenv v1.5.0/go.mod h1:a5aDzaJrLCQZsazHol1w8InnDcOX0OColm64SlIi6gk=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 h1:slmdOY3vp8a7KQbHkL+FLbvbkgMqmXojpFUO/jENuqQ=
olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw=

View file

@ -32,9 +32,9 @@ func TestWisdomApplication_HandleMessage_UnsupportedType(t *testing.T) {
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.MessageType(0xFF), // Invalid type Type: protocol.MessageType(0xFF), // Invalid type
PayloadLength: 0, PayloadLength: 0,
PayloadStream: nil, PayloadStream: nil,
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -64,9 +64,9 @@ func TestWisdomApplication_HandleChallengeRequest_Success(t *testing.T) {
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.ChallengeRequestType, Type: protocol.ChallengeRequestType,
PayloadLength: 0, PayloadLength: 0,
PayloadStream: nil, PayloadStream: nil,
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -89,9 +89,9 @@ func TestWisdomApplication_HandleChallengeRequest_ServiceError(t *testing.T) {
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.ChallengeRequestType, Type: protocol.ChallengeRequestType,
PayloadLength: 0, PayloadLength: 0,
PayloadStream: nil, PayloadStream: nil,
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -138,9 +138,9 @@ func TestWisdomApplication_HandleSolutionRequest_Success(t *testing.T) {
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.SolutionRequestType, Type: protocol.SolutionRequestType,
PayloadLength: uint32(len(payloadJSON)), PayloadLength: uint32(len(payloadJSON)),
PayloadStream: bytes.NewReader(payloadJSON), PayloadStream: bytes.NewReader(payloadJSON),
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -161,9 +161,9 @@ func TestWisdomApplication_HandleSolutionRequest_InvalidJSON(t *testing.T) {
invalidJSON := []byte("invalid json") invalidJSON := []byte("invalid json")
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.SolutionRequestType, Type: protocol.SolutionRequestType,
PayloadLength: uint32(len(invalidJSON)), PayloadLength: uint32(len(invalidJSON)),
PayloadStream: bytes.NewReader(invalidJSON), PayloadStream: bytes.NewReader(invalidJSON),
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -205,9 +205,9 @@ func TestWisdomApplication_HandleSolutionRequest_VerificationFailed(t *testing.T
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.SolutionRequestType, Type: protocol.SolutionRequestType,
PayloadLength: uint32(len(payloadJSON)), PayloadLength: uint32(len(payloadJSON)),
PayloadStream: bytes.NewReader(payloadJSON), PayloadStream: bytes.NewReader(payloadJSON),
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -250,9 +250,9 @@ func TestWisdomApplication_HandleSolutionRequest_QuoteServiceError(t *testing.T)
ctx := context.Background() ctx := context.Background()
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.SolutionRequestType, Type: protocol.SolutionRequestType,
PayloadLength: uint32(len(payloadJSON)), PayloadLength: uint32(len(payloadJSON)),
PayloadStream: bytes.NewReader(payloadJSON), PayloadStream: bytes.NewReader(payloadJSON),
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)
@ -279,9 +279,9 @@ func TestWisdomApplication_HandleMessage_ContextCancellation(t *testing.T) {
mockService.On("GenerateChallenge", mock.Anything, "quotes").Return(nil, context.Canceled) mockService.On("GenerateChallenge", mock.Anything, "quotes").Return(nil, context.Canceled)
msg := &protocol.Message{ msg := &protocol.Message{
Type: protocol.ChallengeRequestType, Type: protocol.ChallengeRequestType,
PayloadLength: 0, PayloadLength: 0,
PayloadStream: nil, PayloadStream: nil,
} }
response, err := app.HandleMessage(ctx, msg) response, err := app.HandleMessage(ctx, msg)

View file

@ -1,72 +0,0 @@
package config
import (
	"fmt"
	"time"

	"github.com/ilyakaznacheev/cleanenv"
)
// Config is the root configuration for the application, aggregating the
// per-subsystem sections. Values come from a YAML file and/or environment
// variables (see the env/env-default tags on the nested types).
type Config struct {
	Server  ServerConfig  `yaml:"server"`  // TCP server address and timeouts
	PoW     PoWConfig     `yaml:"pow"`     // proof-of-work challenge parameters
	Quotes  QuotesConfig  `yaml:"quotes"`  // quote service settings
	Metrics MetricsConfig `yaml:"metrics"` // metrics endpoint settings
	Logging LoggingConfig `yaml:"logging"` // log level and output format
}
// ServerConfig holds the TCP server's listen address and its timeout budget.
type ServerConfig struct {
	Address  string        `yaml:"address" env:"SERVER_ADDRESS" env-default:":8080"` // host:port to listen on
	Timeouts TimeoutConfig `yaml:"timeouts"`
}

// TimeoutConfig groups the per-connection timeouts used by the server.
type TimeoutConfig struct {
	// Read is the per-read deadline on the connection.
	Read time.Duration `yaml:"read" env:"SERVER_READ_TIMEOUT" env-default:"5s"`
	// Write is the per-write deadline on the connection.
	Write time.Duration `yaml:"write" env:"SERVER_WRITE_TIMEOUT" env-default:"5s"`
	// Connection caps the total lifetime of a single connection.
	Connection time.Duration `yaml:"connection" env:"SERVER_CONNECTION_TIMEOUT" env-default:"15s"`
}
// PoWConfig holds proof-of-work challenge parameters.
type PoWConfig struct {
	// Difficulty is the number of leading zero bits a solution hash must have.
	Difficulty int `yaml:"difficulty" env:"POW_DIFFICULTY" env-default:"4"`
	// MaxDifficulty is the upper bound the server will accept/issue.
	MaxDifficulty int `yaml:"max_difficulty" env:"POW_MAX_DIFFICULTY" env-default:"10"`
	// TTL is how long an issued challenge remains valid.
	TTL time.Duration `yaml:"ttl" env:"POW_TTL" env-default:"5m"`
	// HMACSecret signs challenges so the server can verify them statelessly.
	// NOTE(review): the default is a development-only value; production must
	// override it via POW_HMAC_SECRET (hardcoded secrets are called out as a
	// high-risk item in the project's own risk assessment).
	HMACSecret string `yaml:"hmac_secret" env:"POW_HMAC_SECRET" env-default:"development-secret-change-in-production"`
}
// QuotesConfig holds settings for the upstream quote service client.
type QuotesConfig struct {
	// Timeout bounds a single quote-fetch request.
	Timeout time.Duration `yaml:"timeout" env:"QUOTES_TIMEOUT" env-default:"10s"`
}

// MetricsConfig holds settings for the metrics HTTP endpoint.
type MetricsConfig struct {
	// Address is the host:port the metrics server listens on.
	Address string `yaml:"address" env:"METRICS_ADDRESS" env-default:":8081"`
}

// LoggingConfig holds logger settings.
type LoggingConfig struct {
	// Level is the minimum log level (e.g. "info").
	Level string `yaml:"level" env:"LOG_LEVEL" env-default:"info"`
	// Format selects the log output format (e.g. "text").
	Format string `yaml:"format" env:"LOG_FORMAT" env-default:"text"`
}
// Load builds a Config. When configPath is non-empty the YAML file at that
// path is read (with env-var overrides per the struct tags); otherwise the
// configuration is populated from environment variables and tag defaults.
//
// Errors are wrapped with the source that failed so callers can tell a bad
// file apart from a bad environment.
func Load(configPath string) (*Config, error) {
	cfg := &Config{}
	if configPath == "" {
		// No file supplied: env vars and env-default tags only.
		if err := cleanenv.ReadEnv(cfg); err != nil {
			return nil, fmt.Errorf("reading config from environment: %w", err)
		}
		return cfg, nil
	}
	if err := cleanenv.ReadConfig(configPath, cfg); err != nil {
		return nil, fmt.Errorf("reading config file %q: %w", configPath, err)
	}
	return cfg, nil
}
// ToServerConfig exposes the server section for components that only need it.
func (c *Config) ToServerConfig() *ServerConfig {
	return &c.Server
}

// ToPoWConfig exposes the proof-of-work section for components that only need it.
func (c *Config) ToPoWConfig() *PoWConfig {
	return &c.PoW
}

View file

@ -1,33 +0,0 @@
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// Prometheus metrics for the wisdom server. promauto registers each metric
// with the default registry at package init time.
var (
	// ActiveConnections tracks TCP connections currently open.
	ActiveConnections = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "wisdom_active_connections",
		Help: "Number of currently active TCP connections",
	})

	// RequestsTotal counts every request processed, successful or not.
	RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Name: "wisdom_requests_total",
		Help: "Total number of requests processed",
	})

	// RequestErrors counts failed requests, partitioned by the error_type label.
	RequestErrors = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "wisdom_request_errors_total",
		Help: "Total number of request errors by type",
	}, []string{"error_type"})

	// RequestDuration observes per-request processing latency in seconds.
	RequestDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Name: "wisdom_request_duration_seconds",
		Help: "Time taken to process requests",
	})

	// QuotesServed counts quotes successfully delivered to clients.
	QuotesServed = promauto.NewCounter(prometheus.CounterOpts{
		Name: "wisdom_quotes_served_total",
		Help: "Total number of quotes successfully served to clients",
	})
)

View file

@ -68,8 +68,8 @@ func (c *Challenge) VerifySolution(nonce uint64) bool {
// hasLeadingZeroBits checks if hash has the required number of leading zero bits // hasLeadingZeroBits checks if hash has the required number of leading zero bits
func hasLeadingZeroBits(hash []byte, difficulty int) bool { func hasLeadingZeroBits(hash []byte, difficulty int) bool {
full := difficulty >> 3 // number of whole zero bytes full := difficulty >> 3 // number of whole zero bytes
rem := uint(difficulty & 7) // remaining leading zero bits rem := uint(difficulty & 7) // remaining leading zero bits
for i := range full { for i := range full {
if hash[i] != 0 { if hash[i] != 0 {

View file

@ -4,8 +4,8 @@ package protocol
type MessageType byte type MessageType byte
const ( const (
ChallengeRequestType MessageType = 0x01 ChallengeRequestType MessageType = 0x01
SolutionRequestType MessageType = 0x03 SolutionRequestType MessageType = 0x03
// Response types (for responses.go) // Response types (for responses.go)
ChallengeResponseType MessageType = 0x02 ChallengeResponseType MessageType = 0x02
QuoteResponseType MessageType = 0x04 QuoteResponseType MessageType = 0x04
@ -14,14 +14,14 @@ const (
// Error codes as defined in protocol specification // Error codes as defined in protocol specification
const ( const (
ErrMalformedMessage = "MALFORMED_MESSAGE" ErrMalformedMessage = "MALFORMED_MESSAGE"
ErrInvalidChallenge = "INVALID_CHALLENGE" ErrInvalidChallenge = "INVALID_CHALLENGE"
ErrInvalidSolution = "INVALID_SOLUTION" ErrInvalidSolution = "INVALID_SOLUTION"
ErrExpiredChallenge = "EXPIRED_CHALLENGE" ErrExpiredChallenge = "EXPIRED_CHALLENGE"
ErrRateLimited = "RATE_LIMITED" ErrRateLimited = "RATE_LIMITED"
ErrServerError = "SERVER_ERROR" ErrServerError = "SERVER_ERROR"
ErrTooManyConnections = "TOO_MANY_CONNECTIONS" ErrTooManyConnections = "TOO_MANY_CONNECTIONS"
ErrDifficultyTooHigh = "DIFFICULTY_TOO_HIGH" ErrDifficultyTooHigh = "DIFFICULTY_TOO_HIGH"
) )
// Protocol constants // Protocol constants

View file

@ -43,8 +43,8 @@ func (d *MessageDecoder) Decode(r io.Reader) (*Message, error) {
} }
return &Message{ return &Message{
Type: msgType, Type: msgType,
PayloadLength: payloadLength, PayloadLength: payloadLength,
PayloadStream: payloadStream, PayloadStream: payloadStream,
}, nil }, nil
} }

View file

@ -13,11 +13,11 @@ func TestMessageDecoder_Decode_Header(t *testing.T) {
decoder := NewMessageDecoder() decoder := NewMessageDecoder()
tests := []struct { tests := []struct {
name string name string
data []byte data []byte
wantType MessageType wantType MessageType
wantLength uint32 wantLength uint32
wantErr string wantErr string
}{ }{
{ {
name: "challenge request with empty payload", name: "challenge request with empty payload",

View file

@ -7,6 +7,7 @@ import (
"hash-of-wisdom/internal/pow/challenge" "hash-of-wisdom/internal/pow/challenge"
) )
// ChallengeRequest is empty (no payload for challenge requests) // ChallengeRequest is empty (no payload for challenge requests)
type ChallengeRequest struct{} type ChallengeRequest struct{}

View file

@ -8,6 +8,7 @@ import (
"hash-of-wisdom/internal/quotes" "hash-of-wisdom/internal/quotes"
) )
// ChallengeResponse represents a challenge response // ChallengeResponse represents a challenge response
type ChallengeResponse struct { type ChallengeResponse struct {
Challenge *challenge.Challenge Challenge *challenge.Challenge

View file

@ -113,6 +113,7 @@ func TestChallengeRequest_EmptyPayload(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
func TestPayloadStream_LimitedRead(t *testing.T) { func TestPayloadStream_LimitedRead(t *testing.T) {
decoder := NewMessageDecoder() decoder := NewMessageDecoder()

View file

@ -4,7 +4,7 @@ import "io"
// Message represents a protocol message with type and payload stream // Message represents a protocol message with type and payload stream
type Message struct { type Message struct {
Type MessageType Type MessageType
PayloadLength uint32 PayloadLength uint32
PayloadStream io.Reader PayloadStream io.Reader
} }

View file

@ -17,3 +17,15 @@ type TimeoutConfig struct {
// Connection timeout is the maximum total connection lifetime // Connection timeout is the maximum total connection lifetime
Connection time.Duration Connection time.Duration
} }
// DefaultConfig returns default server configuration
func DefaultConfig() *Config {
return &Config{
Address: ":8080",
Timeouts: TimeoutConfig{
Read: 5 * time.Second,
Write: 5 * time.Second,
Connection: 15 * time.Second,
},
}
}

View file

@ -11,7 +11,6 @@ import (
"hash-of-wisdom/internal/application" "hash-of-wisdom/internal/application"
"hash-of-wisdom/internal/lib/sl" "hash-of-wisdom/internal/lib/sl"
"hash-of-wisdom/internal/metrics"
"hash-of-wisdom/internal/protocol" "hash-of-wisdom/internal/protocol"
"hash-of-wisdom/internal/service" "hash-of-wisdom/internal/service"
) )
@ -24,12 +23,19 @@ type TCPServer struct {
listener net.Listener listener net.Listener
logger *slog.Logger logger *slog.Logger
wg sync.WaitGroup wg sync.WaitGroup
cancel context.CancelFunc shutdown chan struct{}
} }
// Option is a functional option for configuring TCPServer // Option is a functional option for configuring TCPServer
type option func(*TCPServer) type option func(*TCPServer)
// WithConfig sets a custom configuration
func WithConfig(config *Config) option {
return func(s *TCPServer) {
s.config = config
}
}
// WithLogger sets a custom logger // WithLogger sets a custom logger
func WithLogger(logger *slog.Logger) option { func WithLogger(logger *slog.Logger) option {
return func(s *TCPServer) { return func(s *TCPServer) {
@ -37,13 +43,14 @@ func WithLogger(logger *slog.Logger) option {
} }
} }
// NewTCPServer creates a new TCP server with required configuration // NewTCPServer creates a new TCP server with optional configuration
func NewTCPServer(wisdomService *service.WisdomService, config *Config, opts ...option) *TCPServer { func NewTCPServer(wisdomService *service.WisdomService, opts ...option) *TCPServer {
server := &TCPServer{ server := &TCPServer{
config: config, config: DefaultConfig(),
wisdomApplication: application.NewWisdomApplication(wisdomService), wisdomApplication: application.NewWisdomApplication(wisdomService),
decoder: protocol.NewMessageDecoder(), decoder: protocol.NewMessageDecoder(),
logger: slog.Default(), logger: slog.Default(),
shutdown: make(chan struct{}),
} }
for _, opt := range opts { for _, opt := range opts {
@ -53,6 +60,7 @@ func NewTCPServer(wisdomService *service.WisdomService, config *Config, opts ...
return server return server
} }
// Start starts the TCP server // Start starts the TCP server
func (s *TCPServer) Start(ctx context.Context) error { func (s *TCPServer) Start(ctx context.Context) error {
listener, err := net.Listen("tcp", s.config.Address) listener, err := net.Listen("tcp", s.config.Address)
@ -63,22 +71,14 @@ func (s *TCPServer) Start(ctx context.Context) error {
s.listener = listener s.listener = listener
s.logger.Info("tcp server started", "address", s.config.Address) s.logger.Info("tcp server started", "address", s.config.Address)
// Create cancellable context for server lifecycle go s.acceptLoop(ctx)
serverCtx, cancel := context.WithCancel(ctx)
s.cancel = cancel
go s.acceptLoop(serverCtx)
return nil return nil
} }
// Stop gracefully stops the server // Stop gracefully stops the server
func (s *TCPServer) Stop() error { func (s *TCPServer) Stop() error {
s.logger.Info("stopping tcp server") s.logger.Info("stopping tcp server")
close(s.shutdown)
// Cancel server context to stop accept loop and active connections
if s.cancel != nil {
s.cancel()
}
if s.listener != nil { if s.listener != nil {
s.listener.Close() s.listener.Close()
@ -101,6 +101,8 @@ func (s *TCPServer) Address() string {
func (s *TCPServer) acceptLoop(ctx context.Context) { func (s *TCPServer) acceptLoop(ctx context.Context) {
for { for {
select { select {
case <-s.shutdown:
return
case <-ctx.Done(): case <-ctx.Done():
return return
default: default:
@ -109,7 +111,7 @@ func (s *TCPServer) acceptLoop(ctx context.Context) {
rawConn, err := s.listener.Accept() rawConn, err := s.listener.Accept()
if err != nil { if err != nil {
select { select {
case <-ctx.Done(): case <-s.shutdown:
return return
default: default:
s.logger.Error("accept error", sl.Err(err)) s.logger.Error("accept error", sl.Err(err))
@ -129,10 +131,6 @@ func (s *TCPServer) acceptLoop(ctx context.Context) {
func (s *TCPServer) handleConnection(ctx context.Context, rawConn net.Conn) { func (s *TCPServer) handleConnection(ctx context.Context, rawConn net.Conn) {
defer rawConn.Close() defer rawConn.Close()
// Track active connections
metrics.ActiveConnections.Inc()
defer metrics.ActiveConnections.Dec()
connLogger := s.logger.With("remote_addr", rawConn.RemoteAddr().String()) connLogger := s.logger.With("remote_addr", rawConn.RemoteAddr().String())
connLogger.Info("connection accepted") connLogger.Info("connection accepted")
@ -190,38 +188,19 @@ func (s *TCPServer) processConnection(ctx context.Context, conn net.Conn, logger
logger.Debug("client closed connection gracefully") logger.Debug("client closed connection gracefully")
return nil return nil
} }
metrics.RequestErrors.WithLabelValues("decode_error").Inc()
logger.Error("failed to decode message", sl.Err(err)) logger.Error("failed to decode message", sl.Err(err))
return fmt.Errorf("decode error: %w", err) return fmt.Errorf("decode error: %w", err)
} }
logger.Debug("message decoded", "type", msg.Type, "payload_length", msg.PayloadLength) logger.Debug("message decoded", "type", msg.Type, "payload_length", msg.PayloadLength)
// Track all requests // Process message through application layer
metrics.RequestsTotal.Inc()
// Process message through application layer with timing
start := time.Now()
response, err := s.wisdomApplication.HandleMessage(ctx, msg) response, err := s.wisdomApplication.HandleMessage(ctx, msg)
duration := time.Since(start)
metrics.RequestDuration.Observe(duration.Seconds())
if err != nil { if err != nil {
metrics.RequestErrors.WithLabelValues("internal_error").Inc()
logger.Error("application error", sl.Err(err)) logger.Error("application error", sl.Err(err))
return fmt.Errorf("application error: %w", err) return fmt.Errorf("application error: %w", err)
} }
// Check if response is an error response
if errorResp, isError := response.(*protocol.ErrorResponse); isError {
metrics.RequestErrors.WithLabelValues(string(errorResp.Code)).Inc()
} else {
// Track quotes served for successful solution requests
if msg.Type == protocol.SolutionRequestType {
metrics.QuotesServed.Inc()
}
}
logger.Debug("sending response to client") logger.Debug("sending response to client")
// Send response using the response's own Encode method // Send response using the response's own Encode method
if err := response.Encode(dc); err != nil { if err := response.Encode(dc); err != nil {

View file

@ -9,11 +9,11 @@ import (
) )
var ( var (
ErrResourceRequired = errors.New("resource is required") ErrResourceRequired = errors.New("resource is required")
ErrUnsupportedResource = errors.New("unsupported resource") ErrUnsupportedResource = errors.New("unsupported resource")
ErrSolutionRequired = errors.New("solution is required") ErrSolutionRequired = errors.New("solution is required")
ErrInvalidChallenge = errors.New("invalid challenge") ErrInvalidChallenge = errors.New("invalid challenge")
ErrInvalidSolution = errors.New("invalid proof of work solution") ErrInvalidSolution = errors.New("invalid proof of work solution")
) )
type ChallengeGenerator interface { type ChallengeGenerator interface {

View file

@ -70,15 +70,15 @@ func TestWisdomService_GenerateChallenge(t *testing.T) {
func TestWisdomService_VerifySolution(t *testing.T) { func TestWisdomService_VerifySolution(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
solution *challenge.Solution solution *challenge.Solution
wantErr error wantErr error
setupMocks func(*mocks.MockChallengeVerifier) setupMocks func(*mocks.MockChallengeVerifier)
}{ }{
{ {
name: "nil solution", name: "nil solution",
solution: nil, solution: nil,
wantErr: ErrSolutionRequired, wantErr: ErrSolutionRequired,
setupMocks: func(mv *mocks.MockChallengeVerifier) {}, setupMocks: func(mv *mocks.MockChallengeVerifier) {},
}, },
{ {
@ -93,17 +93,17 @@ func TestWisdomService_VerifySolution(t *testing.T) {
}, },
}, },
{ {
name: "invalid solution", name: "invalid solution",
solution: createInvalidSolution(t), solution: createInvalidSolution(t),
wantErr: ErrInvalidSolution, wantErr: ErrInvalidSolution,
setupMocks: func(mv *mocks.MockChallengeVerifier) { setupMocks: func(mv *mocks.MockChallengeVerifier) {
mv.EXPECT().VerifyChallenge(mock.Anything).Return(nil).Once() mv.EXPECT().VerifyChallenge(mock.Anything).Return(nil).Once()
}, },
}, },
{ {
name: "valid solution", name: "valid solution",
solution: createValidSolution(t), solution: createValidSolution(t),
wantErr: nil, wantErr: nil,
setupMocks: func(mv *mocks.MockChallengeVerifier) { setupMocks: func(mv *mocks.MockChallengeVerifier) {
mv.EXPECT().VerifyChallenge(mock.Anything).Return(nil).Once() mv.EXPECT().VerifyChallenge(mock.Anything).Return(nil).Once()
}, },

View file

@ -284,8 +284,8 @@ func TestWisdomService_InvalidSolutions(t *testing.T) {
func TestWisdomService_UnsuccessfulFlows(t *testing.T) { func TestWisdomService_UnsuccessfulFlows(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
difficulty int difficulty int
createSolution func(*challenge.Challenge, *challenge.Solution) *challenge.Solution createSolution func(*challenge.Challenge, *challenge.Solution) *challenge.Solution
}{ }{
{ {

View file

@ -1,56 +0,0 @@
package integration
import (
"context"
"testing"
"time"
"hash-of-wisdom/internal/lib/sl"
"hash-of-wisdom/internal/pow/challenge"
"hash-of-wisdom/internal/quotes"
"hash-of-wisdom/internal/server"
"hash-of-wisdom/internal/service"
"github.com/stretchr/testify/require"
)
// testQuoteService is a stub quote provider that always returns the same
// canned quote, keeping integration tests independent of the external API.
type testQuoteService struct{}

// GetRandomQuote satisfies the quote-service interface with a fixed quote.
func (s *testQuoteService) GetRandomQuote(ctx context.Context) (*quotes.Quote, error) {
	q := quotes.Quote{
		Text:   "Test quote for integration testing",
		Author: "Test Author",
	}
	return &q, nil
}
// setupTestServerWithConfig wires up a fully functional TCP server with the
// given config and a stub quote service, starts it, and returns it running.
// Callers are responsible for srv.Stop().
func setupTestServerWithConfig(t *testing.T, serverConfig *server.Config) *server.TCPServer {
	t.Helper()

	// Wire up the PoW challenge pipeline with test-friendly parameters.
	challengeConfig := challenge.TestConfig()
	generator := challenge.NewGenerator(challengeConfig)
	verifier := challenge.NewVerifier(challengeConfig)

	// Static quote source so tests do not hit the external quote API.
	quoteService := &testQuoteService{}

	// Wire up service
	genAdapter := service.NewGeneratorAdapter(generator)
	wisdomService := service.NewWisdomService(genAdapter, verifier, quoteService)

	// Create server with custom config using functional options
	logger := sl.NewMockLogger()
	srv := server.NewTCPServer(wisdomService, serverConfig,
		server.WithLogger(logger))

	// Start with a background context. The previous code passed a 1s timeout
	// context whose deferred cancel fired as soon as this helper returned,
	// cancelling the server's accept loop while tests were still running.
	// Lifetime is managed explicitly via srv.Stop() instead.
	err := srv.Start(context.Background())
	require.NoError(t, err)

	// Give the listener a moment to come up before tests dial it.
	time.Sleep(100 * time.Millisecond)

	return srv
}

View file

@ -5,6 +5,7 @@ import (
"testing" "testing"
"time" "time"
"hash-of-wisdom/internal/protocol"
"hash-of-wisdom/internal/server" "hash-of-wisdom/internal/server"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -13,16 +14,13 @@ import (
func TestSlowlorisProtection_SlowReader(t *testing.T) { func TestSlowlorisProtection_SlowReader(t *testing.T) {
// Setup server with very short read timeout for testing // Setup server with very short read timeout for testing
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ config.Timeouts.Read = 100 * time.Millisecond
Read: 100 * time.Millisecond, config.Timeouts.Write = 5 * time.Second
Write: 5 * time.Second, config.Timeouts.Connection = 15 * time.Second
Connection: 15 * time.Second,
},
}
srv := setupTestServerWithConfig(t, serverConfig) srv := setupTestServerWithConfig(t, config)
defer srv.Stop() defer srv.Stop()
// Connect to server // Connect to server
@ -47,68 +45,154 @@ func TestSlowlorisProtection_SlowReader(t *testing.T) {
assert.Error(t, err, "Connection should be closed due to slow reading") assert.Error(t, err, "Connection should be closed due to slow reading")
} }
func TestSlowlorisProtection_SlowConnectionTimeout(t *testing.T) { func TestSlowlorisProtection_SlowWriter(t *testing.T) {
// Setup server with very short connection timeout for testing // Setup server with very short write timeout for testing
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ config.Timeouts.Read = 5 * time.Second
Read: 5 * time.Second, config.Timeouts.Write = 100 * time.Millisecond
Write: 5 * time.Second, config.Timeouts.Connection = 15 * time.Second
Connection: 200 * time.Millisecond,
},
}
srv := setupTestServerWithConfig(t, serverConfig) srv := setupTestServerWithConfig(t, config)
defer srv.Stop() defer srv.Stop()
// Connect to server but do nothing // Connect to server but don't read responses (simulate slow writer client)
conn, err := net.Dial("tcp", srv.Address()) conn, err := net.Dial("tcp", srv.Address())
require.NoError(t, err) require.NoError(t, err)
defer conn.Close() defer conn.Close()
// Wait longer than connection timeout // Send a complete challenge request
time.Sleep(300 * time.Millisecond) challengeReq := &protocol.ChallengeRequest{}
err = challengeReq.Encode(conn)
require.NoError(t, err)
// Try to read - connection should be closed // Don't read the response to simulate slow writer
// Server should timeout when trying to write response
time.Sleep(200 * time.Millisecond)
// Try to send another request - connection should be closed
err = challengeReq.Encode(conn)
assert.Error(t, err, "Connection should be closed due to slow writing")
}
func TestSlowlorisProtection_ConnectionTimeout(t *testing.T) {
// Setup server with very short connection timeout
config := server.DefaultConfig()
config.Address = ":0"
config.Timeouts.Read = 5 * time.Second
config.Timeouts.Write = 5 * time.Second
config.Timeouts.Connection = 100 * time.Millisecond
srv := setupTestServerWithConfig(t, config)
defer srv.Stop()
// Connect to server
conn, err := net.Dial("tcp", srv.Address())
require.NoError(t, err)
defer conn.Close()
// Wait longer than connection timeout without sending any data
time.Sleep(200 * time.Millisecond)
// Try to read from connection - should get EOF or connection reset
buffer := make([]byte, 1024) buffer := make([]byte, 1024)
conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
_, err = conn.Read(buffer) _, err = conn.Read(buffer)
assert.Error(t, err, "Connection should be closed due to connection timeout") assert.Error(t, err, "Connection should be closed due to connection timeout")
} }
func TestSlowlorisProtection_MultipleSlowClients(t *testing.T) { func TestSlowlorisProtection_MultipleSlowConnections(t *testing.T) {
// Setup server with short timeouts // Setup server with short timeouts
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ config.Timeouts.Read = 50 * time.Millisecond
Read: 1 * time.Second, config.Timeouts.Write = 50 * time.Millisecond
Write: 1 * time.Second, config.Timeouts.Connection = 200 * time.Millisecond
Connection: 2 * time.Second,
},
}
srv := setupTestServerWithConfig(t, serverConfig) srv := setupTestServerWithConfig(t, config)
defer srv.Stop() defer srv.Stop()
// Create multiple slow connections // Create multiple slow connections (simulating slowloris attack)
var connections []net.Conn var conns []net.Conn
for i := 0; i < 5; i++ { for i := 0; i < 3; i++ {
conn, err := net.Dial("tcp", srv.Address()) conn, err := net.Dial("tcp", srv.Address())
require.NoError(t, err) require.NoError(t, err)
connections = append(connections, conn) conns = append(conns, conn)
// Send partial data on each connection
conn.Write([]byte{0x01}) // Challenge request type only // Send partial data to trigger slow reader behavior
_, err = conn.Write([]byte{0x01}) // Just message type
require.NoError(t, err)
} }
// Wait for timeouts to kick in // Clean up connections
time.Sleep(1500 * time.Millisecond) defer func() {
for _, conn := range conns {
conn.Close()
}
}()
// All connections should be closed // Wait for read timeouts to kick in
buffer := make([]byte, 1024) time.Sleep(100 * time.Millisecond)
for i, conn := range connections {
conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) // Verify slow connections are closed by trying to read from them
for i, conn := range conns {
buffer := make([]byte, 1024)
conn.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
_, err := conn.Read(buffer) _, err := conn.Read(buffer)
assert.Error(t, err, "Connection %d should be closed", i) assert.Error(t, err, "Slow connection %d should be closed", i)
conn.Close()
} }
} }
func TestSlowlorisProtection_NormalOperationWithinTimeouts(t *testing.T) {
// Setup server with reasonable timeouts
config := server.DefaultConfig()
config.Address = ":0"
srv := setupTestServerWithConfig(t, config)
defer srv.Stop()
// Connect and complete normal flow quickly
conn, err := net.Dial("tcp", srv.Address())
require.NoError(t, err)
defer conn.Close()
// Request challenge
challengeReq := &protocol.ChallengeRequest{}
err = challengeReq.Encode(conn)
require.NoError(t, err)
// Should receive challenge response without timeout
decoder := protocol.NewMessageDecoder()
msg, err := decoder.Decode(conn)
require.NoError(t, err)
assert.Equal(t, protocol.ChallengeResponseType, msg.Type)
assert.Greater(t, msg.PayloadLength, uint32(0), "Challenge payload should not be empty")
}
func TestSlowlorisProtection_PartialHeaderAttack(t *testing.T) {
// Setup server with short read timeout
config := server.DefaultConfig()
config.Address = ":0"
config.Timeouts.Read = 100 * time.Millisecond
srv := setupTestServerWithConfig(t, config)
defer srv.Stop()
// Connect to server
conn, err := net.Dial("tcp", srv.Address())
require.NoError(t, err)
defer conn.Close()
// Send only message type byte, then stall
_, err = conn.Write([]byte{0x01})
require.NoError(t, err)
// Wait for read timeout
time.Sleep(200 * time.Millisecond)
// Try to read from connection - should be closed
buffer := make([]byte, 1024)
conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
_, err = conn.Read(buffer)
assert.Error(t, err, "Connection should be closed due to partial header")
}

View file

@ -1,12 +1,17 @@
package integration package integration
import ( import (
"context"
"net" "net"
"testing" "testing"
"time" "time"
"hash-of-wisdom/internal/lib/sl"
"hash-of-wisdom/internal/pow/challenge"
"hash-of-wisdom/internal/protocol" "hash-of-wisdom/internal/protocol"
"hash-of-wisdom/internal/quotes"
"hash-of-wisdom/internal/server" "hash-of-wisdom/internal/server"
"hash-of-wisdom/internal/service"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -14,15 +19,12 @@ import (
func TestTCPServer_TimeoutProtection_SlowReader(t *testing.T) { func TestTCPServer_TimeoutProtection_SlowReader(t *testing.T) {
// Setup server with very short read timeout for testing // Setup server with very short read timeout for testing
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ config.Timeouts.Read = 500 * time.Millisecond
Read: 500 * time.Millisecond, config.Timeouts.Write = 5 * time.Second
Write: 5 * time.Second, config.Timeouts.Connection = 15 * time.Second
Connection: 15 * time.Second, srv := setupTestServerWithConfig(t, config)
},
}
srv := setupTestServerWithConfig(t, serverConfig)
defer srv.Stop() defer srv.Stop()
// Connect to server // Connect to server
@ -49,15 +51,12 @@ func TestTCPServer_TimeoutProtection_SlowReader(t *testing.T) {
func TestTCPServer_TimeoutProtection_ConnectionTimeout(t *testing.T) { func TestTCPServer_TimeoutProtection_ConnectionTimeout(t *testing.T) {
// Setup server with very short connection timeout // Setup server with very short connection timeout
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ config.Timeouts.Read = 5 * time.Second
Read: 5 * time.Second, config.Timeouts.Write = 5 * time.Second
Write: 5 * time.Second, config.Timeouts.Connection = 1 * time.Second
Connection: 1 * time.Second, srv := setupTestServerWithConfig(t, config)
},
}
srv := setupTestServerWithConfig(t, serverConfig)
defer srv.Stop() defer srv.Stop()
// Connect to server // Connect to server
@ -98,15 +97,12 @@ func TestTCPServer_NormalOperation_WithinTimeouts(t *testing.T) {
} }
func TestTCPServer_MultipleConnections_IndependentTimeouts(t *testing.T) { func TestTCPServer_MultipleConnections_IndependentTimeouts(t *testing.T) {
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ config.Timeouts.Read = 1 * time.Second
Read: 1 * time.Second, config.Timeouts.Write = 5 * time.Second
Write: 5 * time.Second, config.Timeouts.Connection = 3 * time.Second
Connection: 3 * time.Second, srv := setupTestServerWithConfig(t, config)
},
}
srv := setupTestServerWithConfig(t, serverConfig)
defer srv.Stop() defer srv.Stop()
// Start two connections // Start two connections
@ -147,15 +143,50 @@ func TestTCPServer_MultipleConnections_IndependentTimeouts(t *testing.T) {
// Helper function to create test server with default config // Helper function to create test server with default config
func setupTestServer(t *testing.T) *server.TCPServer { func setupTestServer(t *testing.T) *server.TCPServer {
serverConfig := &server.Config{ config := server.DefaultConfig()
Address: ":0", config.Address = ":0"
Timeouts: server.TimeoutConfig{ return setupTestServerWithConfig(t, config)
Read: 5 * time.Second,
Write: 5 * time.Second,
Connection: 15 * time.Second,
},
}
return setupTestServerWithConfig(t, serverConfig)
} }
// Helper function to create test server with custom config // Helper function to create test server with custom config
func setupTestServerWithConfig(t *testing.T, serverConfig *server.Config) *server.TCPServer {
// Create test components
challengeConfig := challenge.TestConfig()
generator := challenge.NewGenerator(challengeConfig)
verifier := challenge.NewVerifier(challengeConfig)
// Create a simple test quote service
quoteService := &testQuoteService{}
// Wire up service
genAdapter := service.NewGeneratorAdapter(generator)
wisdomService := service.NewWisdomService(genAdapter, verifier, quoteService)
// Create server with custom config using functional options
logger := sl.NewMockLogger()
srv := server.NewTCPServer(wisdomService,
server.WithConfig(serverConfig),
server.WithLogger(logger))
// Start server
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
err := srv.Start(ctx)
require.NoError(t, err)
// Give server time to start
time.Sleep(100 * time.Millisecond)
return srv
}
// testQuoteService provides test quotes
type testQuoteService struct{}
func (s *testQuoteService) GetRandomQuote(ctx context.Context) (*quotes.Quote, error) {
return &quotes.Quote{
Text: "Test quote for integration testing",
Author: "Test Author",
}, nil
}