package main

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"time"

	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
	"next.orly.dev/pkg/interfaces/signer/p8k"
)

// BenchmarkAdapter adapts a database.Database interface to work with benchmark tests.
type BenchmarkAdapter struct {
	config       *BenchmarkConfig
	db           database.Database
	results      []*BenchmarkResult
	mu           sync.RWMutex
	cachedEvents []*event.E // Cache generated events to avoid expensive re-generation
	eventCacheMu sync.Mutex
}

// NewBenchmarkAdapter creates a new benchmark adapter.
func NewBenchmarkAdapter(config *BenchmarkConfig, db database.Database) *BenchmarkAdapter {
	return &BenchmarkAdapter{
		config:  config,
		db:      db,
		results: make([]*BenchmarkResult, 0),
	}
}
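
// BenchmarkConfig and BenchmarkResult are defined elsewhere in this package.
// For reference, the fields used by this adapter imply roughly the following
// shapes (a sketch inferred from usage, not the authoritative definitions):
//
//	type BenchmarkConfig struct {
//		NumEvents         int
//		ConcurrentWorkers int
//	}
//
//	type BenchmarkResult struct {
//		TestName          string
//		Duration          time.Duration
//		TotalEvents       int
//		EventsPerSecond   float64
//		ConcurrentWorkers int
//		MemoryUsed        uint64 // assumed; printed as bytes in printResult
//		SuccessRate       float64
//		AvgLatency        time.Duration
//		P90Latency        time.Duration
//		P95Latency        time.Duration
//		P99Latency        time.Duration
//		Bottom10Avg       time.Duration
//		Errors            []string
//	}
//
// Typical usage (hypothetical driver code):
//
//	cfg := &BenchmarkConfig{NumEvents: 10000, ConcurrentWorkers: 8}
//	ba := NewBenchmarkAdapter(cfg, db)
//	ba.RunPeakThroughputTest()
//	ba.RunBurstPatternTest()
//	ba.GenerateReport()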

// RunPeakThroughputTest runs the peak throughput benchmark.
func (ba *BenchmarkAdapter) RunPeakThroughputTest() {
	fmt.Println("\n=== Peak Throughput Test ===")

	start := time.Now()
	var wg sync.WaitGroup
	var totalEvents int64
	var errs []error
	var latencies []time.Duration
	var mu sync.Mutex

	events := ba.generateEvents(ba.config.NumEvents)
	eventChan := make(chan *event.E, len(events))

	// Fill the event channel so workers can drain it.
	for _, ev := range events {
		eventChan <- ev
	}
	close(eventChan)

	// Split the 20,000 events/s global cap across workers so each worker owns
	// its own rate limiter, avoiding contention on a shared one.
	perWorkerRate := 20000.0 / float64(ba.config.ConcurrentWorkers)

	for i := 0; i < ba.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			// Each worker gets its own rate limiter.
			workerLimiter := NewRateLimiter(perWorkerRate)

			ctx := context.Background()
			for ev := range eventChan {
				// Wait for the rate limiter to allow this event.
				workerLimiter.Wait()

				eventStart := time.Now()
				_, err := ba.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				if err != nil {
					errs = append(errs, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics.
	result := &BenchmarkResult{
		TestName:          "Peak Throughput",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		// Fastest 10% of samples, since latencies are sorted ascending.
		bottom10 := latencies[:int(float64(len(latencies))*0.10)]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	result.SuccessRate = float64(totalEvents) / float64(ba.config.NumEvents) * 100
	if len(errs) > 0 {
		result.Errors = make([]string, 0, len(errs))
		for _, err := range errs {
			result.Errors = append(result.Errors, err.Error())
		}
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}
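
// NewRateLimiter and RateLimiter are defined elsewhere in this package. A
// minimal pacing-limiter sketch consistent with how they are used here (an
// assumption, not the actual implementation):
//
//	type RateLimiter struct {
//		mu       sync.Mutex
//		interval time.Duration
//		next     time.Time
//	}
//
//	func NewRateLimiter(eventsPerSecond float64) *RateLimiter {
//		return &RateLimiter{
//			interval: time.Duration(float64(time.Second) / eventsPerSecond),
//		}
//	}
//
//	// Wait blocks until the next event is allowed at the configured rate.
//	func (rl *RateLimiter) Wait() {
//		rl.mu.Lock()
//		now := time.Now()
//		if rl.next.Before(now) {
//			rl.next = now
//		}
//		sleep := rl.next.Sub(now)
//		rl.next = rl.next.Add(rl.interval)
//		rl.mu.Unlock()
//		time.Sleep(sleep)
//	}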

// RunBurstPatternTest runs the burst pattern benchmark: events are written in
// fixed-size bursts with a short pause between bursts.
func (ba *BenchmarkAdapter) RunBurstPatternTest() {
	fmt.Println("\n=== Burst Pattern Test ===")

	start := time.Now()
	var totalEvents int64
	var latencies []time.Duration
	var mu sync.Mutex

	ctx := context.Background()
	burstSize := 100
	bursts := ba.config.NumEvents / burstSize

	// Create rate limiter: cap at 20,000 events/second globally.
	rateLimiter := NewRateLimiter(20000)

	for i := 0; i < bursts; i++ {
		// Generate a burst of events.
		events := ba.generateEvents(burstSize)

		var wg sync.WaitGroup
		for _, ev := range events {
			wg.Add(1)
			go func(e *event.E) {
				defer wg.Done()

				// Wait for the rate limiter to allow this event.
				rateLimiter.Wait()

				eventStart := time.Now()
				_, err := ba.db.SaveEvent(ctx, e)
				latency := time.Since(eventStart)

				mu.Lock()
				if err == nil {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}(ev)
		}

		wg.Wait()

		// Short pause between bursts.
		time.Sleep(10 * time.Millisecond)
	}

	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          "Burst Pattern",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: burstSize,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       float64(totalEvents) / float64(ba.config.NumEvents) * 100,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		bottom10 := latencies[:int(float64(len(latencies))*0.10)]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}
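
// The sort-and-percentile block above is repeated verbatim in every Run*
// method. A shared helper (hypothetical refactor, not part of this file)
// could collapse the duplication:
//
//	func fillLatencyStats(r *BenchmarkResult, latencies []time.Duration) {
//		if len(latencies) == 0 {
//			return
//		}
//		sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
//		r.AvgLatency = calculateAverage(latencies)
//		r.P90Latency = latencies[int(float64(len(latencies))*0.90)]
//		r.P95Latency = latencies[int(float64(len(latencies))*0.95)]
//		r.P99Latency = latencies[int(float64(len(latencies))*0.99)]
//		// Fastest 10% of samples, since latencies are sorted ascending.
//		r.Bottom10Avg = calculateAverage(latencies[:int(float64(len(latencies))*0.10)])
//	}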

// RunMixedReadWriteTest runs the mixed read/write benchmark: each worker
// issues roughly one read for every two writes.
func (ba *BenchmarkAdapter) RunMixedReadWriteTest() {
	fmt.Println("\n=== Mixed Read/Write Test ===")

	// First, populate some events.
	fmt.Println("Populating database with initial events...")
	populateEvents := ba.generateEvents(1000)
	ctx := context.Background()

	for _, ev := range populateEvents {
		ba.db.SaveEvent(ctx, ev)
	}

	start := time.Now()
	var writeCount, readCount int64
	var latencies []time.Duration
	var mu sync.Mutex
	var wg sync.WaitGroup

	// Create rate limiter for writes: cap at 20,000 events/second.
	rateLimiter := NewRateLimiter(20000)

	// Start workers doing mixed read/write.
	for i := 0; i < ba.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			events := ba.generateEvents(ba.config.NumEvents / ba.config.ConcurrentWorkers)

			for idx, ev := range events {
				eventStart := time.Now()

				if idx%3 == 0 {
					// Read operation: fetch the ten most recent text notes.
					f := filter.New()
					f.Kinds = kind.NewS(kind.TextNote)
					limit := uint(10)
					f.Limit = &limit
					_, _ = ba.db.QueryEvents(ctx, f)

					mu.Lock()
					readCount++
					mu.Unlock()
				} else {
					// Write operation - apply rate limiting.
					rateLimiter.Wait()
					_, _ = ba.db.SaveEvent(ctx, ev)

					mu.Lock()
					writeCount++
					mu.Unlock()
				}

				latency := time.Since(eventStart)
				mu.Lock()
				latencies = append(latencies, latency)
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          fmt.Sprintf("Mixed R/W (R:%d W:%d)", readCount, writeCount),
		Duration:          duration,
		TotalEvents:       int(writeCount + readCount),
		EventsPerSecond:   float64(writeCount+readCount) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       100.0, // Errors are not tracked in this test.
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		bottom10 := latencies[:int(float64(len(latencies))*0.10)]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// RunQueryTest runs the query performance benchmark against a pre-populated
// database.
func (ba *BenchmarkAdapter) RunQueryTest() {
	fmt.Println("\n=== Query Performance Test ===")

	// Populate with test data.
	fmt.Println("Populating database for query tests...")
	events := ba.generateEvents(5000)
	ctx := context.Background()

	for _, ev := range events {
		ba.db.SaveEvent(ctx, ev)
	}

	start := time.Now()
	var queryCount int64
	var latencies []time.Duration
	var mu sync.Mutex
	var wg sync.WaitGroup

	// Three query shapes: text notes, text notes plus reposts, and a
	// time-bounded query over the last hour.
	queryTypes := []func() *filter.F{
		func() *filter.F {
			f := filter.New()
			f.Kinds = kind.NewS(kind.TextNote)
			limit := uint(100)
			f.Limit = &limit
			return f
		},
		func() *filter.F {
			f := filter.New()
			f.Kinds = kind.NewS(kind.TextNote, kind.Repost)
			limit := uint(50)
			f.Limit = &limit
			return f
		},
		func() *filter.F {
			f := filter.New()
			limit := uint(10)
			f.Limit = &limit
			since := time.Now().Add(-1 * time.Hour).Unix()
			f.Since = timestamp.FromUnix(since)
			return f
		},
	}

	// Run concurrent queries, splitting the iteration budget across workers.
	iterations := 1000
	for i := 0; i < ba.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for j := 0; j < iterations/ba.config.ConcurrentWorkers; j++ {
				f := queryTypes[j%len(queryTypes)]()

				queryStart := time.Now()
				_, _ = ba.db.QueryEvents(ctx, f)
				latency := time.Since(queryStart)

				mu.Lock()
				queryCount++
				latencies = append(latencies, latency)
				mu.Unlock()
			}
		}()
	}

	wg.Wait()
	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          fmt.Sprintf("Query Performance (%d queries)", queryCount),
		Duration:          duration,
		TotalEvents:       int(queryCount),
		EventsPerSecond:   float64(queryCount) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       100.0,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		bottom10 := latencies[:int(float64(len(latencies))*0.10)]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// RunConcurrentQueryStoreTest runs the concurrent query and store benchmark:
// half of the workers write while the other half query.
func (ba *BenchmarkAdapter) RunConcurrentQueryStoreTest() {
	fmt.Println("\n=== Concurrent Query+Store Test ===")

	start := time.Now()
	var storeCount, queryCount int64
	var latencies []time.Duration
	var mu sync.Mutex
	var wg sync.WaitGroup

	ctx := context.Background()

	// Half of the workers write, half query.
	halfWorkers := ba.config.ConcurrentWorkers / 2
	if halfWorkers < 1 {
		halfWorkers = 1
	}

	// Create rate limiter for writes: cap at 20,000 events/second.
	rateLimiter := NewRateLimiter(20000)

	// Writers
	for i := 0; i < halfWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			events := ba.generateEvents(ba.config.NumEvents / halfWorkers)
			for _, ev := range events {
				// Wait for the rate limiter to allow this event.
				rateLimiter.Wait()

				eventStart := time.Now()
				ba.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				storeCount++
				latencies = append(latencies, latency)
				mu.Unlock()
			}
		}()
	}

	// Readers
	for i := 0; i < halfWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for j := 0; j < ba.config.NumEvents/halfWorkers; j++ {
				f := filter.New()
				f.Kinds = kind.NewS(kind.TextNote)
				limit := uint(10)
				f.Limit = &limit

				queryStart := time.Now()
				ba.db.QueryEvents(ctx, f)
				latency := time.Since(queryStart)

				mu.Lock()
				queryCount++
				latencies = append(latencies, latency)
				mu.Unlock()

				// Pace readers to roughly 1,000 queries/second per worker.
				time.Sleep(1 * time.Millisecond)
			}
		}()
	}

	wg.Wait()
	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          fmt.Sprintf("Concurrent Q+S (Q:%d S:%d)", queryCount, storeCount),
		Duration:          duration,
		TotalEvents:       int(storeCount + queryCount),
		EventsPerSecond:   float64(storeCount+queryCount) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       100.0,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		bottom10 := latencies[:int(float64(len(latencies))*0.10)]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// generateEvents generates unique synthetic events with realistic content sizes.
func (ba *BenchmarkAdapter) generateEvents(count int) []*event.E {
	fmt.Printf("Generating %d unique synthetic events (minimum 300 bytes each)...\n", count)

	// Create a single signer for all events (reusing the key is faster).
	signer := p8k.MustNew()
	if err := signer.Generate(); err != nil {
		panic(fmt.Sprintf("Failed to generate keypair: %v", err))
	}

	// Base timestamp - start from the current time and increment per event.
	baseTime := time.Now().Unix()

	// Minimum content size.
	const minContentSize = 300

	// Base content template.
	baseContent := "This is a benchmark test event with realistic content size. "

	// Pre-calculate how much padding we need.
	paddingNeeded := minContentSize - len(baseContent)
	if paddingNeeded < 0 {
		paddingNeeded = 0
	}

	// Create the padding string (varied characters for realistic size).
	padding := make([]byte, paddingNeeded)
	for i := range padding {
		padding[i] = ' ' + byte(i%94) // Printable ASCII characters
	}

	events := make([]*event.E, count)
	for i := 0; i < count; i++ {
		ev := event.New()
		ev.Kind = kind.TextNote.K
		ev.CreatedAt = baseTime + int64(i) // Unique timestamp for each event
		ev.Tags = tag.NewS()

		// Create content with a unique identifier and padding.
		ev.Content = []byte(fmt.Sprintf("%s Event #%d. %s", baseContent, i, string(padding)))

		// Sign the event (this calculates ID and Sig).
		if err := ev.Sign(signer); err != nil {
			panic(fmt.Sprintf("Failed to sign event %d: %v", i, err))
		}

		events[i] = ev
	}

	// Print stats.
	totalSize := int64(0)
	for _, ev := range events {
		totalSize += int64(len(ev.Content))
	}
	avgSize := totalSize / int64(count)

	fmt.Printf("Generated %d events:\n", count)
	fmt.Printf(" Average content size: %d bytes\n", avgSize)
	fmt.Printf(" All events are unique (incremental timestamps)\n")
	fmt.Printf(" All events are properly signed\n\n")

	return events
}
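
// The cachedEvents and eventCacheMu fields on BenchmarkAdapter are reserved
// for caching these generated events, but generateEvents regenerates them on
// every call in this file. A sketch of how a cache could be wired in
// (hypothetical, not part of the current implementation):
//
//	ba.eventCacheMu.Lock()
//	defer ba.eventCacheMu.Unlock()
//	if len(ba.cachedEvents) >= count {
//		return ba.cachedEvents[:count]
//	}
//	events := ... // generate and sign as above
//	ba.cachedEvents = events
//	return events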

func (ba *BenchmarkAdapter) printResult(r *BenchmarkResult) {
	fmt.Printf("\nResults for %s:\n", r.TestName)
	fmt.Printf(" Duration: %v\n", r.Duration)
	fmt.Printf(" Total Events: %d\n", r.TotalEvents)
	fmt.Printf(" Events/sec: %.2f\n", r.EventsPerSecond)
	fmt.Printf(" Success Rate: %.2f%%\n", r.SuccessRate)
	fmt.Printf(" Workers: %d\n", r.ConcurrentWorkers)
	fmt.Printf(" Memory Used: %.2f MB\n", float64(r.MemoryUsed)/1024/1024)

	if r.AvgLatency > 0 {
		fmt.Printf(" Avg Latency: %v\n", r.AvgLatency)
		fmt.Printf(" P90 Latency: %v\n", r.P90Latency)
		fmt.Printf(" P95 Latency: %v\n", r.P95Latency)
		fmt.Printf(" P99 Latency: %v\n", r.P99Latency)
		fmt.Printf(" Bottom 10%% Avg: %v\n", r.Bottom10Avg)
	}

	if len(r.Errors) > 0 {
		fmt.Printf(" Errors: %d\n", len(r.Errors))
		// Print the first few errors as samples.
		sampleCount := 3
		if len(r.Errors) < sampleCount {
			sampleCount = len(r.Errors)
		}
		for i := 0; i < sampleCount; i++ {
			fmt.Printf(" Sample %d: %s\n", i+1, r.Errors[i])
		}
	}
}
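
// getMemUsage is defined elsewhere in this package. A minimal sketch of the
// assumed behavior (current heap allocation via runtime.MemStats):
//
//	func getMemUsage() uint64 {
//		var m runtime.MemStats
//		runtime.ReadMemStats(&m)
//		return m.Alloc
//	}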

// GenerateReport prints a summary of all collected benchmark results.
// (Writing results to a file is left to the main benchmark report generator.)
func (ba *BenchmarkAdapter) GenerateReport() {
	fmt.Println("\n=== Benchmark Results Summary ===")
	ba.mu.RLock()
	defer ba.mu.RUnlock()

	for _, result := range ba.results {
		ba.printResult(result)
	}
}

// GenerateAsciidocReport is a placeholder for asciidoc report generation.
func (ba *BenchmarkAdapter) GenerateAsciidocReport() {
	// TODO: Implement asciidoc report generation.
	fmt.Println("Asciidoc report generation not yet implemented for adapter")
}

// calculateAverage returns the mean of the given durations (0 if empty).
func calculateAverage(durations []time.Duration) time.Duration {
	if len(durations) == 0 {
		return 0
	}

	var total time.Duration
	for _, d := range durations {
		total += d
	}
	return total / time.Duration(len(durations))
}