Add Neo4j memory tuning config and query result limits (v0.43.0)
- Add Neo4j driver config options for memory management:
  - ORLY_NEO4J_MAX_CONN_POOL (default: 25) - connection pool size
  - ORLY_NEO4J_FETCH_SIZE (default: 1000) - records per batch
  - ORLY_NEO4J_MAX_TX_RETRY_SEC (default: 30) - transaction retry timeout
  - ORLY_NEO4J_QUERY_RESULT_LIMIT (default: 10000) - max results per query
- Apply driver settings when creating Neo4j connection (pool size, fetch size, retry time)
- Enforce query result limit as safety cap on all Cypher queries
- Fix QueryForSerials and QueryForIds to preserve LIMIT clauses
- Add comprehensive memory tuning documentation with sizing guidelines
- Add NIP-46 signer-based authentication for bunker connections
- Update go.mod with new dependencies

Files modified:

- app/config/config.go: Add Neo4j driver tuning config vars
- main.go: Pass new config values to database factory
- pkg/database/factory.go: Add Neo4j tuning fields to DatabaseConfig
- pkg/database/factory_wasm.go: Mirror factory.go changes for WASM
- pkg/neo4j/neo4j.go: Apply driver config, add getter methods
- pkg/neo4j/query-events.go: Enforce query result limit, fix LIMIT preservation
- docs/NEO4J_BACKEND.md: Add Memory Tuning section, update Docker example
- CLAUDE.md: Add Neo4j memory tuning quick reference
- app/handle-req.go: NIP-46 signer authentication
- app/publisher.go: HasActiveNIP46Signer check
- pkg/protocol/publish/publisher.go: NIP46SignerChecker interface
- go.mod: Add dependencies

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
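The four environment variables above feed the driver settings through the config layer. A minimal sketch of how they map to their defaults, assuming standard env parsing (the envInt helper is hypothetical; the actual wiring in app/config/config.go and pkg/database/factory.go is not shown in this diff):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// envInt is a hypothetical helper: read an integer environment variable,
// falling back to the given default when the variable is unset or unparsable.
func envInt(key string, def int) int {
	if v := os.Getenv(key); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

func main() {
	// Defaults match the values listed in the commit message.
	maxConnPool := envInt("ORLY_NEO4J_MAX_CONN_POOL", 25)
	fetchSize := envInt("ORLY_NEO4J_FETCH_SIZE", 1000)
	maxTxRetrySec := envInt("ORLY_NEO4J_MAX_TX_RETRY_SEC", 30)
	queryResultLimit := envInt("ORLY_NEO4J_QUERY_RESULT_LIMIT", 10000)
	fmt.Println(maxConnPool, fetchSize, maxTxRetrySec, queryResultLimit)
}
```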
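The query result safety cap takes the smaller of the requested filter limit and the configured maximum, and a configured maximum of 0 disables the cap. A standalone sketch of that rule (the effectiveLimit helper function is illustrative; the enforcement itself lives in pkg/neo4j/query-events.go, shown in the diff below):

```go
package main

import "fmt"

// effectiveLimit mirrors the cap behaviour described in the commit message:
// the requested limit is honoured unless it exceeds the configured maximum,
// and a configured maximum of 0 means "no cap".
func effectiveLimit(requested, configuredMax int) int {
	if configuredMax > 0 && (requested == 0 || requested > configuredMax) {
		return configuredMax
	}
	return requested
}

func main() {
	fmt.Println(effectiveLimit(500, 10000))   // 500   - requested limit kept
	fmt.Println(effectiveLimit(50000, 10000)) // 10000 - capped at the configured limit
	fmt.Println(effectiveLimit(0, 10000))     // 10000 - unbounded request falls back to the cap
	fmt.Println(effectiveLimit(500, 0))       // 500   - cap disabled, requested limit used as-is
}
```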
--- a/pkg/neo4j/neo4j.go
+++ b/pkg/neo4j/neo4j.go
@@ -20,9 +20,14 @@ import (
 	"next.orly.dev/pkg/utils/apputil"
 )
 
-// maxConcurrentQueries limits the number of concurrent Neo4j queries to prevent
-// authentication rate limiting and connection exhaustion
-const maxConcurrentQueries = 10
+// Default configuration values (used when config values are 0 or not set)
+const (
+	defaultMaxConcurrentQueries = 10
+	defaultMaxConnPoolSize = 25
+	defaultFetchSize = 1000
+	defaultMaxTxRetrySeconds = 30
+	defaultQueryResultLimit = 10000
+)
 
 // maxRetryAttempts is the maximum number of times to retry a query on rate limit
 const maxRetryAttempts = 3
@@ -45,6 +50,12 @@ type N struct {
 	neo4jUser string
 	neo4jPassword string
 
+	// Driver tuning options
+	maxConnPoolSize int // max connections in pool
+	fetchSize int // records per fetch batch
+	maxTxRetryTime time.Duration
+	queryResultLimit int // max results per query (0=unlimited)
+
 	ready chan struct{} // Closed when database is ready to serve requests
 
 	// querySem limits concurrent queries to prevent rate limiting
@@ -118,16 +129,38 @@ func NewWithConfig(
 		neo4jPassword = "password"
 	}
 
+	// Apply defaults for driver tuning options
+	maxConnPoolSize := cfg.Neo4jMaxConnPoolSize
+	if maxConnPoolSize <= 0 {
+		maxConnPoolSize = defaultMaxConnPoolSize
+	}
+	fetchSize := cfg.Neo4jFetchSize
+	if fetchSize == 0 {
+		fetchSize = defaultFetchSize
+	}
+	maxTxRetrySeconds := cfg.Neo4jMaxTxRetrySeconds
+	if maxTxRetrySeconds <= 0 {
+		maxTxRetrySeconds = defaultMaxTxRetrySeconds
+	}
+	queryResultLimit := cfg.Neo4jQueryResultLimit
+	if queryResultLimit == 0 {
+		queryResultLimit = defaultQueryResultLimit
+	}
+
 	n = &N{
-		ctx: ctx,
-		cancel: cancel,
-		dataDir: cfg.DataDir,
-		Logger: NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
-		neo4jURI: neo4jURI,
-		neo4jUser: neo4jUser,
-		neo4jPassword: neo4jPassword,
-		ready: make(chan struct{}),
-		querySem: make(chan struct{}, maxConcurrentQueries),
+		ctx: ctx,
+		cancel: cancel,
+		dataDir: cfg.DataDir,
+		Logger: NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
+		neo4jURI: neo4jURI,
+		neo4jUser: neo4jUser,
+		neo4jPassword: neo4jPassword,
+		maxConnPoolSize: maxConnPoolSize,
+		fetchSize: fetchSize,
+		maxTxRetryTime: time.Duration(maxTxRetrySeconds) * time.Second,
+		queryResultLimit: queryResultLimit,
+		ready: make(chan struct{}),
+		querySem: make(chan struct{}, defaultMaxConcurrentQueries),
 	}
 
 	// Ensure the data directory exists
@@ -191,12 +224,24 @@ func New(
 
 // initNeo4jClient establishes connection to Neo4j server
 func (n *N) initNeo4jClient() error {
-	n.Logger.Infof("connecting to neo4j at %s", n.neo4jURI)
+	n.Logger.Infof("connecting to neo4j at %s (pool=%d, fetch=%d, txRetry=%v)",
+		n.neo4jURI, n.maxConnPoolSize, n.fetchSize, n.maxTxRetryTime)
 
-	// Create Neo4j driver
+	// Create Neo4j driver with tuned configuration
 	driver, err := neo4j.NewDriverWithContext(
 		n.neo4jURI,
 		neo4j.BasicAuth(n.neo4jUser, n.neo4jPassword, ""),
+		func(config *neo4j.Config) {
+			// Limit connection pool size to reduce memory usage
+			config.MaxConnectionPoolSize = n.maxConnPoolSize
+
+			// Set fetch size to batch records and prevent memory overflow
+			// -1 means fetch all (driver default), positive value limits batch size
+			config.FetchSize = n.fetchSize
+
+			// Set max transaction retry time
+			config.MaxTransactionRetryTime = n.maxTxRetryTime
+		},
 	)
 	if err != nil {
 		return fmt.Errorf("failed to create neo4j driver: %w", err)
@@ -462,3 +507,19 @@ func (n *N) QuerySem() chan struct{} {
 func (n *N) MaxConcurrentQueries() int {
 	return cap(n.querySem)
 }
+
+// QueryResultLimit returns the configured maximum results per query.
+// Returns 0 if unlimited (no limit applied).
+func (n *N) QueryResultLimit() int {
+	return n.queryResultLimit
+}
+
+// FetchSize returns the configured fetch batch size.
+func (n *N) FetchSize() int {
+	return n.fetchSize
+}
+
+// MaxConnPoolSize returns the configured connection pool size.
+func (n *N) MaxConnPoolSize() int {
+	return n.maxConnPoolSize
+}
--- a/pkg/neo4j/query-events.go
+++ b/pkg/neo4j/query-events.go
@@ -223,10 +223,25 @@ RETURN e.id AS id,
 	// Add ordering (most recent first)
 	orderClause := " ORDER BY e.created_at DESC"
 
-	// Add limit if specified
+	// Add limit - use the smaller of requested limit and configured max limit
+	// This prevents unbounded queries that could exhaust memory
 	limitClause := ""
+	requestedLimit := 0
 	if f.Limit != nil && *f.Limit > 0 {
-		params["limit"] = *f.Limit
+		requestedLimit = int(*f.Limit)
 	}
+
+	// Apply the configured query result limit as a safety cap
+	// If queryResultLimit is 0 (unlimited), only use the requested limit
+	effectiveLimit := requestedLimit
+	if n.queryResultLimit > 0 {
+		if effectiveLimit == 0 || effectiveLimit > n.queryResultLimit {
+			effectiveLimit = n.queryResultLimit
+		}
+	}
+
+	if effectiveLimit > 0 {
+		params["limit"] = effectiveLimit
+		limitClause = " LIMIT $limit"
+	}
 
@@ -358,11 +373,16 @@ func (n *N) QueryForSerials(c context.Context, f *filter.F) (
 		return nil, fmt.Errorf("invalid query structure")
 	}
 
-	// Rebuild query with serial-only return
+	// Rebuild query with serial-only return, preserving ORDER BY and LIMIT
 	cypher = cypherParts[0] + returnClause
-	if strings.Contains(cypherParts[1], "ORDER BY") {
-		orderPart := " ORDER BY" + strings.Split(cypherParts[1], "ORDER BY")[1]
-		cypher += orderPart
+	remainder := cypherParts[1]
+	if strings.Contains(remainder, "ORDER BY") {
+		orderAndLimit := " ORDER BY" + strings.Split(remainder, "ORDER BY")[1]
+		cypher += orderAndLimit
+	} else if strings.Contains(remainder, "LIMIT") {
+		// No ORDER BY but has LIMIT
+		limitPart := " LIMIT" + strings.Split(remainder, "LIMIT")[1]
+		cypher += limitPart
 	}
 
 	result, err := n.ExecuteRead(c, cypher, params)
@@ -417,10 +437,16 @@ func (n *N) QueryForIds(c context.Context, f *filter.F) (
 		return nil, fmt.Errorf("invalid query structure")
 	}
 
+	// Rebuild query preserving ORDER BY and LIMIT
 	cypher = cypherParts[0] + returnClause
-	if strings.Contains(cypherParts[1], "ORDER BY") {
-		orderPart := " ORDER BY" + strings.Split(cypherParts[1], "ORDER BY")[1]
-		cypher += orderPart
+	remainder := cypherParts[1]
+	if strings.Contains(remainder, "ORDER BY") {
+		orderAndLimit := " ORDER BY" + strings.Split(remainder, "ORDER BY")[1]
+		cypher += orderAndLimit
+	} else if strings.Contains(remainder, "LIMIT") {
+		// No ORDER BY but has LIMIT
+		limitPart := " LIMIT" + strings.Split(remainder, "LIMIT")[1]
+		cypher += limitPart
 	}
 
 	result, err := n.ExecuteRead(c, cypher, params)
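For the LIMIT-preservation fix in QueryForSerials and QueryForIds, the query is rebuilt by keeping everything before RETURN, substituting the new RETURN clause, and re-attaching the ORDER BY tail (which carries any trailing LIMIT) or a bare LIMIT. A standalone sketch under those assumptions (the rebuild helper and the sample Cypher string are illustrative, not code from the repository):

```go
package main

import (
	"fmt"
	"strings"
)

// rebuild keeps everything before RETURN, swaps in a new RETURN clause, and
// re-attaches ORDER BY (which carries any trailing LIMIT) or a bare LIMIT,
// mirroring the preservation logic introduced in this commit.
func rebuild(cypher, returnClause string) string {
	parts := strings.SplitN(cypher, "RETURN", 2)
	if len(parts) != 2 {
		return cypher
	}
	out := parts[0] + returnClause
	remainder := parts[1]
	if strings.Contains(remainder, "ORDER BY") {
		out += " ORDER BY" + strings.Split(remainder, "ORDER BY")[1]
	} else if strings.Contains(remainder, "LIMIT") {
		out += " LIMIT" + strings.Split(remainder, "LIMIT")[1]
	}
	return out
}

func main() {
	q := "MATCH (e:Event) RETURN e.id AS id ORDER BY e.created_at DESC LIMIT $limit"
	fmt.Println(rebuild(q, "RETURN e.serial AS serial"))
	// MATCH (e:Event) RETURN e.serial AS serial ORDER BY e.created_at DESC LIMIT $limit
}
```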