Overview
Rate limiting is a critical security and performance mechanism that controls how many requests a client can make to the Strike Auth Service within a given time window. It protects against abuse, helps mitigate denial-of-service attacks, ensures fair resource usage, and preserves service quality for all users.

Rate Limiting Strategies
Fixed Window Rate Limiting
The simplest approach: a per-client counter that resets at fixed intervals. It is cheap to implement, but a burst that straddles a window boundary can briefly let through up to twice the configured limit.
class FixedWindowLimiter {
  constructor(maxRequests, windowMs) {
    this.maxRequests = maxRequests;
    this.windowMs = windowMs;
    // In-memory store; note that expired window keys are never evicted,
    // so a long-running process should clean this Map periodically.
    this.requests = new Map();
  }

  isAllowed(key) {
    const now = Date.now();
    // Align the window start to a fixed boundary (e.g. the top of the minute)
    const windowStart = Math.floor(now / this.windowMs) * this.windowMs;
    const windowKey = `${key}:${windowStart}`;
    const currentCount = this.requests.get(windowKey) || 0;

    if (currentCount >= this.maxRequests) {
      return {
        allowed: false,
        remaining: 0,
        resetTime: windowStart + this.windowMs
      };
    }

    this.requests.set(windowKey, currentCount + 1);
    return {
      allowed: true,
      remaining: this.maxRequests - currentCount - 1,
      resetTime: windowStart + this.windowMs
    };
  }
}
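A minimal usage sketch for the in-memory limiter above; the 10-requests-per-minute values and the client key are illustrative:

// Illustrative values: 10 requests per 60-second window, keyed by client IP
const fixedLimiter = new FixedWindowLimiter(10, 60 * 1000);

const check = fixedLimiter.isAllowed('203.0.113.7');
if (check.allowed) {
  console.log(`Allowed, ${check.remaining} requests left in this window`);
} else {
  console.log(`Blocked until ${new Date(check.resetTime).toISOString()}`);
}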
Sliding Window Rate Limiting
A more accurate approach that maintains a rolling window of request timestamps, closing the boundary-burst gap of the fixed window.
class SlidingWindowLimiter {
  constructor(maxRequests, windowMs) {
    this.maxRequests = maxRequests;
    this.windowMs = windowMs;
    this.requests = new Map();
  }

  isAllowed(key) {
    const now = Date.now();
    const userRequests = this.requests.get(key) || [];

    // Remove requests outside the current window
    const validRequests = userRequests.filter(
      timestamp => now - timestamp < this.windowMs
    );

    if (validRequests.length >= this.maxRequests) {
      const oldestRequest = Math.min(...validRequests);
      return {
        allowed: false,
        remaining: 0,
        resetTime: oldestRequest + this.windowMs
      };
    }

    validRequests.push(now);
    this.requests.set(key, validRequests);
    return {
      allowed: true,
      remaining: this.maxRequests - validRequests.length,
      resetTime: now + this.windowMs
    };
  }
}
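To see why this is more accurate, consider the boundary burst described above; a small sketch using the class as written (the 100-request limit is illustrative):

// Illustrative: 100 requests per rolling 60-second window
const sliding = new SlidingWindowLimiter(100, 60 * 1000);

// A burst of 100 requests fills the window...
for (let i = 0; i < 100; i++) sliding.isAllowed('client-a');

// ...and the next request is rejected until the oldest timestamps age out,
// even though a fixed-window counter would simply reset at the boundary.
console.log(sliding.isAllowed('client-a').allowed); // false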
Token Bucket Rate Limiting
Allows short bursts of traffic while enforcing an average rate over time.
class TokenBucketLimiter {
  constructor(capacity, refillRate, refillPeriod) {
    this.capacity = capacity;
    this.refillRate = refillRate;     // tokens added per refill period
    this.refillPeriod = refillPeriod; // refill period in milliseconds
    this.buckets = new Map();
  }

  isAllowed(key, tokens = 1) {
    const now = Date.now();
    let bucket = this.buckets.get(key) || {
      tokens: this.capacity,
      lastRefill: now
    };

    // Refill tokens for every full refill period that has elapsed.
    // Advance lastRefill only by whole periods; unconditionally setting it to
    // `now` would discard partial progress, so frequent calls would prevent
    // the bucket from ever refilling.
    const elapsedPeriods = Math.floor((now - bucket.lastRefill) / this.refillPeriod);
    if (elapsedPeriods > 0) {
      bucket.tokens = Math.min(this.capacity, bucket.tokens + elapsedPeriods * this.refillRate);
      bucket.lastRefill += elapsedPeriods * this.refillPeriod;
    }

    if (bucket.tokens >= tokens) {
      bucket.tokens -= tokens;
      this.buckets.set(key, bucket);
      return {
        allowed: true,
        remaining: bucket.tokens,
        resetTime: null
      };
    }

    this.buckets.set(key, bucket);
    return {
      allowed: false,
      remaining: bucket.tokens,
      resetTime: bucket.lastRefill + (this.refillPeriod * Math.ceil((tokens - bucket.tokens) / this.refillRate))
    };
  }
}
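A short usage sketch showing the burst behavior; the capacity of 10 tokens refilled at 1 token per second is illustrative:

// Illustrative: bucket of 10 tokens, refilling 1 token every 1000 ms
const bucket = new TokenBucketLimiter(10, 1, 1000);

// A burst of 10 requests is served immediately from the full bucket...
for (let i = 0; i < 10; i++) console.log(bucket.isAllowed('client-a').allowed); // true

// ...then requests are rejected until enough tokens have been refilled
const denied = bucket.isAllowed('client-a');
console.log(denied.allowed, new Date(denied.resetTime).toISOString()); // false, ~1 s later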
Implementation with Express.js
Basic Rate Limiting Middleware
const rateLimit = require('express-rate-limit');

// Factory for rate limiters with shared defaults
const createRateLimiter = (options) => {
  return rateLimit({
    windowMs: options.windowMs || 15 * 60 * 1000, // 15 minutes
    max: options.max || 100,
    message: options.message || 'Too many requests from this IP',
    standardHeaders: true,
    legacyHeaders: false,
    keyGenerator: options.keyGenerator || ((req) => req.ip),
    skip: options.skip || (() => false),
    // Note: onLimitReached was removed in express-rate-limit v7; on newer
    // versions, use the handler option for custom limit-reached behavior.
    onLimitReached: options.onLimitReached || (() => {}),
    ...options
  });
};

// Authentication endpoints - strict limits
const authLimiter = createRateLimiter({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 5, // 5 attempts per window
  message: {
    error: 'Too many authentication attempts',
    retryAfter: 15 * 60 // seconds
  },
  skipSuccessfulRequests: true
});

// General API endpoints
const apiLimiter = createRateLimiter({
  windowMs: 15 * 60 * 1000,
  max: 100,
  message: {
    error: 'Rate limit exceeded',
    retryAfter: 15 * 60
  }
});

// Apply to routes
app.use('/auth/login', authLimiter);
app.use('/auth/signup', authLimiter);
app.use('/auth/recover', authLimiter);
app.use('/api/', apiLimiter);
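If the service sits behind a reverse proxy or load balancer, req.ip will be the proxy's address and every client will share one rate-limit bucket; Express must be told how many proxy hops to trust. The single-hop value below is an assumption about your deployment:

// Trust exactly one proxy hop so req.ip reflects the real client address.
// Adjust the value (or use a custom function) to match your actual topology.
app.set('trust proxy', 1);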
Advanced Rate Limiting Strategies
User-Based Rate Limiting
// Rate limiting based on authenticated user
const userBasedLimiter = createRateLimiter({
  windowMs: 15 * 60 * 1000,
  max: (req) => {
    // Different limits based on user role
    if (req.user?.role === 'premium') return 1000;
    if (req.user?.role === 'admin') return 10000;
    return 100; // default for regular users
  },
  keyGenerator: (req) => {
    // Use user ID if authenticated, otherwise IP
    return req.user?.id || req.ip;
  },
  skip: (req) => {
    // Skip rate limiting for admin users
    return req.user?.role === 'admin';
  }
});
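Because both max and keyGenerator read req.user, this limiter only works if it runs after the middleware that authenticates the request; a sketch, where authenticateToken is a hypothetical stand-in for your own auth middleware:

// authenticateToken is a placeholder for whatever middleware verifies the
// session or JWT and populates req.user; it must run before the limiter.
app.use('/api/', authenticateToken, userBasedLimiter);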
Endpoint-Specific Rate Limiting
// Different limits for different endpoints
const endpointLimiters = {
  login: createRateLimiter({
    windowMs: 15 * 60 * 1000,
    max: 5,
    message: 'Too many login attempts'
  }),
  signup: createRateLimiter({
    windowMs: 60 * 60 * 1000, // 1 hour
    max: 3,
    message: 'Too many signup attempts'
  }),
  passwordReset: createRateLimiter({
    windowMs: 60 * 60 * 1000, // 1 hour
    max: 3,
    message: 'Too many password reset requests'
  }),
  emailVerification: createRateLimiter({
    windowMs: 5 * 60 * 1000, // 5 minutes
    max: 3,
    message: 'Too many verification emails sent'
  })
};

// Apply specific limiters
app.use('/auth/login', endpointLimiters.login);
app.use('/auth/signup', endpointLimiters.signup);
app.use('/auth/recover', endpointLimiters.passwordReset);
app.use('/auth/resend', endpointLimiters.emailVerification);
Redis-Based Rate Limiting
For distributed deployments with multiple service instances, store rate limiting state in Redis so that all instances share the same counters.

Redis Implementation
// The pipeline()/exec() usage below follows the ioredis client API
const Redis = require('ioredis');
const client = new Redis();

class RedisRateLimiter {
  constructor(maxRequests, windowMs, keyPrefix = 'rate_limit') {
    this.maxRequests = maxRequests;
    this.windowMs = windowMs;
    this.keyPrefix = keyPrefix;
  }

  async isAllowed(identifier) {
    const key = `${this.keyPrefix}:${identifier}`;
    const now = Date.now();
    const windowStart = Math.floor(now / this.windowMs) * this.windowMs;
    const windowKey = `${key}:${windowStart}`;

    try {
      // Use a Redis pipeline so the increment and expiry are sent together
      const pipeline = client.pipeline();
      pipeline.incr(windowKey);
      pipeline.expire(windowKey, Math.ceil(this.windowMs / 1000));
      const results = await pipeline.exec();
      const currentCount = results[0][1]; // ioredis returns [error, result] pairs

      if (currentCount > this.maxRequests) {
        return {
          allowed: false,
          remaining: 0,
          resetTime: windowStart + this.windowMs,
          totalHits: currentCount
        };
      }

      return {
        allowed: true,
        remaining: this.maxRequests - currentCount,
        resetTime: windowStart + this.windowMs,
        totalHits: currentCount
      };
    } catch (error) {
      console.error('Redis rate limiter error:', error);
      // Fail open - allow request if Redis is down
      return { allowed: true, remaining: this.maxRequests, resetTime: now + this.windowMs };
    }
  }
}

// Usage with Express
const redisLimiter = new RedisRateLimiter(100, 15 * 60 * 1000);

const redisRateLimitMiddleware = async (req, res, next) => {
  const identifier = req.user?.id || req.ip;
  const result = await redisLimiter.isAllowed(identifier);

  // Set rate limit headers
  res.set({
    'X-RateLimit-Limit': redisLimiter.maxRequests,
    'X-RateLimit-Remaining': result.remaining,
    'X-RateLimit-Reset': new Date(result.resetTime).toISOString()
  });

  if (!result.allowed) {
    return res.status(429).json({
      error: 'Rate limit exceeded',
      retryAfter: Math.ceil((result.resetTime - Date.now()) / 1000)
    });
  }

  next();
};
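A sketch of wiring this up; the route prefix is illustrative, and the error listener simply makes the fail-open path visible in the logs:

// Surface connection problems so silent fail-open behavior is noticeable
client.on('error', (err) => console.error('Redis connection error:', err));

// Apply the Redis-backed limiter to all API routes (prefix is illustrative)
app.use('/api/', redisRateLimitMiddleware);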
Sliding Window with Redis
class RedisSlidingWindowLimiter {
  constructor(maxRequests, windowMs, keyPrefix = 'sliding_limit') {
    this.maxRequests = maxRequests;
    this.windowMs = windowMs;
    this.keyPrefix = keyPrefix;
  }

  async isAllowed(identifier) {
    const key = `${this.keyPrefix}:${identifier}`;
    const now = Date.now();
    const windowStart = now - this.windowMs;
    // Keep a handle on the member we add so it can be removed if the request is rejected
    const member = `${now}-${Math.random()}`;

    try {
      const pipeline = client.pipeline();
      // Remove old entries
      pipeline.zremrangebyscore(key, 0, windowStart);
      // Count current entries
      pipeline.zcard(key);
      // Add current request
      pipeline.zadd(key, now, member);
      // Set expiry
      pipeline.expire(key, Math.ceil(this.windowMs / 1000));
      const results = await pipeline.exec();
      const currentCount = results[1][1];

      if (currentCount >= this.maxRequests) {
        // Remove the member we just added since the request is not allowed
        await client.zrem(key, member);
        return {
          allowed: false,
          remaining: 0,
          resetTime: now + this.windowMs
        };
      }

      return {
        allowed: true,
        remaining: this.maxRequests - currentCount - 1,
        resetTime: now + this.windowMs
      };
    } catch (error) {
      console.error('Redis sliding window limiter error:', error);
      return { allowed: true, remaining: this.maxRequests, resetTime: now + this.windowMs };
    }
  }
}
Rate Limiting Patterns
Hierarchical Rate Limiting
Apply several layers of rate limiting (global, per IP, per user, per endpoint) so that no single dimension of abuse slips through.
class HierarchicalRateLimiter {
  constructor() {
    this.limiters = {
      // Global rate limiting
      global: new RedisRateLimiter(10000, 60 * 1000), // 10k requests per minute globally
      // Per-IP rate limiting
      ip: new RedisRateLimiter(100, 15 * 60 * 1000), // 100 requests per 15 minutes per IP
      // Per-user rate limiting
      user: new RedisRateLimiter(1000, 15 * 60 * 1000), // 1000 requests per 15 minutes per user
      // Per-endpoint rate limiting
      endpoint: new Map([
        ['/auth/login', new RedisRateLimiter(5, 15 * 60 * 1000)],
        ['/auth/signup', new RedisRateLimiter(3, 60 * 60 * 1000)],
        ['/auth/recover', new RedisRateLimiter(3, 60 * 60 * 1000)]
      ])
    };
  }

  async checkLimits(req) {
    const checks = [];

    // Global check
    checks.push({
      name: 'global',
      result: await this.limiters.global.isAllowed('global')
    });

    // IP-based check
    checks.push({
      name: 'ip',
      result: await this.limiters.ip.isAllowed(req.ip)
    });

    // User-based check (if authenticated)
    if (req.user?.id) {
      checks.push({
        name: 'user',
        result: await this.limiters.user.isAllowed(req.user.id)
      });
    }

    // Endpoint-specific check
    const endpointLimiter = this.limiters.endpoint.get(req.path);
    if (endpointLimiter) {
      const identifier = req.user?.id || req.ip;
      checks.push({
        name: 'endpoint',
        result: await endpointLimiter.isAllowed(identifier)
      });
    }

    // Find the most restrictive limit
    const failedCheck = checks.find(check => !check.result.allowed);
    if (failedCheck) {
      return {
        allowed: false,
        reason: failedCheck.name,
        ...failedCheck.result
      };
    }

    // Return the most restrictive remaining count
    const minRemaining = Math.min(...checks.map(check => check.result.remaining));
    return {
      allowed: true,
      remaining: minRemaining,
      resetTime: Math.max(...checks.map(check => check.result.resetTime))
    };
  }
}

// Middleware implementation
const hierarchicalLimiter = new HierarchicalRateLimiter();

const hierarchicalRateLimitMiddleware = async (req, res, next) => {
  const result = await hierarchicalLimiter.checkLimits(req);

  res.set({
    'X-RateLimit-Remaining': result.remaining,
    'X-RateLimit-Reset': new Date(result.resetTime).toISOString()
  });

  if (!result.allowed) {
    return res.status(429).json({
      error: `Rate limit exceeded (${result.reason})`,
      retryAfter: Math.ceil((result.resetTime - Date.now()) / 1000)
    });
  }

  next();
};
Adaptive Rate Limiting
Dynamically adjust rate limits based on system load and user behavior.
class AdaptiveRateLimiter {
  constructor() {
    this.baseLimits = {
      login: 5,
      signup: 3,
      api: 100
    };
    this.systemLoad = 0; // 0-1 scale
    this.suspiciousIPs = new Set();
  }

  async getAdjustedLimit(endpoint, identifier, req) {
    let baseLimit = this.baseLimits[endpoint] || this.baseLimits.api;

    // Adjust based on system load
    const loadMultiplier = Math.max(0.1, 1 - this.systemLoad);
    baseLimit = Math.floor(baseLimit * loadMultiplier);

    // Reduce limits for suspicious IPs
    if (this.suspiciousIPs.has(req.ip)) {
      baseLimit = Math.floor(baseLimit * 0.5);
    }

    // Increase limits for trusted users
    if (req.user?.trusted) {
      baseLimit = Math.floor(baseLimit * 2);
    }

    // Reduce limits during peak hours
    const hour = new Date().getHours();
    if (hour >= 9 && hour <= 17) { // Business hours
      baseLimit = Math.floor(baseLimit * 0.8);
    }

    return Math.max(1, baseLimit); // Ensure at least 1 request is allowed
  }

  async updateSystemLoad() {
    // Monitor system metrics and update load.
    // getSystemMetrics() is a placeholder hook for your monitoring system;
    // it is expected to return normalized (0-1) cpu, memory, and responseTime values.
    const metrics = await this.getSystemMetrics();
    this.systemLoad = (metrics.cpu + metrics.memory + metrics.responseTime) / 3;
  }

  markSuspiciousIP(ip) {
    this.suspiciousIPs.add(ip);
    // Remove after 1 hour
    setTimeout(() => this.suspiciousIPs.delete(ip), 60 * 60 * 1000);
  }
}
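A sketch of how the adaptive limiter might be wired in. The 30-second refresh interval and the failed-login hook are assumptions, and updateSystemLoad only works once you supply a getSystemMetrics() implementation:

const adaptive = new AdaptiveRateLimiter();

// Periodically refresh the load estimate (interval is an assumption)
setInterval(() => adaptive.updateSystemLoad().catch(console.error), 30 * 1000);

// Hypothetical hook: flag an IP after a failed login so its limits are halved
function onFailedLogin(req) {
  adaptive.markSuspiciousIP(req.ip);
}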
Rate Limiting Headers
Return the conventional X-RateLimit-* headers, plus Retry-After when a request is rejected, so clients can see their remaining quota and back off correctly.
function setRateLimitHeaders(res, limiter, result) {
  const headers = {
    'X-RateLimit-Limit': limiter.maxRequests,
    'X-RateLimit-Remaining': result.remaining,
    'X-RateLimit-Reset': Math.ceil(result.resetTime / 1000),
    'X-RateLimit-Reset-After': Math.ceil((result.resetTime - Date.now()) / 1000)
  };

  // Add retry-after header if rate limited
  if (!result.allowed) {
    headers['Retry-After'] = Math.ceil((result.resetTime - Date.now()) / 1000);
  }

  res.set(headers);
}

// Enhanced middleware with proper headers
const rateLimitWithHeaders = (limiter) => {
  return async (req, res, next) => {
    const identifier = req.user?.id || req.ip;
    const result = await limiter.isAllowed(identifier);

    setRateLimitHeaders(res, limiter, result);

    if (!result.allowed) {
      return res.status(429).json({
        error: 'Rate limit exceeded',
        message: 'Too many requests, please try again later',
        retryAfter: Math.ceil((result.resetTime - Date.now()) / 1000)
      });
    }

    next();
  };
};
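A usage sketch pairing the wrapper with the Redis sliding-window limiter defined earlier; the limit values are illustrative:

// Illustrative: 5 login attempts per rolling 15-minute window
const loginLimiter = new RedisSlidingWindowLimiter(5, 15 * 60 * 1000);
app.use('/auth/login', rateLimitWithHeaders(loginLimiter));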
Monitoring and Analytics
Rate Limiting Metrics
class RateLimitingMetrics {
  constructor() {
    this.metrics = {
      totalRequests: 0,
      blockedRequests: 0,
      byEndpoint: new Map(),
      byIP: new Map(),
      byUser: new Map()
    };
  }

  recordRequest(endpoint, identifier, blocked = false) {
    this.metrics.totalRequests++;
    if (blocked) {
      this.metrics.blockedRequests++;
    }

    // Track by endpoint
    const endpointStats = this.metrics.byEndpoint.get(endpoint) || { total: 0, blocked: 0 };
    endpointStats.total++;
    if (blocked) endpointStats.blocked++;
    this.metrics.byEndpoint.set(endpoint, endpointStats);

    // Track by identifier
    const identifierStats = this.metrics.byIP.get(identifier) || { total: 0, blocked: 0 };
    identifierStats.total++;
    if (blocked) identifierStats.blocked++;
    this.metrics.byIP.set(identifier, identifierStats);
  }

  getMetrics() {
    return {
      ...this.metrics,
      blockRate: this.metrics.totalRequests > 0
        ? (this.metrics.blockedRequests / this.metrics.totalRequests) * 100
        : 0
    };
  }

  getTopBlockedIPs(limit = 10) {
    return Array.from(this.metrics.byIP.entries())
      .sort((a, b) => b[1].blocked - a[1].blocked)
      .slice(0, limit);
  }
}

// Integration with rate limiting middleware
const metrics = new RateLimitingMetrics();

const monitoredRateLimit = (limiter) => {
  return async (req, res, next) => {
    const identifier = req.user?.id || req.ip;
    const result = await limiter.isAllowed(identifier);

    metrics.recordRequest(req.path, identifier, !result.allowed);
    setRateLimitHeaders(res, limiter, result);

    if (!result.allowed) {
      return res.status(429).json({
        error: 'Rate limit exceeded',
        retryAfter: Math.ceil((result.resetTime - Date.now()) / 1000)
      });
    }

    next();
  };
};
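One way to expose these counters for a dashboard or scraper; the route path is hypothetical and should sit behind your own access control. Maps do not serialize to JSON, so they are converted to plain objects first:

// Hypothetical internal endpoint; restrict access before exposing it
app.get('/internal/rate-limit-metrics', (req, res) => {
  const snapshot = metrics.getMetrics();
  res.json({
    totalRequests: snapshot.totalRequests,
    blockedRequests: snapshot.blockedRequests,
    blockRate: snapshot.blockRate,
    byEndpoint: Object.fromEntries(snapshot.byEndpoint),
    topBlockedIPs: Object.fromEntries(metrics.getTopBlockedIPs(10))
  });
});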
Best Practices
Configuration Management
- Environment-Based Limits: configure different rate limits for development, staging, and production.
- Graceful Degradation: fail open when the rate limiting infrastructure is unavailable.
- Clear Error Messages: provide helpful error messages with retry information.
- Monitoring Integration: track rate limiting metrics and set up alerts.
Implementation Guidelines
// Environment-based configuration
const getRateLimitConfig = () => {
  const env = process.env.NODE_ENV;
  const configs = {
    development: {
      auth: { max: 100, windowMs: 15 * 60 * 1000 },
      api: { max: 1000, windowMs: 15 * 60 * 1000 }
    },
    staging: {
      auth: { max: 10, windowMs: 15 * 60 * 1000 },
      api: { max: 200, windowMs: 15 * 60 * 1000 }
    },
    production: {
      auth: { max: 5, windowMs: 15 * 60 * 1000 },
      api: { max: 100, windowMs: 15 * 60 * 1000 }
    }
  };
  return configs[env] || configs.production;
};

// Graceful error handling: build a limiter from the config and fail open if it throws
const createResilientLimiter = (config) => {
  const limiter = new RedisRateLimiter(config.max, config.windowMs);
  return async (req, res, next) => {
    try {
      const result = await limiter.isAllowed(req.ip);
      if (!result.allowed) {
        return res.status(429).json({
          error: 'Rate limit exceeded',
          message: 'Please wait before making another request',
          retryAfter: Math.ceil((result.resetTime - Date.now()) / 1000)
        });
      }
      next();
    } catch (error) {
      console.error('Rate limiter error:', error);
      // Log error but allow request to proceed
      next();
    }
  };
};
Testing Rate Limits
// Jest test for rate limiting
describe('Rate Limiting', () => {
  test('should block requests after limit exceeded', async () => {
    const limiter = new FixedWindowLimiter(3, 60000); // 3 requests per minute

    // First 3 requests should be allowed
    for (let i = 0; i < 3; i++) {
      const result = limiter.isAllowed('test-ip');
      expect(result.allowed).toBe(true);
    }

    // 4th request should be blocked
    const blockedResult = limiter.isAllowed('test-ip');
    expect(blockedResult.allowed).toBe(false);
    expect(blockedResult.remaining).toBe(0);
  });

  test('should reset after window expires', async () => {
    const limiter = new FixedWindowLimiter(1, 100); // 1 request per 100ms

    // First request allowed
    expect(limiter.isAllowed('test-ip').allowed).toBe(true);
    // Second request blocked
    expect(limiter.isAllowed('test-ip').allowed).toBe(false);

    // Wait for window to reset
    await new Promise(resolve => setTimeout(resolve, 150));

    // Request should be allowed again
    expect(limiter.isAllowed('test-ip').allowed).toBe(true);
  });
});
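For end-to-end coverage of the Express wiring, an integration-style sketch using supertest; it assumes the app (with authLimiter mounted on /auth/login) is exported for tests, and the credentials are illustrative:

const request = require('supertest');
// Assumption: your Express app, with authLimiter applied, is exported from app.js
const app = require('../app');

describe('Auth rate limiting (integration)', () => {
  test('returns 429 once the login limit is exceeded', async () => {
    // authLimiter allows 5 attempts per 15-minute window
    for (let i = 0; i < 5; i++) {
      await request(app).post('/auth/login').send({ email: 'user@example.com', password: 'wrong' });
    }

    const res = await request(app)
      .post('/auth/login')
      .send({ email: 'user@example.com', password: 'wrong' });

    expect(res.status).toBe(429);
  });
});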