class Rack::Attack
  # Back throttle counters with Redis so limits are shared across all
  # application servers (an in-process store would give each server its
  # own independent counters, multiplying the effective limit).
  Rack::Attack.cache.store = ActiveSupport::Cache::RedisCacheStore.new(url: ENV['REDIS_URL'])

  # Never throttle loopback traffic (health checks, local console access).
  safelist('allow-localhost') do |req|
    req.ip == '127.0.0.1' || req.ip == '::1'
  end

  # Tight per-IP limit on login/auth POSTs to slow credential stuffing
  # and brute-force attempts.
  throttle('api/v1/auth', limit: 5, period: 1.minute) do |req|
    req.ip if req.path.start_with?('/api/v1/auth') && req.post?
  end

  # Per-user limit for authenticated API traffic, keyed by the user_id
  # claim so a client cannot dodge the limit by rotating IPs.
  throttle('api/v1/authenticated', limit: 300, period: 5.minutes) do |req|
    if req.path.start_with?('/api/v1') && req.env['HTTP_AUTHORIZATION'].present?
      token = req.env['HTTP_AUTHORIZATION'].split(' ').last
      begin
        payload = JwtService.decode(token)
        # Fall back to the client IP when the token decodes but carries no
        # user_id, so such requests are still throttled rather than exempt.
        # NOTE(review): assumes the payload uses the string key 'user_id' —
        # confirm against JwtService.
        (payload && payload['user_id']) || req.ip
      rescue StandardError
        # A malformed/expired token must neither raise (that would 500 the
        # request inside the middleware) nor bypass throttling entirely
        # (the anonymous rule below only matches blank Authorization
        # headers) — count it against the IP instead.
        req.ip
      end
    end
  end

  # Stricter per-IP limit for unauthenticated API traffic.
  throttle('api/v1/anonymous', limit: 60, period: 5.minutes) do |req|
    req.ip if req.path.start_with?('/api/v1') && req.env['HTTP_AUTHORIZATION'].blank?
  end

  # JSON 429 response with a Retry-After header so well-behaved clients
  # can back off. Retry-After is the matched rule's period in seconds.
  # NOTE(review): `throttled_response` is deprecated in Rack::Attack >= 6.1
  # in favor of `throttled_responder` (which receives the request object,
  # not the raw env) — migrate when upgrading the gem.
  self.throttled_response = ->(env) {
    retry_after = (env['rack.attack.match_data'] || {})[:period]
    [
      429,
      { 'Content-Type' => 'application/json', 'Retry-After' => retry_after.to_s },
      [{ error: 'RATE_LIMIT_EXCEEDED', retry_after: retry_after }.to_json]
    ]
  }
end
Rate limiting is essential protection against abuse and ensures fair resource distribution across API consumers. Rack::Attack with Redis backing provides a robust, shared-state solution that works across multiple application servers. I define different throttle rules for authenticated vs anonymous users, with more generous limits for known clients. Crucially, requests carrying an invalid or expired token must still be throttled (falling back to the client IP) rather than raising or slipping past both rules. The key is to return proper 429 Too Many Requests responses with Retry-After headers so well-behaved clients can back off gracefully. I also configure safelist rules for trusted sources — localhost here, and typically internal services and admin IPs in production. For production systems, I monitor throttle hits via metrics to distinguish between legitimate traffic spikes and actual attacks, which informs capacity planning decisions.