# Thin wrapper around Faraday for outbound JSON API calls.
#
# Centralizes connection setup (explicit timeouts, a bounded retry policy,
# and :raise_error so HTTP failures surface as exceptions) and emits one
# structured JSON log line per request so slow or failing outbound calls
# can be correlated via Current.request_id.
class HttpClient
  # @param base_url [String] root URL of the remote service
  # @param logger [Logger] destination for structured request logs
  def initialize(base_url:, logger: Rails.logger)
    @base_url = base_url
    @logger = logger
  end

  # Issues a GET request against +path+, merging +headers+ on top of a
  # JSON Accept header. Faraday::Error subclasses propagate to the caller
  # (raised by the :raise_error middleware, or once retries are exhausted).
  #
  # @param path [String] request path relative to the base URL
  # @param headers [Hash] extra request headers (override nothing but Accept)
  # @return [Faraday::Response]
  def get(path, headers: {})
    with_timing('GET', path) do
      conn.get(path) do |request|
        request.headers['Accept'] = 'application/json'
        headers.each_pair do |name, value|
          request.headers[name] = value
        end
      end
    end
  end

  private

  # Memoized Faraday connection: 2s to open, 5s total, and at most two
  # retries with exponential backoff — but only for transport-level
  # failures (timeouts, connection drops) that a retry can plausibly fix.
  def conn
    @conn ||= begin
      retryable = [Faraday::TimeoutError, Faraday::ConnectionFailed]
      Faraday.new(url: @base_url) do |faraday|
        faraday.options.open_timeout = 2
        faraday.options.timeout = 5
        faraday.request :retry,
                        max: 2,
                        interval: 0.2,
                        backoff_factor: 2,
                        exceptions: retryable
        faraday.response :raise_error
        faraday.adapter Faraday.default_adapter
      end
    end
  end

  # Runs the block and — success or failure, via ensure — logs a structured
  # JSON line with the elapsed time in milliseconds. Uses the monotonic
  # clock so wall-clock adjustments cannot skew the measurement.
  def with_timing(method, path)
    began_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    yield
  ensure
    finished_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    elapsed_ms = ((finished_at - began_at) * 1000).round(1)
    @logger.info({ msg: 'http.request', method: method, path: path, elapsed_ms: elapsed_ms, request_id: Current.request_id }.to_json)
  end
end
I wrapped external HTTP calls once I realized most “flaky APIs” were actually my fault: no timeouts, unclear retries, and logs that didn’t tell a story. In this client, I centralize a single Faraday connection with explicit open_timeout and timeout values, plus a bounded retry policy that only retries the exceptions I can plausibly recover from (timeouts and connection failures). I also use f.response :raise_error so HTTP failures surface as exceptions instead of being silently ignored. The part I lean on the most is with_timing, which measures elapsed time with Process::CLOCK_MONOTONIC — immune to wall-clock jumps — and logs structured JSON including the method, path, and Current.request_id. That makes it trivial to correlate a slow inbound request with a specific outbound call, and it keeps timeout and retry behavior consistent across the app.