How to Use Proxies with Twitter/X API
Twitter/X enforces strict rate limits on their API. Proxy infrastructure helps distribute API calls, access geo-targeted content, and maintain reliable access for social listening, brand monitoring, and research applications.
**Disclaimer**: Comply with Twitter/X Terms of Service and API usage policies. Use authorized API access and stay within rate limits. This guide covers proxy configuration for legitimate API integration.
Twitter/X API Rate Limits
Twitter/X API v2 enforces per-app and per-user rate limits:
| Endpoint | Rate Limit | Window |
|----------|-----------|--------|
| Search tweets | 450 requests | 15 minutes |
| User lookup | 300 requests | 15 minutes |
| Timeline | 1500 requests | 15 minutes |
| Followers | 15 requests | 15 minutes |
Proxy-Enhanced API Client
import httpx
import time
from dataclasses import dataclass  # fix: was missing, so @dataclass raised NameError


@dataclass(frozen=True)
class TwitterConfig:
    """Connection settings for a proxied Twitter/X API client.

    Attributes:
        bearer_token: OAuth 2.0 bearer token for Twitter API v2 authentication.
        proxy_url: Full proxy URL (scheme, credentials, host, port) that all
            API requests are routed through.
    """

    bearer_token: str
    proxy_url: str
def create_twitter_client(config: TwitterConfig, *, timeout: float = 30.0) -> httpx.Client:
    """Build an httpx client that routes every request through the configured proxy.

    Args:
        config: Bearer token and proxy URL for this client.
        timeout: Request timeout in seconds (generalized from the previously
            hard-coded 30; default unchanged, so existing callers are unaffected).

    Returns:
        An ``httpx.Client`` with the Authorization header pre-set; the caller
        is responsible for closing it.
    """
    return httpx.Client(
        proxy=config.proxy_url,
        timeout=timeout,
        headers={
            "Authorization": f"Bearer {config.bearer_token}",
            "Accept": "application/json",
        },
    )
def search_tweets(client: httpx.Client, query: str, max_results: int = 100) -> dict:
    """Run a recent-tweet search through the given client and return the JSON body.

    Raises ``httpx.HTTPStatusError`` on a non-2xx response.
    """
    params = {
        "query": query,
        "max_results": max_results,
        "tweet.fields": "created_at,public_metrics,author_id",
    }
    resp = client.get("https://api.twitter.com/2/tweets/search/recent", params=params)
    resp.raise_for_status()
    return resp.json()
Rate Limit Aware Client
class RateLimitedTwitterClient:
    """Proxied Twitter API client that tracks per-endpoint rate limits.

    Remaining-call counts and window-reset timestamps are read from the
    ``x-rate-limit-*`` response headers after every request; when an endpoint
    is nearly exhausted, the next call sleeps until the window resets.
    """

    def __init__(self, config: "TwitterConfig"):
        self._config = config
        self._client = create_twitter_client(config)
        # Per-endpoint bookkeeping, keyed by our own endpoint labels.
        self._rate_remaining: dict[str, int] = {}
        # FIX: _rate_reset was never initialized, so the first call to
        # _check_rate_limit / _update_rate_limit raised AttributeError.
        self._rate_reset: dict[str, float] = {}

    def _check_rate_limit(self, endpoint: str) -> None:
        """Sleep until the window resets if *endpoint* is (almost) exhausted.

        Unknown endpoints default to 100 remaining calls, i.e. no waiting.
        """
        remaining = self._rate_remaining.get(endpoint, 100)
        reset_at = self._rate_reset.get(endpoint, 0)
        if remaining <= 1 and time.time() < reset_at:
            # +1 second of slack so we wake up safely past the reset boundary.
            sleep_time = reset_at - time.time() + 1
            time.sleep(sleep_time)

    def _update_rate_limit(self, endpoint: str, headers: dict) -> None:
        """Record the latest rate-limit state from the response headers."""
        self._rate_remaining[endpoint] = int(headers.get("x-rate-limit-remaining", "100"))
        self._rate_reset[endpoint] = float(headers.get("x-rate-limit-reset", "0"))

    def search(self, query: str, max_results: int = 100) -> dict:
        """Search recent tweets, waiting first if the search budget is spent."""
        endpoint = "search"
        self._check_rate_limit(endpoint)
        resp = self._client.get(
            "https://api.twitter.com/2/tweets/search/recent",
            params={"query": query, "max_results": max_results},
        )
        # Update bookkeeping before raising so limits are tracked even on errors.
        self._update_rate_limit(endpoint, dict(resp.headers))
        resp.raise_for_status()
        return resp.json()

    def get_user(self, username: str) -> dict:
        """Look up a user by handle, waiting first if the lookup budget is spent."""
        endpoint = "user_lookup"
        self._check_rate_limit(endpoint)
        resp = self._client.get(
            f"https://api.twitter.com/2/users/by/username/{username}",
            params={"user.fields": "public_metrics,description,location"},
        )
        self._update_rate_limit(endpoint, dict(resp.headers))
        resp.raise_for_status()
        return resp.json()

    def close(self) -> None:
        """Release the underlying HTTP client and its proxy connection."""
        self._client.close()
Social Listening Pipeline
from dataclasses import dataclass


@dataclass(frozen=True)
class BrandMention:
    """One tweet that matched a monitored brand search query.

    Attributes:
        tweet_id: Tweet identifier.
        text: Tweet text.
        author_id: Identifier of the tweet's author.
        created_at: Creation timestamp string ("" when absent).
        likes: Like count from public metrics.
        retweets: Retweet count from public metrics.
        query: The search query that surfaced this tweet.
    """

    tweet_id: str
    text: str
    author_id: str
    created_at: str
    likes: int
    retweets: int
    query: str
def monitor_brand(
    brand_queries: list[str],
    config: TwitterConfig,
) -> list[BrandMention]:
    """Monitor brand mentions across multiple search queries.

    Runs each query through a rate-limit-aware client and flattens the
    matching tweets into ``BrandMention`` records.

    Args:
        brand_queries: Search queries to run (empty list yields empty result).
        config: Bearer token and proxy settings for the API client.

    Returns:
        All mentions found, in query order.
    """
    client = RateLimitedTwitterClient(config)
    mentions: list[BrandMention] = []
    try:
        for query in brand_queries:
            result = client.search(query, max_results=100)
            for tweet in result.get("data", []):
                metrics = tweet.get("public_metrics", {})
                # FIX: append instead of rebuilding the list each iteration
                # (the old `mentions = [*mentions, ...]` was O(n^2)).
                mentions.append(
                    BrandMention(
                        tweet_id=tweet["id"],
                        text=tweet["text"],
                        author_id=tweet["author_id"],
                        created_at=tweet.get("created_at", ""),
                        likes=metrics.get("like_count", 0),
                        retweets=metrics.get("retweet_count", 0),
                        query=query,
                    )
                )
            time.sleep(2)  # gentle pacing between consecutive searches
    finally:
        # FIX: the client was leaked if any request raised.
        client.close()
    return mentions
Multi-Account Rate Limit Distribution
If your application uses multiple API keys, distribute them across different proxy sessions:
def create_distributed_clients(
    api_keys: list[str],
    proxy_username: str,
    proxy_password: str,
) -> list[RateLimitedTwitterClient]:
    """Create one rate-limited client per API key on its own proxy session.

    A distinct session label per key keeps each client pinned to a stable
    proxy identity, so the keys' rate-limit budgets stay independent.
    """
    clients: list[RateLimitedTwitterClient] = []
    for i, key in enumerate(api_keys):
        proxy = f"http://{proxy_username}-session-twitter-{i}:{proxy_password}@gate.hexproxies.com:8080"
        config = TwitterConfig(bearer_token=key, proxy_url=proxy)
        # FIX: append instead of `clients = [*clients, ...]`, which rebuilt
        # the whole list on every iteration (O(n^2)).
        clients.append(RateLimitedTwitterClient(config))
    return clients

Geo-Targeted Tweet Discovery
Use proxy geo-targeting to discover region-specific content:
def search_geo_tweets(query: str, country: str, username: str, password: str, bearer: str) -> dict:
    """Search recent tweets filtered to *country*, via a geo-targeted proxy.

    Args:
        query: Base search query; a ``place_country`` filter is appended.
        country: Two-letter country code used for both proxy geo-targeting
            and the tweet place filter.
        username / password: Proxy gateway credentials.
        bearer: Twitter API v2 bearer token.

    Raises:
        httpx.HTTPStatusError: on a non-2xx API response (FIX: errors were
            previously returned as raw JSON without any status check).
    """
    proxy = f"http://{username}-country-{country}:{password}@gate.hexproxies.com:8080"
    config = TwitterConfig(bearer_token=bearer, proxy_url=proxy)
    client = create_twitter_client(config)
    try:
        resp = client.get(
            "https://api.twitter.com/2/tweets/search/recent",
            params={"query": f"{query} place_country:{country.upper()}", "max_results": 100},
        )
        resp.raise_for_status()
    finally:
        # FIX: close the client even when the request raises.
        client.close()
    return resp.json()

Hex Proxies ISP proxies deliver sub-50ms latency for fast API responses, while our residential network provides geographic diversity for multi-region social listening.