

Copy-paste examples for cURL, Python, and Node.js. Includes authentication, proxy rotation, session management, and framework integrations.
All proxy requests use HTTP Basic Auth with your API key as username. The password can be any string (commonly left empty or set to a session ID).
# Basic proxy request with authentication
curl -x "http://YOUR_API_KEY:@proxy.zentislabs.com:8080" -L "https://ip.zentislabs.com/json"

Get a new IP address for every request. Perfect for web scraping and avoiding rate limits.
# Each request gets a new IP
for i in {1..5}; do
# -s silences curl's progress output; jq extracts just the "ip" field from the JSON reply
curl -x "http://YOUR_API_KEY:@rotate.zentislabs.com:8080" -s "https://ip.zentislabs.com/json" | jq '.ip'
done

Keep the same IP address for multiple requests. Use a session ID (any string) as the password to maintain the same IP for up to 10 minutes.
# Use session ID as password to keep same IP
# (any string works as the session ID)
SESSION_ID="session_12345"
# All requests with same session ID use same IP
curl -x "http://YOUR_API_KEY:$SESSION_ID@sticky.zentislabs.com:8080" "https://ip.zentislabs.com/json"
curl -x "http://YOUR_API_KEY:$SESSION_ID@sticky.zentislabs.com:8080" "https://target-site.com/api/data"

Target specific countries by using country-specific proxy endpoints.
# Available countries: us, uk, de, fr, jp, ca, au, nl, sg
# The country code doubles as the proxy hostname: <cc>.zentislabs.com
# United States
curl -x "http://YOUR_API_KEY:@us.zentislabs.com:8080" "https://ip.zentislabs.com/json"
# Germany
curl -x "http://YOUR_API_KEY:@de.zentislabs.com:8080" "https://ip.zentislabs.com/json"
# Japan
curl -x "http://YOUR_API_KEY:@jp.zentislabs.com:8080" "https://ip.zentislabs.com/json"

# settings.py
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 1,
}
# Configure ZentisLabs proxy
# NOTE(review): 'HTTP_PROXY' is not a built-in Scrapy setting name --
# HttpProxyMiddleware reads the http_proxy/https_proxy environment
# variables or a per-request meta['proxy'] value. Verify this key is
# actually honoured, or set request.meta['proxy'] in the spider instead.
HTTP_PROXY = 'http://YOUR_API_KEY:@rotate.zentislabs.com:8080'
# spider.py
import scrapy
# Example spider that crawls two pages through the rotating proxy endpoint.
class MySpider(scrapy.Spider):
name = 'example'
# NOTE(review): as in settings.py, 'HTTP_PROXY' is not a standard Scrapy
# setting; HttpProxyMiddleware honours request.meta['proxy'] or the
# http_proxy/https_proxy environment variables -- confirm the proxy is
# really applied with this configuration.
custom_settings = {
'HTTP_PROXY': 'http://YOUR_API_KEY:@rotate.zentislabs.com:8080'
}
# Seed the crawl with the two example URLs.
def start_requests(self):
urls = ['https://example.com/page1', 'https://example.com/page2']
for url in urls:
yield scrapy.Request(url, callback=self.parse)
# Extract the page's first <h1> text.
def parse(self, response):
# Each request automatically uses a different IP
yield {'title': response.css('h1::text').get()}

const puppeteer = require('puppeteer');
(async () => {
const browser = await puppeteer.launch({
headless: true,
args: [
// Host:port only here; Chromium does not take credentials in this flag --
// they are supplied below via page.authenticate()
'--proxy-server=rotate.zentislabs.com:8080'
]
});
const page = await browser.newPage();
// Authenticate with proxy
// (API key as username, empty password -- matches the Basic Auth scheme above)
await page.authenticate({
username: 'YOUR_API_KEY',
password: ''
});
await page.goto('https://example.com');
// For new IP, create new page
const page2 = await browser.newPage();
// authenticate() is per-page, so it must be repeated for every new page
await page2.authenticate({ username: 'YOUR_API_KEY', password: '' });
await page2.goto('https://example.com');
await browser.close();
})();

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Configure Chrome with proxy
chrome_options = Options()
# NOTE(review): Chrome typically ignores user:pass embedded in
# --proxy-server, hence the auth workarounds listed below -- confirm.
chrome_options.add_argument('--proxy-server=rotate.zentislabs.com:8080')
# For authenticated proxies, use extension or proxy with embedded auth
# Option 1: Extension-based auth (recommended for Selenium)
# Option 2: Use proxy without auth in URL + IP whitelist
driver = webdriver.Chrome(options=chrome_options)
# Navigate to site
driver.get('https://example.com')
# Each new driver instance gets new IP
driver.quit()

Handle common proxy errors with retries and exponential backoff.
import requests
import time
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
# Configure retry strategy
# Handles HTTP-status-level retries (429/5xx) inside the adapter;
# transport-level exceptions are retried separately by fetch_with_retry below.
retry_strategy = Retry(
total=3,
backoff_factor=1,
status_forcelist=[429, 500, 502, 503, 504],
)
adapter = HTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
# Mount the retrying adapter for both URL schemes
http.mount("http://", adapter)
http.mount("https://", adapter)
# Same proxy endpoint serves both http and https targets
proxies = {
'http': 'http://YOUR_API_KEY:@rotate.zentislabs.com:8080',
'https': 'http://YOUR_API_KEY:@rotate.zentislabs.com:8080'
}
def fetch_with_retry(url, max_retries=3):
    """Fetch *url* through the proxy, retrying transient transport failures.

    HTTP-status retries (429/5xx) are already handled by the mounted
    HTTPAdapter; this loop covers transport-level errors that the adapter
    surfaces as exceptions.

    Args:
        url: Target URL to fetch.
        max_retries: Number of attempts before giving up.

    Returns:
        The successful ``requests.Response``.

    Raises:
        requests.exceptions.RequestException: the last transport error after
            retries are exhausted, or a non-retried error such as HTTPError.
    """
    for attempt in range(max_retries):
        try:
            response = http.get(url, proxies=proxies, timeout=30)
            response.raise_for_status()
            return response
        # Catching only ProxyError missed timeouts and dropped connections,
        # which are just as common with rotating proxies. ConnectionError is
        # a superset of ProxyError, so this stays backward compatible.
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout) as e:
            print(f"Proxy error on attempt {attempt + 1}: {e}")
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # Exponential backoff: 1s, 2s, 4s
            else:
                raise
# Usage
try:
response = fetch_with_retry('https://example.com')
print(response.text)
except Exception as e:
# Reached only after every retry attempt has been exhausted
print(f"Failed after retries: {e}")