How to Scrape Fiverr Gig Data with Playwright in Python (2026)
Fiverr has no public API for browsing gigs. If you want to analyze pricing trends, compare seller offerings, or build a dataset of freelance service data, you need to scrape it.
The catch: Fiverr runs aggressive anti-bot protection. Simple HTTP requests get blocked immediately — the site requires full JavaScript rendering and uses fingerprinting to detect automation. Playwright is the tool for this job.
Setup
pip install playwright beautifulsoup4
playwright install chromium
Why Playwright, Not Requests
Fiverr serves a JavaScript-heavy frontend. The initial HTML response contains almost no gig data — it's all rendered client-side. You need a real browser engine. Playwright gives you that, plus better stealth options than Selenium.
Basic Gig Search Scraping
Let's start by scraping search results for a category:
import asyncio
from playwright.async_api import async_playwright
from bs4 import BeautifulSoup
import json
async def scrape_fiverr_search(query, max_pages=3):
    """Scrape Fiverr search results for a given query.

    Args:
        query: Search term; URL-encoded automatically.
        max_pages: Number of result pages to walk (default 3).

    Returns:
        List of gig dicts (title/seller/starting_price/rating/review_count/url);
        any field may be None when the corresponding element was not found.
    """
    import random
    from urllib.parse import quote_plus

    gigs = []
    async with async_playwright() as p:
        browser = await p.chromium.launch(
            headless=True,
            args=[
                "--disable-blink-features=AutomationControlled",
                "--no-sandbox"
            ]
        )
        context = await browser.new_context(
            viewport={"width": 1920, "height": 1080},
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
            locale="en-US"
        )
        # Remove automation indicators before any page script runs.
        page = await context.new_page()
        await page.add_init_script("""
            Object.defineProperty(navigator, 'webdriver', {get: () => undefined});
            delete navigator.__proto__.webdriver;
        """)
        for page_num in range(1, max_pages + 1):
            # BUG FIX: encode the query — a raw space ("web scraping")
            # produced an invalid URL.
            url = f"https://www.fiverr.com/search/gigs?query={quote_plus(query)}&page={page_num}"
            await page.goto(url, wait_until="networkidle", timeout=30000)
            await page.wait_for_timeout(2000)
            content = await page.content()
            soup = BeautifulSoup(content, "lxml")
            # Fiverr gig cards — class names are hashed builds, so match substrings.
            for card in soup.select("[class*='gig-card']"):
                title_el = card.select_one("h3, [class*='title']")
                seller_el = card.select_one("[class*='seller-name'], [class*='username']")
                price_el = card.select_one("[class*='price']")
                rating_el = card.select_one("[class*='rating']")
                reviews_el = card.select_one("[class*='reviews'], [class*='rating-count']")
                link_el = card.select_one("a[href*='/']")
                # Normalize relative hrefs to absolute URLs; guard missing href.
                href = link_el.get("href") if link_el else None
                if href and href.startswith("/"):
                    href = "https://www.fiverr.com" + href
                gig = {
                    "title": title_el.text.strip() if title_el else None,
                    "seller": seller_el.text.strip() if seller_el else None,
                    "starting_price": price_el.text.strip() if price_el else None,
                    "rating": rating_el.text.strip() if rating_el else None,
                    "review_count": reviews_el.text.strip() if reviews_el else None,
                    "url": href
                }
                gigs.append(gig)
            # Random delay between pages (was __import__("random") inline hack).
            await page.wait_for_timeout(3000 + int(2000 * random.random()))
        await browser.close()
    return gigs
# Example usage: collect two pages of search results, then preview
# the first five gigs on stdout.
gigs = asyncio.run(scrape_fiverr_search("web scraping", max_pages=2))
print(f"Found {len(gigs)} gigs")
for g in gigs[:5]:
    print(f" {g['title']} — {g['starting_price']} ({g['rating']})")
Scraping Individual Gig Pages
Gig detail pages contain the full pricing table (basic/standard/premium tiers), seller stats, reviews, and FAQ:
async def scrape_gig_detail(gig_url):
    """Scrape detailed info from a single Fiverr gig page: title,
    description, pricing tiers, seller stats, and up to ten reviews."""

    def _text(node):
        # Normalized text of a BeautifulSoup node, or None when missing.
        return node.text.strip() if node else None

    async with async_playwright() as pw:
        browser = await pw.chromium.launch(
            headless=True,
            args=["--disable-blink-features=AutomationControlled"]
        )
        context = await browser.new_context(
            viewport={"width": 1920, "height": 1080},
            user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
        )
        tab = await context.new_page()
        await tab.add_init_script("""
            Object.defineProperty(navigator, 'webdriver', {get: () => undefined});
        """)
        await tab.goto(gig_url, wait_until="networkidle", timeout=30000)
        await tab.wait_for_timeout(2000)
        dom = BeautifulSoup(await tab.content(), "lxml")

        # Gig title and description
        title = dom.select_one("h1")
        description = dom.select_one("[class*='description']")

        # Pricing tiers (basic/standard/premium packages)
        tiers = [
            {
                "name": _text(panel.select_one("[class*='title'], [class*='header']")),
                "price": _text(panel.select_one("[class*='price']")),
                "description": _text(panel.select_one("[class*='description']")),
                "delivery_time": _text(panel.select_one("[class*='delivery']")),
            }
            for panel in dom.select("[class*='package-tab'], [class*='pricing']")
        ]

        # Seller info
        seller_level = dom.select_one("[class*='seller-level']")
        response_time = dom.select_one("[class*='response-time']")
        orders_in_queue = dom.select_one("[class*='orders-in-queue']")

        # Reviews — cap at the first ten items.
        reviews = [
            {
                "reviewer": _text(item.select_one("[class*='username']")),
                "rating": _text(item.select_one("[class*='star']")),
                "text": _text(item.select_one("[class*='review-description']")),
            }
            for item in dom.select("[class*='review-item']")[:10]
        ]

        await browser.close()
        return {
            "title": _text(title),
            # Description is truncated to 500 chars, matching the summary shape.
            "description": _text(description)[:500] if description else None,
            "pricing_tiers": tiers,
            "seller_level": _text(seller_level),
            "response_time": _text(response_time),
            "orders_in_queue": _text(orders_in_queue),
            "reviews": reviews,
        }
Scraping Seller Profiles
Seller pages aggregate all their gigs, ratings, and stats:
async def scrape_seller_profile(username):
    """Scrape a Fiverr seller's public profile.

    Args:
        username: Fiverr username (profile lives at fiverr.com/<username>).

    Returns:
        Dict with display name, membership info, languages, response time,
        and the gigs listed on the profile (absolute URLs).
    """
    url = f"https://www.fiverr.com/{username}"
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        context = await browser.new_context(
            viewport={"width": 1920, "height": 1080},
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        )
        page = await context.new_page()
        await page.goto(url, wait_until="networkidle", timeout=30000)
        await page.wait_for_timeout(2000)
        content = await page.content()
        soup = BeautifulSoup(content, "lxml")
        display_name = soup.select_one("[class*='seller-name'], h1")
        member_since = soup.select_one("[class*='member-since']")
        country = soup.select_one("[class*='country']")
        languages = [l.text.strip() for l in soup.select("[class*='language']")]
        avg_response = soup.select_one("[class*='avg-response']")
        # Active gigs on profile
        profile_gigs = []
        for gig in soup.select("[class*='gig-card']"):
            gig_title = gig.select_one("h3, [class*='title']")
            gig_price = gig.select_one("[class*='price']")
            gig_link = gig.select_one("a")
            # BUG FIX: <a> may lack an href (KeyError before), and profile
            # hrefs are relative — normalize to absolute URLs so they match
            # the shape returned by the search scraper.
            href = gig_link.get("href") if gig_link else None
            if href and href.startswith("/"):
                href = "https://www.fiverr.com" + href
            profile_gigs.append({
                "title": gig_title.text.strip() if gig_title else None,
                "price": gig_price.text.strip() if gig_price else None,
                "url": href
            })
        await browser.close()
        return {
            "username": username,
            "display_name": display_name.text.strip() if display_name else None,
            "member_since": member_since.text.strip() if member_since else None,
            "country": country.text.strip() if country else None,
            "languages": languages,
            "avg_response_time": avg_response.text.strip() if avg_response else None,
            "active_gigs": profile_gigs
        }
Anti-Bot Measures on Fiverr
Fiverr uses some of the more aggressive anti-bot systems among marketplace sites. Here's what you're dealing with:
Cloudflare protection: Fiverr sits behind Cloudflare with bot management enabled. Standard headless browsers get challenged immediately.
JavaScript fingerprinting: Fiverr checks browser properties — WebGL renderer, canvas fingerprint, installed plugins, timezone, screen resolution. Headless Chrome has telltale defaults that get flagged.
Behavioral analysis: Rapid-fire page loads from one IP get blocked fast. Fiverr expects human browsing patterns — time on page, scroll events, mouse movements.
The Playwright webdriver override in the code above handles the basic detection. For sustained scraping at scale, you'll also want:
async def create_stealth_context(playwright):
    """Create a browser context with anti-detection measures: stealth
    launch flags, a realistic fingerprint surface (viewport, UA, locale,
    timezone, geolocation), and an init script masking headless tells.

    Returns:
        (browser, context) — caller is responsible for browser.close().
    """
    stealth_args = [
        "--disable-blink-features=AutomationControlled",
        "--disable-features=IsolateOrigins,site-per-process",
        "--no-sandbox"
    ]
    browser = await playwright.chromium.launch(headless=True, args=stealth_args)

    context_options = {
        "viewport": {"width": 1920, "height": 1080},
        "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "locale": "en-US",
        "timezone_id": "America/New_York",
        "geolocation": {"latitude": 40.7128, "longitude": -74.0060},
        "permissions": ["geolocation"],
    }
    context = await browser.new_context(**context_options)

    # Inject stealth scripts before any page JavaScript can run.
    await context.add_init_script("""
        Object.defineProperty(navigator, 'webdriver', {get: () => undefined});
        Object.defineProperty(navigator, 'plugins', {get: () => [1, 2, 3, 4, 5]});
        Object.defineProperty(navigator, 'languages', {get: () => ['en-US', 'en']});
        window.chrome = { runtime: {} };
    """)
    return browser, context
Residential proxies are essential for Fiverr scraping beyond a handful of pages. Datacenter IPs get blocked almost immediately. A service like ThorData provides residential proxy rotation that pairs well with Playwright — each browser session gets a different residential IP, making your requests indistinguishable from real users browsing from different locations.
async def launch_with_proxy(playwright, proxy_server, proxy_user, proxy_pass):
    """Launch headless Chromium routed through an authenticated proxy."""
    proxy_config = {
        "server": proxy_server,
        "username": proxy_user,
        "password": proxy_pass,
    }
    return await playwright.chromium.launch(headless=True, proxy=proxy_config)
Category Browsing
Fiverr organizes gigs into categories and subcategories. You can systematically scrape entire verticals:
async def scrape_category(category_path, max_pages=5):
    """Scrape all gigs in a Fiverr category.

    category_path examples:
        'programming-tech/web-programming'
        'graphics-design/logo-design'
        'writing-translation/articles-blogposts'

    Args:
        category_path: '<category>/<subcategory>' URL path segment.
        max_pages: Upper bound on result pages to walk.

    Returns:
        List of gig dicts tagged with the category path and page number.
    """
    import random  # FIX: was re-imported inside the page loop on every iteration

    all_gigs = []
    async with async_playwright() as p:
        browser, context = await create_stealth_context(p)
        page = await context.new_page()
        for page_num in range(1, max_pages + 1):
            url = f"https://www.fiverr.com/categories/{category_path}?page={page_num}"
            try:
                await page.goto(url, wait_until="networkidle", timeout=30000)
                await page.wait_for_timeout(2000)
                # Scroll in two steps to trigger lazy-loaded gig cards.
                await page.evaluate("window.scrollTo(0, document.body.scrollHeight / 2)")
                await page.wait_for_timeout(1000)
                await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                await page.wait_for_timeout(1000)
                content = await page.content()
                soup = BeautifulSoup(content, "lxml")
                cards = soup.select("[class*='gig-card']")
                if not cards:
                    # No cards: category ran out of pages (or we were served
                    # a challenge page) — stop paginating.
                    break
                for card in cards:
                    title = card.select_one("h3, [class*='title']")
                    price = card.select_one("[class*='price']")
                    seller = card.select_one("[class*='seller']")
                    all_gigs.append({
                        "title": title.text.strip() if title else None,
                        "price": price.text.strip() if price else None,
                        "seller": seller.text.strip() if seller else None,
                        "category": category_path,
                        "page": page_num
                    })
                # Human-like delay between pages
                await page.wait_for_timeout(3000 + int(3000 * random.random()))
            except Exception as e:
                print(f"Error on page {page_num}: {e}")
                break
        await browser.close()
    return all_gigs
Data Export
Once you've collected gig data, export it for analysis:
import csv
def export_gigs_csv(gigs, filename="fiverr_gigs.csv"):
    """Export scraped gigs to CSV.

    Args:
        gigs: List of gig dicts (key sets may differ between rows).
        filename: Output path, written as UTF-8.
    """
    if not gigs:
        return
    # BUG FIX: fieldnames came from the first gig only, so any later row
    # with an extra key (e.g. "category"/"page") raised ValueError.
    # Take the union of keys, preserving first-seen order.
    keys = list(dict.fromkeys(k for g in gigs for k in g))
    with open(filename, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=keys, restval="")
        writer.writeheader()
        writer.writerows(gigs)
    # BUG FIX: the destination filename was missing from this message.
    print(f"Exported {len(gigs)} gigs to {filename}")
Rate Limiting Strategy
Fiverr bans aggressively. Stay under the radar:
- Maximum 1 request every 5 seconds for search/category pages
- Maximum 1 gig detail page every 8-10 seconds
- Rotate User-Agent strings every 10-20 requests
- Use residential proxies — datacenter IPs are dead on arrival
- Add scroll and mouse events to mimic human behavior
- Don't scrape during peak hours (US business hours) when monitoring is tightest
Fiverr isn't a site you can casually blast with 1000 requests. Plan your scraping sessions, collect what you need, and keep the volume reasonable.
Legal Considerations
Fiverr's Terms of Service explicitly prohibit scraping. This guide is educational. If you're building a product that relies on Fiverr data, understand the legal risks. Publicly visible data on the open web is generally considered fair game for research and personal analysis, but commercial use of scraped marketplace data sits in a gray area. Consult a lawyer if you're going beyond personal research.
Systematic Category Analysis
For competitive intelligence, scrape entire Fiverr verticals:
import asyncio
import json
import sqlite3
from datetime import datetime
# Category slug -> {subcategory slug: human-readable name}.
# Keys mirror Fiverr URL path segments: /categories/<category>/<subcategory>;
# the values are the labels stored in the gigs.subcategory column.
FIVERR_CATEGORIES = {
    "programming-tech": {
        "web-programming": "Web Development",
        "mobile-apps": "Mobile Apps",
        "data-science": "Data Science",
        "api-integration": "APIs",
        "ecommerce-development": "eCommerce",
        "scripts-plugins-extensions": "Scripts & Plugins",
    },
    "graphics-design": {
        "logo-design": "Logo Design",
        "illustration": "Illustration",
        "web-design": "Web Design",
        "social-media-design": "Social Media Design",
    },
    "writing-translation": {
        "articles-blog-posts": "Articles & Blogs",
        "copy-editing": "Copywriting",
        "translation": "Translation",
        "technical-writing": "Technical Writing",
    },
    "digital-marketing": {
        "social-media-marketing": "Social Media",
        "seo": "SEO",
        "content-marketing": "Content Marketing",
        "email-marketing": "Email Marketing",
    },
}
def init_fiverr_db(db_path: str = "fiverr.db") -> sqlite3.Connection:
    """Create (or open) the Fiverr SQLite database and ensure the schema exists.

    Tables:
        gigs      — one row per gig URL (upserted via INSERT OR REPLACE)
        gig_tiers — Basic/Standard/Premium packages, FK -> gigs.url
        sellers   — seller profile snapshots

    Returns an open connection; the caller is responsible for closing it.
    """
    conn = sqlite3.connect(db_path)
    # executescript runs all statements in one call; IF NOT EXISTS makes
    # repeated initialization idempotent.
    conn.executescript("""
    CREATE TABLE IF NOT EXISTS gigs (
    url TEXT PRIMARY KEY,
    title TEXT,
    seller TEXT,
    category TEXT,
    subcategory TEXT,
    starting_price_text TEXT,
    starting_price_usd REAL,
    rating_text TEXT,
    rating REAL,
    review_count INTEGER,
    seller_level TEXT,
    orders_in_queue INTEGER,
    scraped_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS gig_tiers (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    gig_url TEXT,
    tier_name TEXT,
    price_text TEXT,
    price_usd REAL,
    description TEXT,
    delivery_time TEXT,
    FOREIGN KEY (gig_url) REFERENCES gigs(url)
    );
    CREATE TABLE IF NOT EXISTS sellers (
    username TEXT PRIMARY KEY,
    display_name TEXT,
    member_since TEXT,
    country TEXT,
    seller_level TEXT,
    avg_response_time TEXT,
    num_active_gigs INTEGER,
    scraped_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_gigs_category
    ON gigs(category, subcategory);
    CREATE INDEX IF NOT EXISTS idx_gigs_price
    ON gigs(starting_price_usd);
    CREATE INDEX IF NOT EXISTS idx_gigs_seller
    ON gigs(seller);
    """)
    conn.commit()
    return conn
def parse_price(price_text: str) -> "float | None":
    """Extract a numeric USD amount from Fiverr's price text.

    Handles "From $1,250" / "$80.50" style strings. Returns None when
    price_text is empty/None or contains no dollar amount.
    (FIX: annotation said plain `float` although None is a valid return.)
    """
    if not price_text:
        return None
    import re
    # Handle "From $X" or "$X" formats; allow thousands separators.
    match = re.search(r'\$([\d,]+(?:\.\d+)?)', price_text)
    if match:
        return float(match.group(1).replace(",", ""))
    return None
def parse_rating(rating_text: str) -> "tuple[float | None, int | None]":
    """Extract (rating, review_count) from rating text like "4.9 (132)".

    Returns (None, None) when the text is missing; either element may be
    None independently when only one piece is present.
    """
    import re
    if not rating_text:
        return None, None
    # First number is the star rating; parenthesized integer is the count.
    rating_match = re.search(r'([\d.]+)', rating_text)
    count_match = re.search(r'\((\d+)\)', rating_text)
    rating = float(rating_match.group(1)) if rating_match else None
    count = int(count_match.group(1)) if count_match else None
    return rating, count
def save_gig(conn: sqlite3.Connection, gig: dict, tiers: "list | None" = None):
    """Upsert one gig (and optionally its pricing tiers) into SQLite.

    Prices and ratings are parsed into numeric columns via parse_price()/
    parse_rating(); the raw text is stored alongside for auditing.

    Args:
        conn: Open connection with the init_fiverr_db() schema.
        gig: Gig dict (url/title/seller/category/subcategory/starting_price/rating).
        tiers: Optional list of tier dicts (name/price/description/delivery_time).
    """
    price = parse_price(gig.get("starting_price"))
    rating, review_count = parse_rating(gig.get("rating"))
    conn.execute(
        """INSERT OR REPLACE INTO gigs
        (url, title, seller, category, subcategory, starting_price_text,
        starting_price_usd, rating_text, rating, review_count)
        VALUES (?,?,?,?,?,?,?,?,?,?)""",
        (
            gig.get("url"),
            gig.get("title"),
            gig.get("seller"),
            gig.get("category"),
            gig.get("subcategory"),
            gig.get("starting_price"),
            price,
            gig.get("rating"),
            rating,
            review_count,
        ),
    )
    if tiers:
        # BUG FIX: re-scraping a gig used to append duplicate tier rows on
        # every save; replace the gig's tier set wholesale instead.
        conn.execute("DELETE FROM gig_tiers WHERE gig_url = ?", (gig.get("url"),))
        for tier in tiers:
            # Skip placeholder tiers where neither name nor price was found.
            if not any([tier.get("name"), tier.get("price")]):
                continue
            conn.execute(
                """INSERT INTO gig_tiers (gig_url, tier_name, price_text, price_usd, description, delivery_time)
                VALUES (?,?,?,?,?,?)""",
                (
                    gig.get("url"),
                    tier.get("name"),
                    tier.get("price"),
                    parse_price(tier.get("price")),
                    tier.get("description"),
                    tier.get("delivery_time"),
                ),
            )
    conn.commit()
Market Analysis: Pricing and Competition by Category
import statistics
def analyze_fiverr_market(
    db_path: str = "fiverr.db",
    category: "str | None" = None,
) -> dict:
    """
    Analyze Fiverr market data for a category.

    Args:
        db_path: SQLite database produced by init_fiverr_db()/save_gig().
        category: Optional top-level category filter; None means all rows.

    Returns:
        Dict with pricing stats, a price-band histogram, rating averages,
        and per-subcategory summaries — or {"error": "no_data"} when the
        query matches nothing.
    """
    conn = sqlite3.connect(db_path)
    base_query = """
        SELECT starting_price_usd, rating, review_count,
               seller, title, subcategory
        FROM gigs
        WHERE starting_price_usd IS NOT NULL
    """
    params = ()
    if category:
        base_query += " AND category = ?"
        params = (category,)
    rows = conn.execute(base_query, params).fetchall()
    conn.close()  # no further queries needed; close before analysis
    if not rows:
        return {"error": "no_data"}
    # Zero values are excluded along with NULLs (falsy filter), matching
    # the intent that $0 / unrated rows don't skew the averages.
    prices = [row[0] for row in rows if row[0]]
    ratings = [row[1] for row in rows if row[1]]
    review_counts = [row[2] for row in rows if row[2]]
    sorted_prices = sorted(prices)  # hoisted: was sorted twice for p25/p75
    # Price distribution histogram.
    price_bands = {
        "under_5": sum(1 for p in prices if p < 5),
        "5_to_20": sum(1 for p in prices if 5 <= p < 20),
        "20_to_50": sum(1 for p in prices if 20 <= p < 50),
        "50_to_100": sum(1 for p in prices if 50 <= p < 100),
        "100_to_250": sum(1 for p in prices if 100 <= p < 250),
        "over_250": sum(1 for p in prices if p >= 250),
    }
    # Subcategory breakdown.
    subcat_data = {}
    for row in rows:
        subcat = row[5] or "unknown"
        bucket = subcat_data.setdefault(subcat, {"prices": [], "reviews": []})
        if row[0]:
            bucket["prices"].append(row[0])
        if row[2]:
            bucket["reviews"].append(row[2])
    subcat_summary = {}
    for subcat, data in subcat_data.items():
        if len(data["prices"]) >= 5:  # skip statistically thin subcategories
            subcat_summary[subcat] = {
                "count": len(data["prices"]),
                "median_price": round(statistics.median(data["prices"]), 2),
                "avg_reviews": round(statistics.mean(data["reviews"]), 1) if data["reviews"] else 0,
            }
    return {
        "category": category or "all",
        "total_gigs": len(rows),
        "price_stats": {
            "median": round(statistics.median(prices), 2) if prices else 0,
            "mean": round(statistics.mean(prices), 2) if prices else 0,
            "min": min(prices) if prices else 0,
            "max": max(prices) if prices else 0,
            "p25": sorted_prices[len(sorted_prices)//4] if prices else 0,
            "p75": sorted_prices[3*len(sorted_prices)//4] if prices else 0,
        },
        "price_distribution": price_bands,
        "avg_rating": round(statistics.mean(ratings), 2) if ratings else 0,
        "avg_reviews": round(statistics.mean(review_counts), 1) if review_counts else 0,
        "subcategories": dict(sorted(subcat_summary.items(), key=lambda x: x[1]["count"], reverse=True)),
    }
Identifying Top Sellers and Their Strategies
def analyze_top_sellers(
    db_path: str = "fiverr.db",
    category: "str | None" = None,
    top_n: int = 20,
) -> list:
    """Identify top sellers by review volume and analyze their pricing.

    Args:
        db_path: SQLite database produced by init_fiverr_db()/save_gig().
        category: Optional top-level category filter.
        top_n: Number of sellers to return, ranked by total reviews.

    Returns:
        List of seller summary dicts, highest review volume first.
    """
    conn = sqlite3.connect(db_path)
    query = """
        SELECT seller,
               COUNT(*) as num_gigs,
               SUM(review_count) as total_reviews,
               AVG(rating) as avg_rating,
               MIN(starting_price_usd) as min_price,
               MAX(starting_price_usd) as max_price,
               AVG(starting_price_usd) as avg_price
        FROM gigs
        WHERE seller IS NOT NULL
    """
    params = ()
    if category:
        query += " AND category = ?"
        params = (category,)
    query += """
        GROUP BY seller
        HAVING num_gigs >= 1 AND total_reviews IS NOT NULL
        ORDER BY total_reviews DESC
        LIMIT ?
    """
    params = params + (top_n,)
    rows = conn.execute(query, params).fetchall()
    conn.close()
    return [
        {
            "seller": row[0],
            "num_gigs": row[1],
            "total_reviews": row[2] or 0,
            "avg_rating": round(row[3] or 0, 2),
            # FIX: use explicit None checks so a legitimate $0 minimum
            # price no longer suppresses the range string.
            "price_range": f"${row[4]:.0f}-${row[5]:.0f}" if row[4] is not None and row[5] is not None else None,
            "avg_starting_price": round(row[6] or 0, 2),
        }
        for row in rows
    ]
# Example: rank sellers in the programming-tech category by cumulative
# review volume (requires a populated fiverr.db).
sellers = analyze_top_sellers(db_path="fiverr.db", category="programming-tech", top_n=10)
print("Top sellers in programming-tech:")
for s in sellers:
    print(f" {s['seller']:<25} {s['total_reviews']:>6} reviews {s['price_range']} {s['avg_rating']}/5")
Price Tier Analysis
Understanding Fiverr's tiered pricing structure:
def analyze_tier_pricing(
    db_path: str = "fiverr.db",
    category: "str | None" = None,
) -> dict:
    """Analyze the Basic/Standard/Premium tier pricing patterns.

    Args:
        db_path: SQLite database with gig_tiers populated by save_gig().
        category: Optional top-level category filter.

    Returns:
        Dict keyed by tier name with count/median/mean/p25/p75, plus
        "typical_multipliers" when all three tiers have enough samples.
    """
    conn = sqlite3.connect(db_path)
    query = """
        SELECT gt.tier_name, gt.price_usd, g.rating, g.review_count
        FROM gig_tiers gt
        JOIN gigs g ON g.url = gt.gig_url
        WHERE gt.price_usd IS NOT NULL AND gt.price_usd > 0
    """
    params = ()
    if category:
        query += " AND g.category = ?"
        params = (category,)
    rows = conn.execute(query, params).fetchall()
    conn.close()
    # Bucket prices by normalized tier name; unknown tier labels are skipped.
    tier_data = {}
    for row in rows:
        tier = (row[0] or "").lower()
        if "basic" in tier:
            tier_key = "basic"
        elif "standard" in tier:
            tier_key = "standard"
        elif "premium" in tier:
            tier_key = "premium"
        else:
            continue
        tier_data.setdefault(tier_key, []).append(row[1])
    analysis = {}
    for tier, prices in tier_data.items():
        if len(prices) >= 5:  # skip statistically thin tiers
            ordered = sorted(prices)  # hoisted: was sorted twice for p25/p75
            analysis[tier] = {
                "count": len(prices),
                "median": round(statistics.median(prices), 2),
                "mean": round(statistics.mean(prices), 2),
                "p25": ordered[len(ordered)//4],
                "p75": ordered[3*len(ordered)//4],
            }
    # How much more the upper tiers typically cost relative to Basic.
    if all(k in analysis for k in ["basic", "standard", "premium"]):
        analysis["typical_multipliers"] = {
            "standard_over_basic": round(analysis["standard"]["median"] / analysis["basic"]["median"], 2),
            "premium_over_basic": round(analysis["premium"]["median"] / analysis["basic"]["median"], 2),
        }
    return analysis
Complete Pipeline with Proxy Rotation
async def run_fiverr_pipeline(
    categories: "list | None" = None,
    db_path: str = "fiverr.db",
    proxy_server: "str | None" = None,
    proxy_user: "str | None" = None,
    proxy_pass: "str | None" = None,
):
    """
    Full Fiverr data collection pipeline.
    Collects gig data across multiple categories and subcategories.

    Args:
        categories: Top-level category slugs (defaults to all keys of
            FIVERR_CATEGORIES).
        db_path: SQLite file created/extended via init_fiverr_db().
        proxy_server/proxy_user/proxy_pass: Optional proxy credentials,
            applied to every browser launch.
    """
    import random  # BUG FIX: random was used below but never imported

    if categories is None:
        categories = list(FIVERR_CATEGORIES.keys())
    conn = init_fiverr_db(db_path)
    total_gigs = 0
    # BUG FIX: previously launch_kwargs (with the proxy) was built but never
    # used — an extra browser was launched and discarded while scrape_category
    # opened its own un-proxied one. The scraping is now done here with a
    # single, proxied browser per subcategory.
    launch_kwargs = {
        "headless": True,
        "args": ["--disable-blink-features=AutomationControlled", "--no-sandbox"],
    }
    if proxy_server:
        launch_kwargs["proxy"] = {
            "server": proxy_server,
            "username": proxy_user,
            "password": proxy_pass,
        }
    async with async_playwright() as p:
        for category in categories:
            for subcat_path, subcat_name in FIVERR_CATEGORIES.get(category, {}).items():
                full_path = f"{category}/{subcat_path}"
                print(f"\nScraping: {full_path}")
                browser = await p.chromium.launch(**launch_kwargs)
                context = await browser.new_context(
                    viewport={"width": 1920, "height": 1080},
                    user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
                    locale="en-US",
                )
                await context.add_init_script(
                    "Object.defineProperty(navigator, 'webdriver', {get: () => undefined});"
                )
                page = await context.new_page()
                gigs = []
                for page_num in range(1, 4):  # max_pages=3, as before
                    url = f"https://www.fiverr.com/categories/{full_path}?page={page_num}"
                    try:
                        await page.goto(url, wait_until="networkidle", timeout=30000)
                        await page.wait_for_timeout(2000)
                        soup = BeautifulSoup(await page.content(), "lxml")
                        cards = soup.select("[class*='gig-card']")
                        if not cards:
                            break
                        for card in cards:
                            title = card.select_one("h3, [class*='title']")
                            price = card.select_one("[class*='price']")
                            seller = card.select_one("[class*='seller']")
                            gigs.append({
                                # BUG FIX: save_gig() parses "starting_price";
                                # the old "price" key was silently dropped.
                                "title": title.text.strip() if title else None,
                                "starting_price": price.text.strip() if price else None,
                                "seller": seller.text.strip() if seller else None,
                            })
                        # Human-like delay between pages.
                        await page.wait_for_timeout(3000 + int(3000 * random.random()))
                    except Exception as e:
                        print(f"Error on page {page_num}: {e}")
                        break
                for gig in gigs:
                    gig["category"] = category
                    gig["subcategory"] = subcat_name
                    save_gig(conn, gig)
                total_gigs += len(gigs)
                print(f" {len(gigs)} gigs collected (total: {total_gigs})")
                await browser.close()
                # Long pause between subcategories to stay under the radar.
                await asyncio.sleep(random.uniform(15, 30))
    conn.close()
    print(f"\nPipeline complete: {total_gigs} gigs in {db_path}")
# Run with ThorData residential proxies
# (replace USERNAME/PASSWORD with real credentials before running).
asyncio.run(run_fiverr_pipeline(
    proxy_server="http://proxy.thordata.com:9000",
    proxy_user="USERNAME",
    proxy_pass="PASSWORD",
))
Key Takeaways for Fiverr Scraping in 2026
- Fiverr requires Playwright -- the site is a heavy SPA and all gig data is client-side rendered
- Remove `navigator.webdriver` via `add_init_script` before navigating -- this is mandatory for basic detection evasion
- Rate limit to one request every 5-10 seconds and rotate User-Agents every 10-20 pages
- Residential proxies are essential; datacenter IPs fail on first contact with Cloudflare
- ThorData's residential proxy network provides per-session IP rotation that pairs well with Playwright browser contexts
- The most actionable analysis: tier pricing (Basic/Standard/Premium), top sellers by review volume, and price distribution by subcategory
- Export to SQLite for persistence and use the analysis functions above for market intelligence