Scraping PyPI Package Metadata in 2026: Downloads, Dependencies, and Release History
PyPI hosts over 600,000 Python packages. If you're building a dependency auditor, tracking package popularity, researching the Python ecosystem, or building security tooling, you need reliable access to package metadata. Unlike most scraping targets, PyPI actually wants you to use its data — but the details of how to get everything you need are spread across three different systems.
Here's the complete approach, including dependency tree construction, download stats via BigQuery, change monitoring, proxy setup for large-scale crawls, and a full SQLite storage pipeline.
The Three Data Sources
PyPI doesn't have one API — it has three complementary systems:
- PyPI JSON API — Package metadata, version history, file URLs, maintainers. No auth needed.
- PyPI BigQuery dataset — Download statistics. Requires a Google Cloud account (free tier works).
- Simple API (PEP 503) — The index `pip` uses. Minimal metadata but useful for crawling all packages.
For most use cases, the JSON API is all you need.
Installation
pip install httpx
For download stats without BigQuery:
pip install pypistats
Fetching Package Metadata
The JSON API is clean and stable. Every package has a predictable endpoint:
import httpx
import time
import json
import sqlite3
from datetime import datetime, timezone
from pathlib import Path
# Shared HTTP client, reused by every function below: one connection pool,
# one timeout policy, one Accept header for all PyPI JSON API calls.
client = httpx.Client(
    timeout=30,
    headers={"Accept": "application/json"},
)
def get_package(name: str) -> dict:
    """Fetch full metadata for a PyPI package via the JSON API.

    Args:
        name: Package name (any capitalization; PyPI redirects to canonical).

    Returns:
        A flat dict of the most useful ``info`` fields plus release count.

    Raises:
        httpx.HTTPStatusError: On non-2xx responses (e.g. 404 for unknown names).
    """
    resp = client.get(f"https://pypi.org/pypi/{name}/json")
    resp.raise_for_status()
    data = resp.json()
    info = data["info"]

    # Normalize package name to the article-wide underscore convention so the
    # same project always maps to one key regardless of '-'/'.' differences.
    canonical_name = info["name"].lower().replace("-", "_").replace(".", "_")

    # FIX: PyPI returns JSON null (-> Python None) for absent string fields,
    # and dict.get(key, default) does NOT substitute the default for an
    # explicit None value. Use `or` fallbacks so every string field is a str
    # and every container field is a real container.
    return {
        "name": info["name"],
        "canonical_name": canonical_name,
        "version": info["version"],
        "summary": info.get("summary") or "",
        "description": info.get("description") or "",  # full README text
        "author": info.get("author") or info.get("maintainer") or "",
        "author_email": info.get("author_email") or info.get("maintainer_email") or "",
        "license": info.get("license") or "",
        "home_page": info.get("home_page") or info.get("project_url") or "",
        "requires_python": info.get("requires_python") or "",
        "keywords": info.get("keywords") or "",
        "classifiers": info.get("classifiers") or [],
        "project_urls": info.get("project_urls") or {},
        "requires_dist": info.get("requires_dist") or [],
        "total_releases": len(data["releases"]),
        "yanked": info.get("yanked") or False,
        "yanked_reason": info.get("yanked_reason") or "",
    }
# Demo: fetch and summarize one package's metadata.
pkg = get_package("requests")
print(f"{pkg['name']} v{pkg['version']}")
print(f" Author: {pkg['author']}")
print(f" License: {pkg['license']}")
print(f" Requires Python: {pkg['requires_python']}")
print(f" Total releases: {pkg['total_releases']}")
print(f" Dependencies: {len(pkg['requires_dist'])}")
print(f" Classifiers: {len(pkg['classifiers'])}")
Extracting Dependency Trees
The requires_dist field gives direct dependencies. Build a full tree by recursing:
from collections import deque
import re
def parse_dep_name(dep_str: str) -> str:
    """Extract the bare package name from a PEP 508 requirement string.

    Examples: ``'urllib3<3,>=1.21.1'`` -> ``'urllib3'``,
    ``'requests[security]; python_version < "3.9"'`` -> ``'requests'``.
    The result is lowercased for use as a consistent dict/set key.
    """
    # FIX 1: strip *before* splitting — a leading space made the first
    # split token the empty string.
    # FIX 2: include '~' so the compatible-release operator '~=' is cut
    # cleanly ('foo~=1.0' previously yielded 'foo~'); ',' added for safety.
    return re.split(r"[<>=!~\s;\[,]", dep_str.strip())[0].strip().lower()
def is_optional_dep(dep_str: str) -> bool:
    """Return True for extras-only requirements (marker contains ``extra ==``).

    Such dependencies install only when the user requests an extra
    (e.g. ``requests[socks]``), so they are excluded from the core tree.
    """
    # The environment marker lives after ';'. Match 'extra ==' with any
    # amount of whitespace around '==' — the original exact-substring check
    # ('extra ==' / '; extra ==', the second being redundant) missed the
    # legal spelling 'extra=="x"'.
    _, _, marker = dep_str.partition(";")
    return bool(re.search(r"\bextra\s*==", marker))
def build_dependency_tree(
    root: str,
    max_depth: int = 3,
    delay: float = 0.2,
) -> dict[str, list[str]]:
    """Build a dependency tree up to max_depth levels.

    Breadth-first walk over the PyPI JSON API. Returns a mapping of
    package name -> list of direct, non-extras dependency names.

    Args:
        root: Package to start from (lowercased before the first fetch).
        max_depth: How many dependency levels to follow from the root.
        delay: Seconds to sleep after each successful fetch (politeness).
    """
    tree: dict[str, list[str]] = {}
    queue: deque[tuple[str, int]] = deque([(root.lower(), 0)])
    seen: set[str] = set()  # guards against cycles and repeat fetches
    while queue:
        pkg_name, depth = queue.popleft()
        if pkg_name in seen or depth > max_depth:
            continue
        seen.add(pkg_name)
        try:
            resp = client.get(f"https://pypi.org/pypi/{pkg_name}/json")
            if resp.status_code == 404:
                # Dependency name not on PyPI (deleted/renamed): record a leaf.
                tree[pkg_name] = []
                continue
            resp.raise_for_status()
            data = resp.json()
            # requires_dist is JSON null for packages with no deps -> use [].
            deps_raw = data["info"].get("requires_dist") or []
        except Exception as e:
            # NOTE(review): error paths also skip the sleep below, so a run of
            # failures hits the API at full speed.
            print(f" Error fetching {pkg_name}: {e}")
            continue
        # Filter optional/extras dependencies
        deps = [
            parse_dep_name(d)
            for d in deps_raw
            if not is_optional_dep(d) and d.strip()
        ]
        # Deduplicate (dict.fromkeys preserves first-seen order)
        deps = list(dict.fromkeys(deps))
        tree[pkg_name] = deps
        if depth < max_depth:
            for dep in deps:
                if dep not in seen:
                    queue.append((dep, depth + 1))
        if delay > 0:
            time.sleep(delay)
    return tree
def print_dependency_tree(tree: dict, root: str, indent: int = 0) -> None:
    """Pretty-print *tree* (name -> direct deps) starting at *root*.

    Output is capped at 10 children per node and 4 nesting levels so large
    graphs stay readable on a terminal.
    """
    children = tree.get(root, [])
    for child in children[:10]:  # limit output width
        print(" " * indent + f"├─ {child}")
        if indent < 3 and child in tree:
            print_dependency_tree(tree, child, indent + 1)
# FastAPI dependency tree: crawl 2 levels deep, then render it.
deps = build_dependency_tree("fastapi", max_depth=2)
print(f"\nfastapi dependency tree ({len(deps)} packages):")
print_dependency_tree(deps, "fastapi")
Release History and Version Analysis
Every package version is available with full metadata:
def get_release_history(name: str) -> list[dict]:
    """Get all releases with upload dates and file sizes.

    Returns one dict per release that has at least one uploaded file,
    sorted newest-first by upload date. Versions registered with no files
    are skipped — they carry no dates or sizes.
    """
    resp = client.get(f"https://pypi.org/pypi/{name}/json")
    resp.raise_for_status()
    releases = resp.json()["releases"]
    history = []
    for version, files in releases.items():
        if not files:
            continue  # registered version, nothing uploaded
        # Use the earliest file's upload time as release date
        # (ISO-8601 strings compare correctly as plain strings).
        oldest_file = min(files, key=lambda f: f["upload_time"])
        total_size = sum(f.get("size", 0) for f in files)
        file_types = list({f.get("packagetype", "") for f in files})
        history.append({
            "version": version,
            "upload_date": oldest_file["upload_time"][:10],  # keep yyyy-mm-dd
            "files": len(files),
            "total_size_kb": total_size // 1024,
            "python_requires": oldest_file.get("requires_python") or "",
            # A release counts as yanked if any of its files is yanked.
            "yanked": any(f.get("yanked", False) for f in files),
            "yanked_reason": next(
                (f.get("yanked_reason", "") for f in files if f.get("yanked")), ""
            ),
            "has_wheel": any(f.get("packagetype") == "bdist_wheel" for f in files),
            "has_sdist": any(f.get("packagetype") == "sdist" for f in files),
            "file_types": file_types,
        })
    # Newest first; ISO date strings sort lexicographically.
    history.sort(key=lambda r: r["upload_date"], reverse=True)
    return history
def analyze_release_cadence(name: str) -> dict:
    """Analyze how frequently a package is released.

    Yanked releases are excluded from the interval arithmetic.

    NOTE(review): the early-return branches use keys {"releases",
    "cadence_days"} while the full result uses "avg_days_between" etc. —
    callers must handle both shapes.
    """
    history = get_release_history(name)
    if len(history) < 2:
        return {"releases": len(history), "cadence_days": None}
    # Calculate average days between releases
    from datetime import date
    dates = sorted([r["upload_date"] for r in history if not r["yanked"]])
    if len(dates) < 2:
        # Fewer than two non-yanked releases: no interval to measure.
        return {"releases": len(history), "cadence_days": None}
    intervals = []
    for i in range(1, len(dates)):
        d1 = date.fromisoformat(dates[i-1])
        d2 = date.fromisoformat(dates[i])
        intervals.append((d2 - d1).days)
    return {
        "releases": len(history),
        "first_release": dates[0],
        "latest_release": dates[-1],
        "avg_days_between": sum(intervals) / len(intervals) if intervals else None,
        "yanked_count": sum(1 for r in history if r["yanked"]),
    }
# Demo: list Django's 8 most recent releases with wheel/sdist markers.
releases = get_release_history("django")
print(f"Django: {len(releases)} releases")
for r in releases[:8]:
    yanked = " [YANKED]" if r["yanked"] else ""
    wheel = "W" if r["has_wheel"] else " "
    sdist = "S" if r["has_sdist"] else " "
    print(f" {r['version']:15s} {r['upload_date']} [{wheel}{sdist}] {r['total_size_kb']:>8,} KB{yanked}")
Download Statistics via pypistats
PyPI doesn't serve download counts through its JSON API — that data lives in Google BigQuery. The pypistats library wraps the public API:
import pypistats
import json
def get_download_stats(name: str) -> dict:
    """Fetch 30-day download statistics for *name* via pypistats.

    Returns a summary dict with the overall total, the five most popular
    Python minor versions, and an OS breakdown. On any failure returns
    ``{"name", "error"}`` instead of raising, so batch callers keep going.
    """
    try:
        # Overall downloads (last 30 days)
        overall = json.loads(pypistats.overall(name, total=True, format="json"))
        total_row = next(
            (row for row in overall["data"] if row["category"] == "Total"), {}
        )

        # By Python version
        versions = json.loads(pypistats.python_minor(name, total=True, format="json"))
        ranked = sorted(
            (row for row in versions["data"] if row["category"] not in ("null", None, "")),
            key=lambda row: row["downloads"],
            reverse=True,
        )
        top_versions = ranked[:5]

        # By system
        systems = json.loads(pypistats.system(name, total=True, format="json"))
        sys_breakdown = {}
        for row in systems["data"]:
            if row["category"]:
                sys_breakdown[row["category"]] = row["downloads"]

        return {
            "name": name,
            "total_30d": total_row.get("downloads", 0),
            "top_python_versions": [
                {"version": row["category"], "downloads": row["downloads"]}
                for row in top_versions
            ],
            "by_system": sys_breakdown,
        }
    except Exception as e:
        return {"name": name, "error": str(e)}
# Demo: print the 30-day download summary.
stats = get_download_stats("requests")
# FIX: get_download_stats returns {"name", "error"} on failure, in which
# case the original stats['total_30d'] lookup raised KeyError.
if "error" in stats:
    print(f"requests: stats unavailable ({stats['error']})")
else:
    print(f"requests: {stats['total_30d']:,} downloads (last 30 days)")
print("Top Python versions:")
for v in stats.get("top_python_versions", []):
    print(f" Python {v['version']}: {v['downloads']:,}")
print(f"By OS: {stats.get('by_system', {})}")
BigQuery for Custom Download Queries
For custom historical analysis, query BigQuery directly. The dataset is bigquery-public-data.pypi.file_downloads:
-- Monthly downloads for a package over the past year.
-- file.project is the (normalized) package name; timestamp is the
-- download time, partitioned by day — the DATE() filter limits the scan.
SELECT
  file.project AS package,
  FORMAT_DATE('%Y-%m', DATE(timestamp)) AS month,
  COUNT(*) AS downloads
FROM `bigquery-public-data.pypi.file_downloads`
WHERE file.project = 'requests'
  AND DATE(timestamp) >= DATE_SUB(CURRENT_DATE(), INTERVAL 12 MONTH)
GROUP BY package, month
ORDER BY month DESC;
-- Compare package ecosystems (30-day totals plus distinct installer count).
-- FIX: the installer name lives under the `details` record
-- (details.installer.name), not under `file` — `file` only carries
-- project/version/type/filename.
SELECT
  file.project,
  COUNT(*) AS total_downloads,
  APPROX_COUNT_DISTINCT(details.installer.name) AS installer_count
FROM `bigquery-public-data.pypi.file_downloads`
WHERE file.project IN ('django', 'flask', 'fastapi', 'tornado', 'aiohttp')
  AND DATE(timestamp) >= DATE_SUB(CURRENT_DATE(), INTERVAL 30 DAY)
GROUP BY file.project
ORDER BY total_downloads DESC;
-- Python version adoption for a package (last 30 days).
-- details.python is the client's reported Python version string; NULLs
-- (downloads with no version metadata) are excluded.
SELECT
  details.python AS python_version,
  COUNT(*) AS downloads
FROM `bigquery-public-data.pypi.file_downloads`
WHERE file.project = 'numpy'
  AND DATE(timestamp) >= DATE_SUB(CURRENT_DATE(), INTERVAL 30 DAY)
  AND details.python IS NOT NULL
GROUP BY python_version
ORDER BY downloads DESC
LIMIT 15;
The BigQuery dataset is updated daily and goes back to 2016.
Crawling PyPI at Scale
For crawling the full catalog of 600K+ packages, use the Simple API:
def get_all_package_names() -> list[str]:
    """Return every project name on PyPI via the JSON-flavoured Simple API.

    Requesting the v1+json media type (PEP 691 content negotiation) yields
    a machine-readable index instead of the classic PEP 503 HTML page.
    """
    response = client.get(
        "https://pypi.org/simple/",
        headers={"Accept": "application/vnd.pypi.simple.v1+json"},
    )
    response.raise_for_status()
    payload = response.json()
    return [project["name"] for project in payload["projects"]]
def crawl_packages_batch(
    names: list[str],
    concurrency: int = 5,
    delay: float = 0.12,
) -> list[dict]:
    """Crawl a batch of packages with rate limiting.

    Args:
        names: Package names to fetch via get_package().
        concurrency: NOTE(review): accepted but never used — requests run
            strictly sequentially; either wire up a thread pool or drop it.
        delay: Sleep between requests in seconds (~8 req/s at the default).

    Returns:
        Metadata dicts for every package that fetched successfully; 404s
        (deleted/renamed projects) are silently skipped.
    """
    results = []
    errors = 0
    for i, name in enumerate(names):
        try:
            pkg = get_package(name)
            results.append(pkg)
            # Progress line every 100 packages.
            if (i + 1) % 100 == 0:
                print(f" {i+1}/{len(names)} packages ({errors} errors)")
        except httpx.HTTPStatusError as e:
            if e.response.status_code == 404:
                pass  # Package deleted or renamed
            else:
                errors += 1
                print(f" Error {name}: {e.response.status_code}")
        except Exception as e:
            errors += 1
            if errors < 10:  # stop spamming the console after ten errors
                print(f" Error {name}: {e}")
        time.sleep(delay)
    return results
Scraping all 600K+ packages at 5 req/s takes about 33 hours. At that scale, your IP will get throttled. ThorData's rotating residential proxies solve this cleanly — distribute requests across IPs to stay within per-IP rate limits:
# For large-scale PyPI crawls, rotate proxies — this client routes every
# request through the configured proxy endpoint.
proxied_client = httpx.Client(
    proxy="http://USER:[email protected]:9000",
    timeout=30,
    headers={"Accept": "application/json"},
)
def get_package_proxied(name: str) -> dict:
    """Fetch a trimmed metadata dict for *name* through the proxy client.

    Raises:
        httpx.HTTPStatusError: On non-2xx responses.
    """
    resp = proxied_client.get(f"https://pypi.org/pypi/{name}/json")
    resp.raise_for_status()
    data = resp.json()
    info = data["info"]
    return {
        "name": info["name"],
        "version": info["version"],
        # FIX: PyPI returns JSON null for a missing summary, and dict.get's
        # default does not replace an explicit None — coerce with `or`.
        "summary": info.get("summary") or "",
        "requires_dist": info.get("requires_dist") or [],
    }
SQLite Storage Pipeline
def init_pypi_db(db_path: str = "pypi.db") -> sqlite3.Connection:
    """Create (if needed) and return a connection to the PyPI SQLite DB.

    Switches the database to WAL journaling for better read/write
    concurrency, then creates the packages/releases/download_stats tables
    and supporting indexes idempotently.
    """
    conn = sqlite3.connect(db_path)
    conn.execute("PRAGMA journal_mode=WAL")

    schema = """
    CREATE TABLE IF NOT EXISTS packages (
        name TEXT PRIMARY KEY,
        canonical_name TEXT,
        version TEXT,
        summary TEXT,
        author TEXT,
        author_email TEXT,
        license TEXT,
        requires_python TEXT,
        keywords TEXT,
        total_releases INTEGER,
        requires_dist TEXT,
        classifiers TEXT,
        project_urls TEXT,
        yanked INTEGER DEFAULT 0,
        fetched_at TEXT DEFAULT (datetime('now'))
    );
    CREATE TABLE IF NOT EXISTS releases (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        package_name TEXT NOT NULL,
        version TEXT NOT NULL,
        upload_date TEXT,
        total_size_kb INTEGER,
        file_count INTEGER,
        has_wheel INTEGER,
        has_sdist INTEGER,
        yanked INTEGER DEFAULT 0,
        yanked_reason TEXT,
        UNIQUE(package_name, version),
        FOREIGN KEY (package_name) REFERENCES packages(name)
    );
    CREATE TABLE IF NOT EXISTS download_stats (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        package_name TEXT NOT NULL,
        period TEXT,
        total_downloads INTEGER,
        fetched_at TEXT DEFAULT (datetime('now')),
        FOREIGN KEY (package_name) REFERENCES packages(name)
    );
    CREATE INDEX IF NOT EXISTS idx_packages_license ON packages(license);
    CREATE INDEX IF NOT EXISTS idx_packages_python ON packages(requires_python);
    CREATE INDEX IF NOT EXISTS idx_releases_date ON releases(upload_date);
    """
    conn.executescript(schema)
    conn.commit()
    return conn
def upsert_package(conn: sqlite3.Connection, pkg: dict) -> bool:
    """Insert or update a package row. Returns True if newly inserted."""
    # Probe first so we can report insert-vs-update to the caller.
    already_there = conn.execute(
        "SELECT name FROM packages WHERE name = ?", (pkg["name"],)
    ).fetchone() is not None

    row = (
        pkg["name"],
        pkg.get("canonical_name"),
        pkg["version"],
        pkg.get("summary"),
        pkg.get("author"),
        pkg.get("author_email"),
        pkg.get("license"),
        pkg.get("requires_python"),
        pkg.get("keywords"),
        pkg.get("total_releases", 0),
        json.dumps(pkg.get("requires_dist", [])),   # lists/dicts stored as JSON text
        json.dumps(pkg.get("classifiers", [])),
        json.dumps(pkg.get("project_urls", {})),
        int(pkg.get("yanked", False)),
        datetime.now(timezone.utc).isoformat(),
    )
    # On conflict, keep the row but refresh the frequently-changing fields.
    conn.execute(
        """
        INSERT INTO packages
          (name, canonical_name, version, summary, author, author_email,
           license, requires_python, keywords, total_releases,
           requires_dist, classifiers, project_urls, yanked, fetched_at)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(name) DO UPDATE SET
          version=excluded.version,
          summary=excluded.summary,
          total_releases=excluded.total_releases,
          requires_dist=excluded.requires_dist,
          yanked=excluded.yanked,
          fetched_at=excluded.fetched_at
        """,
        row,
    )
    conn.commit()
    return not already_there
def insert_releases(conn: sqlite3.Connection, package_name: str, releases: list[dict]) -> None:
    """Store release rows for *package_name*; duplicate versions are ignored."""
    sql = """
        INSERT OR IGNORE INTO releases
          (package_name, version, upload_date, total_size_kb, file_count,
           has_wheel, has_sdist, yanked, yanked_reason)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    """
    for rel in releases:
        params = (
            package_name,
            rel["version"],
            rel["upload_date"],
            rel["total_size_kb"],
            rel["files"],
            int(rel["has_wheel"]),
            int(rel["has_sdist"]),
            int(rel["yanked"]),
            rel.get("yanked_reason", ""),
        )
        try:
            conn.execute(sql, params)
        except sqlite3.IntegrityError:
            # OR IGNORE already swallows UNIQUE conflicts; this guards any
            # other integrity failure (e.g. FK violations when enforced).
            pass
    conn.commit()
Monitoring Package Changes
For ongoing monitoring — tracking new releases, yanked versions, maintainer changes:
def get_recent_updates(package: str) -> list[dict]:
    """Check PyPI's RSS feed for recent package updates."""
    resp = client.get(f"https://pypi.org/rss/project/{package}/releases.xml")
    resp.raise_for_status()
    import xml.etree.ElementTree as ET
    feed = ET.fromstring(resp.text)
    # One dict per <item> entry; missing tags default to "".
    return [
        {
            "title": item.findtext("title", ""),
            "link": item.findtext("link", ""),
            "date": item.findtext("pubDate", ""),
            "description": item.findtext("description", ""),
        }
        for item in feed.findall(".//item")
    ]
def monitor_packages(package_list: list[str], db: sqlite3.Connection) -> list[dict]:
    """Check for new releases across a list of packages.

    Pulls each package's RSS release feed and compares entry dates against
    the newest stored release date in the local DB, so ``is_new`` actually
    reflects "newer than anything we've recorded" (the original computed
    ``latest_stored`` and then discarded it, hardcoding ``is_new: True``).

    Args:
        package_list: PyPI package names to poll.
        db: Connection whose ``releases`` table holds previously seen releases.

    Returns:
        Up to 3 feed entries per package, each with an ``is_new`` flag.
    """
    from email.utils import parsedate_to_datetime  # parses RFC 2822 pubDate

    new_releases = []
    for name in package_list:
        try:
            updates = get_recent_updates(name)
            if updates:
                # Newest stored upload date (ISO yyyy-mm-dd) or None when the
                # package has no stored releases yet.
                latest_stored = db.execute(
                    "SELECT MAX(upload_date) FROM releases WHERE package_name = ?",
                    (name,)
                ).fetchone()[0]
                for update in updates[:3]:
                    # Normalize the RSS pubDate to an ISO date string so it
                    # compares lexicographically with upload_date.
                    try:
                        pub_iso = parsedate_to_datetime(update["date"]).date().isoformat()
                        is_new = latest_stored is None or pub_iso > latest_stored
                    except (TypeError, ValueError):
                        is_new = True  # unparseable date: err on the side of reporting
                    new_releases.append({
                        "package": name,
                        "title": update["title"],
                        "date": update["date"],
                        "link": update["link"],
                        "is_new": is_new,
                    })
            time.sleep(0.2)
        except Exception as e:
            print(f"Monitor error for {name}: {e}")
    return new_releases
# Monitor your critical dependencies
critical_deps = [
    "django", "fastapi", "sqlalchemy", "pydantic",
    "httpx", "requests", "cryptography", "pillow"
]
db = init_pypi_db("pypi_monitor.db")
print("Checking for recent releases...")
new_releases = monitor_packages(critical_deps, db)
# Show at most the first 10 hits.
for r in new_releases[:10]:
    print(f" {r['package']}: {r['title']} ({r['date']})")
Vulnerability Cross-Reference
Combine PyPI metadata with OSV (Open Source Vulnerabilities) for security scanning:
def check_vulnerabilities(name: str, version: str) -> list[dict]:
    """Check a package version against the OSV vulnerability database."""
    query = {
        "package": {
            "name": name,
            "ecosystem": "PyPI",
        },
        "version": version,
    }
    resp = httpx.post(
        "https://api.osv.dev/v1/query",
        json=query,
        timeout=15,
    )
    resp.raise_for_status()
    findings = []
    # OSV omits the "vulns" key entirely when there are no matches.
    for vuln in resp.json().get("vulns", []):
        findings.append({
            "id": vuln["id"],
            "summary": vuln.get("summary", ""),
            "severity": vuln.get("database_specific", {}).get("severity", "UNKNOWN"),
            "published": vuln.get("published", ""),
            "aliases": vuln.get("aliases", []),
        })
    return findings
# Scan your deps for known vulnerabilities
packages_to_audit = [
    ("django", "4.0.0"),
    ("pillow", "9.0.0"),
    ("cryptography", "38.0.0"),
]
for pkg_name, pkg_version in packages_to_audit:
    vulns = check_vulnerabilities(pkg_name, pkg_version)
    if vulns:
        print(f"\n{pkg_name} {pkg_version}: {len(vulns)} vulnerabilities")
        for v in vulns:
            # Truncate long summaries so each finding stays on one line.
            print(f" [{v['severity']}] {v['id']}: {v['summary'][:80]}")
    else:
        print(f"{pkg_name} {pkg_version}: no known vulnerabilities")
Practical Tips
- Cache aggressively. Package metadata for older versions never changes. Cache everything except the latest version endpoint.
- Use conditional requests. PyPI supports `If-Modified-Since` and `ETag` headers — use them to avoid re-downloading unchanged data.
- Check `yanked` status. Yanked releases still appear in the API but shouldn't be installed. Your scraper should flag them.
- Normalize package names. `Pillow`, `pillow`, and `PILLOW` are the same package. Use `name.lower().replace("-", "_")` for consistent keys.
- Respect robots.txt. PyPI's robots.txt allows the JSON API and Simple API but restricts HTML scraping. Don't scrape HTML pages when an API exists.
- Watch the rate limits. PyPI's JSON API has no documented hard rate limit, but sustained traffic over 10 req/s from a single IP will get throttled. Stay under 8 req/s to be safe.
PyPI is one of the most scraper-friendly platforms out there. The JSON API is stable, fast, and well-structured. Start there, add pypistats for download counts, query BigQuery for custom analytics, and scale with rotating proxies when you need to crawl the full index.
Discovering Trending and New Packages
PyPI provides RSS feeds for new uploads and package updates — useful for trend tracking without polling the full index:
import xml.etree.ElementTree as ET
def _parse_rss_items(xml_text: str, limit: int) -> list[dict]:
    """Parse up to *limit* <item> entries from a PyPI RSS feed into dicts."""
    root = ET.fromstring(xml_text)
    return [
        {
            "title": item.findtext("title", ""),
            "link": item.findtext("link", ""),
            "description": item.findtext("description", ""),
            "published": item.findtext("pubDate", ""),
        }
        for item in root.findall(".//item")[:limit]
    ]

def get_newest_packages(limit: int = 40) -> list[dict]:
    """Get the most recently uploaded packages from PyPI RSS."""
    resp = client.get("https://pypi.org/rss/packages.xml")
    resp.raise_for_status()
    return _parse_rss_items(resp.text, limit)

def get_newest_updates(limit: int = 40) -> list[dict]:
    """Get the most recently updated packages from PyPI RSS."""
    resp = client.get("https://pypi.org/rss/updates.xml")
    resp.raise_for_status()
    return _parse_rss_items(resp.text, limit)
# Check what's new
print("Newest packages:")
for p in get_newest_packages(10):
    # Truncate descriptions to keep one line per package.
    print(f" {p['title']}: {p['description'][:80]}")
Analyzing the Python Ecosystem by License
One practical use case: understanding license distribution across the ecosystem, relevant for compliance audits:
def analyze_licenses(conn: sqlite3.Connection) -> list[dict]:
    """Get the license distribution from the database (top 30 licenses)."""
    query = """
        SELECT license, COUNT(*) AS count
        FROM packages
        WHERE license IS NOT NULL AND license != ''
        GROUP BY license
        ORDER BY count DESC
        LIMIT 30
    """
    return [
        {"license": license_text, "count": total}
        for license_text, total in conn.execute(query)
    ]
def find_permissive_packages(conn: sqlite3.Connection) -> list[dict]:
    """Find packages with permissive licenses (MIT, BSD, Apache, Public Domain)."""
    query = """
        SELECT name, version, license, summary
        FROM packages
        WHERE license LIKE '%MIT%'
           OR license LIKE '%BSD%'
           OR license LIKE '%Apache%'
           OR license LIKE '%Public Domain%'
           OR license LIKE '%CC0%'
        ORDER BY name
    """
    return [
        {"name": name, "version": version, "license": lic, "summary": summary}
        for name, version, lic, summary in conn.execute(query)
    ]
def find_gpl_dependencies(dep_tree: dict) -> list[str]:
    """Identify GPL-licensed packages in a dependency tree — copyleft risk."""
    flagged = []
    for pkg_name in dep_tree:
        try:
            meta = get_package(pkg_name)
            license_text = (meta.get("license") or "").upper()
            # "GPL" alone also matches "LGPL", so exclude the weak-copyleft case.
            if "GPL" in license_text and "LGPL" not in license_text:
                flagged.append(pkg_name)
        except Exception:
            pass
        time.sleep(0.1)
    return flagged
Detecting Abandoned Packages
Packages with no releases for 3+ years and no stated maintainer are security risks in dependency trees:
def find_abandoned_packages(conn: sqlite3.Connection, years_inactive: int = 3) -> list[dict]:
    """Find packages with no releases in N years (or none recorded at all)."""
    # Coarse cutoff: Jan 1 of (current year - N). ISO yyyy-mm-dd strings
    # compare correctly as plain text in SQLite.
    cutoff = f"{datetime.now().year - years_inactive}-01-01"
    query = """
        SELECT p.name, p.version, p.author, MAX(r.upload_date) AS last_release
        FROM packages p
        LEFT JOIN releases r ON r.package_name = p.name
        GROUP BY p.name
        HAVING last_release < ? OR last_release IS NULL
        ORDER BY last_release ASC
        LIMIT 100
    """
    results = []
    for name, version, author, last_release in conn.execute(query, (cutoff,)):
        results.append({
            "name": name,
            "version": version,
            "author": author,
            "last_release": last_release or "unknown",
        })
    return results
def check_maintained_status(package_name: str) -> dict:
    """Check if a package appears actively maintained.

    Classification: released within 365 days -> "active", within 730 ->
    "slow", otherwise "abandoned". Unparseable dates are treated as
    abandoned rather than crashing.

    Returns:
        {"name", "status"} when no releases exist; otherwise the full
        summary dict including days_since_release and yanked counts.
    """
    # NOTE: the original also called get_package() here and discarded the
    # result — one wasted HTTP round-trip per audit; removed.
    history = get_release_history(package_name)
    if not history:
        return {"name": package_name, "status": "unknown"}
    latest = history[0]
    from datetime import date
    try:
        last_date = date.fromisoformat(latest["upload_date"])
        days_since = (date.today() - last_date).days
    except ValueError:
        days_since = None
    # BUG FIX: `(days_since or 9999)` treated a release made *today*
    # (days_since == 0, which is falsy) as unknown and reported the package
    # "abandoned". Compare against None explicitly instead.
    effective = days_since if days_since is not None else 9999
    if effective < 365:
        status = "active"
    elif effective < 730:
        status = "slow"
    else:
        status = "abandoned"
    return {
        "name": package_name,
        "latest_version": latest["version"],
        "last_release": latest["upload_date"],
        "days_since_release": days_since,
        "total_releases": len(history),
        "status": status,
        "yanked_releases": sum(1 for r in history if r["yanked"]),
    }
# Audit a set of dependencies
audit_list = ["requests", "urllib3", "certifi", "charset-normalizer", "idna"]
for pkg in audit_list:
    info = check_maintained_status(pkg)
    print(f" {info['name']:25s} v{info['latest_version']:12s} "
          f"[{info['status'].upper():8s}] last: {info['last_release']}")
    time.sleep(0.15)  # polite pacing between API calls