Skip to content

Instantly share code, notes, and snippets.

@Nadiar
Created March 12, 2026 23:50
Show Gist options
  • Select an option

  • Save Nadiar/7af28b3c53122e3fa0e3ec4cab7e8675 to your computer and use it in GitHub Desktop.

Select an option

Save Nadiar/7af28b3c53122e3fa0e3ec4cab7e8675 to your computer and use it in GitHub Desktop.
Example of pulling recently released comics from the ComicVine API
import requests
import sys
from datetime import datetime, timedelta
def run_demo(api_key):
    """Fetch and print comic issues released within +/- 1 week of today.

    Queries the ComicVine issues API for store dates inside a window
    snapped outward to full Mon-Sun weeks, resolves each issue's
    publisher via batched volume lookups, and prints a table.

    Args:
        api_key: ComicVine API key string.
    """
    headers = {"User-Agent": "Comic-Calendar-Client/1.0"}
    # Calculate date range: +/- 1 week rounded outward to whole weeks.
    today = datetime.now()
    # Snap start back to Monday of last week (weekday(): Mon=0 .. Sun=6).
    raw_start = today - timedelta(days=7)
    start_snap = raw_start - timedelta(days=raw_start.weekday())
    # Snap end forward to Sunday of next week.
    raw_end = today + timedelta(days=7)
    end_snap = raw_end + timedelta(days=(6 - raw_end.weekday()))
    start_str = start_snap.strftime('%Y-%m-%d')
    end_str = end_snap.strftime('%Y-%m-%d')
    print(f"Fetching range: {start_str} to {end_str}")

    # 1. Fetch all issues in the window (paginated).
    all_issues = _fetch_issues(api_key, headers, start_str, end_str)
    # 2. Batch-fetch publishers for the unique volumes referenced.
    pub_map = _fetch_publishers(api_key, headers, all_issues)

    # 3. Output results.
    print(f"\n{'DATE':<12} | {'PUBLISHER':<15} | {'ISSUE'}")
    print("-" * 65)
    for issue in all_issues:
        # 'volume' can be missing/null in API results — guard it, as the
        # publisher-collection step above already does.
        volume = issue.get('volume') or {}
        p_name = pub_map.get(volume.get('id'), "N/A")
        v_name = volume.get('name', 'Unknown')
        # store_date can be null; format() raises TypeError on None,
        # so substitute a placeholder.
        date = issue.get('store_date') or 'N/A'
        print(f"{date:<12} | {p_name[:15]:<15} | {v_name} #{issue['issue_number']}")


def _fetch_issues(api_key, headers, start_str, end_str):
    """Return all issues with store_date in [start_str, end_str], paginated.

    Best-effort: on any request/parse error, logs and returns whatever
    pages were already fetched.
    """
    all_issues = []
    offset = 0
    url = "https://comicvine.gamespot.com/api/issues/"
    while True:
        params = {
            "api_key": api_key,
            "format": "json",
            "limit": 100,
            "offset": offset,
            "filter": f"store_date:{start_str}|{end_str}",
            "field_list": "issue_number,store_date,volume",
        }
        try:
            # timeout prevents an indefinite hang on a stalled connection;
            # raise_for_status surfaces HTTP errors instead of parsing an
            # error page as JSON.
            resp = requests.get(url, params=params, headers=headers, timeout=30)
            resp.raise_for_status()
            data = resp.json()
        except Exception as e:
            print(f"API Error: {e}")
            break
        results = data.get('results', [])
        all_issues.extend(results)
        total = data.get('number_of_total_results', 0)
        if len(all_issues) >= total or not results:
            break
        offset += 100
    return all_issues


def _fetch_publishers(api_key, headers, issues):
    """Return {volume_id: publisher_name} for the volumes in *issues*.

    ComicVine's id filter accepts roughly 100 IDs per call, so lookups
    are issued in chunks of 100 (previously IDs past 100 were silently
    dropped). Volumes whose lookup fails simply stay out of the map.
    """
    vol_ids = sorted({str(i['volume']['id']) for i in issues if i.get('volume')})
    pub_map = {}
    v_url = "https://comicvine.gamespot.com/api/volumes/"
    for chunk_start in range(0, len(vol_ids), 100):
        chunk = vol_ids[chunk_start:chunk_start + 100]
        v_params = {
            "api_key": api_key,
            "format": "json",
            "filter": f"id:{'|'.join(chunk)}",
            "field_list": "id,publisher",
        }
        try:
            v_resp = requests.get(v_url, params=v_params, headers=headers,
                                  timeout=30)
            v_resp.raise_for_status()
            v_data = v_resp.json()
        except Exception as e:
            print(f"API Error: {e}")
            continue
        for v in v_data.get('results', []):
            # 'publisher' can be present but null — guard before .get.
            pub_map[v['id']] = (v.get('publisher') or {}).get('name', 'N/A')
    return pub_map
if __name__ == "__main__":
    # Require the ComicVine API key as the sole CLI argument.
    if len(sys.argv) >= 2:
        run_demo(sys.argv[1])
    else:
        print("Usage: python3 calendarquery.py <API_KEY>")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment