@LandonTClipp
Created January 8, 2026 21:06
#!/usr/bin/env python3
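"""
Depth-first crawl of a Redfish service tree.

Starting from the root URL, fetch each resource over HTTP(S) with basic auth,
print it as pretty-printed JSON prefixed with a "# <url>" comment line, and
follow every @odata.id link found in the document. The crawl stays on the
root's host and skips /Actions/ endpoints, which are generally POST-only.
"""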
import argparse
import json
import sys
import time
from collections import deque
from urllib.parse import urljoin, urlparse

import requests


def iter_odata_links(obj):
    """
    Yield every string found in '@odata.id' fields anywhere in a JSON structure.
    """
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == "@odata.id" and isinstance(v, str):
                yield v
            else:
                yield from iter_odata_links(v)
    elif isinstance(obj, list):
        for item in obj:
            yield from iter_odata_links(item)


def normalize_link(link, base_url):
    """
    Turn relative Redfish links into absolute URLs based on the provided base_url.
    """
    # Some Redfish links are absolute, some are relative (start with '/')
    if link.startswith("http://") or link.startswith("https://"):
        return link
    return urljoin(base_url, link)


def same_host(url_a, url_b):
    return urlparse(url_a).netloc == urlparse(url_b).netloc


def should_follow(url):
    """
    Skip known POST-only / side-effectful action endpoints, which often live under /Actions/.
    """
    # Conservative: don't try to GET action targets
    return "/Actions/" not in url


def fetch_json(session, url, verify, timeout):
    r = session.get(url, verify=verify, timeout=timeout)
    r.raise_for_status()
    # Some BMCs return application/json; others may set weird content-types but still be JSON
    return r.json()


def main():
    ap = argparse.ArgumentParser(description="Depth-first crawl of Redfish @odata.id links.")
    ap.add_argument("root", help="Root Redfish URL, e.g. https://10.13.55.161/redfish/v1")
    ap.add_argument("-u", "--username", required=True, help="Username")
    ap.add_argument("-p", "--password", required=True, help="Password")
    ap.add_argument("--insecure", action="store_true", help="Disable TLS verification")
    ap.add_argument("--timeout", type=float, default=10, help="Per-request timeout (seconds)")
    ap.add_argument("--sleep", type=float, default=0.0, help="Sleep between requests (seconds)")
    ap.add_argument("--max", dest="max_docs", type=int, default=0,
                    help="Max number of documents to fetch (0 = unlimited)")
    ap.add_argument("--headers", action="append", default=[],
                    help='Extra header, can repeat. Format: "Name: value"')
    args = ap.parse_args()

    root = args.root.rstrip("/")
    verify = not args.insecure

    session = requests.Session()
    session.auth = (args.username, args.password)
    # Default Redfish headers
    session.headers.update({
        "Accept": "application/json"
    })
    # Any user-specified headers
    for h in args.headers:
        if ":" not in h:
            print(f"Invalid header format: {h}", file=sys.stderr)
            sys.exit(2)
        name, value = h.split(":", 1)
        session.headers[name.strip()] = value.strip()

    visited = set()
    stack = deque([root])  # DFS: use stack (LIFO)
    base_host = urlparse(root).netloc
    fetched_count = 0

    while stack:
        if args.max_docs and fetched_count >= args.max_docs:
            break
        url = stack.pop()
        # Normalize trivial trailing slashes
        if url.endswith("/"):
            url = url[:-1]
        if url in visited:
            continue
        visited.add(url)
        # Stay on same host, and avoid action endpoints
        if not same_host(url, root) or not should_follow(url):
            continue
        try:
            doc = fetch_json(session, url, verify, timeout=args.timeout)
        except Exception as e:
            print(f"# ERROR fetching {url}: {e}", file=sys.stderr)
            continue
        # Print the document with a comment header so you can grep/segment output
        print(f"# {url}")
        print(json.dumps(doc, indent=2, sort_keys=True))
        sys.stdout.flush()
        fetched_count += 1
        # Find next links and push onto stack (DFS)
        try:
            links = list(iter_odata_links(doc))
        except Exception as e:
            print(f"# ERROR extracting links from {url}: {e}", file=sys.stderr)
            links = []
        # Normalize + filter + push
        for link in links:
            abs_url = normalize_link(link, url + "/")  # urljoin needs trailing slash for relative paths
            # Keep to same host; avoid re-adding visited
            if same_host(abs_url, root) and should_follow(abs_url) and abs_url not in visited:
                stack.append(abs_url)
        if args.sleep > 0:
            time.sleep(args.sleep)


if __name__ == "__main__":
    main()
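# Example invocation (a sketch: the script name, credentials, and output file
# are placeholders; the root URL matches the argparse help text above):
#
#   python3 redfish_crawl.py https://10.13.55.161/redfish/v1 \
#       -u admin -p 'secret' --insecure --max 25 > redfish_dump.txt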