#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = ["pycryptodome"]
# ///
"""
Apple AirPort firmware (basebinary) parser and extractor.
Parses APPLE-FIRMWARE format files, handles AES-CBC decryption,
extracts kernel images, bootloaders, and FFS filesystems.
Supported models: 107, 108, 115, 120
"""

import argparse
import gzip
import io
import logging
import struct
import sys
import zlib
from pathlib import Path

from Crypto.Cipher import AES

# Known encryption keys by model number (128-bit AES keys)
BASEBINARY_KEYS = {
    107: bytes.fromhex("5249c351028bf1fd2bd1849e28b23f24"),
    108: bytes.fromhex("bb7deb0970d8ee2e00fa46cb1c3c098e"),
    115: bytes.fromhex("1075e806f4770cd4763bd285a64e9174"),
    120: bytes.fromhex("688cdd3b1b6bdda207b6cec2735292d2"),
}

HEADER_MAGIC = b"APPLE-FIRMWARE\x00"
HEADER_FORMAT = struct.Struct(">15sB2I4BI")
HEADER_SIZE = HEADER_FORMAT.size
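
# Basebinary header layout (32 bytes, big-endian), as unpacked by parse_header():
#   0x00-0x0E  magic "APPLE-FIRMWARE\0"
#   0x0F       single byte, also used as the final IV byte during decryption
#   0x10-0x13  model number (uint32)
#   0x14-0x17  firmware version (uint32)
#   0x18-0x1A  three unknown bytes
#   0x1B       flags (bit 1, value 0x02 = payload is encrypted)
#   0x1C-0x1F  unknown uint32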

# FFS (Unix Fast File System) constants
FFS_MAGIC = 0x00011954
FFS_MAGIC_BYTES_BE = bytes([0x00, 0x01, 0x19, 0x54])
FFS_SUPERBLOCK_OFFSET = 0x2000  # Superblock is 8KB into partition
FFS_MAGIC_OFFSET_IN_SB = 0x55C  # Magic is at this offset in superblock


class BasebinaryError(Exception):
    """Exception raised for basebinary parsing errors."""
    pass


def derive_key(model: int) -> bytes:
    """Derive the actual decryption key from the stored key for a model."""
    if model not in BASEBINARY_KEYS:
        raise BasebinaryError(f"No key available for model {model}")
    base_key = BASEBINARY_KEYS[model]
    derived = bytes(b ^ (i + 0x19) for i, b in enumerate(base_key))
    logging.debug(f"Derived key: {derived.hex()}")
    return derived
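
# Note: the 16-byte keys in BASEBINARY_KEYS are stored obfuscated; derive_key()
# recovers the real AES key by XORing each byte with its index plus 0x19
# (so byte 0 is XORed with 0x19, byte 15 with 0x28).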


def parse_header(data: bytes) -> dict:
    """Parse the basebinary header."""
    (
        magic,
        byte_0x0F,
        model,
        version,
        byte_0x18,
        byte_0x19,
        byte_0x1A,
        flags,
        unk_0x1C,
    ) = HEADER_FORMAT.unpack(data)
    if magic != HEADER_MAGIC:
        raise BasebinaryError(f"Bad header magic: {magic!r}")
    return {
        "byte_0x0F": byte_0x0F,
        "model": model,
        "version": version,
        "flags": flags,
        "encrypted": bool(flags & 2),
    }


def decrypt_chunk(encrypted_data: bytes, key: bytes, iv: bytes) -> bytes:
    """Decrypt a single chunk using AES-CBC."""
    cipher = AES.new(key, AES.MODE_CBC, iv)
    decrypted = bytearray()
    offset = 0
    length = len(encrypted_data)
    while offset < length:
        remaining = length - offset
        if remaining >= 16:
            block_size = (remaining // 16) * 16
            decrypted.extend(
                cipher.decrypt(encrypted_data[offset : offset + block_size])
            )
            offset += block_size
        else:
            decrypted.extend(encrypted_data[offset:])
            break
    return bytes(decrypted)


def decrypt(data: bytes, model: int, byte_0x0F: int) -> bytes:
    """Decrypt the firmware payload."""
    iv = HEADER_MAGIC + bytes([byte_0x0F])
    key = derive_key(model)
    decrypted = bytearray()
    chunk_size = 0x8000
    offset = 0
    while offset < len(data):
        chunk = data[offset : offset + chunk_size]
        decrypted.extend(decrypt_chunk(chunk, key, iv))
        offset += chunk_size
    return bytes(decrypted)
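
# Decryption scheme implemented above: the 16-byte IV is the 15-byte header
# magic followed by byte_0x0F from the header. The payload is processed in
# 0x8000-byte chunks, each decrypted with a freshly initialised AES-CBC cipher
# (so every chunk restarts the CBC chain from the same IV), and a trailing
# partial block shorter than 16 bytes is copied through unmodified.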


def is_basebinary(data: bytes) -> bool:
    """Check if data starts with basebinary header magic."""
    return len(data) >= HEADER_SIZE and data[:15] == HEADER_MAGIC[:15]


def parse(data: bytes) -> tuple[bytes, dict]:
    """Parse a basebinary firmware file."""
    if len(data) < HEADER_SIZE + 4:
        raise BasebinaryError(f"File too small: {len(data)} bytes")
    header_data = data[:HEADER_SIZE]
    inner_data = data[HEADER_SIZE:-4]
    stored_checksum = struct.unpack(">I", data[-4:])[0]
    info = parse_header(header_data)
    logging.info(
        f"Model: {info['model']}, Version: {info['version']}, Flags: {info['flags']:#x}"
    )
    if info["encrypted"]:
        logging.info("Firmware is encrypted, decrypting...")
        inner_data = decrypt(inner_data, info["model"], info["byte_0x0F"])
    checksum = zlib.adler32(header_data + inner_data) & 0xFFFFFFFF
    logging.debug(f"Stored checksum: {stored_checksum:#010x}")
    logging.debug(f"Calculated checksum: {checksum:#010x}")
    if stored_checksum != checksum:
        raise BasebinaryError(
            f"Checksum mismatch: stored {stored_checksum:#010x}, calculated {checksum:#010x}"
        )
    return inner_data, info
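
# Overall container layout handled by parse(): a 32-byte header, the (possibly
# encrypted) payload, and a trailing 4-byte big-endian Adler-32 checksum
# computed over the header plus the decrypted payload.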


def find_gzip(data: bytes) -> tuple[int, str | None]:
    """Find a valid gzip stream in data. Returns (offset, original_filename)."""
    gzip_magic = b"\x1f\x8b\x08"
    offset = 0
    while True:
        try:
            gzip_offset = data.index(gzip_magic, offset)
        except ValueError:
            break
        gzdata = data[gzip_offset:]
        try:
            with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
                f.read(1)
            # Extract filename from gzip header
            filename = None
            if len(gzdata) > 10:
                flags = gzdata[3]
                pos = 10
                if flags & 0x04:  # FEXTRA
                    if len(gzdata) > pos + 2:
                        extra_len = gzdata[pos] | (gzdata[pos + 1] << 8)
                        pos += 2 + extra_len
                if flags & 0x08:  # FNAME
                    end = gzdata.index(b"\x00", pos)
                    filename = gzdata[pos:end].decode("latin-1")
            return gzip_offset, filename
        except Exception:
            pass
        offset = gzip_offset + 1
    return -1, None
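
# Gzip member layout relied on above (RFC 1952): a 10-byte fixed header
# (magic 0x1f 0x8b, CM=0x08 for deflate, FLG at offset 3, then MTIME/XFL/OS),
# optionally followed by an FEXTRA field (2-byte little-endian length plus
# payload) and a NUL-terminated FNAME carrying the original file name.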


def extract_gzip(data: bytes, offset: int = 0) -> tuple[bytes, str | None]:
    """Extract gzip-compressed data starting at offset."""
    gzdata = data[offset:]
    # Get filename
    filename = None
    if len(gzdata) > 10:
        flags = gzdata[3]
        pos = 10
        if flags & 0x04:
            if len(gzdata) > pos + 2:
                extra_len = gzdata[pos] | (gzdata[pos + 1] << 8)
                pos += 2 + extra_len
        if flags & 0x08:
            try:
                end = gzdata.index(b"\x00", pos)
                filename = gzdata[pos:end].decode("latin-1")
            except (ValueError, UnicodeDecodeError):
                pass
    with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
        return f.read(), filename


def find_ffs(data: bytes) -> list[tuple[int, dict]]:
    """
    Find FFS (Unix Fast File System) partitions in data.
    Returns list of (partition_offset, superblock_info) tuples.
    """
    results = []
    offset = 0
    while offset < len(data) - FFS_SUPERBLOCK_OFFSET - FFS_MAGIC_OFFSET_IN_SB - 4:
        try:
            magic_offset = data.index(FFS_MAGIC_BYTES_BE, offset)
        except ValueError:
            break
        # Calculate where partition would start
        sb_offset = magic_offset - FFS_MAGIC_OFFSET_IN_SB
        part_offset = sb_offset - FFS_SUPERBLOCK_OFFSET
        if part_offset >= 0 and sb_offset >= 0:
            # Verify this looks like a real superblock
            sb_data = (
                data[sb_offset : sb_offset + 0x600]
                if len(data) > sb_offset + 0x600
                else None
            )
            if sb_data and len(sb_data) >= 0x560:
                # Check magic at expected position
                found_magic = struct.unpack(">I", sb_data[0x55C:0x560])[0]
                if found_magic == FFS_MAGIC:
                    # Parse some superblock fields (UFS1 superblock layout)
                    try:
                        fs_size = struct.unpack(">I", sb_data[0x24:0x28])[0]  # fs_old_size (fragments)
                        fs_bsize = struct.unpack(">I", sb_data[0x30:0x34])[0]  # block size
                        fs_fsize = struct.unpack(">I", sb_data[0x34:0x38])[0]  # fragment size
                        info = {
                            "magic_offset": magic_offset,
                            "superblock_offset": sb_offset,
                            "partition_offset": part_offset,
                            "fs_size_blocks": fs_size,
                            "block_size": fs_bsize,
                            "fragment_size": fs_fsize,
                        }
                        # Estimate filesystem size
                        if fs_bsize > 0 and fs_size > 0:
                            info["estimated_size"] = fs_size * fs_fsize
                        results.append((part_offset, info))
                        logging.debug(
                            f"Found FFS at offset {part_offset:#x}, block_size={fs_bsize}, frag_size={fs_fsize}"
                        )
                    except struct.error:
                        pass
        offset = magic_offset + 1
    return results
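
# The superblock offsets used above follow the classic UFS1 `struct fs` layout:
# fs_old_size (total size in fragments) at 0x24, fs_bsize at 0x30, fs_fsize at
# 0x34, and fs_magic (0x00011954) at 0x55C. The superblock sits 8 KiB (0x2000)
# into the partition, which is why the partition start is back-calculated from
# the magic's position.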


def find_elf(data: bytes) -> list[tuple[int, dict]]:
    """Find ELF binaries in data."""
    ELF_MAGIC = b"\x7fELF"
    results = []
    offset = 0
    while offset < len(data) - 52:  # Minimum ELF header size
        try:
            elf_offset = data.index(ELF_MAGIC, offset)
        except ValueError:
            break
        if len(data) > elf_offset + 52:
            elf_header = data[elf_offset : elf_offset + 52]
            ei_class = elf_header[4]  # 1=32-bit, 2=64-bit
            ei_data = elf_header[5]  # 1=LE, 2=BE
            ei_osabi = elf_header[7]
            if ei_class in (1, 2) and ei_data in (1, 2):
                endian = "<" if ei_data == 1 else ">"
                bits = 32 if ei_class == 1 else 64
                e_type = struct.unpack(f"{endian}H", elf_header[16:18])[0]
                e_machine = struct.unpack(f"{endian}H", elf_header[18:20])[0]
                # Machine types
                machines = {
                    0x03: "x86",
                    0x08: "MIPS",
                    0x14: "PowerPC",
                    0x28: "ARM",
                    0x3E: "x86-64",
                    0xB7: "AArch64",
                }
                types = {
                    1: "relocatable",
                    2: "executable",
                    3: "shared",
                    4: "core",
                }
                info = {
                    "offset": elf_offset,
                    "bits": bits,
                    "endian": "little" if ei_data == 1 else "big",
                    "type": types.get(e_type, f"unknown({e_type})"),
                    "machine": machines.get(e_machine, f"unknown({e_machine:#x})"),
                }
                results.append((elf_offset, info))
                logging.debug(
                    f"Found ELF at {elf_offset:#x}: {bits}-bit {info['machine']} {info['type']}"
                )
        offset = elf_offset + 1
    return results
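
# ELF header fields read above: e_ident[4] is EI_CLASS (1 = 32-bit, 2 = 64-bit),
# e_ident[5] is EI_DATA (1 = little endian, 2 = big endian), and e_type /
# e_machine are 16-bit fields at offsets 16 and 18 in both ELF32 and ELF64.
# 52 bytes is the size of a full 32-bit ELF header (Elf32_Ehdr).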


def extract_ffs_files(fs_data: bytes, output_dir: Path) -> int:
    """
    Extract files from an FFS filesystem image.
    Returns the number of files extracted.
    Currently a placeholder that always returns 0: full inode parsing is not
    implemented here. The filesystem image is written to disk instead, and the
    generated EXTRACT_README.txt explains how to extract it with tsk_recover.
    """
    return 0
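

# A minimal, optional sketch of automating that external step: this helper is
# illustrative only (it is not called anywhere in this script) and assumes The
# Sleuth Kit's `tsk_recover` binary is installed and on PATH, matching the
# command suggested in the generated EXTRACT_README.txt.
def extract_ffs_with_tsk(image_path: Path, output_dir: Path) -> bool:
    """Run `tsk_recover -e <image> <output_dir>`; return True on success."""
    import shutil
    import subprocess

    if shutil.which("tsk_recover") is None:
        logging.debug("tsk_recover not found on PATH; skipping automatic extraction")
        return False
    output_dir.mkdir(parents=True, exist_ok=True)
    result = subprocess.run(
        ["tsk_recover", "-e", str(image_path), str(output_dir)],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        logging.warning(f"tsk_recover failed: {result.stderr.strip()}")
        return False
    return True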


def extract_all(data: bytes, output_dir: Path, base_name: str) -> dict:
    """
    Extract all components from firmware data.
    Returns dict with extraction results.
    """
    results = {
        "basebinary_layers": [],
        "bootloader": None,
        "kernel": None,
        "filesystem": None,
        "elf_binaries": [],
    }
    output_dir.mkdir(parents=True, exist_ok=True)
    current_data = data
    layer = 0

    # Process nested basebinary containers
    while is_basebinary(current_data):
        layer += 1
        try:
            parsed, info = parse(current_data)
            enc_str = "encrypted" if info["encrypted"] else "unencrypted"
            results["basebinary_layers"].append(
                {
                    "layer": layer,
                    "model": info["model"],
                    "version": info["version"],
                    "encrypted": info["encrypted"],
                    "size": len(parsed),
                }
            )
            logging.info(f"Layer {layer}: basebinary model {info['model']} ({enc_str})")
            current_data = parsed
        except BasebinaryError as e:
            logging.warning(f"Failed to parse basebinary layer {layer}: {e}")
            break

    # Find gzip-compressed kernel
    gzip_offset, gzip_filename = find_gzip(current_data)
    if gzip_offset >= 0:
        # Extract bootloader (data before gzip)
        if gzip_offset > 0:
            bootloader_data = current_data[:gzip_offset]
            bootloader_path = output_dir / f"{base_name}.bootloader.bin"
            bootloader_path.write_bytes(bootloader_data)
            results["bootloader"] = {
                "path": bootloader_path,
                "size": len(bootloader_data),
                "offset": 0,
            }
            logging.info(f"Extracted bootloader: {len(bootloader_data)} bytes")
            # Check for ELF in bootloader
            elf_list = find_elf(bootloader_data)
            for elf_offset, elf_info in elf_list:
                results["elf_binaries"].append(
                    {"location": "bootloader", "offset": elf_offset, **elf_info}
                )

        # Extract kernel
        try:
            kernel_data, kernel_name = extract_gzip(current_data, gzip_offset)
            if kernel_name:
                # Clean up filename
                kernel_name = kernel_name.replace("/", "_").replace("\\", "_")
                kernel_path = output_dir / f"{base_name}.{kernel_name}"
            else:
                kernel_path = output_dir / f"{base_name}.kernel.bin"
            kernel_path.write_bytes(kernel_data)
            results["kernel"] = {
                "path": kernel_path,
                "size": len(kernel_data),
                "gzip_offset": gzip_offset,
                "original_name": kernel_name,
            }
            logging.info(
                f"Extracted kernel: {len(kernel_data)} bytes -> {kernel_path.name}"
            )

            # Find FFS filesystem in kernel
            ffs_list = find_ffs(kernel_data)
            if ffs_list:
                # Use the first (usually only) FFS
                fs_offset, fs_info = ffs_list[0]
                # Extract filesystem image
                fs_data = kernel_data[fs_offset:]
                fs_path = output_dir / f"{base_name}.rootfs.ffs"
                fs_path.write_bytes(fs_data)
                results["filesystem"] = {
                    "path": fs_path,
                    "type": "FFS",
                    "offset_in_kernel": fs_offset,
                    "size": len(fs_data),
                    **fs_info,
                }
                logging.info(
                    f"Extracted FFS filesystem: {len(fs_data)} bytes at kernel offset {fs_offset:#x}"
                )
                # Create extraction directory for manual extraction
                rootfs_dir = output_dir / f"{base_name}.rootfs"
                rootfs_dir.mkdir(exist_ok=True)
                # Write helper script
                helper_script = rootfs_dir / "EXTRACT_README.txt"
                helper_script.write_text(f"""FFS Filesystem Extraction
========================

The file '{fs_path.name}' contains a NetBSD FFS (Unix Fast File System).

To extract files, use The Sleuth Kit:

# Install (macOS)
brew install sleuthkit

# List files
fls -r '{fs_path.name}'

# Extract all files
tsk_recover -e '{fs_path.name}' extracted/

# Extract specific file by inode
icat '{fs_path.name}' <inode_number> > output_file

Filesystem info:
Block size: {fs_info.get("block_size", "unknown")}
Fragment size: {fs_info.get("fragment_size", "unknown")}
Offset in kernel: {fs_offset:#x}
""")

            # Find ELF binaries in kernel
            elf_list = find_elf(kernel_data)
            for elf_offset, elf_info in elf_list[:10]:  # Limit to first 10
                results["elf_binaries"].append(
                    {"location": "kernel", "offset": elf_offset, **elf_info}
                )
        except Exception as e:
            logging.warning(f"Failed to extract kernel: {e}")
    else:
        # No gzip found, save raw payload
        raw_path = output_dir / f"{base_name}.payload.bin"
        raw_path.write_bytes(current_data)
        logging.info(f"No gzip found, saved raw payload: {len(current_data)} bytes")

    return results


def print_results(results: dict, output_dir: Path):
    """Print extraction results summary."""
    print(f"\nExtraction complete: {output_dir}\n")
    print("=" * 60)

    # Basebinary layers
    if results["basebinary_layers"]:
        print("\nBasebinary layers:")
        for layer in results["basebinary_layers"]:
            enc = "encrypted" if layer["encrypted"] else "unencrypted"
            print(
                f" Layer {layer['layer']}: model {layer['model']}, {enc}, {layer['size']:,} bytes"
            )

    # Bootloader
    if results["bootloader"]:
        bl = results["bootloader"]
        print(f"\nBootloader:")
        print(f" {bl['path'].name} ({bl['size']:,} bytes)")

    # Kernel
    if results["kernel"]:
        k = results["kernel"]
        print(f"\nKernel:")
        print(f" {k['path'].name} ({k['size']:,} bytes)")
        if k.get("original_name"):
            print(f" Original name: {k['original_name']}")

    # Filesystem
    if results["filesystem"]:
        fs = results["filesystem"]
        print(f"\nFilesystem ({fs['type']}):")
        print(f" {fs['path'].name} ({fs['size']:,} bytes)")
        print(f" Block size: {fs.get('block_size', 'unknown')}")
        print(f" Fragment size: {fs.get('fragment_size', 'unknown')}")
        print(f" Offset in kernel: {fs['offset_in_kernel']:#x}")
        print(f"\n To extract files:")
        print(f" tsk_recover -e '{fs['path'].name}' rootfs/")

    # ELF binaries
    if results["elf_binaries"]:
        print(f"\nELF binaries found: {len(results['elf_binaries'])}")
        for elf in results["elf_binaries"][:5]:
            print(
                f" {elf['location']}+{elf['offset']:#x}: {elf['bits']}-bit {elf['machine']} {elf['type']}"
            )
        if len(results["elf_binaries"]) > 5:
            print(f" ... and {len(results['elf_binaries']) - 5} more")

    print("\n" + "=" * 60)


def main():
    parser = argparse.ArgumentParser(
        description="Apple AirPort firmware (basebinary) extractor",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=f"""
Supported models: {", ".join(map(str, sorted(BASEBINARY_KEYS.keys())))}

This tool extracts:
- Nested basebinary containers (with decryption)
- Bootloader (MIPS code before kernel)
- Kernel image (gzip-compressed)
- FFS root filesystem

Examples:
%(prog)s firmware.basebinary
%(prog)s firmware.basebinary -o output_dir/
%(prog)s firmware.basebinary --info
""",
    )
    parser.add_argument("input", type=Path, help="Input firmware file")
    parser.add_argument(
        "-o",
        "--output",
        type=Path,
        help="Output directory (default: <input>.extracted/)",
    )
    parser.add_argument(
        "--info",
        action="store_true",
        help="Only show firmware info, don't extract",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Increase verbosity (-v for info, -vv for debug)",
    )
    args = parser.parse_args()

    # Configure logging
    log_level = logging.WARNING
    if args.verbose >= 2:
        log_level = logging.DEBUG
    elif args.verbose >= 1:
        log_level = logging.INFO
    logging.basicConfig(level=log_level, format="%(levelname)s: %(message)s")

    # Read input file
    try:
        data = args.input.read_bytes()
    except FileNotFoundError:
        print(f"Error: File not found: {args.input}", file=sys.stderr)
        sys.exit(1)
    except IOError as e:
        print(f"Error reading {args.input}: {e}", file=sys.stderr)
        sys.exit(1)
    base_name = args.input.stem

    # --info mode
    if args.info:
        print(f"Analyzing: {args.input}\n")
        current_data = data
        layer = 0
        while is_basebinary(current_data):
            layer += 1
            try:
                parsed, info = parse(current_data)
                enc_str = "encrypted" if info["encrypted"] else "unencrypted"
                print(f"Layer {layer}: basebinary")
                print(f" Model: {info['model']}")
                print(f" Version: {info['version']}")
                print(f" Encrypted: {info['encrypted']}")
                print(f" Payload size: {len(parsed):,} bytes")
                print()
                current_data = parsed
            except BasebinaryError as e:
                print(f"Layer {layer}: parse error: {e}")
                break
        # Check for gzip
        gzip_offset, orig_filename = find_gzip(current_data)
        if gzip_offset >= 0:
            print(f"Gzip stream at offset: {gzip_offset:#x}")
            if orig_filename:
                print(f" Original filename: {orig_filename}")
            print(f" Bootloader size: {gzip_offset:,} bytes")
            # Extract and check kernel
            try:
                kernel_data, _ = extract_gzip(current_data, gzip_offset)
                print(f" Kernel size (uncompressed): {len(kernel_data):,} bytes")
                # Check for FFS
                ffs_list = find_ffs(kernel_data)
                if ffs_list:
                    fs_offset, fs_info = ffs_list[0]
                    print(f"\nFFS filesystem in kernel:")
                    print(f" Offset: {fs_offset:#x}")
                    print(f" Block size: {fs_info.get('block_size', 'unknown')}")
                    print(f" Size: {len(kernel_data) - fs_offset:,} bytes")
            except Exception as e:
                print(f" Error analyzing kernel: {e}")
        sys.exit(0)

    # Default: full extraction
    if args.output:
        output_dir = args.output
    else:
        output_dir = args.input.parent / f"{base_name}.extracted"
    try:
        results = extract_all(data, output_dir, base_name)
        print_results(results, output_dir)
    except BasebinaryError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()