import csv

from jira import JIRA, JIRAError

# Configuration for both instances
DC_SERVER = 'https://your-datacenter-jira-instance.com'
DC_API_TOKEN = 'your_dc_api_token'
DC_EMAIL = 'your_dc_email@example.com'
CLOUD_SERVER = 'https://your-cloud-jira-instance.atlassian.net'
CLOUD_API_TOKEN = 'your_cloud_api_token'
CLOUD_EMAIL = 'your_cloud_email@example.com'

# Connect to both Jira instances: Data Center authenticates with a personal
# access token, Cloud with basic auth (account email + API token).
jira_dc = JIRA(server=DC_SERVER, token_auth=DC_API_TOKEN)
jira_cloud = JIRA(server=CLOUD_SERVER, basic_auth=(CLOUD_EMAIL, CLOUD_API_TOKEN))

# Fetch recently updated issues along with the fields that drive their footprint
def fetch_issues(jira_instance, jql, max_issues=50):
    return jira_instance.search_issues(
        jql,
        maxResults=max_issues,
        fields="summary,description,attachment,issuelinks,comment,subtasks",
    )
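# Optional: search_issues() returns at most max_issues results in a single call.
# A minimal sketch of page-by-page fetching with startAt, assuming the same field
# list; fetch_all_issues is an illustrative helper, not part of the original script.
def fetch_all_issues(jira_instance, jql, page_size=50, limit=500):
    issues = []
    while len(issues) < limit:
        page = jira_instance.search_issues(
            jql,
            startAt=len(issues),
            maxResults=page_size,
            fields="summary,description,attachment,issuelinks,comment,subtasks",
        )
        if not page:
            break
        issues.extend(page)
        if len(page) < page_size:
            break  # last page reached
    return issues[:limit]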
# Extract issue details
def extract_issue_data(issue):
    return {
        'key': issue.key,
        'summary': issue.fields.summary,
        'description_length': len(issue.fields.description or ''),
        'attachment_count': len(getattr(issue.fields, 'attachment', [])),
        'link_count': len(getattr(issue.fields, 'issuelinks', [])),
        'comment_count': issue.fields.comment.total if hasattr(issue.fields.comment, 'total') else 0,
        'subtask_count': len(issue.fields.subtasks or []),
    }

# Compare two lists of issues, pairing them by issue key
def compare_issues(dc_issues, cloud_issues):
    matches = 0
    comparisons = []
    for dc_issue in dc_issues:
        dc_data = extract_issue_data(dc_issue)
        try:
            cloud_issue = next(i for i in cloud_issues if i.key == dc_issue.key)
            cloud_data = extract_issue_data(cloud_issue)
            # An issue pair counts as a match only if every footprint field is identical.
            is_match = all(dc_data[k] == cloud_data[k] for k in dc_data if k != 'key')
            matches += is_match
            comparisons.append({
                **dc_data,
                **{f'cloud_{k}': cloud_data[k] for k in cloud_data if k != 'key'},
                'match': is_match,
            })
        except StopIteration:
            # No Cloud issue with the same key was found.
            comparisons.append({**dc_data, 'cloud_missing': True, 'match': False})
    match_rate = matches / len(dc_issues) * 100 if dc_issues else 0
    return comparisons, match_rate
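# Optional: compare_issues() is all-or-nothing per issue. A minimal sketch of a
# per-field breakdown for pairs that differ; mismatched_fields is an illustrative
# helper, not part of the original script.
def mismatched_fields(dc_data, cloud_data):
    # Both arguments are dicts produced by extract_issue_data().
    return [k for k in dc_data if k != 'key' and dc_data[k] != cloud_data.get(k)]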
# Write results to CSV
def write_comparison_csv(data, filename):
    if not data:
        return
    # Use the union of keys across all rows so partial rows (e.g. issues missing
    # in Cloud) still serialize; DictWriter leaves absent columns blank.
    headers = sorted(set(k for row in data for k in row))
    with open(filename, mode='w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=headers)
        writer.writeheader()
        for row in data:
            writer.writerow(row)

# Catalog active sprints per board (the board name is used as the project label)
def catalog_active_sprints(jira_instance, suffix):
    boards = jira_instance.boards()
    sprint_data = []
    for board in boards:
        try:
            sprints = jira_instance.sprints(board.id, state='active')
        except JIRAError:
            # Boards without sprint support (e.g. Kanban) can raise here; skip them.
            continue
        for sprint in sprints:
            sprint_data.append({'project': board.name, 'sprint_id': sprint.id, 'sprint_name': sprint.name})
    write_comparison_csv(sprint_data, f'active_sprints_{suffix}.csv')

# Main assessment logic: compare the 50 most recently updated DC issues against
# the 100 most recently updated Cloud issues.
dc_issues = fetch_issues(jira_dc, 'ORDER BY updated DESC', 50)
cloud_issues = fetch_issues(jira_cloud, 'ORDER BY updated DESC', 100)
comparison, similarity_score = compare_issues(dc_issues, cloud_issues)
write_comparison_csv(comparison, 'issue_comparison.csv')
catalog_active_sprints(jira_dc, 'dc')
catalog_active_sprints(jira_cloud, 'cloud')
print(f"Cloud is {similarity_score:.2f}% like DC based on recent issues.")