Last active
February 23, 2026 06:40
-
-
Save itdependsnetworks/c66d0ba4bf1d53a4b992bc707d082f2e to your computer and use it in GitHub Desktop.
Run Tests For Nautobot View Validation
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # Usage: | |
| # Manual (nbshell): Just copy-paste the entire contents of this file into `nautobot-server nbshell` and run it. Make sure to restart sessions between runs or risk not including your latest code changes. | |
| # Automated (invoke): nautobot-server shell --interface=python --command "exec(open('/source/nbshell_view_tests.py').read())" | |
| # | |
| # Pre-requisites: | |
| # 1. Standard Nautobot patterns must be followed for things like pyproject.toml configuration, view definitions, and filterset definitions. This is necessary for the test to be able to discover the models and views to test. | |
| # 2. User created with permissions to access all views (e.g. superuser) must be used to run the test and be the first user in the database (or you can modify the code to select a different user). | |
| # | |
| # Tests two things for every model in the app defined by pyproject.toml: | |
| # 1. Every sortable table column returns HTTP 200 | |
| # 2. Every filter form field accepts a valid value and returns HTTP 200 | |
| # without "Invalid filters were specified" | |
| import traceback | |
| from django import forms as django_forms | |
| from django.apps import apps | |
| from django.contrib.auth import get_user_model | |
| from django.test import Client | |
| from django.urls import NoReverseMatch, reverse | |
| from nautobot.extras.models import Tag | |
| from nautobot.core.filters import MACAddressFilter, MultiValueMACAddressFilter | |
| from nautobot.core.forms import ( | |
| DynamicModelMultipleChoiceField, | |
| MultiValueCharField, | |
| NullableDateField, | |
| NumericArrayField, | |
| TagFilterField, | |
| ) | |
| from nautobot.core.templatetags.helpers import validated_viewname | |
| from nautobot.core.utils.lookup import ContentType, get_filterset_for_model, get_view_for_model | |
| # --------------------------------------------------------------------------- | |
| # Setup: authenticated client as superuser | |
| # --------------------------------------------------------------------------- | |
# Authenticate a test client as the first user in the database; that account
# must be able to reach every list view (typically a superuser).
User = get_user_model()
client = Client()
user = User.objects.first()
client.force_login(user)
| # --------------------------------------------------------------------------- | |
| # Helpers | |
| # --------------------------------------------------------------------------- | |
# Status labels used for every recorded outcome.
PASS, FAIL, SKIP, ERROR = "PASS", "FAIL", "SKIP", "ERROR"

# Accumulated outcomes as (status, model_name, test, detail) tuples.
# NOTE(review): the surrounding notes mention a cumulative log file, but this
# script only keeps results in memory and prints them -- no file is written.
results = []


def record(status, model_name, test, detail=""):
    """Append one outcome to ``results`` and echo it to the console."""
    results.append((status, model_name, test, detail))
    marker = {"PASS": "✓", "FAIL": "✗", "SKIP": "-", "ERROR": "!"}.get(status, "?")
    line = f" [{marker}] {test}: {detail}" if detail else f" [{marker}] {test}"
    print(line)
def get_list_url(model):
    """Resolve the list-view URL for *model*, or None when no route exists."""
    viewname = validated_viewname(model, "list")
    try:
        url = reverse(viewname)
    except NoReverseMatch:
        url = None
    return url
def iter_choices(choices):
    """Yield flat (value, label) pairs, expanding optgroup-style nested groups."""
    for choice in choices:
        group = choice[1]
        if isinstance(group, (list, tuple)):
            yield from group
        else:
            yield choice
| # --------------------------------------------------------------------------- | |
| # Test 1: sortable table columns | |
| # --------------------------------------------------------------------------- | |
def check_table_columns(model, view, list_url):
    """Request the list view sorted by each orderable table column.

    Every data column gets one GET with ``?sort=<column>``; an HTTP 200
    response records a PASS, anything else a FAIL. Views with no table
    class are SKIPped, and request exceptions are recorded as ERROR.
    """
    table_class = getattr(view, "table_class", getattr(view, "table", None))
    if not table_class:
        record(SKIP, model.__name__, "table_columns", "no table_class on view")
        return
    table = table_class(model.objects.all())
    column_names = list(table.base_columns.keys())
    # Persist a user config making every column visible, mirroring what the
    # standard Nautobot view tests do before exercising the sort links.
    user.set_config(f"tables.{table_class.__name__}.columns", column_names, commit=True)
    for name in column_names:
        column = table.base_columns[name]
        # "actions" and "pk" are control columns, and columns explicitly
        # marked non-orderable expose no sort link -- skip both.
        if name in ("actions", "pk"):
            continue
        if getattr(column, "orderable", None) is False:
            continue
        sort_url = f"{list_url}?sort={name}"
        try:
            response = client.get(sort_url)
            if response.status_code == 200:
                record(PASS, model.__name__, f"sort:{name}")
            else:
                record(FAIL, model.__name__, f"sort:{name}", f"HTTP {response.status_code}")
        except Exception as exc:
            record(ERROR, model.__name__, f"sort:{name}", str(exc))
            traceback.print_exc()
| # --------------------------------------------------------------------------- | |
| # Test 2: filter form fields | |
| # --------------------------------------------------------------------------- | |
def check_filter_form_fields(model, view, list_url):
    """Submit a valid value for every filter form field and expect HTTP 200.

    For each field on the view's filter form (dynamically generated custom
    relationship/field/computed-field filters excluded), a type-appropriate
    sample value is chosen, the list view is requested with that single
    filter applied, and a FAIL is recorded for a non-200 response, a form
    field with no matching filterset filter, or an "Invalid filters were
    specified" message in the rendered page.
    """
    filter_form_class = getattr(view, "filterset_form_class", getattr(view, "filterset_form", None))
    if not filter_form_class:
        record(SKIP, model.__name__, "filter_form", "no filterset_form_class on view")
        return
    filterset_class = get_filterset_for_model(model)
    if not filterset_class:
        record(FAIL, model.__name__, "filter_form", "no filterset found")
        return
    for field_name, form_field in filter_form_class().fields.items():
        # cr_/cf_/cpf_ prefixes denote dynamically generated custom
        # relationship / custom field / computed field filters -- skip them.
        if field_name.startswith(("cr_", "cf_", "cpf_")):
            continue
        if not filterset_class.base_filters.get(field_name):
            record(
                FAIL,
                model.__name__,
                f"filter:{field_name}",
                f"{filter_form_class.__name__} field has no matching filter on {filterset_class.__name__}",
            )
            continue
        filter_field = filterset_class.base_filters[field_name]
        value = None
        # Choose a sample value appropriate for the form field type.
        if hasattr(form_field, "choices") and form_field.choices:
            valid_choices = [c[0] for c in iter_choices(form_field.choices) if c[0] is not None]
            if not valid_choices:
                record(SKIP, model.__name__, f"filter:{field_name}", "no valid choices")
                continue
            value = valid_choices[0]
        elif type(form_field) in (django_forms.CharField, django_forms.TypedChoiceField):
            # MAC-address filters validate their input, so use a well-formed MAC.
            value = "aa:bb:cc:dd:ee:ff" if type(filter_field) in (MACAddressFilter, MultiValueMACAddressFilter) else "test-name"
        elif type(form_field) in (django_forms.IntegerField, django_forms.ModelChoiceField, NumericArrayField):
            value = 1
        elif type(form_field) is django_forms.FloatField:
            value = 1.1
        elif type(form_field) in (django_forms.BooleanField, django_forms.NullBooleanField):
            value = True
        elif type(form_field) in (django_forms.DateField, NullableDateField):
            value = "2026-01-01"
        elif type(form_field) is django_forms.DateTimeField:
            value = "2026-01-01T00:00:00"
        elif type(form_field) in (django_forms.ModelMultipleChoiceField, DynamicModelMultipleChoiceField):
            queryset = getattr(form_field, "queryset", None)
            if queryset is not None and queryset.exists():
                value = queryset.first().pk
            else:
                record(SKIP, model.__name__, f"filter:{field_name}", "empty queryset")
                continue
        elif type(form_field) is django_forms.URLField:
            value = "https://example.com"
        elif type(form_field) is MultiValueCharField:
            value = "test1"
        elif type(form_field) is TagFilterField:
            # Prefer a tag already associated with this model's content type;
            # fall back to any tag, then to a placeholder name.
            # BUGFIX: the original kept the whole QuerySet and then read
            # ``.name`` on it, which raises AttributeError -- use .first().
            tag = Tag.objects.filter(content_types=ContentType.objects.get_for_model(model)).first()
            if tag is None:
                tag = Tag.objects.first()
            value = tag.name if tag is not None else "test-tag"
        else:
            record(FAIL, model.__name__, f"filter:{field_name}", f"unsupported field type {form_field.__class__.__name__}")
            continue
        url = f"{list_url}?{field_name}={value!s}"
        try:
            response = client.get(url)
            if response.status_code != 200:
                record(FAIL, model.__name__, f"filter:{field_name}", f"HTTP {response.status_code} at {url}")
            elif "Invalid filters were specified" in response.content.decode("utf-8", errors="replace"):
                record(FAIL, model.__name__, f"filter:{field_name}", "'Invalid filters were specified' in response")
            else:
                record(PASS, model.__name__, f"filter:{field_name}")
        except Exception as exc:
            record(ERROR, model.__name__, f"filter:{field_name}", str(exc))
            traceback.print_exc()
| # --------------------------------------------------------------------------- | |
| # Main loop: iterate all models with a list view | |
| # --------------------------------------------------------------------------- | |
checked = 0
skipped = 0

import re
from pathlib import Path

# Resolve the app under test from the poetry "packages" include entry of the
# pyproject.toml in the current directory.
# (Matching [tool.poetry] name was tried previously but fails for apps such as
# welcome_wizard whose importable package name differs from the project name.)
pyproject_path = Path.cwd() / "pyproject.toml"
if not pyproject_path.exists():
    raise SystemExit(f"No pyproject.toml found in {Path.cwd()}")
pyproject_text = pyproject_path.read_text()
pkg_match = re.search(r'packages\s*=\s*\[\s*\{\s*include\s*=\s*"([^"]+)"', pyproject_text, re.DOTALL)
if not pkg_match:
    raise SystemExit("Could not find 'packages' include entry in pyproject.toml")
package_name = pkg_match.group(1).replace("-", "_")
print(f"Testing app: {package_name}")

# Collect every installed app config belonging to that package.
target_configs = [
    cfg
    for cfg in apps.app_configs.values()
    if cfg.name == package_name or cfg.name.startswith(f"{package_name}.")
]
if not target_configs:
    raise SystemExit(f"No installed app found matching '{package_name}'. Is it in INSTALLED_APPS?")
print(f"App configs: {[cfg.label for cfg in target_configs]}")
# Run both checks against every model that exposes a discoverable list view.
for app_config in target_configs:
    for model in app_config.get_models():
        list_url = get_list_url(model)
        view = get_view_for_model(model, view_type="List") if list_url else None
        if not list_url or not view:
            # No routable list URL or no list view class: nothing to test.
            skipped += 1
            continue
        checked += 1
        print(f"\n=== {model._meta.app_label}.{model.__name__} ===")
        check_table_columns(model, view, list_url)
        check_filter_form_fields(model, view, list_url)
        print("\n")
| # --------------------------------------------------------------------------- | |
| # Summary | |
| # --------------------------------------------------------------------------- | |
# Tally outcomes by status and print a summary, plus a markdown-style table
# of every FAIL/ERROR for easier triage.
counts = {status: sum(1 for entry in results if entry[0] == status) for status in (PASS, FAIL, SKIP, ERROR)}
print("\n" + "=" * 60)
print(f"Models checked: {checked} (skipped no-list-view: {skipped})")
print(f"PASS={counts[PASS]} FAIL={counts[FAIL]} SKIP={counts[SKIP]} ERROR={counts[ERROR]}")
if counts[FAIL] or counts[ERROR]:
    print("\nList of failures and errors:\n")
    print("\n Error Type | Model | Test | Field | Detail\n ------ | ----- | ---- | ---- | ------")
    for status, model_name, test, detail in results:
        if status not in (FAIL, ERROR):
            continue
        # test is "type:field" (e.g. "sort:name"); field is empty if no colon.
        test_type, _, test_field = test.partition(":")
        print(f" [{status}] | {model_name} | {test_type} | {test_field} | {detail}")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment