{"name":"AxonQA MCP Server","version":"1.0.0","protocolVersion":"2024-11-05","description":"MCP server exposing AxonQA QA automation tools","documentation":"/docs/mcp","endpoints":{"mcp":"/api/mcp","stream":"/api/mcp/stream","openapi":"/api/mcp/openapi.json"},"configTemplates":{"claudeDesktop":"/mcp/claude_desktop_config.example.json","vscode":"/mcp/vscode_settings.example.json","codex":"/mcp/codex_config.example.json"},"authentication":{"type":"bearer","description":"Use Bearer token in Authorization header","example":"Authorization: Bearer your-api-key-here"},"rateLimits":{"default":"100 requests per minute"},"tools":[{"name":"import_jira","description":"Import a Jira ticket by ID to extract acceptance criteria and generate tests. Returns ticket details including title, description, and acceptance criteria.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"search_jira","description":"Search Jira for tickets using JQL or natural text query. Returns matching issues with key, summary, status, type, priority, and assignee.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"update_jira_issue","description":"Update a Jira issue's summary, description, or priority.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"add_jira_comment","description":"Add a comment to a Jira issue. Useful for posting test results, status updates, or notes.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"transition_jira_issue","description":"Move a Jira issue to a new status (e.g., \"Done\", \"In Progress\", \"In Review\"). Lists available transitions and applies the matching one.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"generate_tests","description":"Generate test cases from acceptance criteria. Can generate from a Jira ticket or custom description. Returns generated test cases with coverage metrics.","category":"server","requiresConfirmation":false,"longRunning":true},{"name":"improve_tests","description":"Improve existing test suite by adding missing coverage, edge cases, or refining test steps.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_test_suites","description":"List all test suites for the current user. Returns summary of each suite including coverage and test counts.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_test_suite","description":"Get details of a specific test suite including all test cases. Accepts a ticket reference (e.g. DEV-12) or a suite ID.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"check_agent_status","description":"Check if the AxonQA Agent is connected and ready for DOM capture or test execution.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"capture_dom","description":"Capture DOM elements from a webpage using the connected agent. Extracts interactive elements like buttons, inputs, and links with selectors. After starting, call check_capture_status to see results.","category":"agent","requiresConfirmation":false,"longRunning":true},{"name":"check_capture_status","description":"Check the status of a DOM capture session. Call this after starting a capture to see if it completed and get results.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_captures","description":"List recent DOM capture sessions to see what pages have been captured.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"run_tests","description":"Execute tests using the connected agent. Can run all tests, specific test cases, or tests by type/tag.\n\nSUPPORTS FILTERING BY TEST TYPE:\n- testType: \"smoke\" | \"regression\" | \"sanity\" | \"critical\" | \"e2e\" | \"integration\"\n- This filters tests based on their @tag annotation in the test code\n- Example: \"run smoke tests\" → testType: \"smoke\"\n- Example: \"run critical tests for Swag\" → testType: \"critical\", frameworkName: \"Swag\"\n\nYou can also filter by:\n- frameworkId or frameworkName: Run tests from a specific automation framework\n- testIds: Run specific test IDs\n- headed: Show visible browser (true) or run headless (false)","category":"agent","requiresConfirmation":false,"longRunning":true},{"name":"get_run_status","description":"Get the current status and results of a test run.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"cancel_run","description":"Cancel a running test execution.","category":"agent","requiresConfirmation":true,"longRunning":false},{"name":"wait_for_run_completion","description":"Wait for a test run to complete and return final results with summary.\n\nIMPORTANT: Always call this after run_tests to get the actual results.\nThe run_tests tool only STARTS the run - it doesn't wait for completion.\n\nUse this to:\n- Get final pass/fail counts\n- Get test execution duration\n- Check for healing suggestions\n- Provide results summary to user","category":"server","requiresConfirmation":false,"longRunning":true},{"name":"list_locators","description":"List all locator libraries (DOM captures) for the current user.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_locator_library","description":"Get details of a specific locator library including all captured elements.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_heal_suggestions","description":"Get healing suggestions (selector fixes) from a test run. Shows proposed fixes for broken locators.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"apply_heals","description":"Apply healing fixes to update broken locators. Can apply specific fixes or all safe fixes.","category":"server","requiresConfirmation":true,"longRunning":false},{"name":"revert_heals","description":"Revert previously applied healing fixes.","category":"server","requiresConfirmation":true,"longRunning":false},{"name":"export_tests","description":"Export test cases to various formats (JSON, Excel, Markdown, or automation code).","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"generate_automation_code","description":"Generate automation test code (Playwright, Cypress) from test cases.\n\nSMART DEFAULTS (use these when not specified):\n- language: \"typescript\" (default)\n- baseUrl: \"http://localhost:3000\" (default)\n- headless: true (default)\n\nREQUIRED PARAMETERS:\n- framework: MUST be specified or extracted from user message\n- testSuiteId OR ticketRef OR testCount: Need to know which tests to generate\n\nUSAGE PATTERNS:\n1. By ticket reference: ticketRef=\"DEV-7\" → finds suite by ticket\n2. By suite ID: testSuiteId=\"abc123\" → uses specific suite\n3. First N tests: testCount=4, testSuiteId=\"...\" → generates first N tests\n4. Specific tests: testCaseIds=[\"id1\",\"id2\"] → specific test cases only\n5. With locators: locatorLibraryName=\"Login Page\" → uses existing DOM capture\n\nLOCATOR LIBRARY INTEGRATION:\n- User can specify locatorLibraryName (e.g., \"Login Page\") to use an existing DOM capture\n- Or specify locatorLibraryId directly\n- Captured locators will be included in generated code\n\nDO NOT ask users for language/baseUrl/headless - use defaults.\nONLY ask for framework and test scope if not clear from context.","category":"server","requiresConfirmation":false,"longRunning":true},{"name":"save_automation_framework","description":"Save an automation framework to the database so the user can access it on the Automation page.\n\nUSE THIS when you write test code yourself (e.g., user describes scenarios and you build the framework).\nThe file tree is auto-built from generatedCode paths — no need to pass fileTree.\n\nREQUIRED: name, framework, language, generatedCode\ngeneratedCode format: [{path: \"playwright.config.ts\", content: \"...\"}, {path: \"tests/login.spec.ts\", content: \"...\"}]","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_frameworks","description":"List all automation frameworks created by the user.\nReturns framework name, type, test count, and last run status.\nUse this to find frameworks before updating or running them.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_framework_code","description":"Read the generated code files from an existing automation framework.\nReturns the full source code for all files (or a specific file).\n\nUSE THIS BEFORE modifying code with update_framework — you need to see the current code first.\n\nLOOKUP OPTIONS (use one):\n- frameworkId: Exact framework ID\n- frameworkName: Partial name match (e.g., \"Login\" matches \"Login Tests - Playwright\")\n- ticketRef: Ticket reference (e.g., \"DEV-7\") — finds framework generated from that ticket\n\nOPTIONAL:\n- filePath: Return only a specific file (e.g., \"tests/login.spec.ts\")\n- includeMetadata: Also return framework config (framework type, language, base URL, test cases)","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"update_framework","description":"Update an existing automation framework's code or configuration.\nUse this to modify test code, add new tests, update locators, or change settings.\n\nCOMMON USE CASES:\n- Add new test cases to existing framework\n- Update specific test file content\n- Change framework configuration\n- Update locators after new DOM capture\n- Add extra instructions for regeneration\n\nREQUIRES: frameworkId or frameworkName to identify the framework","category":"server","requiresConfirmation":true,"longRunning":false},{"name":"sync_framework","description":"Sync framework files between cloud and local workspace.\n\nSYNC DIRECTIONS:\n- \"pull\" (cloud → local): Get latest code from cloud to local workspace\n- \"push\" (local → cloud): Upload local changes to cloud\n- \"auto\": Intelligently choose direction based on timestamps\n\nCONFLICT RESOLUTION (when both cloud and local have changes):\n- \"cloud_wins\": Cloud version overwrites local (default for pull)\n- \"local_wins\": Local version overwrites cloud (default for push)\n- \"manual\": Return both versions for manual resolution\n- \"merge\": Attempt to merge changes (keeps both with conflict markers)\n\nREQUIRES: frameworkId or frameworkName + direction","category":"server","requiresConfirmation":true,"longRunning":false},{"name":"get_pending_syncs","description":"Get all frameworks with pending sync changes.\nReturns frameworks that have unsynced cloud or local changes.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"open_framework_in_ide","description":"Open an automation framework in VS Code, Cursor, or another IDE.\n\nThis tool will:\n1. Sync the framework files to a local directory\n2. Open the directory in the specified IDE\n\nSUPPORTED IDEs:\n- \"vscode\" or \"code\" - Visual Studio Code (default)\n- \"cursor\" - Cursor IDE\n- \"webstorm\" - JetBrains WebStorm\n- \"idea\" - IntelliJ IDEA\n- \"sublime\" - Sublime Text\n- \"atom\" - Atom Editor\n\nEXAMPLES:\n- \"Open SwagLabs framework in VS Code\" → frameworkName: \"SwagLabs\", ide: \"vscode\"\n- \"Open my tests in Cursor\" → ide: \"cursor\"\n\nREQUIRES: Connected agent to write files and open IDE.","category":"agent","requiresConfirmation":false,"longRunning":false},{"name":"get_settings","description":"Get current user settings including auto-apply configuration.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"update_settings","description":"Update user settings like auto-apply configuration.","category":"server","requiresConfirmation":true,"longRunning":false},{"name":"check_browserstack_status","description":"Check if BrowserStack is connected and return account information.\n\nUse this to:\n- Verify BrowserStack is set up before running cloud tests\n- Check available parallel sessions\n- See account plan details","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_browserstack_configs","description":"Get available browser, OS, and device configurations for BrowserStack tests.\n\nReturns:\n- Desktop browsers (Chrome, Firefox, Safari, Edge)\n- Operating systems (Windows 11/10, macOS Sonoma/Ventura/Monterey)\n- Mobile devices (iPhone 15 Pro, Pixel 8, Galaxy S24, etc.)\n\nUse this to show users what configurations they can choose from.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"run_tests_browserstack","description":"Run tests on BrowserStack cloud infrastructure for cross-browser testing.\n\nREQUIRES:\n- BrowserStack to be connected (use check_browserstack_status first)\n- A connected AxonQA agent to execute the tests\n\nCONFIGURATION OPTIONS:\n- browsers: Array of browsers to test on (chrome, firefox, safari, edge)\n- os: Operating system (Windows, OS X)\n- osVersion: OS version (11, 10 for Windows; Sonoma, Ventura, Monterey for macOS)\n- devices: Array of mobile device IDs for mobile testing\n\nEXAMPLES:\n- \"Run tests on Chrome and Firefox on Windows 11\"\n  → browsers: [\"chrome\", \"firefox\"], os: \"Windows\", osVersion: \"11\"\n\n- \"Test on Safari on macOS Sonoma\"\n  → browsers: [\"safari\"], os: \"OS X\", osVersion: \"Sonoma\"\n\n- \"Run on iPhone 15 Pro\"\n  → devices: [\"iPhone 15 Pro\"]\n\nThe agent will execute Playwright tests against BrowserStack's cloud browsers.","category":"agent","requiresConfirmation":false,"longRunning":false},{"name":"get_browserstack_run_status","description":"Get the status of a BrowserStack test run including session details, video URLs, and logs.\n\nUse after run_tests_browserstack to check progress and get results.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"create_epic","description":"Create a new epic in a project. Use this when the user wants to manually add an epic (not import from Jira). Returns the created epic details. The epic context is automatically extracted and added to the Project Brain.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"create_story","description":"Create a new user story in a project, optionally under an epic. Use this when the user wants to add a user story (not an epic). If adding to an epic, use list_epics first to find the epicId.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_epics","description":"List all epics in a project. Use this to find an epic ID when the user wants to add a story to a specific epic.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_projects","description":"List all projects the user has access to. Use this to find a project ID when the user mentions a project by name.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_test_cases","description":"List all test cases for a story. Returns test case IDs, titles, types, AC coverage, and automation status.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"edit_test_case","description":"Edit an existing test case. You can update the title, type (happy/negative/edge), steps, or expected result. Use list_test_cases first to get test case IDs.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"delete_test_case","description":"Delete a test case from a story. This will recalculate AC coverage. Use with caution — it may reduce coverage.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"file_jira_defect","description":"Create a Jira Bug issue from a failed test. The defect includes the error message, test steps, and metadata. Requires Jira to be connected.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_quality_gates","description":"List all quality gates configured for the user. Quality gates define pass rate thresholds and failure blocking rules for CI/CD pipelines.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_automation_status","description":"Get the automation status for a story, including frameworks, run history, pass rates, and self-healing stats.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_failure_details","description":"Get detailed failure information for a test run — which tests failed, error messages, failed steps, and healing events. Use this when the user asks \"what failed?\", \"why did tests fail?\", or \"tell me about the failure\". Always use this AFTER get_run_status or wait_for_run_completion shows failures.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"start_crawl","description":"Start a Discover crawl on a website. The agent will crawl the site, extract elements, detect forms, capture screenshots, and identify user flows.\n\nRequires a connected AxonQA Agent. Use check_agent_status first if unsure.\n\nOptions:\n- crawlMode: \"quick\" (30 pages, fast), \"standard\" (50 pages, balanced), \"deep\" (100 pages, full audit)\n- loginUsername + loginPassword: provide login credentials if the site requires authentication\n- enableInteractions: true — AI clicks buttons, fills forms to discover hidden pages\n- excludePatterns: skip URLs matching glob patterns (e.g. \"/admin/*\", \"*.pdf\")\n- enableAccessibilityAudit: run WCAG accessibility checks on each page\n- enablePerformanceMetrics: collect Core Web Vitals (LCP, CLS, TTI)\n- enableVisualBaselines: store high-quality screenshots for visual regression","category":"agent","requiresConfirmation":false,"longRunning":true},{"name":"get_crawl_status","description":"Check the progress or final status of a Discover crawl. Shows pages crawled, forms found, health breakdown, and detected flows.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_crawl_sessions","description":"List all Discover crawl sessions for a project. Shows past and active crawls with their status and page counts.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_crawl_results","description":"Get detailed results from a completed Discover crawl. Can return pages, detected flows, health data, or everything.\n\nUse include parameter to filter:\n- \"pages\" — list of all crawled pages with URL, type, element/form counts\n- \"flows\" — detected user flows (login, registration, checkout, etc.) with step details\n- \"health\" — pages grouped by health grade with error summaries\n- \"all\" — everything (default)\n\nUse this after a crawl completes to understand the site structure before generating tests.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"cancel_crawl","description":"Cancel an active Discover crawl. The agent will stop crawling and the session will be marked as cancelled.","category":"server","requiresConfirmation":true,"longRunning":false},{"name":"generate_tests_from_crawl","description":"Generate test automation code from Discover crawl results. Can generate tests for a single page or a detected user flow.\n\nUse get_crawl_results first to see available pages and flows, then pass the page or flow ID.\n\n- type \"page\": Generates a smoke test that navigates to the page and verifies key elements are visible\n- type \"flow\": Generates an end-to-end test that follows the flow steps (login, checkout, etc.)","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"wait_for_crawl_completion","description":"Wait for a Discover crawl to complete and return final results.\n\nIMPORTANT: Always call this after start_crawl. Crawls run in the background via the agent.\nstart_crawl only launches the crawl — it does not wait for it to finish.\n\nUse this to:\n- Get final page count, flow count, and health summary\n- Know when it's safe to call get_crawl_results or generate_tests_from_crawl\n- Get the navigation link to view the Discover map\n\nCrawls typically take 2-20 minutes depending on site size. This tool will poll every 5 seconds.","category":"server","requiresConfirmation":false,"longRunning":true},{"name":"get_project_analytics","description":"Get analytics and metrics for a project — test run stats, pass rates, test case counts, crawl data, and recent activity. Use when users ask about stats, metrics, coverage, trends, or \"how are my tests doing?\".","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"bulk_improve_tests","description":"Improve multiple test suites at once. Fetches test suites for a project and improves each one. Use when the user says \"improve all my tests\" or \"fix all failing tests\".","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"bulk_generate_from_crawl","description":"Generate test suites for multiple pages from a Discover crawl at once. Filters pages by type (all, forms-only, or interactive) and generates tests for each.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"edit_framework_file","description":"Edit a specific file in an automation framework's generated code.\n\nUse this to make targeted changes to page objects, test specs, helpers, config, or any file in the framework.\nThe edit is saved to the framework's generatedCode (single source of truth).\n\nExamples:\n- Fix a selector in a page object\n- Update a test step\n- Change a config value\n- Add a new helper function\n\nYou can either replace the entire file content or do a find-and-replace within the file.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_visual_regression","description":"Get visual regression data for a test run — shows which steps have visual changes compared to the baseline.\n\nReturns per-step screenshot comparisons with diff scores. A diff score > 0.02 (2%) indicates a visual change.\n\nUse this to:\n- Check if a test run introduced visual changes\n- Identify which steps look different from the baseline\n- Get diff image URLs for side-by-side comparison","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"run_visual_comparison","description":"Run visual comparison for a specific screenshot against its baseline.\n\nIf no baseline exists, the screenshot is set as the baseline for future comparisons.\nReturns the diff score and diff image URL.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"debug_test_failure","description":"Get detailed debugging information for a failed test in a run.\n\nReturns:\n- Full error message and stack trace\n- Failed step details (which step, what selector, what line)\n- DOM snapshot at failure time (if captured)\n- Screenshot at failure time\n- Test code around the failed line\n- Page object code containing the broken selector\n- Suggested fixes with confidence scores\n\nUse this to deeply investigate why a test failed and recommend specific fixes.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"create_api_collection","description":"Create a new API collection in a project. Use when the user wants to organize their API endpoints into a named group.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"import_api_spec","description":"Import API endpoints from an OpenAPI/Swagger spec, Postman collection, cURL command, or HAR file. Provide the content directly or a URL to fetch.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"add_api_endpoint","description":"Add an API endpoint to a collection. Specify method, path, headers, body, and authentication.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"run_api_test","description":"Execute API tests for an endpoint or entire collection. Runs all active test cases and returns results with pass/fail details.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"get_api_test_results","description":"Get detailed results of an API test run, including assertion results, request/response data, and timing.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"generate_api_tests","description":"AI-generate test cases for an API endpoint. Creates functional, negative, security, and edge case tests based on the endpoint definition and any saved response.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"create_api_chain","description":"Build a multi-step API request chain with variable extraction between steps. Use for testing flows like Register → Login → Create Resource → Verify.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"debug_api_endpoint","description":"Diagnose why an API request is failing. Sends the request and analyzes the response — checks auth, headers, body format, status codes, and common issues.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"list_api_collections","description":"List all API collections in a project with endpoint counts.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"manage_api_environment","description":"Create or update an API testing environment with variables like baseUrl, apiKey, tokens, etc.","category":"server","requiresConfirmation":false,"longRunning":false},{"name":"configure_api_auth","description":"Configure automatic token-based authentication for an API collection. Designates a login/token endpoint that is called automatically to obtain and inject auth tokens into subsequent requests. Use when the user wants to set up auth for their API collection, or when they mention needing to log in / get a token first.","category":"server","requiresConfirmation":false,"longRunning":false}],"methods":[{"name":"initialize","description":"Handshake and capability negotiation"},{"name":"tools/list","description":"List all available tools"},{"name":"tools/call","description":"Execute a tool"},{"name":"tools/info","description":"Get detailed info about a tool"},{"name":"ping","description":"Health check"}]}