Skip to main content

Code Examples

Practical, ready-to-use code examples for common Checkmate API operations in multiple programming languages.

Complete Examples

1. Create Project → Add Tests → Create Run

// Shared HTTP client: an axios instance pre-configured for the Checkmate API.
const axios = require('axios');

const API_TOKEN = process.env.CHECKMATE_TOKEN;
const BASE_URL = 'http://localhost:3000';

// Every request carries the bearer token and sends JSON bodies.
const defaultHeaders = {
  'Authorization': `Bearer ${API_TOKEN}`,
  'Content-Type': 'application/json'
};

const api = axios.create({ baseURL: BASE_URL, headers: defaultHeaders });

/**
 * End-to-end example: create a project, add three test cases to it,
 * open a run over those tests, then record pass/fail results.
 *
 * @returns {Promise<{projectId: number, testIds: number[], runId: number}>}
 *          IDs of the created project, tests, and run.
 * @throws Re-throws any API error after logging the response body.
 */
async function completeWorkflow() {
  try {
    // 1. Create a project
    console.log('Creating project...');
    const { data: projectBody } = await api.post('/api/v1/project/create', {
      projectName: 'Mobile App Testing',
      description: 'Test suite for iOS and Android',
      orgId: 1
    });
    const projectId = projectBody.data.projectId;
    console.log(`✓ Project created with ID: ${projectId}`);

    // 2. Create test cases (sequential awaits keep testIds in definition order)
    console.log('\nCreating test cases...');
    const caseDefinitions = [
      {
        title: 'Verify user login',
        description: 'Test login functionality',
        steps: '1. Open app\n2. Enter credentials\n3. Tap login',
        expectedResult: 'User logged in successfully'
      },
      {
        title: 'Verify profile update',
        description: 'Test profile editing',
        steps: '1. Navigate to profile\n2. Update details\n3. Save',
        expectedResult: 'Profile updated successfully'
      },
      {
        title: 'Verify logout',
        description: 'Test logout functionality',
        steps: '1. Tap logout\n2. Confirm',
        expectedResult: 'User logged out'
      }
    ];

    const testIds = [];
    for (const testCase of caseDefinitions) {
      const { data: testBody } = await api.post('/api/v1/test/create', {
        ...testCase,
        projectId,
        sectionId: 1, // Adjust based on your setup
        priorityId: 2,
        automationStatusId: 1
      });
      testIds.push(testBody.data.testId);
      console.log(`✓ Created test: ${testCase.title}`);
    }

    // 3. Create a test run containing every created test
    console.log('\nCreating test run...');
    const { data: runBody } = await api.post('/api/v1/run/create', {
      runName: 'Sprint 1 Regression',
      projectId,
      testIds
    });
    const runId = runBody.data.runId;
    console.log(`✓ Run created with ID: ${runId}`);

    // 4. Record an execution status for each test in the run
    console.log('\nUpdating test statuses...');
    await api.put('/api/v1/run/update-test-status', {
      runId,
      testIdStatusArray: [
        { testId: testIds[0], status: 'Passed' },
        { testId: testIds[1], status: 'Failed' },
        { testId: testIds[2], status: 'Passed' }
      ],
      comment: 'Initial test execution'
    });
    console.log('✓ Test statuses updated');

    console.log('\n✅ Workflow completed successfully!');
    return { projectId, testIds, runId };
  } catch (error) {
    // Prefer the API's error payload when present; fall back to the message.
    console.error('Error:', error.response?.data || error.message);
    throw error;
  }
}

// Run the workflow
// Run the workflow inside an async IIFE so errors are handled explicitly.
(async () => {
  try {
    const result = await completeWorkflow();
    console.log('\nResult:', result);
  } catch (err) {
    console.error('Failed:', err);
  }
})();

2. Bulk Test Import from CSV

import requests
import csv
import os

# API token is read from the environment so it never lands in source control.
API_TOKEN = os.getenv('CHECKMATE_TOKEN')
# Base URL of the Checkmate instance (default: local dev server).
BASE_URL = 'http://localhost:3000'

def bulk_import_tests(csv_file_path: str, project_id: int):
    """Import test cases into a Checkmate project from a CSV file.

    Args:
        csv_file_path: Path to the CSV file containing the test cases.
        project_id: ID of the Checkmate project to import into.

    Returns:
        The parsed JSON response on success (HTTP 201), otherwise None.
    """
    # Auth header only; requests sets the multipart Content-Type boundary itself.
    headers = {
        'Authorization': f'Bearer {API_TOKEN}'
    }

    # Stream the file as multipart/form-data alongside the project ID.
    with open(csv_file_path, 'rb') as file:
        files = {'file': ('tests.csv', file, 'text/csv')}
        data = {'projectId': project_id}

        response = requests.post(
            f'{BASE_URL}/api/v1/tests/upload',
            headers=headers,
            files=files,
            data=data
        )

    if response.status_code == 201:
        result = response.json()
        print(f"✅ Successfully imported {len(result['data']['created'])} tests")
        return result

    # FIX: error responses are not guaranteed to be JSON (e.g. an HTML
    # gateway error page), so decoding could itself raise — fall back to
    # the raw body text instead of crashing while reporting the failure.
    try:
        detail = response.json()
    except ValueError:
        detail = response.text
    print(f"❌ Import failed: {detail}")
    return None

# Example usage: import the tests listed in 'tests.csv' into project 1.
bulk_import_tests('tests.csv', project_id=1)

3. Generate Daily Test Report

import requests
import os
from datetime import datetime, timedelta
import csv

# Connection settings; the API token comes from the environment.
API_TOKEN = os.getenv('CHECKMATE_TOKEN')
BASE_URL = 'http://localhost:3000'

def generate_daily_report(project_id: int, output_file: str = None):
    """Generate a daily test execution report for a project.

    Fetches the project's runs, keeps those created within the last 24
    hours, prints per-run pass/fail statistics, and optionally writes
    the rows to a CSV file.

    Args:
        project_id: ID of the Checkmate project to report on.
        output_file: Optional CSV path; if given, rows are also saved there.

    Returns:
        A list of per-run stat dicts (empty if there were no recent runs).
    """
    from datetime import timezone  # needed for a timezone-aware cutoff (see fix below)

    headers = {
        'Authorization': f'Bearer {API_TOKEN}',
        'Content-Type': 'application/json'
    }

    # Get all runs for the project
    response = requests.get(
        f'{BASE_URL}/api/v1/project/runs',
        headers=headers,
        params={'projectId': project_id, 'page': 1, 'pageSize': 100}
    )
    runs = response.json()['data']['runs']

    # Filter runs from last 24 hours.
    # FIX: fromisoformat('...+00:00') produces timezone-AWARE datetimes, and
    # comparing those against a naive datetime.now() raises TypeError — the
    # cutoff must be timezone-aware as well.
    yesterday = datetime.now(timezone.utc) - timedelta(days=1)
    recent_runs = [
        run for run in runs
        if datetime.fromisoformat(run['createdOn'].replace('Z', '+00:00')) > yesterday
    ]

    print(f"\n📊 Daily Report for Project {project_id}")
    print(f"Date: {datetime.now().strftime('%Y-%m-%d')}")
    print(f"{'='*60}\n")

    report_data = []

    for run in recent_runs:
        # Get run details (large pageSize so all tests arrive in one page)
        run_response = requests.get(
            f'{BASE_URL}/api/v1/run',
            headers=headers,
            params={'runId': run['runId'], 'page': 1, 'pageSize': 1000}
        )
        run_details = run_response.json()['data']

        # Calculate statistics per status value
        total_tests = run_details['totalCount']
        passed = sum(1 for t in run_details['tests'] if t.get('status') == 'Passed')
        failed = sum(1 for t in run_details['tests'] if t.get('status') == 'Failed')
        blocked = sum(1 for t in run_details['tests'] if t.get('status') == 'Blocked')
        untested = sum(1 for t in run_details['tests'] if t.get('status') == 'Untested')

        # Guard against division by zero for empty runs
        pass_rate = (passed / total_tests * 100) if total_tests > 0 else 0

        print(f"Run: {run['runName']}")
        print(f" Total Tests: {total_tests}")
        print(f" ✓ Passed: {passed} ({pass_rate:.1f}%)")
        print(f" ✗ Failed: {failed}")
        print(f" ⊘ Blocked: {blocked}")
        print(f" ○ Untested: {untested}")
        print(f" Created By: {run['createdBy']}")
        print(f" Status: {'🔒 Locked' if run['isLocked'] else '🔓 Active'}\n")

        report_data.append({
            'Run Name': run['runName'],
            'Total Tests': total_tests,
            'Passed': passed,
            'Failed': failed,
            'Blocked': blocked,
            'Untested': untested,
            'Pass Rate %': f"{pass_rate:.1f}",
            'Created By': run['createdBy'],
            'Created On': run['createdOn'],
            'Locked': 'Yes' if run['isLocked'] else 'No'
        })

    # Save to CSV if an output file was specified and there is data to write
    if output_file and report_data:
        with open(output_file, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=report_data[0].keys())
            writer.writeheader()
            writer.writerows(report_data)
        print(f"📄 Report saved to: {output_file}")

    return report_data

# Example usage: print the report for project 1 and save rows to CSV.
generate_daily_report(project_id=1, output_file='daily_report.csv')

4. Sync Tests from Selenium/Pytest

import pytest
import requests
import os
import inspect

# Checkmate connection settings shared by all plugin methods below.
API_TOKEN = os.getenv('CHECKMATE_TOKEN')
BASE_URL = 'http://localhost:3000'
PROJECT_ID = 1  # Target Checkmate project the pytest tests sync into

class CheckmateSyncPlugin:
    """Pytest plugin to sync tests with Checkmate.

    After collection it ensures every collected pytest test has a matching
    Checkmate test case (creating one when missing); after each test's call
    phase it maps the pytest outcome to a Checkmate status.
    """

    def __init__(self):
        # Headers shared by every Checkmate API request this plugin makes.
        self.api_headers = {
            'Authorization': f'Bearer {API_TOKEN}',
            'Content-Type': 'application/json'
        }
        self.test_mapping = {}  # Maps pytest test names to Checkmate test IDs

    def pytest_collection_finish(self, session):
        """Sync test cases after collection"""
        print("\n🔄 Syncing tests with Checkmate...")

        for item in session.items:
            # Extract test information
            test_name = item.name
            # The test function's docstring doubles as the description.
            test_doc = inspect.getdoc(item.function) or "No description"
            # Test file path relative to pytest's start directory.
            test_file = item.fspath.relto(session.startdir)

            # Check if test exists in Checkmate
            test_id = self.find_or_create_test(test_name, test_doc, test_file)
            self.test_mapping[test_name] = test_id

        print(f"✅ Synced {len(self.test_mapping)} tests")

    def find_or_create_test(self, title, description, link):
        """Find existing test or create new one.

        Returns the Checkmate test ID in either case.
        """

        # Search for existing test by title.
        search_response = requests.get(
            f'{BASE_URL}/api/v1/project/tests',
            headers=self.api_headers,
            params={
                'projectId': PROJECT_ID,
                'page': 1,
                'pageSize': 1000,
                'textSearch': title
            }
        )

        # textSearch may match partially, so require an exact title match.
        tests = search_response.json()['data']['tests']
        existing = next((t for t in tests if t['title'] == title), None)

        if existing:
            return existing['testId']

        # Create new test
        create_response = requests.post(
            f'{BASE_URL}/api/v1/test/create',
            headers=self.api_headers,
            json={
                'title': title,
                'description': description,
                'projectId': PROJECT_ID,
                'sectionId': 1,
                'priorityId': 2,
                'automationStatusId': 2,  # Automated
                'link': link
            }
        )

        return create_response.json()['data']['testId']

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        """Update test status after execution"""
        # hookwrapper: the yield lets pytest build the report first,
        # then we inspect the finished result.
        outcome = yield
        report = outcome.get_result()

        # Only react to the test body phase (not setup/teardown).
        if report.when == 'call':
            test_name = item.name
            test_id = self.test_mapping.get(test_name)

            if test_id:
                status = 'Passed' if report.passed else 'Failed'
                # On failure, pass the traceback text along as the comment.
                self.update_test_result(test_id, status, str(report.longrepr) if report.failed else '')

    def update_test_result(self, test_id, status, comment):
        """Update test result in Checkmate"""
        # This would typically be done in the context of a run
        # For simplicity, this is a placeholder
        pass

# Register the plugin so pytest invokes the sync hooks above.
def pytest_configure(config):
    """Instantiate and register the Checkmate sync plugin."""
    plugin = CheckmateSyncPlugin()
    config.pluginmanager.register(plugin)

# Example tests — their docstrings become the Checkmate descriptions
# (the plugin reads them via inspect.getdoc during collection).
def test_user_login():
    """Verify user can login with valid credentials"""
    # Your test code here
    assert True

def test_user_logout():
    """Verify user can logout successfully"""
    # Your test code here
    assert True

5. CI/CD Integration (GitHub Actions)

# .github/workflows/test-execution.yml
# Creates a Checkmate run, executes pytest, pushes results, then locks the run.
name: Test Execution

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main ]
  schedule:
    - cron: '0 9 * * 1-5' # Run weekdays at 9 AM

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: |
          pip install -r requirements.txt

      # Creates the run and exposes its ID as a step output for later steps.
      - name: Create Checkmate Run
        id: create_run
        env:
          CHECKMATE_TOKEN: ${{ secrets.CHECKMATE_API_TOKEN }}
          CHECKMATE_URL: ${{ secrets.CHECKMATE_URL }}
        run: |
          RUN_ID=$(python scripts/create_run.py)
          echo "run_id=$RUN_ID" >> $GITHUB_OUTPUT

      - name: Run tests
        run: |
          pytest tests/ --json-report --json-report-file=test_results.json

      # if: always() — results are reported even when the pytest step fails.
      - name: Update Checkmate Results
        if: always()
        env:
          CHECKMATE_TOKEN: ${{ secrets.CHECKMATE_API_TOKEN }}
          CHECKMATE_URL: ${{ secrets.CHECKMATE_URL }}
          RUN_ID: ${{ steps.create_run.outputs.run_id }}
        run: |
          python scripts/update_results.py

      - name: Lock Run
        if: always()
        env:
          CHECKMATE_TOKEN: ${{ secrets.CHECKMATE_API_TOKEN }}
          CHECKMATE_URL: ${{ secrets.CHECKMATE_URL }}
          RUN_ID: ${{ steps.create_run.outputs.run_id }}
        run: |
          python scripts/lock_run.py

scripts/create_run.py:

"""Create a Checkmate run containing every test in the project.

Prints ONLY the new run ID to stdout so the GitHub Actions step can
capture it with RUN_ID=$(python scripts/create_run.py); all diagnostics
go to stderr.
"""
import requests
import os
import sys
from datetime import datetime

API_TOKEN = os.getenv('CHECKMATE_TOKEN')
BASE_URL = os.getenv('CHECKMATE_URL')
PROJECT_ID = 1

# Get all test IDs for the project
response = requests.get(
    f'{BASE_URL}/api/v1/project/tests',
    headers={'Authorization': f'Bearer {API_TOKEN}'},
    params={'projectId': PROJECT_ID, 'page': 1, 'pageSize': 1000}
)
# Fail the CI step with a clear HTTP error instead of a KeyError below.
response.raise_for_status()

test_ids = [test['testId'] for test in response.json()['data']['tests']]
if not test_ids:
    # Nothing to execute; abort rather than create an empty run.
    print(f'No tests found for project {PROJECT_ID}', file=sys.stderr)
    sys.exit(1)

# Create a run named after the current timestamp, e.g. "CI Run - 2024-01-01 09:00"
run_name = f"CI Run - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
run_response = requests.post(
    f'{BASE_URL}/api/v1/run/create',
    headers={
        'Authorization': f'Bearer {API_TOKEN}',
        'Content-Type': 'application/json'
    },
    json={
        'runName': run_name,
        'projectId': PROJECT_ID,
        'testIds': test_ids
    }
)
run_response.raise_for_status()

run_id = run_response.json()['data']['runId']
print(run_id)  # Output to stdout for GitHub Actions

scripts/update_results.py:

"""Push pytest results for the current CI run to Checkmate.

Reads test_results.json (written by pytest-json-report) and maps each
outcome to a Checkmate status on the run identified by $RUN_ID.
"""
import requests
import os
import json
import sys

API_TOKEN = os.getenv('CHECKMATE_TOKEN')
BASE_URL = os.getenv('CHECKMATE_URL')
RUN_ID = os.getenv('RUN_ID')

if not RUN_ID:
    # int(RUN_ID) below would raise a confusing TypeError; fail clearly instead.
    print('RUN_ID environment variable is not set', file=sys.stderr)
    sys.exit(1)

# Read test results produced by pytest-json-report
with open('test_results.json') as f:
    results = json.load(f)

# Map pytest results to Checkmate statuses
status_updates = []
for test in results['tests']:
    status = 'Passed' if test['outcome'] == 'passed' else 'Failed'
    # You would need to map test names to test IDs
    # This is simplified for demonstration
    test_id = test.get('checkmate_id')  # Assuming you store this
    if test_id:
        status_updates.append({
            'testId': test_id,
            'status': status
        })

# Update statuses — skip the API call entirely when no tests were mapped.
if status_updates:
    response = requests.put(
        f'{BASE_URL}/api/v1/run/update-test-status',
        headers={
            'Authorization': f'Bearer {API_TOKEN}',
            'Content-Type': 'application/json'
        },
        json={
            'runId': int(RUN_ID),
            'testIdStatusArray': status_updates,
            'comment': 'Automated execution via CI/CD'
        }
    )
    response.raise_for_status()  # surface API failures as a non-zero CI exit

print(f"✅ Updated {len(status_updates)} test statuses")

Quick Snippets

Get Test Statistics

def get_test_statistics(project_id):
    """Fetch all tests of a project and tally them by priority,
    automation status, and section.

    Args:
        project_id: ID of the Checkmate project.

    Returns:
        dict with 'total' plus 'by_priority' / 'by_automation' /
        'by_section' mappings of name -> count.
    """
    from collections import Counter  # stdlib; replaces manual ".get(k, 0) + 1" loops

    response = requests.get(
        f'{BASE_URL}/api/v1/project/tests',
        headers={'Authorization': f'Bearer {API_TOKEN}'},
        params={'projectId': project_id, 'page': 1, 'pageSize': 10000}
    )

    tests = response.json()['data']['tests']

    # One Counter pass per facet; dict() keeps the return type a plain dict
    # so existing callers see exactly the same structure.
    return {
        'total': len(tests),
        'by_priority': dict(Counter(t.get('priorityName', 'Unknown') for t in tests)),
        'by_automation': dict(Counter(t.get('automationStatusName', 'Unknown') for t in tests)),
        'by_section': dict(Counter(t.get('sectionName', 'Unknown') for t in tests)),
    }

Bulk Update Test Priority

/**
 * Bulk-update the priority of several tests in one request.
 *
 * @param {number[]} testIds - IDs of the tests to update.
 * @param {number} newPriorityId - Priority ID to assign (e.g. 1 = Critical).
 * @returns {Promise<object>} The axios response for the PUT request.
 */
async function updateTestPriorities(testIds, newPriorityId) {
  // `return await` outside a try/catch is redundant — return the promise directly.
  return api.put('/api/v1/tests/update', {
    testIds,
    propertiesToUpdate: {
      priorityId: newPriorityId
    }
  });
}

// Usage — note: top-level `await` requires an ES module or an async function.
await updateTestPriorities([101, 102, 103], 1); // Set to Critical

Download and Parse Test Report

def download_and_analyze_report(project_id):
    """Download a project's tests as CSV, save them locally, and print a
    summary of automated vs. manual test counts.

    Args:
        project_id: ID of the Checkmate project.
    """
    import csv  # local import keeps the snippet self-contained

    # Download report
    response = requests.get(
        f'{BASE_URL}/api/v1/project/download-tests',
        headers={'Authorization': f'Bearer {API_TOKEN}'},
        params={'projectId': project_id}
    )
    # FIX: don't silently save an HTML error page as "tests.csv".
    response.raise_for_status()

    # Save to file
    with open('tests.csv', 'wb') as f:
        f.write(response.content)

    # Parse and analyze. FIX: the csv module requires newline='' so that
    # quoted fields containing embedded newlines are parsed correctly.
    with open('tests.csv', 'r', newline='') as f:
        reader = csv.DictReader(f)
        tests = list(reader)

    print(f"Total Tests: {len(tests)}")
    print(f"Automated: {sum(1 for t in tests if 'Automated' in t.get('Automation Status', ''))}")
    print(f"Manual: {sum(1 for t in tests if 'Manual' in t.get('Automation Status', ''))}")

Next Steps

tip

Have a use case not covered here? Open an issue or join our Discord!