# e2e-testing

by ramidamolis-alt

# Install this skill:
npx skills add ramidamolis-alt/agent-skills-workflows --skill "e2e-testing"

Installs this specific skill from the multi-skill repository.

# Description

End-to-End Testing Framework skill - Browser automation, API testing, performance benchmarking, test report generation, and chaos engineering basics. Use for comprehensive application testing.

# SKILL.md


name: e2e-testing
description: End-to-End Testing Framework skill - Browser automation, API testing, performance benchmarking, test report generation, and chaos engineering basics. Use for comprehensive application testing.
triggers: ["e2e", "test", "browser test", "playwright", "cypress", "performance", "benchmark", "ทดสอบ"]


🧪 E2E Testing Master Skill

Expert in end-to-end testing with browser automation, API testing, and performance benchmarking.


Capability Overview

capabilities:
  browser_testing:
    - playwright: "Cross-browser automation"
    - cypress: "Component and E2E testing"
    - puppeteer: "Chrome automation"

  api_testing:
    - rest: "HTTP endpoint testing"
    - graphql: "Query/mutation testing"
    - websocket: "Real-time testing"

  performance:
    - load_testing: "Concurrent user simulation"
    - stress_testing: "Breaking point analysis"
    - benchmark: "Response time measurement"

  quality:
    - coverage: "Code coverage tracking"
    - visual_regression: "Screenshot comparison"
    - accessibility: "a11y compliance"

Browser Testing with Playwright

Basic Test Structure

import { test, expect } from '@playwright/test';

test.describe('User Authentication', () => {
  test.beforeEach(async ({ page }) => {
    await page.goto('/login');
  });

  test('successful login', async ({ page }) => {
    // Fill login form
    await page.fill('[data-testid="email"]', '[email protected]');
    await page.fill('[data-testid="password"]', 'password123');
    await page.click('[data-testid="submit"]');

    // Assert redirect to dashboard
    await expect(page).toHaveURL('/dashboard');
    await expect(page.locator('h1')).toHaveText('Welcome');
  });

  test('invalid credentials', async ({ page }) => {
    await page.fill('[data-testid="email"]', '[email protected]');
    await page.fill('[data-testid="password"]', 'wrongpass');
    await page.click('[data-testid="submit"]');

    // Assert error message
    await expect(page.locator('[data-testid="error"]')).toBeVisible();
    await expect(page.locator('[data-testid="error"]')).toHaveText('Invalid credentials');
  });
});

Page Object Model

// pages/LoginPage.ts
import { Page } from '@playwright/test';

export class LoginPage {
  constructor(private page: Page) {}

  async goto() {
    await this.page.goto('/login');
  }

  async login(email: string, password: string) {
    await this.page.fill('[data-testid="email"]', email);
    await this.page.fill('[data-testid="password"]', password);
    await this.page.click('[data-testid="submit"]');
  }

  async getErrorMessage() {
    return this.page.locator('[data-testid="error"]').textContent();
  }
}

// tests/login.spec.ts
import { test, expect } from '@playwright/test';
import { LoginPage } from '../pages/LoginPage';

test('login flow', async ({ page }) => {
  const loginPage = new LoginPage(page);
  await loginPage.goto();
  await loginPage.login('user@example.com', 'password123');
  await expect(page).toHaveURL('/dashboard');
});
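
A common refinement is to expose page objects as Playwright fixtures so tests receive them ready-made; a minimal sketch (the fixtures module and file names are illustrative):

// fixtures.ts
import { test as base, expect } from '@playwright/test';
import { LoginPage } from './pages/LoginPage';

export const test = base.extend<{ loginPage: LoginPage }>({
  loginPage: async ({ page }, use) => {
    // Build the page object once per test and hand it to the test body
    await use(new LoginPage(page));
  },
});
export { expect };

// A test importing this `test` gets the page object as a fixture:
test('login flow with fixture', async ({ page, loginPage }) => {
  await loginPage.goto();
  await loginPage.login('user@example.com', 'password123');
  await expect(page).toHaveURL('/dashboard');
});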

Cross-Browser Testing

// playwright.config.ts
import { defineConfig, devices } from '@playwright/test';

export default defineConfig({
  projects: [
    {
      name: 'chromium',
      use: { ...devices['Desktop Chrome'] },
    },
    {
      name: 'firefox',
      use: { ...devices['Desktop Firefox'] },
    },
    {
      name: 'webkit',
      use: { ...devices['Desktop Safari'] },
    },
    {
      name: 'Mobile Chrome',
      use: { ...devices['Pixel 5'] },
    },
    {
      name: 'Mobile Safari',
      use: { ...devices['iPhone 12'] },
    },
  ],
});

API Testing

REST API Testing

import { test, expect } from '@playwright/test';

test.describe('API Tests', () => {
  const baseUrl = 'https://api.example.com';

  test('GET /users', async ({ request }) => {
    const response = await request.get(`${baseUrl}/users`);

    expect(response.status()).toBe(200);

    const data = await response.json();
    expect(data).toHaveProperty('users');
    expect(Array.isArray(data.users)).toBe(true);
  });

  test('POST /users', async ({ request }) => {
    const response = await request.post(`${baseUrl}/users`, {
      data: {
        name: 'John Doe',
        email: 'john@example.com'
      }
    });

    expect(response.status()).toBe(201);

    const data = await response.json();
    expect(data.name).toBe('John Doe');
    expect(data).toHaveProperty('id');
  });

  test('authenticated request', async ({ request }) => {
    const response = await request.get(`${baseUrl}/profile`, {
      headers: {
        'Authorization': `Bearer ${process.env.API_TOKEN}`
      }
    });

    expect(response.status()).toBe(200);
  });
});

GraphQL Testing

test('GraphQL query', async ({ request }) => {
  const response = await request.post('https://api.example.com/graphql', {
    data: {
      query: `
        query GetUser($id: ID!) {
          user(id: $id) {
            id
            name
            email
          }
        }
      `,
      variables: { id: '123' }
    }
  });

  const data = await response.json();
  expect(data.data.user.id).toBe('123');
});
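
WebSocket Testing

The websocket capability listed above is not covered by the examples; one way to observe real-time traffic in a browser test is Playwright's page-level websocket event (the /live-feed route and payload handling are illustrative assumptions):

import { test, expect } from '@playwright/test';

test('receives live updates over WebSocket', async ({ page }) => {
  const frames: string[] = [];

  // Record every frame the page receives on any WebSocket it opens
  page.on('websocket', ws => {
    ws.on('framereceived', frame => frames.push(frame.payload.toString()));
  });

  await page.goto('/live-feed');

  // Poll until at least one frame has arrived (fails if the assertion times out)
  await expect.poll(() => frames.length).toBeGreaterThan(0);
});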

Performance Testing

Load Testing with k6

// load-test.js
import http from 'k6/http';
import { check, sleep } from 'k6';

export const options = {
  stages: [
    { duration: '30s', target: 20 },   // Ramp up
    { duration: '1m', target: 20 },    // Hold
    { duration: '10s', target: 0 },    // Ramp down
  ],
  thresholds: {
    http_req_duration: ['p(95)<200'],  // 95% of requests under 200ms
    http_req_failed: ['rate<0.01'],    // Less than 1% failures
  },
};

export default function() {
  const response = http.get('https://api.example.com/health');

  check(response, {
    'status is 200': (r) => r.status === 200,
    'response time < 200ms': (r) => r.timings.duration < 200,
  });

  sleep(1);
}
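
For the stress-testing / breaking-point analysis listed in the capabilities, the same default function can be reused with more aggressive stages; the targets below are illustrative and should be tuned to the system under test:

// stress-test.js
export const options = {
  stages: [
    { duration: '1m', target: 100 },   // normal load
    { duration: '2m', target: 400 },   // beyond expected peak
    { duration: '2m', target: 800 },   // push toward the breaking point
    { duration: '1m', target: 0 },     // recovery
  ],
};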

Benchmark Script

import time
import statistics
import asyncio
import aiohttp

async def benchmark_endpoint(url: str, num_requests: int = 100):
    """
    Benchmark API endpoint performance
    """
    times = []
    errors = 0

    async with aiohttp.ClientSession() as session:
        for _ in range(num_requests):
            start = time.time()
            try:
                async with session.get(url) as response:
                    await response.text()
                    if response.status != 200:
                        errors += 1
            except Exception:
                errors += 1
            times.append(time.time() - start)

    sorted_times = sorted(times)
    return {
        "total_requests": num_requests,
        "errors": errors,
        "error_rate": errors / num_requests,
        "min_ms": sorted_times[0] * 1000,
        "max_ms": sorted_times[-1] * 1000,
        "avg_ms": statistics.mean(times) * 1000,
        "p50_ms": statistics.median(times) * 1000,
        "p95_ms": sorted_times[int(num_requests * 0.95)] * 1000,
        "p99_ms": sorted_times[int(num_requests * 0.99)] * 1000,
    }
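
Example usage (the URL is a placeholder):

# Prints a latency summary dict for the chosen endpoint
summary = asyncio.run(benchmark_endpoint("https://api.example.com/health", num_requests=200))
print(summary)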

Visual Regression Testing

Screenshot Comparison

import { test, expect } from '@playwright/test';

test('visual regression', async ({ page }) => {
  await page.goto('/dashboard');

  // Full page screenshot comparison
  await expect(page).toHaveScreenshot('dashboard.png', {
    maxDiffPixels: 100,
  });

  // Component screenshot
  const header = page.locator('header');
  await expect(header).toHaveScreenshot('header.png');
});
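
Accessibility Checks

The a11y item in the quality capabilities can be covered in the same browser tests; a minimal sketch, assuming the @axe-core/playwright package is installed:

import { test, expect } from '@playwright/test';
import AxeBuilder from '@axe-core/playwright';

test('dashboard has no detectable a11y violations', async ({ page }) => {
  await page.goto('/dashboard');

  // Run an axe-core scan scoped to WCAG 2.0 A/AA rules
  const results = await new AxeBuilder({ page })
    .withTags(['wcag2a', 'wcag2aa'])
    .analyze();

  expect(results.violations).toEqual([]);
});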

Visual Testing Workflow

visual_testing:
  baseline:
    - Capture screenshots on main branch
    - Store in version control or cloud

  comparison:
    - Capture screenshots on PR branch
    - Compare against baseline
    - Flag differences > threshold

  review:
    - Visual diff review
    - Accept or reject changes
    - Update baseline if accepted
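
With Playwright's built-in snapshots, accepting an intentional change means regenerating the stored baselines:

npx playwright test --update-snapshots   # rewrite baseline screenshots after review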

Test Report Generation

HTML Report

// playwright.config.ts
export default defineConfig({
  reporter: [
    ['html', { open: 'never' }],
    ['json', { outputFile: 'test-results.json' }],
    ['junit', { outputFile: 'junit-results.xml' }],
  ],
});
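
After a run, the HTML report can be served locally:

npx playwright show-report               # open the most recent HTML report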

Custom Report Generator

import json
from jinja2 import Template

def generate_html_report(results: dict, output_path: str):
    """
    Generate HTML test report
    """
    template = Template("""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Test Report</title>
        <style>
            .pass { color: green; }
            .fail { color: red; }
            .skip { color: orange; }
        </style>
    </head>
    <body>
        <h1>Test Report</h1>
        <p>Generated: {{ timestamp }}</p>

        <h2>Summary</h2>
        <ul>
            <li>Total: {{ total }}</li>
            <li class="pass">Passed: {{ passed }}</li>
            <li class="fail">Failed: {{ failed }}</li>
            <li class="skip">Skipped: {{ skipped }}</li>
        </ul>

        <h2>Test Results</h2>
        <table>
            <tr>
                <th>Test</th>
                <th>Status</th>
                <th>Duration</th>
            </tr>
            {% for test in tests %}
            <tr>
                <td>{{ test.name }}</td>
                <td class="{{ test.status }}">{{ test.status }}</td>
                <td>{{ test.duration }}ms</td>
            </tr>
            {% endfor %}
        </table>
    </body>
    </html>
    """)

    html = template.render(**results)

    with open(output_path, 'w') as f:
        f.write(html)
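
Example invocation with illustrative data (field names match the template above):

generate_html_report(
    {
        "timestamp": "2025-01-01 12:00",
        "total": 3,
        "passed": 2,
        "failed": 1,
        "skipped": 0,
        "tests": [
            {"name": "login flow", "status": "pass", "duration": 812},
            {"name": "GET /users", "status": "pass", "duration": 95},
            {"name": "visual regression", "status": "fail", "duration": 2034},
        ],
    },
    "report.html",
)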

Chaos Engineering Basics

Fault Injection

chaos_patterns:
  network_faults:
    - latency: "Add artificial delay"
    - packet_loss: "Drop random packets"
    - timeout: "Force connection timeouts"

  service_faults:
    - crash: "Stop random service instances"
    - cpu_stress: "High CPU load"
    - memory_pressure: "Memory exhaustion"

  data_faults:
    - corruption: "Inject incorrect data"
    - delay: "Slow database responses"

Resilience Testing

import { test, expect } from '@playwright/test';

test.describe('Resilience Tests', () => {
  test('handles API timeout', async ({ page }) => {
    // Mock slow API response
    await page.route('**/api/data', async route => {
      await new Promise(r => setTimeout(r, 5000));
      await route.fulfill({ status: 408 });
    });

    await page.goto('/data-page');

    // Verify timeout handling
    await expect(page.locator('[data-testid="timeout-error"]')).toBeVisible();
    await expect(page.locator('[data-testid="retry-button"]')).toBeVisible();
  });

  test('handles 500 errors gracefully', async ({ page }) => {
    await page.route('**/api/data', route => {
      route.fulfill({
        status: 500,
        body: JSON.stringify({ error: 'Internal Server Error' })
      });
    });

    await page.goto('/data-page');

    // Verify error handling
    await expect(page.locator('[data-testid="error-message"]')).toBeVisible();
  });
});
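
Going fully offline is another browser-level fault worth covering; Playwright's context.setOffline makes this direct (the selectors are hypothetical, in the same style as the tests above):

test('handles losing the network connection', async ({ page, context }) => {
  await page.goto('/data-page');

  // Cut the network and trigger a refetch
  await context.setOffline(true);
  await page.click('[data-testid="refresh"]');

  await expect(page.locator('[data-testid="offline-banner"]')).toBeVisible();
});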

MCP Integration

Test Strategy with UltraThink

async def design_test_strategy(application_description):
    """
    Design comprehensive test strategy using UltraThink
    """
    return await mcp_UltraThink_ultrathink(
        thought=f"""
        Designing E2E test strategy for:
        {application_description}

        Analysis:
        1. Critical user journeys to test
        2. API endpoints to cover
        3. Performance requirements
        4. Edge cases and error scenarios
        5. Browser/device coverage

        Test plan:
        """,
        total_thoughts=20
    )

Research Testing Best Practices

import asyncio

async def research_testing_approach(technology):
    """
    Research testing best practices with MCP
    """
    results = await asyncio.gather(
        mcp_Context7_query_docs(
            libraryId="/microsoft/playwright",
            query="best practices testing patterns"
        ),
        mcp_Brave_brave_web_search(f"{technology} e2e testing 2026"),
        mcp_Memory_search_nodes("testing patterns")
    )

    return await mcp_UltraThink_ultrathink(
        thought=f"""
        Synthesizing testing research:
        - Playwright docs: {results[0][:1000]}
        - Latest practices: {results[1][:1000]}
        - Past patterns: {results[2]}

        Recommended approach: ...
        """,
        total_thoughts=15
    )

Quick Reference

Test Pyramid

            /\
           /  \      E2E Tests (10%)
          /    \     - Full user journeys, critical paths only
         /------\
        /        \   Integration Tests (20%)
       /          \  - API contracts, service boundaries
      /------------\
     /              \  Unit Tests (70%)
    /________________\ - Fast, isolated, high coverage

Command Cheatsheet

# Playwright
npx playwright test                    # Run all tests
npx playwright test --ui               # Interactive mode
npx playwright test --project=chromium # Specific browser
npx playwright codegen                 # Generate tests

# k6 Load Testing
k6 run load-test.js                    # Run load test
k6 run --vus 50 --duration 30s test.js # Quick load test

# Coverage
npx nyc npm test                       # Coverage report

Related Agents

  • omega-agent: Complex test orchestration
  • debugger: Test failure analysis
  • performance-optimizer: Performance test results
  • rollback-engine: Test environment management

# Supported AI Coding Agents

This skill is compatible with the SKILL.md standard and works with all major AI coding agents.

Learn more about the SKILL.md standard and how to use these skills with your preferred AI coding agent.