Skip to content

Examples

Complete, production-ready code examples in Python and JavaScript.

The examples below demonstrate error handling, rate limiting, and other production best practices.


Table of Contents


Basic Request

The simplest way to make a request and get a response.

Python
import requests
import time

API_KEY = "your_api_key_here"
BASE_URL = "https://app.beginswithai.com/v1"

# Shared bearer-token headers for every call.
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}

# Submit request. A timeout plus raise_for_status() makes failures surface
# here, instead of as a confusing KeyError on "request_id" below.
response = requests.post(
    f"{BASE_URL}/ai",
    json={
        "model": "gpt-5-1",
        "prompt": "Explain async APIs in one sentence"
    },
    headers=headers,
    timeout=30
)
response.raise_for_status()

request_id = response.json()["request_id"]
print(f"Request submitted: {request_id}")

# Poll for result — bounded, so a lost request can't hang the script forever.
deadline = time.time() + 300  # 5-minute cap on polling
while time.time() < deadline:
    poll_response = requests.get(
        f"{BASE_URL}/ai",
        params={"req_id": request_id},
        headers=headers,
        timeout=30
    )
    poll_response.raise_for_status()
    result = poll_response.json()

    if "response" in result:
        print(f"\nResponse: {result['response']}")
        break

    print("Processing...")
    time.sleep(5)
else:
    # while/else: only reached if the deadline expired without a break.
    print("Timed out waiting for a response")
JavaScript
const API_KEY = "your_api_key_here";
const BASE_URL = "https://app.beginswithai.com/v1";

// Shared bearer-token headers for every call.
const headers = {
    "Authorization": `Bearer ${API_KEY}`,
    "Content-Type": "application/json"
};

async function main() {
    // Submit request — check response.ok so an auth/validation failure
    // surfaces here instead of as an undefined request_id later.
    const response = await fetch(`${BASE_URL}/ai`, {
        method: "POST",
        headers: headers,
        body: JSON.stringify({
            model: "gpt-5-1",
            prompt: "Explain async APIs in one sentence"
        })
    });
    if (!response.ok) throw new Error(`HTTP ${response.status}`);

    const data = await response.json();
    const requestId = data.request_id;
    console.log(`Request submitted: ${requestId}`);

    // Poll for result — bounded, so a lost request can't hang forever.
    const deadline = Date.now() + 300000;  // 5-minute cap on polling
    while (Date.now() < deadline) {
        const pollResponse = await fetch(
            `${BASE_URL}/ai?req_id=${requestId}`,
            { headers: headers }
        );
        if (!pollResponse.ok) throw new Error(`HTTP ${pollResponse.status}`);

        const result = await pollResponse.json();

        if (result.response) {
            console.log(`\nResponse: ${result.response}`);
            return;
        }

        console.log("Processing...");
        await new Promise(r => setTimeout(r, 5000));
    }
    console.log("Timed out waiting for a response");
}

// .catch avoids an unhandled promise rejection on failure.
main().catch(err => console.error(err));

Batch Processing

Process multiple prompts efficiently with rate limiting.

Python
import requests
import time
from typing import List, Dict

# Replace with your real API key; every request authenticates with this bearer token.
API_KEY = "your_api_key_here"
BASE_URL = "https://app.beginswithai.com/v1"

# Headers shared by all requests in this example.
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}

def submit_requests(prompts: List[str], model: str = "gpt-5-1") -> List[str]:
    """Submit every prompt; return one request ID per prompt (None on failure)."""
    submitted: List[str] = []

    for index, text in enumerate(prompts):
        # Rate limiting: 10 requests per minute for Starter tier
        if index and index % 10 == 0:
            print(f"Submitted {index} requests. Waiting 60s...")
            time.sleep(60)

        try:
            reply = requests.post(
                f"{BASE_URL}/ai",
                json={"model": model, "prompt": text},
                headers=headers,
                timeout=30,
            )
            reply.raise_for_status()
            rid = reply.json()["request_id"]
            submitted.append(rid)
            print(f"Submitted: {rid}")
        except Exception as exc:
            # Keep a None placeholder so indices still line up with prompts.
            print(f"Failed to submit prompt: {exc}")
            submitted.append(None)

    return submitted

def poll_all_results(request_ids: List[str]) -> List[Dict]:
    """Poll every submitted request until done, or until a 10-minute deadline."""
    results: List[Dict] = [None] * len(request_ids)
    # Indices still awaiting a result; failed submissions (None IDs) are skipped.
    outstanding = {idx for idx, rid in enumerate(request_ids) if rid is not None}

    deadline = time.time() + 600  # 10 minutes total timeout

    while outstanding and time.time() < deadline:
        for idx in sorted(outstanding):
            rid = request_ids[idx]

            try:
                reply = requests.get(
                    f"{BASE_URL}/ai",
                    params={"req_id": rid},
                    headers=headers,
                    timeout=30,
                )
                reply.raise_for_status()
                payload = reply.json()
            except Exception as exc:
                # Transient failure: leave this index pending for the next sweep.
                print(f"Error polling {rid}: {exc}")
                continue

            if "response" in payload:
                results[idx] = payload
                outstanding.discard(idx)
                print(f"Completed: {rid}")

        if outstanding:
            time.sleep(5)

    return results

# Usage: submit everything first, then poll for all results together.
prompts = [
    "What is machine learning?",
    "Explain neural networks briefly",
    "What is deep learning?",
    "Describe gradient descent",
    "What are transformers in AI?"
]

print("Submitting requests...")
request_ids = submit_requests(prompts, model="gpt-5-1")

print("\nPolling for results...")
results = poll_all_results(request_ids)

# Failed submissions and timeouts come back as None entries.
print("\n=== RESULTS ===")
for i, result in enumerate(results):
    if result:
        print(f"\nPrompt: {prompts[i]}")
        print(f"Response: {result['response'][:200]}...")
    else:
        print(f"\nPrompt: {prompts[i]}")
        print("Response: Failed or timed out")
JavaScript
// Replace with your real API key; every request authenticates with this bearer token.
const API_KEY = "your_api_key_here";
const BASE_URL = "https://app.beginswithai.com/v1";

// Headers shared by all requests in this example.
const headers = {
    "Authorization": `Bearer ${API_KEY}`,
    "Content-Type": "application/json"
};

async function submitRequests(prompts, model = "gpt-5-1") {
    // Submit each prompt; the returned array holds one request ID per
    // prompt, or null where the submission failed.
    const ids = [];

    for (let index = 0; index < prompts.length; index++) {
        // Rate limiting: 10 requests per minute for Starter tier
        if (index > 0 && index % 10 === 0) {
            console.log(`Submitted ${index} requests. Waiting 60s...`);
            await new Promise(resolve => setTimeout(resolve, 60000));
        }

        try {
            const reply = await fetch(`${BASE_URL}/ai`, {
                method: "POST",
                headers,
                body: JSON.stringify({ model, prompt: prompts[index] })
            });
            if (!reply.ok) throw new Error(`HTTP ${reply.status}`);

            const payload = await reply.json();
            ids.push(payload.request_id);
            console.log(`Submitted: ${payload.request_id}`);
        } catch (err) {
            // Keep a null placeholder so indices still line up with prompts.
            console.log(`Failed to submit prompt: ${err}`);
            ids.push(null);
        }
    }

    return ids;
}

async function pollAllResults(requestIds) {
    // Poll every successfully-submitted request until each completes or the
    // overall 10-minute deadline passes; failed submissions stay null.
    const results = requestIds.map(() => null);
    const pending = new Set();
    requestIds.forEach((rid, idx) => {
        if (rid) pending.add(idx);
    });

    const deadline = Date.now() + 600000;  // 10 minutes

    while (pending.size > 0 && Date.now() < deadline) {
        for (const idx of Array.from(pending)) {
            const rid = requestIds[idx];

            try {
                const reply = await fetch(
                    `${BASE_URL}/ai?req_id=${rid}`,
                    { headers }
                );
                if (!reply.ok) throw new Error(`HTTP ${reply.status}`);

                const payload = await reply.json();
                if (payload.response) {
                    results[idx] = payload;
                    pending.delete(idx);
                    console.log(`Completed: ${rid}`);
                }
            } catch (err) {
                // Transient failure: leave this index pending for the next sweep.
                console.log(`Error polling ${rid}: ${err}`);
            }
        }

        if (pending.size > 0) {
            await new Promise(resolve => setTimeout(resolve, 5000));
        }
    }

    return results;
}

// Usage: submit everything first, then poll for all results together.
async function main() {
    const prompts = [
        "What is machine learning?",
        "Explain neural networks briefly",
        "What is deep learning?",
        "Describe gradient descent",
        "What are transformers in AI?"
    ];

    console.log("Submitting requests...");
    const requestIds = await submitRequests(prompts, "gpt-5-1");

    console.log("\nPolling for results...");
    const results = await pollAllResults(requestIds);

    // Failed submissions and timeouts come back as null entries.
    console.log("\n=== RESULTS ===");
    for (let i = 0; i < results.length; i++) {
        if (results[i]) {
            console.log(`\nPrompt: ${prompts[i]}`);
            console.log(`Response: ${results[i].response.substring(0, 200)}...`);
        } else {
            console.log(`\nPrompt: ${prompts[i]}`);
            console.log("Response: Failed or timed out");
        }
    }
}

main();

Rate-Limited Client

A client that automatically handles rate limiting.

Python
import requests
import time
from collections import deque
from typing import Optional

class RateLimitedClient:
    """API client that throttles itself to a fixed requests-per-minute budget."""

    def __init__(self, api_key: str, requests_per_minute: int = 10):
        self.api_key = api_key
        self.base_url = "https://app.beginswithai.com/v1"
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        self.requests_per_minute = requests_per_minute
        # Timestamps of recent submissions — a sliding one-minute window.
        self.request_times = deque()

    def _wait_if_needed(self):
        """Block until a new request fits inside the per-minute budget."""
        now = time.time()

        # Drop timestamps that have aged out of the one-minute window.
        while self.request_times and self.request_times[0] < now - 60:
            self.request_times.popleft()

        # Window full: sleep until the oldest entry ages out (+1s slack).
        if len(self.request_times) >= self.requests_per_minute:
            pause = 60 - (now - self.request_times[0]) + 1
            if pause > 0:
                print(f"Rate limit reached. Waiting {pause:.1f}s...")
                time.sleep(pause)
                self.request_times.clear()

        self.request_times.append(time.time())

    def submit_request(self, model: str, prompt: str) -> Optional[str]:
        """Submit a request with automatic rate limiting"""
        self._wait_if_needed()

        try:
            reply = requests.post(
                f"{self.base_url}/ai",
                json={"model": model, "prompt": prompt},
                headers=self.headers,
                timeout=30,
            )
            reply.raise_for_status()
            return reply.json()["request_id"]
        except Exception as exc:
            print(f"Error submitting request: {exc}")
            return None

    def poll_result(self, request_id: str, timeout: int = 300) -> Optional[str]:
        """Poll for result with timeout"""
        deadline = time.time() + timeout

        while time.time() < deadline:
            try:
                reply = requests.get(
                    f"{self.base_url}/ai",
                    params={"req_id": request_id},
                    headers=self.headers,
                    timeout=30,
                )
                reply.raise_for_status()
                payload = reply.json()

                if "response" in payload:
                    return payload["response"]
            except Exception as exc:
                print(f"Error polling: {exc}")
            time.sleep(5)

        return None

# Usage
client = RateLimitedClient("your_api_key_here", requests_per_minute=10)

# This will automatically handle rate limiting: the client sleeps as needed
# so these 25 submissions never exceed 10 per minute.
for i in range(25):
    request_id = client.submit_request("gpt-5-1", f"What is question {i}?")
    if request_id:
        print(f"Submitted request {i}: {request_id}")
JavaScript
class RateLimitedClient {
    // API client that throttles itself to a fixed requests-per-minute budget.
    constructor(apiKey, requestsPerMinute = 10) {
        this.apiKey = apiKey;
        this.baseUrl = "https://app.beginswithai.com/v1";
        this.headers = {
            "Authorization": `Bearer ${apiKey}`,
            "Content-Type": "application/json"
        };
        this.requestsPerMinute = requestsPerMinute;
        // Timestamps (ms) of recent submissions — a sliding one-minute window.
        this.requestTimes = [];
    }

    // Block until a new request fits inside the per-minute budget.
    async waitIfNeeded() {
        const now = Date.now();

        // Drop timestamps that have aged out of the one-minute window.
        this.requestTimes = this.requestTimes.filter(t => t > now - 60000);

        // Window full: sleep until the oldest entry ages out (+1s slack).
        if (this.requestTimes.length >= this.requestsPerMinute) {
            const pause = 60000 - (now - this.requestTimes[0]) + 1000;
            if (pause > 0) {
                console.log(`Rate limit reached. Waiting ${(pause/1000).toFixed(1)}s...`);
                await new Promise(resolve => setTimeout(resolve, pause));
                this.requestTimes = [];
            }
        }

        this.requestTimes.push(Date.now());
    }

    // Submit one prompt, resolving to its request ID or null on failure.
    async submitRequest(model, prompt) {
        await this.waitIfNeeded();

        try {
            const reply = await fetch(`${this.baseUrl}/ai`, {
                method: "POST",
                headers: this.headers,
                body: JSON.stringify({ model, prompt })
            });
            if (!reply.ok) throw new Error(`HTTP ${reply.status}`);

            const payload = await reply.json();
            return payload.request_id;
        } catch (err) {
            console.log(`Error submitting request: ${err}`);
            return null;
        }
    }

    // Poll until the result arrives or `timeout` ms elapse; null on timeout.
    async pollResult(requestId, timeout = 300000) {
        const deadline = Date.now() + timeout;

        while (Date.now() < deadline) {
            try {
                const reply = await fetch(
                    `${this.baseUrl}/ai?req_id=${requestId}`,
                    { headers: this.headers }
                );
                if (!reply.ok) throw new Error(`HTTP ${reply.status}`);

                const payload = await reply.json();
                if (payload.response) {
                    return payload.response;
                }
            } catch (err) {
                console.log(`Error polling: ${err}`);
            }
            await new Promise(resolve => setTimeout(resolve, 5000));
        }

        return null;
    }
}

// Usage
const client = new RateLimitedClient("your_api_key_here", 10);

async function main() {
    // This will automatically handle rate limiting: the client sleeps as
    // needed so these 25 submissions never exceed 10 per minute.
    for (let i = 0; i < 25; i++) {
        const requestId = await client.submitRequest("gpt-5-1", `What is question ${i}?`);
        if (requestId) {
            console.log(`Submitted request ${i}: ${requestId}`);
        }
    }
}

main();

Production-Ready Client Class

A complete client with error handling, retries, and logging.

Python
import requests
import time
import logging
from typing import Optional, Dict
from enum import Enum

# Module-level logger at INFO so submit/poll progress is visible by default.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class Model(Enum):
    """Model identifiers accepted in the request's `model` field."""
    GPT_5_1 = "gpt-5-1"
    GPT_5_1_THINKING = "gpt-5-1-thinking"
    GPT_4O = "gpt-4o"
    GROK_4_1 = "grok-4-1"
    GROK_4_1_THINKING = "grok-4-1-thinking"
    GEMINI_3_PRO = "gemini-3-pro"
    GEMINI_2_5_PRO = "gemini-2-5-pro"
    GEMINI_2_5_FLASH = "gemini-2-5-flash"
    DEEPSEEK_V3_2 = "deepseek-v3-2"

class BeginswithAIClient:
    """Production-oriented client: submit with retries/backoff, poll with timeout.

    Retry policy in submit(): HTTP 429 waits a flat 60s, HTTP 500 backs off
    exponentially (5s, 10s, 20s...), 4xx client errors fail immediately.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://app.beginswithai.com/v1"
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

    def submit(self, model: Model, prompt: str, max_retries: int = 3) -> Optional[str]:
        """Submit request with retry logic.

        Returns the request ID on success, or None on unrecoverable client
        errors or when retries are exhausted.
        """
        for attempt in range(max_retries):
            try:
                response = requests.post(
                    f"{self.base_url}/ai",
                    json={"model": model.value, "prompt": prompt},
                    headers=self.headers,
                    timeout=30
                )

                # 429: rate limited — wait a full window, then spend one retry.
                if response.status_code == 429:
                    logger.warning("Rate limited. Waiting 60s...")
                    time.sleep(60)
                    continue
                elif response.status_code == 500:
                    # Transient server error: exponential backoff (5s, 10s, 20s...).
                    if attempt < max_retries - 1:
                        wait_time = 5 * (2 ** attempt)
                        logger.warning(f"Server error. Retrying in {wait_time}s...")
                        time.sleep(wait_time)
                        continue

                # Any remaining non-2xx status raises HTTPError, handled below.
                response.raise_for_status()
                request_id = response.json()["request_id"]
                logger.info(f"Request submitted: {request_id}")
                return request_id

            except requests.exceptions.HTTPError as e:
                # Client errors are not retryable: report the detail and give up.
                if e.response.status_code in [400, 401, 403, 404]:
                    error_detail = e.response.json().get("detail", "Unknown error")
                    logger.error(f"Client error: {error_detail}")
                    return None
                elif attempt == max_retries - 1:
                    logger.error(f"Max retries exceeded: {e}")
                    return None
            except Exception as e:
                # Network/parse failures: brief pause, then retry.
                if attempt == max_retries - 1:
                    logger.error(f"Unexpected error: {e}")
                    return None
                time.sleep(5)

        return None

    def poll(self, request_id: str, poll_interval: int = 5, timeout: int = 300) -> Optional[str]:
        """Poll for result with timeout.

        Returns the response text, or None if the request is unknown/expired
        (404) or `timeout` seconds elapse without a result.
        """
        start_time = time.time()

        while time.time() - start_time < timeout:
            try:
                response = requests.get(
                    f"{self.base_url}/ai",
                    params={"req_id": request_id},
                    headers=self.headers,
                    timeout=30
                )

                # 404 means the server no longer knows this ID — stop polling.
                if response.status_code == 404:
                    logger.error("Request not found or expired")
                    return None

                response.raise_for_status()
                result = response.json()

                if "response" in result:
                    logger.info(f"Result received for {request_id}")
                    return result["response"]

                time.sleep(poll_interval)

            except Exception as e:
                # Transient polling errors are logged and retried until timeout.
                logger.warning(f"Polling error: {e}")
                time.sleep(poll_interval)

        logger.error(f"Timeout waiting for {request_id}")
        return None

    def complete(self, model: Model, prompt: str) -> Optional[str]:
        """Submit and poll in one call"""
        request_id = self.submit(model, prompt)
        if not request_id:
            return None

        return self.poll(request_id)

# Usage
client = BeginswithAIClient("your_api_key_here")

# Simple usage: submit and block until the answer (or None) comes back.
response = client.complete(Model.GPT_5_1, "Explain async APIs")
if response:
    print(f"Response: {response}")

# Or separate submit and poll — useful for doing work while waiting.
request_id = client.submit(Model.GEMINI_2_5_FLASH, "What is AI?")
if request_id:
    # Do other work...
    result = client.poll(request_id)
    if result:
        print(f"Result: {result}")
JavaScript
// Model identifiers accepted in the request's `model` field.
const Model = {
    GPT_5_1: "gpt-5-1",
    GPT_5_1_THINKING: "gpt-5-1-thinking",
    GPT_4O: "gpt-4o",
    GROK_4_1: "grok-4-1",
    GROK_4_1_THINKING: "grok-4-1-thinking",
    GEMINI_3_PRO: "gemini-3-pro",
    GEMINI_2_5_PRO: "gemini-2-5-pro",
    GEMINI_2_5_FLASH: "gemini-2-5-flash",
    DEEPSEEK_V3_2: "deepseek-v3-2"
};

class BeginswithAIClient {
    // Production-oriented client: submit with retries/backoff, poll with
    // timeout. Retry policy in submit(): HTTP 429 waits a flat 60s, HTTP 500
    // backs off exponentially (5s, 10s, 20s...), 4xx client errors fail
    // immediately.
    constructor(apiKey) {
        this.apiKey = apiKey;
        this.baseUrl = "https://app.beginswithai.com/v1";
        this.headers = {
            "Authorization": `Bearer ${apiKey}`,
            "Content-Type": "application/json"
        };
    }

    // Submit a prompt; resolves to the request ID, or null on unrecoverable
    // client errors or when retries are exhausted.
    async submit(model, prompt, maxRetries = 3) {
        for (let attempt = 0; attempt < maxRetries; attempt++) {
            try {
                const response = await fetch(`${this.baseUrl}/ai`, {
                    method: "POST",
                    headers: this.headers,
                    body: JSON.stringify({ model, prompt })
                });

                // 429: rate limited — wait a full window, then spend one retry.
                if (response.status === 429) {
                    console.warn("Rate limited. Waiting 60s...");
                    await new Promise(r => setTimeout(r, 60000));
                    continue;
                } else if (response.status === 500) {
                    // Transient server error: exponential backoff (5s, 10s, 20s...).
                    if (attempt < maxRetries - 1) {
                        const waitTime = 5000 * (2 ** attempt);
                        console.warn(`Server error. Retrying in ${waitTime/1000}s...`);
                        await new Promise(r => setTimeout(r, waitTime));
                        continue;
                    }
                }

                if (!response.ok) {
                    // Client errors are not retryable: report and give up.
                    if ([400, 401, 403, 404].includes(response.status)) {
                        const error = await response.json();
                        console.error(`Client error: ${error.detail}`);
                        return null;
                    }
                    throw new Error(`HTTP ${response.status}`);
                }

                const data = await response.json();
                console.log(`Request submitted: ${data.request_id}`);
                return data.request_id;
            } catch (error) {
                // Network/parse failures: brief pause, then retry.
                if (attempt === maxRetries - 1) {
                    console.error(`Max retries exceeded: ${error}`);
                    return null;
                }
                await new Promise(r => setTimeout(r, 5000));
            }
        }

        return null;
    }

    // Poll for the result; resolves to the response text, or null if the
    // request is unknown/expired (404) or the timeout (ms) elapses first.
    async poll(requestId, pollInterval = 5000, timeout = 300000) {
        const startTime = Date.now();

        while (Date.now() - startTime < timeout) {
            try {
                const response = await fetch(
                    `${this.baseUrl}/ai?req_id=${requestId}`,
                    { headers: this.headers }
                );

                // 404 means the server no longer knows this ID — stop polling.
                if (response.status === 404) {
                    console.error("Request not found or expired");
                    return null;
                }

                if (!response.ok) {
                    throw new Error(`HTTP ${response.status}`);
                }

                const result = await response.json();

                if (result.response) {
                    console.log(`Result received for ${requestId}`);
                    return result.response;
                }

                await new Promise(r => setTimeout(r, pollInterval));
            } catch (error) {
                // Transient polling errors are logged and retried until timeout.
                console.warn(`Polling error: ${error}`);
                await new Promise(r => setTimeout(r, pollInterval));
            }
        }

        console.error(`Timeout waiting for ${requestId}`);
        return null;
    }

    // Convenience wrapper: submit and poll in one call.
    async complete(model, prompt) {
        const requestId = await this.submit(model, prompt);
        if (!requestId) {
            return null;
        }

        return await this.poll(requestId);
    }
}

// Usage — note: top-level await requires an ES module context.
const client = new BeginswithAIClient("your_api_key_here");

// Simple usage: submit and wait until the answer (or null) comes back.
const response = await client.complete(Model.GPT_5_1, "Explain async APIs");
if (response) {
    console.log(`Response: ${response}`);
}

// Or separate submit and poll — useful for doing work while waiting.
const requestId = await client.submit(Model.GEMINI_2_5_FLASH, "What is AI?");
if (requestId) {
    // Do other work...
    const result = await client.poll(requestId);
    if (result) {
        console.log(`Result: ${result}`);
    }
}

Parallel Processing

Submit multiple requests in parallel and collect results efficiently.

Python
import requests
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Dict

# Replace with your real API key; every request authenticates with this bearer token.
API_KEY = "your_api_key_here"
BASE_URL = "https://app.beginswithai.com/v1"

# Headers shared by all requests in this example.
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}

def process_single_prompt(prompt: str, model: str = "gpt-5-1") -> Dict:
    """Submit one prompt, wait (up to 5 min) for its result, and report status."""
    try:
        # Submit the prompt.
        submit_reply = requests.post(
            f"{BASE_URL}/ai",
            json={"model": model, "prompt": prompt},
            headers=headers,
            timeout=30,
        )
        submit_reply.raise_for_status()
        request_id = submit_reply.json()["request_id"]

        # Poll until the answer arrives or five minutes pass.
        deadline = time.time() + 300
        while time.time() < deadline:
            poll_reply = requests.get(
                f"{BASE_URL}/ai",
                params={"req_id": request_id},
                headers=headers,
                timeout=30,
            )
            poll_reply.raise_for_status()
            payload = poll_reply.json()

            if "response" in payload:
                return {
                    "prompt": prompt,
                    "response": payload["response"],
                    "status": "success"
                }

            time.sleep(5)

        return {
            "prompt": prompt,
            "response": None,
            "status": "timeout"
        }

    except Exception as exc:
        return {
            "prompt": prompt,
            "response": None,
            "status": f"error: {str(exc)}"
        }

# Process 20 prompts in parallel (respecting rate limits)
prompts = [f"Explain concept {i}" for i in range(20)]

# Use ThreadPoolExecutor with max workers = rate limit
# For Starter tier (10 RPM), use 5 workers to stay safe
with ThreadPoolExecutor(max_workers=5) as executor:
    # Map each future back to its prompt so results can be reported as they finish.
    futures = {
        executor.submit(process_single_prompt, prompt, "gpt-5-1"): prompt
        for prompt in prompts
    }

    results = []
    # as_completed yields futures in completion order, not submission order.
    for future in as_completed(futures):
        result = future.result()
        results.append(result)
        print(f"Completed: {result['prompt'][:50]}... - Status: {result['status']}")

# Print summary
success_count = sum(1 for r in results if r["status"] == "success")
print(f"\nCompleted: {success_count}/{len(results)} requests")
JavaScript
// Replace with your real API key; every request authenticates with this bearer token.
const API_KEY = "your_api_key_here";
const BASE_URL = "https://app.beginswithai.com/v1";

// Headers shared by all requests in this example.
const headers = {
    "Authorization": `Bearer ${API_KEY}`,
    "Content-Type": "application/json"
};

async function processSinglePrompt(prompt, model = "gpt-5-1") {
    // Submit one prompt, wait (up to 5 min) for its result, and report status.
    try {
        // Submit the prompt.
        const submitReply = await fetch(`${BASE_URL}/ai`, {
            method: "POST",
            headers,
            body: JSON.stringify({ model, prompt })
        });
        if (!submitReply.ok) throw new Error(`HTTP ${submitReply.status}`);

        const { request_id: requestId } = await submitReply.json();

        // Poll until the answer arrives or five minutes pass.
        const deadline = Date.now() + 300000;
        while (Date.now() < deadline) {
            const pollReply = await fetch(
                `${BASE_URL}/ai?req_id=${requestId}`,
                { headers }
            );
            if (!pollReply.ok) throw new Error(`HTTP ${pollReply.status}`);

            const payload = await pollReply.json();
            if (payload.response) {
                return {
                    prompt,
                    response: payload.response,
                    status: "success"
                };
            }

            await new Promise(resolve => setTimeout(resolve, 5000));
        }

        return {
            prompt,
            response: null,
            status: "timeout"
        };
    } catch (error) {
        return {
            prompt,
            response: null,
            status: `error: ${error.message}`
        };
    }
}

// Process 20 prompts in parallel (respecting rate limits)
const prompts = Array.from({ length: 20 }, (_, i) => `Explain concept ${i}`);

// Process in batches to respect rate limits
// For Starter tier (10 RPM), use batch size of 5
const batchSize = 5;
const results = [];

for (let start = 0; start < prompts.length; start += batchSize) {
    const batch = prompts.slice(start, start + batchSize);
    // One Promise.all per batch: the whole batch runs concurrently.
    const settled = await Promise.all(
        batch.map(p => processSinglePrompt(p, "gpt-5-1"))
    );

    results.push(...settled);

    settled.forEach(result => {
        console.log(`Completed: ${result.prompt.substring(0, 50)}... - Status: ${result.status}`);
    });

    // Pause between batches so submissions stay under the per-minute limit.
    if (start + batchSize < prompts.length) {
        await new Promise(resolve => setTimeout(resolve, 12000));
    }
}

// Print summary
const successCount = results.filter(r => r.status === "success").length;
console.log(`\nCompleted: ${successCount}/${results.length} requests`);

Model Comparison

Test the same prompt across multiple models to compare responses.

Python
import requests
import time
from typing import Dict, List

# Replace with your real API key; every request authenticates with this bearer token.
API_KEY = "your_api_key_here"
BASE_URL = "https://app.beginswithai.com/v1"

# Headers shared by all requests in this example.
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}

def compare_models(prompt: str, models: List[str]) -> Dict[str, Dict]:
    """Send same prompt to multiple models and compare results.

    Returns a dict keyed by model name; each value has `response`,
    `time` (seconds, covers submit + poll), and `status` ("success",
    "timeout", or "error: ...").
    """
    results = {}

    for model in models:
        print(f"\nTesting {model}...")
        # Clock starts before submit, so `time` measures the full round trip.
        start_time = time.time()

        try:
            # Submit the prompt to this model.
            response = requests.post(
                f"{BASE_URL}/ai",
                json={"model": model, "prompt": prompt},
                headers=headers,
                timeout=30
            )
            response.raise_for_status()
            request_id = response.json()["request_id"]

            # Poll for up to 120s, measured from submission.
            while time.time() - start_time < 120:
                poll_response = requests.get(
                    f"{BASE_URL}/ai",
                    params={"req_id": request_id},
                    headers=headers,
                    timeout=30
                )
                poll_response.raise_for_status()
                result = poll_response.json()

                if "response" in result:
                    elapsed = time.time() - start_time
                    results[model] = {
                        "response": result["response"],
                        "time": round(elapsed, 2),
                        "status": "success"
                    }
                    print(f"  Completed in {elapsed:.2f}s")
                    break

                time.sleep(5)
            else:
                # while/else: runs only when the loop ended WITHOUT `break`,
                # i.e. the 120s window elapsed with no result.
                results[model] = {
                    "response": None,
                    "time": None,
                    "status": "timeout"
                }

        except Exception as e:
            results[model] = {
                "response": None,
                "time": None,
                "status": f"error: {str(e)}"
            }

        # Rate limit pause between models
        time.sleep(6)

    return results

# Compare models
prompt = "What are the key principles of good API design?"

# Candidate models; each is tried sequentially with a 6s pause in between.
models_to_test = [
    "gpt-5-1",
    "gemini-2-5-flash",
    "grok-4-1",
    "deepseek-v3-2"
]

results = compare_models(prompt, models_to_test)

print("\n" + "=" * 60)
print("COMPARISON RESULTS")
print("=" * 60)

# Show round-trip time, status, and a 300-char preview for each model.
for model, data in results.items():
    print(f"\n--- {model} ---")
    print(f"Time: {data['time']}s" if data['time'] else "Time: N/A")
    print(f"Status: {data['status']}")
    if data['response']:
        print(f"Response preview: {data['response'][:300]}...")
JavaScript
// Replace with your real API key; every request authenticates with this bearer token.
const API_KEY = "your_api_key_here";
const BASE_URL = "https://app.beginswithai.com/v1";

// Headers shared by all requests in this example.
const headers = {
    "Authorization": `Bearer ${API_KEY}`,
    "Content-Type": "application/json"
};

async function compareModels(prompt, models) {
    // Send the same prompt to each model in turn. Returns an object keyed by
    // model name; each value has `response`, `time` (seconds, covers
    // submit + poll), and `status` ("success", "timeout", or "error: ...").
    const results = {};

    for (const model of models) {
        console.log(`\nTesting ${model}...`);
        // Clock starts before submit, so `time` measures the full round trip.
        const startTime = Date.now();

        try {
            // Submit the prompt to this model.
            const response = await fetch(`${BASE_URL}/ai`, {
                method: "POST",
                headers: headers,
                body: JSON.stringify({ model, prompt })
            });

            if (!response.ok) throw new Error(`HTTP ${response.status}`);

            const data = await response.json();
            const requestId = data.request_id;

            // Poll for up to 120s, measured from submission; `completed`
            // distinguishes a break-on-success from a timed-out loop exit.
            let completed = false;
            while (Date.now() - startTime < 120000) {
                const pollResponse = await fetch(
                    `${BASE_URL}/ai?req_id=${requestId}`,
                    { headers: headers }
                );

                if (!pollResponse.ok) throw new Error(`HTTP ${pollResponse.status}`);

                const result = await pollResponse.json();

                if (result.response) {
                    const elapsed = (Date.now() - startTime) / 1000;
                    results[model] = {
                        response: result.response,
                        time: Math.round(elapsed * 100) / 100,
                        status: "success"
                    };
                    console.log(`  Completed in ${elapsed.toFixed(2)}s`);
                    completed = true;
                    break;
                }

                await new Promise(r => setTimeout(r, 5000));
            }

            if (!completed) {
                results[model] = {
                    response: null,
                    time: null,
                    status: "timeout"
                };
            }
        } catch (error) {
            results[model] = {
                response: null,
                time: null,
                status: `error: ${error.message}`
            };
        }

        // Rate limit pause between models
        await new Promise(r => setTimeout(r, 6000));
    }

    return results;
}

// Compare models — note: top-level await requires an ES module context.
const prompt = "What are the key principles of good API design?";

// Candidate models; each is tried sequentially with a 6s pause in between.
const modelsToTest = [
    "gpt-5-1",
    "gemini-2-5-flash",
    "grok-4-1",
    "deepseek-v3-2"
];

const results = await compareModels(prompt, modelsToTest);

console.log("\n" + "=".repeat(60));
console.log("COMPARISON RESULTS");
console.log("=".repeat(60));

// Show round-trip time, status, and a 300-char preview for each model.
for (const [model, data] of Object.entries(results)) {
    console.log(`\n--- ${model} ---`);
    console.log(`Time: ${data.time ? data.time + "s" : "N/A"}`);
    console.log(`Status: ${data.status}`);
    if (data.response) {
        console.log(`Response preview: ${data.response.substring(0, 300)}...`);
    }
}