Quick Integration
One-click connection to 30+ mainstream AI service providers. Zero-code migration · Intelligent routing · Cost optimization
Core Concept
EvoLink uses an asynchronous task architecture; every AI request is processed as a task, as sketched in the minimal example below:
- Submit Task: send a request and immediately receive a task ID
- Real-time Monitoring: query progress and get status updates
- Get Results: retrieve the final results once the task completes
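A minimal Python sketch of this lifecycle, using the same endpoints shown in the examples below (YOUR_API_KEY is a placeholder):
import time
import requests

HEADERS = {"Authorization": "Bearer YOUR_API_KEY"}

# 1. Submit task: the API returns immediately with a task ID
task = requests.post(
    "https://api.evolink.ai/v1/images/generations",
    headers=HEADERS,
    json={"model": "gpt-4o-image", "prompt": "A lighthouse at dawn"},
).json()

# 2. Monitor: poll the task endpoint until the task finishes
while task["status"] not in ("completed", "failed"):
    time.sleep(5)
    task = requests.get(
        f"https://api.evolink.ai/v1/tasks/{task['id']}", headers=HEADERS
    ).json()

# 3. Get results: result URLs are returned in the results array
if task["status"] == "completed":
    print(task["results"][0])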
Prerequisites
Get API Key
Register Account
Visit the EvoLink Console to complete registration.
Your API key has full account permissions, so keep it safe. If it is ever leaked, reset it immediately.
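To avoid hard-coding the key, load it from an environment variable. A minimal sketch (the variable name EVOLINK_API_KEY is only an example, not a required convention):
import os
import requests

# Read the key from the environment rather than embedding it in source code
API_KEY = os.environ["EVOLINK_API_KEY"]  # e.g. set via: export EVOLINK_API_KEY=...

response = requests.post(
    "https://api.evolink.ai/v1/images/generations",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={"model": "gpt-4o-image", "prompt": "A test prompt"},
)
print(response.json()["id"])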
30-Second Experience
Image Generation Example
# Submit image generation task
curl -X POST https://api.evolink.ai/v1/images/generations \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o-image",
    "prompt": "A cute cat running on the grass at sunset with warm lighting"
  }'
{
  "created": 1757156493,
  "id": "task-unified-1757156493-imcg5zqt",
  "model": "gpt-4o-image",
  "object": "image.generation.task",
  "progress": 0,
  "status": "pending",
  "task_info": {
    "can_cancel": true,
    "estimated_time": 100
  },
  "type": "image",
  "usage": {
    "billing_rule": "per_call",
    "credits_reserved": 252,
    "estimated_cost": 0.252,
    "user_group": "default"
  }
}
Query Task Status
# Use returned task ID to query status
curl https://api.evolink.ai/v1/tasks/task-unified-1757156493-imcg5zqt \
  -H "Authorization: Bearer YOUR_API_KEY"
{
  "created": 1757156493,
  "id": "task-unified-1757156493-imcg5zqt",
  "model": "gpt-4o-image",
  "object": "image.generation.task",
  "progress": 100,
  "results": [
    "https://tempfile.aiquickdraw.com/s/generated_image_url.png"
  ],
  "status": "completed",
  "task_info": {
    "can_cancel": false
  },
  "type": "image"
}
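Once the status is completed, each URL in the results array can be downloaded like any other file. A small sketch (the output filename is arbitrary):
import requests

# Download the generated image from the URL returned in `results`
result_url = "https://tempfile.aiquickdraw.com/s/generated_image_url.png"
image = requests.get(result_url, timeout=60)
image.raise_for_status()

with open("generated_image.png", "wb") as f:
    f.write(image.content)
print("Saved generated_image.png")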
Multi-modal AI Capabilities
🎨 Image Generation
- GPT-4O Image
- Seedream 4.0
import requests

# GPT-4O Image generation
response = requests.post(
    "https://api.evolink.ai/v1/images/generations",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    json={
        "model": "gpt-4o-image",
        "prompt": "A beautiful sunset over the ocean with vibrant colors",
        "size": "1024x1024",
        "n": 1
    }
)

task = response.json()
print(f"Task ID: {task['id']}")
print(f"Estimated time: {task['task_info']['estimated_time']} seconds")
import requests

# Seedream 4.0 image generation
response = requests.post(
    "https://api.evolink.ai/v1/images/generations",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    json={
        "model": "doubao-seedream-4.0",
        "prompt": "Futuristic city with neon lights at night"
    }
)

task = response.json()
print(f"Task created: {task['created']}")
print(f"Estimated cost: ${task['usage']['estimated_cost']}")
🎬 Video Generation
import requests
import time

# Veo3-Fast video generation
def generate_video():
    # Submit video generation task
    response = requests.post(
        "https://api.evolink.ai/v1/videos/generations",
        headers={"Authorization": "Bearer YOUR_API_KEY"},
        json={
            "model": "veo3-fast",
            "prompt": "A cat chasing butterflies in the garden, slow motion",
            "aspect_ratio": "16:9"
        }
    )
    task = response.json()
    task_id = task['id']
    print(f"Video task created: {task_id}")
    print(f"Estimated completion time: {task['task_info']['estimated_time']} seconds")
    print(f"Estimated cost: ${task['usage']['estimated_cost']}")
    return task_id

# Monitor task progress
def monitor_task(task_id):
    while True:
        response = requests.get(
            f"https://api.evolink.ai/v1/tasks/{task_id}",
            headers={"Authorization": "Bearer YOUR_API_KEY"}
        )
        task = response.json()
        status = task['status']
        progress = task['progress']
        print(f"Status: {status}, Progress: {progress}%")
        if status == "completed":
            return task['results'][0]  # Video URL in results array
        elif status == "failed":
            raise Exception(f"Task failed: {task.get('error')}")
        time.sleep(10)  # Query every 10 seconds

# Usage example
task_id = generate_video()
video_url = monitor_task(task_id)
print(f"Video generation completed: {video_url}")
SDK Quick Integration
Python SDK
import time
import requests

class EvoLinkClient:
    def __init__(self, api_key):
        self.api_key = api_key
        self.base_url = "https://api.evolink.ai"
        self.headers = {"Authorization": f"Bearer {api_key}"}

    def create_image(self, model, prompt, **kwargs):
        """Create image generation task"""
        response = requests.post(
            f"{self.base_url}/v1/images/generations",
            headers=self.headers,
            json={"model": model, "prompt": prompt, **kwargs}
        )
        return response.json()

    def create_video(self, model, prompt, **kwargs):
        """Create video generation task"""
        response = requests.post(
            f"{self.base_url}/v1/videos/generations",
            headers=self.headers,
            json={"model": model, "prompt": prompt, **kwargs}
        )
        return response.json()

    def get_task(self, task_id):
        """Get task status"""
        response = requests.get(
            f"{self.base_url}/v1/tasks/{task_id}",
            headers=self.headers
        )
        return response.json()

    def wait_for_completion(self, task_id, timeout=300):
        """Wait for task completion"""
        start_time = time.time()
        while time.time() - start_time < timeout:
            task = self.get_task(task_id)
            if task['status'] == 'completed':
                return task
            elif task['status'] == 'failed':
                raise Exception(f"Task failed: {task.get('error')}")
            time.sleep(5)
        raise TimeoutError("Task execution timeout")

# Usage example
client = EvoLinkClient("YOUR_API_KEY")

# Generate image
task = client.create_image(
    model="gpt-4o-image",
    prompt="A modern intelligent office building"
)
result = client.wait_for_completion(task['id'])
print(f"Image URL: {result['results'][0]}")
JavaScript/Node.js
class EvoLinkClient {
  constructor(apiKey) {
    this.apiKey = apiKey;
    this.baseURL = 'https://api.evolink.ai';
  }

  async createImage(model, prompt, options = {}) {
    const response = await fetch(`${this.baseURL}/v1/images/generations`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({ model, prompt, ...options })
    });
    return response.json();
  }

  async getTask(taskId) {
    const response = await fetch(`${this.baseURL}/v1/tasks/${taskId}`, {
      headers: { 'Authorization': `Bearer ${this.apiKey}` }
    });
    return response.json();
  }

  async waitForCompletion(taskId, timeout = 300000) {
    const startTime = Date.now();
    while (Date.now() - startTime < timeout) {
      const task = await this.getTask(taskId);
      if (task.status === 'completed') {
        return task;
      } else if (task.status === 'failed') {
        throw new Error(`Task failed: ${task.error}`);
      }
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
    throw new Error('Task execution timeout');
  }
}

// Usage example
async function generateImage() {
  const client = new EvoLinkClient('YOUR_API_KEY');
  try {
    // Create task
    const task = await client.createImage(
      'doubao-seedream-4.0',
      'An abstract art painting with rich colors'
    );
    console.log('Task created:', task.id);

    // Wait for completion
    const result = await client.waitForCompletion(task.id);
    console.log('Image generation completed:', result.results[0]);
  } catch (error) {
    console.error('Generation failed:', error.message);
  }
}

generateImage();
Production Environment Best Practices
Error Handling Strategy
import time
import requests

class EvoLinkError(Exception):
    """EvoLink API exception base class"""
    pass

class RateLimitError(EvoLinkError):
    """Rate limit exception"""
    pass

class QuotaExhaustedError(EvoLinkError):
    """Quota exhausted exception"""
    pass

def handle_api_call(func, *args, **kwargs):
    """Unified API call error handling"""
    max_retries = 3
    retry_delay = 1

    for attempt in range(max_retries):
        try:
            response = func(*args, **kwargs)
            if response.status_code == 200:
                return response.json()
            elif response.status_code == 429:
                # Rate limit, wait and retry
                wait_time = int(response.headers.get('Retry-After', retry_delay))
                print(f"Rate limit hit, waiting {wait_time} seconds before retry...")
                time.sleep(wait_time)
                retry_delay *= 2
                continue
            elif response.status_code == 402:
                raise QuotaExhaustedError("Insufficient account balance, please recharge")
            else:
                error_data = response.json()
                raise EvoLinkError(f"API call failed: {error_data.get('error', {}).get('message')}")
        except requests.exceptions.RequestException as e:
            if attempt == max_retries - 1:
                raise EvoLinkError(f"Network request failed: {str(e)}")
            time.sleep(retry_delay)
            retry_delay *= 2

    raise EvoLinkError("Maximum retry attempts reached")

# Usage example
try:
    result = handle_api_call(
        requests.post,
        "https://api.evolink.ai/v1/images/generations",
        headers={"Authorization": "Bearer YOUR_API_KEY"},
        json={"model": "gpt-4o-image", "prompt": "Test image"}
    )
    print("Task created successfully:", result['id'])
except QuotaExhaustedError:
    print("Please recharge in the console: https://evolink.ai/dashboard/billing")
except EvoLinkError as e:
    print(f"API call exception: {e}")
Performance Optimization Tips
- Batch Processing
- Task Pool Management
import asyncio
import aiohttp

async def create_task_async(session, model, prompt):
    """Async task creation"""
    async with session.post(
        "https://api.evolink.ai/v1/images/generations",
        json={"model": model, "prompt": prompt}
    ) as response:
        return await response.json()

async def batch_generate_images(prompts, model="gpt-4o-image"):
    """Batch image generation"""
    headers = {"Authorization": "Bearer YOUR_API_KEY"}
    async with aiohttp.ClientSession(headers=headers) as session:
        # Concurrently create all tasks
        tasks = [
            create_task_async(session, model, prompt)
            for prompt in prompts
        ]
        results = await asyncio.gather(*tasks)
        # Return task ID list
        return [result['id'] for result in results]

# Usage example
prompts = [
    "Modern office design",
    "Natural landscape painting",
    "Abstract artwork",
    "Tech-style UI interface"
]

task_ids = asyncio.run(batch_generate_images(prompts))
print(f"Created {len(task_ids)} tasks")
import time
import queue
import requests
from concurrent.futures import ThreadPoolExecutor

class TaskManager:
    def __init__(self, api_key, max_workers=5):
        self.api_key = api_key
        self.headers = {"Authorization": f"Bearer {api_key}"}
        self.task_queue = queue.Queue()
        self.result_queue = queue.Queue()
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.running = True

    def submit_task(self, model, prompt, **kwargs):
        """Submit task to queue"""
        task_data = {"model": model, "prompt": prompt, **kwargs}
        future = self.executor.submit(self._process_task, task_data)
        return future

    def _process_task(self, task_data):
        """Process single task"""
        # Create task
        response = requests.post(
            "https://api.evolink.ai/v1/images/generations",
            headers=self.headers,
            json=task_data
        )
        task = response.json()
        # Wait for completion
        return self._wait_for_completion(task['id'])

    def _wait_for_completion(self, task_id):
        """Wait for task completion"""
        while True:
            response = requests.get(
                f"https://api.evolink.ai/v1/tasks/{task_id}",
                headers=self.headers
            )
            task = response.json()
            if task['status'] == 'completed':
                return task
            elif task['status'] == 'failed':
                raise Exception(f"Task failed: {task.get('error')}")
            time.sleep(5)

# Usage example
manager = TaskManager("YOUR_API_KEY")
futures = []

for i in range(10):
    future = manager.submit_task(
        model="gpt-4o-image",
        prompt=f"Design {i+1}"
    )
    futures.append(future)

# Get all results
for i, future in enumerate(futures):
    try:
        result = future.result(timeout=300)
        print(f"Task {i+1} completed: {result['results'][0]}")
    except Exception as e:
        print(f"Task {i+1} failed: {e}")
Monitoring & Analytics
Usage Monitoring
Visit the EvoLink Console to view the following in real time:
Real-time Monitoring
- API call statistics
- Success and error rates
- Response time analysis
- Concurrent request monitoring
Cost Analysis
- Cost distribution by model
- Daily/monthly usage trends
- Cost forecasting and alerts
- Quota usage status (see the local tracking sketch below)
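These views live in the console; for a programmatic check you can also add up the usage block returned when each task is created. A minimal local budget guard, assuming the estimated_cost field shown in the earlier responses (DAILY_BUDGET is an illustrative value, not an API setting):
DAILY_BUDGET = 10.0  # example budget in dollars, purely local
spent_today = 0.0

def track_cost(task):
    """Accumulate estimated cost from a newly created task and warn when over budget."""
    global spent_today
    spent_today += task.get("usage", {}).get("estimated_cost", 0)
    if spent_today > DAILY_BUDGET:
        print(f"Warning: estimated spend ${spent_today:.3f} exceeds daily budget ${DAILY_BUDGET}")
    return spent_today

# Example: feed a task-creation response into the tracker
sample_task = {"id": "task-unified-1757156493-imcg5zqt", "usage": {"estimated_cost": 0.252}}
track_cost(sample_task)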
Custom Monitoring
import time
import logging

class EvoLinkMonitor:
    def __init__(self):
        # Configure logging
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler('evolink_api.log'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    def log_task_creation(self, task_id, model, cost):
        """Log task creation"""
        self.logger.info(f"Task created | ID:{task_id} | Model:{model} | Cost:${cost}")

    def log_task_completion(self, task_id, duration, status):
        """Log task completion"""
        self.logger.info(f"Task completed | ID:{task_id} | Duration:{duration}s | Status:{status}")

    def log_error(self, error, context=""):
        """Log errors"""
        self.logger.error(f"API error | {context} | {str(error)}")

# Usage example (client is the EvoLinkClient from the Python SDK section above)
monitor = EvoLinkMonitor()

# Use in API calls
start_time = time.time()
task = None
try:
    task = client.create_image("gpt-4o-image", "Test prompt")
    monitor.log_task_creation(
        task['id'],
        task['model'],
        task['usage']['estimated_cost']
    )
    result = client.wait_for_completion(task['id'])
    duration = time.time() - start_time
    monitor.log_task_completion(task['id'], int(duration), "completed")
except Exception as e:
    monitor.log_error(e, f"Task ID: {task['id'] if task else 'unknown'}")
Best Practices Summary
Development Recommendations
- Always implement error handling
- Use async programming for efficiency
- Set reasonable timeout values
- Log detailed call information
Performance Optimization
- Batch processing reduces network overhead
- Use task pools for concurrency management
- Monitor API quota usage
- Cache frequently used results (see the sketch below)
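For the caching point above, a minimal in-memory sketch, assuming the EvoLinkClient helper from the Python SDK section and that reusing a result for an identical model/prompt pair is acceptable for your use case:
import hashlib

# In-memory cache keyed by model + prompt; swap for Redis or a database in production
_result_cache = {}

def cached_generate(client, model, prompt):
    """Return a cached result URL if the same model/prompt was generated before."""
    key = hashlib.sha256(f"{model}:{prompt}".encode()).hexdigest()
    if key in _result_cache:
        return _result_cache[key]
    task = client.create_image(model=model, prompt=prompt)
    result = client.wait_for_completion(task['id'])
    url = result['results'][0]
    _result_cache[key] = url
    return url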
Security Considerations
- Secure API key storage
- Use environment variables for configuration
- Regularly rotate access keys
- Monitor abnormal call behavior
Cost Control
- Set reasonable budget limits
- Choose appropriate models
- Optimize prompt length
- Monitor usage trends
Ready to Start?
Begin Your AI Creation Journey Now
Console
Manage API keys and monitor usage
API Documentation
View detailed interface documentation
Task Management
Learn about task status queries