L
Initializing Studio...
Complete REST API documentation for integrating LangTrain into your applications. All endpoints use the base URL: https://api.langtrain.xyz/v1
Standard REST patterns with JSON request/response bodies
Full OpenAPI 3.0 spec available for code generation
1,000 requests/minute on the Pro plan; 10,000 requests/minute on Enterprise
Real-time event notifications for training and inference
Pass your API key in the `Authorization` header as a Bearer token.

```python
# Authentication examples
import os

import requests

# Set your API key as an environment variable (never hard-code it)
API_KEY = os.getenv('LANGTRAIN_API_KEY')
BASE_URL = 'https://api.langtrain.xyz/v1'

# Headers for all requests
headers = {
    'Authorization': f'Bearer {API_KEY}',
    'Content-Type': 'application/json',
    'User-Agent': 'LangTrain-Python/1.0.0'
}

# Test authentication
response = requests.get(f'{BASE_URL}/user/profile', headers=headers)

if response.status_code == 200:
    print("✅ Authentication successful")
    user_data = response.json()
    print(f"Welcome, {user_data['name']}!")
else:
    print(f"❌ Authentication failed: {response.status_code}")
    print(response.json())
```
### Models API

- `GET /v1/hub/` — List all available models
- `GET /v1/hub/models/{model_id}` — Get model details
- `GET /v1/hub/tiers` — Get pricing tiers
- `GET /v1/hub/featured` — Get featured models

```python
# Models API examples
# (assumes BASE_URL and headers from the Authentication section)

# 1. List all available models
def list_models():
    """Fetch the model catalog and print a one-line summary per model."""
    response = requests.get(f'{BASE_URL}/hub/', headers=headers)
    data = response.json()

    print(f"Found {len(data['models'])} models:")
    for model in data['models']:
        print(f"  - {model['id']}: {model['name']} ({model['params_billions']}B params)")

    return data

# 2. Get specific model details
def get_model_details(model_id):
    """Return a trimmed dict of one model's details, or None if not found."""
    response = requests.get(f'{BASE_URL}/hub/models/{model_id}', headers=headers)

    if response.status_code == 200:
        model = response.json()
        return {
            'id': model['id'],
            'name': model['name'],
            'description': model['description'],
            'params_billions': model['params_billions'],
            'context_length': model['context_length'],
            'methods': model['methods'],
            'pricing': model['pricing']
        }
    return None

# 3. Get pricing tiers
def get_pricing_tiers():
    """Return the raw pricing-tier payload."""
    response = requests.get(f'{BASE_URL}/hub/tiers', headers=headers)
    return response.json()

# Usage examples
models = list_models()
llama_details = get_model_details('llama-3.1-8b')
print(f"Llama 3.1 8B context length: {llama_details['context_length']}")
```
### Datasets API

- `POST /v1/datasets/` — Upload new dataset
- `GET /v1/datasets/` — List your datasets
- `GET /v1/datasets/{dataset_id}` — Get dataset details
- `DELETE /v1/datasets/{dataset_id}` — Delete dataset
- `POST /v1/datasets/{dataset_id}/validate` — Validate dataset format

```python
# Datasets API examples
# (assumes BASE_URL, API_KEY and headers from the Authentication section)

# 1. Upload a new dataset
def upload_dataset(file_path, name, description=None):
    """Upload a JSONL dataset file and return the created dataset record."""
    with open(file_path, 'rb') as f:
        files = {'file': (file_path, f)}
        data = {
            'name': name,
            'description': description or f'Uploaded {name}',
            'format': 'jsonl'
        }
        # NOTE: only the Authorization header here — requests must set the
        # multipart/form-data Content-Type (with boundary) itself.
        response = requests.post(
            f'{BASE_URL}/datasets/',
            headers={'Authorization': f'Bearer {API_KEY}'},
            files=files,
            data=data
        )
    return response.json()

# 2. List all your datasets
def list_datasets():
    """Print a summary of every dataset on the account and return the payload."""
    response = requests.get(f'{BASE_URL}/datasets/', headers=headers)
    datasets = response.json()

    for ds in datasets['items']:
        print(f"📁 {ds['name']} ({ds['rows']:,} rows)")
        print(f"   Format: {ds['format']} | Size: {ds['size_mb']:.1f}MB")
    return datasets

# 3. Validate dataset before training
def validate_dataset(dataset_id):
    """Run server-side validation and print the result; returns the report."""
    response = requests.post(
        f'{BASE_URL}/datasets/{dataset_id}/validate',
        headers=headers
    )
    result = response.json()

    if result['valid']:
        print("✅ Dataset is valid and ready for training")
        print(f"   Samples: {result['total_samples']:,}")
        print(f"   Avg tokens: {result['avg_tokens_per_sample']}")
    else:
        print("❌ Validation errors:")
        for error in result['errors']:
            print(f"   - {error}")
    return result

# Usage
dataset = upload_dataset('./training_data.jsonl', 'Customer Support v1')
validate_dataset(dataset['id'])
```
### Fine-tuning API

- `POST /v1/fine-tuning/jobs` — Create fine-tuning job
- `GET /v1/fine-tuning/jobs` — List all jobs
- `GET /v1/fine-tuning/jobs/{job_id}` — Get job status
- `POST /v1/fine-tuning/jobs/{job_id}/cancel` — Cancel running job
- `GET /v1/fine-tuning/jobs/{job_id}/events` — Stream job events

```python
# Fine-tuning API examples
# (assumes BASE_URL and headers from the Authentication section)
import json  # needed by stream_events below

# 1. Create fine-tuning job
def create_finetune_job(model_id, dataset_id, config=None):
    """Start a fine-tuning job; falls back to a sensible QLoRA config."""
    default_config = {
        'method': 'qlora',
        'lora_config': {
            'r': 32,
            'alpha': 64,
            'dropout': 0.05,
            'target_modules': ['q_proj', 'v_proj', 'k_proj', 'o_proj']
        },
        'training_config': {
            'epochs': 3,
            'batch_size': 4,
            'learning_rate': 2e-4,
            'warmup_ratio': 0.1
        }
    }

    payload = {
        'model_id': model_id,
        'dataset_id': dataset_id,
        'config': config or default_config,
    }

    response = requests.post(
        f'{BASE_URL}/fine-tuning/jobs',
        headers=headers,
        json=payload
    )
    return response.json()

# 2. Monitor fine-tuning progress
def get_finetune_status(job_id):
    """Return a compact status dict for a job, or None on a non-200 reply."""
    response = requests.get(
        f'{BASE_URL}/fine-tuning/jobs/{job_id}',
        headers=headers
    )

    if response.status_code == 200:
        job = response.json()
        return {
            'status': job['status'],
            'progress': job.get('progress', 0),
            'current_epoch': job.get('current_epoch', 0),
            'loss': job.get('metrics', {}).get('train_loss'),
            'estimated_completion': job.get('estimated_completion')
        }
    return None

# 3. Stream training events in real-time
def stream_events(job_id):
    """Print each newline-delimited JSON event as it arrives."""
    response = requests.get(
        f'{BASE_URL}/fine-tuning/jobs/{job_id}/events',
        headers=headers,
        stream=True
    )

    for line in response.iter_lines():
        if line:
            event = json.loads(line)
            print(f"[{event['type']}] {event['message']}")

# Usage
job = create_finetune_job('llama-3.1-8b', 'dataset_abc123')
print(f"Started job {job['id']}, status: {job['status']}")
```
### Inference API

- `POST /v1/completions` — Text completion (legacy)
- `POST /v1/chat/completions` — Chat completion (recommended)
- `POST /v1/embeddings` — Text embeddings

```python
# Inference API examples
# (assumes BASE_URL and headers from the Authentication section)
import json  # needed by stream_chat below

# 1. Chat completion (recommended)
def chat_completion(model_id, messages, **kwargs):
    """Send a non-streaming chat request and return the full JSON response."""
    payload = {
        'model': model_id,
        'messages': messages,
        'max_tokens': kwargs.get('max_tokens', 512),
        'temperature': kwargs.get('temperature', 0.7),
        'top_p': kwargs.get('top_p', 0.9),
    }

    response = requests.post(
        f'{BASE_URL}/chat/completions',
        headers=headers,
        json=payload
    )
    return response.json()

# 2. Streaming chat completion
def stream_chat(model_id, messages):
    """Stream SSE chunks, print them as they arrive, and return the full text."""
    payload = {
        'model': model_id,
        'messages': messages,
        'stream': True
    }

    response = requests.post(
        f'{BASE_URL}/chat/completions',
        headers=headers,
        json=payload,
        stream=True
    )

    full_response = ""
    for line in response.iter_lines():
        if line.startswith(b'data: '):
            data = line[6:]
            # The '[DONE]' sentinel is NOT JSON — check it before parsing,
            # otherwise json.loads raises at end of stream.
            if data == b'[DONE]':
                break
            chunk = json.loads(data)['choices'][0]['delta'].get('content', '')
            full_response += chunk
            print(chunk, end='', flush=True)

    return full_response

# 3. Generate embeddings
def get_embeddings(texts, model='text-embedding-3-small'):
    """Return the list of embedding records for one or more input texts."""
    payload = {
        'model': model,
        'input': texts
    }
    response = requests.post(f'{BASE_URL}/embeddings', headers=headers, json=payload)
    return response.json()['data']

# Usage examples
response = chat_completion(
    'llama-3.1-8b',
    [{"role": "user", "content": "Explain quantum computing in simple terms"}],
    max_tokens=200,
    temperature=0.8
)
print(response['choices'][0]['message']['content'])
```
### Webhooks

- `POST /v1/webhooks` — Register webhook endpoint
- `GET /v1/webhooks` — List registered webhooks
- `DELETE /v1/webhooks/{webhook_id}` — Remove webhook

Event types:

- `training.started` — Fine-tuning job started
- `training.completed` — Fine-tuning job finished
- `training.failed` — Fine-tuning job errored
- `model.deployed` — Model deployed to production
- `usage.threshold` — Usage threshold reached

Every delivery is signed; use the `X-LangTrain-Signature` header for verification.

```python
# Webhook examples
# (assumes BASE_URL and headers from the Authentication section)

# 1. Register a webhook
def register_webhook(url, events):
    """Register `url` to receive the given event types; returns the record."""
    payload = {
        'url': url,
        'events': events,
        'secret': 'your-webhook-secret'  # in real code, load from env/secret store
    }

    response = requests.post(
        f'{BASE_URL}/webhooks',
        headers=headers,
        json=payload
    )
    return response.json()

# 2. Webhook receiver (Flask example)
from flask import Flask, request
import hmac
import hashlib

app = Flask(__name__)
WEBHOOK_SECRET = 'your-webhook-secret'

def verify_signature(payload, signature):
    """Constant-time check of the HMAC-SHA256 hex digest of the raw body."""
    expected = hmac.new(
        WEBHOOK_SECRET.encode(),
        payload,
        hashlib.sha256
    ).hexdigest()
    return hmac.compare_digest(expected, signature)

@app.route('/webhook', methods=['POST'])
def handle_webhook():
    signature = request.headers.get('X-LangTrain-Signature')

    if not verify_signature(request.data, signature):
        return {'error': 'Invalid signature'}, 401

    event = request.json

    if event['type'] == 'training.completed':
        job_id = event['data']['job_id']
        model_id = event['data']['model_id']
        print(f"✅ Training completed: {job_id} -> {model_id}")
        # Deploy model, send notification, etc.

    return {'received': True}

# Register webhook for training events
webhook = register_webhook(
    'https://yourapp.com/webhook',
    ['training.started', 'training.completed', 'training.failed']
)
print(f"Webhook registered: {webhook['id']}")
```
### Status codes & error handling

- `200 OK` — Request succeeded
- `201 Created` — Resource created successfully
- `400 Bad Request` — Invalid request parameters
- `401 Unauthorized` — Invalid or missing API key
- `403 Forbidden` — Insufficient permissions
- `404 Not Found` — Resource doesn't exist
- `429 Too Many Requests` — Rate limit exceeded
- `500 Internal Server Error` — Server-side error

Error responses carry an `error` object containing `code`, `message`, and optional `details`.

```python
# Error handling examples

import requests
from requests.exceptions import Timeout, RequestException

class LangTrainError(Exception):
    """API error with the server-supplied code/message/details attached."""
    def __init__(self, code, message, details=None):
        self.code = code
        self.message = message
        self.details = details
        super().__init__(f"[{code}] {message}")

def api_request(method, endpoint, **kwargs):
    """Make API request with proper error handling"""
    url = f'{BASE_URL}{endpoint}'

    try:
        response = requests.request(
            method, url,
            headers=headers,
            timeout=30,
            **kwargs
        )

        # Raise for HTTP errors
        if response.status_code >= 400:
            try:
                error = response.json().get('error', {})
            except ValueError:
                # Non-JSON error body (e.g. proxy HTML) — don't mask the HTTP error
                error = {}
            raise LangTrainError(
                code=error.get('code', 'unknown_error'),
                message=error.get('message', 'An error occurred'),
                details=error.get('details')
            )

        return response.json()

    except Timeout:
        raise LangTrainError('timeout', 'Request timed out')
    except RequestException as e:
        raise LangTrainError('connection_error', str(e))

# Usage with retry logic
import time
from functools import wraps

def retry_on_rate_limit(max_retries=3, backoff=1):
    """Decorator: retry on rate_limit_exceeded with exponential backoff."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except LangTrainError as e:
                    if e.code == 'rate_limit_exceeded' and attempt < max_retries - 1:
                        wait = backoff * (2 ** attempt)
                        print(f"Rate limited. Retrying in {wait}s...")
                        time.sleep(wait)
                    else:
                        raise
        return wrapper
    return decorator

@retry_on_rate_limit(max_retries=3)
def safe_api_call():
    return api_request('GET', '/hub/')
```
### Rate limits

Every response includes rate-limit headers:

- `X-RateLimit-Limit` — Requests allowed per window
- `X-RateLimit-Remaining` — Requests remaining
- `X-RateLimit-Reset` — Unix timestamp when limit resets

```python
# Rate limit handling
# (assumes BASE_URL and headers from the Authentication section)
import time
from datetime import datetime

def check_rate_limits(response):
    """Extract and display rate limit info from response headers"""
    limit = response.headers.get('X-RateLimit-Limit')
    remaining = response.headers.get('X-RateLimit-Remaining')
    reset = response.headers.get('X-RateLimit-Reset')

    if remaining and int(remaining) < 10:
        print(f"⚠️ Warning: Only {remaining}/{limit} requests remaining")
        if reset:  # header may be absent — don't crash on int(None)
            print(f"   Resets at: {datetime.fromtimestamp(int(reset))}")

    return {
        'limit': int(limit) if limit else None,
        'remaining': int(remaining) if remaining else None,
        'reset': int(reset) if reset else None
    }

# Batch requests for efficiency
def batch_inference(prompts, model_id, batch_size=10):
    """Process multiple prompts efficiently"""
    results = []

    for i in range(0, len(prompts), batch_size):
        batch = prompts[i:i + batch_size]

        response = requests.post(
            f'{BASE_URL}/chat/completions/batch',
            headers=headers,
            json={
                'model': model_id,
                'requests': [
                    {'messages': [{'role': 'user', 'content': p}]}
                    for p in batch
                ]
            }
        )

        rate_info = check_rate_limits(response)
        results.extend(response.json()['responses'])

        # Respect rate limits
        if rate_info['remaining'] and rate_info['remaining'] < 5:
            time.sleep(1)

    return results

# Usage
prompts = ["Summarize this:", "Translate this:", "Explain this:"]
results = batch_inference(prompts, 'llama-3.1-8b')
```