import logging
from dataclasses import dataclass
from functools import wraps
from typing import Any, Callable, Dict, List, Optional

import jsonschema
@dataclass
class TransformRule:
    """Request transformation rule configuration"""
    name: str               # rule identifier, used in transform() error reporting
    transformer: Callable   # async callable (request, context) -> request
    priority: int = 100     # lower values run earlier in the pipeline
    enabled: bool = True    # disabled rules are skipped by transform()
class RequestTransformer:
    """AI API request transformation pipeline.

    Rules registered via :meth:`add_rule` are applied to a request in
    ascending ``priority`` order (lower runs first).  Validators are JSON
    schemas keyed by name; templates are plain prompt strings keyed by name.
    """

    def __init__(self):
        self.rules: List[TransformRule] = []    # kept sorted by priority
        self.validators: Dict[str, dict] = {}   # schema name -> JSON schema
        self.templates: Dict[str, str] = {}     # template name -> prompt text

    def add_rule(self, name: str, priority: int = 100):
        """Decorator that registers the wrapped async callable as a rule.

        Args:
            name: Rule identifier, used in error logs.
            priority: Lower values run earlier in the pipeline.

        Returns:
            The decorator; it returns the original function unchanged.
        """
        def decorator(func: Callable):
            self.rules.append(
                TransformRule(name=name, transformer=func, priority=priority)
            )
            # Keep the list sorted so transform() can iterate in order.
            # sort() is stable: equal priorities keep registration order.
            self.rules.sort(key=lambda r: r.priority)
            return func
        return decorator

    def add_validator(self, name: str, schema: dict) -> None:
        """Register a JSON schema under *name* for use by validate()."""
        self.validators[name] = schema

    def add_template(self, name: str, template: str) -> None:
        """Register a prompt template string under *name*."""
        self.templates[name] = template

    async def transform(
        self,
        request: dict,
        context: Optional[Dict] = None
    ) -> dict:
        """Apply all enabled transformation rules to *request*.

        Each rule is awaited as ``transformer(request, context)`` and must
        return the (possibly new) request dict.

        NOTE: only a shallow copy of *request* is taken, so rules that
        mutate nested structures also mutate the caller's dict.

        Raises:
            Whatever exception a rule raises; it is logged with a
            traceback and re-raised for the caller to handle.
        """
        context = context or {}
        transformed = request.copy()
        for rule in self.rules:
            if not rule.enabled:
                continue
            try:
                transformed = await rule.transformer(
                    transformed, context
                )
            except Exception:
                # Use logging (with traceback) instead of print() so the
                # failure reaches configured handlers, then propagate.
                logging.getLogger(__name__).exception(
                    "Transform error in %s", rule.name
                )
                raise
        return transformed

    def validate(
        self,
        request: dict,
        schema_name: str
    ) -> bool:
        """Validate *request* against the schema registered as *schema_name*.

        Returns:
            True when the schema name is unknown (validation is opt-in)
            or the request conforms; False on a validation failure.
        """
        if schema_name not in self.validators:
            return True
        try:
            jsonschema.validate(
                request,
                self.validators[schema_name]
            )
            return True
        except jsonschema.ValidationError:
            return False
# Module-level pipeline instance used by the rule decorators below.
transformer = RequestTransformer()

# Schema guarding incoming chat requests: a required string message plus
# an optional temperature bounded to the model's accepted range.
_CHAT_REQUEST_SCHEMA = {
    "type": "object",
    "required": ["message"],
    "properties": {
        "message": {"type": "string"},
        "temperature": {
            "type": "number",
            "minimum": 0,
            "maximum": 2,
        },
    },
}
transformer.add_validator("chat_request", _CHAT_REQUEST_SCHEMA)

# Default system-prompt text injected by the system_prompt rule.
transformer.add_template(
    "assistant",
    "You are a helpful AI assistant. Be concise and accurate."
)
@transformer.add_rule("schema_mapping", priority=10)
async def map_schema(request: dict, context: dict) -> dict:
    """Map request to OpenAI-compatible schema.

    A bare ``text`` field is converted into a single-user-turn
    ``messages`` list; requests without ``text`` pass through untouched.
    """
    if "text" in request:
        user_turn = {"role": "user", "content": request.pop("text")}
        request["messages"] = [user_turn]
    return request
@transformer.add_rule("system_prompt", priority=20)
async def inject_system_prompt(request: dict, context: dict) -> dict:
    """Inject system prompt from template.

    Prepends the "assistant" template as a system message.  If the
    conversation already contains a system message (caller-supplied, or
    the pipeline ran twice), the request is returned unchanged so the
    prompt is not duplicated.
    """
    messages = request.get("messages", [])
    # Skip injection when a system message is already present.
    if any(m.get("role") == "system" for m in messages):
        return request
    system_content = transformer.templates.get(
        "assistant",
        "You are a helpful assistant."
    )
    request["messages"] = [{
        "role": "system",
        "content": system_content
    }] + messages
    return request
@transformer.add_rule("enrichment", priority=30)
async def enrich_request(request: dict, context: dict) -> dict:
    """Enrich request with context metadata.

    Fills in a default model, copies the caller identity from the
    context when present, and attaches tracing metadata.
    """
    request.setdefault(
        "model", context.get("default_model", "gpt-4-turbo")
    )
    if "user" in context:
        request["user"] = context["user"]
    # Tracing metadata; missing context keys become None.
    request["metadata"] = {
        key: context.get(key) for key in ("request_id", "timestamp")
    }
    return request
@transformer.add_rule("parameter_optimization", priority=40)
async def optimize_parameters(request: dict, context: dict) -> dict:
    """Optimize model parameters.

    Fills in default sampling parameters and tunes the temperature for
    the use case.  A caller-supplied temperature always wins: the
    use-case presets are applied only when the request did not specify
    one (previously the presets clobbered explicit values, which the
    setdefault-based defaults suggest was unintended).
    """
    # Record whether the caller pinned the temperature before defaulting it.
    caller_set_temperature = "temperature" in request
    request.setdefault("temperature", 0.7)
    request.setdefault("max_tokens", 2048)
    if not caller_set_temperature:
        presets = {
            "code_generation": 0.3,
            "creative_writing": 1.2,
        }
        use_case = context.get("use_case")
        if use_case in presets:
            request["temperature"] = presets[use_case]
    return request
async def process_request(raw_request: dict):
    """Process request through transformation pipeline.

    Validates the raw request against the chat schema, then runs it
    through every registered transformation rule.

    Raises:
        ValueError: when the request fails schema validation.
    """
    if not transformer.validate(raw_request, "chat_request"):
        raise ValueError("Invalid request schema")
    # Fixed demo context; a real caller would derive this per request.
    pipeline_context = {
        "user": "user_123",
        "default_model": "gpt-4-turbo",
        "use_case": "general"
    }
    return await transformer.transform(raw_request, pipeline_context)