import time
from typing import Any, Dict, List, Literal, Optional

from pydantic import BaseModel, Field


# --- MCP Tool Configuration Models ---


class StdioConfig(BaseModel):
    """Configuration for a stdio-based MCP server."""

    command: str = Field(..., description="Executable command (e.g., 'npx', 'python')")
    args: List[str] = Field(default_factory=list, description="Command arguments")
    env: Optional[Dict[str, str]] = Field(None, description="Environment variables")


class SSEConfig(BaseModel):
    """Configuration for an SSE-based MCP server."""

    url: str = Field(..., description="Server URL (e.g., 'http://localhost:8000/sse')")


class MCPServerConfig(BaseModel):
    """Configuration for an MCP server connection."""

    name: str = Field(..., description="Unique name for the toolset")
    type: Literal["stdio", "sse"] = Field("stdio", description="Connection type")
    stdio_config: Optional[StdioConfig] = Field(None, description="Stdio configuration")
    sse_config: Optional[SSEConfig] = Field(None, description="SSE configuration")
    tool_filter: Optional[List[str]] = Field(None, description="Optional whitelist of tools to enable")
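

# Illustrative usage (not part of the original module): configuring a
# stdio-based MCP server. The command, args, and tool names below are
# hypothetical values.
#
#   fs_server = MCPServerConfig(
#       name="filesystem",
#       type="stdio",
#       stdio_config=StdioConfig(
#           command="npx",
#           args=["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
#       ),
#       tool_filter=["read_file", "list_directory"],
#   )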


# --- LLM Configuration Models ---


class LLMConfig(BaseModel):
    """Custom LLM API configuration for per-request override."""

    api_key: Optional[str] = Field(None, description="API key for the LLM service")
    api_base: Optional[str] = Field(None, description="Base URL for the LLM API")
    model: Optional[str] = Field(None, description="Model name (e.g., 'openai/gpt-4o')")


class GenerationConfig(BaseModel):
    """LLM generation parameters for fine-tuning responses."""

    temperature: Optional[float] = Field(None, ge=0.0, le=2.0, description="Randomness (0.0-2.0)")
    max_output_tokens: Optional[int] = Field(None, gt=0, description="Maximum response length in tokens")
    top_p: Optional[float] = Field(None, ge=0.0, le=1.0, description="Nucleus sampling threshold")
    top_k: Optional[int] = Field(None, gt=0, description="Top-k sampling cutoff")
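

# Illustrative per-request overrides (hypothetical endpoint and values):
# pointing a request at an OpenAI-compatible server and tightening sampling.
#
#   llm_overrides = LLMConfig(api_base="https://api.example.com/v1", model="openai/gpt-4o")
#   sampling = GenerationConfig(temperature=0.2, top_p=0.9, max_output_tokens=2048)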


# --- Request Models ---


class CreateSessionRequest(BaseModel):
    """Request to create a new chat session."""

    user_id: str
    app_name: Optional[str] = None


class ThinkingConfigModel(BaseModel):
    """Configuration for BuiltInPlanner thinking features."""

    include_thoughts: bool = Field(True, description="Whether to include internal thoughts in the response")
    thinking_budget: int = Field(1024, gt=0, description="Token budget for thinking")


class AgentConfig(BaseModel):
    """Comprehensive configuration for creating an ADK Agent."""

    name: str = Field(..., description="Unique name for the agent")
    description: Optional[str] = Field(None, description="Description of agent capabilities")
    model: str = Field(..., description="Model identifier (e.g., 'gemini-2.5-flash')")
    instruction: str = Field(..., description="System instruction / persona for the agent")
    tools: List[str] = Field(default_factory=list, description="List of tool names to enable")
    enable_code_execution: bool = Field(False, description="Enable built-in code executor")
    thinking_config: Optional[ThinkingConfigModel] = Field(None, description="Configuration for reasoning/planning")

    # Nested configurations
    llm_config: Optional[LLMConfig] = Field(None, description="Custom LLM API connection details")
    generation_config: Optional[GenerationConfig] = Field(None, description="Generation parameters")
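

# Illustrative agent configuration (hypothetical values throughout):
#
#   agent = AgentConfig(
#       name="researcher",
#       model="gemini-2.5-flash",
#       instruction="You are a careful research assistant.",
#       tools=["filesystem"],
#       thinking_config=ThinkingConfigModel(include_thoughts=False, thinking_budget=2048),
#       generation_config=GenerationConfig(temperature=0.3),
#   )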


class AgentDefinition(AgentConfig):
    """Persisted agent definition with a unique ID."""

    id: str = Field(..., description="Unique identifier for the agent")
    created_at: float = Field(default_factory=time.time, description="Creation timestamp (Unix seconds)")
    updated_at: float = Field(default_factory=time.time, description="Last update timestamp (Unix seconds)")


class ChatTurnRequest(BaseModel):
    """Request for a single chat turn with a specific agent."""

    agent_id: str = Field(..., description="ID of the agent to use for this turn")
    message: str = Field(..., description="User message content")
    # Optional overrides for this specific turn
    streaming_mode: Optional[str] = Field("sse", description="Streaming mode: 'sse' or 'none'")
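

# Illustrative chat turn (hypothetical agent ID):
#
#   turn = ChatTurnRequest(agent_id="agent-123", message="List the repo's tools.")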


class ChatRequest(BaseModel):
    """
    LEGACY: Request for chat completion.

    Used by the old /chat/stream endpoint.
    """

    session_id: str
    user_id: str
    message: str
    app_name: Optional[str] = None

    agent_config: Optional[AgentConfig] = Field(None, description="Ephemeral agent config")
    max_llm_calls: Optional[int] = Field(500, gt=0)
    streaming_mode: Optional[str] = "sse"


class SessionResponse(BaseModel):
    """Response for session operations."""

    id: str
    app_name: str
    user_id: str
    updated_at: Any = None


class HistoryEvent(BaseModel):
    """A single event in chat history."""

    type: str
    role: str
    content: str
    agent_name: Optional[str] = None
    timestamp: Optional[Any] = None
    invocation_id: Optional[str] = None


class SessionDetailResponse(SessionResponse):
    """Session details including chat history."""

    history: List[HistoryEvent] = []
    events_count: int = 0
    created_at: Any = None
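

# Illustrative response shape (hypothetical values):
#
#   detail = SessionDetailResponse(
#       id="sess-1",
#       app_name="demo",
#       user_id="u-42",
#       history=[HistoryEvent(type="message", role="user", content="Hi")],
#       events_count=1,
#   )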