First commit

lhr
2026-01-19 22:04:54 +08:00
parent 1fa5a4947a
commit 12ef0292b7
16 changed files with 1147 additions and 0 deletions

app/agent_factory.py

@@ -0,0 +1,143 @@
import logging
from typing import Optional, List, Callable, Dict, Any, Union

from google.adk.agents.llm_agent import LlmAgent, Agent
from google.adk.models.lite_llm import LiteLlm
from google.adk.planners import BuiltInPlanner
from google.adk.code_executors import BuiltInCodeExecutor
from google.genai import types

from app.config import settings
from app.models import AgentConfig, LLMConfig, GenerationConfig
from app.services.tool_service import get_tool_service
from app.tools import TOOL_REGISTRY

logger = logging.getLogger(__name__)

# ==========================
# 🏭 Agent Factory
# ==========================

def _create_llm_model(
    model_name: str,
    llm_config: Optional[LLMConfig] = None
) -> Union[str, LiteLlm]:
    """
    Create the LLM model instance or return a model identifier string.

    Logic:
    1. If the model name implies Gemini (an official Google model), return the
       string directly. ADK's LlmAgent handles 'gemini-...' strings via the
       native google-genai SDK.
    2. For other models (GPT, Claude, etc.), wrap them in the LiteLlm adapter.
    """
    # Normalize for comparison
    name_lower = model_name.lower()

    # Check if this is likely a native Gemini model (no provider prefix).
    # Exclude proxy prefixes that require LiteLLM (e.g. openai/gemini...).
    provider_prefixes = ["openai/", "azure/", "anthropic/", "bedrock/", "mistral/"]
    has_provider_prefix = any(p in name_lower for p in provider_prefixes)
    is_gemini = "gemini" in name_lower and not has_provider_prefix

    # Check if a custom configuration forces a specific path.
    # An API base other than the Google default usually implies LiteLLM/a proxy,
    # even for Gemini models (e.g. via an OpenAI-compatible endpoint).
    has_custom_base = llm_config and llm_config.api_base and "googleapis.com" not in llm_config.api_base
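    # Illustrative: an api_base such as "https://my-proxy.example.com/v1"
    # (hypothetical URL) sets has_custom_base, routing even "gemini-..." names
    # through LiteLLM below.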
    if is_gemini and not has_custom_base:
        logger.info(f"Using Native Gemini Model: {model_name}")
        return model_name

    # Fallback / Non-Gemini -> Use LiteLLM
    api_key = (llm_config.api_key if llm_config and llm_config.api_key else settings.LLM_API_KEY)
    api_base = (llm_config.api_base if llm_config and llm_config.api_base else settings.LLM_API_BASE)

    logger.info(f"Using LiteLLM for: {model_name} (Base: {api_base})")
    return LiteLlm(
        model=model_name,
        api_base=api_base,
        api_key=api_key
    )
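
# A quick sketch of the routing above (model names are illustrative only):
#   _create_llm_model("gemini-2.0-flash")            -> "gemini-2.0-flash" (native string)
#   _create_llm_model("openai/gpt-4o")               -> LiteLlm(model="openai/gpt-4o", ...)
#   _create_llm_model("anthropic/claude-3-5-sonnet") -> LiteLlm(...)  (provider prefix wins)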
def create_agent(config: AgentConfig) -> Agent:
    """
    Create a fully configured ADK Agent based on the provided AgentConfig.
    """
    logger.info(f"Creating Agent: {config.name} ({config.model})")

    # 1. Model Initialization
    # Returns either a str (for native Gemini) or a LiteLlm object
    llm = _create_llm_model(config.model, config.llm_config)

    # 2. Tool Selection
    selected_tools = []
    tool_service = get_tool_service()
    for tool_name in config.tools:
        # A. Check the legacy/hardcoded registry
        if tool_name in TOOL_REGISTRY:
            selected_tools.append(TOOL_REGISTRY[tool_name])
            continue
        # B. Check local tools (tools/ folder)
        local_tool = tool_service.load_local_tool(tool_name)
        if local_tool:
            selected_tools.append(local_tool)
            continue
        # C. Check MCP servers
        mcp_tool = tool_service.get_mcp_toolset(tool_name)
        if mcp_tool:
            selected_tools.append(mcp_tool)
            continue
        logger.warning(f"Tool '{tool_name}' not found (checked Registry, Local, MCP). Skipping.")
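    # E.g. (hypothetical names) tools=["google_search", "my_local_tool", "github_mcp"]
    # resolves in that fixed order: registry first, then the tools/ folder, then MCP.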
    # 3. Code Execution
    code_executor = None
    if config.enable_code_execution:
        logger.info("Enabling BuiltInCodeExecutor")
        code_executor = BuiltInCodeExecutor()

    # 4. Planner / Thinking
    # Only applicable for models that support it (mostly Gemini)
    planner = None
    if config.thinking_config:
        logger.info(f"Enabling BuiltInPlanner with budget {config.thinking_config.thinking_budget}")
        t_config = types.ThinkingConfig(
            include_thoughts=config.thinking_config.include_thoughts,
            thinking_budget=config.thinking_config.thinking_budget
        )
        planner = BuiltInPlanner(thinking_config=t_config)
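    # Note: thinking_budget caps the model's internal reasoning tokens and
    # include_thoughts requests thought summaries in responses (google-genai
    # ThinkingConfig semantics; honored only by thinking-capable models).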
    # 5. Generation Config
    gen_config = None
    if config.generation_config:
        g_params = {}
        if config.generation_config.temperature is not None:
            g_params["temperature"] = config.generation_config.temperature
        if config.generation_config.max_output_tokens is not None:
            g_params["max_output_tokens"] = config.generation_config.max_output_tokens
        if config.generation_config.top_p is not None:
            g_params["top_p"] = config.generation_config.top_p
        if g_params:
            gen_config = types.GenerateContentConfig(**g_params)
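    # Only explicitly set sampling params are forwarded; unset fields keep the
    # model's own defaults instead of being overridden with None.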
    # 6. Assemble LlmAgent
    return LlmAgent(
        name=config.name,
        model=llm,
        description=config.description or "",
        instruction=config.instruction,
        tools=selected_tools,
        code_executor=code_executor,
        planner=planner,
        generate_content_config=gen_config
    )
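
# Example usage (illustrative sketch; AgentConfig construction is assumed from
# the fields referenced above, not confirmed elsewhere in this commit):
#   cfg = AgentConfig(
#       name="research_agent",
#       model="gemini-2.0-flash",
#       instruction="You are a concise research assistant.",
#       tools=["google_search"],
#   )
#   agent = create_agent(cfg)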