Docs
Model Preferences - Python SDK

Model Preferences - Python SDK

Configure AI model preferences with the Python SDK

Model Preferences

Configure which AI models are used for different tasks across your tenant.

Get Model Preferences

from lumnisai import Client

client = Client(api_key="your-api-key")

# Fetch the tenant's preferences; include_defaults=True also returns
# entries that are still on the system default.
prefs = client.get_model_preferences(include_defaults=True)

for pref in prefs.preferences:
    print(f"{pref.model_type}: {pref.provider}/{pref.model_name}")
    # Flag whether this entry is a built-in default or a custom override.
    origin = "  (system default)" if pref.is_default else "  (custom)"
    print(origin)

Update Model Preferences

from lumnisai import ModelType

# Update a single model type, keyed by the ModelType enum.
smart_update = {
    ModelType.SMART_MODEL: {"provider": "openai", "model_name": "gpt-4.1"},
}
client.update_model_preferences(smart_update)

# Plain string keys work the same way.
client.update_model_preferences({
    "SMART_MODEL": {
        "provider": "anthropic",
        "model_name": "claude-3-7-sonnet-20250219",
    },
})

Bulk Update Multiple Models

# Configure several model types with a single API call.
preferences = {
    model_type: {"provider": "openai", "model_name": model_name}
    for model_type, model_name in [
        ("SMART_MODEL", "gpt-4.1"),
        ("FAST_MODEL", "gpt-4.1-mini"),
        ("REASONING_MODEL", "o4-mini"),
    ]
}

updated = client.update_model_preferences(preferences)

print("Updated preferences:")
for pref in updated.preferences:
    print(f"  {pref.model_type}: {pref.provider}/{pref.model_name}")

Model Types

Available Model Types

from lumnisai import ModelType

# The standard model types and their intended roles.
model_types = [
    ModelType.SMART_MODEL,      # high-capability model for complex tasks
    ModelType.FAST_MODEL,       # fast model for simple tasks
    ModelType.REASONING_MODEL,  # deep reasoning model (e.g., o1, o3)
    ModelType.VISION_MODEL,     # vision-capable model
    ModelType.EMBEDDING_MODEL,  # embedding model for semantic search
]

# Inspect the configuration for a subset of model types.
prefs = client.get_model_preferences()

wanted = {ModelType.SMART_MODEL, ModelType.REASONING_MODEL}
for pref in prefs.preferences:
    if pref.model_type in wanted:
        print(f"{pref.model_type}: {pref.provider}/{pref.model_name}")

Provider Options

Supported providers:

  • openai - OpenAI models (GPT-4, GPT-4.1, o1, o3, etc.)
  • anthropic - Anthropic models (Claude 3.x, Claude 4)
  • google_genai - Google Gemini models
  • azure_openai - Azure-hosted OpenAI models

Using Model Preferences

With Agent Configuration

from lumnisai import Client, AgentConfig

client = Client(api_key="your-api-key")

# Tenant-wide preference: SMART_MODEL -> OpenAI gpt-4.1.
client.update_model_preferences({
    "SMART_MODEL": {
        "provider": "openai",
        "model_name": "gpt-4.1",
    },
})

# A per-request AgentConfig takes precedence over tenant preferences.
agent_config = AgentConfig(
    coordinator_model_name="anthropic:claude-3-7-sonnet-20250219",
    planner_model_name="openai:gpt-4.1",
    orchestrator_model_name="openai:gpt-4.1",
    use_cognitive_tools=True,
)

response = client.invoke(
    "Complex analysis task",
    agent_config=agent_config,
    user_id="user@example.com",
)

Default Behavior

# With no custom preferences configured, requests run on system defaults.
response = client.invoke("Simple task")

# Once a preference is set, subsequent requests pick it up.
smart_pref = {"SMART_MODEL": {"provider": "openai", "model_name": "gpt-4.1"}}
client.update_model_preferences(smart_pref)

response = client.invoke("Simple task")  # Uses gpt-4.1

Complete Configuration Example

from lumnisai import Client, AgentConfig

# End-to-end walkthrough: inspect current preferences, update them,
# run a request on the new configuration, then override per-request.
# (Removed an unused `import os` from the original example.)

# Initialize client
client = Client(api_key="your-api-key")

# Step 1: Check current preferences (include_defaults=True also lists
# system defaults; pref.is_default distinguishes them from custom entries)
print("Current model preferences:")
current = client.get_model_preferences(include_defaults=True)

for pref in current.preferences:
    default_marker = " (default)" if pref.is_default else " (custom)"
    print(f"  {pref.model_type}: {pref.provider}/{pref.model_name}{default_marker}")

# Step 2: Update preferences for your use case
print("\nUpdating preferences...")

new_preferences = {
    "SMART_MODEL": {
        "provider": "openai",
        "model_name": "gpt-4.1"
    },
    "FAST_MODEL": {
        "provider": "openai",
        "model_name": "gpt-4.1-mini"
    },
    "REASONING_MODEL": {
        "provider": "openai",
        "model_name": "o4-mini"
    }
}

updated = client.update_model_preferences(new_preferences)

print("✓ Preferences updated")

# Step 3: Test with a request that now runs on the configured models
print("\nTesting with new preferences...")

response = client.invoke(
    "What are the latest AI trends?",
    user_id="user@example.com"
)

# Plain strings here: the original used f-strings with no placeholders.
print("Response generated using configured models")
print(f"Output length: {len(response.output_text)} chars")

# Step 4: Override the tenant defaults for a single reasoning-heavy task
print("\nOverriding for reasoning task...")

reasoning_config = AgentConfig(
    coordinator_model_name="openai:o4-mini",  # Use reasoning model
    planner_model_name="openai:gpt-4.1",
    use_cognitive_tools=True,
    enable_task_validation=True,
    generate_comprehensive_output=True
)

response = client.invoke(
    "Solve this complex problem: ...",
    agent_config=reasoning_config,
    user_id="user@example.com"
)

print("✓ Task completed with reasoning model")

Advanced Configuration

Provider-Specific Models

# Every supported provider takes the same preference shape; only the
# provider and model_name values differ. The calls below run in the
# same order as the original one-by-one examples.
provider_examples = [
    ("openai", "gpt-4.1"),                        # OpenAI models
    ("anthropic", "claude-3-7-sonnet-20250219"),  # Anthropic Claude
    ("google_genai", "gemini-2.5-pro"),           # Google Gemini
    ("azure_openai", "gpt-4"),                    # Azure OpenAI
]

for provider, model_name in provider_examples:
    client.update_model_preferences({
        "SMART_MODEL": {
            "provider": provider,
            "model_name": model_name,
        },
    })

Reset to Defaults

import asyncio

from lumnisai import AsyncClient

# To reset a preference to system default, delete the custom entry via the
# async resource API. `async with` / `await` are only legal inside a
# coroutine — the original snippet used them at module level (a
# SyntaxError) and on the sync Client — so wrap the call in an async
# function driven by asyncio.run(), mirroring the AsyncClient usage
# elsewhere in these docs.
async def reset_smart_model():
    async with AsyncClient(api_key="your-api-key") as async_client:
        # Delete custom preference (reverts to system default)
        await async_client.model_preferences.delete("SMART_MODEL")

asyncio.run(reset_smart_model())

# Or overwrite with known defaults using the sync client
default_preferences = {
    "SMART_MODEL": {
        "provider": "openai",
        "model_name": "gpt-4o"
    },
    "FAST_MODEL": {
        "provider": "openai",
        "model_name": "gpt-4o-mini"
    }
}

client.update_model_preferences(default_preferences)

Per-Task Model Selection

Using AgentConfig

from lumnisai import Client, AgentConfig

client = Client(api_key="your-api-key")

# Assign a model to each agent role and enable the advanced capabilities.
agent_config = AgentConfig(
    coordinator_model_name="openai:gpt-4.1",   # decides which sub-tasks to create
    planner_model_name="openai:gpt-4.1",       # creates execution plans
    orchestrator_model_name="openai:gpt-4.1",  # manages execution
    use_cognitive_tools=True,
    enable_task_validation=True,
    generate_comprehensive_output=True,
)

response = client.invoke(
    "Research and analyze the latest AI papers",
    agent_config=agent_config,
    user_id="user@example.com",
)

Reasoning vs Speed

# Complex reasoning tasks: reasoning-class coordinator, smart planner,
# cognitive tools on.
reasoning_config = AgentConfig(
    coordinator_model_name="openai:o4-mini",
    planner_model_name="openai:gpt-4.1",
    use_cognitive_tools=True,
)

# Fast, simple tasks: mini models everywhere, cognitive tools off.
fast_config = AgentConfig(
    coordinator_model_name="openai:gpt-4.1-mini",
    planner_model_name="openai:gpt-4.1-mini",
    use_cognitive_tools=False,
)

Async Model Preferences

import asyncio

from lumnisai import AsyncClient


async def configure_models():
    """Print current preferences, then point SMART_MODEL at Claude."""
    client = AsyncClient(api_key="your-api-key")

    async with client:
        current = await client.get_model_preferences()

        print("Current preferences:")
        for pref in current.preferences:
            print(f"  {pref.model_type}: {pref.provider}/{pref.model_name}")

        updated = await client.update_model_preferences({
            "SMART_MODEL": {
                "provider": "anthropic",
                "model_name": "claude-3-7-sonnet-20250219",
            },
        })

        print("\nUpdated preferences:")
        for pref in updated.preferences:
            print(f"  {pref.model_type}: {pref.provider}/{pref.model_name}")


asyncio.run(configure_models())

Best Practices

Set Tenant-Wide Defaults

# One configuration call covers the entire tenant.
client.update_model_preferences({
    "SMART_MODEL": {"provider": "openai", "model_name": "gpt-4.1"},
    "FAST_MODEL": {"provider": "openai", "model_name": "gpt-4.1-mini"},
})

# All requests now use these defaults
response1 = client.invoke("Task 1", user_id="user1@example.com")
response2 = client.invoke("Task 2", user_id="user2@example.com")

Override When Needed

# Tenant defaults cover the common case.
response = client.invoke("Standard task")

# For special cases, pass a one-off AgentConfig.
special_config = AgentConfig(
    coordinator_model_name="openai:o4-mini",  # Use reasoning model
)

response = client.invoke(
    "Complex reasoning task",
    agent_config=special_config,
)

Monitor Model Usage

# List only the model types that carry a custom (non-default) setting.
prefs = client.get_model_preferences()

print("Current model configuration:")
custom_entries = (p for p in prefs.preferences if not p.is_default)
for pref in custom_entries:
    print(f"  {pref.model_type}: {pref.provider}/{pref.model_name} (custom)")

Model Selection Guidelines

# Fast tasks: use FAST_MODEL
simple_response = client.invoke("What is 2+2?")

# Complex analysis: use SMART_MODEL
analysis_response = client.invoke(
    "Analyze this dataset and provide detailed insights"
)

# Deep reasoning: route to REASONING_MODEL via an AgentConfig override
reasoning_config = AgentConfig(coordinator_model_name="openai:o4-mini")

reasoning_response = client.invoke(
    "Solve this mathematical proof",
    agent_config=reasoning_config,
)