Development Environment Setup
A clean, reproducible development environment is the foundation of a successful agent project. This lesson walks through setting up a professional Python project for AI agent development — with proper dependency management, secrets handling, and local testing infrastructure.
Project Structure
Start with a clear directory structure:
my-agent/
├── .env # Local secrets (NEVER commit)
├── .env.example # Template with placeholder values (commit this)
├── .gitignore
├── pyproject.toml # Project metadata and dependencies
├── README.md
├── agent/
│ ├── __init__.py
│ ├── core.py # AgentExecutor and main loop
│ ├── config.py # Settings loaded from environment
│ ├── prompts.py # System prompts and prompt templates
│ └── tools/
│ ├── __init__.py
│ ├── github.py # GitHub API tools
│ ├── search.py # Web search tools
│ └── database.py # Database tools
├── tests/
│ ├── __init__.py
│ ├── conftest.py # Pytest fixtures
│ ├── unit/
│ │ └── test_tools.py
│ └── integration/
│ └── test_agent.py
├── scripts/
│ └── run_local.py # Local development runner
└── docker/
├── Dockerfile
└── docker-compose.yml
Dependency Management with uv
uv is a modern Python package manager — significantly faster than pip, with lockfile support for reproducible installs:
# Install uv (standalone installer; no existing Python needed)
curl -LsSf https://astral.sh/uv/install.sh | sh
# Create project (scaffolds pyproject.toml)
uv init my-agent
cd my-agent
# Add runtime dependencies (recorded in pyproject.toml, pinned in uv.lock)
uv add langchain langchain-openai langchain-anthropic
uv add langchain-community chromadb
uv add pydantic python-dotenv httpx
uv add fastapi uvicorn # If building an API
# Add dev dependencies (tooling only — excluded from production installs)
uv add --dev pytest pytest-asyncio pytest-mock
uv add --dev black ruff mypy
uv add --dev ipykernel # For Jupyter notebooks during development
# This creates pyproject.toml and uv.lock (commit both)
Configuration Management
# agent/config.py
from pydantic_settings import BaseSettings, SettingsConfigDict
from pydantic import Field, SecretStr
from typing import Optional


class AgentSettings(BaseSettings):
    """
    All configuration loaded from environment variables.

    pydantic-settings reads from the .env file and the process environment
    (environment variables take precedence over .env entries).
    SecretStr prevents values from appearing in logs or repr().
    """

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False,
    )

    # LLM Provider
    # openai_api_key has no default: instantiation fails fast with a
    # ValidationError if OPENAI_API_KEY is unset.
    openai_api_key: SecretStr = Field(description="OpenAI API key")
    anthropic_api_key: Optional[SecretStr] = Field(
        default=None, description="Anthropic API key (optional)"
    )
    default_model: str = Field(default="gpt-4o-mini", description="Default LLM model to use")

    # GitHub Integration
    github_token: Optional[SecretStr] = Field(default=None, description="GitHub personal access token")
    github_repo: Optional[str] = Field(default=None, description="Target GitHub repo (owner/repo)")

    # Vector Store
    chroma_persist_dir: str = Field(default="./.chroma_db", description="ChromaDB persistence directory")

    # Agent Behavior
    max_iterations: int = Field(default=10, ge=1, le=50, description="Max agent reasoning steps")
    verbose: bool = Field(default=False, description="Enable verbose agent output")

    # Database
    database_url: Optional[str] = Field(default=None, description="PostgreSQL connection string")


# Singleton instance — import this everywhere.
# NOTE: constructed at import time, so importing this module raises a
# ValidationError when required variables (e.g. OPENAI_API_KEY) are missing.
settings = AgentSettings()
# .env (never commit this file)
OPENAI_API_KEY=sk-proj-your-actual-key-here
ANTHROPIC_API_KEY=sk-ant-your-key-here
GITHUB_TOKEN=ghp_your-token-here
GITHUB_REPO=myorg/my-repo
DEFAULT_MODEL=gpt-4o-mini
MAX_ITERATIONS=10
VERBOSE=false
DATABASE_URL=postgresql://localhost:5432/agent_dev
# .env.example (commit this as a template)
OPENAI_API_KEY=sk-proj-your-key-here
ANTHROPIC_API_KEY=sk-ant-your-key-here
GITHUB_TOKEN=ghp_your-token-here
GITHUB_REPO=owner/repo-name
DEFAULT_MODEL=gpt-4o-mini
MAX_ITERATIONS=10
VERBOSE=false
DATABASE_URL=postgresql://localhost:5432/agent_dev
Setting Up Testing Infrastructure
# tests/conftest.py
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from agent.config import AgentSettings
@pytest.fixture
def mock_settings() -> AgentSettings:
    """Settings built from fake environment values — no real API keys needed."""
    fake_env = {
        "OPENAI_API_KEY": "sk-test-fake-key-for-testing",
        "GITHUB_TOKEN": "ghp-test-fake-token",
        "GITHUB_REPO": "testowner/testrepo",
        "DEFAULT_MODEL": "gpt-4o-mini",
    }
    # patch.dict restores os.environ after the test, keeping tests isolated.
    with patch.dict("os.environ", fake_env):
        yield AgentSettings()
@pytest.fixture
def mock_llm():
    """Mock LLM whose invoke() always yields one fixed canned response."""
    canned_reply = MagicMock(content="Mocked LLM response")
    llm = MagicMock()
    llm.invoke.return_value = canned_reply
    return llm
@pytest.fixture
def mock_openai_client():
    """Mock OpenAI client for testing without API calls.

    Patches the OpenAI constructor and yields the patched client instance,
    whose chat.completions.create() returns a canned completion.
    """
    with patch("openai.OpenAI") as mock_client:
        mock_completion = MagicMock()
        # Use a real one-element list for .choices (instead of relying on
        # MagicMock's auto-__getitem__) so code that iterates, len()s, or
        # indexes response.choices behaves like a real API response.
        choice = MagicMock()
        choice.message.content = "Test response"
        mock_completion.choices = [choice]
        mock_completion.usage.prompt_tokens = 100
        mock_completion.usage.completion_tokens = 50
        mock_client.return_value.chat.completions.create.return_value = mock_completion
        yield mock_client.return_value
Git Configuration
# .gitignore
.env
.env.local
*.env
# Python
__pycache__/
*.pyc
.venv/
dist/
# AI/ML
.chroma_db/
*.pkl
models/
# Development
.pytest_cache/
.mypy_cache/
.ruff_cache/
*.log
Local Development Runner
# scripts/run_local.py
"""Local development runner with hot reload and debug output."""
import asyncio
import sys
from pathlib import Path

# Add project root to path so `agent` is importable when run as a script.
sys.path.insert(0, str(Path(__file__).parent.parent))

from agent.config import settings
from agent.core import create_agent, run_agent


async def main():
    """Interactive REPL: read a prompt, run the agent, print the reply."""
    print(f"Starting agent with model: {settings.default_model}")
    print(f"Max iterations: {settings.max_iterations}")
    print(f"Verbose: {settings.verbose}\n")
    agent = create_agent()
    while True:
        try:
            user_input = input("You: ").strip()
            if not user_input:
                continue
            if user_input.lower() in ("exit", "quit", "q"):
                print("Goodbye!")
                break
            response = await run_agent(agent, user_input)
            print(f"\nAgent: {response}\n")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C, Ctrl-D, or exhausted piped stdin: exit cleanly
            # instead of dumping a traceback (EOFError was unhandled before
            # and crashed the runner).
            print("\nInterrupted. Goodbye!")
            break


if __name__ == "__main__":
    asyncio.run(main())
With this environment in place, you're ready to build the actual agent logic in the next lesson. The time invested in setup now pays dividends throughout the project — clean configuration, testable architecture, and reproducible environments from the start.