LLM Integrations¶
Aegis provides native integrations with popular LLM providers for automatic PII protection.
OpenAI Integration¶
Installation¶
AegisOpenAI - Drop-in Replacement¶
from aegis_sdk import AegisOpenAI
# Drop-in replacement for OpenAI client
client = AegisOpenAI(api_key="sk-...")
# All requests automatically have PII masked
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "Customer email: [email protected]"}
    ]
)
# OpenAI only sees: "Customer email: j***@example.com"
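Because AegisOpenAI is a drop-in replacement, the return value is the familiar OpenAI response object and is read the same way:
print(response.choices[0].message.content)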
Streaming Support¶
# Streaming works the same way
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": user_input}],
    stream=True
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
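If you also need the full reply as a string, one option is to collect the deltas instead of printing them (same request as above):
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": user_input}],
    stream=True
)
reply = "".join(chunk.choices[0].delta.content or "" for chunk in stream)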
Function Calling¶
# Function arguments are also checked
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Search for user [email protected]"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "search_user",
            "parameters": {...}
        }
    }]
)
Anthropic Integration¶
Installation¶
AegisAnthropic - Drop-in Replacement¶
from aegis_sdk import AegisAnthropic
client = AegisAnthropic(api_key="...")
message = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": "Customer SSN: 123-45-6789"}
    ]
)
# Claude only sees: "Customer SSN: XXX-XX-6789"
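As with the OpenAI wrapper, the return value is the standard Anthropic Message object, so the reply text is read in the usual way:
print(message.content[0].text)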
Generic LLM Gateway¶
For any LLM provider, use the AegisLLMGateway:
from aegis_sdk import AegisLLMGateway
gateway = AegisLLMGateway(destination="AI_TOOL")
# Mask a single prompt
masked_prompt = gateway.mask_prompt("My email is [email protected]")
# Returns: "My email is j***@example.com"
# Mask a message list
messages = [
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "My SSN is 123-45-6789"}
]
masked_messages = gateway.mask_messages(messages)
# Returns messages with SSN masked
Complete Integration Example¶
from aegis_sdk import AegisLLMGateway
import anthropic
gateway = AegisLLMGateway(destination="AI_TOOL")
client = anthropic.Anthropic()
def safe_chat(user_message: str) -> str:
    # Mask PII before sending
    masked = gateway.mask_prompt(user_message)
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1024,
        messages=[{"role": "user", "content": masked}]
    )
    return response.content[0].text
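A quick usage example of the helper above:
reply = safe_chat("Write a follow-up email to [email protected] about their invoice")
print(reply)  # The model only ever saw the masked address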
Manual Integration Pattern¶
For complete control, use the core Aegis class:
from aegis_sdk import Aegis, Decision
import openai
aegis = Aegis()
client = openai.OpenAI()
def protected_chat(user_input: str) -> str:
    # Process with full policy evaluation
    result = aegis.process(user_input, destination="AI_TOOL")
    # Handle policy decisions
    if result.decision == Decision.BLOCKED:
        return f"Request blocked: {result.summary}"
    # Use masked content
    safe_input = result.masked_content if result.masked_content else user_input
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": safe_input}]
    )
    return response.choices[0].message.content
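The same pattern extends naturally to the model's output. A sketch that reuses protected_chat and screens the reply before it reaches the end user (see "Check Both Input and Output" below):
def protected_chat_checked(user_input: str) -> str:
    reply = protected_chat(user_input)
    # Screen the model's reply before returning it to the user
    output_result = aegis.process(reply, destination="CUSTOMER")
    if output_result.decision == Decision.BLOCKED:
        return f"Response blocked: {output_result.summary}"
    return output_result.masked_content or reply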
LangChain Integration¶
Installation¶
Using with LangChain¶
from aegis_sdk import Aegis
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage
aegis = Aegis()
def safe_langchain_call(user_input: str):
    # Process input
    result = aegis.process(user_input, destination="AI_TOOL")
    if result.is_blocked:
        raise ValueError(result.summary)
    # Use with LangChain
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        HumanMessage(content=result.masked_content or user_input)
    ])
    return response.content
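If you prefer to express this as a chain, a minimal sketch (assuming the same Aegis API as above) wraps the masking step in a RunnableLambda so every invocation is screened before it reaches the model:
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI

def mask_or_raise(user_input: str) -> str:
    result = aegis.process(user_input, destination="AI_TOOL")
    if result.is_blocked:
        raise ValueError(result.summary)
    return result.masked_content or user_input

# Masking runs as the first step of the chain
protected_chain = RunnableLambda(mask_or_raise) | ChatOpenAI(model="gpt-4")
reply = protected_chain.invoke("My SSN is 123-45-6789").content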
RAG Pipeline Protection¶
from aegis_sdk import Aegis
from langchain.vectorstores import Chroma
aegis = Aegis()
def safe_rag_query(query: str, vectorstore: Chroma) -> str:
    # Check query for PII
    query_result = aegis.process(query, destination="AI_TOOL")
    if query_result.is_blocked:
        return "Query contains blocked content"
    safe_query = query_result.masked_content or query
    # Retrieve documents
    docs = vectorstore.similarity_search(safe_query, k=3)
    # Check retrieved content
    context = "\n".join([d.page_content for d in docs])
    context_result = aegis.process(context, destination="AI_TOOL")
    if context_result.is_blocked:
        return "Retrieved documents contain blocked content"
    safe_context = context_result.masked_content or context
    # Generate with safe content
    return generate_response(safe_query, safe_context)
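The example leaves generate_response to you; a minimal sketch using the same ChatOpenAI client as in the previous example might look like this:
from langchain_openai import ChatOpenAI

def generate_response(query: str, context: str) -> str:
    # Simple prompt stuffing: masked context plus masked query in one message
    llm = ChatOpenAI(model="gpt-4")
    prompt = (
        "Answer the question using only the context below.\n\n"
        f"Context:\n{context}\n\nQuestion: {query}"
    )
    return llm.invoke(prompt).content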
Batch Processing¶
For bulk LLM calls:
from aegis_sdk import Aegis, Decision
aegis = Aegis()
def process_batch(prompts: list[str]) -> list[str]:
    results = []
    for prompt in prompts:
        result = aegis.process(prompt, destination="AI_TOOL")
        if result.decision != Decision.BLOCKED:
            safe_prompt = result.masked_content or prompt
            # llm_client stands in for whichever LLM client you use
            response = llm_client.generate(safe_prompt)
            results.append(response)
        else:
            results.append(f"[BLOCKED: {result.summary}]")
    return results
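If the LLM client is thread-safe, the same loop parallelizes cleanly; a sketch using a thread pool (with the same aegis and llm_client placeholders as above):
from concurrent.futures import ThreadPoolExecutor

def process_batch_concurrent(prompts: list[str], max_workers: int = 4) -> list[str]:
    def handle(prompt: str) -> str:
        result = aegis.process(prompt, destination="AI_TOOL")
        if result.decision == Decision.BLOCKED:
            return f"[BLOCKED: {result.summary}]"
        return llm_client.generate(result.masked_content or prompt)

    # map() preserves the order of the input prompts
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(handle, prompts))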
Best Practices¶
1. Check Both Input and Output¶
# Check user input before LLM
input_result = aegis.process(user_input, destination="AI_TOOL")
# Get LLM response
response = llm.generate(input_result.masked_content or user_input)
# Check LLM output before returning to user
output_result = aegis.process(response, destination="CUSTOMER")
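To close the loop, return the screened (and possibly masked) reply rather than the raw model output:
safe_output = output_result.masked_content or response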
2. Use Streaming for Large Content¶
from aegis_sdk import StreamingProcessor
processor = StreamingProcessor(chunk_size_mb=1)
# Process large prompts in chunks
result = processor.process_file(
    input_path="large_prompt.txt",
    output_path="safe_prompt.txt",
    destination="AI_TOOL"
)
3. Handle Errors Gracefully¶
import logging

from aegis_sdk import AegisError

logger = logging.getLogger(__name__)
try:
    result = aegis.process(content, destination="AI_TOOL")
except AegisError as e:
    logger.error(f"Aegis processing failed: {e}")
    # Decide on fallback behavior
    result = fallback_check(content)
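fallback_check is left to you; one conservative option is to fail closed. A hypothetical sketch that refuses to forward anything that looks like PII when Aegis is unavailable (note it raises rather than returning an Aegis-style result, so the calling code stops instead of continuing):
import re

def fallback_check(content: str) -> None:
    # Crude, illustrative patterns only; this is not a substitute for Aegis
    patterns = [
        r"\b\d{3}-\d{2}-\d{4}\b",     # SSN-like
        r"[\w.+-]+@[\w.-]+\.\w+",     # email-like
    ]
    if any(re.search(p, content) for p in patterns):
        raise ValueError("Aegis unavailable and content looks sensitive; refusing to send")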
4. Use Policy Groups for Different Use Cases¶
# Different policies for different LLM use cases
chat_aegis = aegis.with_policy_group("chat")
code_aegis = aegis.with_policy_group("code_assistant")
analytics_aegis = aegis.with_policy_group("analytics")
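Each derived instance is then used exactly like the base client, for example:
result = chat_aegis.process(user_input, destination="AI_TOOL")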
See Also¶
- SDK Quick Start - Basic SDK usage
- API Reference - Full SDK documentation
- CLI Reference - Command-line usage