import time
from functools import wraps
def rate_limit(calls: int, period: float):
    """Decorator factory that throttles invocations to *calls* per *period* seconds.

    Enforces a minimum gap of ``period / calls`` seconds between successive
    calls by sleeping when a call arrives too soon. The timer is shared by
    every function decorated with the same ``rate_limit(...)`` instance,
    matching the original closure-cell behavior.

    Args:
        calls: Maximum number of calls allowed per period (must be > 0).
        period: Length of the rate-limit window in seconds (must be > 0).

    Raises:
        ValueError: If ``calls`` or ``period`` is not positive.
    """
    if calls <= 0 or period <= 0:
        raise ValueError("calls and period must be positive")
    min_interval = period / calls
    # Timestamp of the last completed call; 0.0 means "never called yet".
    last_called = 0.0

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            nonlocal last_called
            # time.monotonic() cannot jump backwards on system-clock
            # adjustments, unlike time.time(), so the elapsed interval
            # measurement is always well-defined.
            elapsed = time.monotonic() - last_called
            if elapsed < min_interval:
                time.sleep(min_interval - elapsed)
            result = func(*args, **kwargs)
            last_called = time.monotonic()
            return result
        return wrapper
    return decorator
@rate_limit(calls=3, period=1.0)  # at most 3 upstream LLM calls per second
def rate_limited_ai(state: AgentState) -> AgentState:
    """Rate-limited wrapper around the ``ai`` node.

    Fix: the original line ended with a stray apostrophe (``return ai(state)'``),
    a copy-paste artifact that made the file a syntax error.
    """
    return ai(state)
from textblob import TextBlob
def analyze_sentiment(text: str) -> float:
"""Returns sentiment score between -1 (negative) and 1 (positive)"""
return TextBlob(text).sentiment.polarity
def enhanced_ai(state: AgentState) -> AgentState:
    """Generate an AI reply whose tone adapts to the user's sentiment.

    Scores the latest user message with ``analyze_sentiment`` and augments
    the base system prompt with an empathetic (negative sentiment) or
    upbeat (positive sentiment) instruction before invoking the local
    Ollama model. Appends the response to the conversation and routes
    control back to the human.

    Args:
        state: Agent state; ``state["messages"][-1].content`` is read as
            the latest user message.

    Returns:
        The same state dict, mutated with the AI reply appended and
        ``state["next"]`` set to ``"human"``.
    """
    messages = state["messages"]
    last_message = messages[-1].content

    # Tune the system prompt to the polarity of the user's last message.
    sentiment = analyze_sentiment(last_message)
    base_prompt = "You are a helpful AI assistant."
    if sentiment < -0.3:
        system_prompt = f"{base_prompt} Please respond with extra empathy and support."
    elif sentiment > 0.3:
        system_prompt = f"{base_prompt} Match the user's positive energy."
    else:
        system_prompt = base_prompt

    # Fix: the original had the URL wrapped in markdown angle brackets
    # ("<http://...>") and double-escaped newlines ("\\\\n"), which produced
    # an invalid base_url and literal backslash text in the prompt.
    llm = Ollama(base_url="http://localhost:11434", model="llama3")
    context = f"{system_prompt}\n\nUser: {last_message}"
    response = llm.invoke(context)

    state["messages"].append(AIMessage(content=response))
    state["next"] = "human"
    return state
def safe_ai_response(state: AgentState) -> AgentState:
    """Invoke the ``ai`` node, converting any failure into a chat message.

    Acts as a top-level error boundary: if ``ai`` raises, the error text is
    appended to the conversation as an ``AIMessage`` and control is handed
    back to the human, so the graph keeps running instead of crashing.
    """
    try:
        return ai(state)
    except Exception as exc:
        # Surface the failure to the user rather than propagate it.
        error_message = f"An error occurred: {exc}"
        state["messages"].append(AIMessage(content=error_message))
        state["next"] = "human"
        return state
# NOTE(review): removed trailing blog-comment-widget text ("No comments: /
# Post a Comment") — scrape residue, not part of the program.