Ymn dev #114

Open · wants to merge 2 commits into main
2 changes: 1 addition & 1 deletion nodes.py
@@ -3,7 +3,7 @@
import yaml
from pocketflow import Node, BatchNode
from utils.crawl_github_files import crawl_github_files
-from utils.call_llm import call_llm
+from utils.call_llm_openai import call_llm
from utils.crawl_local_files import crawl_local_files


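Both utilities expose the same call_llm(prompt: str, use_cache: bool = True) -> str signature, so this one-line import swap is the only change nodes.py needs. A minimal sketch of the shared interface the nodes rely on (the Protocol name is illustrative, not part of the PR):

    from typing import Protocol

    class LLMCaller(Protocol):
        # Assumed shared signature of utils.call_llm_gemini.call_llm and
        # utils.call_llm_openai.call_llm; nodes.py depends only on this,
        # which is why the provider can be swapped with a single import.
        def __call__(self, prompt: str, use_cache: bool = True) -> str: ...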
1 change: 1 addition & 0 deletions utils/call_llm.py → utils/call_llm_gemini.py
@@ -233,3 +233,4 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
print("Making call...")
response1 = call_llm(test_prompt, use_cache=False)
print(f"Response: {response1}")

96 changes: 96 additions & 0 deletions utils/call_llm_openai.py
@@ -0,0 +1,96 @@
import os
import logging
import json
from datetime import datetime
from openai import OpenAI

# Configure logging
def setup_logger():
    log_directory = os.getenv("LOG_DIR", "logs")
    os.makedirs(log_directory, exist_ok=True)
    log_file = os.path.join(
        log_directory,
        f"llm_calls_{datetime.now().strftime('%Y%m%d')}.log"
    )

    logger = logging.getLogger("llm_logger")
    logger.setLevel(logging.INFO)
    logger.propagate = False  # Don't propagate to root logger

    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    )
    logger.addHandler(file_handler)
    return logger

logger = setup_logger()
cache_file = os.getenv("LLM_CACHE_FILE", "llm_cache.json")
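# Note: cache_file (and LOG_DIR above) are resolved once at import time, so
# these environment variables must be set before this module is imported.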

# Wrapper for calling OpenAI o-series models with optional caching
def call_llm(prompt: str, use_cache: bool = True) -> str:
"""
Send a prompt to the OpenAI thinking model (default: o4-mini) with chain-of-thought enabled.
Caches responses in llm_cache.json by default.

Args:
prompt (str): The input prompt for the LLM.
use_cache (bool): Whether to read/write from cache.

Returns:
str: The model's response text.
"""
logger.info(f"PROMPT: {prompt}")

# Load or check cache
if use_cache and os.path.exists(cache_file):
try:
with open(cache_file, "r", encoding="utf-8") as f:
cache = json.load(f)
if prompt in cache:
logger.info("CACHE HIT")
logger.info(f"RESPONSE: {cache[prompt]}")
return cache[prompt]
except Exception:
logger.warning("Failed to load cache; continuing without cache")

    # Initialize OpenAI client
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
    model_name = os.getenv("OPENAI_MODEL", "o4-mini")

    # Send chat completion request
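    # reasoning_effort ("low" / "medium" / "high") controls how many reasoning
    # tokens the o-series model spends before answering; max_completion_tokens
    # caps reasoning and visible output tokens combined, and store=False keeps
    # the exchange out of OpenAI's stored completions.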
    response = client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
        max_completion_tokens=4000,
        reasoning_effort="medium",
        store=False
    )
    response_text = response.choices[0].message.content

    logger.info(f"RESPONSE: {response_text}")

    # Write to cache
    if use_cache:
        try:
            cache = {}
            if os.path.exists(cache_file):
                with open(cache_file, "r", encoding="utf-8") as f:
                    cache = json.load(f)
        except Exception:
            cache = {}

        cache[prompt] = response_text
        try:
            with open(cache_file, "w", encoding="utf-8") as f:
                json.dump(cache, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"Failed to save cache: {e}")

    return response_text

if __name__ == "__main__":
    test_prompt = "Hello, how are you?"
    print("Making call...")
    response = call_llm(test_prompt, use_cache=False)
    print(f"Response: {response}")