Forgot to commit in small portions again

This commit is contained in:
2025-04-24 11:22:21 -04:00
parent 94084084f3
commit ef80aa825d
11 changed files with 679 additions and 3 deletions

View File

@ -0,0 +1,15 @@
import json
class AllianceSelectionAssistant:
    """Assists in alliance selection using LLM and team data."""

    def __init__(self, llm_model):
        # llm_model is expected to expose query_ollama(prompt) — see OLLAMAConnector.
        self.llm_model = llm_model

    def generate_pick_list(self, teams_data, strategy):
        """Generate an alliance pick list from team data and a pick strategy.

        Args:
            teams_data: List of per-team dictionaries to embed in the prompt.
            strategy: Free-text description of the picking strategy.

        Returns:
            The LLM's response string, or None if the query failed.
        """
        # Serialize the list of dictionaries to a string for the prompt.
        teams_data_str = json.dumps(teams_data)
        # BUG FIX: the original interpolated an undefined name `pick_strategy`
        # (the parameter is `strategy`) and called self.query_ollama, which does
        # not exist on this class. Delegate to the injected connector instead,
        # consistent with TeamRatingGenerator.
        prompt = f"Given the following teams data: {teams_data_str} and pick strategy: {strategy}, generate an alliance pick list."
        return self.llm_model.query_ollama(prompt)

View File

@ -0,0 +1,32 @@
import json
import requests
class OLLAMAConnector:
"""
Abstract class to interact with a local LLM using Ollama for predictions.
"""
def __init__(
self, model_name: str, ollama_base_url: str = "http://localhost:11434"
):
self.model_name = model_name
self.ollama_base_url = ollama_base_url
def query_ollama(self, prompt: str):
"""
Helper function to query the Ollama API.
"""
url = f"{self.ollama_base_url}/api/generate"
data = {
"prompt": prompt,
"model": self.model_name,
"stream": False, # Set to False to get the full response at once
}
try:
response = requests.post(url, json=data, stream=False)
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
return response.json()["response"]
except requests.exceptions.RequestException as e:
print(f"Error querying Ollama: {e}")
return None

View File

@ -0,0 +1,9 @@
class MatchPredictor:
    """Predicts match outcomes using LLM."""

    def __init__(self, llm_model):
        # llm_model must expose query_ollama(prompt) — see OLLAMAConnector.
        self.llm_model = llm_model

    def predict_outcome(self, blue_alliance_data, red_alliance_data):
        """Ask the LLM to predict the outcome of a match between two alliances.

        Args:
            blue_alliance_data: Data describing the blue alliance.
            red_alliance_data: Data describing the red alliance.

        Returns:
            The LLM's prediction string, or None if the query failed.
        """
        prompt = f"Given blue alliance data: {blue_alliance_data} and red alliance data: {red_alliance_data}, predict the match outcome."
        # BUG FIX: the original called self.query_ollama, which does not exist
        # on this class; delegate to the injected connector instead (consistent
        # with TeamRatingGenerator).
        return self.llm_model.query_ollama(prompt)

View File

@ -0,0 +1,17 @@
class TeamRatingGenerator:
"""Generates subjective team ratings using LLM predictions"""
def __init__(self, llm_model):
self.llm_model = llm_model
def rate_team(
self,
perfomance_metrics: dict,
raw_event_data: dict,
isa_data: dict,
isa_notes: dict,
) -> str | None:
"""Rates a team based on available data"""
return self.llm_model.query_ollama(
prompt=f"The following First Robotics Competition (FRC), data comes from three different sources covering the exact same team and event, please cross reference them to identify possible problems. ```{perfomance_metrics}```, ```{raw_event_data}```, ```{isa_data}``` Do note that while the data source is the same for all three, the presentation of the data doesn't match up perfectly. I also have the following notes about the team in the data: ```{isa_notes}```Once you've done so, please use the data you have collected and referenced to give a comprehensive subjective rating to the team. This is not an interactive conversation, so please give an output that covers everything that you think the user may want in a single message including examples to support the conclusions. THE DATA ONLY CONTAINS ONE TEAM, OUTPUT MUST BE IN HTML FORMAT."
)