From ef80aa825d459cea026226f32784225ba60f20c8 Mon Sep 17 00:00:00 2001
From: Moonlit Jolteon
Date: Thu, 24 Apr 2025 11:22:21 -0400
Subject: [PATCH] Forgot to commit in small portions again

---
 .gitignore | 4 +-
 README.md | 44 +++
 classes.puml | 101 +++++++
 config.json.example | 4 +-
 llm_integration/alliance_selection.py | 15 +
 llm_integration/llm_model.py | 32 +++
 llm_integration/match_outcome_prediction.py | 9 +
 llm_integration/team_subjective_rating.py | 17 ++
 main.py | 154 +++++++++++
 requirements.txt | 12 +
 templates/index.html | 290 ++++++++++++++++++++
 11 files changed, 679 insertions(+), 3 deletions(-)
 create mode 100644 README.md
 create mode 100644 classes.puml
 create mode 100644 llm_integration/alliance_selection.py
 create mode 100644 llm_integration/llm_model.py
 create mode 100644 llm_integration/match_outcome_prediction.py
 create mode 100644 llm_integration/team_subjective_rating.py
 create mode 100644 main.py
 create mode 100644 requirements.txt
 create mode 100644 templates/index.html

diff --git a/.gitignore b/.gitignore
index cf74395..1b3a97a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 config.json
-*/**/__pycache__
\ No newline at end of file
+*/**/__pycache__
+.venv
+__main__
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..dd91247
--- /dev/null
+++ b/README.md
@@ -0,0 +1,44 @@
+## Installation and Setup
+
+1. **Prerequisites:**
+   * Python 3.8+
+   * [Ollama](https://ollama.com/)
+
+2. **Clone the Repository:** (skip this step if you already have the file contents)
+
+3. **Create a Virtual Environment:**
+
+   ```
+   python -m venv .venv
+   source .venv/bin/activate  # On Windows use `.venv\Scripts\activate`
+   ```
+
+4. **Install Dependencies:**
+
+   ```
+   pip install -r requirements.txt
+   ```
+
+5. **Set up Ollama:**
+
+   * Download and install Ollama from [https://ollama.com/](https://ollama.com/).
+   * Run `ollama pull <model_name>` to download the desired language model (e.g., `ollama pull llama2`).
+
+## Configuration
+
+1. **Configure:**
+   * The code uses `config.json.example` as a template. Copy this file to `config.json`.
+   * Edit `config.json` to provide:
+     * The Blue Alliance (TBA) API key
+     * Indiana Scouting Alliance (ISA) API key
+     * Preferred Ollama model
+
+
+## Running the Application
+
+1. **Run the Main Script:**
+
+   ```
+   python main.py
+   ```
+2. 
[**Open the page in your browser**](http://localhost:5000) \ No newline at end of file diff --git a/classes.puml b/classes.puml new file mode 100644 index 0000000..ef9389c --- /dev/null +++ b/classes.puml @@ -0,0 +1,101 @@ +@startuml + +' Data Sources +class DataSource { + {abstract} +get_status() : tuple[DataSourceStatus, dict] + {abstract} +get_team_info(team_number: int) + {abstract} +get_event_matches(event_code: str, team_number: int | None = None) + {abstract} +get_team_performance_metrics(team_number, event_code: str | None = None) +} + +enum DataSourceStatus { + CONNECTED + UNAUTHENTICATED + NOT_FOUND +} + +class TheBlueAllianceConnector { + +__init__(api_token: str, year: int = datetime.now().year) + +get_status() : tuple[DataSourceStatus, dict] + +get_team_info(team_number: int) : dict | None + +get_event_matches(event_code: str, team_number: int | None = None) : dict | None + +get_team_performance_metrics(team_number, event_code: str | None = None) : dict | None + -__calculate_auto_performance(performance: dict, match_points: dict, match_record: dict, alliance_data: dict, robot_position: int) : void + -__calculate_teleop_performance(performance: dict, match_points: dict, match_record: dict, alliance_data: dict, robot_position: int) : void + -__calculate_endgame_performance(performance: dict, match_points: dict, match_record: dict, alliance_data: dict, robot_position: int) : void +} + +class IndianaScoutingAllianceConnector { + +__init__(api_token: str, year=datetime.now().year) + +get_status() : tuple[DataSourceStatus, dict] + +get_event_matches(event_code: str, team_number: int | None = None) + +get_robot_notes(team_number: int, event_code: str | None = None) + +get_team_info(team_number: int) + +get_team_performance_metrics(team_number, event_code: str | None = None) + -__build_ISA_robot_url(include_flags: str, teams: list = [], event_key: str = "") : str + -__build_ISA_human_url(include_flags: str, teams: list = [], event_key: str = "") : str +} + +DataSourceStatus <|.. 
DataSource
+DataSource <|-- TheBlueAllianceConnector
+DataSource <|-- IndianaScoutingAllianceConnector
+
+' LLM Integration
+class AllianceSelectionAssistant {
+  +__init__(llm: OLLAMAConnector)
+  +select_alliance(teams: list, criteria: dict) : list
+}
+
+class OLLAMAConnector {
+  +__init__(model_name: str)
+  +generate_text(prompt: str) : str
+}
+
+class MatchPredictor {
+  +__init__(llm: OLLAMAConnector)
+  +predict_outcome(match_data: dict) : str
+}
+
+class TeamRatingGenerator {
+  +__init__(llm: OLLAMAConnector)
+  +rate_team(team_data: dict) : str
+}
+
+' Utils
+class ConfigurationManager {
+  +get_config(key: str) : any
+  +set_config(key: str, value: any)
+}
+
+class Logger {
+  +__init__(name: str)
+  +log(message: str, level: str)
+  +info(message: str)
+  +warning(message: str)
+  +error(message: str)
+  +debug(message: str)
+}
+
+' Main
+class FRCRatingApp {
+  +__init__()
+  +setup_routes() : void
+  +index()
+  +team_info(team_number: int)
+  +run(debug: bool = True) : void
+}
+
+' Relationships
+AllianceSelectionAssistant --> OLLAMAConnector : Has
+MatchPredictor --> OLLAMAConnector : Has
+TeamRatingGenerator --> OLLAMAConnector : Has
+
+FRCRatingApp --> ConfigurationManager : Uses
+FRCRatingApp --> Logger : Uses
+FRCRatingApp --> TheBlueAllianceConnector : Uses
+FRCRatingApp --> IndianaScoutingAllianceConnector : Uses
+FRCRatingApp --> AllianceSelectionAssistant : Uses
+FRCRatingApp --> MatchPredictor : Uses
+FRCRatingApp --> TeamRatingGenerator : Uses
+
+@enduml
\ No newline at end of file
diff --git a/config.json.example b/config.json.example
index e68b79f..8d89bb1 100644
--- a/config.json.example
+++ b/config.json.example
@@ -1,6 +1,6 @@
 {
    "TBA_TOKEN": "Get your read API token here: https://www.thebluealliance.com/account",
    "USE_ISA_DATA": false,
-   "ISA_TOKEN": "If you are a member of the Indiana Scouting Alliance, put your token here. Reach out to the discord if you don't know how to get it",
-   "OLLAMA_MODEL": "dolphin-mixtral:latest"
+   "ISA_TOKEN": "If you are a member of the Indiana Scouting Alliance, put your token here. Reach out to the discord if you don't know how to get it",
+   "OLLAMA_MODEL": "llama2-uncensored:7b-chat-q2_K"
 }
\ No newline at end of file
diff --git a/llm_integration/alliance_selection.py b/llm_integration/alliance_selection.py
new file mode 100644
index 0000000..e7bc269
--- /dev/null
+++ b/llm_integration/alliance_selection.py
@@ -0,0 +1,15 @@
+import json
+
+
+class AllianceSelectionAssistant:
+    """Assists in alliance selection using LLM and team data."""
+
+    def __init__(self, llm_model):
+        self.llm_model = llm_model
+
+    def generate_pick_list(self, teams_data, pick_strategy):
+        teams_data_str = json.dumps(
+            teams_data
+        )  # Serialize the list of dictionaries to a string
+        prompt = f"Given the following teams data: {teams_data_str} and pick strategy: {pick_strategy}, generate an alliance pick list."
+        return self.llm_model.query_ollama(prompt)
diff --git a/llm_integration/llm_model.py b/llm_integration/llm_model.py
new file mode 100644
index 0000000..238e68a
--- /dev/null
+++ b/llm_integration/llm_model.py
@@ -0,0 +1,32 @@
+import json
+import requests
+
+
+class OLLAMAConnector:
+    """
+    Connector class to interact with a local LLM using Ollama for predictions.
+    """
+
+    def __init__(
+        self, model_name: str, ollama_base_url: str = "http://localhost:11434"
+    ):
+        self.model_name = model_name
+        self.ollama_base_url = ollama_base_url
+
+    def query_ollama(self, prompt: str):
+        """
+        Helper function to query the Ollama API. 
+        """
+        url = f"{self.ollama_base_url}/api/generate"
+        data = {
+            "prompt": prompt,
+            "model": self.model_name,
+            "stream": False,  # Set to False to get the full response at once
+        }
+        try:
+            response = requests.post(url, json=data, stream=False)
+            response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
+            return response.json()["response"]
+        except requests.exceptions.RequestException as e:
+            print(f"Error querying Ollama: {e}")
+            return None
diff --git a/llm_integration/match_outcome_prediction.py b/llm_integration/match_outcome_prediction.py
new file mode 100644
index 0000000..1b47494
--- /dev/null
+++ b/llm_integration/match_outcome_prediction.py
@@ -0,0 +1,9 @@
+class MatchPredictor:
+    """Predicts match outcomes using LLM."""
+
+    def __init__(self, llm_model):
+        self.llm_model = llm_model
+
+    def predict_outcome(self, blue_alliance_data, red_alliance_data):
+        prompt = f"Given blue alliance data: {blue_alliance_data} and red alliance data: {red_alliance_data}, predict the match outcome."
+        return self.llm_model.query_ollama(prompt)
diff --git a/llm_integration/team_subjective_rating.py b/llm_integration/team_subjective_rating.py
new file mode 100644
index 0000000..821ea62
--- /dev/null
+++ b/llm_integration/team_subjective_rating.py
@@ -0,0 +1,17 @@
+class TeamRatingGenerator:
+    """Generates subjective team ratings using LLM predictions"""
+
+    def __init__(self, llm_model):
+        self.llm_model = llm_model
+
+    def rate_team(
+        self,
+        performance_metrics: dict,
+        raw_event_data: dict,
+        isa_data: dict,
+        isa_notes: dict,
+    ) -> str | None:
+        """Rates a team based on available data"""
+        return self.llm_model.query_ollama(
+            prompt=f"The following FIRST Robotics Competition (FRC) data comes from three different sources covering the exact same team and event; please cross-reference them to identify possible problems. ```{performance_metrics}```, ```{raw_event_data}```, ```{isa_data}``` Do note that while the underlying data is the same for all three, the presentation of the data doesn't match up perfectly. I also have the following notes about the team in the data: ```{isa_notes}``` Once you've done so, please use the data you have collected and referenced to give a comprehensive subjective rating to the team. This is not an interactive conversation, so please give an output that covers everything that you think the user may want in a single message, including examples to support the conclusions. THE DATA ONLY CONTAINS ONE TEAM, OUTPUT MUST BE IN HTML FORMAT."
+ ) diff --git a/main.py b/main.py new file mode 100644 index 0000000..401f3b9 --- /dev/null +++ b/main.py @@ -0,0 +1,154 @@ +# from flask import Flask, render_template, jsonify + + +# from data_sources.tba import TheBlueAllianceConnector +# from data_sources.isa import IndianaScoutingAllianceConnector +# from llm_integration.llm_model import OLLAMAConnector +# from llm_integration.team_subjective_rating import TeamRatingGenerator +# from llm_integration.match_outcome_prediction import MatchPredictor +# from llm_integration.alliance_selection import AllianceSelectionAssistant +# from utils.config_manager import ConfigurationManager +# from utils.logger import Logger + + +# app = Flask(__name__) + +# config = ConfigurationManager() +# logger = Logger(__name__) + +# # Initialize data sources +# tba_api_key = config.get("TBA_TOKEN") +# tba_connector = TheBlueAllianceConnector(tba_api_key) + +# isa_api_key = config.get("ISA_TOKEN") +# isa_connector = IndianaScoutingAllianceConnector(isa_api_key) + +# # Initialize LLM model +# llm_model = config.get("OLLAMA_MODEL") +# llm = OLLAMAConnector(llm_model) + +# # Initialize prediction and rating services +# team_rater = TeamRatingGenerator(llm) +# match_predictor = MatchPredictor(llm) +# alliance_assistant = AllianceSelectionAssistant(llm) + + +# @app.route("/") +# def index(): +# return render_template("index.html") + + +# @app.route("/team/") +# def team_info(team_number): +# event_code = "2025incmp" +# tba_team_performance_metrics = tba_connector.get_team_performance_metrics( +# team_number, event_code +# ) +# tba_raw_event_data = tba_connector.get_event_matches(event_code, team_number) +# isa_data = isa_connector.get_event_matches(event_code, team_number) +# isa_notes = isa_connector.get_robot_notes(team_number, event_code) + +# if tba_team_performance_metrics: +# # logger.info(f"Team {team_number} Metrics: {tba_team_performance_metrics}") + +# # Generate subjective team rating +# logger.info(f"Generating Team rating...") +# team_rating = team_rater.rate_team( +# tba_team_performance_metrics, tba_raw_event_data, isa_data, isa_notes +# ) +# output = f"Subjective Team Rating: {team_rating}" +# logger.info(output) +# return output + +# else: +# output = ( +# f"Could not retrieve metrics for team {team_number} at event {event_code}" +# ) +# logger.info(output) +# return output + + +# if __name__ == "__main__": +# app.run(debug=True) + + +from flask import Flask, render_template, jsonify + + +from data_sources.tba import TheBlueAllianceConnector +from data_sources.isa import IndianaScoutingAllianceConnector +from llm_integration.llm_model import OLLAMAConnector +from llm_integration.team_subjective_rating import TeamRatingGenerator +from llm_integration.match_outcome_prediction import MatchPredictor +from llm_integration.alliance_selection import AllianceSelectionAssistant +from utils.config_manager import ConfigurationManager +from utils.logger import Logger + + +class FrcRatingApp: + def __init__(self): + self.app = Flask(__name__) + self.config = ConfigurationManager() + self.logger = Logger(__name__) + + # Initialize data sources + tba_api_key = self.config.get("TBA_TOKEN") + self.tba_connector = TheBlueAllianceConnector(tba_api_key) + + isa_api_key = self.config.get("ISA_TOKEN") + self.isa_connector = IndianaScoutingAllianceConnector(isa_api_key) + + # Initialize LLM model + llm_model = self.config.get("OLLAMA_MODEL") + self.llm = OLLAMAConnector(llm_model) + + # Initialize prediction and rating services + self.team_rater = 
TeamRatingGenerator(self.llm)
+        self.match_predictor = MatchPredictor(self.llm)
+        self.alliance_assistant = AllianceSelectionAssistant(self.llm)
+
+        self.setup_routes()
+
+    def setup_routes(self):
+        self.app.add_url_rule("/", "index", self.index)
+        self.app.add_url_rule("/team/<int:team_number>", "team_info", self.team_info)
+
+    def index(self):
+        return render_template("index.html")
+
+    def team_info(self, team_number):
+        event_code = "2025incmp"
+        tba_team_performance_metrics = self.tba_connector.get_team_performance_metrics(
+            team_number, event_code
+        )
+        tba_raw_event_data = self.tba_connector.get_event_matches(
+            event_code, team_number
+        )
+        isa_data = self.isa_connector.get_event_matches(event_code, team_number)
+        isa_notes = self.isa_connector.get_robot_notes(team_number, event_code)
+
+        if tba_team_performance_metrics:
+            # Generate subjective team rating
+            self.logger.info("Generating Team rating...")
+            team_rating = self.team_rater.rate_team(
+                tba_team_performance_metrics,
+                tba_raw_event_data,
+                isa_data,
+                isa_notes,
+            )
+            output = f"Subjective Team Rating: {team_rating}"
+            self.logger.info(output)
+            return output
+
+        else:
+            output = f"Could not retrieve metrics for team {team_number} at event {event_code}"
+            self.logger.info(output)
+            return output
+
+    def run(self, debug=True):
+        self.app.run(debug=debug)
+
+
+if __name__ == "__main__":
+    frc_rating_app = FrcRatingApp()
+    frc_rating_app.run()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..8333eb5
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+blinker==1.9.0
+certifi==2025.1.31
+charset-normalizer==3.4.1
+click==8.1.8
+Flask==3.1.0
+idna==3.10
+itsdangerous==2.2.0
+Jinja2==3.1.6
+MarkupSafe==3.0.2
+requests==2.32.3
+urllib3==2.4.0
+Werkzeug==3.1.3
diff --git a/templates/index.html b/templates/index.html
new file mode 100644
index 0000000..58020a3
--- /dev/null
+++ b/templates/index.html
@@ -0,0 +1,290 @@
+ + + + + + Team Info Lookup + + + + + + + + +
+

Lookup FRC Team Info

+
+
+ + +
+ +
+

The following result is generated using an LLM; please note that anything said in the following block of text does not reflect my own personal views, and I do not vouch for its accuracy.

+ + +
+

I have data about the following teams:

+
+ <table>
+ <tr><th>Team</th><th>Name</th><th>Location</th></tr>
+ <tr><td>45</td><td>TechnoKats Robotics Team</td><td>Kokomo, Indiana, USA</td></tr>
+ <tr><td>71</td><td>Team Hammond</td><td>Hammond, Indiana, USA</td></tr>
+ <tr><td>135</td><td>Penn Robotics Black Knights</td><td>Mishawaka, Indiana, USA</td></tr>
+ <tr><td>234</td><td>Cyber Blue</td><td>Indianapolis, Indiana, USA</td></tr>
+ <tr><td>292</td><td>PantherTech</td><td>Russiaville, Indiana, USA</td></tr>
+ <tr><td>328</td><td>Penn Robotics Golden Rooks</td><td>Mishawaka, Indiana, USA</td></tr>
+ <tr><td>447</td><td>Team Roboto</td><td>Anderson, Indiana, USA</td></tr>
+ <tr><td>461</td><td>Westside Boiler Invasion</td><td>West Lafayette, Indiana, USA</td></tr>
+ <tr><td>829</td><td>The Digital Goats</td><td>Indianapolis, Indiana, USA</td></tr>
+ <tr><td>868</td><td>TechHOUNDS</td><td>Carmel, Indiana, USA</td></tr>
+ <tr><td>1018</td><td>Pike RoboDevils</td><td>Indianapolis, Indiana, USA</td></tr>
+ <tr><td>1024</td><td>Kil-A-Bytes</td><td>Indianapolis, Indiana, USA</td></tr>
+ <tr><td>1501</td><td>Team THRUST</td><td>Huntington, Indiana, USA</td></tr>
+ <tr><td>1741</td><td>Red Alert</td><td>Greenwood, Indiana, USA</td></tr>
+ <tr><td>1747</td><td>Harrison Boiler Robotics</td><td>West Lafayette, Indiana, USA</td></tr>
+ <tr><td>2171</td><td>RoboDogs</td><td>Crown Point, Indiana, USA</td></tr>
+ <tr><td>2197</td><td>Las Pumas</td><td>New Carlisle, Indiana, USA</td></tr>
+ <tr><td>3176</td><td>Purple Precision</td><td>Brownsburg, Indiana, USA</td></tr>
+ <tr><td>3487</td><td>Red Pride Robotics</td><td>Plainfield, Indiana, USA</td></tr>
+ <tr><td>3494</td><td>The Quadrangles</td><td>Bloomington, Indiana, USA</td></tr>
+ <tr><td>3940</td><td>CyberTooth</td><td>Kokomo, Indiana, USA</td></tr>
+ <tr><td>4272</td><td>Maverick Robotics</td><td>Lafayette, Indiana, USA</td></tr>
+ <tr><td>4485</td><td>Tribe Tech Robotics</td><td>Danville, Indiana, USA</td></tr>
+ <tr><td>4926</td><td>GalacTech</td><td>Columbus, Indiana, USA</td></tr>
+ <tr><td>5010</td><td>Tiger Dynasty</td><td>Fishers, Indiana, USA</td></tr>
+ <tr><td>5188</td><td>Area 5188: Classified Robotics</td><td>Terre Haute, Indiana, USA</td></tr>
+ <tr><td>5402</td><td>Wreckless Robotics</td><td>Logansport/Walton, Indiana, USA</td></tr>
+ <tr><td>5484</td><td>Career Academy Robotics - Wolf Pack</td><td>South Bend, Indiana, USA</td></tr>
+ <tr><td>6721</td><td>Tindley Trailblazers</td><td>Indianapolis, Indiana, USA</td></tr>
+ <tr><td>6956</td><td>SHAM-ROCK-BOTICS ☘</td><td>Westfield, Indiana, USA</td></tr>
+ <tr><td>7454</td><td>Huskies on Hogs</td><td>Evansville, Indiana, USA</td></tr>
+ <tr><td>7457</td><td>suPURDUEper Robotics</td><td>Indianapolis, Indiana, USA</td></tr>
+ <tr><td>7617</td><td>RoboBlazers</td><td>Carmel, Indiana, USA</td></tr>
+ <tr><td>7657</td><td>ThunderBots</td><td>Evansville, Indiana, USA</td></tr>
+ <tr><td>8103</td><td>Knight Robotics</td><td>Kendallville, Indiana, USA</td></tr>
+ <tr><td>8430</td><td>The Hatch Batch</td><td>Washington, Indiana, USA</td></tr>
+ <tr><td>10021</td><td>Guerin Catholic Golden Gears</td><td>Noblesville, Indiana, USA</td></tr>
+ <tr><td>10332</td><td>Carroll Charger Robotics</td><td>Fort Wayne, Indiana, USA</td></tr>
+ <tr><td>10492</td><td>Bosse BYTEForce</td><td>Evansville, Indiana, USA</td></tr>
+ </table>
+
+
+ +
+ + + + + \ No newline at end of file
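
Below is a minimal smoke-test sketch (not part of the patch) for checking the Ollama side of this change before starting the Flask app. The default base URL, the non-streaming `/api/generate` payload, and the `OLLAMA_MODEL` key are taken from `llm_integration/llm_model.py` and `config.json.example` above; the file name `smoke_test.py` and the use of Ollama's `/api/tags` model-listing endpoint are my own assumptions.

```python
# smoke_test.py: hypothetical helper, not included in the patch.
# Assumes config.json sits in the working directory and Ollama is listening
# on the same default base URL that OLLAMAConnector uses.
import json

import requests

OLLAMA_BASE_URL = "http://localhost:11434"  # default from llm_model.py


def main() -> None:
    # Read the same config file the application expects.
    with open("config.json") as f:
        config = json.load(f)
    model = config["OLLAMA_MODEL"]

    # /api/tags lists locally pulled models; warn if the configured one is missing.
    tags = requests.get(f"{OLLAMA_BASE_URL}/api/tags", timeout=10).json()
    local_models = {m["name"] for m in tags.get("models", [])}
    if model not in local_models:
        print(f"Model {model!r} is not pulled; run: ollama pull {model}")
        return

    # Same non-streaming /api/generate call shape as OLLAMAConnector.query_ollama.
    data = {"prompt": "Reply with the single word: ready", "model": model, "stream": False}
    response = requests.post(f"{OLLAMA_BASE_URL}/api/generate", json=data, timeout=120)
    response.raise_for_status()
    print(response.json()["response"])


if __name__ == "__main__":
    main()
```

If this prints a reply, `python main.py` should be able to reach the same model; if it fails, fix `config.json` or the Ollama setup before debugging the Flask routes.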