class TextGen:
    """Thin wrapper around a local llama.cpp GGUF model for text completion.

    Loads the model once at construction time and exposes a single
    generate() call that returns (and prints) the completion.
    """

    llm: llama_cpp.Llama  # the loaded llama.cpp model instance

    def __init__(self, model_path: str, n_ctx: int, n_gpu_layers: int):
        """Load the GGUF model.

        model_path   -- filesystem path to the .gguf model file
        n_ctx        -- maximum context window size, in tokens
        n_gpu_layers -- layers to offload to GPU (-1 = all, 0 = CPU only)
        """
        # BUG FIX: the original bound the model to a *local* variable
        # (`llm = ...`), so `self.llm` was never assigned and generate()
        # raised AttributeError. It also hard-coded the model path,
        # silently ignoring the `model_path` argument.
        self.llm = llama_cpp.Llama(
            model_path=model_path,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            verbose=False,  # suppress llama.cpp's model-loading chatter
        )

    def generate(self, prompt: str) -> str:
        """Run the model on `prompt` and return the generated text.

        Note: echo=True means the returned string includes the prompt
        itself followed by the completion.
        """
        output = self.llm(
            prompt,
            max_tokens=200,  # cap on newly generated tokens
            echo=True,       # include the prompt in the returned text
        )

        # llama-cpp-python returns an OpenAI-style completion dict;
        # the text lives at choices[0]['text'].
        text = output['choices'][0]['text']
        print(text)

        return text
class OpenMeteo:
    """Open-Meteo forecast client returning hourly weather as a DataFrame."""

    openmeteo: openmeteo_requests.Client  # API client over the cached/retrying session

    def __init__(self):
        # retry_session (module level) wraps a requests-cache session with
        # retries, so repeat calls within the cache TTL never hit the network.
        self.openmeteo = openmeteo_requests.Client(session=retry_session)

    def get_weather(self, start_date: str = None, end_date: str = None) -> pd.DataFrame:
        """Fetch hourly temperature (°F) and rain for a single day.

        start_date / end_date -- ISO dates ("YYYY-MM-DD"). BUG FIX: the
        original hard-coded "2025-09-30", so the "forecast" went stale the
        day after it was written; both now default to today's date in the
        request timezone (America/Chicago).

        Returns a DataFrame with columns: date (tz-aware, US/Central),
        temperature_2m, rain.
        """
        if start_date is None or end_date is None:
            from datetime import datetime
            today = datetime.now(pytz.timezone('America/Chicago')).strftime('%Y-%m-%d')
            start_date = start_date or today
            end_date = end_date or today

        params = {
            # Hard-coded location (Fayetteville, AR area) -- TODO: parameterize
            "latitude": 36.0626,
            "longitude": -94.1574,
            "hourly": ["temperature_2m", "rain"],
            "timezone": "America/Chicago",
            "start_date": start_date,
            "end_date": end_date,
            "temperature_unit": 'fahrenheit',
        }

        responses = self.openmeteo.weather_api(URL, params=params)

        # Process first location only; a single coordinate pair was requested.
        response = responses[0]
        print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
        print(f"Elevation: {response.Elevation()} m asl")
        print(f"Timezone: {response.Timezone()}{response.TimezoneAbbreviation()}")
        print(f"Timezone difference to GMT+0: {response.UtcOffsetSeconds()}s")

        # Hourly values come back in the same order they were requested:
        # index 0 = temperature_2m, index 1 = rain.
        hourly = response.Hourly()
        hourly_temperature_2m = hourly.Variables(0).ValuesAsNumpy()
        hourly_rain = hourly.Variables(1).ValuesAsNumpy()

        # Build the timestamp index from the response's epoch range; the
        # range is left-inclusive so its length matches the value arrays.
        hourly_data = {"date": pd.date_range(
            start = pd.to_datetime(hourly.Time(), unit = "s", utc = True).astimezone(pytz.timezone('US/Central')),
            end = pd.to_datetime(hourly.TimeEnd(), unit = "s", utc = True).astimezone(pytz.timezone('US/Central')),
            freq = pd.Timedelta(seconds = hourly.Interval()),
            inclusive = "left"
        )}

        hourly_data["temperature_2m"] = hourly_temperature_2m
        hourly_data['rain'] = hourly_rain

        hourly_dataframe = pd.DataFrame(data = hourly_data)
        print("\nHourly data\n", hourly_dataframe)

        return hourly_dataframe