Commit 4d449f5333 by 0x01FE, 2025-09-30 12:01:44 -05:00
10 changed files with 176 additions and 0 deletions

.gitignore vendored Normal file (+8)

@@ -0,0 +1,8 @@
.cache.sqlite
__pycache__/
.vscode/
latlong
audio/
venv/

install.sh Executable file (+1)

@@ -0,0 +1 @@
#!/bin/sh
# espeak-ng provides the fallback grapheme-to-phoneme engine that
# kokoro/misaki use for out-of-vocabulary words.
apt-get -y install espeak-ng

main.py Normal file (+11)

@@ -0,0 +1,11 @@
import tts
from textgen import tool_funcs

# Build a spoken weather report: get today's high/low as a sentence,
# then synthesize it to a .wav file.
t = tts.TTS()
save_path = 'audio/weather.wav'
t.create_audio(tool_funcs.get_high_low(), save_path)

requirements.txt Normal file (+11)

@@ -0,0 +1,11 @@
kokoro>=0.9.4
soundfile
misaki[en]
llama-cpp-python
openmeteo-requests
requests-cache
retry-requests
numpy
pandas
pytz
ipython

textgen/__init__.py Normal file (+0)

textgen/llm.py Normal file (+40)

@@ -0,0 +1,40 @@
# Import the Llama class from the llama_cpp library
import llama_cpp
import json
import pathlib

# Load the tool definitions that sit next to this module, so the path
# works regardless of the current working directory.
_TOOLS_PATH = pathlib.Path(__file__).parent / 'tools.json'
with open(_TOOLS_PATH, 'r') as f:
    tools = json.load(f)['tools']


class TextGen:
    llm: llama_cpp.Llama

    def __init__(self, model_path: str, n_ctx: int, n_gpu_layers: int):
        # Instantiate the Llama model.
        # model_path is the path to your downloaded .gguf file.
        # n_ctx is the maximum context size (number of tokens) the model can handle.
        # n_gpu_layers is how many layers to offload to the GPU; -1 offloads
        # all possible layers, 0 disables GPU offload.
        self.llm = llama_cpp.Llama(
            model_path=model_path,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            verbose=False  # Suppress verbose output
        )

    def generate(self, prompt: str) -> str:
        # Generate text. The llm object is callable; pass the prompt to it.
        # max_tokens caps the number of tokens to generate.
        # echo=True includes the prompt in the returned text.
        output = self.llm(
            prompt,
            max_tokens=200,
            echo=True
        )
        # The output is a dictionary; the generated text is in
        # output['choices'][0]['text'].
        text = output['choices'][0]['text']
        print(text)
        return text
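
Note that llm.py loads tools.json but never hands the tool list to the model. A minimal sketch of how it could be wired into llama-cpp-python's create_chat_completion, continuing in llm.py's namespace (hypothetical usage; whether the model actually emits tool calls depends on the GGUF model and chat format):

tg = TextGen(model_path='./models/mistral-7b-instruct-v0.2.Q4_K_M.gguf',
             n_ctx=2048, n_gpu_layers=-1)
response = tg.llm.create_chat_completion(
    messages=[{'role': 'user', 'content': "What's the weather like today?"}],
    tools=tools,          # the list loaded from tools.json above
    tool_choice='auto',
)
message = response['choices'][0]['message']
if message.get('tool_calls'):
    # The model requested a function, e.g. 'get_weather'
    print(message['tool_calls'][0]['function']['name'])
else:
    print(message.get('content'))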

textgen/tool_funcs.py Normal file (+12)

@@ -0,0 +1,12 @@
from . import weather

wapi = weather.OpenMeteo()  # Weather API client


def get_high_low() -> str:
    # Summarize today's forecast as a single spoken-friendly sentence.
    weather_dataframe = wapi.get_weather()
    high = round(weather_dataframe['temperature_2m'].max())
    low = round(weather_dataframe['temperature_2m'].min())
    return f'The high today is {high}, and the low is {low}.'
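
The name in tools.json still has to be mapped to a Python callable when the model asks for it. A minimal dispatch sketch (TOOL_DISPATCH and run_tool are hypothetical, not part of the commit):

from textgen import tool_funcs

# Map tool names from tools.json to their implementations;
# 'get_weather' is answered by get_high_low() for now.
TOOL_DISPATCH = {
    'get_weather': tool_funcs.get_high_low,
}

def run_tool(name: str) -> str:
    # Invoke the implementation behind a tool name requested by the model.
    func = TOOL_DISPATCH.get(name)
    if func is None:
        raise ValueError(f'unknown tool: {name}')
    return func()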

textgen/tools.json Normal file (+11)

@@ -0,0 +1,11 @@
{
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the weather where the user lives"
            }
        }
    ]
}
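
For reference, OpenAI-style function tools usually also carry a "parameters" JSON-schema block describing the arguments; get_weather takes none, so an explicit empty schema would look like this in Python (a hypothetical extension, not part of the commit):

get_weather_tool = {
    'type': 'function',
    'function': {
        'name': 'get_weather',
        'description': 'Get the weather where the user lives',
        # Empty schema: this tool takes no arguments.
        'parameters': {'type': 'object', 'properties': {}},
    },
}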

textgen/weather.py Normal file (+64)

@@ -0,0 +1,64 @@
import openmeteo_requests
import pytz
import pandas as pd
import requests_cache
from retry_requests import retry

# Setup the Open-Meteo API client with cache and retry on error
cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
retry_session = retry(cache_session, retries=5, backoff_factor=0.2)

URL = "https://api.open-meteo.com/v1/forecast"


class OpenMeteo:
    openmeteo: openmeteo_requests.Client

    def __init__(self):
        self.openmeteo = openmeteo_requests.Client(session=retry_session)

    def get_weather(self) -> pd.DataFrame:
        params = {
            # Location and date range are hardcoded; the date is pinned to
            # a single day rather than using "forecast_days".
            "latitude": 36.0626,
            "longitude": -94.1574,
            "hourly": ["temperature_2m", "rain"],
            "timezone": "America/Chicago",
            # "forecast_days": 1,
            "start_date": "2025-09-30",
            "end_date": "2025-09-30",
            "temperature_unit": 'fahrenheit',
            # "bounding_box": "-90,-180,90,180",
            # "models": "dwd_icon_global"
        }
        responses = self.openmeteo.weather_api(URL, params=params)

        # Process first location. Add a for-loop for multiple locations or weather models.
        response = responses[0]
        print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
        print(f"Elevation: {response.Elevation()} m asl")
        print(f"Timezone: {response.Timezone()} {response.TimezoneAbbreviation()}")
        print(f"Timezone difference to GMT+0: {response.UtcOffsetSeconds()}s")

        # Process hourly data. The order of variables needs to be the same as requested.
        hourly = response.Hourly()
        hourly_temperature_2m = hourly.Variables(0).ValuesAsNumpy()
        hourly_rain = hourly.Variables(1).ValuesAsNumpy()

        hourly_data = {"date": pd.date_range(
            start=pd.to_datetime(hourly.Time(), unit="s", utc=True).astimezone(pytz.timezone('America/Chicago')),
            end=pd.to_datetime(hourly.TimeEnd(), unit="s", utc=True).astimezone(pytz.timezone('America/Chicago')),
            freq=pd.Timedelta(seconds=hourly.Interval()),
            inclusive="left"
        )}
        hourly_data["temperature_2m"] = hourly_temperature_2m
        hourly_data["rain"] = hourly_rain

        hourly_dataframe = pd.DataFrame(data=hourly_data)
        print("\nHourly data\n", hourly_dataframe)
        return hourly_dataframe
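
A quick sketch of using OpenMeteo directly, assuming it runs from the repository root (column names follow the dataframe built above):

from textgen import weather

wapi = weather.OpenMeteo()
df = wapi.get_weather()

# One row per hour: 'date', 'temperature_2m' (°F), 'rain' (mm).
total_rain = df['rain'].sum()
hottest_hour = df.loc[df['temperature_2m'].idxmax(), 'date']
print(f'Total rain: {total_rain:.1f} mm; hottest hour starts {hottest_hour}')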

tts.py Normal file (+20)

@@ -0,0 +1,20 @@
from kokoro import KPipeline
from IPython.display import display, Audio
import numpy as np
import soundfile as sf


class TTS:
    pipeline = KPipeline(lang_code='a')  # 'a' selects American English

    def __init__(self):
        pass

    # save_path should end in .wav
    def create_audio(self, text: str, save_path: str) -> None:
        generator = self.pipeline(text, voice='af_heart')
        # The pipeline yields one audio chunk per text segment; collect every
        # chunk so the whole utterance is written, rather than overwriting
        # the file with only the final chunk.
        chunks = []
        for i, (gs, ps, audio) in enumerate(generator):
            print(i, gs, ps)
            display(Audio(data=audio, rate=24000, autoplay=i == 0))  # notebook playback
            chunks.append(np.asarray(audio))
        sf.write(save_path, np.concatenate(chunks), 24000)