trying to get tooling working

0x01FE 2025-10-02 13:12:46 -05:00
parent 4d71275f12
commit 16d4998d70
4 changed files with 46 additions and 17 deletions

main.py

@@ -1,11 +1,24 @@
import tts
from textgen import tool_funcs
from textgen import llm

MODEL_PATH = "./textgen/models/mistral-7b-instruct-v0.3.Q4_K_M.gguf"

t = tts.TTS()
save_path = 'audio/weather.wav'
t.create_audio(tool_funcs.get_high_low(), save_path)

l = llm.TextGen(
    MODEL_PATH,
    2048,  # n_ctx: context window size
    0      # n_gpu_layers: 0 keeps inference on the CPU
)

r = l.chat_completion("What is the weather like today?")
print(r)
t.create_audio(r, save_path)
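For context, llama-cpp-python's create_chat_completion takes OpenAI-style function schemas, so the tools.json that the textgen module loads below presumably looks something like this sketch. The file itself is not part of the diff; only the get_high_low name comes from the code above, the rest is an assumed shape.

# Hypothetical sketch of ./textgen/tools.json, written as the Python dict it
# would parse into. The shape follows the OpenAI-style tool schema that
# llama_cpp.Llama.create_chat_completion accepts; it is not taken from this repo.
EXAMPLE_TOOLS = {
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_high_low",
                "description": "Get today's forecast high and low temperatures.",
                "parameters": {"type": "object", "properties": {}},
            },
        }
    ]
}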

textgen/llm.py
@@ -1,10 +1,11 @@
# Import the Llama class from the llama_cpp library
import llama_cpp
import json
+import random

from . import tool_funcs

-tools: list[dict] = json.loads(open('tools.json', 'r').read())['tools']
+tools: list[dict] = json.loads(open('./textgen/tools.json', 'r').read())['tools']
class TextGen:
@@ -19,8 +20,8 @@ class TextGen:
        # Provide the path to your downloaded .gguf file.
        # n_ctx is the maximum context size (number of tokens) the model can handle.
        # n_gpu_layers specifies how many layers to offload to the GPU; -1 offloads all possible layers.
-        llm = llama_cpp.Llama(
-            model_path="./models/mistral-7b-instruct-v0.2.Q4_K_M.gguf",  # Path to your GGUF model
+        self.llm = llama_cpp.Llama(
+            model_path=model_path,  # Path to the GGUF model
            n_ctx=n_ctx,  # Context window size
            n_gpu_layers=n_gpu_layers,  # Layers to offload to the GPU; 0 runs entirely on CPU
            verbose=False,  # Suppress verbose output
@@ -52,14 +53,18 @@ class TextGen:
"content": user_message
})
print(tools)
response = self.llm.create_chat_completion(
messages=self.messages,
tools=tools,
tool_choice='auto'
tool_choice='auto',
seed=random.randint(0, 20000000000)
)
tool_call = response['choices'][0]['message'].get('tool_calls')
if not tool_call:
# print(response['choices'][0]['message']['content'])
return response['choices'][0]['message']['content']
call_info = tool_call[0]['function']
@@ -69,4 +74,15 @@ class TextGen:
        tool_output = tool_funcs.get_high_low()

        self.messages.append(response['choices'][0]['message'])
+        self.messages.append(
+            {
+                "role": "tool",
+                "content": tool_output
+            }
+        )

        final_response = self.llm.create_chat_completion(messages=self.messages)

        return final_response['choices'][0]['message']['content']
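Two asides on the hunks above. llama.cpp seeds are 32-bit, so randint(0, 20000000000) can exceed the representable range; randint(0, 2**32 - 1) would be the safer bound. And the tool call is hard-wired to get_high_low; below is a minimal sketch of name-based dispatch, assuming the standard 'name'/'arguments' keys of the tool_calls payload (run_tool is a hypothetical helper, not in this diff).

# Sketch only: resolve the requested tool on tool_funcs by name instead of
# hard-coding get_high_low.
import json
from textgen import tool_funcs

def run_tool(call_info: dict) -> str:
    func = getattr(tool_funcs, call_info['name'])             # e.g. get_high_low
    kwargs = json.loads(call_info.get('arguments') or '{}')   # JSON-encoded args
    return str(func(**kwargs))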

@@ -36,10 +36,10 @@ class OpenMeteo:
        # Process first location. Add a for-loop for multiple locations or weather models.
        response = responses[0]
-        print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
-        print(f"Elevation: {response.Elevation()} m asl")
-        print(f"Timezone: {response.Timezone()} {response.TimezoneAbbreviation()}")
-        print(f"Timezone difference to GMT+0: {response.UtcOffsetSeconds()}s")
+        # print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
+        # print(f"Elevation: {response.Elevation()} m asl")
+        # print(f"Timezone: {response.Timezone()} {response.TimezoneAbbreviation()}")
+        # print(f"Timezone difference to GMT+0: {response.UtcOffsetSeconds()}s")

        # Process hourly data. The order of variables needs to be the same as requested.
        hourly = response.Hourly()
@@ -57,7 +57,7 @@ class OpenMeteo:
        hourly_data['rain'] = hourly_rain

        hourly_dataframe = pd.DataFrame(data=hourly_data)
-        print("\nHourly data\n", hourly_dataframe)
+        # print("\nHourly data\n", hourly_dataframe)

        return hourly_dataframe
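The returned dataframe feeds tool_funcs.get_high_low(), which main.py calls but this diff never shows. A plausible shape, assuming the hourly frame carries an Open-Meteo temperature_2m column alongside the rain column set above (both the get_forecast method name and the column name are assumptions):

# Hypothetical sketch of get_high_low; not the repo's implementation.
def get_high_low() -> str:
    df = OpenMeteo().get_forecast()  # assumed entry point returning hourly_dataframe
    return (f"Today's high is {df['temperature_2m'].max():.0f} "
            f"and the low is {df['temperature_2m'].min():.0f}.")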

tts.py

@@ -14,7 +14,7 @@ class TTS:
    def create_audio(self, text: str, save_path: str) -> None:
        generator = self.pipeline(text, voice='af_heart')
        for i, (gs, ps, audio) in enumerate(generator):
-            print(i, gs, ps)
+            # print(i, gs, ps)
            display(Audio(data=audio, rate=24000, autoplay=i==0))
            sf.write(save_path, audio, 24000)
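Two caveats in create_audio: display and Audio come from IPython.display, so the method only runs as-is inside a notebook, and because sf.write sits inside the loop, a multi-segment generator overwrites save_path each iteration and keeps only the last chunk. A script-safe variant (sketch only, same pipeline call, concatenating segments before writing):

# Sketch: skip the notebook preview outside IPython and write all segments.
import numpy as np
import soundfile as sf

def create_audio(self, text: str, save_path: str) -> None:
    segments = []
    for i, (gs, ps, audio) in enumerate(self.pipeline(text, voice='af_heart')):
        try:
            from IPython.display import Audio, display
            display(Audio(data=audio, rate=24000, autoplay=i == 0))
        except ImportError:
            pass  # not running in a notebook; skip the inline player
        segments.append(audio)
    sf.write(save_path, np.concatenate(segments), 24000)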