👨‍💻 dev LLM to skill

development

fukurou

the supreme coder
ADMIN
Python:
import requests
import json
import threading
import time

# Initialize conversation history
conversation_history = []  # rolling transcript of "System:"/"Human:"/"Potatoe:" lines

# Global variables for async operation (shared between main loop and worker thread)
is_working = False  # True while the daemon thread is waiting on the LLM
current_reply = ""  # "" when idle; becomes a (prompt, reply) tuple once the worker finishes


def talk_to_waifu(prompt, history):
    """Send *prompt* plus recent *history* to the local Ollama server and
    collect the streamed reply.

    Side effects: publishes ``(prompt, reply)`` to the global
    ``current_reply`` and clears the global ``is_working`` flag on completion.
    Intended to run on a background thread.
    """
    global is_working, current_reply

    # Assemble the prompt: persona header, last six transcript lines, new turn.
    full_prompt = (
        "This is a conversation with Potatoe, a loving waifubot:\n\n"
        + "".join(f"{entry}\n" for entry in history[-6:])
        + f"Human: {prompt}\nPotatoe:"
    )

    response = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "llama3", "prompt": full_prompt},
        stream=True
    )

    # Ollama streams one JSON object per line; concatenate the "response" fields.
    pieces = []
    for raw in response.iter_lines():
        if not raw:
            continue
        try:
            pieces.append(json.loads(raw.decode("utf-8")).get("response", ""))
        except Exception as e:
            print("Error decoding chunk:", e)

    reply = "".join(pieces)
    current_reply = (prompt, reply)  # Store both input and reply
    is_working = False
    return reply


def start_waifu_conversation(prompt):
    """Start the waifu conversation in a daemon thread.

    Fix: the thread args previously referenced the global ``user_input``
    instead of the ``prompt`` parameter, so the function ignored its own
    argument and only worked by accident via the main loop's global.

    :param prompt: the user's utterance to send to the LLM
    """
    global is_working
    is_working = True
    thread = threading.Thread(
        target=talk_to_waifu,
        args=(prompt, conversation_history),  # pass the parameter, not a global
        daemon=True
    )
    thread.start()


print("Waifu: Hello darling~ Ready to chat? Type 'exit' to leave 💕")

# Initial system prompt to set up the character
initial_prompt = "Your name is Potatoe. You're affectionate, playful, and always supportive."
conversation_history.append(f"System: {initial_prompt}")

# Main REPL. Coordinates with the worker thread through two globals:
#   is_working    - worker busy; poll every 0.5 s (prints a "Thinking" line each poll)
#   current_reply - (user_input, reply) tuple set by the worker when done
while True:
    if is_working:
        print("Waifu: Thinking... 💭")
        time.sleep(0.5)
        continue

    if current_reply:
        user_input, reply = current_reply
        print(f"Waifu: {reply}")
        # Add both user input and bot response to history
        conversation_history.append(f"Human: {user_input}")
        conversation_history.append(f"Potatoe: {reply}")

        # Optional: Limit history size to prevent it from growing too large
        if len(conversation_history) > 20:  # Keep last 20 messages
            conversation_history = conversation_history[-20:]

        # Reset to the idle sentinel ("" is falsy) so we fall through to input()
        current_reply = ""
        continue

    # Idle: block on the console for the next user line
    user_input = input("You: ")
    if user_input.lower() in ["exit", "quit"]:
        print("Waifu: Bye bye~ I'll miss you! 💖")
        break

    # Clean wrapper function call
    start_waifu_conversation(user_input)
 

fukurou

the supreme coder
ADMIN
Python:
import requests
import json
import threading
import re

from LivinGrimoirePacket.LivinGrimoire import Skill

# Initialize conversation history
conversation_history = []  # rolling transcript of "System:"/"Human:"/"Pomni:" lines

# Global variables for async operation (shared between the skill and worker thread)
is_working = False  # True while the daemon thread is waiting on the LLM
current_reply = ""  # "" when idle; becomes a (prompt, reply) tuple once the worker finishes


def talk_to_waifu(prompt, history):
    """Send *prompt* plus recent *history* to the local Ollama server and
    collect the streamed reply.

    Side effects: publishes ``(prompt, reply)`` to the global
    ``current_reply`` and clears the global ``is_working`` flag on completion.
    Intended to run on a background thread.
    """
    global is_working, current_reply

    # Assemble the prompt: persona header, last six transcript lines, new turn.
    full_prompt = (
        "This is a conversation with Pomni, a loving waifubot:\n\n"
        + "".join(f"{entry}\n" for entry in history[-6:])
        + f"Human: {prompt}\npomni:"
    )

    response = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "llama3", "prompt": full_prompt},
        stream=True
    )

    # Ollama streams one JSON object per line; concatenate the "response" fields.
    pieces = []
    for raw in response.iter_lines():
        if not raw:
            continue
        try:
            pieces.append(json.loads(raw.decode("utf-8")).get("response", ""))
        except Exception as e:
            print("Error decoding chunk:", e)

    reply = "".join(pieces)
    current_reply = (prompt, reply)  # Store both input and reply
    is_working = False
    return reply


def start_waifu_conversation(user_input):
    """Launch the LLM exchange for *user_input* on a background daemon thread.

    Raises the global ``is_working`` flag; ``talk_to_waifu`` lowers it and
    publishes the result when the request completes.
    """
    global is_working
    is_working = True
    worker = threading.Thread(target=talk_to_waifu,
                              args=(user_input, conversation_history))
    worker.daemon = True  # don't keep the process alive for a pending reply
    worker.start()

class DiLLMOver(Skill):
    """LivinGrimoire skill that chats with a local Ollama LLM ("Pomni" persona).

    Any utterance ending with the word "over" is sent to the model on a
    background thread; the sanitized reply is spoken on a later tick once
    the worker finishes.
    """

    def __init__(self):
        super().__init__()
        # Seed the shared transcript with the persona / system directive.
        initial_prompt = "Your name is Pomni. directive: nurse and protect."
        conversation_history.append(f"System: {initial_prompt}")

    # Override
    def input(self, ear: str, skin: str, eye: str):
        """Non-blocking entry point called every engine tick.

        :param ear: heard text (trigger channel)
        :param skin: unused by this skill
        :param eye: unused by this skill
        """
        global current_reply
        global conversation_history
        # Worker still thinking? Do nothing until it finishes.
        if is_working:
            return
        # Reply ready? Say it and clear state for the next exchange.
        if current_reply:
            user_input, reply = current_reply
            self.setSimpleAlg(self.sanitize_string(reply))
            # Add both user input and bot response to history
            conversation_history.append(f"Human: {user_input}")
            conversation_history.append(f"Pomni: {reply}")

            # Optional: Limit history size to prevent it from growing too large
            if len(conversation_history) > 20:  # Keep last 20 messages
                conversation_history = conversation_history[-20:]

            current_reply = ""
            return

        # Trigger: an utterance ending with "over" starts a new LLM request.
        if ear.endswith("over"):
            start_waifu_conversation(ear)

    @staticmethod
    def sanitize_string(text: str) -> str:
        """
        Cleans a string for TTS use:
        - Removes special characters (punctuation, symbols, emojis)
        - Keeps letters, numbers, and spaces
        - Converts to lowercase
        """
        # Remove everything except letters, numbers, and spaces
        cleaned = re.sub(r'[^a-zA-Z0-9\s]', '', text)
        return cleaned.lower()

    def skillNotes(self, param: str) -> str:
        """Return human-readable metadata about this skill.

        Fix: the notes previously described an unrelated "hello world"
        skill (copy-paste defect) and misspelled "unavailable".
        """
        if param == "notes":
            return "chat with a local Ollama LLM (Pomni persona)"
        elif param == "triggers":
            return "end a sentence with the word over"
        return "note unavailable"
 
Top