Venice.ai skill dev

fukurou

the supreme coder
ADMIN
Python:
import requests
import json

def call_venice_api(prompt, api_key, timeout=30):
    """Send *prompt* to the Venice.ai text-generation endpoint.

    Args:
        prompt: Text prompt to generate a completion for.
        api_key: Venice.ai bearer token.
        timeout: Seconds to wait for the HTTP response (default 30).
            Without this, a dead host would hang the call forever and the
            Timeout handler below could never fire.

    Returns:
        The decoded JSON response dict on success, or None if any request
        error occurred (the error is printed).
    """
    api_url = "https://api.venice.ai/v1/generate"

    payload = {
        "prompt": prompt,
        "model": "venice-uncensored-1.1",
        "max_tokens": 150
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    try:
        # json= lets requests serialize the payload and set the content type.
        response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
        response.raise_for_status()  # Raise an exception for 4xx/5xx status codes
        return response.json()
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except requests.exceptions.ConnectionError as conn_err:
        print(f"Connection error occurred: {conn_err}")
    except requests.exceptions.Timeout as timeout_err:
        print(f"Timeout error occurred: {timeout_err}")
    except requests.exceptions.RequestException as req_err:
        print(f"An error occurred: {req_err}")
    return None  # explicit: every error path yields None

# Example usage — guarded so importing this module does not fire a network call.
if __name__ == "__main__":
    api_key = "your_api_key_here"
    prompt = "Tell me a dirty joke."
    response_data = call_venice_api(prompt, api_key)
    if response_data:
        print("Generated text:", response_data.get("choices", [{}])[0].get("text", ""))
 

fukurou

the supreme coder
ADMIN
Python:
import threading
import requests
import json

# Venice.ai API bearer token (this skill was adapted from a DeepSeek skill,
# where the constant was named DEEPSEEK_API_KEY).
# NOTE(review): placeholder — load the real key from DLC/api_keys/venice_api_key.txt
# rather than hard-coding it in source.
VENICE_API_KEY = "your_venice_api_key_here"

class DaVeniceRun(ShorniSplash):
    """Skill that forwards the user's spoken input to the Venice.ai API.

    Triggers when the heard input ends with the word "run". The HTTP call
    runs on a daemon thread so input() never blocks; the result is
    published via the inherited ``_result`` / ``output_result`` mechanism
    (presumably provided by ShorniSplash — TODO confirm).
    """

    def __init__(self):
        super().__init__()
        self.input_text = ""  # Temporary storage for input text
        # Venice.ai api key (place it in DLC/api_keys/venice_api_key.txt)
        self.apikey: str = VENICE_API_KEY

    def trigger(self, ear: str, skin: str, eye: str) -> bool:
        # Fire only when the heard string ends with the word "run".
        return ear.strip().endswith("run")

    @staticmethod
    def _async_func(this_cls):
        """Worker executed on a background thread: call the API, store the result."""
        data = {
            "prompt": this_cls.input_text,
            "model": "venice-uncensored-1.1",  # Specify the model you want to use
            "max_tokens": 150  # Adjust the number of tokens as needed
        }

        try:
            this_cls._result = this_cls.call_venice_api(data, this_cls.apikey)
        except Exception as e:
            # Don't let the thread die silently — surface the error as output.
            this_cls._result = f"Error calling Venice.ai API: {str(e)}"

    def input(self, ear: str, skin: str, eye: str):
        if self.trigger(ear, skin, eye):
            # Strip the trailing trigger word "run". (The original
            # rsplit(" ", 1)[0] left a bare "run" input unchanged.)
            stripped = ear.strip()
            self.input_text = stripped[: -len("run")].strip()

            # Fire the API call on a daemon thread so input() returns at once.
            my_thread = threading.Thread(
                target=self._async_func,
                args=(self,)  # Pass self as the only argument
            )
            my_thread.daemon = True
            my_thread.start()

        # Publish a finished result, if any, then clear it.
        if len(self._result) > 0:
            self.output_result()
            self._result = ""

    @staticmethod
    def call_venice_api(input_text: dict, venice_api_key: str) -> str:
        """POST the prepared request dict (*input_text* is the full payload,
        not just the prompt) to Venice.ai and return the generated text.

        Raises:
            requests.exceptions.RequestException: on HTTP/network/timeout
                failure; _async_func converts it into an error message.
        """
        api_url = "https://api.venice.ai/v1/generate"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {venice_api_key}"
        }
        # timeout= keeps the worker thread from hanging forever on a dead host.
        response = requests.post(api_url, json=input_text, headers=headers, timeout=30)
        return response.json().get("choices", [{}])[0].get("text", "No response from API")

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "Venice.ai rest API"
        elif param == "triggers":
            return "end your input with run."
        return "note unavailable"
 

fukurou

the supreme coder
ADMIN
Version without try/except:
Python:
import threading
import requests
import json

# Venice.ai API bearer token (this skill was adapted from a DeepSeek skill,
# where the constant was named DEEPSEEK_API_KEY).
# NOTE(review): placeholder — load the real key from DLC/api_keys/venice_api_key.txt
# rather than hard-coding it in source.
VENICE_API_KEY = "your_venice_api_key_here"

class DaVeniceRun(ShorniSplash):
    """Skill that forwards the user's spoken input to the Venice.ai API.

    "No try" variant: success/failure is decided by checking the HTTP
    status code instead of catching exceptions. Triggers when the heard
    input ends with the word "run"; the HTTP call runs on a daemon thread
    so input() never blocks.
    """

    def __init__(self):
        super().__init__()
        self.input_text = ""  # Temporary storage for input text
        # Venice.ai api key (place it in DLC/api_keys/venice_api_key.txt)
        self.apikey: str = VENICE_API_KEY

    def trigger(self, ear: str, skin: str, eye: str) -> bool:
        # Fire only when the heard string ends with the word "run".
        return ear.strip().endswith("run")

    @staticmethod
    def _async_func(this_cls):
        """Worker executed on a background thread: call the API, store the result.

        NOTE(review): with no try/except here, a raised requests exception
        (connection refused, timeout) kills this thread silently and no
        result is ever published — intentional per the "no try" variant.
        """
        data = {
            "prompt": this_cls.input_text,
            "model": "venice-uncensored-1.1",  # Specify the model you want to use
            "max_tokens": 150  # Adjust the number of tokens as needed
        }

        response = this_cls.call_venice_api(data, this_cls.apikey)
        if response.status_code == 200:
            this_cls._result = response.json().get("choices", [{}])[0].get("text", "No response from API")
        else:
            this_cls._result = f"Error calling Venice.ai API: Status code {response.status_code}"

    def input(self, ear: str, skin: str, eye: str):
        if self.trigger(ear, skin, eye):
            # Strip the trailing trigger word "run". (The original
            # rsplit(" ", 1)[0] left a bare "run" input unchanged.)
            stripped = ear.strip()
            self.input_text = stripped[: -len("run")].strip()

            # Fire the API call on a daemon thread so input() returns at once.
            my_thread = threading.Thread(
                target=self._async_func,
                args=(self,)  # Pass self as the only argument
            )
            my_thread.daemon = True
            my_thread.start()

        # Publish a finished result, if any, then clear it.
        if len(self._result) > 0:
            self.output_result()
            self._result = ""

    @staticmethod
    def call_venice_api(input_text: dict, venice_api_key: str) -> requests.Response:
        """POST the prepared request dict (*input_text* is the full payload)
        to Venice.ai and return the raw Response.

        The return annotation was ``-> str`` but the function returns the
        Response object; the caller inspects ``status_code`` and ``json()``.
        """
        api_url = "https://api.venice.ai/v1/generate"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {venice_api_key}"
        }
        # timeout= keeps the worker thread from hanging forever on a dead host.
        response = requests.post(api_url, json=input_text, headers=headers, timeout=30)
        return response

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "Venice.ai rest API"
        elif param == "triggers":
            return "end your input with run."
        return "note unavailable"
 

fukurou

the supreme coder
ADMIN

Uncensored & High Compliance Models​

python
"model": "venice-uncensored-1.1" # Minimal filtering, high compliance
"model": "dolphin-2.9.2-qwen2-72b" # Most uncensored, great for raw output

⚡ Fast & Cost-Efficient Models​

python
"model": "qwen3-4b" # Venice Small – fast and cheap
"model": "llama-3.2-3b" # Fastest Llama variant

🧠 Reasoning & Intelligence Models​

python
"model": "deepseek-r1-671b" # Advanced reasoning, expensive
"model": "qwen-2.5-qwq-32b" # Venice Reasoning – logic-focused
"model": "llama-3.1-405b" # Most intelligent, massive context

🧩 Coding & Function Calling​

python
"model": "qwen-2.5-coder-32b" # Code generation
"model": "deepseek-coder-v2-lite" # Lightweight coding model

👁️ Vision-Capable Models (if you expand later)​

python
"model": "mistral-31-24b"
 

fukurou

the supreme coder
ADMIN

🔥 Top Ollama Models for Dirty Talk & Roleplay​

Model Name — Description
MythoMax-L2-13B — Legendary for uncensored storytelling, flirtation, and spicy roleplay.
Nous-Hermes-Llama2-13B — Smart and expressive, great for seductive or romantic dialogue.
Chronos-Hermes-13B — A creative fusion of Chronos + Hermes, tuned for fantasy and adult RP.
Vicuna-13B-v1.5-16K — Handles long-form, complex character interactions with minimal filtering.
Stheno-v3.2-Zeta — Designed for 1-on-1 roleplay and scenario-driven chats.

These models are all uncensored, meaning they won’t shut down or refuse adult-themed prompts. You can run them locally with Ollama using commands like:

bash
ollama pull mythomax:13b
ollama run mythomax:13b

Just make sure your system has enough RAM and VRAM—most of these need 24–32 GB RAM and a decent GPU to run smoothly.
 

fukurou

the supreme coder
ADMIN
with character maintaining:
Python:
import threading
import requests
import json

# Venice.ai API bearer token (this skill was adapted from a DeepSeek skill,
# where the constant was named DEEPSEEK_API_KEY).
# NOTE(review): placeholder — load the real key from DLC/api_keys/venice_api_key.txt
# rather than hard-coding it in source.
VENICE_API_KEY = "your_venice_api_key_here"

class DaVeniceRun(ShorniSplash):
    """Skill that forwards the user's spoken input to the Venice.ai API,
    prepending a persistent context/preferences string to each prompt.

    Triggers when the heard input ends with the word "run"; the HTTP call
    runs on a daemon thread so input() never blocks.
    """

    def __init__(self):
        super().__init__()
        self.input_text = ""  # Temporary storage for input text
        self.context = ""  # Store context and preferences here
        # Venice.ai api key (place it in DLC/api_keys/venice_api_key.txt)
        self.apikey: str = VENICE_API_KEY

    def trigger(self, ear: str, skin: str, eye: str) -> bool:
        # Fire only when the heard string ends with the word "run".
        return ear.strip().endswith("run")

    @staticmethod
    def _async_func(this_cls):
        """Worker executed on a background thread: call the API, store the result.

        The stored context is prepended to the prompt so character and
        preference information persists across calls.
        """
        data = {
            "prompt": f"{this_cls.context}\n{this_cls.input_text}",
            "model": "venice-uncensored-1.1",  # Specify the model you want to use
            "max_tokens": 150  # Adjust the number of tokens as needed
        }

        response = this_cls.call_venice_api(data, this_cls.apikey)
        if response.status_code == 200:
            this_cls._result = response.json().get("choices", [{}])[0].get("text", "No response from API")
        else:
            this_cls._result = f"Error calling Venice.ai API: Status code {response.status_code}"

    def input(self, ear: str, skin: str, eye: str):
        if self.trigger(ear, skin, eye):
            # Strip the trailing trigger word "run". (The original
            # rsplit(" ", 1)[0] left a bare "run" input unchanged.)
            stripped = ear.strip()
            self.input_text = stripped[: -len("run")].strip()

            # Fire the API call on a daemon thread so input() returns at once.
            my_thread = threading.Thread(
                target=self._async_func,
                args=(self,)  # Pass self as the only argument
            )
            my_thread.daemon = True
            my_thread.start()

        # Publish a finished result, if any, then clear it.
        if len(self._result) > 0:
            self.output_result()
            self._result = ""

    @staticmethod
    def call_venice_api(input_text: dict, venice_api_key: str) -> requests.Response:
        """POST the prepared request dict (*input_text* is the full payload)
        to Venice.ai and return the raw Response.

        The return annotation was ``-> str`` but the function returns the
        Response object; the caller inspects ``status_code`` and ``json()``.
        """
        api_url = "https://api.venice.ai/v1/generate"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {venice_api_key}"
        }
        # timeout= keeps the worker thread from hanging forever on a dead host.
        response = requests.post(api_url, json=input_text, headers=headers, timeout=30)
        return response

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "Venice.ai rest API"
        elif param == "triggers":
            return "end your input with run."
        return "note unavailable"

    def set_context(self, context: str):
        """Set the context and preferences to be maintained across API calls."""
        self.context = context

    def get_context(self) -> str:
        """Get the current context and preferences."""
        return self.context
 
Top