👨‍💻 dev creamers of the corn

development

owly

闇の伝説
Staff member
戦闘 コーダー
we have reached a point where we have many skills. not great not terrible.
thing is, now we need a catalog, to serve as a quick reference as well as a roadmap of shit to come

**********
*defconic*
**********
motivational
philosophy
spider sense
hunger
home returner

****************
*conversational*
****************
logger
reminders
cusser

*******
*filth*
*******
echo

*******
*tools*
*******
recipes
list extractor
auto coder
predictor/story teller
translator
search engine

*******
*feeds*
*******

news
weather


******************
*gamification(-+)*
******************

********
*friend*
********
empathy
befriender
fitness(workout/yoga)
greeter

*********
*monitor*
*********
trader
emotion awareness
skill awareness
 

fukurou

the supreme coder
ADMIN
creamers of the corn?!
you mean team Fuki#4 ?!!!
the legendary catalogers?

shit in the ass man
 

fukurou

the supreme coder
ADMIN
Python:
        if temp:
            result = self.rcb.respondDialog(temp)
            if self.filter.strContainsResponse(result):
                return  # filter out
            self.rcb.learnV2(temp, self.elizaDeducer)
            self.setSimpleAlg(Eliza.PhraseMatcher.reflect(result))
 

fukurou

the supreme coder
ADMIN
Python:
class DiDeducer(Skill):
    """Dialog skill: learns phrase pairs (via an ElizaDeducer) and replies through
    a RailChatBot, with a user-teachable filter of banned replies."""

    def __init__(self, deducer: ElizaDeducer):
        super().__init__()
        self.rcb: RailChatBot = RailChatBot()  # underlying chat engine
        self.dialog: AXCmdBreaker = AXCmdBreaker("babe")  # presumably strips the "babe" trigger word — TODO confirm
        self.filter: UniqueItemSizeLimitedPriorityQueue = UniqueItemSizeLimitedPriorityQueue(5)  # banned replies, capped at 5
        self.bads: AXCmdBreaker = AXCmdBreaker("is bad")  # detects "<phrase> is bad"
        self.goods: AXCmdBreaker = AXCmdBreaker("is good")  # detects "<phrase> is good"
        self.filterTemp: str = ""  # scratch slot for the extracted phrase
        self.elizaDeducer: ElizaDeducer = deducer

    def setQueLim(self, lim):
        """Resize the banned-reply filter capacity."""
        self.filter.setLimit(lim)

    def input(self, ear, skin, eye):
        """Route one utterance: teach the filter, or learn from it and reply."""
        # filter learn:
        # "<phrase> is bad" -> remember the phrase as banned.
        self.filterTemp = self.bads.extractCmdParam(ear)
        if self.filterTemp:
            self.filter.insert(self.filterTemp)
            self.filterTemp = ""
            self.setSimpleAlg("i will keep that in mind")
            return
        # "<phrase> is good" -> lift the ban.
        self.filterTemp = self.goods.extractCmdParam(ear)
        if self.filterTemp:
            self.filter.removeItem(self.filterTemp)
            self.filterTemp = ""
            self.setSimpleAlg("understood")
            return
        # Stay silent when the input itself matches a banned phrase.
        if self.filter.strContainsResponse(ear):
            return  # filter in
        temp = self.dialog.extractCmdParam(ear)
        if temp:
            # Learn first, then respond; suppress the reply if it is banned.
            self.rcb.learnV2(temp, self.elizaDeducer)
            result = self.rcb.respondDialog(temp)
            if self.filter.strContainsResponse(result):
                return  # filter out
            self.setSimpleAlg(result)
 

fukurou

the supreme coder
ADMIN
Python:
from AXPython import RegexUtil
from LivinGrimoire23 import Skill, Brain
import serial
import time
import atexit
import serial.tools.list_ports


# terminal: pip install pyserial

def is_port_available(param):
    """Return True if a serial device named *param* (e.g. 'COM3') is present.

    :param param: the device name to look for among the attached serial ports.
    :return: True when some attached port's device name equals *param*.
    """
    # any() replaces the manual loop-and-return idiom; behavior is identical.
    ports = serial.tools.list_ports.comports()
    return any(port.device == param for port in ports)


# Open the serial link once at import time; `ser` stays None when no Arduino
# is attached, so dependent code must check it before use.
name_of_port = 'COM3'  # NOTE(review): hard-coded Windows-style port — adjust per machine
if is_port_available(name_of_port):
    ser = serial.Serial(name_of_port, 9600, timeout=0.1)  # 9600 baud — assumed to match the Arduino sketch, TODO confirm
    print("Arduino connected successfully.")
else:
    ser = None
    print("Arduino not connected. Please check the connection.")


def close():
    """Release the serial port if one was opened at import time."""
    if ser is not None:
        ser.close()


class SerialReader:
    """Reads calibrated temperature values from the module-level serial port."""

    @staticmethod
    def read_serial_data(num_readings=10) -> str:
        """Poll the serial port up to *num_readings* times for a numeric reading.

        :param num_readings: maximum polls, spaced one second apart.
        :return: the calibrated reading as a string, an error message, or
            "i do not know" when nothing usable arrived.
        """
        # Bug fix: the module sets ser = None when no Arduino is connected;
        # the original code then crashed with AttributeError (only
        # SerialException was caught). Bail out gracefully instead.
        if ser is None:
            return "i do not know"
        try:
            for _ in range(num_readings):
                if ser.in_waiting > 0:
                    line = ser.readline().decode('utf-8').rstrip()
                    # Keep only a signed 1-3 digit number from the raw line.
                    line = RegexUtil.extractRegex("[-+]?[0-9]{1,3}", line)
                    if len(line) > 0 and line != "0":
                        # -10 looks like a sensor calibration offset — TODO confirm
                        return f'{int(line) - 10}'
                time.sleep(1)  # Delay between readings
        except serial.SerialException as e:
            return f"Error reading serial data: {e}"
        return "i do not know"


class DiArduinoTemperature(Skill):
    """Example skill that answers "check temperature" with an Arduino reading."""

    def __init__(self):
        super().__init__()

    # Override
    def input(self, ear: str, skin: str, eye: str):
        # Guard-clause form: ignore everything except the trigger phrase.
        if ear != "check temperature":
            return
        self.setVerbatimAlg(4, SerialReader.read_serial_data())


class DiBlinker(Skill):
    """Blinks the Arduino led; example of sending commands to the Arduino."""

    def __init__(self):
        super().__init__()

    # Override
    def input(self, ear: str, skin: str, eye: str):
        if ear == "blink":
            # Bug fix: ser is None when no Arduino is connected; the original
            # code crashed on ser.write. Report instead of blinking.
            if ser is None:
                self.setVerbatimAlg(4, "arduino is not connected")
                return
            self.setVerbatimAlg(4, "blinking")
            ser.write(b'1')  # '1' is the blink command expected by the sketch — TODO confirm


def add_DLC_skills(brain: Brain):
    """Register the Arduino example skills and hook serial cleanup on exit."""
    atexit.register(close)  # wrap up serial object when program closes
    for skill in (DiArduinoTemperature(), DiBlinker()):
        brain.add_logical_skill(skill)

Python:
        babble_tmp.append(PhraseMatcher("you are just a (.*)", [
            AXKeyValuePair("you are just a {0}", "i will be the best {0} then"),
            AXKeyValuePair("you are just a {0}", "kiss my {0} butt")
        ]))  # anti bully

Python:
class DiDeducer(Skill):
    # Chatbot skill: teaches a RailChatBot new dialog via an ElizaDeducer and
    # replies to "babe <text>" input, honoring a user-trained reply blacklist.
    def __init__(self, deducer: ElizaDeducer):
        super().__init__()
        self.rcb: RailChatBot = RailChatBot()  # chat engine used to learn and respond
        self.dialog: AXCmdBreaker = AXCmdBreaker("babe")  # extracts text after the "babe" keyword — presumably; TODO confirm
        self.filter: UniqueItemSizeLimitedPriorityQueue = UniqueItemSizeLimitedPriorityQueue(5)  # blacklist of replies (max 5)
        self.bads: AXCmdBreaker = AXCmdBreaker("is bad")  # parses "<x> is bad" commands
        self.goods: AXCmdBreaker = AXCmdBreaker("is good")  # parses "<x> is good" commands
        self.filterTemp: str = ""  # temporary holder for the parsed phrase
        self.elizaDeducer: ElizaDeducer = deducer

    def setQueLim(self, lim):
        # Change how many blacklisted phrases the filter can hold.
        self.filter.setLimit(lim)

    def input(self, ear, skin, eye):
        # filter learn:
        # blacklist a phrase: "<phrase> is bad"
        self.filterTemp = self.bads.extractCmdParam(ear)
        if self.filterTemp:
            self.filter.insert(self.filterTemp)
            self.filterTemp = ""
            self.setSimpleAlg("i will keep that in mind")
            return
        # un-blacklist a phrase: "<phrase> is good"
        self.filterTemp = self.goods.extractCmdParam(ear)
        if self.filterTemp:
            self.filter.removeItem(self.filterTemp)
            self.filterTemp = ""
            self.setSimpleAlg("understood")
            return
        # drop input that matches a blacklisted phrase
        if self.filter.strContainsResponse(ear):
            return  # filter in
        temp = self.dialog.extractCmdParam(ear)
        if temp:
            # learn from the utterance, then answer unless the answer is blacklisted
            self.rcb.learnV2(temp, self.elizaDeducer)
            result = self.rcb.respondDialog(temp)
            if self.filter.strContainsResponse(result):
                return  # filter out
            self.setSimpleAlg(result)
 

the living tribunal

Moderator
Staff member
moderator
Python:
import os

def read_file_as_string(file_path: str, encoding: str = 'utf-8') -> str:
    """
    Reads the contents of a file and returns it as a single string.

    :param file_path: The path to the file to be read.
    :param encoding: Text encoding used to decode the file (default UTF-8);
        the original relied on the platform default, which is not portable.
    :return: The entire content of the file as a string.
    :raises OSError: If the file cannot be opened or read.
    """
    with open(file_path, 'r', encoding=encoding) as file:
        content = file.read()
    return content

# Usage example:
# NOTE(review): runs at import time and raises FileNotFoundError if
# 'canvas/yourfile.txt' is missing — guard with __main__ in real use.
file_path = os.path.join('canvas', 'yourfile.txt')
file_content = read_file_as_string(file_path)
print(file_content)
 

the living tribunal

Moderator
Staff member
moderator
Python:
import os

def clear_file_contents(file_path: str) -> None:
    """
    Truncate the given file to zero bytes.

    Opening in 'w' mode clears the file as a side effect of the open itself,
    so there is nothing to write.

    :param file_path: The path to the file to be cleared.
    """
    with open(file_path, 'w'):
        pass  # truncation already happened on open

# Usage example:
# NOTE(review): executes at import time; raises FileNotFoundError when the
# 'canvas' directory does not exist.
file_path = os.path.join('canvas', 'yourfile.txt')
clear_file_contents(file_path)
 

the living tribunal

Moderator
Staff member
moderator
Async mode
Python:
import os
import aiofiles

async def clear_file_contents_async(file_path: str) -> None:
    """
    Asynchronously clears the contents of the specified file.

    Requires the third-party ``aiofiles`` package; the truncation happens as a
    side effect of opening the file in 'w' mode, so the body writes nothing.

    :param file_path: The path to the file to be cleared.
    """
    async with aiofiles.open(file_path, 'w') as file:
        pass  # Opening a file in 'w' mode and not writing anything to it clears the file contents.

# Usage example with asyncio:
import asyncio

file_path = os.path.join('canvas', 'yourfile.txt')
# Bug fix: a bare `await` at module top level is a SyntaxError in a .py file;
# drive the coroutine with asyncio.run() instead.
asyncio.run(clear_file_contents_async(file_path))
 

fukurou

the supreme coder
ADMIN
Python:
import requests
from bs4 import BeautifulSoup
from collections import Counter
from urllib.parse import urljoin, urlparse
from typing import List

mainstream_sites = ['google.com', 'bing.com', 'facebook.com', 'youtube.com']


def crawl_web(url: str) -> List[str]:
    """Fetch *url* and return every hyperlink on the page as an absolute URL.

    :param url: page to fetch.
    :return: list of absolute link URLs; empty list on any request failure
        (the error is printed, not raised).
    """
    try:
        # Bug fix: requests.get without a timeout can hang forever on a dead
        # host; 10s bounds each fetch.
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # Check for request errors
        soup = BeautifulSoup(response.content, 'html.parser')
        # urljoin resolves relative hrefs against the page URL
        links = [urljoin(url, a['href']) for a in soup.find_all('a', href=True)]
        return links
    except requests.RequestException as e:
        print(f"Error crawling {url}: {e}")
        return []


def filter_mainstream_sites(links: List[str], seed_sites: List[str], blocked=None) -> List[str]:
    """Drop links that hit a blocked (mainstream) site or point back at a seed.

    :param links: candidate URLs.
    :param seed_sites: crawl seeds; links containing a seed's domain are dropped.
    :param blocked: domain substrings to exclude; defaults to the module-level
        ``mainstream_sites`` list (parameterized from the old hard-coded global).
    :return: links containing neither a blocked site nor a seed domain.
    """
    if blocked is None:
        blocked = mainstream_sites
    seed_domains = [urlparse(seed_site).netloc for seed_site in seed_sites]
    return [link for link in links
            if not any(site in link for site in blocked)
            and not any(seed_domain in link for seed_domain in seed_domains)]


def filter_mainstream_sites_keep_next_level(links: List[str], seed_sites, blocked=None) -> List[str]:
    """Keep only links that stay on a seed domain, excluding blocked sites.

    :param links: candidate URLs.
    :param seed_sites: crawl seeds; only links containing a seed domain survive.
    :param blocked: domain substrings to exclude; defaults to the module-level
        ``mainstream_sites`` list (parameterized from the old hard-coded global).
    :return: on-seed-domain links not containing any blocked site.
    """
    if blocked is None:
        blocked = mainstream_sites
    seed_domains = [urlparse(seed_site).netloc for seed_site in seed_sites]
    return [link for link in links
            if not any(site in link for site in blocked)
            and any(seed_domain in link for seed_domain in seed_domains)]


def filter_mainstream_sites_keepSeeds(links: List[str], blocked=None) -> List[str]:
    """Drop only blocked (mainstream) links; seed-domain links are kept.

    :param links: candidate URLs.
    :param blocked: domain substrings to exclude; defaults to the module-level
        ``mainstream_sites`` list (parameterized from the old hard-coded global).
    :return: links not containing any blocked site.
    """
    if blocked is None:
        blocked = mainstream_sites
    return [link for link in links
            if not any(site in link for site in blocked)]


def rank_sites(links: List[str]) -> list[tuple[str, int]]:
    """Count duplicate links and return (link, count) pairs, most frequent first."""
    return Counter(links).most_common()


def search_engine(*seed_sites: str) -> list[tuple[str, int]]:
    """Crawl each seed site and rank next-level (on-seed-domain) links by frequency.

    :param seed_sites: one or more seed URLs to crawl.
    :return: up to the top 20 (link, occurrence-count) pairs.
    """
    all_links = []
    for seed_site in seed_sites:
        links = crawl_web(seed_site)
        # keep only links that stay on a seed domain (next-level crawl)
        filtered_links = filter_mainstream_sites_keep_next_level(links, seed_sites)
        all_links.extend(filtered_links)
    ranked_sites = rank_sites(all_links)
    # Bug fix: the original comment claimed "top 10" while slicing 20.
    return ranked_sites[:20]  # Return top 20 non-mainstream sites


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Demo: crawl two seed sites and print each ranked link with its count.
    site_seeds = ['https://example.net', 'https://jizz.is/threads/seed-test.1521/']
    results = search_engine(*site_seeds)
    for site, count in results:
        print(f'{site} (count: {count})')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
rank rough code:
Python:
from typing import List

def url_contains(url: str, *args: str) -> bool:
    """Return True when at least one of *args* occurs as a substring of *url*."""
    for fragment in args:
        if fragment in url:
            return True
    return False

# Usage example:
url = "https://example.com/path/to/resource"
strings_to_check = ["example", "resource", "not_in_url"]

# "example" and "resource" both occur in the URL, so the first branch prints.
if url_contains(url, *strings_to_check):
    print("The URL contains one of the specified strings.")
else:
    print("The URL does not contain any of the specified strings.")
 

fukurou

the supreme coder
ADMIN
Python:
def search_engine(*seed_sites: str) -> List[str]:
    """Crawl each seed and return the top 10 non-mainstream links as strings.

    Bug fix: the original was jammed onto a single line, which is not valid
    Python; reconstructed with conventional formatting.
    """
    all_links = []
    for seed_site in seed_sites:
        links = crawl_web(seed_site)
        filtered_links = filter_mainstream_sites(links, seed_sites)
        all_links.extend(filtered_links)
    ranked_sites = rank_sites(all_links)
    return [site for site, _ in ranked_sites[:10]]  # Return top 10 non-mainstream sites as strings
 

owly

闇の伝説
Staff member
戦闘 コーダー
we're going higher order func on this mofo

Python:
def greet(name):
    """Build and return a friendly greeting for *name*."""
    message = f"Hello, {name}!"
    return message

def call_function(func, *args):
    """Higher-order helper: invoke *func* with *args* and return its result."""
    outcome = func(*args)
    return outcome

# Usage example:
result = call_function(greet, "Alice")  # forwards "Alice" into greet
print(result)  # Output: Hello, Alice!

shit in the aaaaaaaaass! :s53:
 

the living tribunal

Moderator
Staff member
moderator
Python:
# Define a higher-order function
def apply_func(func, x, y):
    """Apply the two-argument callable *func* to x and y and return the result."""
    outcome = func(x, y)
    return outcome

# Define a simple function to be used as a parameter
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product

# Use the higher-order function with the multiply function
result = apply_func(multiply, 5, 3)  # apply_func forwards (5, 3) into multiply
print(result)  # Output: 15
 

fukurou

the supreme coder
ADMIN
Python:
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
from typing import List

mainstream_sites = ['google.com', 'bing.com', 'facebook.com', 'youtube.com']


def crawl_web(url: str) -> List[str]:
    """Fetch *url* and return absolute links, skipping javascript: and image hrefs.

    :param url: page to fetch.
    :return: absolute link URLs found on the page.
    :raises requests.RequestException: on network or HTTP errors (this version
        deliberately lets them propagate).
    """
    # Bug fix: requests.get without a timeout can hang forever on a dead host.
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # Check for request errors
    soup = BeautifulSoup(response.content, 'html.parser')
    photo_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg', '.webp')
    links = [urljoin(url, a['href'])
             for a in soup.find_all('a', href=True)
             if not a['href'].startswith('javascript:')
             and not a['href'].lower().endswith(photo_extensions)]
    return links


def filter_mainstream_sites(links: List[str], seed_sites, blocked=None) -> List[str]:
    """Remove links on blocked (mainstream) sites and links back to any seed.

    :param links: candidate URLs.
    :param seed_sites: crawl seeds whose domains are also excluded.
    :param blocked: domain substrings to exclude; defaults to the module-level
        ``mainstream_sites`` list (parameterized from the old hard-coded global).
    :return: links containing neither a blocked site nor a seed domain.
    """
    if blocked is None:
        blocked = mainstream_sites
    seed_domains = [urlparse(seed_site).netloc for seed_site in seed_sites]
    return [link for link in links
            if not any(site in link for site in blocked)
            and not any(seed_domain in link for seed_domain in seed_domains)]


def filter_mainstream_sites_keep_next_level(links: List[str], seed_sites, blocked=None) -> List[str]:
    """Deduplicated links that stay on a seed domain and avoid blocked sites.

    NOTE: the ``set`` used for deduplication does not preserve input order, so
    the result order is arbitrary (same as the original behavior).

    :param links: candidate URLs.
    :param seed_sites: crawl seeds; only links containing a seed domain survive.
    :param blocked: domain substrings to exclude; defaults to the module-level
        ``mainstream_sites`` list (parameterized from the old hard-coded global).
    :return: unique on-seed-domain links not containing any blocked site.
    """
    if blocked is None:
        blocked = mainstream_sites
    seed_domains = [urlparse(seed_site).netloc for seed_site in seed_sites]
    kept = {link for link in links
            if not any(site in link for site in blocked)
            and any(seed_domain in link for seed_domain in seed_domains)}
    return list(kept)


def search_engine(*seed_sites: str) -> list[str]:
    """Crawl every seed site and collect off-seed, non-mainstream links."""
    collected = []
    for seed in seed_sites:
        page_links = crawl_web(seed)
        collected.extend(filter_mainstream_sites(page_links, seed_sites))
    return collected


def inflate_links(*seed_sites: str) -> list[str]:
    """Crawl each seed and gather deduplicated links that stay on the seed domains."""
    gathered = []
    for seed in seed_sites:
        page_links = crawl_web(seed)
        gathered.extend(filter_mainstream_sites_keep_next_level(page_links, seed_sites))
    return gathered


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Demo: collect off-seed links from two seed sites and print them.
    site_seeds = ['https://incels.is/', 'https://jizz.is/threads/seed-test.1521/']
    results = search_engine(*site_seeds)
    # results = inflate_links(*site_seeds)
    for site1 in results:
        print(site1)

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
 

fukurou

the supreme coder
ADMIN
Python:
def check_codephrase(s: str, codephrase: str = "codephrase") -> str:
    """Strip *codephrase* from the start of *s* if present.

    :param s: input string.
    :param codephrase: prefix to remove (generalized from the old hard-coded
        value; the default preserves the original behavior).
    :return: *s* without the leading codephrase, or *s* unchanged.
    """
    # str.removeprefix (3.9+) returns s unchanged when the prefix is absent,
    # exactly matching the original startswith/slice logic.
    return s.removeprefix(codephrase)

# Example usage
# The "codephrase" prefix is stripped; strings without it pass through unchanged.
input_string = "codephraseThis is the leftover part."
result = check_codephrase(input_string)
print(result)  # Output: "This is the leftover part."
 
Top