🐍 python python grimoire

python

fukurou

the supreme coder
ADMIN
tkinter timer manipulations inside a function :

Python:
# Restart the card-flip countdown from inside a function: cancel the pending
# tkinter timer handle, then schedule flip_card to run again 3000 ms from now.
global current_card, flip_timer
window.after_cancel(flip_timer)
flip_timer = window.after(3000, func=flip_card)
 

fukurou

the supreme coder
ADMIN
python sending emails programmatically

set up gmail :

manage acc, security,
set use phone to sign in -> off
set 2 step verification -> off
less secure apps -> on

set up yahoo mail

account info, acc security
generate new app pass
other app, custom name: python code, generate
copy the generated code and use it in the code as the password

Python:
import smtplib

# Demo: send an email through Gmail's SMTP server.
# Requires the account to allow SMTP logins (app password / legacy settings).
my_email = "asjkl;dbfgjkbgjkadbfgjkbadffjklndfjkngklasdmgl;amkl;[email protected]"
password = "qwerty"
target = "dm,fasghbnjkasdfbnhgjkbnadfjkghjkqdnhadfjklgnakljmfgl;[email protected]"

# Use the context manager so the connection is always closed, and pass the
# mail-submission port explicitly (587 is the standard STARTTLS port).
with smtplib.SMTP("smtp.gmail.com", port=587) as connection:
    connection.starttls()  # upgrade to TLS before sending credentials
    connection.login(user=my_email, password=password)
    connection.sendmail(from_addr=my_email, to_addrs=target, msg="subject: omelet du fromaz\n\nand this is the msg body")
 

fukurou

the supreme coder
ADMIN
Python:
#  auto sender
# csv example :
# name,email,year,month,day
# Test,[email protected],1961,12,21


from datetime import datetime
import pandas
import random
import smtplib

MY_EMAIL = "YOUR EMAIL"
MY_PASSWORD = "YOUR PASSWORD"

# Automated birthday mailer: if today matches a row in birthdays.csv, send
# that person a randomly chosen letter template with [NAME] filled in.
today = datetime.now()
today_tuple = (today.month, today.day)  # tuple used as a dictionary key
# pandas csv -> dict comprehension: (month, day) -> the whole csv row
data = pandas.read_csv("birthdays.csv")
birthdays_dict = {(data_row["month"], data_row["day"]): data_row for (index, data_row) in data.iterrows()}
if today_tuple in birthdays_dict:
    birthday_person = birthdays_dict[today_tuple]
    # pick one of letter_1.txt .. letter_3.txt at random
    file_path = f"letter_templates/letter_{random.randint(1,3)}.txt"
    with open(file_path) as letter_file:
        contents = letter_file.read()
        contents = contents.replace("[NAME]", birthday_person["name"])

    with smtplib.SMTP("YOUR EMAIL PROVIDER SMTP SERVER ADDRESS") as connection:
        connection.starttls()
        connection.login(MY_EMAIL, MY_PASSWORD)
        connection.sendmail(
            from_addr=MY_EMAIL,
            to_addrs=birthday_person["email"],
            msg=f"Subject:Happy Birthday!\n\n{contents}"
        )
 

fukurou

the supreme coder
ADMIN
schedule running the code online:

pythonanywhere.com and create an account
1 files, upload the python project files respecting the directory setup
2 consoles, bash (or click the main.py, bash to just test), type : python3 main.py

if you see the smtplib.SMTPAuthenticationError :
go to the now displayed support.google... url
click the displayUnlockCaptcha link

and now the auto email sender should be enabled

3 on pythonanywhere : Tasks :
set a time in utc (search UTC to get your utc time)
, and on the run entry type: python3 main.py
 

fukurou

the supreme coder
ADMIN
python API

Python:
import requests

# Query the Open Notify API for the current ISS position.
# url=endpoint
response = requests.get(url="http://api.open-notify.org/iss-now.json")  # ISS satellite tracker
# response status code families (see httpstatuses.com):
# 1xx: informational - hold on
# 2xx: success - here you go
# 3xx: redirection - look elsewhere
# 4xx: client error - you screwed up (401/403 cover missing permission)
# 5xx: server screwed up
response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
print(response.status_code)
data = response.json()  # parse the JSON body into a dict
print(data)
# example: {'message': 'success', 'timestamp': 1635563104, 'iss_position': {'latitude': '-35.6263', 'longitude': '-84.0181'}}
iss_position = (data["iss_position"]["longitude"], data["iss_position"]["latitude"])  # (lng, lat) tuple of strings
print(iss_position)  # latlong.net to display coordinates on map
 

fukurou

the supreme coder
ADMIN
example python API with Tkinter UI

Code:
from tkinter import *
import requests

# Tkinter app: click the Kanye button to fetch and display a random quote.

def get_quote():
    """Fetch a random quote from api.kanye.rest and show it on the canvas."""
    response = requests.get(url="https://api.kanye.rest/")
    quote = response.json()["quote"]
    canvas.itemconfig(quote_text, text=quote)


window = Tk()
window.title("Kanye Says...")
window.config(padx=50, pady=50)

# canvas holding a background image plus the text item the button updates
canvas = Canvas(width=300, height=414)
background_img = PhotoImage(file="background.png")
canvas.create_image(150, 207, image=background_img)
quote_text = canvas.create_text(150, 207, text="Kanye Quote Goes HERE", width=250, font=("Arial", 30, "bold"), fill="white")
canvas.grid(row=0, column=0)

# image button wired to get_quote
kanye_img = PhotoImage(file="kanye.png")
kanye_button = Button(image=kanye_img, highlightthickness=0, command=get_quote)
kanye_button.grid(row=1, column=0)



window.mainloop()
 

fukurou

the supreme coder
ADMIN
Python:
import requests

# example 1: API with a query parameter (Yoda translation API).
# Pass the text through params= so requests builds and percent-encodes the
# query string, instead of interpolating raw text (spaces etc.) into the URL.
txt = "i like to eat hummus"
response = requests.get(
    url="https://api.funtranslations.com/translate/yoda.json",
    params={"text": txt},
)
response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
print(response.status_code)
data = response.json()  # JSON body as a dict
print(data["contents"]["translated"])  # Hummus,  I like to eat

Python:
import requests

MY_LAT = 51.406681
MY_LONG = 30.046425
# example 2: API parameters passed via a dictionary (sunrise/sunset api).
# Parameter keys must match the names in the API documentation exactly:
# sunrise-sunset.org expects lowercase "lng" — a capitalised "Lng" key is
# silently ignored and the API falls back to its default longitude.
parameters = {
    "lat": MY_LAT,
    "lng": MY_LONG,
}
response = requests.get(url="https://api.sunrise-sunset.org/json", params=parameters)
response.raise_for_status()
data = response.json()  # JSON body as a dict
sunrise = data["results"]["sunrise"]
sunset = data["results"]["sunset"]
print(sunrise)

json chrome plugin:
 

fukurou

the supreme coder
ADMIN
escape unescape html :

Python:
import html

# escape/unescape html: &quot; entities decode to literal double quotes.
# (The original paste had already-unescaped quotes, which is a syntax error;
# the literal below restores the intended escaped input.)
str1 = "&quot;eating burgers, without no honey mustard&quot;"
print(html.unescape(str1))  # "eating burgers, without no honey mustard"

# explicit variable type annotation:
str2: str = ""


# explicitly typed function: BMI in, "is below the thin threshold of 17" out.
def isThin(BMI: int) -> bool:
    return BMI < 17
 

fukurou

the supreme coder
ADMIN
python : read gmail inbox programmatically :

Python:
# Credentials and server settings for reading a Gmail inbox over IMAP.
ORG_EMAIL = "@gmail.com"
FROM_EMAIL = "your_email" + ORG_EMAIL
FROM_PWD = "your-password"
SMTP_SERVER = "imap.gmail.com"  # NOTE(review): despite the SMTP_ prefix this is the IMAP host
SMTP_PORT = 993  # standard IMAP-over-SSL port (not actually used below)

def read_email_from_gmail():
    """Print the From and Subject headers of every inbox message, newest first.

    NOTE(review): relies on imaplib, email and traceback being imported at
    file level — confirm those imports exist elsewhere in this file.
    """
    try:
        mail = imaplib.IMAP4_SSL(SMTP_SERVER)
        mail.login(FROM_EMAIL,FROM_PWD)
        mail.select('inbox')

        # search returns (status, [ids-as-bytes]); index 1 is the data payload
        data = mail.search(None, 'ALL')
        mail_ids = data[1]
        id_list = mail_ids[0].split()   
        first_email_id = int(id_list[0])
        latest_email_id = int(id_list[-1])

        # walk message ids from newest down to oldest
        for i in range(latest_email_id,first_email_id, -1):
            data = mail.fetch(str(i), '(RFC822)' )
            for response_part in data:
                arr = response_part[0]
                if isinstance(arr, tuple):  # only the (header, payload) tuples carry the raw message
                    msg = email.message_from_string(str(arr[1],'utf-8'))
                    email_subject = msg['subject']
                    email_from = msg['from']
                    print('From : ' + email_from + '\n')
                    print('Subject : ' + email_subject + '\n')

    except Exception as e:
        # best-effort demo: dump the traceback and continue
        traceback.print_exc()
        print(str(e))

read_email_from_gmail()

jsonviewer.stack.hu

Code:
# api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
# https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&exclude={part}&appid={API key}
import requests

API_endpoint = "https://api.openweathermap.org/data/2.5/onecall"
MY_LAT = 51.406681
MY_LONG = 30.046425
APPID = ""
# example 3 API
parameters = {
    "lat": MY_LAT,
    "lon": MY_LONG,
    "appid": APPID,
    "exclude": "current,minutely,daily"
}
# parameter keys must match parameter names from the API documentation
response = requests.get(url=API_endpoint, params=parameters)
response.raise_for_status()
data = response.json()  # treat as dictionary type
upcoming = [int(x["weather"][0]["id"]) for x in data["hourly"][:11]]
gonna_rain: bool=False;
for weather_id in upcoming:
    if weather_id < 700:
        gonna_rain = True
        break
print(upcoming)
if gonna_rain:
    print("bring an umbrella")
 

fukurou

the supreme coder
ADMIN
environment variables

create environment var on mac :
terminal, type env, to get a list of environment variables used to hide API keys
export var1=something

create environment variable on windows :
windows btn, type environment variables, on the user variables window add the var
type a name on top and a value on the bottom

import os
# assuming the environment variable was named var1 with the value "something"
v1 = os.environ.get("var1")  # replace API keys and tokens with such variables
print(v1)  # something

so when you schedule the code on python anywhere
run :
export var1=something; export var2=something_else; python3 main.py

more APIs :
apilist.fun
 

fukurou

the supreme coder
ADMIN
get : get data

post : post data

put : update data

delete

pixe.la



post command to create user account

Python:
import requests

import PlayGround

# pixe.la habit-tracking API walkthrough: create user -> create graph -> post pixel.
pixela_endpoint = "https://pixe.la/v1/users"

USERNAME = "forge"
TOKEN = "secret"  # token can be any 8 to 128 char string
GRAPH_ID = "graph1"

# step 1: POST to create the user account.
user_parameters = {
    "token": TOKEN,
    "username": USERNAME,
    "agreeTermsOfService": "yes",
    "notMinor": "yes",
}
# response = requests.post(url=pixela_endpoint, json=user_parameters)
# print(response.text)
# {"message":"Success. Let's visit https://pixe.la/@forge , it is your profile page!","isSuccess":true}

# step 2: POST to create the graph.
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs"
# graph id rules: [a-z][a-z0-9-]{1,16} — starts with a letter, up to 16 chars
graph_config = {
    "id": "graph1",
    "name": "jizz graph",
    "unit": "times",
    "type": "int",
    "color": "sora",
}
# credentials travel in a request header instead of the body:
headers = {
    "X-USER-TOKEN": TOKEN,
}
# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)
# print(response.text)  # {"message":"Success.","isSuccess":true} — graph is set up
# the graph is then visible at https://pixe.la/v1/users/forge/graphs/graph1.html

# step 3: POST today's pixel (data point): https://docs.pixe.la/entry/post-pixel
pixel_creation_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}"
pl = PlayGround.PlayGround()
todays_entry = {
    "date": f"{pl.today()}",
    "quantity": "4",
    "optionalData": "{\"material\":\"feet\"}",
}
response = requests.post(url=pixel_creation_endpoint, json=todays_entry, headers=headers)
print(response.text)  # {"message":"Success.","isSuccess":true}

update pixel :

# update pixel :



Python:
# PUT an updated quantity onto today's pixel.
update_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{pl.today()}"
new_pixel_data = {"quantity": "5"}
response = requests.put(url=update_endpoint, json=new_pixel_data, headers=headers)
print(response.text)  # {"message":"Success.","isSuccess":true}



delete the graph :

# delete graph
# DELETE on the graph endpoint (same URL used for posting pixels) removes the graph.

response = requests.delete(url=pixel_creation_endpoint, headers=headers)

print(response.text) #



 

fukurou

the supreme coder
ADMIN
FirstInFirstOut queue
fifo queue :

Python:
import queue

# FIFO demo: SimpleQueue hands items back in the order they were put in.
q1: queue.SimpleQueue = queue.SimpleQueue()  # annotate with the class, not the module
q1.put(800)
q1.put(11)
q1.put(17)
q1.put(12)

print(q1.get())  # 800
print(q1.get())  # 11
q1.put(20)
print(q1.get())  # 17
print(q1.get())  # 12
print(q1.get())  # 20
# NOTE: the queue is now empty — a further blocking get() would hang forever
# (the original demo did exactly that); use get_nowait(), which raises
# queue.Empty, to probe a possibly-empty queue.
 

fukurou

the supreme coder
ADMIN
scraping site data with beautiful soup


a simple website.html has been added to the project :

android studio jsoup version :

Python:
from bs4 import BeautifulSoup

# import lxml  # use as an alternative to html.parser

# Parse a local HTML file and demo the main BeautifulSoup lookup APIs.
with open("website.html", "r", encoding='utf-8') as file:
    contents: str = file.read()

soup = BeautifulSoup(contents, "html.parser")  # use lxml if this line errors for some sites (also enable import lxml)
print(soup.title)  # the whole <title> tag
print(soup.title.name)  # tag name: "title"
print(soup.title.string)  # just the string inside the tag
print(soup.prettify())  # the whole document, indented
all_anchor_tags = soup.find_all(name='a')  # list of every <a> tag
print(all_anchor_tags[1])

for tag in all_anchor_tags:
    print(tag.getText())  # str value in tag
    print(tag.get("href"))  # get link attribute

# QUERIES
#####################################
heading = soup.find(name="h1", id="name")  # first <h1> with id="name"
print(heading.getText())

section_heading = soup.find(name="h3", class_="heading")  # class_ avoids the `class` keyword
print(section_heading.getText())

company_url = soup.select_one(selector="p a")  # this looks for an a tag inside a p tag
print(company_url)

# query based on an id-value selector
name = soup.select_one(selector="#name")  # this looks for tags with name like id="name"
print(name)

# class query selector:

heading = soup.select(".heading")  # select elements with the heading class, i.e. class="heading"
print(heading)

get site source code programmatically :

Python:
import requests

# Fetch the raw HTML source of the Hacker News front page.
response = requests.get("https://news.ycombinator.com/news")
print(response.text)

example mining tags from a site :
only for 1st result

Python:
from bs4 import BeautifulSoup
import requests

# Scrape the first article (title, link, score) from the Hacker News front page.
response = requests.get("https://news.ycombinator.com/news")
yc_web_page = response.text
soup = BeautifulSoup(yc_web_page, "html.parser")
print(soup.title)

# class names were taken from right click -> Inspect on the element
# NOTE(review): HN's markup changes over time — confirm "titlelink"/"score" still exist
article_tag = soup.find(name="a", class_="titlelink")
article_text = article_tag.getText()
article_link = article_tag.get("href")
article_upvote = soup.find(name="span", class_="score").getText()

print(article_text)
print(article_link)
print(article_upvote)

for all results of tags in the site:

Python:
from bs4 import BeautifulSoup
import requests

# Scrape ALL front-page articles from Hacker News: titles, links and scores.
response = requests.get("https://news.ycombinator.com/news")
yc_web_page = response.text
soup = BeautifulSoup(yc_web_page, "html.parser")
print(soup.title)

# "titlelink" is the class seen via right click -> Inspect on the title element
articles = soup.find_all(name="a", class_="titlelink")
print(articles)
# build the parallel title/link lists with comprehensions instead of append loops
article_texts = [article_tag.getText() for article_tag in articles]
article_links = [article_tag.get("href") for article_tag in articles]  # the href attribute holds the link

print(article_texts)
print(article_links)

# scores, via the same list-comprehension pattern
article_upvotes = [score.getText() for score in soup.find_all(name="span", class_="score")]
print(article_upvotes)

example 2 :
program that gets latest article links and titles from dailystormer.su

Python:
from bs4 import BeautifulSoup
import requests

# Pull the latest article titles and links from the site's front page.
response = requests.get("https://dailystormer.su/")
dailystormer_site = response.text
soup = BeautifulSoup(dailystormer_site, "html.parser")
print(soup.title)

# article title anchors carry rel="bookmark" (found via right click -> Inspect)
articles = soup.find_all(name="a", rel="bookmark")
article_texts = []
article_links = []  # respective links to texts of articles in tags
for article_tag in articles:
    text = article_tag.getText()
    article_texts.append(text)
    link = article_tag.get("href")  # the link is the href attribute of the tag
    article_links.append(link)

article_texts = article_texts[1:]  # skip the first entry — presumably not an article; verify against the live page
article_texts = [element for element in article_texts if not (element == '')]  # remove empty cases
for n in article_texts:
    print(n)
print(article_links)

see /robots.txt at the url root for a site's scraping policy
 

fukurou

the supreme coder
ADMIN
Selenium : automating the browser

see which version you need :
chrome browser, help, about

put the drive inside some folder and copy the location link of the file you extracted

in pycharm :

Python:
from selenium import webdriver

# Open a page with Selenium's Chrome driver, then close the tab.
# Use a raw string for the Windows path: in a normal string "\c" is an
# invalid escape sequence (SyntaxWarning on modern Python) and other
# backslash sequences would silently corrupt the path.
chrome_driver_path: str = r"C:\chrome driver\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
# note other browsers are available

driver.get("https://jizz.is/")
driver.close()  # this will close down the active tab
# driver.quit()  # this would shut down the whole browser
# driver.quit() #this will shut down the browser

on a mac the code will err,
to fix enable the driver:

apple symbol-> sys pref-> security and privacy
where it says chrome driver was blocked from use-> allow anyway

now the code should run also on a mac and not only on a windows

this example gets the price of an amazon product

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Scrape the price element off an Amazon product page.
# Raw string avoids invalid-escape warnings in the Windows path.
s = Service(r'C:\chrome driver\chromedriver.exe')
driver = webdriver.Chrome(service=s)
# legacy pre-Service API equivalent:
# chrome_driver_path: str = r"C:\chrome driver\chromedriver.exe"
# driver = webdriver.Chrome(executable_path=chrome_driver_path)
# note other browsers are available

driver.get("https://www.amazon.com/Rearz-Squirts-Splash-Diapers-X-Large/dp/B08LNY72J5/ref=sr_1_14?crid=30BCDAQ0FFQTA&keywords=abdl&qid=1645744594&sprefix=abdl%2Caps%2C238&sr=8-14")
# driver.close()  # closes the active tab only
# driver.quit()  # shuts down the whole browser
# to get the XPath: right click -> Inspect the element, Copy, Copy XPath
price = driver.find_element(By.XPATH,"//*[@id='corePrice_desktop']/div/table/tbody/tr/td[2]/span[1]/span[2]")
print(price.text)
driver.quit()

grabbing html elements

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Grab individual HTML elements by name attribute and by CSS selector.
# Raw string avoids invalid-escape warnings in the Windows path.
s = Service(r'C:\chrome driver\chromedriver.exe')
driver = webdriver.Chrome(service=s)
# note other browsers are available

driver.get("https://jizz.is/")
# driver.close()  # closes the active tab only
# driver.quit()  # shuts down the whole browser
search_bar = driver.find_element(By.NAME, 'keywords')

print(search_bar.tag_name)  # input
print(search_bar.get_attribute("placeholder"))  # Search…
# get a link element via its CSS class
link = driver.find_element(By.CSS_SELECTOR, '.p-navEl-link ')
print(link.get_attribute('href'))  # https://jizz.is/
driver.quit()

getting an element list, exemplified using a CSS selector


Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Getting element LISTS, exemplified with CSS selectors on python.org.
# Raw string avoids invalid-escape warnings in the Windows path.
s = Service(r'C:\chrome driver\chromedriver.exe')
driver = webdriver.Chrome(service=s)

driver.get("https://www.python.org/")
# dig down by element using the CSS selector:
# ALL the <time> elements inside the element with the event-widget class
event_times = driver.find_elements(By.CSS_SELECTOR, ".event-widget time")
for time in event_times:
    print(time.text)

# ALL the name anchors inside an <li> inside the event-widget element
event_names = driver.find_elements(By.CSS_SELECTOR, ".event-widget li a")
for name in event_names:
    print(name.text)

# pair times with names into a dict keyed by position
# (enumerate + zip instead of indexing with range(len(...)))
events = {
    n: {"time": time_el.text, "name": name_el.text}
    for n, (time_el, name_el) in enumerate(zip(event_times, event_names))
}
print(events)
driver.quit()

example css selector with element id (formula):
driver.find_elements(By.CSS_SELECTOR, "#id_value inner_tag1 inner_tag2")
 

fukurou

the supreme coder
ADMIN
.send_keys('') and .click() methods on an element — example

in this code the bot logs in then makes a post

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Bot that logs into the forum, then creates a new thread:
# .send_keys('...') types into an element, .click() presses it.
s = Service(r'C:\chrome driver\chromedriver.exe')  # raw string: avoids invalid-escape warnings
driver = webdriver.Chrome(service=s)
driver.get("https://jizz.is/forums/bots-post.10/post-thread")
# use full xpath !
user_name = driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div/div[2]/form/div[1]/div/dl[1]/dd/input")
print(user_name.id)
user_name.send_keys("sarval chan")
user_pass = driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div/div[2]/form/div[1]/div/dl[2]/dd/div/div/input")
user_pass.send_keys("qwerty")
login_btn = driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div/div[2]/form/div[1]/dl/dd/div/div[2]/button/span")
print(login_btn.click())  # click() returns None
# reload the post-thread page now that we are logged in
driver.get("https://jizz.is/forums/bots-post.10/post-thread")
title_element = driver.find_element(By.XPATH, '/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div/form/div/div[1]/dl/dd/div/div[1]/input')
title_element.send_keys("waifubot xenforo test 1")
body = driver.find_element(By.XPATH,'/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div/form/div/div[1]/div/dl[1]/dd/div/div[2]/div')
body.send_keys("the test has been successful @fukurou")
send = driver.find_element(By.XPATH, '/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div/form/div/dl/dd/div/div[2]/button/span')
send.click()

driver.quit()

selenium opening multiple tabs and switching tabs :

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Open several tabs and switch between them by window name.
s = Service(r'C:\chrome driver\chromedriver.exe')  # raw string: avoids invalid-escape warnings
driver = webdriver.Chrome(service=s)

# first tab: google.com
driver.get('http://google.com')

# second tab: open a named blank tab, switch to it, then load bing
driver.execute_script("window.open('about:blank','secondtab');")
driver.switch_to.window("secondtab")
driver.get('https://www.bing.com/')

# third tab: same pattern for facebook
driver.execute_script("window.open('about:blank','thirdtab');")
driver.switch_to.window("thirdtab")
driver.get('https://www.facebook.com/')
driver.switch_to.window("secondtab")  # jump back to the second tab

selenium get body tags text :

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Print the text content of a page's entire <body>.
s = Service(r'C:\chrome driver\chromedriver.exe')  # raw string: avoids invalid-escape warnings
driver = webdriver.Chrome(service=s)

driver.get("url goes here")

# print(driver.title)

# Printing the whole body text
print(driver.find_element(By.XPATH, "/html/body").text)
driver.close()

selenium get all links in url

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Collect and print every link (href attribute) found on the page.
s = Service(r'C:\chrome driver\chromedriver.exe')  # raw string: avoids invalid-escape warnings
driver = webdriver.Chrome(service=s)

driver.get("https://www.youtube.com/watch?v=iquQANe3JKk")
elem = driver.find_elements(By.XPATH, '//*[@href]')  # any element carrying an href
for link in elem:
    print(link.get_attribute('href'))

driver.close()
 

fukurou

the supreme coder
ADMIN
selenium forum miner

Python:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By


def _extract_posts(page_text):
    """Slice a forum thread's body text down to its individual posts.

    Keeps the text between the 'The Lounge' breadcrumb and the
    'You must log in' footer, splits it on the 'Joined' marker that
    separates posts, and drops chunks containing no spaces (pure noise).
    """
    start = page_text.index('The Lounge')
    text = page_text[start:-1]
    end = text.index("You must log in")
    text = text[len('The Lounge'):end]
    return [chunk for chunk in text.split("Joined", -1) if chunk.find(" ") != -1]


s = Service(r'C:\chrome driver\chromedriver.exe')  # raw string: avoids invalid-escape warnings
driver = webdriver.Chrome(service=s)

driver.get("https://incels.is/threads/in-the-ukraine-poland-border-little-white-girl-is-allowed-to-go-in-the-train-but-the-little-black-girl-is-not.357250/")

# the whole visible body text of the thread page
t1 = driver.find_element(By.XPATH, "/html/body").text

for line in _extract_posts(t1):
    print(line + '\n')

driver.close()
 

fukurou

the supreme coder
ADMIN
python snip article chunk

Python:
# Print the tail of a long article, skipping the first 20,000 characters.
# The with-statement guarantees the file handle is closed even on error.
with open("story.txt", encoding="utf8") as file:
    contents = file.read()

# earlier chunks, for reference:
# print(contents[:10000])
# print(contents[10000:20000])
print(contents[20000:])
 
Top