🐍 python jizzy vision dev

python

fukurou

the supreme coder
ADMIN
Python:
import cv2
import numpy as np

# `Skill` is the base class provided by the host skill framework (not shown here).
class DiHuskyLensReco(Skill):
    def __init__(self):
        super().__init__()
        self._skill_lobe = 5  # eye lobe
        self.cap = cv2.VideoCapture(0)
        self._object_registry = {}  # object_hash -> unique_id
        self._next_id = 1

    def input(self, ear: str, skin: str, eye: str):
        ret, frame = self.cap.read()
        if not ret:
            return

        # Convert to HSV for better color segmentation
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        
        # Detect colored objects (adjust ranges for your environment)
        lower_red = np.array([0, 120, 70])
        upper_red = np.array([10, 255, 255])
        mask = cv2.inRange(hsv, lower_red, upper_red)
        
        # Find contours of detected objects
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        
        detected_ids = []
        for contour in contours:
            if cv2.contourArea(contour) > 300:  # Filter small noise
                x, y, w, h = cv2.boundingRect(contour)
                roi = frame[y:y+h, x:x+w]
                
                # Create unique signature for this object
                obj_hash = self._generate_object_hash(roi)
                
                # Assign or retrieve unique ID
                if obj_hash not in self._object_registry:
                    self._object_registry[obj_hash] = self._next_id
                    self._next_id += 1
                
                detected_ids.append(self._object_registry[obj_hash])
        
        if detected_ids:
            unique_objects = sorted(set(detected_ids))
            self.setVerbatimAlg(3, f"Objects detected: {', '.join(map(str, unique_objects))}")

    def _generate_object_hash(self, roi):
        # Hash an 8-bin grayscale histogram of the region.
        # Caveat: hashing raw float bins is brittle; any lighting or
        # scale change yields a new hash, so IDs only persist across
        # near-identical frames.
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        hist = cv2.calcHist([gray], [0], None, [8], [0, 256])
        return hash(tuple(hist.flatten()))

    def __del__(self):
        if hasattr(self, 'cap'):
            self.cap.release()

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "Husky Lens-like object recognition with persistent unique ID assignment"
        elif param == "triggers":
            return "Continuous camera monitoring, assigns unique IDs to colored objects"
        return "note unavailable"
 

fukurou

the supreme coder
ADMIN
improved v1

Python:
import cv2
import numpy as np

class DiHuskyLensReco(Skill):
    def __init__(self):
        super().__init__()
        self.set_skill_type(3)      # Continuous skill
        self.set_skill_lobe(5)      # Eye lobe
        self.cap = cv2.VideoCapture(0)
        self._object_registry = {}  # object_hash -> unique_id
        self._next_id = 1

    def input(self, ear: str, skin: str, eye: str):
        ret, frame = self.cap.read()
        if not ret:
            return

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Red wraps around the HSV hue axis (near 0 and near 180),
        # so two ranges are combined (adjust as needed)
        lower_red1 = np.array([0, 120, 70])
        upper_red1 = np.array([10, 255, 255])
        lower_red2 = np.array([170, 120, 70])
        upper_red2 = np.array([180, 255, 255])
        mask1 = cv2.inRange(hsv, lower_red1, upper_red1)
        mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
        mask = cv2.bitwise_or(mask1, mask2)

        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        detected_ids = []
        for contour in contours:
            if cv2.contourArea(contour) > 300:
                x, y, w, h = cv2.boundingRect(contour)
                roi = frame[y:y+h, x:x+w]
                obj_hash = self._generate_object_hash(roi)

                if obj_hash not in self._object_registry:
                    self._object_registry[obj_hash] = self._next_id
                    self._next_id += 1

                detected_ids.append(self._object_registry[obj_hash])

        if detected_ids:
            unique_objects = sorted(set(detected_ids))
            self.setVerbatimAlg(3, f"Objects detected: {', '.join(map(str, unique_objects))}")

    def _generate_object_hash(self, roi):
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(gray, (32, 32))
        hist = cv2.calcHist([resized], [0], None, [8], [0, 256])
        hist_hash = hash(tuple(hist.flatten()))
        return hist_hash

    def __del__(self):
        if hasattr(self, 'cap'):
            self.cap.release()

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "Improved HuskyLens-style object recognition with persistent ID assignment"
        elif param == "triggers":
            return "Runs continuously, detects red objects and assigns unique IDs"
        return "note unavailable"
 

fukurou

the supreme coder
ADMIN
yolo ver
Python:
import cv2
import numpy as np

class DiYOLOReco(Skill):
    def __init__(self):
        super().__init__()
        self._skill_lobe = 5
        self.cap = cv2.VideoCapture(0)
       
        # Load YOLO
        self.net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
        self.layer_names = self.net.getLayerNames()
        # getUnconnectedOutLayers() returns scalars in OpenCV >= 4.5.4 and
        # 1-element arrays in older builds; flatten() handles both.
        self.output_layers = [self.layer_names[i - 1]
                              for i in np.array(self.net.getUnconnectedOutLayers()).flatten()]
       
        # Load COCO class names
        with open("coco.names", "r") as f:
            self.classes = [line.strip() for line in f.readlines()]
       
        self._detection_history = {}

    def input(self, ear: str, skin: str, eye: str):
        ret, frame = self.cap.read()
        if not ret:
            return

        height, width, channels = frame.shape

        # YOLO detection
        blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        self.net.setInput(blob)
        outs = self.net.forward(self.output_layers)

        # Process detections
        class_ids = []
        confidences = []
        boxes = []
       
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                   
                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                   
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # Apply non-maximum suppression
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
       
        detected_objects = []
        if len(indexes) > 0:
            for i in indexes.flatten():
                label = str(self.classes[class_ids[i]])
                confidence = confidences[i]
                detected_objects.append(f"{label}({confidence:.2f})")
       
        if detected_objects:
            self.setSimpleAlg(f"YOLO detected: {', '.join(detected_objects)}")

    def __del__(self):
        if hasattr(self, 'cap'):
            self.cap.release()

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "YOLO object detection using OpenCV DNN module"
        elif param == "requirements":
            return "yolov3.weights, yolov3.cfg, coco.names files required"
        elif param == "triggers":
            return "Continuous camera monitoring with YOLO real-time detection"
        return "note unavailable"

You need to download 3 files and put them in the same folder as your code:

1. **yolov3.weights** - The brain file (biggest file)
2. **yolov3.cfg** - The settings file
3. **coco.names** - The list of things YOLO can recognize

**How to get them:**

1. Go to this website: `https://pjreddie.com/darknet/yolo/`
2. Scroll down to "YOLO v3" section
3. Click "Download yolov3.weights" (this is the big file, about 250MB)
4. For `yolov3.cfg`, go to: `https://github.com/pjreddie/darknet/tree/master/cfg`
   - Right click "yolov3.cfg" → Save Link As
5. For `coco.names`, it lives in the `data` folder instead: `https://github.com/pjreddie/darknet/tree/master/data`
   - Right click "coco.names" → Save Link As

**Install this one thing:**
```bash
pip install opencv-python
```

**Put all 3 files in your project folder** so the code can find them.

That's it. The code will use these files to see objects through your camera.
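
A quick sanity check (a minimal sketch, assuming the three files sit in your project folder) confirms everything loads before running the skill:

Python:
import cv2

# Fails loudly here, rather than inside the skill, if a file is
# missing or corrupted.
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
with open("coco.names") as f:
    classes = [line.strip() for line in f]
print(f"Loaded network with {len(net.getLayerNames())} layers and {len(classes)} class names.")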
 

fukurou

the supreme coder
ADMIN
yolov8:

Python:
import cv2
import numpy as np
from ultralytics import YOLO

class DiYOLOv8Reco(Skill):
    def __init__(self):
        super().__init__()
        self.set_skill_lobe(5)  # Eye skill
       
        # Load YOLOv8 model (much simpler than v3)
        self.model = YOLO("yolov8n.pt")  # Uses the small version
       
        # Start camera
        self.cap = cv2.VideoCapture(0)

    def input(self, ear: str, skin: str, eye: str):
        ret, frame = self.cap.read()
        if not ret:
            return

        # Run YOLOv8 detection (super easy now)
        results = self.model(frame)
       
        detected_objects = []
        for result in results:
            for box in result.boxes:
                class_id = int(box.cls[0])
                confidence = float(box.conf[0])
                label = self.model.names[class_id]
                detected_objects.append(f"{label}({confidence:.1f})")
       
        if detected_objects:
            self.setSimpleAlg(f"Detected: {', '.join(detected_objects)}")

    def __del__(self):
        if hasattr(self, 'cap'):
            self.cap.release()

    def skillNotes(self, param: str) -> str:
        if param == "notes":
            return "YOLOv8 object detection - much simpler than v3"
        elif param == "requirements":
            return "ultralytics package and yolov8n.pt file"
        return "note unavailable"

**requirements.txt:**
```txt
ultralytics>=8.0.0
opencv-python>=4.5.0
```

**Installation:**
```bash
pip install ultralytics opencv-python
```

**That's it.** The code will automatically download the YOLOv8 model file (`yolov8n.pt`) on first run. No manual downloads needed.
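
For a quick test outside the skill framework, a minimal sketch using the same ultralytics API runs the model on a single image (the filename here is just a placeholder):

Python:
from ultralytics import YOLO

# Downloads yolov8n.pt automatically on first use, same as the skill.
model = YOLO("yolov8n.pt")
results = model("test.jpg")  # any local image path

for result in results:
    for box in result.boxes:
        label = model.names[int(box.cls[0])]
        print(f"{label}: {float(box.conf[0]):.2f}")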
 