License Plate Reader #361 #362

Open
wants to merge 5 commits into master
269 changes: 269 additions & 0 deletions ML/youtube/LicensePlateReader/License_Plate_Detector.ipynb
@@ -0,0 +1,269 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/Mochoye/Python/blob/master/ML/youtube/LicensePlateReader/License_Plate_Detector.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "q_vZQuFlgliE"
},
"source": []
},
{
"cell_type": "code",
"source": [
"!git clone https://github.com/Mochoye/Licence-Plate-Detection-using-TensorFlow-Lite\n",
"!pip install easyocr"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "g510McSeProD",
"outputId": "750f71c7-3894-405b-effc-f2755407a162"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Cloning into 'Licence-Plate-Detection-using-TensorFlow-Lite'...\n",
"remote: Enumerating objects: 59, done.\u001b[K\n",
"remote: Counting objects: 100% (59/59), done.\u001b[K\n",
"remote: Compressing objects: 100% (51/51), done.\u001b[K\n",
"remote: Total 59 (delta 13), reused 7 (delta 2), pack-reused 0\u001b[K\n",
"Receiving objects: 100% (59/59), 55.52 MiB | 15.21 MiB/s, done.\n",
"Resolving deltas: 100% (13/13), done.\n",
"Collecting easyocr\n",
" Downloading easyocr-1.7.1-py3-none-any.whl (2.9 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.9/2.9 MB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from easyocr) (2.0.1+cu118)\n",
"Requirement already satisfied: torchvision>=0.5 in /usr/local/lib/python3.10/dist-packages (from easyocr) (0.15.2+cu118)\n",
"Requirement already satisfied: opencv-python-headless in /usr/local/lib/python3.10/dist-packages (from easyocr) (4.8.0.76)\n",
"Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from easyocr) (1.11.2)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from easyocr) (1.23.5)\n",
"Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from easyocr) (9.4.0)\n",
"Requirement already satisfied: scikit-image in /usr/local/lib/python3.10/dist-packages (from easyocr) (0.19.3)\n",
"Collecting python-bidi (from easyocr)\n",
" Downloading python_bidi-0.4.2-py2.py3-none-any.whl (30 kB)\n",
"Requirement already satisfied: PyYAML in /usr/local/lib/python3.10/dist-packages (from easyocr) (6.0.1)\n",
"Requirement already satisfied: Shapely in /usr/local/lib/python3.10/dist-packages (from easyocr) (2.0.1)\n",
"Collecting pyclipper (from easyocr)\n",
" Downloading pyclipper-1.3.0.post5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (908 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m908.3/908.3 kB\u001b[0m \u001b[31m23.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting ninja (from easyocr)\n",
" Downloading ninja-1.11.1-py2.py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (145 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m146.0/146.0 kB\u001b[0m \u001b[31m20.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision>=0.5->easyocr) (2.31.0)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->easyocr) (3.12.2)\n",
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch->easyocr) (4.5.0)\n",
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->easyocr) (1.12)\n",
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->easyocr) (3.1)\n",
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->easyocr) (3.1.2)\n",
"Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch->easyocr) (2.0.0)\n",
"Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch->easyocr) (3.27.4.1)\n",
"Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch->easyocr) (16.0.6)\n",
"Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from python-bidi->easyocr) (1.16.0)\n",
"Requirement already satisfied: imageio>=2.4.1 in /usr/local/lib/python3.10/dist-packages (from scikit-image->easyocr) (2.31.3)\n",
"Requirement already satisfied: tifffile>=2019.7.26 in /usr/local/lib/python3.10/dist-packages (from scikit-image->easyocr) (2023.8.30)\n",
"Requirement already satisfied: PyWavelets>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-image->easyocr) (1.4.1)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from scikit-image->easyocr) (23.1)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->easyocr) (2.1.3)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision>=0.5->easyocr) (3.2.0)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision>=0.5->easyocr) (3.4)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision>=0.5->easyocr) (2.0.4)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision>=0.5->easyocr) (2023.7.22)\n",
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->easyocr) (1.3.0)\n",
"Installing collected packages: pyclipper, ninja, python-bidi, easyocr\n",
"Successfully installed easyocr-1.7.1 ninja-1.11.1 pyclipper-1.3.0.post5 python-bidi-0.4.2\n"
]
}
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "nnTKXXRRgcUp",
"outputId": "afb0ceed-a7d9-4c4b-8183-1dbb6c27a584"
},
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"WARNING:easyocr.easyocr:Downloading detection model, please wait. This may take several minutes depending upon your network connection.\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"Progress: |██████████████████████████████████████████████████| 100.0% Complete"
]
},
{
"output_type": "stream",
"name": "stderr",
"text": [
"WARNING:easyocr.easyocr:Downloading recognition model, please wait. This may take several minutes depending upon your network connection.\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"Progress: |██████████████████████████████████████████████████| 100.0% Complete"
]
}
],
"source": [
"import easyocr\n",
"import numpy as np\n",
"import cv2\n",
"import os\n",
"import sys\n",
"import glob\n",
"import random\n",
"import importlib.util\n",
"from tensorflow.lite.python.interpreter import Interpreter\n",
"\n",
"\n",
"reader = easyocr.Reader(['en'])\n",
"\n",
"modelpath = '/content/Licence-Plate-Detection-using-TensorFlow-Lite/custom_model_lite/detect.tflite'\n",
"lblpath = '/content/Licence-Plate-Detection-using-TensorFlow-Lite/custom_model_lite/labelmap.txt'\n",
"min_conf = 0.5\n",
"input_video_path = '/content/Licence-Plate-Detection-using-TensorFlow-Lite/custom_model_lite/demo.mp4'\n",
"output_video_path = 'output.mp4' # Change this to your desired output path\n",
"\n",
"cap = cv2.VideoCapture(input_video_path)\n",
"\n",
"# Define the codec and create a VideoWriter object\n",
"fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n",
"frame_width = int(cap.get(3))\n",
"frame_height = int(cap.get(4))\n",
"out = cv2.VideoWriter(output_video_path, fourcc, 20.0, (frame_width, frame_height))\n",
"\n",
"interpreter = Interpreter(model_path=modelpath)\n",
"interpreter.allocate_tensors()\n",
"input_details = interpreter.get_input_details()\n",
"output_details = interpreter.get_output_details()\n",
"height = input_details[0]['shape'][1]\n",
"width = input_details[0]['shape'][2]\n",
"\n",
"float_input = (input_details[0]['dtype'] == np.float32)\n",
"\n",
"input_mean = 127.5\n",
"input_std = 127.5\n",
"\n",
"crp_img = None # Initialize variable to store the cropped image\n",
"\n",
"with open(lblpath, 'r') as f:\n",
" labels = [line.strip() for line in f.readlines()]\n",
"\n",
"while True:\n",
" ret, frame = cap.read()\n",
" if not ret:\n",
" break\n",
"\n",
" image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
" imH, imW, _ = frame.shape\n",
" image_resized = cv2.resize(image_rgb, (width, height))\n",
" input_data = np.expand_dims(image_resized, axis=0)\n",
"\n",
" # Normalize pixel values if using a floating model (i.e. if model is non-quantized)\n",
" if float_input:\n",
" input_data = (np.float32(input_data) - input_mean) / input_std\n",
"\n",
" # Perform the actual detection by running the model with the image as input\n",
" interpreter.set_tensor(input_details[0]['index'], input_data)\n",
" interpreter.invoke()\n",
"\n",
" boxes = interpreter.get_tensor(output_details[1]['index'])[0] # Bounding box coordinates of detected objects\n",
" classes = interpreter.get_tensor(output_details[3]['index'])[0] # Class index of detected objects\n",
" scores = interpreter.get_tensor(output_details[0]['index'])[0] # Confidence of detected objects\n",
"\n",
" detections = []\n",
"\n",
" for i in range(len(scores)):\n",
" if (scores[i] > min_conf) and (scores[i] <= 1.0):\n",
" # Get bounding box coordinates and draw box\n",
" ymin = int(max(1, (boxes[i][0] * imH)))\n",
" xmin = int(max(1, (boxes[i][1] * imW)))\n",
" ymax = int(min(imH, (boxes[i][2] * imH)))\n",
" xmax = int(min(imW, (boxes[i][3] * imW)))\n",
"\n",
" # Crop the detected region\n",
" crp_img = frame[ymin:ymax, xmin:xmax]\n",
"\n",
" cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)\n",
" object_name = labels[int(classes[i])] # Look up object name from \"labels\" array using class index\n",
" label = '%s: %d%%' % (object_name, int(scores[i] * 100)) # Example: 'person: 72%'\n",
" labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)\n",
" label_ymin = max(ymin, labelSize[1] + 10)\n",
"\n",
" result = reader.readtext(crp_img)\n",
" if len(result) == 0:\n",
" text = 'LicensePlate'\n",
" else:\n",
" text= result[0][-2]\n",
" font = cv2.FONT_HERSHEY_SIMPLEX\n",
"\n",
"\n",
" cv2.rectangle(frame, (xmin, label_ymin - labelSize[1] - 10), (xmin + labelSize[0], label_ymin + baseLine - 10), (255, 255, 255), cv2.FILLED)\n",
" cv2.putText(frame, text, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)\n",
" detections.append([object_name, scores[i], xmin, ymin, xmax, ymax])\n",
"\n",
"\n",
"\n",
" out.write(frame) # Write the frame to the output video\n",
"\n",
"cap.release()\n",
"out.release() # Release the output video\n",
"cv2.destroyAllWindows()\n",
"\n",
"# The cropped image is now stored in the variable crp_img"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "_CwA6oYRiY80"
},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyPv8tphV8mWFEuQrK73ZIy8",
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"nbformat": 4,
"nbformat_minor": 0
}
4 changes: 4 additions & 0 deletions ML/youtube/LicensePlateReader/Readme.md
@@ -0,0 +1,4 @@
# Reading License Plates with EasyOCR

EasyOCR is a Python library for Optical Character Recognition (OCR) that makes it straightforward to extract text from images. In this guide, a TensorFlow Lite detection model locates license plates in video frames, and EasyOCR reads the text from each cropped plate region; a minimal sketch of the OCR step is shown below.
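
As a minimal sketch of the OCR step only (the filename `plate.jpg` is a hypothetical path, assuming a cropped plate image is already available):

```python
import cv2
import easyocr

# Build an English reader once; EasyOCR downloads its detection and
# recognition models on first use.
reader = easyocr.Reader(['en'])

# Load a cropped license-plate image (hypothetical path).
plate = cv2.imread('plate.jpg')

# readtext returns a list of (bounding_box, text, confidence) tuples.
for bbox, text, confidence in reader.readtext(plate):
    print(f'{text} ({confidence:.2f})')
```

In the accompanying notebook, the same `readtext` call is applied to each plate region cropped out of a video frame by the TensorFlow Lite detector.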
