filepath (string) | text (string) | length (int64) | complexity (string) | generation_mode (string) | sample_number (int64) | width (int64) | height (int64) | cycle (int64)
---|---|---|---|---|---|---|---|---
samples/KTQe/00001.png | KTQe | 4 | easy | random | 1 | 199 | 59 | 1 |
samples/HkZEZG/00001.png | HkZEZG | 6 | easy | random | 1 | 235 | 82 | 1 |
samples/KQlq/00001.png | KQlq | 4 | easy | random | 1 | 280 | 55 | 1 |
samples/uFCw9SDS/00001.png | uFCw9SDS | 8 | easy | random | 1 | 250 | 78 | 1 |
samples/rAFhqEb/00001.png | rAFhqEb | 7 | easy | random | 1 | 299 | 69 | 1 |
samples/QUadpc/00001.png | QUadpc | 6 | easy | random | 1 | 217 | 70 | 1 |
samples/67NR9OD/00001.png | 67NR9OD | 7 | easy | random | 1 | 256 | 96 | 1 |
samples/2S33OY/00001.png | 2S33OY | 6 | easy | random | 1 | 277 | 72 | 1 |
samples/Ty1tFg/00001.png | Ty1tFg | 6 | easy | random | 1 | 175 | 59 | 1 |
samples/OxHdI/00001.png | OxHdI | 5 | easy | random | 1 | 177 | 91 | 1 |
samples/rx8hv/00001.png | rx8hv | 5 | easy | random | 1 | 252 | 67 | 1 |
samples/qftyO76/00001.png | qftyO76 | 7 | easy | random | 1 | 252 | 66 | 1 |
samples/FNv4/00001.png | FNv4 | 4 | easy | random | 1 | 285 | 76 | 1 |
samples/KrXE8ymS/00001.png | KrXE8ymS | 8 | easy | random | 1 | 162 | 91 | 1 |
samples/BHKQ/00001.png | BHKQ | 4 | easy | random | 1 | 257 | 72 | 1 |
samples/4J7IITZI/00001.png | 4J7IITZI | 8 | easy | random | 1 | 161 | 96 | 1 |
samples/w2K0/00001.png | w2K0 | 4 | easy | random | 1 | 175 | 78 | 1 |
samples/p9y8fF4/00001.png | p9y8fF4 | 7 | easy | random | 1 | 188 | 61 | 1 |
samples/fvr3/00001.png | fvr3 | 4 | easy | random | 1 | 269 | 54 | 1 |
samples/ZW6LD5lw/00001.png | ZW6LD5lw | 8 | easy | random | 1 | 180 | 65 | 1 |
samples/3Gr76/00001.png | 3Gr76 | 5 | easy | random | 1 | 272 | 63 | 1 |
samples/nN7K9eaE/00001.png | nN7K9eaE | 8 | easy | random | 1 | 165 | 73 | 1 |
samples/hnvzOPZh/00001.png | hnvzOPZh | 8 | easy | random | 1 | 199 | 92 | 1 |
samples/9WfrijJ/00001.png | 9WfrijJ | 7 | easy | random | 1 | 210 | 84 | 1 |
samples/z8unL/00001.png | z8unL | 5 | easy | random | 1 | 279 | 92 | 1 |
samples/RzDe/00001.png | RzDe | 4 | easy | random | 1 | 193 | 75 | 1 |
samples/rS9f/00001.png | rS9f | 4 | easy | random | 1 | 214 | 54 | 1 |
samples/ozrjeh/00001.png | ozrjeh | 6 | easy | random | 1 | 252 | 100 | 1 |
samples/spyCryO/00001.png | spyCryO | 7 | easy | random | 1 | 163 | 53 | 1 |
samples/gVMQf/00001.png | gVMQf | 5 | easy | random | 1 | 233 | 88 | 1 |
samples/6LAG/00001.png | 6LAG | 4 | easy | random | 1 | 254 | 64 | 1 |
samples/rsZxaCt/00001.png | rsZxaCt | 7 | easy | random | 1 | 280 | 100 | 1 |
samples/GcfEPW5/00001.png | GcfEPW5 | 7 | easy | random | 1 | 208 | 87 | 1 |
samples/KqN0cC/00001.png | KqN0cC | 6 | easy | random | 1 | 283 | 94 | 1 |
samples/Hqzc/00001.png | Hqzc | 4 | easy | random | 1 | 256 | 62 | 1 |
samples/h8Bz/00001.png | h8Bz | 4 | easy | random | 1 | 187 | 64 | 1 |
samples/o0IGGF/00001.png | o0IGGF | 6 | easy | random | 1 | 198 | 81 | 1 |
samples/R0BdKNuI/00001.png | R0BdKNuI | 8 | easy | random | 1 | 230 | 68 | 1 |
samples/PBTEG/00001.png | PBTEG | 5 | easy | random | 1 | 215 | 100 | 1 |
samples/GHivj7D/00001.png | GHivj7D | 7 | easy | random | 1 | 284 | 56 | 1 |
samples/Htqi4fp/00001.png | Htqi4fp | 7 | easy | random | 1 | 223 | 76 | 1 |
samples/3ew5U30/00001.png | 3ew5U30 | 7 | easy | random | 1 | 178 | 63 | 1 |
samples/2mlcq/00001.png | 2mlcq | 5 | easy | random | 1 | 177 | 91 | 1 |
samples/N8VByr/00001.png | N8VByr | 6 | easy | random | 1 | 254 | 58 | 1 |
samples/EmG7R88/00001.png | EmG7R88 | 7 | easy | random | 1 | 181 | 100 | 1 |
samples/WaJa/00001.png | WaJa | 4 | easy | random | 1 | 297 | 65 | 1 |
samples/PqahO9Zg/00001.png | PqahO9Zg | 8 | easy | random | 1 | 207 | 50 | 1 |
samples/dX9YFgM/00001.png | dX9YFgM | 7 | easy | random | 1 | 285 | 81 | 1 |
samples/h1SvoojM/00001.png | h1SvoojM | 8 | easy | random | 1 | 167 | 79 | 1 |
samples/aR5Q/00001.png | aR5Q | 4 | easy | random | 1 | 174 | 53 | 1 |
samples/3hTDOEJ/00001.png | 3hTDOEJ | 7 | easy | random | 1 | 247 | 64 | 1 |
samples/KdNKBk/00001.png | KdNKBk | 6 | easy | random | 1 | 181 | 78 | 1 |
samples/wphhQpTb/00001.png | wphhQpTb | 8 | easy | random | 1 | 277 | 65 | 1 |
samples/sTVPqO08/00001.png | sTVPqO08 | 8 | easy | random | 1 | 175 | 51 | 1 |
samples/t4ie/00001.png | t4ie | 4 | easy | random | 1 | 195 | 62 | 1 |
samples/pUAvivt/00001.png | pUAvivt | 7 | easy | random | 1 | 282 | 57 | 1 |
samples/2m1VVJ7/00001.png | 2m1VVJ7 | 7 | easy | random | 1 | 223 | 92 | 1 |
samples/J0rSKpF/00001.png | J0rSKpF | 7 | easy | random | 1 | 248 | 92 | 1 |
samples/gZpRx/00001.png | gZpRx | 5 | easy | random | 1 | 154 | 92 | 1 |
samples/vHDx939/00001.png | vHDx939 | 7 | easy | random | 1 | 161 | 84 | 1 |
samples/UoJQ2Pmz/00001.png | UoJQ2Pmz | 8 | easy | random | 1 | 296 | 87 | 1 |
samples/ii3idGQ/00001.png | ii3idGQ | 7 | easy | random | 1 | 272 | 64 | 1 |
samples/fbYKjN4/00001.png | fbYKjN4 | 7 | easy | random | 1 | 164 | 97 | 1 |
samples/wzUgn/00001.png | wzUgn | 5 | easy | random | 1 | 156 | 92 | 1 |
samples/wzi67n/00001.png | wzi67n | 6 | easy | random | 1 | 192 | 96 | 1 |
samples/CS0fJx9F/00001.png | CS0fJx9F | 8 | easy | random | 1 | 159 | 94 | 1 |
samples/khYAtn/00001.png | khYAtn | 6 | easy | random | 1 | 206 | 59 | 1 |
samples/3gl3F/00001.png | 3gl3F | 5 | easy | random | 1 | 251 | 61 | 1 |
samples/nClW/00001.png | nClW | 4 | easy | random | 1 | 262 | 74 | 1 |
samples/tNx4igIB/00001.png | tNx4igIB | 8 | easy | random | 1 | 260 | 68 | 1 |
samples/uvZoKk/00001.png | uvZoKk | 6 | easy | random | 1 | 255 | 100 | 1 |
samples/Tnv2xJ/00001.png | Tnv2xJ | 6 | easy | random | 1 | 215 | 75 | 1 |
samples/CzOUBpS/00001.png | CzOUBpS | 7 | easy | random | 1 | 245 | 50 | 1 |
samples/EwZNoI/00001.png | EwZNoI | 6 | easy | random | 1 | 180 | 54 | 1 |
samples/tdNof/00001.png | tdNof | 5 | easy | random | 1 | 203 | 58 | 1 |
samples/5cmSo/00001.png | 5cmSo | 5 | easy | random | 1 | 258 | 76 | 1 |
samples/UGxC60z/00001.png | UGxC60z | 7 | easy | random | 1 | 178 | 100 | 1 |
samples/i1OkaqQ/00001.png | i1OkaqQ | 7 | easy | random | 1 | 247 | 77 | 1 |
samples/BeV2/00001.png | BeV2 | 4 | easy | random | 1 | 229 | 84 | 1 |
samples/vqwNYB/00001.png | vqwNYB | 6 | easy | random | 1 | 256 | 94 | 1 |
samples/a3kMi/00001.png | a3kMi | 5 | easy | random | 1 | 226 | 99 | 1 |
samples/ssGWeYv/00001.png | ssGWeYv | 7 | easy | random | 1 | 159 | 74 | 1 |
samples/5bLV4MU/00001.png | 5bLV4MU | 7 | easy | random | 1 | 197 | 75 | 1 |
samples/iB4WRyfi/00001.png | iB4WRyfi | 8 | easy | random | 1 | 175 | 75 | 1 |
samples/hW71ldc/00001.png | hW71ldc | 7 | easy | random | 1 | 229 | 91 | 1 |
samples/DmXZF2/00001.png | DmXZF2 | 6 | easy | random | 1 | 164 | 70 | 1 |
samples/oTixM/00001.png | oTixM | 5 | easy | random | 1 | 155 | 67 | 1 |
samples/t6mDt/00001.png | t6mDt | 5 | easy | random | 1 | 179 | 73 | 1 |
samples/X7Ibcfm/00001.png | X7Ibcfm | 7 | easy | random | 1 | 272 | 66 | 1 |
samples/VCkcUfW/00001.png | VCkcUfW | 7 | easy | random | 1 | 218 | 61 | 1 |
samples/G9Q5QniH/00001.png | G9Q5QniH | 8 | easy | random | 1 | 297 | 61 | 1 |
samples/UXk4lO/00001.png | UXk4lO | 6 | easy | random | 1 | 282 | 52 | 1 |
samples/oF9zB/00001.png | oF9zB | 5 | easy | random | 1 | 290 | 61 | 1 |
samples/7vLmXCY/00001.png | 7vLmXCY | 7 | easy | random | 1 | 285 | 54 | 1 |
samples/BwDSSpNE/00001.png | BwDSSpNE | 8 | easy | random | 1 | 293 | 73 | 1 |
samples/OdKdqEO7/00001.png | OdKdqEO7 | 8 | easy | random | 1 | 283 | 61 | 1 |
samples/slCZ/00001.png | slCZ | 4 | easy | random | 1 | 237 | 99 | 1 |
samples/hVO5Gc/00001.png | hVO5Gc | 6 | easy | random | 1 | 227 | 98 | 1 |
samples/O3jEFCs4/00001.png | O3jEFCs4 | 8 | easy | random | 1 | 197 | 92 | 1 |
samples/ErDMwXN/00001.png | ErDMwXN | 7 | easy | random | 1 | 175 | 77 | 1 |
1.8 Million Synthetic Alphanumeric CAPTCHAs

What's This All About?
Ever needed a big dataset of CAPTCHA images to train a solid recognition model but never managed to get your grubby hands on it? Me too. That's why I created this one.
This is a synthetic, artificially generated dataset of ~1.8 million CAPTCHA images. Every image includes a random, case-sensitive sequence of letters (a-z, A-Z) and numbers (0-9). It was specifically built for training and testing Optical Character Recognition (OCR) models, especially models like CRNN with a CTC loss function.
What Can You Do With It?
The task here is image-to-text, i.e. Optical Character Recognition (OCR). You can use this dataset to:
Train a high-accuracy CAPTCHA solver from scratch.
Benchmark different OCR model architectures.
Experiment with data augmentation techniques at scale.
Pre-train a model on a more focused text recognition task.
How is the Data Organized?
The dataset is straightforward. There are two main files in the repository:
samples.zip: A big zip file containing all 1.8 million image files.
dataset.csv: Your gold key! A single CSV file that links every image file to its correct text label. This is what you'll use to load the data (see the quick loading sketch just below).
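If you just want to peek at the labels before committing to the full download, here's a minimal sketch (assuming the datasets library is installed); it mirrors the load_dataset call used in the Colab script further down:

# A minimal sketch: load only the metadata/labels, no images.
from datasets import load_dataset

ds = load_dataset("szili2011/captcha-ocr-dataset", data_files="dataset.csv")
print(ds["train"][0])  # e.g. {'filepath': 'samples/.../00001.png', 'text': '...', 'length': ...}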
What's in the Files? (The Data Fields)
The dataset.csv file has only a few columns, but you're really interested in just two of them (a small example of pairing an image with its label follows this list):
filepath (string): The path to the image file as it was originally structured (e.g., samples/W9friJ/00001.png).
text (string): The actual text—the string of characters you see in the image (e.g., "W9friJ").
length (int): The length of the text in characters.
(Ignore these): The width, height, and cycle columns (along with complexity, generation_mode, and sample_number) were part of my generation process. You won't need them for training, since you'll be resizing the images anyway.
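And here's a tiny sketch of pairing an image with its label once samples.zip has been extracted next to dataset.csv (the paths and the pandas/PIL approach are my assumptions, not part of the original pipeline):

# A minimal sketch, assuming samples.zip was unzipped in the same folder as dataset.csv.
import pandas as pd
from PIL import Image

df = pd.read_csv("dataset.csv")
row = df.iloc[0]
img = Image.open(row["filepath"])   # e.g. samples/KTQe/00001.png
print(row["text"], img.size)        # the label and the image's (width, height)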
A Peek at the Data
Things to Keep in Mind (Limitations & Biases)
This dataset is big, but it's not perfect. Here are some caveats:
It's Synthetic: These images are computer-generated. A model trained on this data will likely do well on similar-looking synthetic CAPTCHAs, but it may need fine-tuning to handle real-world CAPTCHAs from websites, which use different fonts, noise patterns, and distortions.
Relatively Clean: These CAPTCHAs are fairly straightforward. They don't have dense occlusions, heavy background noise, or extreme character distortion.
English Alphanumeric Only: The characters are limited to a-z, A-Z, and 0-9. There are no special characters or letters from other languages.
How Can You Use It? (License)
This dataset is published under the Apache 2.0 license. This is an extremely permissive license that allows you to use, modify, and distribute the work for any purpose (private or commercial) with very few restrictions. Basically, go make something amazing with it!
How to Cite This Work
If you use this dataset in your work or research, please give credit with a citation. Here is a BibTeX entry you can use:
@misc{szilard_2024_captcha,
author = {Pálnagy Szilárd},
title = {1.8 Million Synthetic Alphanumeric CAPTCHAs},
year = {2024},
publisher = {Hugging Face},
journal = {Hugging Face Hub},
howpublished = {\url{https://huggingface.co/datasets/szili2011/captcha-ocr-dataset}},
}
If you're using Google Colab to train models, use the code below:
# =============================================================================#
# Step 1: Install and Import Everything We Need
# ==============================================================================
# First, let's get our environment ready by installing the Hugging Face libraries.
print("--> Step 1: Installing libraries.")
!pip install datasets -q
!pip install huggingface_hub -q
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import os
import shutil
from datasets import load_dataset
from huggingface_hub import hf_hub_download, login
from google.colab import drive
print("
--> Libraries installed and imported successfully!")
# =================================================================================
# Step 2: Connect to Hugging Face and Google Drive
# ==============================================================================
# We will authenticate to Hugging Face so that we are able to download your dataset.
# You will be prompted to insert a token, which you may retrieve from:
# https://huggingface.co/settings/tokens
print("
--> Step 2: Connecting to services.")
login()
# We'll also connect to Google Drive to store our model once it's trained.
drive.mount('/content/drive')
print("--> Services connected!")
# ==============================================================================
# Step 3: Load Your Dataset from the Hub
# ==============================================================================
# Here, we load your wonderful dataset directly from the Hugging Face Hub.
print("
--> Step 3: Loading the dataset.")
DATASET_REPO = "szili2011/captcha-ocr-dataset"
# We first load the metadata from your 'dataset.csv' file.
# This won't download the images yet, just the text information.
print(" - Loading metadata from dataset.csv.")
dataset_info = load_dataset(DATASET_REPO, data_files="dataset.csv")
# Now we download the huge 'samples.zip' file. This will take a while!
# hf_hub_download is smart and caches the file so you don't re-download it every time.
print(" - Downloading samples.zip (this may take a long time).")
zip_path = hf_hub_download(repo_id=DATASET_REPO, filename="samples.zip", repo_type="dataset")
# Unzip the images into Colab's fast local storage for fast access during training.
UNZIPPED_PATH = "/content/images"
print(f" - Unzipping images to {UNZIPPED_PATH}.")
if os.path.exists(UNZIPPED_PATH):
shutil.rmtree(UNZIPPED_PATH) # Cleaning up from previous runs
shutil.unpack_archive(zip_path, UNZIPPED_PATH, 'zip')
# The images are now at '/content/images/samples/'
# Now let's create a full path for each image in our dataset object.
SAMPLES_PATH = os.path.join(UNZIPPED_PATH, "samples")
def create_full_path(example):
    # The 'filepath' from your CSV is 'samples/xyz/123.png'.
    # We'll build the full, correct path to where we just unzipped the files.
    example['image_path'] = os.path.join(SAMPLES_PATH, *example['filepath'].split('/')[1:])
    return example
print(" - Creating full image paths.")
dataset = dataset_info.map(create_full_path, num_proc=4) # num_proc > 1 speeds this up
# Clean up columns we no longer need.
dataset = dataset.remove_columns(['filepath', 'width', 'height', 'cycle', 'complexity', 'generation_mode', 'sample_number'])
# Split the data into a training set (95%) and a validation set (5%)
dataset = dataset["train"].train_test_split(test_size=0.05, seed=42)
train_ds = dataset["train"]
val_ds = dataset["test"]
print(f"
--> Dataset is ready! Training samples: {len(train_ds)}, Validation samples: {len(val_ds)}")
# =============================================================================
# Step 4: Create the character vocabulary
# ===========================================================================
# A model can't read 'A', 'B', 'C'. It requires numbers.
# Here, we create a mapping from each character to a special number.
print("
--> Step 4: Creating character vocabulary.")
# Get all the unique characters in the training text
all_text = "".join(train_ds["text"])
characters = sorted(list(set(all_text)))
# The StringLookup layer does the heavy lifting of creating the mapping.
char_to_num = layers.StringLookup(vocabulary=list(characters), mask_token=None)
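# With mask_token=None and the default single OOV slot, index 0 of this vocabulary is
# the [UNK] token, so every real character maps to an index >= 1 (we rely on this when
# labels get padded with 0 further down).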
num_to_char = layers.StringLookup(vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True)
print(f" - Vocabulary size: {char_to_num.vocabulary_size()}")
print(f" - Vocabulary: {''.join(char_to_num.get_vocabulary())}")
# ==============================================================================
# Step 5: The Preprocessing Pipeline (tf.data)
# ==============================================================================
# This is our data assembly line. It will convert image paths and labels into
# ready-to-use tensors for our model.
print("
--> Step 5: Creating TensorFlow data pipeline.")
# Constant values for our images and batches
IMG_HEIGHT = 50
IMG_WIDTH = 200
BATCH_SIZE = 128
def encode_for_tf(example):
    # 1. Read the image file from its path
    img = tf.io.read_file(example['image_path'])
    # 2. Decode it as a grayscale PNG (1 channel)
    img = tf.io.decode_png(img, channels=1)
    # 3. Convert pixels to the [0, 1] range
    img = tf.image.convert_image_dtype(img, tf.float32)
    # 4. Resize to a consistent size
    img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])
    # 5. Transpose the image! This is a key step for CRNNs.
    #    We want the RNN to see a sequence of vertical slices of the image.
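    # The transpose below gives the tensor shape (IMG_WIDTH, IMG_HEIGHT, 1): 200 "time
    # steps" (one per pixel of width), each a 50-pixel-tall grayscale column.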
    img = tf.transpose(img, perm=[1, 0, 2])
    # 6. Convert the text label into a sequence of numbers according to our vocabulary
    label_str = tf.cast(example['text'], tf.string)
    label = char_to_num(tf.strings.unicode_split(label_str, input_encoding="UTF-8"))
    return {"image": img, "label": label}
# Convert our Hugging Face Datasets to TensorFlow Datasets.
# Note: we map encode_for_tf over *unbatched* examples (tf.io.read_file needs a scalar
# path) and only batch afterwards; this assumes a recent `datasets` version where
# batch_size is optional in to_tf_dataset. Because the captcha texts vary in length
# (4-8 characters), we use padded_batch, which pads each label with 0 (the [UNK]
# index) up to the longest label in the batch.
# The pipeline will shuffle, batch, and prefetch data for maximum efficiency.
tf_train_dataset = (
    train_ds.to_tf_dataset(columns=['image_path', 'text'], shuffle=True)
    .map(encode_for_tf, num_parallel_calls=tf.data.AUTOTUNE)
    .padded_batch(BATCH_SIZE)
    .prefetch(tf.data.AUTOTUNE)
)
tf_val_dataset = (
    val_ds.to_tf_dataset(columns=['image_path', 'text'], shuffle=False)
    .map(encode_for_tf, num_parallel_calls=tf.data.AUTOTUNE)
    .padded_batch(BATCH_SIZE)
    .prefetch(tf.data.AUTOTUNE)
)
print("--> Data pipeline is built!")
# ===============================================================================
# Step 6: The Model Architecture (The Brains of the Operation)
# ===============================================================================
# Here's the CRNN model itself. It consists of three sections:
# 1. CNN (Convolutional) layers to "see" features in the image.
# 2. RNN (Recurrent) layers to "read" the sequence of features.
# 3. A final layer for CTC loss computation.
print("
--> Step 6: Building the CRNN model.")
class CTCLayer(layers.Layer):
    """A custom Keras layer to calculate the CTC loss."""

    def __init__(self, name=None):
        super().__init__(name=name)
        self.loss_fn = keras.backend.ctc_batch_cost

    def call(self, y_true, y_pred):
        batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
        input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
        input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
        # Labels were padded with 0 (the [UNK] index) by padded_batch, and real characters
        # always map to indices >= 1, so each label's true length is its count of
        # non-zero entries rather than the padded width.
        label_length = tf.math.count_nonzero(y_true, axis=1, keepdims=True, dtype="int64")
        loss = self.loss_fn(y_true, y_pred, input_length, label_length)
        self.add_loss(loss)
        return y_pred
def build_model():
    # Inputs to the model
    input_img = layers.Input(shape=(IMG_WIDTH, IMG_HEIGHT, 1), name="image", dtype="float32")
    labels = layers.Input(name="label", shape=(None,), dtype="float32")
    # Part 1: The CNN to extract visual features
    x = layers.Conv2D(32, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same")(input_img)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same")(x)
    x = layers.MaxPooling2D((2, 2))(x)
    # Reshape the output of the CNN to be a sequence for the RNN
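    # After two 2x2 max-pools the feature map is (IMG_WIDTH // 4, IMG_HEIGHT // 4, 64),
    # i.e. (50, 12, 64) for our 200x50 inputs, so each of the 50 time steps becomes a
    # 12 * 64 = 768-dimensional feature vector for the RNN below.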
    new_shape = ((IMG_WIDTH // 4), (IMG_HEIGHT // 4) * 64)
    x = layers.Reshape(target_shape=new_shape)(x)
    x = layers.Dense(64, activation="relu")(x)
    x = layers.Dropout(0.2)(x)
    # Part 2: The RNN to read the sequence
    x = layers.Bidirectional(layers.LSTM(128, return_sequences=True, dropout=0.25))(x)
    x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, dropout=0.25))(x)
    # Part 3: The output layer
    # The number of units is our vocabulary size + 1 (for the special CTC blank token)
    x = layers.Dense(char_to_num.vocabulary_size() + 1, activation="softmax", name="dense_output")(x)
    # Add the CTC loss calculation as a layer
    output = CTCLayer(name="ctc_loss")(labels, x)
    # Create the final model
    model = keras.models.Model(inputs=[input_img, labels], outputs=output, name="captcha_ocr_model")
    # Compile the model with the Adam optimizer
    model.compile(optimizer=keras.optimizers.Adam())
    return model
# Let's build it and see a summary
model = build_model()
model.summary()
# ==============================================================================
# Step 7: Train the Model!
# ==============================================================================
# This is the moment we've been waiting for. Let's start training.
print("
--> Step 7: Starting training.")
EPOCHS = 20 # You can increase this for best results
# This callback will save the best version of our model to your Google Drive
# It monitors the validation loss and only saves when it improves.
checkpoint_path = "/content/drive/My Drive/captcha_model_best.h5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path,
    save_weights_only=False,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    verbose=1
)
# This callback will stop training early if the model doesn't improve.
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=5, restore_best_weights=True, verbose=1
)
# Let's start!
history = model.fit(
    tf_train_dataset,
    validation_data=tf_val_dataset,
    epochs=EPOCHS,
    callbacks=[model_checkpoint_callback, early_stopping_callback],
)
print("
--> Training complete! Best model saved to your Google Drive.")
# ==============================================================================
# Step 8: Check the Results (Inference)
# =============================================================================
# Now that we have a trained model, let's check how well it does.
print("
--> Step 8: Checking some predictions.")
# First, we construct a prediction-only model from our trained model.
# This model takes an image and outputs the raw predictions.
prediction_model = keras.models.Model(
model.get_layer(name="image").input, model.get_layer(name="dense_output").output
)
# A helper function to decode the raw predictions into human-readable text
def decode_batch_predictions(pred):
    input_len = np.ones(pred.shape[0]) * pred.shape[1]
    # We perform a greedy search for simplicity. Beam search can give more accurate results.
    results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0]
    output_text = []
    for res in results:
        # The [UNK] token comes from our StringLookup layer, so we can safely strip it.
        res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8").replace("[UNK]", "")
        output_text.append(res)
    return output_text
# Let's take one batch from our validation set and examine the predictions
for batch in tf_val_dataset.take(1):
    batch_images = batch["image"]
    batch_labels = batch["label"]
    preds = prediction_model.predict(batch_images)
    pred_texts = decode_batch_predictions(preds)
    orig_texts = []
    for label in batch_labels:
        label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8").replace("[UNK]", "")
        orig_texts.append(label)
    # Show the first 10 predictions
    for i in range(min(10, len(pred_texts))):
        print(f"    Original: {orig_texts[i]:<10} | Predicted: {pred_texts[i]}")
Good Luck!