车牌识别tensorflow源码_tensorflow车牌识别源码-程序员宅基地

技术标签: # 图像处理  tensorflow  # TensorFlow1.x  车牌识别  # 算法程序实现  lpr  

1 小序

1.0 环境

  • Ubuntu18.04
  • Tensorflow(cpu)1.12.0
  • python3.x
  • OpenCV4.0.0

1.1 项目目录

lpr_tensorflow
├── font
├── genplate.py
├── images
├── input_data.py
├── logs
├── model
├── network_model.py
├── NoPlates
├── plate_generate.py
├── __pycache__
└── train_model.py

其中 font、images、NoPlates 三个目录可从项目 https://github.com/szad670401/end-to-end-for-chinese-plate-recognition 获取;genplate.py 文件使用下面的代码即可。

1 生成车牌数据

1.0 生成图像函数

【Demo:genplate.py】

#coding=utf-8
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import cv2
import numpy as np
import os
from math import *
from six.moves import xrange

# Class index for every plate character: 31 province abbreviations first,
# then digits 0-9, then letters A-Z excluding I and O (65 classes total).
_PLATE_CHARS = ("京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新"
                "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ")
index = {ch: i for i, ch in enumerate(_PLATE_CHARS)}

# Ordered list of the 65 plate characters; list position == class index
# (31 provinces, then digits, then letters without I and O).
chars = list("京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新"
             "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ")

def AddSmudginess(img, Smu):
    """Blend a random 50x50 patch of the smudge texture *Smu* into *img*.

    The image is shrunk to 50x50, inverted, AND-ed with the patch and
    inverted back, so dark smudge pixels darken the plate.
    """
    row0 = r(Smu.shape[0] - 50)
    col0 = r(Smu.shape[1] - 50)
    patch = cv2.resize(Smu[row0:row0 + 50, col0:col0 + 50], (50, 50))
    inverted = cv2.bitwise_not(cv2.resize(img, (50, 50)))
    blended = cv2.bitwise_and(patch, inverted)
    return cv2.bitwise_not(blended)

def rot(img, angel, shape, max_angel):
    """Shear *img* horizontally via a perspective warp, simulating rotation.

    angel: shear angle in degrees (sign picks the shear direction).
    shape: source image shape (h, w, ...).
    max_angel: maximum possible angle; fixes the widened output canvas.
    """
    src_w, src_h = shape[1], shape[0]
    # NOTE: pi is approximated as 3.14 here, as in the original code.
    dst_size = (src_w + int(src_h * cos((float(max_angel) / 180) * 3.14)), src_h)
    interval = abs(int(sin((float(angel) / 180) * 3.14) * src_h))
    pts1 = np.float32([[0, 0], [0, src_h], [src_w, 0], [src_w, src_h]])
    if angel > 0:
        pts2 = np.float32([[interval, 0], [0, dst_size[1]],
                           [dst_size[0], 0], [dst_size[0] - interval, src_h]])
    else:
        pts2 = np.float32([[0, 0], [interval, dst_size[1]],
                           [dst_size[0] - interval, 0], [dst_size[0], src_h]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    return cv2.warpPerspective(img, M, dst_size)

def rotRandrom(img, factor, size):
    """Apply a random perspective jitter: each corner moves by up to *factor* px.

    size: output size (width, height) passed straight to warpPerspective.
    NOTE(review): the corner coordinates index size[0]/size[1] in a way that
    looks x/y-swapped relative to (width, height) — preserved as-is; confirm
    against the original project before changing.
    """
    shp = size
    src = np.float32([[0, 0], [0, shp[0]], [shp[1], 0], [shp[1], shp[0]]])
    # Draw the eight offsets in the same order as the original expression so
    # the random stream stays identical.
    d1, d2, d3, d4, d5, d6, d7, d8 = [r(factor) for _ in range(8)]
    dst = np.float32([[d1, d2],
                      [d3, shp[0] - d4],
                      [shp[1] - d5, d6],
                      [shp[1] - d7, shp[0] - d8]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, size)

def tfactor(img):
    """Randomly rescale hue/saturation/value to vary plate color and lighting."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Per-channel scale factors; the three random() draws keep the original order.
    scales = (0.8 + np.random.random() * 0.2,
              0.3 + np.random.random() * 0.7,
              0.2 + np.random.random() * 0.8)
    for ch, s in enumerate(scales):
        hsv[:, :, ch] = hsv[:, :, ch] * s
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

def random_envirment(img, data_set):
    """Fill the black pixels of *img* with a random background image.

    data_set: list of file paths; one is chosen at random, loaded and
    resized to the plate's size.
    """
    bg_idx = r(len(data_set))
    env = cv2.resize(cv2.imread(data_set[bg_idx]), (img.shape[1], img.shape[0]))
    # 255 where the plate is black, 0 elsewhere — selects background pixels.
    mask = (img == 0).astype(np.uint8) * 255
    background = cv2.bitwise_and(mask, env)
    return cv2.bitwise_or(background, img)

def GenCh(f, val):
    """Render the Chinese character *val* with font *f* as a 23x70 RGB array.

    Drawn on a wider 45x70 canvas first, then squeezed to 23x70.
    """
    canvas = Image.new("RGB", (45, 70), (255, 255, 255))
    ImageDraw.Draw(canvas).text((0, 3), val, (0, 0, 0), font=f)
    return np.array(canvas.resize((23, 70)))
def GenCh1(f, val):
    """Render the alphanumeric character *val* with font *f* as a 23x70 RGB array."""
    canvas = Image.new("RGB", (23, 70), (255, 255, 255))
    ImageDraw.Draw(canvas).text((0, 2), val, (0, 0, 0), font=f)
    return np.array(canvas)
def AddGauss(img, level):
    """Box-blur *img* with an odd kernel size derived from *level* (2*level+1)."""
    k = level * 2 + 1
    return cv2.blur(img, (k, k))

def r(val):
    """Return a uniform random integer in [0, val)."""
    return int(val * np.random.random())

def AddNoiseSingleChannel(single):
    """Add bounded random noise to one uint8 channel.

    The noise is normalized to [0, 1] and scaled by 255 - max(channel), so
    pixel + noise can never exceed 255 (no uint8 wrap-around).
    """
    headroom = 255 - single.max()
    raw = np.random.normal(0, 1 + r(6), single.shape)
    normalized = (raw - raw.min()) / (raw.max() - raw.min())
    scaled = (headroom * normalized).astype(np.uint8)
    return single + scaled

def addNoise(img, sdev=0.5, avg=10):
    """Add noise to each channel of a BGR image in place and return it.

    sdev and avg are unused; kept for interface compatibility with callers.
    """
    for ch in range(3):
        img[:, :, ch] = AddNoiseSingleChannel(img[:, :, ch])
    return img

class GenPlate:
    """Renders synthetic Chinese license-plate images and augments them with
    perspective warps, color jitter, random backgrounds, blur and noise."""

    def __init__(self, fontCh, fontEng, NoPlates):
        """fontCh/fontEng: font file paths for the Chinese and alphanumeric
        glyphs; NoPlates: directory walked recursively for background images."""
        self.fontC = ImageFont.truetype(fontCh, 43, 0)
        self.fontE = ImageFont.truetype(fontEng, 60, 0)
        # Blank 226x70 white canvas the characters are drawn onto.
        self.img = np.array(Image.new("RGB", (226, 70), (255, 255, 255)))
        self.bg = cv2.resize(cv2.imread("./images/template.bmp"), (226, 70))
        self.smu = cv2.imread("./images/smu2.jpg")
        self.noplates_path = []
        for parent, parent_folder, filenames in os.walk(NoPlates):
            for filename in filenames:
                path = parent + "/" + filename
                self.noplates_path.append(path)

    def draw(self, val):
        """Draw the 7 characters of plate string *val* onto self.img and
        return the canvas (province glyph first, then 6 alphanumerics)."""
        offset = 2
        self.img[0:70, offset + 8:offset + 8 + 23] = GenCh(self.fontC, val[0])
        self.img[0:70, offset + 8 + 23 + 6:offset + 8 + 23 + 6 + 23] = GenCh1(self.fontE, val[1])
        for i in range(5):
            base = offset + 8 + 23 + 6 + 23 + 17 + i * 23 + i * 6
            self.img[0:70, base:base + 23] = GenCh1(self.fontE, val[i + 2])
        return self.img

    def generate(self, text):
        """Render plate string *text* and apply the augmentation pipeline.

        Returns the augmented BGR image, or None when len(text) != 7
        (callers rely on a 7-character plate string).
        """
        if len(text) == 7:
            fg = self.draw(text)
            fg = cv2.bitwise_not(fg)
            com = cv2.bitwise_or(fg, self.bg)
            com = rot(com, r(60) - 30, com.shape, 30)
            com = rotRandrom(com, 10, (com.shape[1], com.shape[0]))
            com = tfactor(com)
            com = random_envirment(com, self.noplates_path)
            com = AddGauss(com, 1 + r(4))
            com = addNoise(com)
            return com

    def genPlateString(self, pos, val):
        """Build a random 7-char plate string; if pos != -1, force character
        *val* at that position. Index 0 draws a province, index 1 a letter,
        the rest digits or letters."""
        plateStr = ""
        box = [0, 0, 0, 0, 0, 0, 0]
        if pos != -1:
            box[pos] = 1
        for unit, cpos in zip(box, xrange(len(box))):
            if unit == 1:
                plateStr += val
            else:
                if cpos == 0:
                    plateStr += chars[r(31)]
                elif cpos == 1:
                    plateStr += chars[41 + r(24)]
                else:
                    plateStr += chars[31 + r(34)]
        return plateStr

    def genBatch(self, batchSize, pos, charRange, outputPath, size):
        """Generate *batchSize* plates, write them as JPEGs to *outputPath*
        and return (plate_strings, rgb_images).

        BUG FIX: the original called the module-level global ``G`` instead of
        ``self``, so genBatch broke unless the instance happened to be named G.
        pos/charRange are currently unused (a fully random plate is generated).
        """
        if not os.path.exists(outputPath):
            os.mkdir(outputPath)
        l_plateStr = []
        l_plateImg = []
        for i in range(batchSize):
            plateStr = self.genPlateString(-1, -1)
            img = self.generate(plateStr)
            img = cv2.resize(img, size)
            filename = os.path.join(outputPath, str(i).zfill(4) + '.' + plateStr + ".jpg")
            cv2.imwrite(filename, img)
            l_plateStr.append(plateStr)
            # cv2 returns BGR; convert so the returned array is RGB.
            l_plateImg.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        return l_plateStr, l_plateImg

1.1 生成训练数据

【Demo:input_data.py】

import numpy as np
import cv2
from genplate import *

#产生用于训练的数据
class OCRIter():
    """Produces batches of synthetic plate images and labels for training."""

    def __init__(self, batch_size, height, width):
        super(OCRIter, self).__init__()
        self.genplate = GenPlate("./font/platech.ttf", "./font/platechar.ttf", "./NoPlates")
        self.batch_size = batch_size
        self.height = height
        self.width = width

    def iter(self):
        """Return (images, labels): two parallel lists of length batch_size."""
        samples = [gen_sample(self.genplate, self.width, self.height)
                   for _ in range(self.batch_size)]
        images = [img for _, img in samples]
        labels = [num for num, _ in samples]
        return images, labels
def rand_range(lo, hi):
    """Return a uniform random integer in [lo, hi)."""
    span = hi - lo
    return lo + int(np.random.random() * span)
def gen_rand():
    """Draw a random 7-character plate.

    Returns (plate_string, label_list): label[0] is a province index (0-30),
    label[1] a letter index (41-64), labels 2-6 digits or letters (31-64).
    The three rand_range calls keep the original random-draw order.
    """
    label = [rand_range(0, 31), rand_range(41, 65)]
    label += [rand_range(31, 65) for _ in range(5)]
    name = "".join(chars[idx] for idx in label)
    return name, label

def gen_sample(genplate, width, height):
    """Generate one normalized training sample.

    Returns (label_list, image): image is [height, width, 3] scaled to [0, 1].
    """
    num, label = gen_rand()
    plate_img = genplate.generate(num)
    plate_img = cv2.resize(plate_img, (width, height))
    return label, np.multiply(plate_img, 1 / 255.0)

2 搭建神经网络

在这里插入图片描述

图2.1 神经网络结构

【Demo:network_model.py】

import tensorflow as tf 

def init_weights_biases(name_w, name_b, shape):
    """Create a (weights, biases) variable pair.

    Weights: truncated normal (stddev 0.1) with the given shape; biases:
    constant 0.1 with shape [shape[-1]].
    """
    w_init = tf.truncated_normal_initializer(stddev=0.1)
    b_init = tf.constant_initializer(0.1)
    weights = tf.get_variable(name=name_w, shape=shape, dtype=tf.float32, initializer=w_init)
    biases = tf.get_variable(name=name_b, shape=[shape[-1]], dtype=tf.float32, initializer=b_init)
    return weights, biases

def conv2d(input_tensor, ksize, strides, pad, name_w, name_b):
    """2-D convolution followed by bias add and ReLU.

    ksize: filter shape [kh, kw, in_ch, out_ch]; name_w/name_b: variable names.
    Delegates variable creation to init_weights_biases instead of duplicating
    the same initializer code inline (consistency with fullc).
    """
    weights, biases = init_weights_biases(name_w, name_b, ksize)
    conv = tf.nn.conv2d(input_tensor, weights, strides=strides, padding=pad)
    return tf.nn.relu(conv + biases)

def max_pooling(input_tensor, ksize, strides, pad):
    """Thin wrapper over tf.nn.max_pool with the same argument order."""
    return tf.nn.max_pool(input_tensor, ksize=ksize, strides=strides, padding=pad)

def fullc(input_tensor, wsize, name_w, name_b):
    """Fully-connected layer (no activation): x @ W + b.

    wsize: [in_dim, out_dim]. Delegates variable creation to
    init_weights_biases (removes the duplicated initializer code and the
    local variable that shadowed the function name).
    """
    weights, biases = init_weights_biases(name_w, name_b, wsize)
    return tf.matmul(input_tensor, weights) + biases

def small_basic_block(input_tensor, ksize, strides, pad):
    """LPRNet-style bottleneck block: squeeze to ksize[2]//4 channels,
    3x1 and 1x3 convolutions, then expand back to ksize[2] channels.

    BUG FIX: the original used ksize[2]/4, which is float division under
    Python 3 and produces an invalid (non-integer) variable shape; use //.
    Note: currently unused by inference(); the hard-coded variable names
    ("a".."h") would collide if this were called twice in one scope.
    """
    quarter = ksize[2] // 4
    conv_s1 = conv2d(input_tensor, [ksize[0], ksize[1], ksize[2], quarter], strides, pad, "a", "b")
    conv_s2 = conv2d(conv_s1, [3, 1, quarter, quarter], strides, pad, "c", "d")
    conv_s3 = conv2d(conv_s2, [1, 3, quarter, quarter], strides, pad, "e", "f")
    conv_s4 = conv2d(conv_s3, [ksize[0], ksize[1], quarter, ksize[2]], strides, pad, "g", "h")
    return conv_s4


def inference(inputs, keep_prob):
    """Forward pass of the plate-recognition CNN.

    Args:
        inputs: float32 image batch [batch, H, W, 3] (trained with 72x272).
        keep_prob: dropout keep probability applied to the flattened features.

    Returns:
        Seven [batch, 65] logit tensors, one per plate character position.

    Fixes: the original carried the same wrong copy-pasted shape comment
    ("[batch_size, 36, 136, 64]") on every layer, and duplicated the seven
    fully-connected heads as seven near-identical blocks; the heads are now
    built in a loop while keeping the original variable names fw2_i/fb2_i
    so existing checkpoints still load.
    """
    # All convolutions are 3x3 stride-1 VALID; each pool halves H and W.
    # With 72x272 input: conv1 70x270 -> conv2 68x268 -> pool 34x134
    # -> conv3 32x132 -> conv4 30x130 -> pool 15x65
    # -> conv5 13x63 -> conv6 11x61 -> pool 5x30 (128 channels).
    with tf.name_scope("conv_1"):
        conv_1 = conv2d(inputs, [3, 3, 3, 32], [1, 1, 1, 1], "VALID", "cw_1", "cb_1")

    with tf.name_scope("conv_2"):
        conv_2 = conv2d(conv_1, [3, 3, 32, 32], [1, 1, 1, 1], "VALID", "cw_2", "cb_2")

    with tf.name_scope("max_pool_1"):
        pooling_1 = max_pooling(conv_2, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")

    with tf.name_scope("conv_3"):
        conv_3 = conv2d(pooling_1, [3, 3, 32, 64], [1, 1, 1, 1], "VALID", "cw_3", "cb_3")

    with tf.name_scope("conv_4"):
        conv_4 = conv2d(conv_3, [3, 3, 64, 64], [1, 1, 1, 1], "VALID", "cw_4", "cb_4")

    with tf.name_scope("max_pool_2"):
        pooling_2 = max_pooling(conv_4, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")

    with tf.name_scope("conv_5"):
        conv_5 = conv2d(pooling_2, [3, 3, 64, 128], [1, 1, 1, 1], "VALID", "cw_5", "cb_5")

    with tf.name_scope("conv_6"):
        conv_6 = conv2d(conv_5, [3, 3, 128, 128], [1, 1, 1, 1], "VALID", "cw_6", "cb_6")

    with tf.name_scope("max_pool_3"):
        pooling_3 = max_pooling(conv_6, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")

    with tf.name_scope("fullc_1"):
        # Flatten the final feature map; .value unwraps TF1 Dimension objects.
        pooled = pooling_3.get_shape()
        flat_dim = pooled[1].value * pooled[2].value * pooled[3].value
        reshaped = tf.reshape(pooling_3, [-1, flat_dim])
        fc_1 = tf.nn.dropout(reshaped, keep_prob)

    # Seven independent 65-way classification heads, one per plate character.
    heads = []
    for i in range(1, 8):
        with tf.name_scope("fullc_2{}".format(i)):
            heads.append(fullc(fc_1, [flat_dim, 65],
                               "fw2_{}".format(i), "fb2_{}".format(i)))
    return tuple(heads)

def losses(logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7, labels):
    """Per-character softmax cross-entropy losses.

    labels: [batch, 7] integer class ids; column i pairs with logits_{i+1}.
    Returns a 7-tuple of scalar losses; each is also logged as a summary.
    """
    labels = tf.convert_to_tensor(labels, tf.int32)
    head_logits = [logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7]
    per_char_losses = []
    for pos, logits in enumerate(head_logits):
        tag = "loss_{}".format(pos + 1)
        with tf.name_scope(tag):
            xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels[:, pos])
            mean_loss = tf.reduce_mean(xent)
            tf.summary.scalar(tag, mean_loss)
            per_char_losses.append(mean_loss)
    return tuple(per_char_losses)

def train(loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, learning_rate):
    """Create one Adam optimizer per character loss.

    Returns a 7-tuple of train ops, one per plate character head.
    """
    all_losses = [loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7]
    train_ops = []
    for pos, loss in enumerate(all_losses, start=1):
        with tf.name_scope("optimizer_{}".format(pos)):
            train_ops.append(tf.train.AdamOptimizer(learning_rate).minimize(loss))
    return tuple(train_ops)

def evaluation(logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7, labels):
    """Top-1 accuracy across all 7 character positions of the batch.

    Concatenates the 7 [batch, 65] logit heads into [7*batch, 65] and flattens
    the transposed labels to [7*batch] so position i's logits line up with
    position i's labels.
    """
    all_logits = tf.concat(
        [logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7], 0)
    labels = tf.convert_to_tensor(labels, tf.int32)
    flat_labels = tf.reshape(tf.transpose(labels), [-1])
    with tf.name_scope("accuracy"):
        hits = tf.nn.in_top_k(all_logits, flat_labels, 1)
        accuracy = tf.reduce_mean(tf.cast(hits, tf.float16))
        tf.summary.scalar("accuracy", accuracy)
    return accuracy

3 训练神经网络

【Demo:train_model.py】

import tensorflow as tf 
import network_model 
from input_data import OCRIter
import numpy as np
import os

batch_size = 8          # images per training step
image_h = 72            # input image height
image_w = 272           # input image width
learning_rate = 0.0001  # Adam learning rate
count = 30000           # total training steps
num_label = 7           # characters per plate
channels = 3            # BGR channels

LOG_DIR = "./logs"
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

with tf.name_scope("source_data"):
    # FIX: the original comment claimed [batch_size, 36, 136, 3]; the actual
    # placeholder shape is [batch_size, image_h, image_w, channels] = [8, 72, 272, 3].
    inputs = tf.placeholder(tf.float32, [batch_size, image_h, image_w, channels], name="inputs")
    # One integer class id (0-64) per plate character.
    labels = tf.placeholder(tf.int32, [batch_size, num_label], name="labels")
    keep_prob = tf.placeholder(tf.float32)

def get_batch():
    """Generate one batch of synthetic plates as numpy arrays.

    Returns (images, labels): images [batch, h, w, 3] in [0, 1],
    labels [batch, 7] integer class ids.
    """
    batch_iter = OCRIter(batch_size, image_h, image_w)
    images, plate_labels = batch_iter.iter()
    return np.array(images), np.array(plate_labels)

# Build the seven per-character logit heads (one 65-way classifier each).
train_logits_1, train_logits_2, train_logits_3, train_logits_4, \
    train_logits_5, train_logits_6, train_logits_7 = network_model.inference(inputs, keep_prob)

# One softmax cross-entropy loss per character position.
train_loss_1, train_loss_2, train_loss_3, train_loss_4, \
    train_loss_5, train_loss_6, train_loss_7 = network_model.losses(train_logits_1, train_logits_2, train_logits_3, train_logits_4, \
        train_logits_5, train_logits_6, train_logits_7, labels)

# One Adam train op per loss; all seven are run together each step.
train_op_1, train_op_2, train_op_3, train_op_4, \
    train_op_5, train_op_6, train_op_7 = network_model.train(train_loss_1, train_loss_2, train_loss_3, train_loss_4, \
        train_loss_5, train_loss_6, train_loss_7, learning_rate)

# Top-1 accuracy over all 7 character positions of the batch.
train_acc = network_model.evaluation(train_logits_1, train_logits_2, train_logits_3, train_logits_4, \
    train_logits_5, train_logits_6, train_logits_7, labels)


# Merge every scalar summary registered above for TensorBoard logging.
summary_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# summary_op = tf.summary.merge_all(tf.get_collection(tf.GraphKeys.SUMMARIES))

if __name__ == "__main__":
    saver = tf.train.Saver()
    # FIX: checkpoint path and model-directory creation were inside the
    # training loop, re-checking the filesystem every step; hoisted out.
    ckpt_dir = "./model/lpr.ckpt"
    if not os.path.exists("./model"):
        os.makedirs("./model")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # NOTE(review): no input queues exist in this graph, so the queue
        # runners are effectively no-ops; kept for compatibility.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        summary_writer = tf.summary.FileWriter(LOG_DIR, sess.graph)
        for step in range(count):
            # Fresh synthetic batch each step (data is generated, not loaded).
            x_batch, y_batch = get_batch()
            feed_dict = {inputs: x_batch, labels: y_batch, keep_prob: 0.5}
            fetches = [train_op_1, train_op_2, train_op_3, train_op_4,
                       train_op_5, train_op_6, train_op_7,
                       train_loss_1, train_loss_2, train_loss_3, train_loss_4,
                       train_loss_5, train_loss_6, train_loss_7,
                       train_acc, summary_op]
            results = sess.run(fetches, feed_dict=feed_dict)
            loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7 = results[7:14]
            acc, summary = results[14], results[15]

            if step % 10 == 0:
                loss_all = loss_1 + loss_2 + loss_3 + loss_4 + \
                    loss_5 + loss_6 + loss_7
                print("loss1:{}, loss2:{}, loss3:{}, loss4:{}, loss5:{}, loss6:{}, loss7: {}".format(loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7))
                print("Total loss: {}, accuracy: {}".format(loss_all, acc))

            # Checkpoint every 1000 steps and on the final step.
            if step % 1000 == 0 or (step + 1) == count:
                saver.save(sess, save_path=ckpt_dir, global_step=step)
            summary_writer.add_summary(summary, step)
        coord.request_stop()
        coord.join(threads)
    summary_writer.close()

4 总结

(1) 车牌字符串长度:Python 2 中 len() 对含中文的 str 按字节计数,结果为 9;Python 3 中按字符计数,结果为 7,因此 generate 中判断 len(text) == 7;
(2) 代码解析后续更新;


【参考文献】
[1]https://blog.csdn.net/ssmixi/article/details/78220039
[2]https://blog.csdn.net/ssmixi/article/details/78223907
[3]https://cloud.tencent.com/developer/article/1005199
[4]https://ypwhs.github.io/captcha/


版权声明:本文为博主原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。
本文链接:https://blog.csdn.net/Xin_101/article/details/89945614

智能推荐

什么是内部类?成员内部类、静态内部类、局部内部类和匿名内部类的区别及作用?_成员内部类和局部内部类的区别-程序员宅基地

文章浏览阅读3.4k次,点赞8次,收藏42次。一、什么是内部类?or 内部类的概念内部类是定义在另一个类中的类;下面类TestB是类TestA的内部类。即内部类对象引用了实例化该内部对象的外围类对象。public class TestA{ class TestB {}}二、 为什么需要内部类?or 内部类有什么作用?1、 内部类方法可以访问该类定义所在的作用域中的数据,包括私有数据。2、内部类可以对同一个包中的其他类隐藏起来。3、 当想要定义一个回调函数且不想编写大量代码时,使用匿名内部类比较便捷。三、 内部类的分类成员内部_成员内部类和局部内部类的区别

分布式系统_分布式系统运维工具-程序员宅基地

文章浏览阅读118次。分布式系统要求拆分分布式思想的实质搭配要求分布式系统要求按照某些特定的规则将项目进行拆分。如果将一个项目的所有模板功能都写到一起,当某个模块出现问题时将直接导致整个服务器出现问题。拆分按照业务拆分为不同的服务器,有效的降低系统架构的耦合性在业务拆分的基础上可按照代码层级进行拆分(view、controller、service、pojo)分布式思想的实质分布式思想的实质是为了系统的..._分布式系统运维工具

用Excel分析数据极简入门_excel趋势分析数据量-程序员宅基地

文章浏览阅读174次。1.数据源准备2.数据处理step1:数据表处理应用函数:①VLOOKUP函数; ② CONCATENATE函数终表:step2:数据透视表统计分析(1) 透视表汇总不同渠道用户数, 金额(2)透视表汇总不同日期购买用户数,金额(3)透视表汇总不同用户购买订单数,金额step3:讲第二步结果可视化, 比如, 柱形图(1)不同渠道用户数, 金额(2)不同日期..._exce l趋势分析数据量

宁盾堡垒机双因素认证方案_horizon宁盾双因素配置-程序员宅基地

文章浏览阅读3.3k次。堡垒机可以为企业实现服务器、网络设备、数据库、安全设备等的集中管控和安全可靠运行,帮助IT运维人员提高工作效率。通俗来说,就是用来控制哪些人可以登录哪些资产(事先防范和事中控制),以及录像记录登录资产后做了什么事情(事后溯源)。由于堡垒机内部保存着企业所有的设备资产和权限关系,是企业内部信息安全的重要一环。但目前出现的以下问题产生了很大安全隐患:密码设置过于简单,容易被暴力破解;为方便记忆,设置统一的密码,一旦单点被破,极易引发全面危机。在单一的静态密码验证机制下,登录密码是堡垒机安全的唯一_horizon宁盾双因素配置

谷歌浏览器安装(Win、Linux、离线安装)_chrome linux debian离线安装依赖-程序员宅基地

文章浏览阅读7.7k次,点赞4次,收藏16次。Chrome作为一款挺不错的浏览器,其有着诸多的优良特性,并且支持跨平台。其支持(Windows、Linux、Mac OS X、BSD、Android),在绝大多数情况下,其的安装都很简单,但有时会由于网络原因,无法安装,所以在这里总结下Chrome的安装。Windows下的安装:在线安装:离线安装:Linux下的安装:在线安装:离线安装:..._chrome linux debian离线安装依赖

烤仔TVの尚书房 | 逃离北上广?不如押宝越南“北上广”-程序员宅基地

文章浏览阅读153次。中国发达城市榜单每天都在刷新,但无非是北上广轮流坐庄。北京拥有最顶尖的文化资源,上海是“摩登”的国际化大都市,广州是活力四射的千年商都。GDP和发展潜力是衡量城市的数字指...

随便推点

java spark的使用和配置_使用java调用spark注册进去的程序-程序员宅基地

文章浏览阅读3.3k次。前言spark在java使用比较少,多是scala的用法,我这里介绍一下我在项目中使用的代码配置详细算法的使用请点击我主页列表查看版本jar版本说明spark3.0.1scala2.12这个版本注意和spark版本对应,只是为了引jar包springboot版本2.3.2.RELEASEmaven<!-- spark --> <dependency> <gro_使用java调用spark注册进去的程序

汽车零部件开发工具巨头V公司全套bootloader中UDS协议栈源代码,自己完成底层外设驱动开发后,集成即可使用_uds协议栈 源代码-程序员宅基地

文章浏览阅读4.8k次。汽车零部件开发工具巨头V公司全套bootloader中UDS协议栈源代码,自己完成底层外设驱动开发后,集成即可使用,代码精简高效,大厂出品有量产保证。:139800617636213023darcy169_uds协议栈 源代码

AUTOSAR基础篇之OS(下)_autosar 定义了 5 种多核支持类型-程序员宅基地

文章浏览阅读4.6k次,点赞20次,收藏148次。AUTOSAR基础篇之OS(下)前言首先,请问大家几个小小的问题,你清楚:你知道多核OS在什么场景下使用吗?多核系统OS又是如何协同启动或者关闭的呢?AUTOSAR OS存在哪些功能安全等方面的要求呢?多核OS之间的启动关闭与单核相比又存在哪些异同呢?。。。。。。今天,我们来一起探索并回答这些问题。为了便于大家理解,以下是本文的主题大纲:[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-JCXrdI0k-1636287756923)(https://gite_autosar 定义了 5 种多核支持类型

VS报错无法打开自己写的头文件_vs2013打不开自己定义的头文件-程序员宅基地

文章浏览阅读2.2k次,点赞6次,收藏14次。原因:自己写的头文件没有被加入到方案的包含目录中去,无法被检索到,也就无法打开。将自己写的头文件都放入header files。然后在VS界面上,右键方案名,点击属性。将自己头文件夹的目录添加进去。_vs2013打不开自己定义的头文件

【Redis】Redis基础命令集详解_redis命令-程序员宅基地

文章浏览阅读3.3w次,点赞80次,收藏342次。此时,可以将系统中所有用户的 Session 数据全部保存到 Redis 中,用户在提交新的请求后,系统先从Redis 中查找相应的Session 数据,如果存在,则再进行相关操作,否则跳转到登录页面。此时,可以将系统中所有用户的 Session 数据全部保存到 Redis 中,用户在提交新的请求后,系统先从Redis 中查找相应的Session 数据,如果存在,则再进行相关操作,否则跳转到登录页面。当数据量很大时,count 的数量的指定可能会不起作用,Redis 会自动调整每次的遍历数目。_redis命令

URP渲染管线简介-程序员宅基地

文章浏览阅读449次,点赞3次,收藏3次。URP的设计目标是在保持高性能的同时,提供更多的渲染功能和自定义选项。与普通项目相比,会多出Presets文件夹,里面包含着一些设置,包括本色,声音,法线,贴图等设置。全局只有主光源和附加光源,主光源只支持平行光,附加光源数量有限制,主光源和附加光源在一次Pass中可以一起着色。URP:全局只有主光源和附加光源,主光源只支持平行光,附加光源数量有限制,一次Pass可以计算多个光源。可编程渲染管线:渲染策略是可以供程序员定制的,可以定制的有:光照计算和光源,深度测试,摄像机光照烘焙,后期处理策略等等。_urp渲染管线

推荐文章

热门文章

相关标签