import os
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
Class_Nums = 5    # 5 classes in total
Sample_Nums = 90  # 90 samples per class, 5 * 90 = 450 samples in total
def load_images():
img_list = []
for i in range(Class_Nums):
        path = 'F:\\Python WorkSpace\\FaceRecognize\\train\\%d\\' % (i + 1)  # directory of training images for class i+1
for j in range(Sample_Nums):
file_name = '%03d.jpg' % (j+1)
file = path + file_name
image = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
img_list.append(image)
return img_list
def get_accuracy(logits, targets):
batch_prediction = np.argmax(logits, axis=1)
num_correct = np.sum(np.equal(batch_prediction, targets))
return 100.* num_correct / batch_prediction.shape[0]
# The following code builds the convolutional network
def weight_init(shape, name):
return tf.get_variable(name, shape, initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
def bias_init(shape, name):
return tf.get_variable(name, shape, initializer=tf.constant_initializer(0.0))
def conv2d(input_data, conv_w):
return tf.nn.conv2d(input_data, conv_w, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(input_data, size):
return tf.nn.max_pool(input_data, ksize=[1, size, size, 1], strides=[1, size, size, 1], padding='SAME')
def conv_net(input_data, name):
with tf.name_scope('conv1'):
        w_conv1 = weight_init([3, 3, 1, 8], 'conv1_w')  # 3x3 kernel, 1 input channel, 8 output channels (8 feature maps)
b_conv1 = bias_init([8], 'conv1_b')
h_conv1 = tf.nn.relu(tf.nn.bias_add(conv2d(input_data, w_conv1), b_conv1))
bn1 = tf.contrib.layers.batch_norm(h_conv1)
h_pool1 = max_pool(bn1, 2)
with tf.name_scope('conv2'):
        w_conv2 = weight_init([5, 5, 8, 8], 'conv2_w')  # 5x5 kernel, 8 input channels, 8 output channels
b_conv2 = bias_init([8], 'conv2_b')
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
bn2 = tf.contrib.layers.batch_norm(h_conv2)
h_pool2 = max_pool(bn2, 2)
with tf.name_scope('conv3'):
        w_conv3 = weight_init([5, 5, 8, 8], 'conv3_w')  # 5x5 kernel, 8 input channels, 8 output channels
b_conv3 = bias_init([8], 'conv3_b')
h_conv3 = tf.nn.relu(conv2d(h_pool2, w_conv3) + b_conv3)
bn3 = tf.contrib.layers.batch_norm(h_conv3)
h_pool3 = max_pool(bn3, 2)
with tf.name_scope('fc1'):
        w_fc1 = weight_init([23 * 23 * 8, 120], 'fc1_w')  # three 2x2 poolings shrink the 180x180 input to 23x23 with 8 channels
b_fc1 = bias_init([120], 'fc1_b')
h_fc1 = tf.nn.relu(tf.matmul(tf.reshape(h_pool3, [-1, 23 * 23 * 8]), w_fc1) + b_fc1)
with tf.name_scope('fc2'):
        w_fc2 = weight_init([120, Class_Nums], 'fc2_w')  # map the 120 features onto Class_Nums classes
        b_fc2 = bias_init([Class_Nums], 'fc2_b')
        h_fc2 = tf.matmul(h_fc1, w_fc2) + b_fc2  # return raw logits; softmax is applied inside the cross-entropy loss below
    return h_fc2
# Generate the TFRecord file
def gen_tfrecord(path):
    tf_writer = tf.python_io.TFRecordWriter(path)
    for i in range(Class_Nums):
        img_dir = 'F:\\Python WorkSpace\\FaceRecognize\\train\\%d\\' % (i + 1)  # renamed so it does not shadow the output path
        label_data = i
        for j in range(Sample_Nums):
            file_name = '%03d.jpg' % (j + 1)
            file = img_dir + file_name
            image = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
            image_bytes = image.tobytes()  # tostring() is a deprecated alias of tobytes()
height = image.shape[0]
width = image.shape[1]
channels = 1
example = tf.train.Example()
feature = example.features.feature
feature['height'].int64_list.value.append(height)
feature['width'].int64_list.value.append(width)
feature['channels'].int64_list.value.append(channels)
feature['image_data'].bytes_list.value.append(image_bytes)
feature['label'].int64_list.value.append(label_data)
tf_writer.write(example.SerializeToString())
tf_writer.close()
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
'image_data':tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64)
}
)
    image = tf.decode_raw(features['image_data'], tf.uint8)
    image = tf.reshape(image, [180, 180, 1])  # every training image is assumed to be 180x180 grayscale
image = tf.cast(image, tf.float32)
label = tf.cast(features['label'], tf.int32)
return image, label
# Count the total number of samples stored in a TFRecord file
def total_sample(file_name):
sample_nums = 0
for record in tf.python_io.tf_record_iterator(file_name):
sample_nums += 1
return sample_nums
def train_data():
    batch_size = 10
    batch_num = total_sample('Faces.tfrecord') // batch_size  # number of full batches per epoch
filename_queue = tf.train.string_input_producer(['Faces.tfrecord'], shuffle=False)
image, label = read_and_decode(filename_queue)
image_train, label_train = tf.train.batch([image, label], batch_size=batch_size, num_threads=1, capacity=32)
#image_train, label_train = tf.train.shuffle_batch([image, label], batch_size, num_threads=1, capacity=5+batch_size*3, min_after_dequeue=5)
    train_labels_one_hot = tf.one_hot(label_train, Class_Nums, on_value=1.0, off_value=0.0)
x_data = tf.placeholder(tf.float32, shape=[None, 180, 180, 1], name='x_data')
y_target = tf.placeholder(tf.float32, shape=[None, Class_Nums], name='label')
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False, dtype=tf.int32)
    model_output = conv_net(x_data, name='model_output')  # raw logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_target, logits=model_output))
    optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss, global_step=global_step)
train_correct_prediction = tf.equal(tf.argmax(model_output, 1), tf.argmax(y_target, 1))
train_accuracy = tf.reduce_mean(tf.cast(train_correct_prediction, tf.float32))
tf.summary.scalar('loss', loss)
tf.summary.scalar('acc', train_accuracy)
merge = tf.summary.merge_all()
init = tf.global_variables_initializer()
    config = tf.ConfigProto(allow_soft_placement=True)  # fall back to another device if the requested one is unavailable
    config.gpu_options.per_process_gpu_memory_fraction = 0.4  # use at most 40% of the GPU memory
session = tf.Session(config=config)
writer = tf.summary.FileWriter("./log", session.graph)
    saver = tf.train.Saver(max_to_keep=4)  # keep the 4 most recent checkpoints
tf.add_to_collection("predict", model_output)
session.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=session, coord=coord)
loss_list = []
acc_list = []
try:
for i in range(1000):
cost_avg = 0
acc_avg = 0
for j in range(int(batch_num)):
image_batch, label_batch = session.run([image_train, train_labels_one_hot])
_,step, acc, cost, rs = session.run([optimizer, global_step, train_accuracy, loss, merge], feed_dict={x_data: image_batch, y_target:label_batch})
acc_avg += (acc/batch_num)
cost_avg += (cost/batch_num)
writer.add_summary(rs, step)
print("step %d, training accuracy %0.10f loss %0.10f" % (i, acc_avg, cost_avg))
loss_list.append(cost_avg)
acc_list.append(acc_avg)
if step % 60 == 0:
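                # NOTE: the original listing is truncated at this point. The lines below are a
                # minimal, hedged completion: the checkpoint path './face_model' and the
                # queue/cleanup handling are assumptions, not part of the original code.
                saver.save(session, './face_model', global_step=step)
    except tf.errors.OutOfRangeError:
        print('input queue exhausted')
    finally:
        coord.request_stop()
        coord.join(threads)
    # matplotlib is imported at the top of the file, presumably to plot the training curves;
    # a simple sketch of that step:
    plt.plot(loss_list, label='loss')
    plt.plot(acc_list, label='accuracy')
    plt.legend()
    plt.show()
    writer.close()
    session.close()


# Assumed usage sketch: generate the TFRecord file once, then start training.
if __name__ == '__main__':
    if not os.path.exists('Faces.tfrecord'):
        gen_tfrecord('Faces.tfrecord')
    train_data()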
# This code is a learning example by the author; the main steps are explained on the author's blog:
# https://blog.csdn.net/lingtianyulong/article/details/80555908