#coding:utf-8
import os
import random
import skimage.data
import skimage.transform
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import time
import argparse
import sys
import tempfile
from skimage import color
from PIL import Image
print(__doc__)
EPOCH_SIZE = 500
BATCH_SIZE = 40
IMAGE_SIZE = 32
HIDDEN_SIZE = 256
CLASS_LABELS = 62
CROP_SIZE = 32
LEARNING_RATE = tf.Variable(0.0001, dtype=tf.float32)
train_data_dir = r"C:\Users\metro_miccall\eclipse-workspace\CNN_tensorflow\CNN_traffic\datasets\BelgiumTS\Training"
test_data_dir = r"C:\Users\metro_miccall\eclipse-workspace\CNN_tensorflow\CNN_traffic\datasets\BelgiumTS\Testing"
# Summary statistics for a variable (for TensorBoard).
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram
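# Example usage (a sketch; the call sites are not part of this section): after
# creating a weight tensor, e.g. weight1 in cnn_model, calling
# variable_summaries(weight1) attaches mean/stddev/max/min/histogram summaries
# that tf.summary.merge_all() can later collect for TensorBoard.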
def load_datasets(data_dir, img_crop_size):
    directories = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    #print("load_datasets directories:%s"%(directories))
    images = []
    labels = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            img = skimage.data.imread(f)
            image = skimage.transform.resize(img, (img_crop_size, img_crop_size))
            image = color.rgb2gray(image)
            images.append(image)
            id = int(d)
            #print("\nid:%s"%id)
            labels.append(id)
    return images, labels
def load_datasets_norm(data_dir, img_crop_size):
    # Same as load_datasets, but returns images reshaped for the network input
    # and labels as one-hot vectors.
    directories = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    #print("load_datasets directories:%s"%(directories))
    images = []
    labels = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            img = skimage.data.imread(f)
            image = skimage.transform.resize(img, (img_crop_size, img_crop_size))
            image = color.rgb2gray(image)
            images.append(image)
            id = int(d)
            #print("\nid:%s"%id)
            label = [0] * CLASS_LABELS
            for i in range(CLASS_LABELS):
                if i == id:
                    label[i] = 1
            labels.append(label)
    images_a = np.array(images)
    images = images_a.reshape([-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    labels_a = np.array(labels)
    labels = labels_a.reshape([-1, CLASS_LABELS])
    print("\nload_datasets_norm images shape:", images.shape)
    print("\nload_datasets_norm labels shape:", labels.shape)
    return images, labels
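# Example usage (sketch): the returned arrays feed the placeholders created by
# input_layer(), e.g.
#   train_images, train_labels = load_datasets_norm(train_data_dir, CROP_SIZE)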
def display_images_and_labels(images, labels):
    #print("\ndisplay_images_and_labels labels:",labels)
    unique_labels = set(labels)
    print("\ndisplay_images_and_labels unique_labels:%s"%(unique_labels))
    plt.figure(figsize=(15, 15))
    i = 1
    for label in unique_labels:
        image = images[labels.index(label)]
        plt.subplot(8, 8, i)
        plt.axis('off')
        plt.title("Label {0} ({1})".format(label, labels.count(label)))
        i += 1
        _ = plt.imshow(image)
    plt.show()
def predict_testimages(data_dir, sess, predicted_labels, images_ph, imagesize=IMAGE_SIZE):
    # Load the test dataset.
    pred_labels = []
    test_images, test_labels = load_datasets(data_dir, CROP_SIZE)
    test_images_a = np.array(test_images)
    test_images_a = test_images_a.reshape([-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    display_images_and_labels(test_images, test_labels)
    # Run predictions against the full test set.
    prediction = sess.run([predicted_labels], feed_dict={images_ph: test_images_a})
    #predlen = prediction.shape[1]
    # The accuracy computation below is disabled in the original script.
    '''
    print("predict_testimages predlen:%s"%(predlen))
    testlen = len(test_labels)
    print("predict_testimages testlen:%s"%(testlen))
    for i in range(testlen):
        predicted = prediction[i]
        pred = np.argmax(predicted)
        pred_labels.append(pred)
    match_count = sum([int(y == y_) for y, y_ in zip(test_labels, pred_labels)])
    test_len = len(test_labels)
    accuracy = match_count / test_len
    print("match_count: {:.3f}".format(match_count),"test_len: {:.3f}".format(test_len))
    print("Accuracy: {:.3f}".format(accuracy))
    print("time_duration: {:.5f}s".format(time_duration))
    '''
def input_layer():
    x = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, CLASS_LABELS], name="y-input")
    keep_prob = tf.placeholder(tf.float32)
    return x, y_, keep_prob
def compute_cross_entropy(x, y):
    # x: one-hot labels, y: softmax output; averages -x*log(y) over all elements.
    diff = -(x * tf.log(y))
    cross_entropy = tf.reduce_mean(diff)
    return cross_entropy
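# Optional numerically safer variant (a sketch, not wired into the graph below):
# clipping the softmax output before tf.log avoids NaN losses when a predicted
# probability underflows to 0. The name compute_cross_entropy_clipped is
# illustrative and not part of the original script.
def compute_cross_entropy_clipped(x, y):
    diff = -(x * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
    cross_entropy = tf.reduce_mean(diff)
    return cross_entropy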
def train_optimizer(cross_entropy):
    train_opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)
    return train_opt
def correct_prediction(x, y):
    correct_prediction = tf.equal(tf.argmax(x, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy
def cnn_model(input_x, input_y, keep_prob):
    # conv1: 3x3, 1 -> 32 channels, then 2x2 max pooling (32x32 -> 16x16).
    weight1 = tf.Variable(tf.truncated_normal(shape=[3, 3, 1, 32], stddev=5e-2))
    kernel1 = tf.nn.conv2d(input_x, weight1, [1, 1, 1, 1], padding='SAME')
    bias1 = tf.Variable(tf.constant(0.0, shape=[32]))
    conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # conv2: 3x3, 32 -> 64 channels, then 2x2 max pooling (16x16 -> 8x8).
    weight2 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 64], stddev=5e-2))
    kernel2 = tf.nn.conv2d(pool1, weight2, [1, 1, 1, 1], padding='SAME')
    bias2 = tf.Variable(tf.constant(0.0, shape=[64]))
    conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Fully connected layer: 8*8*64 -> 1024. Note that keep_prob is accepted
    # but no dropout layer is applied here.
    pool2_flat = tf.reshape(pool2, [-1, 8 * 8 * 64])
    weight3 = tf.Variable(tf.truncated_normal(shape=[8 * 8 * 64, 1024], stddev=0.04))
    bias3 = tf.Variable(tf.constant(0.1, shape=[1024]))
    local3 = tf.nn.relu(tf.matmul(pool2_flat, weight3) + bias3)
    # Output layer: 1024 -> CLASS_LABELS, with softmax.
    weight4 = tf.Variable(tf.truncated_normal(shape=[1024, CLASS_LABELS], stddev=5e-2))
    bias4 = tf.Variable(tf.constant(0.1, shape=[CLASS_LABELS]))
    y = tf.nn.softmax(tf.matmul(local3, weight4) + bias4)
    cross_entropy = compute_cross_entropy(input_y, y)
    train_opt = train_optimizer(cross_entropy)
    accuracy = correct_prediction(y, input_y)
    return train_opt, accuracy, y
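# Minimal wiring sketch (an assumption about how the pieces fit together; the
# script's actual training loop is not shown in this section, and train_example
# is an illustrative name, not original code).
def train_example():
    x, y_, keep_prob = input_layer()
    train_opt, accuracy, y = cnn_model(x, y_, keep_prob)
    train_images, train_labels = load_datasets_norm(train_data_dir, CROP_SIZE)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(EPOCH_SIZE):
            # Take one random contiguous batch per epoch (sketch only).
            start = random.randint(0, len(train_images) - BATCH_SIZE)
            batch_x = train_images[start:start + BATCH_SIZE]
            batch_y = train_labels[start:start + BATCH_SIZE]
            _, acc = sess.run([train_opt, accuracy],
                              feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})
            print("epoch %d accuracy %.3f" % (epoch, acc))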
def predict_testrandomimages(data_dir, sess, predicted_labels, images_ph, imagesize=IMAGE_SIZE):
    # Load the test dataset.
    images, labels = load_datasets(data_dir, CROP_SIZE)
    # Pick 10 random images
    sample_indexes = random.sample(range(len(images)), 10)