■ Shows how to build a convolutional neural network.
▶ Example code (PY)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Requires TensorFlow 1.x (graph-mode API: tf.placeholder / tf.Session).
imageSize = 28
batchSize = 100
outputLayerNodeCount = 10
learningRate = 0.001
epochCount = 10
dropoutRate = 0.8  # passed to tf.nn.dropout as the keep probability

mnistDatasets = input_data.read_data_sets("data", one_hot = True)

inputLayerTensor = tf.placeholder(tf.float32, [None, imageSize, imageSize, 1])
correctOutputTensor = tf.placeholder(tf.float32, [None, outputLayerNodeCount])
dropoutRateTensor = tf.placeholder(tf.float32)

convolutionLayer1WeightVariable = tf.Variable(tf.random_normal([4, 4, 1, 16], stddev = 0.01))
convolutionLayer2WeightVariable = tf.Variable(tf.random_normal([4, 4, 16, 32], stddev = 0.01))
fullyConnectedLayerWeightVariable = tf.Variable(tf.random_normal([7 * 7 * 32, 256], stddev = 0.01))
outputLayerWeightVariable = tf.Variable(tf.random_normal([256, outputLayerNodeCount], stddev = 0.01))

# First convolution block: 4x4 convolution with 16 filters, ReLU, 2x2 max pool (28x28 -> 14x14).
convolutionLayer1OutputTensor = tf.nn.conv2d(inputLayerTensor, convolutionLayer1WeightVariable, strides = [1, 1, 1, 1], padding = "SAME")
convolutionLayer1OutputTensor = tf.nn.relu(convolutionLayer1OutputTensor)
convolutionLayer1OutputTensor = tf.nn.max_pool(convolutionLayer1OutputTensor, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")

# Second convolution block: 32 filters, ReLU, 2x2 max pool (14x14 -> 7x7).
convolutionLayer2OutputTensor = tf.nn.conv2d(convolutionLayer1OutputTensor, convolutionLayer2WeightVariable, strides = [1, 1, 1, 1], padding = "SAME")
convolutionLayer2OutputTensor = tf.nn.relu(convolutionLayer2OutputTensor)
convolutionLayer2OutputTensor = tf.nn.max_pool(convolutionLayer2OutputTensor, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")

# Flatten the 7x7x32 feature maps into a 256-node fully connected layer with dropout.
fullyConnectedLayerOutputTensor = tf.reshape(convolutionLayer2OutputTensor, [-1, 7 * 7 * 32])
fullyConnectedLayerOutputTensor = tf.matmul(fullyConnectedLayerOutputTensor, fullyConnectedLayerWeightVariable)
fullyConnectedLayerOutputTensor = tf.nn.relu(fullyConnectedLayerOutputTensor)
fullyConnectedLayerOutputTensor = tf.nn.dropout(fullyConnectedLayerOutputTensor, dropoutRateTensor)

# Output layer produces raw logits for the 10 digit classes.
outputLayerOutputTensor = tf.matmul(fullyConnectedLayerOutputTensor, outputLayerWeightVariable)

costTensor = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = outputLayerOutputTensor, labels = correctOutputTensor))
optimizerOperation = tf.train.AdamOptimizer(learningRate).minimize(costTensor)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    totalBatch = int(mnistDatasets.train.num_examples / batchSize)
    for epoch in range(epochCount):
        totalCost = 0
        for batch in range(totalBatch):
            batchInputNDArray, batchCorrectOutputNDArray = mnistDatasets.train.next_batch(batchSize)
            batchInputNDArray = batchInputNDArray.reshape(-1, imageSize, imageSize, 1)
            _, cost = session.run([optimizerOperation, costTensor],
                                  feed_dict = {inputLayerTensor : batchInputNDArray,
                                               correctOutputTensor : batchCorrectOutputNDArray,
                                               dropoutRateTensor : dropoutRate})
            totalCost += cost
        print("Epoch : ", "%04d" % (epoch + 1), "Average cost : ", "{:.4f}".format(totalCost / totalBatch))
    print("Training complete!")
    # Evaluate on the test set with the keep probability set to 1 (no dropout at inference).
    scoreTensor = tf.equal(tf.argmax(outputLayerOutputTensor, 1), tf.argmax(correctOutputTensor, 1))
    accuracyTensor = tf.reduce_mean(tf.cast(scoreTensor, tf.float32))
    print("Accuracy : ", session.run(accuracyTensor,
                                     feed_dict = {inputLayerTensor : mnistDatasets.test.images.reshape(-1, imageSize, imageSize, 1),
                                                  correctOutputTensor : mnistDatasets.test.labels,
                                                  dropoutRateTensor : 1}))
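
The flattened size of 7 * 7 * 32 fed into the fully connected layer follows from the two pooling steps: each SAME-padded 2x2 max pool with stride 2 halves the spatial resolution (28x28 -> 14x14 -> 7x7), and the second convolution emits 32 feature maps. Below is a minimal sketch of that arithmetic; the pooledSize helper is hypothetical, introduced here only for illustration.

▶ Shape check (PY)

def pooledSize(size, stride = 2):
    # SAME padding: output size is ceil(input / stride).  Hypothetical helper.
    return (size + stride - 1) // stride

size = 28
for _ in range(2):              # the two max-pooling layers in the example above
    size = pooledSize(size)
print(size, size * size * 32)   # prints 7 1568, i.e. the 7 * 7 * 32 flatten size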