[PYTHON/TENSORFLOW] Building a Multilayer Perceptron Neural Network (MNIST)
■ Shows how to build and train a multilayer perceptron with two hidden layers that classifies MNIST handwritten digits. ▶ mlp.py
import matplotlib.pyplot as pp
import tensorflow as tf

# MNIST helper from the TensorFlow 1.x tutorials; downloads the dataset
# to MNIST_data/ on first run and serves it as one-hot labelled batches.
from tensorflow.examples.tutorials.mnist import input_data

numberPixelDatasets = input_data.read_data_sets("MNIST_data/", one_hot = True)

# Hyperparameters
learningRate = 0.001
trainingEpochCount = 20
batchSize = 100
displayStep = 1

# Network topology
inputNodeCount = 784     # input node count; 28×28 pixel image
hiddenNodeCount1 = 256   # hidden node count 1
hiddenNodeCount2 = 256   # hidden node count 2
outputNodeCount = 10     # output node count; digits 0-9

# Placeholders for image pixels and one-hot labels
inputValueTensor = tf.placeholder("float", [None, inputNodeCount])
outputValueTensor = tf.placeholder("float", [None, outputNodeCount])

# First hidden layer: 784 -> 256, sigmoid activation
hiddenLayerWeightVariable1 = tf.Variable(tf.random_normal([inputNodeCount, hiddenNodeCount1]))
hiddenLayerBiasVariable1 = tf.Variable(tf.random_normal([hiddenNodeCount1]))
hiddenLayerTensor1 = tf.nn.sigmoid(tf.add(tf.matmul(inputValueTensor, hiddenLayerWeightVariable1), hiddenLayerBiasVariable1))

# Second hidden layer: 256 -> 256, sigmoid activation
hiddenLayerWeightVariable2 = tf.Variable(tf.random_normal([hiddenNodeCount1, hiddenNodeCount2]))
hiddenLayerBiasVariable2 = tf.Variable(tf.random_normal([hiddenNodeCount2]))
hiddenLayerTensor2 = tf.nn.sigmoid(tf.add(tf.matmul(hiddenLayerTensor1, hiddenLayerWeightVariable2), hiddenLayerBiasVariable2))

# Output layer: 256 -> 10 logits (softmax is applied inside the loss)
outputLayerWeightVariable = tf.Variable(tf.random_normal([hiddenNodeCount2, outputNodeCount]))
outputLayerBiasVariable = tf.Variable(tf.random_normal([outputNodeCount]))
outputLayerTensor = tf.matmul(hiddenLayerTensor2, outputLayerWeightVariable) + outputLayerBiasVariable

# Softmax cross-entropy loss and Adam optimizer
costTensor = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = outputLayerTensor, labels = outputValueTensor))
optimizerOperation = tf.train.AdamOptimizer(learningRate).minimize(costTensor)

averageList = []
epochList = []

initializerOperation = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(initializerOperation)

    for epoch in range(trainingEpochCount):
        averageCost = 0.
        totalBatch = int(numberPixelDatasets.train.num_examples / batchSize)

        for i in range(totalBatch):
            xBatchNDArray, yBatchNDArray = numberPixelDatasets.train.next_batch(batchSize)

            sess.run(optimizerOperation, feed_dict = {inputValueTensor : xBatchNDArray, outputValueTensor : yBatchNDArray})

            averageCost += sess.run(costTensor, feed_dict = {inputValueTensor : xBatchNDArray, outputValueTensor : yBatchNDArray}) / totalBatch

        if epoch % displayStep == 0:
            print("Epoch :", '%04d' % (epoch + 1), "cost =", "{:.9f}".format(averageCost))

        averageList.append(averageCost)
        epochList.append(epoch + 1)

    print("Training phase complete")

    # Accuracy: fraction of test images whose argmax logit matches the label
    correctPredictionTensor = tf.equal(tf.argmax(outputLayerTensor, 1), tf.argmax(outputValueTensor, 1))
    accuracyTensor = tf.reduce_mean(tf.cast(correctPredictionTensor, "float"))

    print("Model accuracy :", accuracyTensor.eval({inputValueTensor : numberPixelDatasets.test.images, outputValueTensor : numberPixelDatasets.test.labels}))

    # Plot the per-epoch average training cost
    pp.plot(epochList, averageList, 'o', label = "MLP Training phase")
    pp.ylabel("cost")
    pp.xlabel("epoch")
    pp.legend()
    pp.show()
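The listing above uses the TensorFlow 1.x graph/session API (tf.placeholder, tf.Session), which does not run unmodified on TensorFlow 2.x unless routed through tf.compat.v1. For readers on TensorFlow 2.x, below is a minimal sketch of the same architecture (784-256-256-10, sigmoid hidden layers, Adam at 0.001, 20 epochs, batch size 100) using the Keras API. The file name mlp_keras.py and the use of tf.keras.datasets.mnist in place of the retired input_data helper are assumptions for illustration, not part of the original listing. ▶ mlp_keras.py (sketch)

# Minimal TF 2.x sketch of the same MLP (assumption: not from the original text)
import tensorflow as tf

# tf.keras.datasets replaces the retired input_data helper
(xTrain, yTrain), (xTest, yTest) = tf.keras.datasets.mnist.load_data()
xTrain = xTrain.reshape(-1, 784).astype("float32") / 255.0  # flatten 28×28 images
xTest = xTest.reshape(-1, 784).astype("float32") / 255.0
yTrain = tf.keras.utils.to_categorical(yTrain, 10)  # one-hot labels, as in the listing
yTest = tf.keras.utils.to_categorical(yTest, 10)

# Same topology as the listing: 784 -> 256 -> 256 -> 10
model = tf.keras.Sequential([
    tf.keras.Input(shape = (784,)),
    tf.keras.layers.Dense(256, activation = "sigmoid"),
    tf.keras.layers.Dense(256, activation = "sigmoid"),
    tf.keras.layers.Dense(10),  # logits; softmax is applied inside the loss
])

model.compile(
    optimizer = tf.keras.optimizers.Adam(learning_rate = 0.001),
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits = True),
    metrics = ["accuracy"],
)

model.fit(xTrain, yTrain, batch_size = 100, epochs = 20)
print("Model accuracy :", model.evaluate(xTest, yTest)[1])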
▶ Result
Epoch : 0001 cost = 1.835462693
Epoch : 0002 cost = 0.543075322
Epoch : 0003 cost = 0.365367627
Epoch : 0004 cost = 0.269016541
Epoch : 0005 cost = 0.207292393
Epoch : 0006 cost = 0.162509755
Epoch : 0007 cost = 0.128678610
Epoch : 0008 cost = 0.102387188
Epoch : 0009 cost = 0.080819757
Epoch : 0010 cost = 0.064745211
Epoch : 0011 cost = 0.051405598
Epoch : 0012 cost = 0.040682739
Epoch : 0013 cost = 0.032230697
Epoch : 0014 cost = 0.025317747
Epoch : 0015 cost = 0.019939722
Epoch : 0016 cost = 0.015559807
Epoch : 0017 cost = 0.011993421
Epoch : 0018 cost = 0.009289965
Epoch : 0019 cost = 0.007239956
Epoch : 0020 cost = 0.005603419
Training phase complete
Model accuracy : 0.9465
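Note that the average training cost falls to about 0.0056 while test accuracy stops at 94.65%, so the network fits the training batches far more closely than it generalizes. A common inexpensive change (an assumption beyond the original text, not something it prescribes) is to use ReLU instead of sigmoid in the hidden layers, optionally with smaller-variance initial weights such as tf.random_normal(..., stddev = 0.1); this typically speeds up convergence on MNIST. The variant touches only the two activation lines:

# Hypothetical variant: ReLU activations in both hidden layers
hiddenLayerTensor1 = tf.nn.relu(tf.add(tf.matmul(inputValueTensor, hiddenLayerWeightVariable1), hiddenLayerBiasVariable1))
hiddenLayerTensor2 = tf.nn.relu(tf.add(tf.matmul(hiddenLayerTensor1, hiddenLayerWeightVariable2), hiddenLayerBiasVariable2))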