■ 순방향 신경망(Feed-Forward Neural Network, FFNN)을 만드는 방법을 보여준다.
▶ 예제 코드 (PY)
# Train a single-layer softmax classifier (784 -> 10) on MNIST, log
# cost/accuracy summaries for TensorBoard, report test accuracy,
# classify one randomly chosen test image, and save a checkpoint.
# Uses the TensorFlow 1.x API (placeholders, Session, tutorials.mnist).
import numpy as np
import random
import tensorflow as tf
import tensorflow.examples.tutorials.mnist as mnist

inputLayerNodeCount = 784    # 28x28 pixels, flattened
outputLayerNodeCount = 10    # digit classes 0-9
learningRate = 0.5
epochCount = 10
batchSize = 100
summaryLogDirectoryPath = "log_mnist_1_layer_softmax"

mnistDatasets = mnist.input_data.read_data_sets("data", one_hot = True)

inputLayerTensor = tf.placeholder(tf.float32, [None, inputLayerNodeCount], name = "input")
hiddenLayerWeightVariable = tf.Variable(tf.zeros([inputLayerNodeCount, outputLayerNodeCount]))
hiddenLayerBiasVariable = tf.Variable(tf.zeros([outputLayerNodeCount]))

# BUG FIX: keep the raw logits in their own tensor. softmax_cross_entropy_with_logits
# applies softmax internally; the original passed it the already-softmaxed
# output, silently computing softmax twice and degrading the gradients.
logitsTensor = tf.matmul(inputLayerTensor, hiddenLayerWeightVariable) + hiddenLayerBiasVariable
outputLayerOutputTensor = tf.nn.softmax(logitsTensor, name = "output")

correctOutputTensor = tf.placeholder(tf.float32, [None, outputLayerNodeCount])
costTensor = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels = correctOutputTensor, logits = logitsTensor))
correctPredictionTensor = tf.equal(tf.argmax(outputLayerOutputTensor, 1),
                                   tf.argmax(correctOutputTensor, 1))
accuracyTensor = tf.reduce_mean(tf.cast(correctPredictionTensor, tf.float32))
optimizerOperation = tf.train.GradientDescentOptimizer(learningRate).minimize(costTensor)

tf.summary.scalar("cost"    , costTensor    )
tf.summary.scalar("accuracy", accuracyTensor)
summaryTensor = tf.summary.merge_all()

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    fileWriter = tf.summary.FileWriter(summaryLogDirectoryPath, graph = tf.get_default_graph())
    batchCount = int(mnistDatasets.train.num_examples / batchSize)

    for epoch in range(epochCount):
        for batch in range(batchCount):
            batchInputNDArray, batchCorrectOutputNDArray = mnistDatasets.train.next_batch(batchSize)
            # Run one optimizer step and collect the merged summary for TensorBoard.
            _, summaryBytes = session.run(
                [optimizerOperation, summaryTensor],
                feed_dict = {inputLayerTensor      : batchInputNDArray,
                             correctOutputTensor   : batchCorrectOutputNDArray})
            fileWriter.add_summary(summaryBytes, epoch * batchCount + batch)
        print("Epoch : ", epoch)

    # Evaluate on the full test set after training.
    print("정확도 : ", accuracyTensor.eval(
        feed_dict = {inputLayerTensor    : mnistDatasets.test.images,
                     correctOutputTensor : mnistDatasets.test.labels}))
    print("학습이 완료되었습니다.")

    # BUG FIX: random.randint is inclusive at BOTH ends, so the original
    # could yield index == shape[0], one past the last test image.
    testImageIndex = random.randint(0, mnistDatasets.test.images.shape[0] - 1)
    testImageNDArray = mnistDatasets.test.images[testImageIndex]
    classificationNDArray = session.run(
        tf.argmax(outputLayerOutputTensor, 1),
        feed_dict = {inputLayerTensor : [testImageNDArray]})
    print("정답 : ", np.argmax(mnistDatasets.test.labels[testImageIndex]))
    print("판단 : ", classificationNDArray[0])

    saver = tf.train.Saver()
    saveFilePath = saver.save(session, "data/mnist_1_layer_softmax.ckpt")
    print("모델이 저장되었습니다 : %s" % saveFilePath)