Iris Data Classification


(Review) Multinomial Classification

Softmax Function (Hypothesis)

\[S(y_{i}) = \frac{e^{y_{i}}}{\sum_{j=1}^{n} e^{y_{j}}}\] \[n: \text{number of classes} \\ i: \text{class index}\]
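
As a quick sanity check, the formula can be evaluated directly with NumPy. The scores below are made-up logits for three classes; subtracting the maximum before exponentiating is a standard numerical-stability trick that leaves the result unchanged.

import numpy as np

y = np.array([2.0, 1.0, 0.1])       # made-up class scores (logits)
exp_y = np.exp(y - np.max(y))       # shift by max for numerical stability
s = exp_y / np.sum(exp_y)           # normalize so the outputs sum to 1
print(s)                            # [0.659 0.242 0.099]
print(s.sum())                      # 1.0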

Cost Function

\[Cost(W) = \frac{1}{m} \sum_{i=1}^{m} D(S(X_{i}W + b), L_{i}) \\ m: \text{number of instances} \\ i: \text{instance index}\]

Cross Entropy in Multinomial Classification

\[D(S, L) = - \sum_{j=1}^{n} L_{j} \log(S(y_{j})) \\ n: \text{number of classes} \\ j: \text{class index}\]
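
For a one-hot label L, every term but the true class vanishes, so D(S, L) reduces to the negative log of the probability assigned to the correct class; averaging D over all m instances gives the cost above. A small worked sketch, reusing the softmax output from the previous example as an assumed prediction:

import numpy as np

S = np.array([0.659, 0.242, 0.099])   # assumed softmax output
L = np.array([1.0, 0.0, 0.0])         # one-hot label: true class is 0
D = -np.sum(L * np.log(S))            # cross entropy
print(D)                              # ~0.417, i.e. -log(0.659)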

Minimizing Cost

\[W_{new} = W_{old} - \alpha \frac{\partial}{\partial W} Cost(W) \\ \alpha: \text{learning rate}\]
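
In TensorFlow 2 the partial derivative is obtained from tf.GradientTape and the update is applied with assign_sub, which is exactly the pattern used in the training loop below. A minimal one-variable sketch with a toy quadratic cost:

import tensorflow as tf

w = tf.Variable(3.0)
alpha = 0.1

with tf.GradientTape() as tape:
    cost = tf.square(w - 1.0)     # toy cost with its minimum at w = 1

grad = tape.gradient(cost, w)     # dCost/dw = 2(w - 1) = 4.0
w.assign_sub(alpha * grad)        # w_new = 3.0 - 0.1 * 4.0 = 2.6
print(w.numpy())                  # 2.6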


Implementation

import tensorflow as tf
import numpy as np
from sklearn.datasets import load_iris

print("TensorFlow Version: %s" % (tf.__version__))
TensorFlow Version: 2.0.0


Data

iris = load_iris()  # a sklearn Bunch object, not a DataFrame
iris.keys()
dict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names', 'filename'])
x_data = np.array(iris.data, dtype=np.float32)
y_data = np.array(iris.target, dtype=np.int32)

nb_features = x_data.shape[1]
nb_classes = len(set(y_data))

print("x_data:", x_data.shape)
print("y_data:", y_data.shape)

print("nb_features:", nb_features)
print("nb_classes:", nb_classes)
x_data: (150, 4)
y_data: (150,)
nb_features: 4
nb_classes: 3
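
The Bunch returned by load_iris also carries the human-readable names behind these numbers, which is handy when reading the predictions later:

print(iris.feature_names)
# ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
print(iris.target_names)
# ['setosa' 'versicolor' 'virginica']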


Initializing Weights

# Weights
tf.random.set_seed(2020)
W = tf.Variable(tf.random.normal([nb_features, nb_classes], mean=0.0))
b = tf.Variable(tf.random.normal([nb_classes], mean=0.0))

# One-Hot Encoding
y_one_hot = tf.one_hot(y_data, depth=nb_classes)

print('# Weights: \n', W.numpy(), '\n\n# Bias: \n', b.numpy())
# Weights:
 [[-0.10099822  0.6847899   1.6258513 ]
 [ 0.88112587 -0.63692456 -0.1427695 ]
 [ 0.82411087 -0.91326994 -0.4510184 ]
 [ 0.58053356  1.3066356  -0.60428965]]

# Bias:
 [ 0.38414612 -0.6159301  -0.5453214 ]
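
To make the one-hot step concrete, tf.one_hot maps each integer label to a row with a single 1 at that index:

print(tf.one_hot([0, 1, 2], depth=nb_classes).numpy())
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]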


Training

# Learning Rate
learning_rate = 0.01

# Softmax Function
def softmax(X):
    return tf.nn.softmax(tf.matmul(X, W) + b)

# Training
for i in range(10000+1):

    with tf.GradientTape() as tape:
        sm = softmax(x_data)
        cost = tf.reduce_mean(-tf.reduce_sum(y_one_hot * tf.math.log(sm), axis=1))

    # Compute gradients outside the tape context, then apply the update
    W_grad, b_grad = tape.gradient(cost, [W, b])
    W.assign_sub(learning_rate * W_grad)
    b.assign_sub(learning_rate * b_grad)

    if i % 1000 == 0:
        print(">>> #%s \n Weights: \n%s \n Bias: \n%s \n cost: %s\n" % (i, W.numpy(), b.numpy(), cost.numpy()))
>>> #0
 Weights:
[[-0.11743642  0.70453906  1.6225402 ]
 [ 0.87678254 -0.62771267 -0.14763813]
 [ 0.80348897 -0.8990887  -0.44457778]
 [ 0.5725719   1.3110503  -0.60074264]]
 Bias:
[ 0.38213396 -0.61260384 -0.54663545]
 cost: 3.9675086

>>> #1000
 Weights:
[[ 0.16433352  1.3574841   0.68781847]
 [ 1.7573066  -0.6487719  -1.0071023 ]
 [-0.8606213  -0.5406396   0.8610837 ]
 [-0.19550529  1.0364839   0.44190007]]
 Bias:
[ 0.5655053  -0.43386927 -0.9087408 ]
 cost: 0.40306073

>>> #2000
 Weights:
[[ 0.31688696  1.5321444   0.3606074 ]
 [ 2.0306034  -0.699498   -1.2296748 ]
 [-1.2645828  -0.59373426  1.3181392 ]
 [-0.38164774  0.7259959   0.9385294 ]]
 Bias:
[ 0.6280675  -0.33476618 -1.0704058 ]
 cost: 0.29343706

>>> #3000
 Weights:
[[ 0.42719242  1.6423255   0.14011964]
 [ 2.222088   -0.7123837  -1.4082739 ]
 [-1.5467921  -0.6306485   1.6372662 ]
 [-0.51172817  0.4970247   1.297582  ]]
 Bias:
[ 0.6729265 -0.2532307 -1.1967993]
 cost: 0.23829417

>>> #4000
 Weights:
[[ 0.5144236   1.717074   -0.02186037]
 [ 2.3705711  -0.7091585  -1.5599803 ]
 [-1.7671052  -0.6548329   1.8817648 ]
 [-0.6135871   0.31790695  1.5785577 ]]
 Bias:
[ 0.7084192  -0.18307595 -1.302447  ]
 cost: 0.20493008

>>> #5000
 Weights:
[[ 0.5868196   1.7708837  -0.14806792]
 [ 2.491839   -0.6979823  -1.692425  ]
 [-1.9482718  -0.67131     2.0794072 ]
 [-0.697666    0.17082885  1.809714  ]]
 Bias:
[ 0.73791    -0.12065024 -1.3943636 ]
 cost: 0.18246923

>>> #6000
 Weights:
[[ 0.648823    1.8113612  -0.25055158]
 [ 2.5943112  -0.6827013  -1.8101772 ]
 [-2.102253   -0.6829624   2.2450397 ]
 [-0.7694064   0.04584497  2.0064383 ]]
 Bias:
[ 0.76319474 -0.06381484 -1.476484  ]
 cost: 0.16624019

>>> #7000
 Weights:
[[ 0.7031186   1.842829   -0.33631983]
 [ 2.683033   -0.6653318  -1.9162688 ]
 [-2.2362247  -0.6914313   2.3874798 ]
 [-0.83205914 -0.06300665  2.177942  ]]
 Bias:
[ 0.78535825 -0.01123109 -1.5512308 ]
 cost: 0.1539142

>>> #8000
 Weights:
[[ 0.7514567   1.8679215  -0.40975812]
 [ 2.7612686  -0.6469993  -2.012836  ]
 [-2.3548448  -0.6977029   2.5123684 ]
 [-0.8877303  -0.15955041  2.3301554 ]]
 Bias:
[ 0.8051083   0.03799404 -1.6202061 ]
 cost: 0.14420128

>>> #9000
 Weights:
[[ 0.7950464   1.8883388  -0.4737722 ]
 [ 2.8312438  -0.628357   -2.1014526 ]
 [-2.4613101  -0.7024015   2.6235294 ]
 [-0.93786424 -0.24637954  2.467118  ]]
 Bias:
[ 0.8229338  0.0844864 -1.684524 ]
 cost: 0.13632807

>>> #10000
 Weights:
[[ 0.8347582  1.9052253 -0.5303759]
 [ 2.8945463 -0.6097894 -2.183319 ]
 [-2.5579088 -0.7059436  2.7236636]
 [-0.9834962 -0.3253334  2.591702 ]]
 Bias:
[ 0.83918715  0.12870272 -1.7449929 ]
 cost: 0.12980223


Predict

predicted = tf.argmax(softmax(x_data), axis=1)
real = tf.argmax(y_one_hot, axis=1)

def acc(predicted, real):
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, real), dtype=tf.float32))
    return accuracy

accuracy = acc(predicted, real).numpy()
print("Accuracy: %s" % accuracy)
Accuracy: 0.97333336
predicted.numpy()
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
real.numpy()
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
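
The handful of mismatches between predicted and real are easier to read as a confusion matrix; tf.math.confusion_matrix tallies them per class (rows are true labels, columns are predictions). From the arrays above, the only errors are four versicolor flowers predicted as virginica:

print(tf.math.confusion_matrix(real, predicted).numpy())
# [[50  0  0]
#  [ 0 46  4]
#  [ 0  0 50]]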


tf.nn.softmax_cross_entropy_with_logits

Initializing Weights

# Weights
tf.random.set_seed(2020)
W = tf.Variable(tf.random.normal([nb_features, nb_classes], mean=0.0), name='Weights')
b = tf.Variable(tf.random.normal([nb_classes], mean=0.0), name='Bias')
variables = [W, b]

# One-Hot Encoding
y_one_hot = tf.one_hot(y_data, depth=nb_classes)

print('# Weights: \n', W.numpy(), '\n\n# Bias: \n', b.numpy())
# Weights:
 [[-0.10099822  0.6847899   1.6258513 ]
 [ 0.88112587 -0.63692456 -0.1427695 ]
 [ 0.82411087 -0.91326994 -0.4510184 ]
 [ 0.58053356  1.3066356  -0.60428965]]

# Bias:
 [ 0.38414612 -0.6159301  -0.5453214 ]
def logit_fn(X):
    return tf.matmul(X, W) + b

def hypothesis(X):
    return tf.nn.softmax(logit_fn(X))

def cost_fn(X, Y):
    logits = logit_fn(X)
    cost_i = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y) # Y: One-Hot Encoded
    cost = tf.reduce_mean(cost_i)
    return cost

def grad_fn(X, Y):
    with tf.GradientTape() as tape:
        loss = cost_fn(X, Y)
        grads = tape.gradient(loss, variables)
    return grads

def accuracy_fn(X, Y):
    pred = tf.argmax(hypothesis(X), 1)
    correct_prediction = tf.equal(pred, tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy

def fit(X, Y, epochs=10000, verbose=1000, learning_rate=0.01):
    for i in range(epochs+1):
        grads = grad_fn(X, Y)
        variables[0].assign_sub(learning_rate * grads[0])
        variables[1].assign_sub(learning_rate * grads[1])

        if i % verbose == 0:
            acc = accuracy_fn(X, Y).numpy()
            loss = cost_fn(X, Y).numpy()
            print("Epoch: {} \t Loss: {} \t Acc: {}".format(i, loss, acc))
fit(x_data, y_one_hot)
Epoch: 0 	 Loss: 3.812716007232666 	 Acc: 0.0
Epoch: 1000 	 Loss: 0.4028949439525604 	 Acc: 0.9133333563804626
Epoch: 2000 	 Loss: 0.2933638393878937 	 Acc: 0.95333331823349
Epoch: 3000 	 Loss: 0.2382526695728302 	 Acc: 0.9666666388511658
Epoch: 4000 	 Loss: 0.20490330457687378 	 Acc: 0.9733333587646484
Epoch: 5000 	 Loss: 0.18245039880275726 	 Acc: 0.9733333587646484
Epoch: 6000 	 Loss: 0.16622616350650787 	 Acc: 0.9733333587646484
Epoch: 7000 	 Loss: 0.15390338003635406 	 Acc: 0.9733333587646484
Epoch: 8000 	 Loss: 0.1441926211118698 	 Acc: 0.9733333587646484
Epoch: 9000 	 Loss: 0.13632096350193024 	 Acc: 0.9733333587646484
Epoch: 10000 	 Loss: 0.12979625165462494 	 Acc: 0.9733333587646484
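
Since y_data already holds integer class ids, the one-hot step can also be skipped: tf.nn.sparse_softmax_cross_entropy_with_logits accepts the integer labels directly. A sketch of the equivalent cost function, assuming the same logit_fn as above:

def sparse_cost_fn(X, y):
    # y: integer class ids (0, 1, 2), no one-hot encoding needed
    logits = logit_fn(X)
    cost_i = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    return tf.reduce_mean(cost_i)

print(sparse_cost_fn(x_data, y_data).numpy())  # same value as cost_fn(x_data, y_one_hot)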

