SoFunction
Updated on 2024-10-30

tensorflow implementation of logistic regression models

logistic regression model

Logistic regression is a widely used classification algorithm. It fits data to the logistic (sigmoid) function so that it can predict the probability of an event occurring.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

# MNIST dataset already downloaded into F:/mnist/data/.
# one_hot=True encodes each label as a 10-element one-hot vector.
mnist = input_data.read_data_sets('F:/mnist/data/', one_hot=True)
print(mnist.train.num_examples)
print(mnist.test.num_examples)

# Flattened 28x28 images (784 floats per row) and one-hot labels.
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels

print(type(trainimg))
# Expected shapes: (55000, 784), (55000, 10), (10000, 784), (10000, 10)
print(trainimg.shape)
print(trainlabel.shape)
print(testimg.shape)
print(testlabel.shape)

# Visualize a few random training samples to sanity-check the data.
nsample = 5
randidx = np.random.randint(trainimg.shape[0], size=nsample)

for i in randidx:
  # Reshape the flat 784-vector back to a 28x28 image for display.
  curr_img = np.reshape(trainimg[i, :], (28, 28))
  # Decode the one-hot label back to its digit class.
  curr_label = np.argmax(trainlabel[i, :])
  plt.matshow(curr_img, cmap=plt.get_cmap('gray'))
  plt.title("" + str(i) + "th Training Data" + "label is" + str(curr_label))
  print("" + str(i) + "th Training Data" + "label is" + str(curr_label))
  plt.show()


x = ("float",[None,784])
y = ("float",[None,10])
W = (([784,10]))
b = (([10]))

#
actv = ((x,W)+b)
# Calculated losses
cost = tf.reduce_mean(-tf.reduce_sum(y*(actv),reduction_indices=1))
# Learning rate
learning_rate = 0.01
# Stochastic gradient descent
optm = (learning_rate).minimize(cost)

#Compare the predicted value index with the label index, and return True if it is the same.
pred = ((actv,1),(y,1))
# Convert True and false to float types 0, 1
# Add all predictions together for accuracy
accr = tf.reduce_mean((pred,"float"))
init = tf.global_variables_initializer()
"""
# Test code
sess = ()
arr = ([[31,23,4,24,27,34],[18,3,25,4,5,6],[4,3,2,1,5,67]])
# Returns the dimension of the array 2
print((arr).eval())
# Return the number of rows and columns of the array [3 6]
print((arr).eval())
# Returns the index of the largest element in each column of the array [0 0 1 0 0 0 2]
print((arr,0).eval())
# Returns the index of the largest element in each row of the array [5 2 5].
print((arr,1).eval()) 
J"""
# Iterate over all samples 50 times
training_epochs = 50
# How many samples to use per mini-batch
batch_size = 100
display_step = 5

sess = tf.Session()
sess.run(init)

# Training loop: one epoch = one full pass over the training set.
for epoch in range(training_epochs):
  avg_cost = 0
  num_batch = int(mnist.train.num_examples / batch_size)
  for i in range(num_batch):
    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
    # One SGD step on this mini-batch.
    sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})
    feeds = {x: batch_xs, y: batch_ys}
    # Accumulate the mean batch cost into the epoch average.
    avg_cost += sess.run(cost, feed_dict=feeds) / num_batch

  if epoch % display_step == 0:
    # Train accuracy is measured on the last mini-batch only;
    # test accuracy on the full test set.
    feeds_train = {x: batch_xs, y: batch_ys}
    feeds_test = {x: mnist.test.images, y: mnist.test.labels}
    train_acc = sess.run(accr, feed_dict=feeds_train)
    test_acc = sess.run(accr, feed_dict=feeds_test)
    # Print the message every five epochs
    print("Epoch:%03d/%03d cost:%.9f train_acc:%.3f test_acc: %.3f" %(epoch,training_epochs,avg_cost,train_acc,test_acc))

print("Done")

The training output of the program is as follows:

Epoch:000/050 cost:1.177228655 train_acc:0.800 test_acc: 0.855
Epoch:005/050 cost:0.440933891 train_acc:0.890 test_acc: 0.894
Epoch:010/050 cost:0.383387268 train_acc:0.930 test_acc: 0.905
Epoch:015/050 cost:0.357281335 train_acc:0.930 test_acc: 0.909
Epoch:020/050 cost:0.341473956 train_acc:0.890 test_acc: 0.913
Epoch:025/050 cost:0.330586549 train_acc:0.920 test_acc: 0.915
Epoch:030/050 cost:0.322370980 train_acc:0.870 test_acc: 0.916
Epoch:035/050 cost:0.315942993 train_acc:0.940 test_acc: 0.916
Epoch:040/050 cost:0.310728854 train_acc:0.890 test_acc: 0.917
Epoch:045/050 cost:0.306357428 train_acc:0.870 test_acc: 0.918
Done

That concludes this article.