# Multiply a 3x2 int32 placeholder element-wise by 7 and print the result.
import tensorflow as tf

data_in = tf.placeholder(shape=[3, 2], dtype=tf.int32)
scaled = data_in * 7

sess = tf.Session()
print(sess.run(scaled, feed_dict={data_in: [[1, 2], [3, 4], [5, 6]]}))
# Multiply a 3x2 float placeholder by 7, then apply softmax across each row.
import tensorflow as tf

data_in = tf.placeholder(shape=[3, 2], dtype=tf.float32)
scaled = data_in * 7
probs = tf.nn.softmax(scaled)

sess = tf.Session()
print(sess.run(probs, feed_dict={data_in: [[1, 2], [3, 4], [5, 6]]}))
# Print the sigmoid of five random scalars drawn uniformly from [-20, 20).
import tensorflow as tf
import random

val = tf.placeholder(dtype=tf.float32)
squashed = tf.nn.sigmoid(val)

sess = tf.Session()
for _ in range(5):
    print(sess.run(squashed, feed_dict={val: random.random() * 40 - 20}))
# Train a single sigmoid unit (3 weights + shared bias, outputs summed to a
# scalar) on three fixed samples, minimizing |y - yTrain| with RMSProp.
import tensorflow as tf

x = tf.placeholder(shape=[3], dtype=tf.float32)
yTrain = tf.placeholder(shape=[], dtype=tf.float32)

w = tf.Variable(tf.zeros([3]), dtype=tf.float32)
b = tf.Variable(0, dtype=tf.float32)

y = tf.reduce_sum(tf.nn.sigmoid(x * w + b))
loss = tf.abs(y - yTrain)
train = tf.train.RMSPropOptimizer(0.1).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Same three (input, target) pairs fed in the same order as before.
samples = [([1, 1, 1], 2), ([1, 0, 1], 1), ([1, 2, 3], 3)]
for _ in range(1000):
    for xs, target in samples:
        print(sess.run([train, y, yTrain, loss],
                       feed_dict={x: xs, yTrain: target}))
# Load selected columns (0, 1, 3, 4) of dataq.csv as float32 and print the
# resulting NumPy array.
import numpy as np
import pandas as pd

fileData = pd.read_csv('dataq.csv', dtype=np.float32, header=None,
                       usecols=(0, 1, 3, 4))

# BUGFIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
# pandas 1.0; .values returns the same ndarray on every pandas version.
wholeData = fileData.values
print(wholeData)
# Load dataq.csv as float32, replacing column 2 with random values via a
# converter, and print the resulting NumPy array.
import numpy as np
import pandas as pd
import random

# The converter discards column 2's text and substitutes a random float —
# the original string value `s` is intentionally ignored.
fileData = pd.read_csv('dataq.csv', dtype=np.float32, header=None,
                       converters={2: lambda s: random.random()})

# BUGFIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
# pandas 1.0; .values returns the same ndarray on every pandas version.
wholeData = fileData.values
print(wholeData)
# Compute the same 3x3 @ 3x4 matrix product with TensorFlow and with NumPy
# and print both, to show they agree.
import tensorflow as tf
import numpy as np

left = [[1, 2, 3], [7, 8, 9], [4, 5, 6]]
right = [[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8]]

m1 = tf.placeholder(shape=[3, 3], dtype=tf.float32)
m2 = tf.placeholder(shape=[3, 4], dtype=tf.float32)
product = tf.matmul(m1, m2)

sess = tf.Session()
print(sess.run([product], feed_dict={m1: left, m2: right}))
print(np.matmul(left, right))
# Train a 1x3 @ 3x3 matmul layer with a shared bias through a sigmoid, summed
# to a scalar, against three fixed targets using RMSProp on |y - yTrain|.
import tensorflow as tf

x = tf.placeholder(shape=[1, 3], dtype=tf.float32)
yTrain = tf.placeholder(shape=[], dtype=tf.float32)

w = tf.Variable(tf.zeros([3, 3]), dtype=tf.float32)
b = tf.Variable(0, dtype=tf.float32)

y = tf.reduce_sum(tf.nn.sigmoid(tf.matmul(x, w) + b))
loss = tf.abs(y - yTrain)
train = tf.train.RMSPropOptimizer(0.01).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Same three (input, target) pairs fed in the same order as before.
samples = [([[1, 1, 1]], 2), ([[1, 0, 1]], 1), ([[1, 2, 3]], 3)]
for _ in range(2000):
    for xs, target in samples:
        print(sess.run([train, y, yTrain, loss],
                       feed_dict={x: xs, yTrain: target}))
# Print the TensorFlow version when the script is invoked with "-v".
import tensorflow as tf
import sys

for arg in sys.argv[1:]:
    if arg == "-v":
        print(tf.__version__)
# Train a tiny 4-8-2 network (tanh hidden layer, softmax output) on random
# four-digit inputs: the target is [0, 1] when the third digit is even and
# [1, 0] otherwise.  Supports restoring a previous checkpoint, a "-restart"
# flag that forces reinitialization, and asks whether to save afterwards.
import tensorflow as tf
import random
import os
import sys

# "-restart" on the command line skips checkpoint loading entirely.
ifRestartT = False
argt = sys.argv[1:]
for v in argt:
    if v == "-restart":
        ifRestartT = True

# Checkpoint path prefix; TF appends ".index"/".data-*" to this.
trainResultPath = "./save/idcard2"

random.seed()

x = tf.placeholder(tf.float32)
yTrain = tf.placeholder(tf.float32)

# Hidden layer: 4 inputs -> 8 tanh units.
w1 = tf.Variable(tf.random_normal([4, 8], mean=0.5, stddev=0.1), dtype=tf.float32)
b1 = tf.Variable(0, dtype=tf.float32)

xr = tf.reshape(x, [1, 4])

n1 = tf.nn.tanh(tf.matmul(xr, w1) + b1)

# Output layer: 8 units -> 2 logits, softmax over the two classes.
w2 = tf.Variable(tf.random_normal([8, 2], mean=0.5, stddev=0.1), dtype=tf.float32)
b2 = tf.Variable(0, dtype=tf.float32)

n2 = tf.matmul(n1, w2) + b2

y = tf.nn.softmax(tf.reshape(n2, [2]))

loss = tf.reduce_mean(tf.square(y - yTrain))

optimizer = tf.train.RMSPropOptimizer(0.01)

train = optimizer.minimize(loss)

sess = tf.Session()

# Decide how to initialize: forced restart, restore from checkpoint (with
# confirmation), or fresh initialization when no checkpoint exists.
if ifRestartT == True:
    print("force restart...")
    sess.run(tf.global_variables_initializer())
elif os.path.exists(trainResultPath + ".index"):
    resultT = input('Would you like to load? (y/n)')
    if resultT == "y":
        print("loading: %s" % trainResultPath)
        tf.train.Saver().restore(sess, save_path=trainResultPath)
    else:
        print("initializing...")
        sess.run(tf.global_variables_initializer())
else:
    print("train result path not exists: %s" % trainResultPath)
    sess.run(tf.global_variables_initializer())

lossSum = 0.0

for i in range(5):
    # Four random digits in [0, 9].
    xDataRandom = [int(random.random() * 10), int(random.random() * 10), int(random.random() * 10), int(random.random() * 10)]
    # Label depends only on the parity of the third digit.
    if xDataRandom[2] % 2 == 0:
        yTrainDataRandom = [0, 1]
    else:
        yTrainDataRandom = [1, 0]

    result = sess.run([train, x, yTrain, y, loss], feed_dict={x: xDataRandom, yTrain: yTrainDataRandom})

    # Loss is the last entry of the fetched list.
    lossSum = lossSum + float(result[len(result) - 1])

    print("i: %d, loss: %10.10f, avgLoss: %10.10f" % (i, float(result[len(result) - 1]), lossSum / (i + 1)))

    # NOTE(review): the source was whitespace-mangled; this reads like the
    # book's "trigger file" pattern — creating save.txt requests an
    # immediate checkpoint, and the file is consumed once handled.
    # Confirm the intended indentation against the original.
    if os.path.exists("save.txt"):
        os.remove("save.txt")
        print("saving...")
        tf.train.Saver().save(sess, save_path=trainResultPath)

# Final interactive save prompt after training completes.
resultT = input('Would you like to save? (y/n)')
if resultT == "y":
    print("saving...")
    tf.train.Saver().save(sess, save_path=trainResultPath)
# Build a tiny x * w + b graph, run it once, and dump the graph definition
# to the "graph" directory for inspection with TensorBoard.
import tensorflow as tf

x = tf.placeholder(shape=[1, 3], dtype=tf.float32)
w = tf.Variable(tf.ones([1, 3]), dtype=tf.float32)
b = tf.Variable(1, dtype=tf.float32)

y = x * w + b

sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(y, feed_dict={x: [[1, 2, 3]]})

writer = tf.summary.FileWriter("graph", sess.graph)
# BUGFIX: FileWriter buffers events; close it so the graph event file is
# flushed to disk before the interpreter exits.
writer.close()
# Same graph dump as before, but the multiply op is explicitly named "MyMul"
# so it is easy to locate in the TensorBoard graph view.
import tensorflow as tf

x = tf.placeholder(shape=[1, 3], dtype=tf.float32)
w = tf.Variable(tf.ones([1, 3]), dtype=tf.float32)
b = tf.Variable(1, dtype=tf.float32)

y = tf.multiply(x, w, name="MyMul") + b

sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(y, feed_dict={x: [[1, 2, 3]]})

writer = tf.summary.FileWriter("graph", sess.graph)
# BUGFIX: FileWriter buffers events; close it so the graph event file is
# flushed to disk before the interpreter exits.
writer.close()
# Fit y = w1*x1 + w2*x2 + w3*x3 to two scoring examples; an optional
# "-predict=a,b,c" command-line argument triggers a prediction afterwards.
import tensorflow as tf
import numpy as np
import sys

predictData = None
for arg in sys.argv[1:]:
    if arg.startswith("-predict="):
        tmpStr = arg[len("-predict="):]
        predictData = np.fromstring(tmpStr, dtype=np.float32, sep=",")
        print("predictData: %s" % predictData)

x1 = tf.placeholder(dtype=tf.float32)
x2 = tf.placeholder(dtype=tf.float32)
x3 = tf.placeholder(dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)

w1 = tf.Variable(0.1, dtype=tf.float32)
w2 = tf.Variable(0.1, dtype=tf.float32)
w3 = tf.Variable(0.1, dtype=tf.float32)

y = x1 * w1 + x2 * w2 + x3 * w3
loss = tf.abs(y - yTrain)
train = tf.train.RMSPropOptimizer(0.001).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for _ in range(200):
    for feed in ({x1: 90, x2: 80, x3: 70, yTrain: 85},
                 {x1: 98, x2: 95, x3: 87, yTrain: 96}):
        print(sess.run([train, x1, x2, x3, w1, w2, w3, y, yTrain, loss],
                       feed_dict=feed))

if predictData is not None:
    print(sess.run(y, feed_dict={x1: predictData[0],
                                 x2: predictData[1],
                                 x3: predictData[2]}))
# Fit y = w1*x1 + w2*x2 + w3*x3 to two scoring examples, then read a
# comma-separated triple from stdin and predict its score.
import tensorflow as tf
import numpy as np
import sys

x1 = tf.placeholder(dtype=tf.float32)
x2 = tf.placeholder(dtype=tf.float32)
x3 = tf.placeholder(dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)

w1 = tf.Variable(0.1, dtype=tf.float32)
w2 = tf.Variable(0.1, dtype=tf.float32)
w3 = tf.Variable(0.1, dtype=tf.float32)

y = x1 * w1 + x2 * w2 + x3 * w3
loss = tf.abs(y - yTrain)
train = tf.train.RMSPropOptimizer(0.001).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for _ in range(200):
    for feed in ({x1: 90, x2: 80, x3: 70, yTrain: 85},
                 {x1: 98, x2: 95, x3: 87, yTrain: 96}):
        print(sess.run([train, x1, x2, x3, w1, w2, w3, y, yTrain, loss],
                       feed_dict=feed))

resultT = input('Please input data: ')
predictData = np.fromstring(resultT, dtype=np.float32, sep=",")
print("predictData: %s" % predictData)

if predictData is not None:
    print(sess.run(y, feed_dict={x1: predictData[0],
                                 x2: predictData[1],
                                 x3: predictData[2]}))
# Two stacked linear Dense layers fitted to two scoring examples, then used
# to predict scores for two unseen input rows.
import tensorflow.contrib.keras as k
import numpy as np

model = k.models.Sequential()
model.add(k.layers.Dense(3, input_dim=3, activation='linear'))
model.add(k.layers.Dense(1, input_dim=3, activation='linear'))
model.compile(loss='mean_squared_error',
              optimizer="RMSProp",
              metrics=['accuracy'])

trainInputs = [[90, 80, 70], [98, 95, 87]]
trainTargets = [85, 96]
model.fit(trainInputs, trainTargets, epochs=10000, batch_size=1, verbose=2)

xTestData = np.array([[80, 80, 80], [99, 98, 97]], dtype=np.float32)
print(model.predict(xTestData))
# An 8-unit tanh hidden layer plus a linear output, fitted to three small
# samples, then used to predict two unseen input rows.
import tensorflow.contrib.keras as k
import numpy as np

model = k.models.Sequential()
model.add(k.layers.Dense(8, input_dim=3, activation='tanh'))
model.add(k.layers.Dense(1, input_dim=3, activation='linear'))
model.compile(loss='mean_squared_error',
              optimizer="RMSProp",
              metrics=['accuracy'])

trainInputs = [[1, 1, 1], [1, 0, 1], [1, 2, 3]]
trainTargets = [2, 1, 3]
model.fit(trainInputs, trainTargets, epochs=1000, batch_size=1, verbose=2)

xTestData = np.array([[1, 2, 2], [2, 3, 3]], dtype=np.float32)
print(model.predict(xTestData))
# Convolutional classifier over 8x8 grids stored one per row in
# checkData64.txt: 64 pixel values followed by a 3-way one-hot label.
# Four stacked 2x2 convolutions feed a flattened 25-unit layer, a 32-unit
# tanh layer and a 3-way softmax trained with cross-entropy.
# Flags: -round=N (epochs) and -learnrate=F (RMSProp learning rate).
import tensorflow as tf
import numpy as np
import pandas as pd
import sys

roundCount = 100
learnRate = 0.01

for v in sys.argv[1:]:
    if v.startswith("-round="):
        roundCount = int(v[len("-round="):])
    if v.startswith("-learnrate="):
        learnRate = float(v[len("-learnrate="):])

fileData = pd.read_csv('checkData64.txt', dtype=np.float32, header=None)
# BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is the
# portable equivalent.
wholeData = fileData.values
rowCount = wholeData.shape[0]
print("wholeData=%s" % wholeData)
print("rowSize=%d" % wholeData.shape[1])
print("rowCount=%d" % rowCount)

x = tf.placeholder(shape=[64], dtype=tf.float32)
yTrain = tf.placeholder(shape=[3], dtype=tf.float32)

# Four 2x2 single-channel convolutions: SAME keeps 8x8, then three VALID
# steps shrink 8x8 -> 7x7 -> 6x6 -> 5x5 (= 25 values).
filter1T = tf.Variable(tf.ones([2, 2, 1, 1]), dtype=tf.float32)
n1 = tf.nn.conv2d(input=tf.reshape(x, [1, 8, 8, 1]), filter=filter1T,
                  strides=[1, 1, 1, 1], padding='SAME')

filter2T = tf.Variable(tf.ones([2, 2, 1, 1]), dtype=tf.float32)
n2 = tf.nn.conv2d(input=tf.reshape(n1, [1, 8, 8, 1]), filter=filter2T,
                  strides=[1, 1, 1, 1], padding='VALID')

filter3T = tf.Variable(tf.ones([2, 2, 1, 1]), dtype=tf.float32)
n3 = tf.nn.conv2d(input=tf.reshape(n2, [1, 7, 7, 1]), filter=filter3T,
                  strides=[1, 1, 1, 1], padding='VALID')

filter4T = tf.Variable(tf.ones([2, 2, 1, 1]), dtype=tf.float32)
n4 = tf.nn.conv2d(input=tf.reshape(n3, [1, 6, 6, 1]), filter=filter4T,
                  strides=[1, 1, 1, 1], padding='VALID')

n4f = tf.reshape(n4, [1, 25])

w4 = tf.Variable(tf.random_normal([25, 32]), dtype=tf.float32)
b4 = tf.Variable(0, dtype=tf.float32)
# BUGFIX: renamed from n4 — the original rebound n4 here, shadowing the conv
# output (harmless only because n4f was captured first, but confusing).
n4d = tf.nn.tanh(tf.matmul(n4f, w4) + b4)

w5 = tf.Variable(tf.random_normal([32, 3]), dtype=tf.float32)
b5 = tf.Variable(0, dtype=tf.float32)
n5 = tf.reshape(tf.matmul(n4d, w5) + b5, [-1])

y = tf.nn.softmax(n5)

# Cross-entropy; clip keeps log() away from zero.
loss = -tf.reduce_mean(yTrain * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
optimizer = tf.train.RMSPropOptimizer(learnRate)
train = optimizer.minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(roundCount):
    lossSum = 0.0
    for j in range(rowCount):
        result = sess.run([train, x, yTrain, y, loss],
                          feed_dict={x: wholeData[j][0:64],
                                     yTrain: wholeData[j][64:67]})
        lossT = float(result[len(result) - 1])
        lossSum = lossSum + lossT
        if j == (rowCount - 1):
            # BUGFIX: average over the rowCount samples actually summed,
            # not rowCount + 1.
            print("i: %d, loss: %10.10f, avgLoss: %10.10f"
                  % (i, lossT, lossSum / rowCount))
# LSTM sequence model on exchange-rate pairs from exchangeData2.txt: each
# sample is a window of cellCount consecutive 2-value rows, trained to
# predict the row that follows; the trailing window is held out and
# predicted at the end.  Flags: -round=N and -learnrate=F.
import tensorflow as tf
import numpy as np
import pandas as pd
import sys

roundT = 1000
learnRateT = 0.1

argt = sys.argv[1:]
print("argt: %s" % argt)
for v in argt:
    if v.startswith("-round="):
        roundT = int(v[len("-round="):])
    if v.startswith("-learnrate="):
        learnRateT = float(v[len("-learnrate="):])

fileData = pd.read_csv('exchangeData2.txt', dtype=np.float32, header=None)
# BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is the
# portable equivalent.
wholeData = np.reshape(fileData.values, (-1, 2))
print("wholeData: %s" % wholeData)

cellCount = 3  # time steps per training window
unitCount = 5  # LSTM hidden units

# The last window has no following row to learn from; keep it for prediction.
testData = wholeData[-cellCount:]
print("testData: %s\n" % testData)

rowCount = wholeData.shape[0] - cellCount
print("rowCount: %d\n" % rowCount)

# Sliding windows and their next-row targets.
xData = [wholeData[i:i + cellCount] for i in range(rowCount)]
yTrainData = [wholeData[i + cellCount] for i in range(rowCount)]
print("xData: %s\n" % xData)
print("yTrainData: %s\n" % yTrainData)

x = tf.placeholder(shape=[cellCount, 2], dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)

cellT = tf.nn.rnn_cell.BasicLSTMCell(unitCount)
initState = cellT.zero_state(1, dtype=tf.float32)

h, finalState = tf.nn.dynamic_rnn(cellT, tf.reshape(x, [1, cellCount, 2]),
                                  initial_state=initState, dtype=tf.float32)
hr = tf.reshape(h, [cellCount, unitCount])

# Project each step's hidden state to 2 values and sum over time steps.
w2 = tf.Variable(tf.random_normal([unitCount, 2]), dtype=tf.float32)
b2 = tf.Variable(tf.zeros([2]), dtype=tf.float32)
y = tf.reduce_sum(tf.matmul(hr, w2) + b2, axis=0)

loss = tf.reduce_mean(tf.square(y - yTrain))
optimizer = tf.train.RMSPropOptimizer(learnRateT)
train = optimizer.minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(roundT):
    lossSum = 0.0
    for j in range(rowCount):
        result = sess.run([train, x, yTrain, y, h, finalState, loss],
                          feed_dict={x: xData[j], yTrain: yTrainData[j]})
        lossSum = lossSum + float(result[len(result) - 1])
        if j == (rowCount - 1):
            print("i: %d, x: %s, yTrain: %s, y: %s, h: %s, finalState: %s, "
                  "loss: %s, avgLoss: %10.10f\n"
                  % (i, result[1], result[2], result[3], result[4],
                     result[5], result[6], (lossSum / rowCount)))

# Predict the row that follows the held-out trailing window.
result = sess.run([x, y], feed_dict={x: testData})
print("x: %s, y: %s\n" % (result[0], result[1]))