A Few TensorFlow Quiz Questions

The original post is a set of questions that Tiezhen put to attendees at a 创新工场 (Sinovation Ventures) workshop. They are well worth working through, so I am reposting them here:

Please complete the following tasks in order and reply to this post.

Test 0

import tensorflow as tf
import sys

print(tf.__version__)
print(sys.version)
# Question: What are the outputs and why?
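
For reference, a hedged answer: the exact strings depend on your installation, but the two prints show the TensorFlow release and the Python interpreter version, e.g. on a hypothetical TF 1.x / Python 3.6 setup:

1.13.1
3.6.8 (default, ...)

tf.__version__ identifies the installed TensorFlow build and sys.version the interpreter, so this test simply confirms which environment the later exercises will run against.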

Test 1.0

import tensorflow as tf

a = tf.constant(1)
b = tf.constant(1)
c = tf.add(a, b)

print(c)

with tf.Session() as sess:
    print(sess.run(c))

# Question: What's the output?
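
A hedged answer sketch, assuming TF 1.x graph mode: print(c) shows the symbolic tensor rather than its value, because nothing has been executed yet; sess.run(c) actually evaluates the graph. The output is:

Tensor("Add:0", shape=(), dtype=int32)
2

Outside a session, c is only a node in the computation graph; a value exists only once a session runs that node.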

Test 1.1

import tensorflow as tf
# 2-D tensor `a`
# [[1, 2, 3],
#  [4, 5, 6]]

a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])

# 2-D tensor `b`
# [[ 7,  8],
#  [ 9, 10],
#  [11, 12]]
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])

# Question: write a program to calculate the matrix multiplication of a and b (i.e. a * b),
# and what's the output?
# API doc is available here: https://tensorflow.google.cn/api_docs/python/tf/
# YOUR CODE HERE
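
One possible completion, as a sketch (the API doc above lists tf.matmul, which is what matrix multiplication calls for here):

c = tf.matmul(a, b)

with tf.Session() as sess:
    print(sess.run(c))

# Output:
# [[ 58  64]
#  [139 154]]

Note that tf.matmul is true matrix multiplication; the * operator (tf.multiply) is element-wise and would not even be shape-compatible for these two tensors.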

Test 1.2

import tensorflow as tf
x = tf.get_variable('x', shape=[1], initializer=tf.constant_initializer(3))
y = tf.square(x)
y_grad = tf.gradients(y, [x])

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(y_grad))

# Question: what's the output and what does the output mean?
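
A hedged answer: y = x^2, so dy/dx = 2x, and x is initialized to 3, giving a gradient of 6. The printed value is:

[array([6.], dtype=float32)]

tf.gradients(y, [x]) returns one gradient tensor per variable in the list, which is why the result is a one-element list.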

Test 1.3

import tensorflow as tf
f = lambda x: 3 * x
x_init = 1
y_expected = 5
EPOCHS = ...  # choose the number of gradient-descent steps

def find_root(f, x_init, y_expected, learning_rate):
    x = tf.get_variable('x', shape=[1], initializer=tf.constant_initializer(x_init))

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for _ in range(EPOCHS):
            y_predict = ...
            loss = ...
            y_grad = tf.gradients(loss, [x])
            op = tf.assign_sub(x, ...)
            op = tf.Print(op, [...], "updates: ")
            sess.run(op)

        return sess.run(x)

# Question: write a function to calculate x so that f(x) = y_expected, using the gradient descent method
# Question: Try changing the learning rate and explain what you find
# Question: How can you reduce the number of epochs?
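
For reference, here is one way to fill in the blanks. This is a sketch, not the original author's solution: the squared-error loss, EPOCHS = 100, and the learning rate of 0.05 in the usage line are my assumptions, and I build the graph once before the loop, since calling tf.gradients/tf.assign_sub inside the loop would keep adding new nodes to the graph on every iteration.

import tensorflow as tf

EPOCHS = 100  # assumed number of gradient-descent steps

def find_root(f, x_init, y_expected, learning_rate):
    x = tf.get_variable('x', shape=[1], initializer=tf.constant_initializer(x_init))
    y_predict = f(x)                                  # current prediction f(x)
    loss = tf.square(y_predict - y_expected)          # squared error to minimize
    y_grad = tf.gradients(loss, [x])                  # d(loss)/dx, a one-element list
    op = tf.assign_sub(x, learning_rate * y_grad[0])  # x <- x - lr * grad
    op = tf.Print(op, [x, loss], "updates: ")

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for _ in range(EPOCHS):
            sess.run(op)  # one gradient-descent step
        return sess.run(x)

print(find_root(lambda x: 3 * x, 1, 5, 0.05))  # approaches 5/3 ≈ 1.667

On the follow-up questions: the loss here is (3x - 5)^2, whose gradient is 18x - 30, so gradient descent converges only for learning rates below roughly 2/18 ≈ 0.11 and diverges above that; within the stable range, a larger learning rate reaches the root in fewer epochs.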

Test 2

# Question: Write an XOR model to predict the result of a XOR b
# Question: Write your own metrics, loss, and optimizer
import tensorflow as tf
import numpy as np

EPOCHS = ...  # choose the number of training epochs

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

class XOR(tf.keras.Model):
    def __init__(self):
        super(XOR, self).__init__()
        self.dense1 = ...
        self.dense2 = ...

    def call(self, inputs):
        x = self.dense1(inputs)
        x = self.dense2(x)
        return x

def accuracy(y_true, y_pred):
    raise NotImplementedError

def loss(y_true, y_pred):
    raise NotImplementedError


class Optimizer(tf.keras.optimizers.Optimizer):

    def __init__(self, learning_rate):
        super(Optimizer, self).__init__()
        self._learning_rate = learning_rate
        ...

    def get_updates(self, loss, params):
        updates = []
        # TODO: Implement here
        return updates

def run():
    m = XOR()
    m.compile(loss=loss, optimizer=Optimizer(...), metrics=[accuracy])
    m.fit(X, y, epochs=EPOCHS)
    print(m.predict(X))

if __name__ == "__main__":
    run()
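
For comparison, here is one way the blanks could be filled in. This is a sketch under assumptions, not the author's reference solution: it presumes TF 1.x, where tf.keras optimizers still expose get_updates(loss, params), and the layer sizes, activations, mean-squared-error loss, rounding-based accuracy, learning rate of 0.5, and EPOCHS = 2000 are all my choices.

import tensorflow as tf
import numpy as np

EPOCHS = 2000  # XOR with plain SGD typically needs a few thousand epochs

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y = np.array([[0], [1], [1], [0]], dtype=np.float32)

class XOR(tf.keras.Model):
    def __init__(self):
        super(XOR, self).__init__()
        self.dense1 = tf.keras.layers.Dense(8, activation='tanh')     # hidden layer
        self.dense2 = tf.keras.layers.Dense(1, activation='sigmoid')  # output in (0, 1)

    def call(self, inputs):
        x = self.dense1(inputs)
        return self.dense2(x)

def accuracy(y_true, y_pred):
    # fraction of rounded predictions that match the labels
    return tf.reduce_mean(tf.cast(tf.equal(y_true, tf.round(y_pred)), tf.float32))

def loss(y_true, y_pred):
    # mean squared error
    return tf.reduce_mean(tf.square(y_true - y_pred))

class Optimizer(tf.keras.optimizers.Optimizer):

    def __init__(self, learning_rate):
        super(Optimizer, self).__init__()
        self._learning_rate = learning_rate

    def get_updates(self, loss, params):
        # plain gradient descent: p <- p - lr * dloss/dp
        grads = tf.gradients(loss, params)
        return [tf.assign_sub(p, self._learning_rate * g)
                for p, g in zip(params, grads)]

def run():
    m = XOR()
    m.compile(loss=loss, optimizer=Optimizer(0.5), metrics=[accuracy])
    m.fit(X, y, epochs=EPOCHS, verbose=0)
    print(m.predict(X))  # values near [[0], [1], [1], [0]]

if __name__ == "__main__":
    run()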