diff --git a/README.md b/README.md index c30bd88..067e8e7 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,6 @@ +* Example code has been modified by BGPark to support the TensorFlow 1.0+ API. +* If you are using an IDE such as PyCharm, it is strongly recommended that you create a separate project for each chapter. Otherwise, pay attention to the relative paths from each script to its dataset. + # _TensorFlow for Machine Intelligence_ ![TensorFlow for Machine Intelligence book cover](img/book_cover.jpg) diff --git a/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Basic Graph.ipynb b/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Basic Graph.ipynb index 54b7bd4..b78fcd0 100644 --- a/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Basic Graph.ipynb +++ b/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Basic Graph.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "metadata": { "collapsed": true }, @@ -14,7 +14,7 @@ "# Build our graph nodes, starting from the inputs\n", "a = tf.constant(5, name=\"input_a\")\n", "b = tf.constant(3, name=\"input_b\")\n", - "c = tf.mul(a,b, name=\"mul_c\")\n", + "c = tf.multiply(a,b, name=\"mul_c\")\n", "d = tf.add(a,b, name=\"add_d\")\n", "e = tf.add(c,d, name=\"add_e\")\n", "\n", @@ -25,7 +25,7 @@ "output = sess.run(e)\n", "\n", "# Open a TensorFlow SummaryWriter to write our graph to disk\n", - "writer = tf.train.SummaryWriter('./my_graph', sess.graph)\n", + "writer = tf.summary.FileWriter('./my_graph', sess.graph)\n", "\n", "# Close our SummaryWriter and Session objects\n", "writer.close()\n", @@ -37,7 +37,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "metadata": { "collapsed": true }, @@ -49,7 +49,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 7, "metadata": { "collapsed": true }, @@ -58,14 +58,14 @@ "# Build our graph nodes, starting from the inputs\n", "a = tf.constant(5, name=\"input_a\")\n", "b = tf.constant(3, name=\"input_b\")\n", - "c = tf.mul(a,b, name=\"mul_c\")\n", + "c = tf.multiply(a,b, name=\"mul_c\")\n", "d = tf.add(a,b, name=\"add_d\")\n", "e = tf.add(c,d, name=\"add_e\")" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 8, "metadata": { "collapsed": true }, @@ -77,7 +77,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 9, "metadata": { "collapsed": false }, @@ -89,19 +89,19 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 11, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Open a TensorFlow SummaryWriter to write our graph to disk\n", - "writer = tf.train.SummaryWriter('./my_graph', sess.graph)" + "writer = tf.summary.FileWriter('./my_graph', sess.graph)" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 12, "metadata": { "collapsed": true }, @@ -130,7 +130,9 @@ "collapsed": true }, "outputs": [], - "source": [] + "source": [ + "" + ] } ], "metadata": { @@ -142,7 +144,7 @@ "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 2.0 }, "file_extension": ".py", "mimetype": "text/x-python", @@ -154,4 +156,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff --git a/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Name Scope Examples.ipynb b/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Name Scope Examples.ipynb index 08e7a8d..e8e4b66 100644 --- 
a/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Name Scope Examples.ipynb +++ b/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Name Scope Examples.ipynb @@ -12,11 +12,11 @@ "\n", "with tf.name_scope(\"Scope_A\"):\n", " a = tf.add(1, 2, name=\"A_add\")\n", - " b = tf.mul(a, 3, name=\"A_mul\")\n", + " b = tf.multiply(a, 3, name=\"A_mul\")\n", "\n", "with tf.name_scope(\"Scope_B\"):\n", " c = tf.add(4, 5, name=\"B_add\")\n", - " d = tf.mul(c, 6, name=\"B_mul\")\n", + " d = tf.multiply(c, 6, name=\"B_mul\")\n", "\n", "e = tf.add(b, d, name=\"output\")" ] @@ -29,7 +29,7 @@ }, "outputs": [], "source": [ - "writer = tf.train.SummaryWriter('./name_scope_1', graph=tf.get_default_graph())" + "writer = tf.summary.FileWriter('./name_scope_1', graph=tf.get_default_graph())" ] }, { @@ -45,7 +45,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 8, "metadata": { "collapsed": false }, @@ -61,12 +61,12 @@ " with tf.name_scope(\"Transformation\"):\n", "\n", " with tf.name_scope(\"A\"):\n", - " A_mul = tf.mul(in_1, const)\n", - " A_out = tf.sub(A_mul, in_1)\n", + " A_mul = tf.multiply(in_1, const)\n", + " A_out = tf.subtract(A_mul, in_1)\n", "\n", " with tf.name_scope(\"B\"):\n", - " B_mul = tf.mul(in_2, const)\n", - " B_out = tf.sub(B_mul, in_2)\n", + " B_mul = tf.multiply(in_2, const)\n", + " B_out = tf.subtract(B_mul, in_2)\n", "\n", " with tf.name_scope(\"C\"):\n", " C_div = tf.div(A_out, B_out)\n", @@ -78,7 +78,7 @@ "\n", " out = tf.maximum(C_out, D_out) \n", "\n", - "writer = tf.train.SummaryWriter('./name_scope_2', graph=graph)\n", + "writer = tf.summary.FileWriter('./name_scope_2', graph=graph)\n", "writer.close()" ] }, @@ -108,7 +108,9 @@ "collapsed": true }, "outputs": [], - "source": [] + "source": [ + "" + ] } ], "metadata": { @@ -120,7 +122,7 @@ "language_info": { "codemirror_mode": { "name": "ipython", - "version": 3 + "version": 3.0 }, "file_extension": ".py", "mimetype": "text/x-python", @@ -132,4 +134,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff --git a/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Vectorized Graph with Summaries and Name Scopes.ipynb b/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Vectorized Graph with Summaries and Name Scopes.ipynb index d88d329..d7823ed 100644 --- a/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Vectorized Graph with Summaries and Name Scopes.ipynb +++ b/chapters/03_tensorflow_fundamentals/TensorFlow Fundamentals - Vectorized Graph with Summaries and Name Scopes.ipynb @@ -14,9 +14,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 8, "metadata": { - "collapsed": false + "collapsed": true }, "outputs": [], "source": [ @@ -61,22 +61,22 @@ " avg = tf.div(update_total, tf.cast(increment_step, tf.float32), name=\"average\")\n", " \n", " # Creates summaries for output node\n", - " tf.scalar_summary(b'Output', output, name=\"output_summary\")\n", - " tf.scalar_summary(b'Sum of outputs over time', update_total, name=\"total_summary\")\n", - " tf.scalar_summary(b'Average of outputs over time', avg, name=\"average_summary\")\n", + " tf.summary.scalar('Output', output)\n", + " tf.summary.scalar('Sum_of_outputs_over_time', update_total)\n", + " tf.summary.scalar('Average_of_outputs_over_time', avg)\n", " \n", " # Global Variables and Operations\n", " with tf.name_scope(\"global_ops\"):\n", " # Initialization Op\n", - " init = tf.initialize_all_variables() \n", + " init = 
tf.global_variables_initializer()\n", " # Merge all summaries into one Operation\n", - " merged_summaries = tf.merge_all_summaries()\n", + " merged_summaries = tf.summary.merge_all()\n", "\n", "# Start a Session, using the explicitly created Graph\n", "sess = tf.Session(graph=graph)\n", "\n", "# Open a SummaryWriter to save summaries\n", - "writer = tf.train.SummaryWriter('./improved_graph', graph)\n", + "writer = tf.summary.FileWriter('./improved_graph', graph)\n", "\n", "# Initialize Variables\n", "sess.run(init)" @@ -84,7 +84,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 9, "metadata": { "collapsed": false }, @@ -101,7 +101,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 10, "metadata": { "collapsed": false, "scrolled": true @@ -123,7 +123,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 11, "metadata": { "collapsed": true }, @@ -135,7 +135,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 12, "metadata": { "collapsed": true }, @@ -147,7 +147,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 13, "metadata": { "collapsed": true }, @@ -177,7 +177,9 @@ "collapsed": true }, "outputs": [], - "source": [] + "source": [ + "" + ] } ], "metadata": { @@ -189,7 +191,7 @@ "language_info": { "codemirror_mode": { "name": "ipython", - "version": 3 + "version": 3.0 }, "file_extension": ".py", "mimetype": "text/x-python", @@ -201,4 +203,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff --git a/chapters/03_tensorflow_fundamentals/basic_graph.py b/chapters/03_tensorflow_fundamentals/basic_graph.py index 0017f4d..32d0ca0 100644 --- a/chapters/03_tensorflow_fundamentals/basic_graph.py +++ b/chapters/03_tensorflow_fundamentals/basic_graph.py @@ -4,7 +4,7 @@ # Build our graph nodes, starting from the inputs a = tf.constant(5, name="input_a") b = tf.constant(3, name="input_b") -c = tf.mul(a,b, name="mul_c") +c = tf.multiply(a,b, name="mul_c") d = tf.add(a,b, name="add_d") e = tf.add(c,d, name="add_e") @@ -15,7 +15,7 @@ sess.run(e) # Open a TensorFlow SummaryWriter to write our graph to disk -writer = tf.train.SummaryWriter('./my_graph', sess.graph) +writer = tf.summary.FileWriter('./my_graph', sess.graph) # Close our SummaryWriter and Session objects writer.close() diff --git a/chapters/03_tensorflow_fundamentals/name_scopes.py b/chapters/03_tensorflow_fundamentals/name_scopes.py index 5fb67d8..968f726 100644 --- a/chapters/03_tensorflow_fundamentals/name_scopes.py +++ b/chapters/03_tensorflow_fundamentals/name_scopes.py @@ -3,15 +3,15 @@ # Example 1 with tf.name_scope("Scope_A"): a = tf.add(1, 2, name="A_add") - b = tf.mul(a, 3, name="A_mul") + b = tf.multiply(a, 3, name="A_mul") with tf.name_scope("Scope_B"): c = tf.add(4, 5, name="B_add") - d = tf.mul(c, 6, name="B_mul") + d = tf.multiply(c, 6, name="B_mul") e = tf.add(b, d, name="output") -writer = tf.train.SummaryWriter('./name_scope_1', graph=tf.get_default_graph()) +writer = tf.summary.FileWriter('./name_scope_1', graph=tf.get_default_graph()) writer.close() @@ -26,12 +26,12 @@ with tf.name_scope("Transformation"): with tf.name_scope("A"): - A_mul = tf.mul(in_1, const) - A_out = tf.sub(A_mul, in_1) + A_mul = tf.multiply(in_1, const) + A_out = tf.subtract(A_mul, in_1) with tf.name_scope("B"): - B_mul = tf.mul(in_2, const) - B_out = tf.sub(B_mul, in_2) + B_mul = tf.multiply(in_2, const) + B_out = tf.subtract(B_mul, in_2) with tf.name_scope("C"): C_div = tf.div(A_out, B_out) @@ -43,7 +43,7 
@@ out = tf.maximum(C_out, D_out) -writer = tf.train.SummaryWriter('./name_scope_2', graph=graph) +writer = tf.summary.FileWriter('./name_scope_2', graph=graph) writer.close() # To start TensorBoard after running this file, execute the following command: diff --git a/chapters/03_tensorflow_fundamentals/vectorized_graph.py b/chapters/03_tensorflow_fundamentals/vectorized_graph.py index 1b18baa..d4d565b 100644 --- a/chapters/03_tensorflow_fundamentals/vectorized_graph.py +++ b/chapters/03_tensorflow_fundamentals/vectorized_graph.py @@ -32,27 +32,27 @@ # Separate output layer with tf.name_scope("output"): d = tf.add(b, c, name="add_d") - output = tf.sub(d, previous_value, name="output") + output = tf.subtract(d, previous_value, name="output") update_prev = previous_value.assign(output) # Summary Operations with tf.name_scope("summaries"): - tf.scalar_summary(b'output', output, name="output_summary") # Creates summary for output node - tf.scalar_summary(b'product of inputs', b, name="prod_summary") - tf.scalar_summary(b'sum of inputs', c, name="sum_summary") + tf.summary.scalar('output', output) # Creates summary for output node + tf.summary.scalar('product of inputs', b) + tf.summary.scalar('sum of inputs', c) # Global Variables and Operations with tf.name_scope("global_ops"): # Initialization Op - init = tf.initialize_all_variables() + init = tf.global_variables_initializer() # Collect all summary Ops in graph - merged_summaries = tf.merge_all_summaries() + merged_summaries = tf.summary.merge_all() # Start a Session, using the explicitly created Graph sess = tf.Session(graph=graph) # Open a SummaryWriter to save summaries -writer = tf.train.SummaryWriter('./improved_graph', graph) +writer = tf.summary.FileWriter('./improved_graph', graph) # Initialize Variables sess.run(init) diff --git a/chapters/04_machine_learning_basics/generic.py b/chapters/04_machine_learning_basics/generic.py index f704b9a..45b85a5 100644 --- a/chapters/04_machine_learning_basics/generic.py +++ b/chapters/04_machine_learning_basics/generic.py @@ -45,7 +45,7 @@ def evaluate(sess, X, Y): sess.run([train_op]) # for debugging and learning purposes, see how the loss gets decremented thru training steps if step % 10 == 0: - print "loss: ", sess.run([total_loss]) + print("loss: ", sess.run([total_loss])) evaluate(sess, X, Y) diff --git a/chapters/04_machine_learning_basics/linear_regression.py b/chapters/04_machine_learning_basics/linear_regression.py index 4ddc41a..cdc4cd9 100644 --- a/chapters/04_machine_learning_basics/linear_regression.py +++ b/chapters/04_machine_learning_basics/linear_regression.py @@ -29,13 +29,13 @@ def train(total_loss): def evaluate(sess, X, Y): - print sess.run(inference([[80., 25.]])) # ~ 303 - print sess.run(inference([[65., 25.]])) # ~ 256 + print(sess.run(inference([[80., 25.]]))) # ~ 303 + print(sess.run(inference([[65., 25.]]))) # ~ 256 # Launch the graph in a session, setup boilerplate with tf.Session() as sess: - tf.initialize_all_variables().run() + tf.global_variables_initializer().run() X, Y = inputs() @@ -50,7 +50,7 @@ def evaluate(sess, X, Y): for step in range(training_steps): sess.run([train_op]) if step % 10 == 0: - print "loss: ", sess.run([total_loss]) + print("loss: ", sess.run([total_loss])) evaluate(sess, X, Y) diff --git a/chapters/04_machine_learning_basics/logistic_regression.py b/chapters/04_machine_learning_basics/logistic_regression.py index 6795599..7c16b63 100644 --- a/chapters/04_machine_learning_basics/logistic_regression.py +++ 
b/chapters/04_machine_learning_basics/logistic_regression.py @@ -20,7 +20,7 @@ def inference(X): def loss(X, Y): - return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(combine_inputs(X), Y)) + return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=combine_inputs(X), labels=Y)) def read_csv(batch_size, file_name, record_defaults): @@ -54,7 +54,7 @@ def inputs(): # Finally we pack all the features in a single matrix; # We then transpose to have a matrix with one example per row and one feature per column. - features = tf.transpose(tf.pack([is_first_class, is_second_class, is_third_class, gender, age])) + features = tf.transpose(tf.stack([is_first_class, is_second_class, is_third_class, gender, age])) survived = tf.reshape(survived, [100, 1]) return features, survived @@ -69,12 +69,12 @@ def evaluate(sess, X, Y): predicted = tf.cast(inference(X) > 0.5, tf.float32) - print sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))) + print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32)))) # Launch the graph in a session, setup boilerplate with tf.Session() as sess: - tf.initialize_all_variables().run() + tf.global_variables_initializer().run() X, Y = inputs() @@ -90,7 +90,7 @@ def evaluate(sess, X, Y): sess.run([train_op]) # for debugging and learning purposes, see how the loss gets decremented thru training steps if step % 10 == 0: - print "loss: ", sess.run([total_loss]) + print("loss: ", sess.run([total_loss])) evaluate(sess, X, Y) diff --git a/chapters/04_machine_learning_basics/softmax.py b/chapters/04_machine_learning_basics/softmax.py index 902d9e1..f693e8f 100644 --- a/chapters/04_machine_learning_basics/softmax.py +++ b/chapters/04_machine_learning_basics/softmax.py @@ -19,13 +19,13 @@ def inference(X): def loss(X, Y): - return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(combine_inputs(X), Y)) + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=combine_inputs(X), labels=Y)) def read_csv(batch_size, file_name, record_defaults): filename_queue = tf.train.string_input_producer([os.path.dirname(__file__) + "/" + file_name]) - reader = tf.TextLineReader(skip_header_lines=1) + reader = tf.TextLineReader() key, value = reader.read(filename_queue) # decode_csv will convert a Tensor from type string (the text line) in @@ -43,10 +43,10 @@ def read_csv(batch_size, file_name, record_defaults): def inputs(): sepal_length, sepal_width, petal_length, petal_width, label =\ - read_csv(100, "iris.data", [[0.0], [0.0], [0.0], [0.0], [""]]) + read_csv(100, "./iris.data", [[0.0], [0.0], [0.0], [0.0], [""]]) # convert class names to a 0 based class index. - label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.pack([ + label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.stack([ tf.equal(label, ["Iris-setosa"]), tf.equal(label, ["Iris-versicolor"]), tf.equal(label, ["Iris-virginica"]) @@ -54,7 +54,7 @@ def inputs(): # Pack all the features that we care about in a single matrix; # We then transpose to have a matrix with one example per row and one feature per column. 
- features = tf.transpose(tf.pack([sepal_length, sepal_width, petal_length, petal_width])) + features = tf.transpose(tf.stack([sepal_length, sepal_width, petal_length, petal_width])) return features, label_number @@ -68,13 +68,13 @@ def evaluate(sess, X, Y): predicted = tf.cast(tf.arg_max(inference(X), 1), tf.int32) - print sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))) + print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32)))) # Launch the graph in a session, setup boilerplate with tf.Session() as sess: - tf.initialize_all_variables().run() + tf.global_variables_initializer().run() X, Y = inputs() @@ -90,7 +90,7 @@ def evaluate(sess, X, Y): sess.run([train_op]) # for debugging and learning purposes, see how the loss gets decremented thru training steps if step % 10 == 0: - print "loss: ", sess.run([total_loss]) + print("loss: ", sess.run([total_loss])) evaluate(sess, X, Y) diff --git a/chapters/05_object_recognition_and_classification/Chapter 5 - 02 Convolutions.ipynb b/chapters/05_object_recognition_and_classification/Chapter 5 - 02 Convolutions.ipynb index 2bd7faa..b65032d 100644 --- a/chapters/05_object_recognition_and_classification/Chapter 5 - 02 Convolutions.ipynb +++ b/chapters/05_object_recognition_and_classification/Chapter 5 - 02 Convolutions.ipynb @@ -289,15 +289,14 @@ "fig.set_size_inches(4, 4)\n", "\n", "image_filename = \"./images/chapter-05-object-recognition-and-classification/convolution/n02113023_219.jpg\"\n", - "image_filename = \"/Users/erikerwitt/Downloads/images/n02085936-Maltese_dog/n02085936_804.jpg\"\n", - "filename_queue = tf.train.string_input_producer(\n", - " tf.train.match_filenames_once(image_filename))\n", + "# image_filename = \"/Users/erikerwitt/Downloads/images/n02085936-Maltese_dog/n02085936_804.jpg\"\n", + "filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(image_filename))\n", "\n", "image_reader = tf.WholeFileReader()\n", "_, image_file = image_reader.read(filename_queue)\n", "image = tf.image.decode_jpeg(image_file)\n", "\n", - "sess.run(tf.initialize_all_variables())\n", + "sess.run(tf.global_variables_initializer())\n", "coord = tf.train.Coordinator()\n", "threads = tf.train.start_queue_runners(coord=coord)\n", "\n", diff --git a/chapters/05_object_recognition_and_classification/Chapter 5 - 03 Layers.ipynb b/chapters/05_object_recognition_and_classification/Chapter 5 - 03 Layers.ipynb index 2ba4e3d..22550d7 100644 --- a/chapters/05_object_recognition_and_classification/Chapter 5 - 03 Layers.ipynb +++ b/chapters/05_object_recognition_and_classification/Chapter 5 - 03 Layers.ipynb @@ -504,14 +504,14 @@ "\n", "conv2d = tf.contrib.layers.convolution2d(\n", " image_input,\n", - " num_output_channels=4,\n", + " num_outputs=4,\n", " kernel_size=(1,1), # It's only the filter height and width.\n", " activation_fn=tf.nn.relu,\n", " stride=(1, 1), # Skips the stride values for image_batch and input_channels.\n", " trainable=True)\n", "\n", "# It's required to initialize the variables used in convolution2d's setup.\n", - "sess.run(tf.initialize_all_variables())\n", + "sess.run(tf.global_variables_initializer())\n", "sess.run(conv2d)" ] }, @@ -559,9 +559,9 @@ " [[1.2], [3.4]]\n", " ])\n", "\n", - "fc = tf.contrib.layers.fully_connected(features, num_output_units=2)\n", + "fc = tf.contrib.layers.fully_connected(features, num_outputs=2)\n", "# It's required to initialize all the variables first or there'll be an error about precondition failures.\n", - 
"sess.run(tf.initialize_all_variables())\n", + "sess.run(tf.global_variables_initializer())\n", "sess.run(fc)" ] }, diff --git a/chapters/05_object_recognition_and_classification/Chapter 5 - 04 Working with Images.ipynb b/chapters/05_object_recognition_and_classification/Chapter 5 - 04 Working with Images.ipynb index 9ff6d7b..0807f24 100644 --- a/chapters/05_object_recognition_and_classification/Chapter 5 - 04 Working with Images.ipynb +++ b/chapters/05_object_recognition_and_classification/Chapter 5 - 04 Working with Images.ipynb @@ -75,8 +75,7 @@ "source": [ "# The match_filenames_once will accept a regex but there is no need for this example.\n", "image_filename = \"./images/chapter-05-object-recognition-and-classification/working-with-images/test-input-image.jpg\"\n", - "filename_queue = tf.train.string_input_producer(\n", - " tf.train.match_filenames_once(image_filename))\n", + "filename_queue = tf.train.string_input_producer([image_filename])\n", "\n", "image_reader = tf.WholeFileReader()\n", "_, image_file = image_reader.read(filename_queue)\n", @@ -101,9 +100,9 @@ "outputs": [], "source": [ "# setup-only-ignore\n", - "sess.run(tf.initialize_all_variables())\n", + "sess.run(tf.global_variables_initializer())\n", "coord = tf.train.Coordinator()\n", - "threads = tf.train.start_queue_runners(coord=coord)" + "threads = tf.train.start_queue_runners(coord=coord, sess=sess)" ] }, { @@ -256,8 +255,7 @@ "outputs": [], "source": [ "# Load TFRecord\n", - "tf_record_filename_queue = tf.train.string_input_producer(\n", - " tf.train.match_filenames_once(\"./output/training-image.tfrecord\"))\n", + "tf_record_filename_queue = tf.train.string_input_producer([\"./output/training-image.tfrecord\"])\n", "\n", "# Notice the different record reader, this one is designed to work with TFRecord files which may\n", "# have more than one example in them.\n", @@ -311,9 +309,9 @@ "# setup-only-ignore\n", "sess.close()\n", "sess = tf.InteractiveSession()\n", - "sess.run(tf.initialize_all_variables())\n", + "sess.run(tf.global_variables_initializer())\n", "coord = tf.train.Coordinator()\n", - "threads = tf.train.start_queue_runners(coord=coord)" + "threads = tf.train.start_queue_runners(coord=coord, sess=sess)" ] }, { diff --git a/chapters/05_object_recognition_and_classification/Chapter 5 - 05 CNN Implementation.ipynb b/chapters/05_object_recognition_and_classification/Chapter 5 - 05 CNN Implementation.ipynb index f5638b2..8e6ade6 100644 --- a/chapters/05_object_recognition_and_classification/Chapter 5 - 05 CNN Implementation.ipynb +++ b/chapters/05_object_recognition_and_classification/Chapter 5 - 05 CNN Implementation.ipynb @@ -176,7 +176,7 @@ "\n", " # Converting to grayscale saves processing and memory but isn't required.\n", " grayscale_image = tf.image.rgb_to_grayscale(image)\n", - " resized_image = tf.image.resize_images(grayscale_image, 250, 151)\n", + " resized_image = tf.image.resize_images(grayscale_image, [250, 151])\n", "\n", " # tf.cast is used here because the resized images are floats but haven't been converted into\n", " # image floats where an RGB value is between [0,1).\n", @@ -291,14 +291,15 @@ "# Converting the images to a float of [0,1) to match the expected input to convolution2d\n", "float_image_batch = tf.image.convert_image_dtype(image_batch, tf.float32)\n", "\n", - "conv2d_layer_one = tf.contrib.layers.convolution2d(\n", + "conv2d_layer_one = tf.contrib.layers.conv2d(\n", " float_image_batch,\n", - " num_output_channels=32, # The number of filters to generate\n", + " num_outputs=32, # 
The number of filters to generate\n", " kernel_size=(5,5), # It's only the filter height and width.\n", " activation_fn=tf.nn.relu,\n", - " weight_init=tf.random_normal,\n", + " weights_initializer=tf.random_normal_initializer,\n", " stride=(2, 2),\n", " trainable=True)\n", + "\n", "pool_layer_one = tf.nn.max_pool(conv2d_layer_one,\n", " ksize=[1, 2, 2, 1],\n", " strides=[1, 2, 2, 1],\n", @@ -338,12 +339,12 @@ } ], "source": [ - "conv2d_layer_two = tf.contrib.layers.convolution2d(\n", + "conv2d_layer_two = tf.contrib.layers.conv2d(\n", " pool_layer_one,\n", - " num_output_channels=64, # More output channels means an increase in the number of filters\n", + " num_outputs=64, # More output channels means an increase in the number of filters\n", " kernel_size=(5,5),\n", " activation_fn=tf.nn.relu,\n", - " weight_init=tf.random_normal,\n", + " weights_initializer=tf.random_normal_initializer,\n", " stride=(1, 1),\n", " trainable=True)\n", "\n", @@ -415,7 +416,7 @@ "hidden_layer_three = tf.contrib.layers.fully_connected(\n", " flattened_layer_two,\n", " 512,\n", - " weight_init=lambda i, dtype: tf.truncated_normal([38912, 512], stddev=0.1),\n", + " weights_initializer=tf.truncated_normal_initializer(stddev=0.1),\n", " activation_fn=tf.nn.relu\n", ")\n", "\n", @@ -427,7 +428,7 @@ "final_fully_connected = tf.contrib.layers.fully_connected(\n", " hidden_layer_three,\n", " 120, # Number of dog breeds in the ImageNet Dogs dataset\n", - " weight_init=lambda i, dtype: tf.truncated_normal([512, 120], stddev=0.1)\n", + " weights_initializer=tf.truncated_normal_initializer(stddev=0.1)\n", ")" ] }, @@ -486,7 +487,7 @@ "# setup-only-ignore\n", "loss = tf.reduce_mean(\n", " tf.nn.sparse_softmax_cross_entropy_with_logits(\n", - " final_fully_connected, train_labels))\n", + " logits=final_fully_connected, labels=train_labels))\n", "\n", "batch = tf.Variable(0)\n", "learning_rate = tf.train.exponential_decay(\n", diff --git a/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/EmbeddingModel.py b/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/EmbeddingModel.py index ea5fcf4..48e141d 100644 --- a/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/EmbeddingModel.py +++ b/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/EmbeddingModel.py @@ -36,6 +36,6 @@ def cost(self): bias = tf.Variable(tf.zeros([self.params.vocabulary_size])) target = tf.expand_dims(self.target, 1) return tf.reduce_mean(tf.nn.nce_loss( - weight, bias, embedded, target, - self.params.contrastive_examples, - self.params.vocabulary_size)) \ No newline at end of file + weights=weight, biases=bias, inputs=embedded, labels=target, + num_sampled=self.params.contrastive_examples, + num_classes=self.params.vocabulary_size)) \ No newline at end of file diff --git a/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/train.py b/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/train.py index fcb68e1..917c5b2 100644 --- a/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/train.py +++ b/chapters/06_recurrent_neural_networks_and_natural_language_processing/01_wikipedia/train.py @@ -26,15 +26,15 @@ model = EmbeddingModel(data, target, params) corpus = Wikipedia( - 'https://dumps.wikimedia.org/enwiki/20160501/' - 'enwiki-20160501-pages-meta-current1.xml-p000000010p000030303.bz2', + 
'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-meta-current1.xml-p10p30303.bz2', + # 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-meta-current.xml.bz2', WIKI_DOWNLOAD_DIR, params.vocabulary_size) examples = skipgrams(corpus, params.max_context) batches = batched(examples, params.batch_size) sess = tf.Session() -sess.run(tf.initialize_all_variables()) +sess.run(tf.global_variables_initializer()) average = collections.deque(maxlen=100) for index, batch in enumerate(batches): feed_dict = {data: batch[0], target: batch[1]} diff --git a/chapters/06_recurrent_neural_networks_and_natural_language_processing/02_imdb/train.py b/chapters/06_recurrent_neural_networks_and_natural_language_processing/02_imdb/train.py index 1a0a7a0..c70680e 100644 --- a/chapters/06_recurrent_neural_networks_and_natural_language_processing/02_imdb/train.py +++ b/chapters/06_recurrent_neural_networks_and_natural_language_processing/02_imdb/train.py @@ -12,7 +12,7 @@ WIKI_EMBED_DIR = '../01_wikipedia/wikipedia' params = AttrDict( - rnn_cell=tf.nn.rnn_cell.GRUCell, + rnn_cell=tf.contrib.rnn.GRUCell, rnn_hidden=300, optimizer=tf.train.RMSPropOptimizer(0.002), batch_size=20, @@ -31,7 +31,7 @@ model = SequenceClassificationModel(data, target, params) sess = tf.Session() -sess.run(tf.initialize_all_variables()) +sess.run(tf.global_variables_initializer()) for index, batch in enumerate(batches): feed = {data: batch[0], target: batch[1]} error, _ = sess.run([model.error, model.optimize], feed) diff --git a/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/SequenceLabellingModel.py b/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/SequenceLabellingModel.py index 8872367..1dee0e2 100644 --- a/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/SequenceLabellingModel.py +++ b/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/SequenceLabellingModel.py @@ -24,7 +24,7 @@ def length(self): @lazy_property def prediction(self): output, _ = tf.nn.dynamic_rnn( - tf.nn.rnn_cell.GRUCell(self.params.rnn_hidden), + tf.contrib.rnn.GRUCell(self.params.rnn_hidden), self.data, dtype=tf.float32, sequence_length=self.length, diff --git a/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train.py b/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train.py index eebd3f2..a6bf2ba 100644 --- a/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train.py +++ b/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train.py @@ -10,7 +10,7 @@ from batched import batched params = AttrDict( - rnn_cell=tf.nn.rnn_cell.GRUCell, + rnn_cell=tf.contrib.rnn.GRUCell, rnn_hidden=300, optimizer=tf.train.RMSPropOptimizer(0.002), gradient_clipping=5, @@ -50,7 +50,7 @@ def get_dataset(): batches = batched(train_data, train_target, params.batch_size) sess = tf.Session() -sess.run(tf.initialize_all_variables()) +sess.run(tf.global_variables_initializer()) for index, batch in enumerate(batches): batch_data = batch[0] batch_target = batch[1] diff --git a/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train_bidirectional.py b/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train_bidirectional.py index 8cdcea9..b3adfbe 100644 --- a/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train_bidirectional.py +++ 
b/chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/train_bidirectional.py @@ -11,7 +11,7 @@ from batched import batched params = AttrDict( - rnn_cell=tf.nn.rnn_cell.GRUCell, + rnn_cell=tf.contrib.rnn.GRUCell, rnn_hidden=300, optimizer=tf.train.RMSPropOptimizer(0.002), gradient_clipping=5, @@ -51,7 +51,7 @@ def get_dataset(): batches = batched(train_data, train_target, params.batch_size) sess = tf.Session() -sess.run(tf.initialize_all_variables()) +sess.run(tf.global_variables_initializer()) for index, batch in enumerate(batches): batch_data = batch[0] batch_target = batch[1]
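
The patch above is a mechanical migration from the pre-1.0 TensorFlow API to the 1.x names. As a quick reference for applying the same migration to other scripts, here is a minimal sketch (assuming TensorFlow 1.x; the `./api_check` log directory and the tensor names are illustrative only, not part of the book's code) that exercises the renamed functions substituted throughout these chapters:

```python
# Minimal TensorFlow 1.x sketch of the API renames applied in this patch:
#   tf.mul  -> tf.multiply         tf.sub -> tf.subtract
#   tf.pack -> tf.stack            tf.scalar_summary -> tf.summary.scalar
#   tf.merge_all_summaries      -> tf.summary.merge_all
#   tf.train.SummaryWriter      -> tf.summary.FileWriter
#   tf.initialize_all_variables -> tf.global_variables_initializer
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    a = tf.constant(5.0, name="input_a")
    b = tf.constant(3.0, name="input_b")
    c = tf.multiply(a, b, name="mul_c")       # was tf.mul
    d = tf.subtract(c, a, name="sub_d")       # was tf.sub
    e = tf.stack([c, d], name="stack_e")      # was tf.pack

    tf.summary.scalar("mul_c", c)             # was tf.scalar_summary
    init = tf.global_variables_initializer()  # was tf.initialize_all_variables
    merged = tf.summary.merge_all()           # was tf.merge_all_summaries

sess = tf.Session(graph=graph)
sess.run(init)
summary, _ = sess.run([merged, e])

# was tf.train.SummaryWriter; './api_check' is an arbitrary example directory
writer = tf.summary.FileWriter("./api_check", graph)
writer.add_summary(summary, global_step=0)
writer.close()
sess.close()
```

In the same spirit, TensorFlow 1.x reordered or made keyword-only the arguments of the loss helpers, which is why the call sites in chapters 4 and 6 are rewritten with explicit keywords: `tf.nn.sigmoid_cross_entropy_with_logits(logits=..., labels=...)`, `tf.nn.sparse_softmax_cross_entropy_with_logits(logits=..., labels=...)`, and `tf.nn.nce_loss(weights=..., biases=..., inputs=..., labels=..., num_sampled=..., num_classes=...)`.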