diff --git a/README.md b/README.md
index 15f57eabe0036a177fdd6679f12664c21b22107e..8991b8cb14f05bcf397e264400bd73643d68a10d 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ This port is based directly on the torch implementation, and not on an existing
 ## Setup
 
 ### Prerequisites
-- Tensorflow 0.12.1
+- Tensorflow 1.0.0
 
 ### Recommended
 - Linux with Tensorflow GPU edition + cuDNN
@@ -47,24 +47,14 @@ If you have Docker installed, you can use the provided Docker image to run pix2p
 
 ```sh
 # train the model
-sudo nvidia-docker run \
-  --volume $PWD:/prj \
-  --workdir /prj \
-  --env PYTHONUNBUFFERED=x \
-  affinelayer/pix2pix-tensorflow \
-    python pix2pix.py \
+python tools/dockrun.py pix2pix.py \
       --mode train \
       --output_dir facades_train \
       --max_epochs 200 \
       --input_dir facades/train \
       --which_direction BtoA
 # test the model
-sudo nvidia-docker run \
-  --volume $PWD:/prj \
-  --workdir /prj \
-  --env PYTHONUNBUFFERED=x \
-  affinelayer/pix2pix-tensorflow \
-    python pix2pix.py \
+python tools/dockrun.py pix2pix.py \
       --mode test \
       --output_dir facades_test \
       --input_dir facades/val \
@@ -81,7 +71,7 @@ For example:
 
 <img src="docs/418.png" width="256px"/>
 
-Some datasets have been made available by the authors of the pix2pix paper.  To download those datasets, use the included script `tools/download-dataset.py`.  There are also links to pre-trained models alongside each dataset:
+Some datasets have been made available by the authors of the pix2pix paper.  To download those datasets, use the included script `tools/download-dataset.py`, passing the dataset name, for example:
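+
+```sh
+python tools/download-dataset.py facades
+```
+
+There are also links to pre-trained models alongside each dataset; note that these pre-trained models require the Tensorflow 0.12.1 version of pix2pix.py, since they have not been regenerated with the 1.0.0 release: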
 
 | dataset | example |
 | --- | --- |
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 4f916f8b94fe67eaa1543ede094cb72290c3c625..ec5f7380a95d3b1d9f5e767f44c7b98881ba1d59 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -119,4 +119,4 @@ RUN pip install \
     rsa==3.4.2 \
     six==1.10.0 \
     uritemplate==3.0.0 \
-    https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp27-none-linux_x86_64.whl
+    tensorflow-gpu==1.0.0
diff --git a/pix2pix.py b/pix2pix.py
index 5dd82d894199f65974a0bcd4a734dd3549bc7ce9..842bb4f5b957df1c17a0b42ceb5a84cace8afd25 100644
--- a/pix2pix.py
+++ b/pix2pix.py
@@ -118,7 +118,7 @@ def batchnorm(input):
         input = tf.identity(input)
 
         channels = input.get_shape()[3]
-        offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer)
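+        # TF 1.0 makes tf.zeros_initializer a class, so it must be instantiated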
+        offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer())
         scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
         mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)
         variance_epsilon = 1e-5
@@ -372,7 +372,7 @@ def create_generator(generator_inputs, generator_outputs_channels):
                 # since it is directly connected to the skip_layer
                 input = layers[-1]
             else:
-                input = tf.concat_v2([layers[-1], layers[skip_layer]], axis=3)
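+                # TF 1.0 renamed tf.concat_v2 to tf.concat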
+                input = tf.concat([layers[-1], layers[skip_layer]], axis=3)
 
             rectified = tf.nn.relu(input)
             # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
@@ -386,7 +386,7 @@ def create_generator(generator_inputs, generator_outputs_channels):
 
     # decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
     with tf.variable_scope("decoder_1"):
-        input = tf.concat_v2([layers[-1], layers[0]], axis=3)
+        input = tf.concat([layers[-1], layers[0]], axis=3)
         rectified = tf.nn.relu(input)
         output = deconv(rectified, generator_outputs_channels)
         output = tf.tanh(output)
@@ -401,7 +401,7 @@ def create_model(inputs, targets):
         layers = []
 
         # 2x [batch, height, width, in_channels] => [batch, height, width, in_channels * 2]
-        input = tf.concat_v2([discrim_inputs, discrim_targets], axis=3)
+        input = tf.concat([discrim_inputs, discrim_targets], axis=3)
 
         # layer_1: [batch, 256, 256, in_channels * 2] => [batch, 128, 128, ndf]
         with tf.variable_scope("layer_1"):
@@ -538,8 +538,8 @@ def append_index(filesets, step=False):
 
 
 def main():
-    if tf.__version__ != "0.12.1":
-        raise Exception("Tensorflow version 0.12.1 required")
+    if tf.__version__ != "1.0.0":
+        raise Exception("Tensorflow version 1.0.0 required")
 
     if a.seed is None:
         a.seed = random.randint(0, 2**31 - 1)
diff --git a/server/README.md b/server/README.md
index 364159ad25986368d41d171959e24876b1bb4bb1..8edaf9ce79c40a1b5ef075cf6d25313ce43bdcf3 100644
--- a/server/README.md
+++ b/server/README.md
@@ -9,17 +9,15 @@ This is a simple python server that serves models exported from `pix2pix.py --mo
 Using the [pix2pix-tensorflow Docker image](https://hub.docker.com/r/affinelayer/pix2pix-tensorflow/):
 
 ```sh
-alias p2p-run="sudo docker run --rm --volume /:/host --workdir /host\$PWD --env PYTHONUNBUFFERED=x --env CUDA_CACHE_PATH=/host/tmp/cuda-cache --env HOME=/host\$HOME --publish 8000:8000 affinelayer/pix2pix-tensorflow"
-
 # export a model to upload
-p2p-run python export-example-model.py --output_dir models/example
+python ../tools/dockrun.py export-example-model.py --output_dir models/example
 # process an image with the model using local tensorflow
-p2p-run python process-local.py \
+python ../tools/dockrun.py process-local.py \
     --model_dir models/example \
     --input_file static/facades-input.png \
     --output_file output.png
 # run local server
-p2p-run python serve.py --local_models_dir models
+python ../tools/dockrun.py --port 8000 serve.py --local_models_dir models
 # test the local server
 curl -X POST http://localhost:8000/example \
     --data-binary @static/facades-input.png >! output.png
@@ -39,13 +37,13 @@ For this you'll want to generate a service account JSON file from https://consol
 
 ```sh
 # upload model to google cloud ml
-p2p-run python upload-model.py \
+python ../tools/dockrun.py upload-model.py \
     --bucket your-models-bucket-name-here \
     --model_name example \
     --model_dir models/example \
     --credentials service-account.json
 # process an image with the model using google cloud ml
-p2p-run python process-cloud.py \
+python ../tools/dockrun.py process-cloud.py \
     --model example \
     --input_file static/facades-input.png \
     --output_file output.png \
diff --git a/tools/dockrun.py b/tools/dockrun.py
new file mode 100644
index 0000000000000000000000000000000000000000..04a5dda2bf2158afaee9c2a746b3c094124259e7
--- /dev/null
+++ b/tools/dockrun.py
@@ -0,0 +1,117 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import argparse
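+
+# Run the given python script inside the affinelayer/pix2pix-tensorflow Docker
+# image, using nvidia-docker when it is on the PATH and plain docker otherwise.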
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--port", type=int, help="port to publish from the container")
+
+# from python 3.3 source
+# https://github.com/python/cpython/blob/master/Lib/shutil.py
+def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+    """Given a command, mode, and a PATH string, return the path which
+    conforms to the given mode on the PATH, or None if there is no such
+    file.
+    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+    of os.environ.get("PATH"), or can be overridden with a custom search
+    path.
+    """
+    # Check that a given file can be accessed with the correct mode.
+    # Additionally check that `file` is not a directory, as on Windows
+    # directories pass the os.access check.
+    def _access_check(fn, mode):
+        return (os.path.exists(fn) and os.access(fn, mode)
+                and not os.path.isdir(fn))
+
+    # If we're given a path with a directory part, look it up directly rather
+    # than referring to PATH directories. This includes checking relative to the
+    # current directory, e.g. ./script
+    if os.path.dirname(cmd):
+        if _access_check(cmd, mode):
+            return cmd
+        return None
+
+    if path is None:
+        path = os.environ.get("PATH", os.defpath)
+    if not path:
+        return None
+    path = path.split(os.pathsep)
+
+    if sys.platform == "win32":
+        # The current directory takes precedence on Windows.
+        if not os.curdir in path:
+            path.insert(0, os.curdir)
+
+        # PATHEXT is necessary to check on Windows.
+        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+        # See if the given file matches any of the expected path extensions.
+        # This will allow us to short circuit when given "python.exe".
+        # If it does match, only test that one, otherwise we have to try
+        # others.
+        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+            files = [cmd]
+        else:
+            files = [cmd + ext for ext in pathext]
+    else:
+        # On other platforms you don't have things like PATHEXT to tell you
+        # what file suffixes are executable, so just pass on cmd as-is.
+        files = [cmd]
+
+    seen = set()
+    for dir in path:
+        normdir = os.path.normcase(dir)
+        if not normdir in seen:
+            seen.add(normdir)
+            for thefile in files:
+                name = os.path.join(dir, thefile)
+                if _access_check(name, mode):
+                    return name
+    return None
+
+
+def main():
+    args = sys.argv[1:]
+    i = 0
+    # split dockrun's own leading "--flag" options from the command to run
+    while i < len(args):
+        if not args[i].startswith("--"):
+            break
+        # "--flag=value" is a single token; "--flag value" spans two
+        i += 1 if "=" in args[i] else 2
+
+    a = parser.parse_args(args[:i])
+    cmd = args[i:]
+
+    # check if nvidia-docker or docker are on path
+    docker_path = which("nvidia-docker")
+    if docker_path is None:
+        docker_path = which("docker")
+
+    if docker_path is None:
+        raise Exception("docker not found")
+
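+    # mount the host filesystem at /host and work from the mapped copy of the
+    # current directory, so relative paths inside the container reach host files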
+    docker_args = [
+        "--rm",
+        "--volume",
+        "/:/host",
+        "--workdir",
+        "/host" + os.getcwd(),
+        "--env",
+        "PYTHONUNBUFFERED=x",
+        "--env",
+        "CUDA_CACHE_PATH=/host/tmp/cuda-cache",
+    ]
+
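+    # publish the requested container port on the same host port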
+    if a.port is not None:
+        docker_args += ["--publish", "%d:%d" % (a.port, a.port)]
+
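+    # run the command with python inside the pix2pix-tensorflow image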
+    args = [docker_path, "run"] + docker_args + ["affinelayer/pix2pix-tensorflow", "python"] + cmd
+
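+    # fall back to sudo when this user cannot read the docker socket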
+    if not os.access("/var/run/docker.sock", os.R_OK):
+        args = ["sudo"] + args
+
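+    # replace this process with docker so the exit status passes through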
+    os.execvp(args[0], args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/test.py b/tools/test.py
index 7ee372a27cf7b539f314c8d6f25346d2399dd36d..0b745674189f78fc4200afe8bdcad99969b0f4f4 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -38,7 +38,7 @@ def main():
     else:
         # training
         for direction in ["AtoB", "BtoA"]:
-            for dataset in ["facades", "maps"]:
+            for dataset in ["facades"]:
                 name = dataset + "_" + direction
                 run("python pix2pix.py --mode train --output_dir test/%s_train --max_steps 1 --input_dir /data/official/%s/train --which_direction %s --seed 0" % (name, dataset, direction))
                 run("python pix2pix.py --mode test --output_dir test/%s_test --max_steps 1 --input_dir /data/official/%s/val --seed 0 --checkpoint test/%s_train" % (name, dataset, name))
@@ -50,17 +50,16 @@ def main():
             run("python pix2pix.py --mode test --output_dir test/%s_test --max_steps 1 --input_dir /data/%s/val --seed 0 --checkpoint test/%s_train" % (name, dataset, name))
 
         # using pretrained model
-        for dataset, direction in [("facades", "BtoA"), ("edges2shoes", "AtoB"), ("maps", "AtoB"), ("maps", "BtoA"), ("cityscapes", "AtoB"), ("cityscapes", "BtoA"), ("edges2handbags", "AtoB")]:
+        for dataset, direction in [("facades", "BtoA")]:
             name = dataset + "_" + direction
             run("python pix2pix.py --mode test --output_dir test/%s_pretrained_test --input_dir /data/official/%s/val --max_steps 100 --which_direction %s --seed 0 --checkpoint /data/pretrained/%s" % (name, dataset, direction, name))
             run("python pix2pix.py --mode export --output_dir test/%s_pretrained_export --checkpoint /data/pretrained/%s" % (name, name))
 
         # test python3
-        run("python pix2pix.py --mode train --output_dir test/py3_facades_AtoB_train --max_steps 1 --input_dir /data/official/facades/train --which_direction AtoB --seed 0", image="tensorflow/tensorflow:0.12.1-gpu-py3")
-        run("python pix2pix.py --mode test --output_dir test/py3_facades_AtoB_test --max_steps 1 --input_dir /data/official/facades/val --seed 0 --checkpoint test/py3_facades_AtoB_train", image="tensorflow/tensorflow:0.12.1-gpu-py3")
+        run("python pix2pix.py --mode train --output_dir test/py3_facades_AtoB_train --max_steps 1 --input_dir /data/official/facades/train --which_direction AtoB --seed 0", image="tensorflow/tensorflow:1.0.0-gpu-py3")
+        run("python pix2pix.py --mode test --output_dir test/py3_facades_AtoB_test --max_steps 1 --input_dir /data/official/facades/val --seed 0 --checkpoint test/py3_facades_AtoB_train", image="tensorflow/tensorflow:1.0.0-gpu-py3")
 
     print("elapsed", int(time.time() - start))
-    # short: 2521 (mac)
     # long: about 9 hours (linux)