diff --git a/capsule_net/README.md b/capsule_net/README.md
index 5c0508ecb..7900dfae2 100644
--- a/capsule_net/README.md
+++ b/capsule_net/README.md
@@ -16,7 +16,7 @@ There might be some differences between this implemntation and the original pape
 To train a CapsNet, run:
 
 ```shell
-python train.py [-c cuda.cudnn] [-d ] [--disable-grad-dynamic-routing]
+python train.py [-c cudnn] [-d ] [--disable-grad-dynamic-routing]
 ```
 
 You can see the list of options by running command with `-h` option.
diff --git a/capsule_net/reconstruct_tweaked_capsules.py b/capsule_net/reconstruct_tweaked_capsules.py
index 99b6e2b0b..d16022677 100644
--- a/capsule_net/reconstruct_tweaked_capsules.py
+++ b/capsule_net/reconstruct_tweaked_capsules.py
@@ -35,9 +35,11 @@ def get_args(monitor_path='tmp.monitor.capsnet'):
                         type=str, default=monitor_path,
                         help='Path monitoring logs saved.')
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
-    parser.add_argument("--device-id", "-d", type=int, default=0,
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
+    parser.add_argument("--device-id", "-d", type=str, default='0',
                         help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     args = parser.parse_args()
     assert os.path.isdir(
         args.monitor_path), "Run train.py before running this."
@@ -97,12 +99,10 @@ def main():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Load parameters
diff --git a/capsule_net/train.py b/capsule_net/train.py
index fad4f7a14..1f7c5ac9f 100644
--- a/capsule_net/train.py
+++ b/capsule_net/train.py
@@ -55,9 +55,11 @@ def get_args(monitor_path='tmp.monitor.capsnet'):
     parser.add_argument("--max-epochs", "-e", type=int, default=50,
                         help='Max epochs of training.')
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
-    parser.add_argument("--device-id", "-d", type=int, default=0,
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
+    parser.add_argument("--device-id", "-d", type=str, default='0',
                         help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--disable-grad-dynamic-routing", "-g",
                         dest='grad_dynamic_routing',
                         action='store_false', default=True,
@@ -78,12 +80,10 @@ def train():
     seed(0)
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # TRAIN
diff --git a/cifar10-100-collection/args.py b/cifar10-100-collection/args.py
index 5f7f4177c..09256e254 100644
--- a/cifar10-100-collection/args.py
+++ b/cifar10-100-collection/args.py
@@ -31,13 +31,16 @@ def get_args(monitor_path='tmp.monitor', max_iter=40000, model_save_path='tmp.mo
     parser.add_argument("--val-iter", "-j", type=int, default=100)
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay)
-    parser.add_argument("--device-id", "-d", type=int, default=0)
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--warmup-epoch", "-e", type=int, default=warmup_epoch)
     parser.add_argument("--model-save-interval", "-s", type=int, default=1000)
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path)
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension path. ex) cpu, cuda.cudnn.")
+                        default=None, help="Extension path. ex) cpu, cudnn.")
     parser.add_argument("--net", "-n", type=str,
                         default='cifar10_resnet23',
                         help="Neural network architecure type (used only in classification.py).\n"
diff --git a/cifar10-100-collection/classification.py b/cifar10-100-collection/classification.py
index 0414d49ff..4d1aa61c5 100644
--- a/cifar10-100-collection/classification.py
+++ b/cifar10-100-collection/classification.py
@@ -19,7 +19,7 @@ from cifar10_data import data_iterator_cifar10
 from cifar100_data import data_iterator_cifar100
 
 import nnabla as nn
-from nnabla.contrib.context import extension_context
+from nnabla.ext_utils import get_extension_context
 import nnabla.functions as F
 import nnabla.parametric_functions as PF
 import nnabla.solvers as S
@@ -55,7 +55,8 @@ def train():
     n_train_samples = 50000
     bs_valid = args.batch_size
     extension_module = args.context
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    ctx = get_extension_context(
+        extension_module, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
     if args.net == "cifar10_resnet23":
         prediction = functools.partial(
@@ -137,7 +138,7 @@
     """
     Call this script with `mpirun` or `mpiexec`
 
-    $ mpirun -n 4 python multi_device_multi_process.py --context "cuda.cudnn" -bs 64
+    $ mpirun -n 4 python multi_device_multi_process.py --context "cudnn" -bs 64
 
     """
     train()
diff --git a/cifar10-100-collection/models.py b/cifar10-100-collection/models.py
index 72ede5051..a3a504a30 100644
--- a/cifar10-100-collection/models.py
+++ b/cifar10-100-collection/models.py
@@ -20,7 +20,7 @@ from cifar100_data import data_iterator_cifar100
 
 import nnabla as nn
 import nnabla.communicators as C
-from nnabla.contrib.context import extension_context
+from nnabla.ext_utils import get_extension_context
 import nnabla.functions as F
 import nnabla.parametric_functions as PF
 import nnabla.solvers as S
diff --git a/distributed/cifar10-100/README.md b/distributed/cifar10-100/README.md
index b5b378626..43d95b738 100644
--- a/distributed/cifar10-100/README.md
+++ b/distributed/cifar10-100/README.md
@@ -17,7 +17,7 @@ NOTE that if you would like to run this example, please follow the bulid instruc
 
 When you run the script like the following,
 
 ```
-mpirun -n 4 python multi_device_multi_process_classification.py --context "cuda.cudnn" -b 64
+mpirun -n 4 python multi_device_multi_process_classification.py --context "cudnn" -b 64
 ```
 
@@ -32,7 +32,7 @@ NOTE that if you would like to run this example, please follow the bulid instruc
 
 When you run the script like the following,
 
 ```
-mpirun --hostfile hostfile python multi_device_multi_process_classification.py --context "cuda.cudnn" -b 64
+mpirun --hostfile hostfile python multi_device_multi_process_classification.py --context "cudnn" -b 64
 ```
 
diff --git a/distributed/cifar10-100/args.py b/distributed/cifar10-100/args.py
index 3bc948e0a..e04978cae 100644
--- a/distributed/cifar10-100/args.py
+++ b/distributed/cifar10-100/args.py
@@ -31,14 +31,17 @@ def get_args(monitor_path='tmp.monitor', max_iter=234300, model_save_path='tmp.m
     parser.add_argument("--val-iter", "-j", type=int, default=100)
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay)
-    parser.add_argument("--device-id", "-d", type=int, default=0)
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--n-devices", "-n", type=int, default=n_devices)
     parser.add_argument("--warmup-epoch", "-e", type=int, default=warmup_epoch)
     parser.add_argument("--model-save-interval", "-s", type=int, default=1000)
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path)
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension path. ex) cpu, cuda.cudnn.")
+                        default=None, help="Extension path. ex) cpu, cudnn.")
     parser.add_argument("--net", type=str,
                         default='cifar10_resnet23',
                         help="Neural network architecure type (used only in classification.py).\n"
diff --git a/distributed/cifar10-100/models.py b/distributed/cifar10-100/models.py
index 1dd4e8ae1..ce622564c 100644
--- a/distributed/cifar10-100/models.py
+++ b/distributed/cifar10-100/models.py
@@ -20,7 +20,7 @@ from cifar100_data import data_iterator_cifar100
 
 import nnabla as nn
 import nnabla.communicators as C
-from nnabla.contrib.context import extension_context
+from nnabla.ext_utils import get_extension_context
 import nnabla.functions as F
 import nnabla.parametric_functions as PF
 import nnabla.solvers as S
diff --git a/distributed/cifar10-100/multi_device_multi_process_classification.py b/distributed/cifar10-100/multi_device_multi_process_classification.py
index bff70108d..539822cb0 100644
--- a/distributed/cifar10-100/multi_device_multi_process_classification.py
+++ b/distributed/cifar10-100/multi_device_multi_process_classification.py
@@ -22,7 +22,7 @@ from nnabla.utils.data_iterator import data_iterator
 
 import nnabla as nn
 import nnabla.communicators as C
-from nnabla.contrib.context import extension_context
+from nnabla.ext_utils import get_extension_context
 import nnabla.functions as F
 import nnabla.parametric_functions as PF
 import nnabla.solvers as S
@@ -70,8 +70,8 @@ def train():
     data_iterator = data_iterator_cifar100
 
     # Create Communicator and Context
-    extension_module = "cuda.cudnn"
-    ctx = extension_context(extension_module)
+    extension_module = "cudnn"
+    ctx = get_extension_context(extension_module)
     comm = C.MultiProcessDataParalellCommunicator(ctx)
     comm.init()
     n_devices = comm.size
@@ -192,7 +192,7 @@
     """
     Call this script with `mpirun` or `mpiexec`
 
-    $ mpirun -n 4 python multi_device_multi_process.py --context "cuda.cudnn" -bs 64
+    $ mpirun -n 4 python multi_device_multi_process.py --context "cudnn" -bs 64
 
     """
     train()
diff --git a/imagenet-classification/README.md b/imagenet-classification/README.md
index 5a878d6df..9fb4ae867 100644
--- a/imagenet-classification/README.md
+++ b/imagenet-classification/README.md
@@ -16,7 +16,7 @@ In this example, "Residual Neural Network" (also called "ResNet") is trained on
 The following line executes the Tiny ImageNet training (with the setting the we recommended you to try first. It requires near 6GB memory available in the CUDA device. See more options in the help by the `-h` option.).
 
 ```
-python classification.py -c cuda.cudnn -a4 -b64 -L34
+python classification.py -c cudnn -a4 -b64 -L34
 ```
 
 Tiny ImageNet consists of 200 categories and each category has 500 of 64x64 size images in training set.
diff --git a/imagenet-classification/args.py b/imagenet-classification/args.py
index 9b1d6c1e5..5b1178b71 100644
--- a/imagenet-classification/args.py
+++ b/imagenet-classification/args.py
@@ -46,15 +46,17 @@ def get_args(monitor_path='tmp.monitor.imagenet', max_iter=500000, model_save_pa
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
-                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-interval", "-s", type=int, default=1000,
                         help='The interval of saving model parameters.')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path,
                         help='Path the model parameters saved.')
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension module. 'cuda.cudnn' is highly.recommended.")
+                        default=None, help="Extension module. 'cudnn' is highly recommended.")
     parser.add_argument("--num-layers", "-L", type=int,
                         choices=[18, 34, 50, 101, 152], default=34,
                         help='Number of layers of ResNet.')
diff --git a/imagenet-classification/classification.py b/imagenet-classification/classification.py
index 68fc0b89b..6c8f6a506 100644
--- a/imagenet-classification/classification.py
+++ b/imagenet-classification/classification.py
@@ -86,12 +86,13 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
+    from nnabla.ext_utils import get_extension_context
     extension_module = args.context
     if args.context is None:
         extension_module = 'cpu'
     logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    ctx = get_extension_context(
+        extension_module, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Dataset
diff --git a/mnist-collection/args.py b/mnist-collection/args.py
index 4cec89c3f..5db668ceb 100644
--- a/mnist-collection/args.py
+++ b/mnist-collection/args.py
@@ -41,8 +41,10 @@ def get_args(monitor_path='tmp.monitor', max_iter=10000, model_save_path=None, l
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
+    parser.add_argument("--device-id", "-d", type=str, default='0',
                         help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-interval", "-s", type=int, default=1000,
                         help='The interval of saving model parameters.')
     parser.add_argument("--model-save-path", "-o",
@@ -52,7 +54,7 @@ def get_args(monitor_path='tmp.monitor', max_iter=10000, model_save_path=None, l
                         default='lenet',
                         help="Neural network architecure type (used only in classification*.py).\n classification.py: ('lenet'|'resnet'), classification_bnn.py: ('bincon'|'binnet'|'bwn'|'bwn'|'bincon_resnet'|'binnet_resnet'|'bwn_resnet')")
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
+                        default='cpu', help="Extension modules. ex) 'cpu', 'cudnn'.")
     parser.add_argument('--augment-train', action='store_true',
                         default=False, help="Enable data augmentation of training data.")
     parser.add_argument('--augment-test', action='store_true',
diff --git a/mnist-collection/classification.py b/mnist-collection/classification.py
index fd0f78f9c..d074661b1 100644
--- a/mnist-collection/classification.py
+++ b/mnist-collection/classification.py
@@ -27,6 +27,8 @@ from args import get_args
 from mnist_data import data_iterator_mnist
 
+import numpy as np
+
 
 def categorical_error(pred, label):
     """
@@ -129,12 +131,10 @@ def train():
     seed(0)
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
@@ -186,6 +186,7 @@
             for j in range(args.val_iter):
                 vimage.d, vlabel.d = vdata.next()
                 vpred.forward(clear_buffer=True)
+                vpred.data.cast(np.float32, ctx)
                 ve += categorical_error(vpred.d, vlabel.d)
             monitor_verr.add(i, ve / args.val_iter)
         if i % args.model_save_interval == 0:
@@ -198,6 +199,8 @@
         loss.backward(clear_buffer=True)
         solver.weight_decay(args.weight_decay)
         solver.update()
+        loss.data.cast(np.float32, ctx)
+        pred.data.cast(np.float32, ctx)
         e = categorical_error(pred.d, label.d)
         monitor_loss.add(i, loss.d.copy())
         monitor_err.add(i, e)
diff --git a/mnist-collection/classification_bnn.py b/mnist-collection/classification_bnn.py
index 32036848b..f03c51995 100644
--- a/mnist-collection/classification_bnn.py
+++ b/mnist-collection/classification_bnn.py
@@ -244,12 +244,10 @@ def train():
     args = get_args(monitor_path='tmp.monitor.bnn')
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Initialize DataIterator for MNIST.
diff --git a/mnist-collection/dcgan.py b/mnist-collection/dcgan.py
index f2dbcb7c8..4835acdf9 100644
--- a/mnist-collection/dcgan.py
+++ b/mnist-collection/dcgan.py
@@ -113,12 +113,10 @@ def train(args):
     """
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/mnist-collection/siamese.py b/mnist-collection/siamese.py
index 5b7162305..172d0c88d 100644
--- a/mnist-collection/siamese.py
+++ b/mnist-collection/siamese.py
@@ -95,12 +95,10 @@ def train(args):
     """
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/mnist-collection/vae.py b/mnist-collection/vae.py
index 07ad4e160..eabe574ac 100644
--- a/mnist-collection/vae.py
+++ b/mnist-collection/vae.py
@@ -123,12 +123,10 @@ def main():
         model_save_path=None, learning_rate=3e-4, batch_size=100, weight_decay=0)
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Initialize data provider
diff --git a/mnist-collection/vat.py b/mnist-collection/vat.py
index 0e26b758b..e57f92a4f 100644
--- a/mnist-collection/vat.py
+++ b/mnist-collection/vat.py
@@ -285,7 +285,10 @@ def get_args():
                         "-np", type=int, default=1)
     parser.add_argument("--xi-for-vat", "-er", type=float, default=10.0)
     parser.add_argument("--eps-for-vat", "-el", type=float, default=1.5)
-    parser.add_argument("--device-id", "-d", type=int, default=0)
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default="tmp.monitor.vat")
     parser.add_argument('--context', '-c', type=str,
@@ -319,12 +322,10 @@ def main():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     shape_x = (1, 28, 28)
diff --git a/penn-treebank/args.py b/penn-treebank/args.py
index c4c427ac7..18825a14d 100644
--- a/penn-treebank/args.py
+++ b/penn-treebank/args.py
@@ -24,7 +24,10 @@ def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument('--context', '-c', type=str,
                         default=None, help="Extension path. ex) cpu, cuda.cudnn.")
-    parser.add_argument("--device-id", "-d", type=int, default=0)
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--work-dir", "-w", type=str,
                         default="tmp.result/")
     parser.add_argument("--save-dir", "-s", type=str,
@@ -34,7 +37,7 @@ def get_args():
     parser.add_argument("--max-epoch", "-e", type=int, default=40)
     parser.add_argument("--monitor-interval", "-m", type=int, default=1000)
     parser.add_argument("--num-steps", "-n", type=int, default=35)
-    parser.add_argument("--state-size", "-t", type=int, default=650)
+    parser.add_argument("--state-size", "-S", type=int, default=650)
     parser.add_argument("--num-layers", "-a", type=int, default=2)
     parser.add_argument("--gradient-clipping-max-norm", "-g", type=int,
                         default=0.25)
diff --git a/penn-treebank/train.py b/penn-treebank/train.py
index 668ae34b6..8f80f8e34 100644
--- a/penn-treebank/train.py
+++ b/penn-treebank/train.py
@@ -116,12 +116,11 @@ def main():
 
     train_data, val_data, test_data = get_data()
 
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    # Get context.
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     from nnabla.monitor import Monitor, MonitorSeries
diff --git a/reduction/cifar10/distillation/README.ipynb b/reduction/cifar10/distillation/README.ipynb
index 782b31117..2c933a8ce 100644
--- a/reduction/cifar10/distillation/README.ipynb
+++ b/reduction/cifar10/distillation/README.ipynb
@@ -34,7 +34,7 @@
    "metadata": {},
    "source": [
     "```sh\n",
-    "python classification.py -c \"cuda.cudnn\" \\\n",
+    "python classification.py -c \"cudnn\" \\\n",
     "    --monitor-path \"monitor.teacher\" \\\n",
     "    --model-save-path \"monitor.teacher\" \\\n",
     "    -d 0\n",
@@ -53,7 +53,7 @@
    "metadata": {},
    "source": [
     "```sh\n",
-    "python distillation.py -c \"cuda.cudnn\" \\\n",
+    "python distillation.py -c \"cudnn\" \\\n",
     "    --reduction-rate 0.5 \\\n",
     "    --monitor-path \"monitor.student.rrate-05\" \\\n",
     "    --model-save-path \"monitor.student.rrate-05\" \\\n",
diff --git a/reduction/cifar10/distillation/args.py b/reduction/cifar10/distillation/args.py
index 58505e482..94aadde45 100644
--- a/reduction/cifar10/distillation/args.py
+++ b/reduction/cifar10/distillation/args.py
@@ -39,12 +39,14 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
-                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path,
                         help='Path the model parameters saved.')
-    parser.add_argument("--model-load-path", "-t",
+    parser.add_argument("--model-load-path", "-T",
                         type=str, default=model_save_path,
                         help='Path the model parameters loaded.')
     parser.add_argument("--net", "-n", type=str,
@@ -52,7 +54,7 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
                         help="Neural network architecure type (used only in classification*.py)"
                         "classification.py: ('mnist_resnet_prediction'|'cifar10_resnet23_prediction')")
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
     parser.add_argument('--maps', type=int, default=64,
                         help="the number of feature maps.")
     parser.add_argument('--reduction-rate', type=float,
diff --git a/reduction/cifar10/distillation/classification.py b/reduction/cifar10/distillation/classification.py
index 96c6d094a..3650f451b 100644
--- a/reduction/cifar10/distillation/classification.py
+++ b/reduction/cifar10/distillation/classification.py
@@ -34,12 +34,10 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/cifar10/distillation/distillation.py b/reduction/cifar10/distillation/distillation.py
index e97113c13..a11bd2b5e 100644
--- a/reduction/cifar10/distillation/distillation.py
+++ b/reduction/cifar10/distillation/distillation.py
@@ -34,12 +34,10 @@ def distil():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/cifar10/quantization/README.ipynb b/reduction/cifar10/quantization/README.ipynb
index 6359fe9d0..b73b23654 100644
--- a/reduction/cifar10/quantization/README.ipynb
+++ b/reduction/cifar10/quantization/README.ipynb
@@ -61,7 +61,7 @@
    "metadata": {},
    "source": [
     "```sh\n",
-    "python classification.py -c \"cuda.cudnn\" \\\n",
+    "python classification.py -c \"cudnn\" \\\n",
     "    --net cifar10_fp_connect_resnet23_prediction \\\n",
     "    --monitor-path \"monitor.fpcon\" \\\n",
     "    --model-save-path \"monitor.fpcon\" \\\n",
diff --git a/reduction/cifar10/quantization/args.py b/reduction/cifar10/quantization/args.py
index 4e5f7b2b0..d1288ce47 100644
--- a/reduction/cifar10/quantization/args.py
+++ b/reduction/cifar10/quantization/args.py
@@ -39,12 +39,14 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
-                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path,
                         help='Path the model parameters saved.')
-    parser.add_argument("--model-load-path", "-t",
+    parser.add_argument("--model-load-path", "-T",
                         type=str, default=model_save_path,
                         help='Path the model parameters loaded.')
     parser.add_argument("--net", "-n", type=str,
@@ -61,7 +63,7 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
                         "'cifar10_inq_resnet23_prediction\n'"
                         )
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
     parser.add_argument("--bit-width", type=int, default=8,
                         help='Bitwidth used, correspnding to n.')
diff --git a/reduction/cifar10/quantization/classification.py b/reduction/cifar10/quantization/classification.py
index 8918fa6c6..c77ce843f 100644
--- a/reduction/cifar10/quantization/classification.py
+++ b/reduction/cifar10/quantization/classification.py
@@ -44,12 +44,10 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/cifar10/resnet2rnn/README.ipynb b/reduction/cifar10/resnet2rnn/README.ipynb
index 0a556f279..98740d343 100644
--- a/reduction/cifar10/resnet2rnn/README.ipynb
+++ b/reduction/cifar10/resnet2rnn/README.ipynb
@@ -29,7 +29,7 @@
    "metadata": {},
    "source": [
     "```sh\n",
-    "python classification.py -c \"cuda.cudnn\" \\\n",
+    "python classification.py -c \"cudnn\" \\\n",
     "    --monitor-path \"cifar10_resnet2rnn_3x3x4_prediction\" \\\n",
     "    --model-save-path \"cifar10_resnet2rnn_3x3x4_prediction\" \\\n",
     "    --net \"cifar10_resnet2rnn_3x3x4_prediction\" \\\n",
diff --git a/reduction/cifar10/resnet2rnn/args.py b/reduction/cifar10/resnet2rnn/args.py
index 217057e6a..41797dee1 100644
--- a/reduction/cifar10/resnet2rnn/args.py
+++ b/reduction/cifar10/resnet2rnn/args.py
@@ -39,12 +39,14 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
-                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path,
                         help='Path the model parameters saved.')
-    parser.add_argument("--model-load-path", "-t",
+    parser.add_argument("--model-load-path", "-T",
                         type=str, default=model_save_path,
                         help='Path the model parameters loaded.')
     parser.add_argument("--net", "-n", type=str,
@@ -58,7 +60,7 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
                         "'cifar10_resnet2rnn_5_prediction'"
                         )
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
     args = parser.parse_args()
     if not os.path.isdir(args.model_save_path):
         os.makedirs(args.model_save_path)
diff --git a/reduction/cifar10/resnet2rnn/classification.py b/reduction/cifar10/resnet2rnn/classification.py
index 82d16eb95..6bf7de247 100644
--- a/reduction/cifar10/resnet2rnn/classification.py
+++ b/reduction/cifar10/resnet2rnn/classification.py
@@ -36,12 +36,10 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/cifar10/structured-sparsity/README.ipynb b/reduction/cifar10/structured-sparsity/README.ipynb
index 79344ac08..0a1528563 100644
--- a/reduction/cifar10/structured-sparsity/README.ipynb
+++ b/reduction/cifar10/structured-sparsity/README.ipynb
@@ -122,7 +122,7 @@
    "metadata": {},
    "source": [
     "```sh\n",
-    "python classification.py -c \"cuda.cudnn\" \\\n",
+    "python classification.py -c \"cudnn\" \\\n",
     "    --monitor-path \"monitor.filter.lambda-5e4\" \\\n",
     "    --model-save-path \"monitor.filter.lambda-5e4\" \\\n",
     "    --filter-decay 5e-4 \\\n",
@@ -144,7 +144,7 @@
    "source": [
     "```sh\n",
     "\n",
-    "python finetuning.py -c \"cuda.cudnn\" \\\n",
+    "python finetuning.py -c \"cudnn\" \\\n",
     "    --monitor-path \"monitor.finetune.filter.lambda-5e4.rrate-025\" \\\n",
     "    --model-save-path \"monitor.finetune.filter.lambda-5e4.rrate-025\" \\\n",
     "    --model-load-path \"monitor.filter.lambda-5e4/${the best result}.h5\" \\\n",
diff --git a/reduction/cifar10/structured-sparsity/args.py b/reduction/cifar10/structured-sparsity/args.py
index 13ecd9e8d..d999a266e 100644
--- a/reduction/cifar10/structured-sparsity/args.py
+++ b/reduction/cifar10/structured-sparsity/args.py
@@ -39,12 +39,14 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
-                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path,
                         help='Path the model parameters saved.')
-    parser.add_argument("--model-load-path", "-t",
+    parser.add_argument("--model-load-path", "-T",
                         type=str, default="",
                         help='Path the model parameters loaded.')
     parser.add_argument("--net", "-n", type=str,
@@ -60,7 +62,7 @@ def get_args(monitor_path='tmp.monitor', max_iter=234375, model_save_path=None,
                         default=5. * 1e-5,
                         help="Decay coefficient for regularization of channel.")
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
     parser.add_argument('--reduction-rate', type=float,
                         default=0.25, help="Reduction rate, should be less than 1.")
     args = parser.parse_args()
diff --git a/reduction/cifar10/structured-sparsity/classification.py b/reduction/cifar10/structured-sparsity/classification.py
index affa3d66e..11f5ebaf1 100644
--- a/reduction/cifar10/structured-sparsity/classification.py
+++ b/reduction/cifar10/structured-sparsity/classification.py
@@ -36,12 +36,10 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/cifar10/structured-sparsity/finetuning.py b/reduction/cifar10/structured-sparsity/finetuning.py
index e65dfdda4..c37d33fbc 100644
--- a/reduction/cifar10/structured-sparsity/finetuning.py
+++ b/reduction/cifar10/structured-sparsity/finetuning.py
@@ -38,12 +38,10 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/mnist/svd/README.ipynb b/reduction/mnist/svd/README.ipynb
index 8feddfd75..de78054d9 100644
--- a/reduction/mnist/svd/README.ipynb
+++ b/reduction/mnist/svd/README.ipynb
@@ -42,7 +42,7 @@
    "source": [
     "```sh\n",
     "\n",
-    "python classification.py -c \"cuda.cudnn\"\n",
+    "python classification.py -c \"cudnn\"\n",
     "\n",
     "```"
    ]
@@ -60,7 +60,7 @@
    "source": [
     "```sh\n",
     "\n",
-    "python classification_svd.py -c \"cuda.cudnn\" --model-load-path tmp.monitor/${the best result}.h5\n",
+    "python classification_svd.py -c \"cudnn\" --model-load-path tmp.monitor/${the best result}.h5\n",
     "\n",
     "```"
    ]
diff --git a/reduction/mnist/svd/args.py b/reduction/mnist/svd/args.py
index 7dee8490b..0d336c738 100644
--- a/reduction/mnist/svd/args.py
+++ b/reduction/mnist/svd/args.py
@@ -41,16 +41,18 @@ def get_args(monitor_path='tmp.monitor', max_iter=10000, model_save_path=None, l
     parser.add_argument("--weight-decay", "-w", type=float,
                         default=weight_decay,
                         help='Weight decay factor of SGD update.')
-    parser.add_argument("--device-id", "-d", type=int, default=0,
-                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--model-save-path", "-o",
                         type=str, default=model_save_path,
                         help='Path the model parameters saved.')
-    parser.add_argument("--model-load-path", "-t",
+    parser.add_argument("--model-load-path", "-T",
                         type=str, default=model_save_path,
                         help='Path the model parameters loaded.')
     parser.add_argument('--context', '-c', type=str,
-                        default=None, help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
+                        default=None, help="Extension modules. ex) 'cpu', 'cudnn'.")
     args = parser.parse_args()
     if not os.path.isdir(args.model_save_path):
         os.makedirs(args.model_save_path)
diff --git a/reduction/mnist/svd/classification.py b/reduction/mnist/svd/classification.py
index bfda40117..7793b07c7 100644
--- a/reduction/mnist/svd/classification.py
+++ b/reduction/mnist/svd/classification.py
@@ -35,12 +35,10 @@ def train():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/reduction/mnist/svd/classification_svd.py b/reduction/mnist/svd/classification_svd.py
index 3aa3e110b..a4d2f8aaa 100644
--- a/reduction/mnist/svd/classification_svd.py
+++ b/reduction/mnist/svd/classification_svd.py
@@ -36,12 +36,10 @@ def classification_svd():
     args = get_args()
 
     # Get context.
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create CNN network for both training and testing.
diff --git a/word-embedding/word_embedding.py b/word-embedding/word_embedding.py
index b868d9599..48546f22d 100644
--- a/word-embedding/word_embedding.py
+++ b/word-embedding/word_embedding.py
@@ -273,7 +273,10 @@ def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument('--context', '-c', type=str,
                         default=None, help="Extension path. ex) cpu, cuda.cudnn.")
-    parser.add_argument("--device-id", "-d", type=int, default=0)
+    parser.add_argument("--device-id", "-d", type=str, default='0',
+                        help='Device ID the training run on. This is only valid if you specify `-c cuda.cudnn`.')
+    parser.add_argument("--type-config", "-t", type=str, default='float',
+                        help='Type of computation. e.g. "float", "half".')
     parser.add_argument("--work_dir", "-m", type=str,
                         default="tmp.result.w2v/")
@@ -300,13 +303,11 @@ def main():
     # Load Dataset
     itow, wtoi, dataset = load_ptbset(data_file)
 
-    # Computation environment settings
-    from nnabla.contrib.context import extension_context
-    extension_module = args.context
-    if args.context is None:
-        extension_module = 'cpu'
-    logger.info("Running in %s" % extension_module)
-    ctx = extension_context(extension_module, device_id=args.device_id)
+    # Get context.
+    from nnabla.ext_utils import get_extension_context
+    logger.info("Running in %s" % args.context)
+    ctx = get_extension_context(
+        args.context, device_id=args.device_id, type_config=args.type_config)
     nn.set_default_context(ctx)
 
     # Create data provider
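
Reviewer note: the same migration is applied in every file above, so the resulting pattern is summarized once here. The sketch below is illustrative, not a file in this diff; it assumes a current nnabla install with `nnabla.ext_utils` (and, for `-c cudnn`, the separately installed CUDA extension package).

```python
# Minimal sketch of the post-migration context setup (illustrative only).
import argparse

import nnabla as nn
from nnabla.ext_utils import get_extension_context

parser = argparse.ArgumentParser()
parser.add_argument('--context', '-c', type=str, default='cpu',
                    help="Extension modules. ex) 'cpu', 'cudnn'.")
parser.add_argument('--device-id', '-d', type=str, default='0',
                    help='Device ID the training run on.')
parser.add_argument('--type-config', '-t', type=str, default='float',
                    help='Type of computation. e.g. "float", "half".')
args = parser.parse_args()

# device_id is now passed as a string, and type_config selects the
# computation dtype ('half' enables float16 on capable devices).
ctx = get_extension_context(
    args.context, device_id=args.device_id, type_config=args.type_config)
nn.set_default_context(ctx)
```

One observation from the diff: scripts that keep `--context` defaulting to `None` (e.g. capsule_net) also drop the old fallback to `'cpu'`, so they appear to require an explicit `-c` now; the sketch sidesteps that by defaulting to `'cpu'`, as mnist-collection/args.py now does. A typical invocation after this change is `python classification.py -c cudnn -d 0 -t half`.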
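
The other behavioral addition is the float32 cast introduced in mnist-collection/classification.py before host-side metrics: with `-t half`, forward outputs are stored as float16 on the device, so casting in-context keeps the NumPy-side `categorical_error` arithmetic in float32. A minimal self-contained sketch of that pattern follows (hypothetical toy graph, CPU context so it runs anywhere):

```python
# Sketch of the cast-before-metrics pattern from mnist-collection/classification.py.
import numpy as np

import nnabla as nn
import nnabla.functions as F
from nnabla.ext_utils import get_extension_context

ctx = get_extension_context('cpu', device_id='0', type_config='float')
nn.set_default_context(ctx)

x = nn.Variable((4, 10))          # stand-in for network output logits
x.d = np.random.randn(4, 10)
pred = F.softmax(x)

pred.forward(clear_buffer=True)
# Under type_config='half' this buffer would be float16; casting in-context
# makes the following NumPy-side computation float32. No-op under 'float'.
pred.data.cast(np.float32, ctx)
print(pred.d.argmax(axis=1))      # e.g. what categorical_error consumes
```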