From fc402e0c2a79aadb55a1322002a6a94eb0da6a58 Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Tue, 29 Sep 2020 13:50:16 +0000
Subject: [PATCH 1/7] step lr_scheduler on epoch end in hapi/model.py fit.
 test=develop

---
 python/paddle/hapi/model.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 1bfe8f07a2fbd..cc5f10d82c7c4 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1436,6 +1436,9 @@ def fit(
 
         cbks.on_end('eval', eval_logs)
 
+        # step learning rate scheduler on each epoch end
+        self._optimizer._learning_rate.step()
+
         cbks.on_end('train', logs)
         self._test_dataloader = None
 

From 3791c00c1eff5c6dc0285fc869437ec2047231b5 Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Sat, 10 Oct 2020 03:25:59 +0000
Subject: [PATCH 2/7] adapt for float learning rate. test=develop

---
 python/paddle/hapi/model.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index cc5f10d82c7c4..4584d01d04ed9 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1437,7 +1437,9 @@ def fit(
         cbks.on_end('eval', eval_logs)
 
         # step learning rate scheduler on each epoch end
-        self._optimizer._learning_rate.step()
+        if isinstance(self._optimizer._learning_rate,
+                      paddle.optimizer._LRScheduler):
+            self._optimizer._learning_rate.step()
 
         cbks.on_end('train', logs)
         self._test_dataloader = None

From f39a396e49447bf78866b46e21d1e24ce785bbd0 Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Sat, 10 Oct 2020 08:26:26 +0000
Subject: [PATCH 3/7] fix format. test=develop

---
 python/paddle/hapi/model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 2edffb4fa2c2a..e323085007428 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1465,8 +1465,8 @@ def fit(
         cbks.on_end('eval', eval_logs)
 
         # step learning rate scheduler on each epoch end
-        if isinstance(self._optimizer._learning_rate, 
-                      paddle.optimizer._LRScheduler): 
+        if isinstance(self._optimizer._learning_rate,
+                      paddle.optimizer._LRScheduler):
             self._optimizer._learning_rate.step()
 
         cbks.on_end('train', logs)

From c75ddf3e2ce91dd90ab90e2110d97ff6ebfc4b96 Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Sat, 10 Oct 2020 13:08:58 +0000
Subject: [PATCH 4/7] add unittest for coverage CI.

test=develop
---
 python/paddle/tests/test_model.py | 53 ++++++++++++++++++++++++++++++-
 1 file changed, 52 insertions(+), 1 deletion(-)

diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 96c4483a35ba8..bc12bbdae78d2 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -33,7 +33,7 @@
 from paddle.metric import Accuracy
 from paddle.vision.datasets import MNIST
 from paddle.vision.models import LeNet
-from paddle.io import DistributedBatchSampler
+from paddle.io import DistributedBatchSampler, Dataset
 from paddle.hapi.model import prepare_distributed_context
 from paddle.fluid.dygraph.jit import declarative
 from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
@@ -295,6 +295,15 @@ def forward(self, x):
         return y
 
 
+class MyDataset(Dataset):
+    def __getitem__(self, idx):
+        return np.random.random(size=(20,)).astype(np.float32), \
+            np.random.randint(0, 10, size=(1,)).astype(np.int64)
+
+    def __len__(self):
+        return 40
+
+
 class TestModelFunction(unittest.TestCase):
     def set_seed(self, seed=1024):
         paddle.manual_seed(seed)
@@ -581,6 +590,48 @@ def test_dygraph_export_deploy_model_without_inputs(self):
         shutil.rmtree(save_dir)
 
 
+class TestModelWithLRScheduler(unittest.TestCase):
+    def test_fit(self):
+        def make_optimizer(parameters=None):
+            base_lr = 1e-3
+            momentum = 0.9
+            weight_decay = 5e-4
+            boundaries = [5, 8]
+            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
+            learning_rate = paddle.optimizer.PiecewiseLR(
+                boundaries=boundaries, values=values)
+            learning_rate = paddle.optimizer.LinearLrWarmup(
+                learning_rate=learning_rate,
+                warmup_steps=4,
+                start_lr=base_lr / 5.,
+                end_lr=base_lr, verbose=True)
+            optimizer = paddle.optimizer.Momentum(
+                learning_rate=learning_rate,
+                weight_decay=weight_decay,
+                momentum=momentum,
+                parameters=parameters)
+            return optimizer
+
+        device = paddle.set_device('cpu')
+        fluid.enable_dygraph(device)
+        net = MyModel()
+        inputs = [InputSpec([None, 20], 'float32', 'x')]
+        labels = [InputSpec([None, 1], 'int64', 'label')]
+        optim = make_optimizer(net.parameters())
+        model = Model(net, inputs, labels)
+        model.prepare(
+            optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
+
+        dataset = MyDataset()
+        model.fit(dataset,
+                  dataset,
+                  batch_size=4,
+                  epochs=10,
+                  num_workers=0)
+
+        paddle.enable_static()
+
+
 class TestRaiseError(unittest.TestCase):
     def test_input_without_name(self):
         net = MyModel()

From 4c6e33507e02f520abfd7be438a23a66aeebb020 Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Mon, 12 Oct 2020 02:36:44 +0000
Subject: [PATCH 5/7] fix indent.

test=develop
---
 python/paddle/tests/test_model.py | 54 +++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index bc12bbdae78d2..0a15f0039945a 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -592,43 +592,43 @@ def test_dygraph_export_deploy_model_without_inputs(self):
 
 class TestModelWithLRScheduler(unittest.TestCase):
     def test_fit(self):
-        def make_optimizer(parameters=None):
-            base_lr = 1e-3
-            momentum = 0.9
-            weight_decay = 5e-4
-            boundaries = [5, 8]
-            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
-            learning_rate = paddle.optimizer.PiecewiseLR(
-                boundaries=boundaries, values=values)
-            learning_rate = paddle.optimizer.LinearLrWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=4,
-                start_lr=base_lr / 5.,
-                end_lr=base_lr, verbose=True)
-            optimizer = paddle.optimizer.Momentum(
-                learning_rate=learning_rate,
-                weight_decay=weight_decay,
-                momentum=momentum,
-                parameters=parameters)
-            return optimizer
-
+        def make_optimizer(parameters=None):
+            base_lr = 1e-3
+            momentum = 0.9
+            weight_decay = 5e-4
+            boundaries = [5, 8]
+            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
+            learning_rate = paddle.optimizer.PiecewiseLR(
+                boundaries=boundaries, values=values)
+            learning_rate = paddle.optimizer.LinearLrWarmup(
+                learning_rate=learning_rate,
+                warmup_steps=4,
+                start_lr=base_lr / 5.,
+                end_lr=base_lr, verbose=True)
+            optimizer = paddle.optimizer.Momentum(
+                learning_rate=learning_rate,
+                weight_decay=weight_decay,
+                momentum=momentum,
+                parameters=parameters)
+            return optimizer
+
         device = paddle.set_device('cpu')
         fluid.enable_dygraph(device)
         net = MyModel()
-        inputs = [InputSpec([None, 20], 'float32', 'x')]
-        labels = [InputSpec([None, 1], 'int64', 'label')]
+        inputs = [InputSpec([None, 20], 'float32', 'x')]
+        labels = [InputSpec([None, 1], 'int64', 'label')]
         optim = make_optimizer(net.parameters())
-        model = Model(net, inputs, labels)
-        model.prepare(
-            optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
-
+        model = Model(net, inputs, labels)
+        model.prepare(
+            optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
+
         dataset = MyDataset()
         model.fit(dataset,
                   dataset,
                   batch_size=4,
                   epochs=10,
                   num_workers=0)
-
+
         paddle.enable_static()
 
 

From 21350065fdceb5228862670deebb0b8a9f4c86ce Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Mon, 12 Oct 2020 07:12:00 +0000
Subject: [PATCH 6/7] fix format.

test=develop
---
 python/paddle/tests/test_model.py | 36 ++++++++++++++-----------------
 1 file changed, 16 insertions(+), 20 deletions(-)

diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 0a15f0039945a..1f5a9252368cf 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -299,7 +299,7 @@ class MyDataset(Dataset):
     def __getitem__(self, idx):
         return np.random.random(size=(20,)).astype(np.float32), \
             np.random.randint(0, 10, size=(1,)).astype(np.int64)
-    
+
     def __len__(self):
         return 40
 
@@ -599,19 +599,20 @@ def make_optimizer(parameters=None):
             boundaries = [5, 8]
             values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
             learning_rate = paddle.optimizer.PiecewiseLR(
-                boundaries=boundaries, values=values)
+                boundaries=boundaries, values=values)
             learning_rate = paddle.optimizer.LinearLrWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=4,
-                start_lr=base_lr / 5.,
-                end_lr=base_lr, verbose=True)
+                learning_rate=learning_rate,
+                warmup_steps=4,
+                start_lr=base_lr / 5.,
+                end_lr=base_lr,
+                verbose=True)
             optimizer = paddle.optimizer.Momentum(
-                learning_rate=learning_rate,
-                weight_decay=weight_decay,
-                momentum=momentum,
-                parameters=parameters)
+                learning_rate=learning_rate,
+                weight_decay=weight_decay,
+                momentum=momentum,
+                parameters=parameters)
             return optimizer
-
+
         device = paddle.set_device('cpu')
         fluid.enable_dygraph(device)
         net = MyModel()
@@ -619,16 +620,11 @@ def make_optimizer(parameters=None):
         labels = [InputSpec([None, 1], 'int64', 'label')]
         optim = make_optimizer(net.parameters())
         model = Model(net, inputs, labels)
-        model.prepare(
-            optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
-
+        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
+
         dataset = MyDataset()
-        model.fit(dataset,
-                  dataset,
-                  batch_size=4,
-                  epochs=10,
-                  num_workers=0)
-
+        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)
+
         paddle.enable_static()
 
 

From 9c8f2fce5c0b57197830e9df29d600ee3e66eb2d Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Wed, 14 Oct 2020 11:44:11 +0000
Subject: [PATCH 7/7] fix API update. test=develop

---
 python/paddle/hapi/model.py       | 2 +-
 python/paddle/tests/test_model.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 02ca9f67ebe01..5890d9760ebc6 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1463,7 +1463,7 @@ def fit(
 
         # step learning rate scheduler on each epoch end
         if isinstance(self._optimizer._learning_rate,
-                      paddle.optimizer._LRScheduler):
+                      paddle.optimizer.lr.LRScheduler):
             self._optimizer._learning_rate.step()
 
         cbks.on_end('train', logs)

diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 87325a002a59d..63087922be7d5 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -616,9 +616,9 @@ def make_optimizer(parameters=None):
             weight_decay = 5e-4
             boundaries = [5, 8]
             values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
-            learning_rate = paddle.optimizer.PiecewiseLR(
+            learning_rate = paddle.optimizer.lr.PiecewiseDecay(
                 boundaries=boundaries, values=values)
-            learning_rate = paddle.optimizer.LinearLrWarmup(
+            learning_rate = paddle.optimizer.lr.LinearWarmup(
                 learning_rate=learning_rate,
                 warmup_steps=4,
                 start_lr=base_lr / 5.,
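
Taken together, the series makes Model.fit() call step() on the optimizer's learning rate scheduler once at the end of every epoch, guarded by an isinstance check so plain float learning rates are left untouched. Below is a minimal usage sketch against the final API names from PATCH 7 (paddle.optimizer.lr.PiecewiseDecay, paddle.optimizer.lr.LinearWarmup); the toy linear network and random dataset are illustrative stand-ins, not code from the patches.

import numpy as np
import paddle
from paddle.static import InputSpec


# Random (feature, label) pairs, mirroring the MyDataset in the unittest.
class RandomDataset(paddle.io.Dataset):
    def __getitem__(self, idx):
        return np.random.random(size=(20,)).astype(np.float32), \
            np.random.randint(0, 10, size=(1,)).astype(np.int64)

    def __len__(self):
        return 40


base_lr = 1e-3
# Piecewise decay at boundaries 5 and 8, wrapped in a linear warmup,
# as in the test's make_optimizer().
scheduler = paddle.optimizer.lr.PiecewiseDecay(
    boundaries=[5, 8], values=[base_lr, base_lr * 0.1, base_lr * 0.01])
scheduler = paddle.optimizer.lr.LinearWarmup(
    learning_rate=scheduler, warmup_steps=4, start_lr=base_lr / 5.,
    end_lr=base_lr)

net = paddle.nn.Linear(20, 10)  # stand-in for the test's MyModel
optim = paddle.optimizer.Momentum(
    learning_rate=scheduler, momentum=0.9, parameters=net.parameters())

model = paddle.Model(net,
                     inputs=[InputSpec([None, 20], 'float32', 'x')],
                     labels=[InputSpec([None, 1], 'int64', 'label')])
model.prepare(optimizer=optim, loss=paddle.nn.CrossEntropyLoss())

# Because optim._learning_rate is a paddle.optimizer.lr.LRScheduler,
# fit() now steps the scheduler once at the end of each epoch.
model.fit(RandomDataset(), batch_size=4, epochs=10)

Since the scheduler is stepped per epoch rather than per batch, the warmup_steps and boundaries above are effectively counted in epochs, which is what makes boundaries=[5, 8] behave as epoch numbers.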