Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[xdoctest] reformat example code with google style in No. 299 #56597

Merged — 8 commits merged on Aug 29, 2023
76 changes: 37 additions & 39 deletions python/paddle/incubate/optimizer/lbfgs.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,45 +76,43 @@ class LBFGS(Optimizer):
Examples:
.. code-block:: python

Liyulingyue marked this conversation as resolved.
Show resolved Hide resolved
import paddle
import numpy as np
from paddle.incubate.optimizer import LBFGS

paddle.disable_static()
np.random.seed(0)
np_w = np.random.rand(1).astype(np.float32)
np_x = np.random.rand(1).astype(np.float32)

inputs = [np.random.rand(1).astype(np.float32) for i in range(10)]
# y = 2x
targets = [2 * x for x in inputs]

class Net(paddle.nn.Layer):
def __init__(self):
super().__init__()
w = paddle.to_tensor(np_w)
self.w = paddle.create_parameter(shape=w.shape, dtype=w.dtype, default_initializer=paddle.nn.initializer.Assign(w))

def forward(self, x):
return self.w * x

net = Net()
opt = LBFGS(learning_rate=1, max_iter=1, max_eval=None, tolerance_grad=1e-07, tolerance_change=1e-09, history_size=100, line_search_fn='strong_wolfe', parameters=net.parameters())
def train_step(inputs, targets):
def closure():
outputs = net(inputs)
loss = paddle.nn.functional.mse_loss(outputs, targets)
print('loss: ', loss.item())
opt.clear_grad()
loss.backward()
return loss
opt.step(closure)


for input, target in zip(inputs, targets):
input = paddle.to_tensor(input)
target = paddle.to_tensor(target)
train_step(input, target)
>>> import paddle
>>> import numpy as np
>>> from paddle.incubate.optimizer import LBFGS

>>> paddle.disable_static()
>>> np.random.seed(0)
>>> np_w = np.random.rand(1).astype(np.float32)
>>> np_x = np.random.rand(1).astype(np.float32)

>>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)]
>>> # y = 2x
>>> targets = [2 * x for x in inputs]

>>> class Net(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... w = paddle.to_tensor(np_w)
... self.w = paddle.create_parameter(shape=w.shape, dtype=w.dtype, default_initializer=paddle.nn.initializer.Assign(w))
... def forward(self, x):
... return self.w * x

>>> net = Net()
>>> opt = LBFGS(learning_rate=1, max_iter=1, max_eval=None, tolerance_grad=1e-07, tolerance_change=1e-09, history_size=100, line_search_fn='strong_wolfe', parameters=net.parameters())
>>> def train_step(inputs, targets):
... def closure():
... outputs = net(inputs)
... loss = paddle.nn.functional.mse_loss(outputs, targets)
... print('loss: ', loss.item())
... opt.clear_grad()
... loss.backward()
... return loss
... opt.step(closure)

>>> for input, target in zip(inputs, targets):
... input = paddle.to_tensor(input)
... target = paddle.to_tensor(target)
... train_step(input, target)

"""

Expand Down