Helper function to return a constructed PyTorch optimizer from torch.optim.
get_pycox_optim(
optimizer = "adam",
net,
rho = 0.9,
eps = 1e-08,
lr = 1,
weight_decay = 0,
learning_rate = 0.01,
lr_decay = 0,
betas = c(0.9, 0.999),
amsgrad = FALSE,
lambd = 1e-04,
alpha = 0.75,
t0 = 1e+06,
momentum = 0,
centered = TRUE,
etas = c(0.5, 1.2),
step_sizes = c(1e-06, 50),
dampening = 0,
nesterov = FALSE
)
optimizer :: (character(1))
Optimizer; see Details for the list of implemented methods.
net :: (torch.nn.modules.module.Module)
Network architecture; can be built with build_pytorch_net.
rho, lr :: (numeric(1))
Passed to adadelta.
eps :: (numeric(1))
Passed to all methods except asgd, rprop, and sgd.
weight_decay :: (numeric(1))
Passed to all methods except rprop and sparse_adam.
learning_rate :: (numeric(1))
Passed to all methods except adadelta.

lr_decay :: (numeric(1))
Passed to adagrad.
betas :: (numeric(2))
Passed to adam, adamax, adamw, and sparse_adam.
amsgrad :: (logical(1))
Passed to adam and adamw.
lambd, t0 :: (numeric(1))
Passed to asgd.
alpha :: (numeric(1))
Passed to asgd and rmsprop.
momentum :: (numeric(1))
Passed to rmsprop and sgd.
centered :: (logical(1))
Passed to rmsprop.
etas, step_sizes :: (numeric(2))
Passed to rprop.
dampening :: (numeric(1))
Passed to sgd.
nesterov :: (logical(1))
Passed to sgd.
Implemented methods (with their Python help pages) are:
"adadelta"
reticulate::py_help(torch$optim$Adadelta)
"adagrad"
reticulate::py_help(torch$optim$Adagrad)
"adam"
reticulate::py_help(torch$optim$Adam)
"adamax"
reticulate::py_help(torch$optim$Adamax)
"adamw"
reticulate::py_help(torch$optim$AdamW)
"asgd"
reticulate::py_help(torch$optim$ASGD)
"rmsprop"
reticulate::py_help(torch$optim$RMSprop)
"rprop"
reticulate::py_help(torch$optim$Rprop)
"sgd"
reticulate::py_help(torch$optim$SGD)
"sparse_adam"
reticulate::py_help(torch$optim$SparseAdam)