Helper function that returns a constructed PyTorch optimizer from torch.optim.

Usage

get_pycox_optim(
  optimizer = "adam",
  net,
  rho = 0.9,
  eps = 1e-08,
  lr = 1,
  weight_decay = 0,
  learning_rate = 0.01,
  lr_decay = 0,
  betas = c(0.9, 0.999),
  amsgrad = FALSE,
  lambd = 1e-04,
  alpha = 0.75,
  t0 = 1e+06,
  momentum = 0,
  centered = TRUE,
  etas = c(0.5, 1.2),
  step_sizes = c(1e-06, 50),
  dampening = 0,
  nesterov = FALSE
)

Arguments

optimizer

(character(1))
Optimizer; see Details for the list of implemented methods.

net

(torch.nn.modules.module.Module)
Network architecture; can be built with build_pytorch_net.

rho, lr

(numeric(1))
Passed to adadelta.

lr_decay

(numeric(1))
Passed to adagrad.

eps

(numeric(1))
Passed to all methods except asgd, rprop, and sgd.

weight_decay

(numeric(1))
Passed to all methods except rprop and sparse_adam.

learning_rate

(numeric(1))
Passed to all methods except adadelta.

betas

(numeric(2))
Passed to adam, adamax, adamw, and sparse_adam.

amsgrad

(logical(1))
Passed to adam and adamw.

lambd, t0

(numeric(1))
Passed to asgd.

alpha

(numeric(1))
Passed to asgd and rmsprop.

momentum

(numeric(1))
Passed to rmsprop and sgd.

centered

(logical(1))
Passed to rmsprop.

etas, step_sizes

(numeric(2))
Passed to rprop.

dampening

(numeric(1))
Passed to sgd.

nesterov

(logical(1))
Passed to sgd.

Details

Implemented methods (with help pages) are

  • "adadelta"
    reticulate::py_help(torch$optim$Adadelta)

  • "adagrad"
    reticulate::py_help(torch$optim$Adagrad)

  • "adam"
    reticulate::py_help(torch$optim$Adam)

  • "adamax"
    reticulate::py_help(torch$optim$Adamax)

  • "adamw"
    reticulate::py_help(torch$optim$AdamW)

  • "asgd"
    reticulate::py_help(torch$optim$ASGD)

  • "rmsprop"
    reticulate::py_help(torch$optim$RMSprop)

  • "rprop"
    reticulate::py_help(torch$optim$Rprop)

  • "sgd"
    reticulate::py_help(torch$optim$SGD)

  • "sparse_adam"
    reticulate::py_help(torch$optim$SparseAdam)
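
Examples

A minimal sketch, not taken from the package itself: it assumes a Python torch
installation is reachable via reticulate, and the n_in/n_out arguments passed to
build_pytorch_net are illustrative only.

## Not run:
if (requireNamespace("reticulate", quietly = TRUE) &&
    reticulate::py_module_available("torch")) {

  # build a simple network architecture (dimensions are hypothetical)
  net <- build_pytorch_net(n_in = 10L, n_out = 1L)

  # construct an Adam optimizer over the network's parameters
  opt <- get_pycox_optim(
    optimizer = "adam",
    net = net,
    learning_rate = 0.01,
    betas = c(0.9, 0.999)
  )

  # browse the underlying PyTorch help page for the chosen method
  torch <- reticulate::import("torch")
  reticulate::py_help(torch$optim$Adam)
}
## End(Not run)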