AdaGradDA
fun AdaGradDA(
    learningRate: Float = 0.1f,
    initialAccumulatorValue: Float = 0.01f,
    l1Strength: Float = 0.01f,
    l2Strength: Float = 0.01f,
    clipGradient: ClipGradientAction = NoClipGradient()
)
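Below is a minimal usage sketch, not part of the generated reference: it constructs AdaGradDA with the defaults documented above and passes it to a model's compile step. The layer sizes, loss, metric, and layer import paths are illustrative assumptions about a typical KotlinDL setup, not requirements of this optimizer.

import org.jetbrains.kotlinx.dl.api.core.Sequential
import org.jetbrains.kotlinx.dl.api.core.layer.core.Dense
import org.jetbrains.kotlinx.dl.api.core.layer.core.Input
import org.jetbrains.kotlinx.dl.api.core.loss.Losses
import org.jetbrains.kotlinx.dl.api.core.metric.Metrics
import org.jetbrains.kotlinx.dl.api.core.optimizer.AdaGradDA
import org.jetbrains.kotlinx.dl.api.core.optimizer.NoClipGradient

fun main() {
    // Optimizer configured with the constructor defaults shown above.
    val optimizer = AdaGradDA(
        learningRate = 0.1f,
        initialAccumulatorValue = 0.01f,
        l1Strength = 0.01f,
        l2Strength = 0.01f,
        clipGradient = NoClipGradient()
    )

    // Illustrative model: a small dense classifier (layer sizes are assumptions).
    val model = Sequential.of(
        Input(784),
        Dense(128),
        Dense(10)
    )

    model.use {
        // Loss and metric choices are assumptions, not mandated by AdaGradDA.
        it.compile(
            optimizer = optimizer,
            loss = Losses.SOFT_MAX_CROSS_ENTROPY_WITH_LOGITS,
            metric = Metrics.ACCURACY
        )
        // it.fit(...) and it.evaluate(...) would follow once a dataset is loaded.
    }
}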