0.5.0
None / linear (default)
softmax
elu
selu
softplus
softsign
relu
tanh
sigmoid
hard_sigmoid
exponential
zero / zeros / Zeros
one / ones / Ones
constant / Constant
normal / random_normal / RandomNormal
uniform / random_uniform / RandomUniform
truncated_normal / TruncatedNormal
orthogonal / Orthogonal
identity / Identity
glorot_normal
glorot_uniform
he_normal
he_uniform
lecun_normal
lecun_uniform
None (default)
maxnorm / max_norm / MaxNorm
nonneg / non_neg / NonNeg
unitnorm / unit_norm / UnitNorm
min_max_norm / MinMaxNorm
Core -- Dense
Core -- Activation
Core -- Dropout
Core -- Flatten
Core -- Reshape
Core -- Permute
Core -- RepeatVector
Core -- ActivityRegularization
Core -- Masking
Core -- SpatialDropout1D
Core -- SpatialDropout2D
Core -- SpatialDropout3D
Convolutional -- Conv1D
Convolutional -- Conv2D
Convolutional -- SeparableConv1D
Convolutional -- SeparableConv2D
Convolutional -- DepthwiseConv2D
Convolutional -- Conv2DTranspose
Convolutional -- Conv3D
Convolutional -- Conv3DTranspose
Convolutional -- Cropping1D
Convolutional -- Cropping2D
Convolutional -- Cropping3D
Convolutional -- UpSampling1D
Convolutional -- UpSampling2D
Convolutional -- UpSampling3D
Convolutional -- ZeroPadding1D
Convolutional -- ZeroPadding2D
Convolutional -- ZeroPadding3D
Pooling -- MaxPooling1D
Pooling -- MaxPooling2D
Pooling -- MaxPooling3D
Pooling -- AveragePooling1D
Pooling -- AveragePooling2D
Pooling -- AveragePooling3D
Pooling -- GlobalMaxPooling1D
Pooling -- GlobalAveragePooling1D
Pooling -- GlobalMaxPooling2D
Pooling -- GlobalAveragePooling2D
Pooling -- GlobalMaxPooling3D
Pooling -- GlobalAveragePooling3D
Locally_connected -- LocallyConnected1D
Locally_connected -- LocallyConnected2D
Recurrent -- SimpleRNN
Recurrent -- GRU
Recurrent -- LSTM
Recurrent -- ConvLSTM2D
Recurrent -- ConvLSTM2DCell
Recurrent -- SimpleRNNCell
Recurrent -- GRUCell
Recurrent -- LSTMCell
Recurrent -- CuDNNGRU
Recurrent -- CuDNNLSTM
Embedding -- Embedding
Advanced activations -- LeakyReLU
Advanced activations -- PReLU
Advanced activations -- ELU
Advanced activations -- ThresholdedReLU
Advanced activations -- Softmax
Advanced activations -- ReLU
Normalization -- BatchNormalization
Noise -- GaussianNoise
Noise -- GaussianDropout
Noise -- AlphaDropout
Merge -- Add
Merge -- Subtract
Merge -- Multiply
Merge -- Average
Merge -- Maximum
Merge -- Minimum
Merge -- Concatenate
Merge -- Dot
channels_last
channels_first
float32
float64
int32
int64
channels_last - inputs with shape (batch, steps, channels)
channels_first - inputs with shape (batch, channels, steps)
channels_last - inputs with shape (batch, height, width, channels)
channels_first - inputs with shape (batch, channels, height, width)
channels_last - inputs with shape (batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)
channels_first - inputs with shape (batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)
channels_last - inputs with shape (batch, steps, channels)
channels_first - inputs with shape (batch, channels, steps)
channels_last - inputs with shape (batch, height, width, channels)
channels_first - inputs with shape (batch, channels, height, width)
channels_last - inputs with shape (batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)
channels_first - inputs with shape (batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)
None
EarlyStopping -- Stop training when a monitored quantity has stopped improving
TerminateOnNaN -- Terminates training when a NaN loss is encountered.
ReduceLROnPlateau -- Reduce learning rate when a metric has stopped improving
CSVLogger -- Streams epoch results to a csv file
val_loss
loss
auto -- the direction is automatically inferred from the name of the monitored quantity
min -- training will stop when the quantity monitored has stopped decreasing
max -- training will stop when the quantity monitored has stopped increasing
hg38
hg19
binary_crossentropy
mse / MSE / mean_squared_error
mae / MAE / mean_absolute_error
mape / MAPE / mean_absolute_percentage_error
msle / MSLE / mean_squared_logarithmic_error
squared_hinge
hinge
categorical_hinge
logcosh
categorical_crossentropy
sparse_categorical_crossentropy
kld / KLD / kullback_leibler_divergence
poisson
cosine / cosine_proximity
SGD - Stochastic gradient descent optimizer
RMSprop - RMSProp optimizer
Adagrad - Adagrad optimizer
Adadelta - Adadelta optimizer
Adam - Adam optimizer
Adamax - A variant of Adam based on the infinity norm
Nadam - Nesterov Adam optimizer
acc / accuracy
binary_accuracy
categorical_accuracy
sparse_categorical_accuracy
mse / MSE / mean_squared_error
mae / MAE / mean_absolute_error
mape / MAPE / mean_absolute_percentage_error
cosine_proximity
cosine
none
@misc{chollet2015keras,
title={Keras},
url={https://keras.io},
author={Chollet, Fran\c{c}ois and others},
year={2015},
howpublished={https://keras.io},
}
@misc{tensorflow2015-whitepaper,
title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
url={https://www.tensorflow.org/},
note={Software available from tensorflow.org},
author={
Mart\'{\i}n~Abadi and
Ashish~Agarwal and
Paul~Barham and
Eugene~Brevdo and
Zhifeng~Chen and
Craig~Citro and
Greg~S.~Corrado and
Andy~Davis and
Jeffrey~Dean and
Matthieu~Devin and
Sanjay~Ghemawat and
Ian~Goodfellow and
Andrew~Harp and
Geoffrey~Irving and
Michael~Isard and
Yangqing~Jia and
Rafal~Jozefowicz and
Lukasz~Kaiser and
Manjunath~Kudlur and
Josh~Levenberg and
Dandelion~Man\'{e} and
Rajat~Monga and
Sherry~Moore and
Derek~Murray and
Chris~Olah and
Mike~Schuster and
Jonathon~Shlens and
Benoit~Steiner and
Ilya~Sutskever and
Kunal~Talwar and
Paul~Tucker and
Vincent~Vanhoucke and
Vijay~Vasudevan and
Fernanda~Vi\'{e}gas and
Oriol~Vinyals and
Pete~Warden and
Martin~Wattenberg and
Martin~Wicke and
Yuan~Yu and
Xiaoqiang~Zheng},
year={2015},
}