Class-based activation functions (#126)
* Implementation of activation_function class for 1d activations

* 3d activations implemented using activation_function type

* get_name function added to the activation_function type

* Activation_function instances are now passed to constructors

* Removal of redundant use statements

* Small fix to make the test build

* Tidy up and formatting

* Formatting

* Set alpha defaults from Keras

* Enable leaky ReLU

* Add tests for setting alpha values to parametric activations (ELU and leaky ReLU)

* Bump version

---------

Co-authored-by: milancurcic <[email protected]>
ggoyman and milancurcic authored Apr 3, 2023
1 parent f328c8d commit d052bce
Showing 17 changed files with 768 additions and 685 deletions.
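Taken together, the bullets above outline a move from string-named activations to a polymorphic type: the 1-d and 3-d code paths share one derived type, each activation reports its name through get_name, and layer constructors receive instances rather than strings. A minimal sketch of what such an abstract type could look like follows; only activation_function and get_name are named in the commit message, so every other identifier and interface here is an assumption:

module nf_activation_sketch

  ! Hypothetical sketch of the class-based design described in the commit
  ! message. Only activation_function and get_name appear in the message;
  ! the deferred procedure names and their interfaces are assumptions.
  implicit none

  private
  public :: activation_function

  type, abstract :: activation_function
  contains
    procedure(eval_i), deferred :: eval            ! apply the activation
    procedure(eval_i), deferred :: eval_prime      ! its derivative, for backprop
    procedure(get_name_i), deferred :: get_name    ! human-readable name
  end type activation_function

  abstract interface

    pure function eval_i(self, x) result(res)
      import :: activation_function
      class(activation_function), intent(in) :: self
      real, intent(in) :: x(:)
      real :: res(size(x))
    end function eval_i

    pure function get_name_i(self) result(name)
      import :: activation_function
      class(activation_function), intent(in) :: self
      character(:), allocatable :: name
    end function get_name_i

  end interface

end module nf_activation_sketch

Concrete activations such as relu, softmax, or leaky_relu would then extend this type, implement the deferred procedures, and carry any hyperparameters (for example alpha) as components.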
CMakeLists.txt (3 changes: 1 addition & 2 deletions)
@@ -24,8 +24,7 @@ include(cmake/json.cmake)
 # library to archive (libneural.a)
 add_library(neural
   src/nf.f90
-  src/nf/nf_activation_1d.f90
-  src/nf/nf_activation_3d.f90
+  src/nf/nf_activation.f90
   src/nf/nf_base_layer.f90
   src/nf/nf_conv2d_layer.f90
   src/nf/nf_conv2d_layer_submodule.f90
example/cnn_mnist.f90 (10 changes: 5 additions & 5 deletions)
@@ -2,7 +2,7 @@ program cnn_mnist
 
   use nf, only: network, sgd, &
     input, conv2d, maxpool2d, flatten, dense, reshape, &
-    load_mnist, label_digits
+    load_mnist, label_digits, softmax, relu
 
   implicit none
 
@@ -24,11 +24,11 @@ program cnn_mnist
   net = network([ &
     input(784), &
     reshape([1,28,28]), &
-    conv2d(filters=8, kernel_size=3, activation='relu'), &
+    conv2d(filters=8, kernel_size=3, activation=relu()), &
     maxpool2d(pool_size=2), &
-    conv2d(filters=16, kernel_size=3, activation='relu'), &
+    conv2d(filters=16, kernel_size=3, activation=relu()), &
     maxpool2d(pool_size=2), &
-    dense(10, activation='softmax') &
+    dense(10, activation=softmax()) &
   ])
 
   call net % print_info()
@@ -67,4 +67,4 @@ real function accuracy(net, x, y)
   accuracy = real(good) / size(x, dim=2)
 end function accuracy
 
-end program cnn_mnist
\ No newline at end of file
+end program cnn_mnist
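Because constructors now receive instances rather than name strings, a parametric activation can carry its own hyperparameters. A hypothetical usage sketch: the commit message says alpha defaults follow Keras, which would be 0.3 for leaky ReLU and 1.0 for ELU, but the alpha keyword argument shown here is an assumption:

program parametric_example
  use nf, only: network, input, dense, leaky_relu, elu
  implicit none
  type(network) :: net

  net = network([ &
    input(784), &
    dense(64, activation=leaky_relu(alpha=0.1)), &  ! override the assumed 0.3 default
    dense(10, activation=elu()) &                   ! assumed Keras default, alpha = 1.0
  ])

  call net % print_info()

end program parametric_example

Per-instance hyperparameters like this are awkward to express through a character-string API, which is presumably what motivated the change.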
fpm.toml (2 changes: 1 addition & 1 deletion)
@@ -1,5 +1,5 @@
 name = "neural-fortran"
-version = "0.11.0"
+version = "0.12.0"
 license = "MIT"
 author = "Milan Curcic"
 maintainer = "[email protected]"
src/nf.f90 (3 changes: 3 additions & 0 deletions)
@@ -6,4 +6,7 @@ module nf
     conv2d, dense, flatten, input, maxpool2d, reshape
   use nf_network, only: network
   use nf_optimizers, only: sgd
+  use nf_activation, only: activation_function, elu, exponential, &
+    gaussian, linear, relu, leaky_relu, &
+    sigmoid, softmax, softplus, step, tanhf
 end module nf
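Exporting the type itself lets downstream constructors accept any concrete activation polymorphically. Building on the sketch above, a hypothetical illustration of how a constructor might store the instance it is given; dense_config and the component names are invented for illustration:

module nf_dense_sketch
  use nf_activation_sketch, only: activation_function  ! the sketch module above
  implicit none

  type :: dense_config
    integer :: layer_size
    ! Polymorphic component: holds any concrete activation subtype.
    class(activation_function), allocatable :: activation
  end type dense_config

contains

  function dense(layer_size, activation) result(res)
    integer, intent(in) :: layer_size
    class(activation_function), intent(in) :: activation
    type(dense_config) :: res
    res % layer_size = layer_size
    ! Sourced allocation copies the instance, preserving its dynamic type
    ! and any hyperparameters (such as alpha) it carries.
    allocate(res % activation, source=activation)
  end function dense

end module nf_dense_sketch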