Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP] ResNet #21

Open
wants to merge 25 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .clang-format
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ IncludeCategories:
Priority: 1
# standard C++ headers
# TODO: add as needed
- Regex: ^<(algorithm|array|condition_variable|functional|iostream|map|memory|mutex|numeric|string|thread|vector)>$
- Regex: ^<(algorithm|array|condition_variable|functional|iomanip|iostream|map|memory|mutex|numeric|string|thread|type_traits|vector)>$
Priority: 2
# third party headers
- Regex: ^<stdtensor>
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
*.tar
/3rdparty
/bin
/build
/cmake-build
/examples/Makefile
/lib
Expand Down
3 changes: 2 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,9 @@ SET(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake)

FIND_PACKAGE(stdtensor REQUIRED)

ADD_DEFINITIONS(-Wfatal-errors)
ADD_DEFINITIONS(-Wall)
ADD_DEFINITIONS(-Werror)
ADD_DEFINITIONS(-Wfatal-errors)

INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/3rdparty/include)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include)
Expand Down
3 changes: 2 additions & 1 deletion examples/example_1.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#include <nn/ops>
#include <stdtensor>

#include <nn/ops>

struct examples {
const uint32_t n = 10;
const uint32_t c = 3;
Expand Down
3 changes: 2 additions & 1 deletion examples/example_mlp.cpp
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
#include <string>

#include <nn/ops>
#include <stdtensor>

#include <nn/ops>

#include "utils.hpp"

void example_mlp()
Expand Down
6 changes: 5 additions & 1 deletion examples/example_mnist.cpp
Original file line number Diff line number Diff line change
@@ -1,14 +1,17 @@
#include <cstdlib>

#include <string>

#include <nn/ops>

#ifdef USE_OPENCV
#include <opencv2/opencv.hpp>
# include <opencv2/opencv.hpp>
#endif

#include <stdtensor>

#include "utils.hpp"

void example_mnist() {}

int main()
Expand All @@ -25,6 +28,7 @@ int main()
int i = 0;
system("mkdir -p images");
for (auto im : t) {
UNUSED(im);
char name[20];
sprintf(name, "images/%d.png", ++i);
#ifdef USE_OPENCV
Expand Down
125 changes: 125 additions & 0 deletions examples/example_model_plain34.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
// #define STDNN_OPS_HAVE_CBLAS

#include <cstdio>
#include <cstdlib>

#include <algorithm>
#include <string>

#include <nn/models>

#include "utils.hpp"

// The 34-layer "plain" (no shortcut connections) baseline network from
// "Deep Residual Learning for Image Recognition" (He et al., 2015),
// i.e. ResNet-34 without the residual skips.
// image_order/filter_order select the tensor memory layouts (NHWC / RSCD).
template <typename image_order = nn::ops::nhwc,
          typename filter_order = nn::ops::rscd>
class plain34_model
{
    // Number of output classes (ImageNet-1k).
    const size_t logits = 1000;

    using relu = nn::ops::pointwise<nn::ops::relu>;
    using bn_layer = nn::layers::batch_norm<image_order, relu>;

    using flatten = nn::layers::flatten<1, 3>;
    using dense = nn::layers::dense<>;
    using softmax = nn::layers::activation<nn::ops::softmax>;

    // Stem: 7x7 convolution, stride 2, same padding, with bias + ReLU.
    auto conv1(int d) const
    {
        using layer = nn::layers::conv<image_order, filter_order, true, relu>;
        return layer(d, layer::ksize(7, 7), layer::padding_same(),
                     layer::stride(2, 2));
    }

    // Stem pooling: 3x3 max pool, stride 2, same padding.
    auto pool1() const
    {
        using layer = nn::layers::pool<nn::ops::pool_max, image_order>;
        return layer(layer::ksize(3, 3), layer::padding_same(),
                     layer::stride(2, 2));
    }

    // Global 7x7 mean pool before the classifier head.
    auto pool2() const
    {
        using layer = nn::layers::pool<nn::ops::pool_mean, image_order>;
        return layer(layer::ksize(7, 7));
    }

    // Body convolution: 3x3, bias-free (batch norm follows), stride s.
    auto conv(int d, int s) const
    {
        using layer = nn::layers::conv<image_order, filter_order, false>;
        return layer(d, layer::ksize(3, 3), layer::padding_same(),
                     layer::stride(s, s));
    }

    // Directory (tar) that holds the pre-trained parameter files.
    const std::string prefix_;

    // Returns a reader for the named parameter inside prefix_.
    // NOTE: no top-level const on the return — it would only prevent
    // moving the returned reader. Currently unused (WIP).
    auto p(const std::string &name) const
    {
        return nn::ops::readtar(prefix_, name);
    }

  public:
    // Expected input image height/width.
    const size_t h = 224;
    const size_t w = 224;

    // explicit: a path string should not silently convert into a model.
    explicit plain34_model(const std::string &prefix) : prefix_(prefix) {}

    // Runs the forward pass on a batch x of shape (n, h, w, c) for NHWC.
    // m is currently unused (WIP) — kept for interface stability.
    template <typename R>
    auto operator()(const ttl::tensor_ref<R, 4> &x, int m = 5) const
    {
        auto layers = nn::models::make_sequential()  //
                      << conv1(64)                   //
                      << pool1()                     //

                      // stage 1: 6 x [3x3 conv(64) + BN], stride 1
                      << conv(64, 1) << bn_layer()  //
                      << conv(64, 1) << bn_layer() << conv(64, 1) << bn_layer()
                      << conv(64, 1) << bn_layer() << conv(64, 1) << bn_layer()
                      << conv(64, 1) << bn_layer()

                      // stage 2: downsample (stride 2) then 7 x conv(128) + BN
                      << conv(128, 2) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //
                      << conv(128, 1) << bn_layer()  //

                      // stage 3: downsample then 11 x conv(256) + BN
                      << conv(256, 2) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //
                      << conv(256, 1) << bn_layer()  //

                      // stage 4: downsample then 5 x conv(512) + BN
                      << conv(512, 2) << bn_layer()  //
                      << conv(512, 1) << bn_layer()  //
                      << conv(512, 1) << bn_layer()  //
                      << conv(512, 1) << bn_layer()  //
                      << conv(512, 1) << bn_layer()  //
                      << conv(512, 1) << bn_layer()  //

                      // head: global mean pool -> flatten -> FC -> softmax
                      << pool2()         //
                      << flatten()       //
                      << dense(logits)   //
                      << softmax()       //
            ;

        auto y = layers(x);
        return y;
    }
};

// Builds the plain-34 model rooted at $HOME/var/models/resnet and runs a
// single forward pass on a zero-filled 1x224x224x3 input.
int main(int argc, char *argv[])
{
    // std::getenv returns nullptr when HOME is unset (e.g. in CI/cron);
    // constructing std::string from nullptr is undefined behavior, so
    // check before use.
    const char *home = std::getenv("HOME");
    if (home == nullptr) {
        std::fprintf(stderr, "HOME environment variable is not set\n");
        return 1;
    }
    const std::string prefix = std::string(home) + "/var/models/resnet";
    plain34_model model(prefix);
    const auto x = ttl::tensor<float, 4>(1, model.h, model.w, 3);
    const auto y = model(ref(x));
    PPRINT(x);
    PPRINT(*y);
    return 0;
}
Loading