tiny_dnn 1.0.0
A header-only, dependency-free deep learning framework in C++11
node.h
/*
    Copyright (c) 2016, Taiga Nomi
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
    * Neither the name of the <organization> nor the
    names of its contributors may be used to endorse or promote products
    derived from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
    EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <algorithm>  // std::find_if, std::fill (used below)
#include <sstream>
#include <iomanip>
#include <memory>
#include <numeric>
#include <vector>
#include <set>
#include <queue>
#include <unordered_set>

#include "tiny_dnn/util/util.h"
#include "tiny_dnn/util/product.h"
#include "tiny_dnn/util/image.h"
#include "tiny_dnn/util/weight_init.h"
#include "tiny_dnn/optimizers/optimizer.h"

#include "tiny_dnn/activations/activation_function.h"
namespace tiny_dnn {

class node;
class layer;
class edge;

typedef node* nodeptr_t;
typedef std::shared_ptr<edge> edgeptr_t;

typedef layer* layerptr_t;

/**
 * base class of all kinds of tiny-dnn data
 **/
class node : public std::enable_shared_from_this<node> {
 public:
    node(serial_size_t in_size, serial_size_t out_size)
        : prev_(in_size), next_(out_size) {}
    virtual ~node() {}

    const std::vector<edgeptr_t>& prev() const { return prev_; }
    const std::vector<edgeptr_t>& next() const { return next_; }

    // index of the input slot holding edge e (prev_.size() if absent)
    serial_size_t prev_port(const edge& e) const {
        auto it = std::find_if(prev_.begin(), prev_.end(),
                               [&](edgeptr_t ep) { return ep.get() == &e; });
        return static_cast<serial_size_t>(std::distance(prev_.begin(), it));
    }

    // index of the output slot holding edge e (next_.size() if absent)
    serial_size_t next_port(const edge& e) const {
        auto it = std::find_if(next_.begin(), next_.end(),
                               [&](edgeptr_t ep) { return ep.get() == &e; });
        return static_cast<serial_size_t>(std::distance(next_.begin(), it));
    }

    std::vector<node*> prev_nodes() const;  // @todo refactor and remove this method
    std::vector<node*> next_nodes() const;  // @todo refactor and remove this method

 protected:
    node() = delete;

    friend void connect(layerptr_t head, layerptr_t tail,
                        serial_size_t head_index, serial_size_t tail_index);

    mutable std::vector<edgeptr_t> prev_;
    mutable std::vector<edgeptr_t> next_;
};

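A node, then, is just a pair of edge-slot arrays: one slot per input and one per output, filled in by connect(). A minimal sketch of the slot layout, assuming tiny_dnn is on the include path (the exact header location is an assumption):

#include <cassert>
#include "tiny_dnn/node.h"  // assumed path of this header

int main() {
    // a node with 2 input slots and 3 output slots; the edgeptr_t
    // slots start out null until connect() fills them
    tiny_dnn::node n(2, 3);
    assert(n.prev().size() == 2);
    assert(n.next().size() == 3);
}
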
/**
 * class containing input/output data
 **/
class edge {
 public:
    edge(node* prev, const shape3d& shape, vector_type vtype)
        : shape_(shape),
          vtype_(vtype),
          data_({vec_t(shape.size())}),
          grad_({vec_t(shape.size())}),
          prev_(prev) {}

    // accumulate the per-sample gradients into a single vector *dst
    void merge_grads(vec_t *dst) {
        dst->resize(grad_[0].size());
        std::fill(dst->begin(), dst->end(), static_cast<float_t>(0));

        // @todo consider adding parallelism
        for (size_t sample = 0, sample_count = grad_.size(); sample < sample_count; ++sample) {
            vectorize::reduce<float_t>(&grad_[sample][0], dst->size(), &(*dst)[0]);
        }
    }

    void clear_grads() {
        for (size_t sample = 0, sample_count = grad_.size(); sample < sample_count; ++sample) {
            std::fill(grad_[sample].begin(), grad_[sample].end(), static_cast<float_t>(0));
        }
    }

    tensor_t* get_data() {
        return &data_;
    }

    const tensor_t* get_data() const {
        return &data_;
    }

    tensor_t* get_gradient() {
        return &grad_;
    }

    const tensor_t* get_gradient() const {
        return &grad_;
    }

    const std::vector<node*>& next() const { return next_; }
    node* prev() { return prev_; }
    const node* prev() const { return prev_; }

    const shape3d& shape() const { return shape_; }
    vector_type vtype() const { return vtype_; }
    void add_next_node(node* next) { next_.push_back(next); }

 private:
    shape3d shape_;
    vector_type vtype_;
    tensor_t data_;            // one vec_t per sample
    tensor_t grad_;            // one vec_t per sample
    node* prev_;               // previous node, "producer" of this tensor
    std::vector<node*> next_;  // next nodes, "consumers" of this tensor
};

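Note that data_ and grad_ are tensor_t, i.e. one vec_t per sample in the current minibatch; merge_grads sums that sample axis away, with vectorize::reduce doing the element-wise accumulation. A minimal sketch of the same reduction in plain C++, with illustrative stand-ins for tiny_dnn's types:

#include <cstddef>
#include <vector>

using vec_t    = std::vector<float>;  // stand-in for tiny_dnn's vec_t
using tensor_t = std::vector<vec_t>;  // stand-in: one vec_t per sample

// element-wise sum over the sample axis, as edge::merge_grads does
void merge_grads(const tensor_t& grad, vec_t* dst) {
    dst->assign(grad[0].size(), 0.0f);
    for (const vec_t& sample : grad) {
        for (std::size_t i = 0; i < dst->size(); ++i) {
            (*dst)[i] += sample[i];
        }
    }
}
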
inline std::vector<node*> node::prev_nodes() const {
    // collect via std::set to deduplicate; note the result is ordered
    // by pointer value, not by port index
    std::set<node*> sets;
    for (auto& e : prev_) {
        if (e && e->prev()) sets.insert(e->prev());
    }
    return std::vector<node*>(sets.begin(), sets.end());
}

inline std::vector<node*> node::next_nodes() const {
    std::set<node*> sets;
    for (auto& e : next_) {
        if (e) {
            auto n = e->next();
            sets.insert(n.begin(), n.end());
        }
    }
    return std::vector<node*>(sets.begin(), sets.end());
}

template <typename T>
struct node_tuple {
    node_tuple(T l1, T l2) {
        nodes_.push_back(l1); nodes_.push_back(l2);
    }
    std::vector<T> nodes_;
};

// comma operators: collect two or more nodes into a node_tuple
template <typename T>
node_tuple<T*> operator , (T& l1, T& l2) {
    return node_tuple<T*>(&l1, &l2);
}

template <typename T>
node_tuple<std::shared_ptr<T>> operator , (std::shared_ptr<T> l1, std::shared_ptr<T> l2) {
    return node_tuple<std::shared_ptr<T>>(l1, l2);
}

template <typename T>
node_tuple<std::shared_ptr<T>> operator , (node_tuple<std::shared_ptr<T>> lhs, std::shared_ptr<T>& rhs) {
    lhs.nodes_.push_back(rhs);
    return lhs;
}

template <typename T>
node_tuple<T*> operator , (node_tuple<T*> lhs, T& rhs) {
    lhs.nodes_.push_back(&rhs);
    return lhs;
}

template <typename T, typename U>
inline std::shared_ptr<U>& operator << (std::shared_ptr<T>& lhs,
                                        std::shared_ptr<U>& rhs) {
    connect(lhs.get(), rhs.get());
    return rhs;
}

// fan-in: output port 0 of each tuple member feeds input port i of rhs
template <typename T, typename U>
inline U& operator << (const node_tuple<T>& lhs, U& rhs) {
    for (serial_size_t i = 0; i < static_cast<serial_size_t>(lhs.nodes_.size()); i++) {
        connect(&*lhs.nodes_[i], &*rhs, 0, i);
    }
    return rhs;
}

// fan-out: output port i of lhs feeds input port 0 of each tuple member
template <typename T, typename U>
inline const node_tuple<T>& operator << (U& lhs, const node_tuple<T>& rhs) {
    for (serial_size_t i = 0; i < static_cast<serial_size_t>(rhs.nodes_.size()); i++) {
        connect(&*lhs, &*rhs.nodes_[i], i, 0);
    }
    return rhs;
}

// raw-pointer tuple overloads of the same fan-in/fan-out connections
template <typename T, typename U>
inline U& operator << (const node_tuple<T*>& lhs, U& rhs) {
    for (serial_size_t i = 0; i < static_cast<serial_size_t>(lhs.nodes_.size()); i++) {
        connect(lhs.nodes_[i], &rhs, 0, i);
    }
    return rhs;
}

template <typename T, typename U>
inline const node_tuple<T*>& operator << (U& lhs, const node_tuple<T*>& rhs) {
    for (serial_size_t i = 0; i < static_cast<serial_size_t>(rhs.nodes_.size()); i++) {
        connect(&lhs, rhs.nodes_[i], i, 0);
    }
    return rhs;
}

} // namespace tiny_dnn
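Together, operator, and operator<< form a small graph-construction DSL: a parenthesized comma expression collects layers into a node_tuple, and << wires tuples to single nodes port by port. A sketch along the lines of tiny_dnn's documented graph example; the concrete layer names (input_layer, add, linear_layer) and construct_graph/network<graph> live elsewhere in the library and are assumptions here, not defined in this header:

#include "tiny_dnn/tiny_dnn.h"  // assumed umbrella header
using namespace tiny_dnn;
using namespace tiny_dnn::layers;      // assumed layer aliases
using namespace tiny_dnn::activation;  // assumed activation types

int main() {
    input_layer in1(shape3d(3, 1, 1));  // assumed layer types
    input_layer in2(shape3d(3, 1, 1));
    add added(2, 3);                    // 2 incoming nodes, 3 dims each
    linear_layer<relu> out(3);

    // (in1, in2) invokes operator, -> node_tuple<input_layer*>{&in1, &in2};
    // << then calls connect(&in1, &added, 0, 0) and connect(&in2, &added, 0, 1)
    (in1, in2) << added;
    added << out;

    network<graph> net;
    construct_graph(net, { &in1, &in2 }, { &out });
}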