| 1 | // This file is part of OpenCV project. |
| 2 | // It is subject to the license terms in the LICENSE file found in the top-level directory |
| 3 | // of this distribution and at http://opencv.org/license.html. |
| 4 | |
| 5 | #include "precomp.hpp" |
| 6 | |
| 7 | namespace cv { |
| 8 | namespace dnn { |
| 9 | CV__DNN_INLINE_NS_BEGIN |
| 10 | |
| 11 | |
| 12 | Layer::Layer() { preferableTarget = DNN_TARGET_CPU; } |
| 13 | |
| 14 | Layer::Layer(const LayerParams& params) |
| 15 | : blobs(params.blobs) |
| 16 | , name(params.name) |
| 17 | , type(params.type) |
| 18 | { |
| 19 | preferableTarget = DNN_TARGET_CPU; |
| 20 | } |
| 21 | |
| 22 | void Layer::setParamsFrom(const LayerParams& params) |
| 23 | { |
| 24 | blobs = params.blobs; |
| 25 | name = params.name; |
| 26 | type = params.type; |
| 27 | } |
| 28 | |
| 29 | int Layer::inputNameToIndex(String) |
| 30 | { |
| 31 | return -1; |
| 32 | } |
| 33 | |
| 34 | int Layer::outputNameToIndex(const String&) |
| 35 | { |
| 36 | return 0; |
| 37 | } |
| 38 | |
| 39 | bool Layer::supportBackend(int backendId) |
| 40 | { |
| 41 | return backendId == DNN_BACKEND_OPENCV; |
| 42 | } |
| 43 | |
| 44 | Ptr<BackendNode> Layer::initCUDA( |
| 45 | void*, |
| 46 | const std::vector<Ptr<BackendWrapper>>&, |
| 47 | const std::vector<Ptr<BackendWrapper>>&) |
| 48 | { |
| 49 | CV_Error(Error::StsNotImplemented, "CUDA pipeline of " + type + " layers is not defined." ); |
| 50 | return Ptr<BackendNode>(); |
| 51 | } |
| 52 | |
| 53 | Ptr<BackendNode> Layer::initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs, |
| 54 | std::vector<Ptr<BackendWrapper> > &outputs) |
| 55 | { |
| 56 | CV_Error(Error::StsNotImplemented, "VkCom pipeline of " + type + " layers is not defined." ); |
| 57 | return Ptr<BackendNode>(); |
| 58 | } |
| 59 | |
| 60 | Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper>>&) |
| 61 | { |
| 62 | CV_Error(Error::StsNotImplemented, "Halide pipeline of " + type + " layers is not defined." ); |
| 63 | return Ptr<BackendNode>(); |
| 64 | } |
| 65 | |
| 66 | Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper>>& inputs, const std::vector<Ptr<BackendNode>>& nodes) |
| 67 | { |
| 68 | CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type + " layers is not defined." ); |
| 69 | return Ptr<BackendNode>(); |
| 70 | } |
| 71 | |
| 72 | Ptr<BackendNode> Layer::initWebnn(const std::vector<Ptr<BackendWrapper>>& inputs, const std::vector<Ptr<BackendNode>>& nodes) |
| 73 | { |
| 74 | CV_Error(Error::StsNotImplemented, "WebNN pipeline of " + type + " layers is not defined." ); |
| 75 | return Ptr<BackendNode>(); |
| 76 | } |
| 77 | |
| 78 | Ptr<BackendNode> Layer::initTimVX(void* timVxInfo, |
| 79 | const std::vector<Ptr<BackendWrapper> > & inputsWrapper, |
| 80 | const std::vector<Ptr<BackendWrapper> > & outputsWrapper, |
| 81 | bool isLast) |
| 82 | { |
| 83 | CV_Error(Error::StsNotImplemented, "TimVX pipeline of " + type + |
| 84 | " layers is not defined." ); |
| 85 | return Ptr<BackendNode>(); |
| 86 | } |
| 87 | |
| 88 | Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputs, |
| 89 | const std::vector<Ptr<BackendWrapper> > &outputs, |
| 90 | const std::vector<Ptr<BackendNode> >& nodes) |
| 91 | { |
| 92 | CV_Error(Error::StsNotImplemented, "CANN pipeline of " + type + " layers is not defined." ); |
| 93 | return Ptr<BackendNode>(); |
| 94 | } |
| 95 | |
| 96 | Ptr<BackendNode> Layer::tryAttach(const Ptr<BackendNode>& node) |
| 97 | { |
| 98 | return Ptr<BackendNode>(); |
| 99 | } |
| 100 | |
| 101 | bool Layer::setActivation(const Ptr<ActivationLayer>&) { return false; } |
| 102 | bool Layer::tryFuse(Ptr<Layer>&) { return false; } |
| 103 | void Layer::getScaleShift(Mat& scale, Mat& shift) const |
| 104 | { |
| 105 | scale = Mat(); |
| 106 | shift = Mat(); |
| 107 | } |
| 108 | |
| 109 | void Layer::getScaleZeropoint(float& scale, int& zeropoint) const |
| 110 | { |
| 111 | scale = 1.f; |
| 112 | zeropoint = 0; |
| 113 | } |
| 114 | |
| 115 | void Layer::unsetAttached() |
| 116 | { |
| 117 | setActivation(Ptr<ActivationLayer>()); |
| 118 | } |
| 119 | |
// Build a vector of raw pointers aliasing the elements of @p v.
// The pointers stay valid only while @p v is neither resized nor destroyed.
template <typename T>
static void vecToPVec(const std::vector<T>& v, std::vector<T*>& pv)
{
    pv.clear();
    pv.reserve(v.size());
    for (const T& elem : v)
        pv.push_back(const_cast<T*>(&elem));
}
| 127 | |
| 128 | void Layer::finalize(const std::vector<Mat>& inputs, std::vector<Mat>& outputs) |
| 129 | { |
| 130 | CV_TRACE_FUNCTION(); |
| 131 | this->finalize(inputs: (InputArrayOfArrays)inputs, outputs: (OutputArrayOfArrays)outputs); |
| 132 | } |
| 133 | |
| 134 | void Layer::finalize(const std::vector<Mat*>& input, std::vector<Mat>& output) |
| 135 | { |
| 136 | CV_UNUSED(input); |
| 137 | CV_UNUSED(output); |
| 138 | } |
| 139 | |
| 140 | void Layer::finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) |
| 141 | { |
| 142 | CV_TRACE_FUNCTION(); |
| 143 | std::vector<Mat> inputs, outputs; |
| 144 | inputs_arr.getMatVector(mv&: inputs); |
| 145 | outputs_arr.getMatVector(mv&: outputs); |
| 146 | |
| 147 | std::vector<Mat*> inputsp; |
| 148 | vecToPVec(v: inputs, pv&: inputsp); |
| 149 | this->finalize(input: inputsp, output&: outputs); |
| 150 | } |
| 151 | |
| 152 | std::vector<Mat> Layer::finalize(const std::vector<Mat>& inputs) |
| 153 | { |
| 154 | CV_TRACE_FUNCTION(); |
| 155 | |
| 156 | std::vector<Mat> outputs; |
| 157 | this->finalize(inputs, outputs); |
| 158 | return outputs; |
| 159 | } |
| 160 | |
| 161 | void Layer::forward(std::vector<Mat*>& input, std::vector<Mat>& output, std::vector<Mat>& internals) |
| 162 | { |
| 163 | // We kept this method for compatibility. DNN calls it now only to support users' implementations. |
| 164 | } |
| 165 | |
| 166 | void Layer::forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) |
| 167 | { |
| 168 | CV_TRACE_FUNCTION(); |
| 169 | CV_TRACE_ARG_VALUE(name, "name" , name.c_str()); |
| 170 | |
| 171 | Layer::forward_fallback(inputs: inputs_arr, outputs: outputs_arr, internals: internals_arr); |
| 172 | } |
| 173 | |
| 174 | void Layer::forward_fallback(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) |
| 175 | { |
| 176 | CV_TRACE_FUNCTION(); |
| 177 | CV_TRACE_ARG_VALUE(name, "name" , name.c_str()); |
| 178 | |
| 179 | if (preferableTarget == DNN_TARGET_OPENCL_FP16 && inputs_arr.depth() == CV_16F) |
| 180 | { |
| 181 | std::vector<UMat> inputs; |
| 182 | std::vector<UMat> outputs; |
| 183 | std::vector<UMat> internals; |
| 184 | |
| 185 | std::vector<UMat> orig_inputs; |
| 186 | std::vector<UMat> orig_outputs; |
| 187 | std::vector<UMat> orig_internals; |
| 188 | |
| 189 | inputs_arr.getUMatVector(umv&: orig_inputs); |
| 190 | outputs_arr.getUMatVector(umv&: orig_outputs); |
| 191 | internals_arr.getUMatVector(umv&: orig_internals); |
| 192 | |
| 193 | inputs.resize(new_size: orig_inputs.size()); |
| 194 | for (size_t i = 0; i < orig_inputs.size(); i++) |
| 195 | orig_inputs[i].convertTo(m: inputs[i], CV_32F); |
| 196 | |
| 197 | outputs.resize(new_size: orig_outputs.size()); |
| 198 | for (size_t i = 0; i < orig_outputs.size(); i++) |
| 199 | outputs[i].create(sizes: shape(mat: orig_outputs[i]), CV_32F); |
| 200 | |
| 201 | internals.resize(new_size: orig_internals.size()); |
| 202 | for (size_t i = 0; i < orig_internals.size(); i++) |
| 203 | internals[i].create(sizes: shape(mat: orig_internals[i]), CV_32F); |
| 204 | |
| 205 | forward(inputs_arr: inputs, outputs_arr: outputs, internals_arr: internals); |
| 206 | |
| 207 | for (size_t i = 0; i < outputs.size(); i++) |
| 208 | outputs[i].convertTo(m: orig_outputs[i], CV_16F); |
| 209 | |
| 210 | // sync results back |
| 211 | outputs_arr.assign(v: orig_outputs); |
| 212 | internals_arr.assign(v: orig_internals); |
| 213 | return; |
| 214 | } |
| 215 | std::vector<Mat> inpvec; |
| 216 | std::vector<Mat> outputs; |
| 217 | std::vector<Mat> internals; |
| 218 | |
| 219 | inputs_arr.getMatVector(mv&: inpvec); |
| 220 | outputs_arr.getMatVector(mv&: outputs); |
| 221 | internals_arr.getMatVector(mv&: internals); |
| 222 | |
| 223 | std::vector<Mat*> inputs(inpvec.size()); |
| 224 | for (int i = 0; i < inpvec.size(); i++) |
| 225 | inputs[i] = &inpvec[i]; |
| 226 | |
| 227 | this->forward(input&: inputs, output&: outputs, internals); |
| 228 | |
| 229 | // sync results back |
| 230 | outputs_arr.assign(v: outputs); |
| 231 | internals_arr.assign(v: internals); |
| 232 | } |
| 233 | |
| 234 | void Layer::run(const std::vector<Mat>& inputs, std::vector<Mat>& outputs, std::vector<Mat>& internals) |
| 235 | { |
| 236 | CV_TRACE_FUNCTION(); |
| 237 | |
| 238 | this->finalize(inputs, outputs); |
| 239 | this->forward(inputs_arr: inputs, outputs_arr: outputs, internals_arr: internals); |
| 240 | } |
| 241 | |
| 242 | bool Layer::tryQuantize(const std::vector<std::vector<float>>& scales, |
| 243 | const std::vector<std::vector<int>>& zeropoints, LayerParams& params) |
| 244 | { |
| 245 | return false; |
| 246 | } |
| 247 | |
| 248 | Layer::~Layer() {} |
| 249 | |
| 250 | bool Layer::getMemoryShapes(const std::vector<MatShape>& inputs, |
| 251 | const int requiredOutputs, |
| 252 | std::vector<MatShape>& outputs, |
| 253 | std::vector<MatShape>& internals) const |
| 254 | { |
| 255 | CV_Assert(inputs.size()); |
| 256 | outputs.assign(n: std::max(a: requiredOutputs, b: (int)inputs.size()), val: inputs[0]); |
| 257 | return false; |
| 258 | } |
| 259 | |
| 260 | bool Layer::updateMemoryShapes(const std::vector<MatShape>& inputs) |
| 261 | { |
| 262 | return true; |
| 263 | } |
| 264 | |
| 265 | CV__DNN_INLINE_NS_END |
| 266 | }} // namespace cv::dnn |
| 267 | |