Created April 27, 2022 01:43
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/imgproc/imgproc.hpp>
#include <onnxruntime_cxx_api.h>
///#include <cuda_provider_factory.h>
/////#include <tensorrt_provider_factory.h>
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>
// using namespace cv;
using namespace std;
int main()
{
    bool useCUDA{true};
    Ort::Env env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, "Detection");
    Ort::SessionOptions session_options;
    if (useCUDA)
    {
        auto *status = OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
        if (status)
        {
            std::cout << "Error adding cuda provider\n";
            return -1;
        }
        std::cout << "Added cuda provider\n";
    }
    Ort::AllocatorWithDefaultOptions allocator;
    Ort::Session session(env, "model_issue_11235.onnx", session_options);

    static constexpr const int width = 1000;
    static constexpr const int height = 1000;
    static constexpr const int channel = 3;
    std::array<int64_t, 4> input_shape_{1, height, width, channel};

    std::vector<int64_t> input_dims = session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
    std::vector<int64_t> output_dims = session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
    std::vector<const char *> input_node_names = {"input_tensor"};
    std::vector<const char *> output_node_names = {"detection_anchor_indices", "detection_boxes",
                                                   "detection_classes", "detection_multiclass_scores",
                                                   "detection_scores", "num_detections",
                                                   "raw_detection_boxes", "raw_detection_scores"};
    input_dims[0] = output_dims[0] = 1; // batch size = 1

    std::vector<Ort::Value> input_tensors;
    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
    std::array<uint8_t, width * height * channel> input_image_{};
    uint8_t *input = input_image_.data();
    std::random_device rnd;                // a source of machine-wide entropy
    std::default_random_engine eng(rnd()); // use it to seed the pseudo-random engine
    // uniform_int_distribution is not defined for uint8_t, so draw ints and narrow.
    std::uniform_int_distribution<int> uid1(0, 100);
    std::generate(input_image_.begin(), input_image_.end(),
                  [&] { return static_cast<uint8_t>(uid1(eng)); });
    input_tensors.push_back(Ort::Value::CreateTensor(memory_info, input, input_image_.size(),
                                                     input_shape_.data(), input_shape_.size()));
    std::vector<Ort::Value> output_tensors;
    output_tensors = session.Run(Ort::RunOptions{nullptr},
                                 input_node_names.data(),   // input names
                                 input_tensors.data(),      // input tensors
                                 input_tensors.size(),      // 1
                                 output_node_names.data(),  // output names
                                 output_node_names.size()); // 8
    std::cout << "output_tensors.size() " << output_tensors.size() << "\n";
    std::cout << "Done\n";
    return 0;
}
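
The gist only prints how many outputs came back. As a hedged sketch that is not part of the original code, a small helper along these lines could be used to inspect each returned Ort::Value; the name print_output_tensor is hypothetical, and treating the outputs as float tensors is an assumption (the detection outputs of TF2 object-detection exports are typically float).

// Hypothetical helper (not in the original gist): print the shape and the
// first few elements of one output tensor returned by session.Run().
// Assumes the tensor holds floats; use the matching type for other outputs.
#include <onnxruntime_cxx_api.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

void print_output_tensor(Ort::Value &value)
{
    auto info = value.GetTensorTypeAndShapeInfo();
    std::vector<int64_t> shape = info.GetShape();

    std::cout << "shape: [";
    for (size_t i = 0; i < shape.size(); ++i)
        std::cout << shape[i] << (i + 1 < shape.size() ? ", " : "");
    std::cout << "]\n";

    const float *data = value.GetTensorMutableData<float>(); // assumed float element type
    size_t count = info.GetElementCount();
    for (size_t i = 0; i < std::min<size_t>(count, 5); ++i)  // first few values only
        std::cout << data[i] << ' ';
    std::cout << '\n';
}

Inside main(), it could be called right before the final printout, e.g. for (auto &t : output_tensors) print_output_tensor(t);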