[WIP] - Feature train orb #31

Draft · wants to merge 23 commits into base: dev
Changes from 15 commits
2 changes: 1 addition & 1 deletion build/jsfeatES6cpp.js

Large diffs are not rendered by default.

383 changes: 363 additions & 20 deletions build/jsfeatES6cpp_debug.js

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion build/jsfeatcpp.js

Large diffs are not rendered by default.

383 changes: 363 additions & 20 deletions build/jsfeatcpp_debug.js

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions emscripten/bindings.cpp
@@ -134,6 +134,7 @@ EMSCRIPTEN_BINDINGS(webarkit) {
// Extern jsfeat functions

function("load_jpeg_data", &load_jpeg_data);
function("train_orb_pattern", &train_orb_pattern);
function("yape06_detect", &yape06_detect);

};
134 changes: 134 additions & 0 deletions emscripten/webarkitJsfeat.cpp
@@ -7,6 +7,8 @@
#include <jsfeat.h>
#include <stdio.h>

#include <cmath>
#include <memory>
#include <string>
#include <vector>

@@ -87,6 +89,138 @@ emscripten::val load_jpeg_data(std::string filename) {
return out;
};

void train_orb_pattern_internal(const char* filename) {
char* ext;
char buf1[512], buf2[512];

AR2JpegImageT* jpegImage;

auto lev = 0, i = 0;
auto sc = 1.0;
auto max_pattern_size = 512;
auto max_per_level = 300;
auto sc_inc = std::sqrt(2.0); // magic number ;)
auto new_width = 0, new_height = 0;
// var lev_corners, lev_descr;
auto corners_num = 0;

// if (!filename) return emscripten::val::null();
ext = arUtilGetFileExtensionFromPath(filename, 1);
if (!ext) {
webarkitLOGe(
"Error: unable to determine extension of file '%s'. Exiting.\n",
filename);
}
if (strcmp(ext, "jpeg") == 0 || strcmp(ext, "jpg") == 0 ||
strcmp(ext, "jpe") == 0) {
webarkitLOGi("Waiting for the jpeg...");
webarkitLOGi("Reading JPEG file...");
ar2UtilDivideExt(filename, buf1, buf2);
jpegImage = ar2ReadJpegImage(buf1, buf2);
if (jpegImage == NULL) {
webarkitLOGe(
"Error: unable to read JPEG image from file '%s'. Exiting.\n",
filename);
}
webarkitLOGi(" Done.");

if (jpegImage->nc != 1 && jpegImage->nc != 3) {
ARLOGe(
"Error: Input JPEG image is in neither RGB nor grayscale format. "
"%d bytes/pixel %sformat is unsupported. Exiting.\n",
jpegImage->nc, (jpegImage->nc == 4 ? "(possibly CMYK) " : ""));
}
webarkitLOGi("JPEG image number of channels: '%d'", jpegImage->nc);
webarkitLOGi("JPEG image width is: '%d'", jpegImage->xsize);
webarkitLOGi("JPEG image height is: '%d'", jpegImage->ysize);
webarkitLOGi("JPEG image, dpi is: '%d'", jpegImage->dpi);

if (jpegImage->dpi == 0.0f) {
webarkitLOGw(
"JPEG image '%s' does not contain embedded resolution data, and no "
"resolution specified on command-line.",
filename);
}

} else if (strcmp(ext, "png") == 0) {
webarkitLOGe(
"Error: file has extension '%s', which is not supported for "
"reading. Exiting.\n",
ext);
free(ext);
}
webarkitLOGi("Image done!");

JSLOGi("Starting detection routine...");
@kalwalt (Owner, Author) commented on Nov 20, 2022:

These two print calls work; they print these messages:

Image done!
Starting detection routine...

but the print calls at the end of the code fail to print to the console; I would like to understand why this happens... see the comment above.


Orb orb;
Imgproc imgproc;
detectors::Detectors detectors;
auto width = jpegImage->xsize;
auto height = jpegImage->ysize;
std::unique_ptr<Matrix_t> lev0_img = std::make_unique<Matrix_t>(width, height, ComboTypes::U8C1_t);
std::unique_ptr<Matrix_t> lev_img = std::make_unique<Matrix_t>(width, height, ComboTypes::U8C1_t);
Array<std::unique_ptr<Matrix_t>> pattern_corners;

auto sc0 = std::min(max_pattern_size / height, max_pattern_size / width);
//new_width = (jpegImage->ysize * sc0) | 0;
//new_height = (jpegImage->xsize * sc0) | 0;
auto num_train_levels = 4;

JSLOGi("Converting the RGB image to GRAY...");

kalwalt (Owner, Author) commented:

Resampling is not needed in our case, because we provide the image at the right size. The code was taken from the jsfeat sample_orb example, and in that case we simply resampled the image taken from the canvas (webcam) to a smaller size. Anyway, the resample function has some issues; in fact, the console log "Image resampled, starting pyramid now..." cannot be printed with this function enabled. (Just comment it out and recompile to test.)
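For context, the scale step in jsfeat's sample_orb computes a target size from max_pattern_size before resampling. Below is a minimal C++ sketch of that arithmetic (illustrative names only, not part of this diff); note that the sc0 line above uses integer division, so for inputs larger than max_pattern_size it evaluates to 0:

// Sketch of the sample_orb-style scale computation. The actual resample into
// a smaller Matrix_t is intentionally omitted, since the input image is
// already provided at the desired size. (std::min needs <algorithm>.)
double sc0d = std::min(static_cast<double>(max_pattern_size) / height,
                       static_cast<double>(max_pattern_size) / width);
int target_w = static_cast<int>(width * sc0d);   // jsfeat uses (cols * sc0) | 0
int target_h = static_cast<int>(height * sc0d);  // and (rows * sc0) | 0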

imgproc.grayscale_internal<u_char, Matrix_t>(jpegImage->image, width, height, lev0_img.get(), Colors::COLOR_RGB2GRAY);

JSLOGi("Image converted to GRAY.");

Array<KeyPoints*> lev_corners(num_train_levels);
//Array<std::unique_ptr<KeyPoints>> lev_corners;
//Array<std::unique_ptr<Matrix_t>> pattern_descriptors;

for (lev = 0; lev < num_train_levels; ++lev) {
// what should we do with this code?
// pattern_corners[lev] = [];
// lev_corners = pattern_corners[lev];

// preallocate corners array
//i = (new_width * new_height) >> lev;
i = (width * height) >> lev;
std::cout << i << std::endl;
while (--i >= 0) {
lev_corners[lev]->set_size(i);
//lev_corners[lev]->allocate();
@kalwalt (Owner, Author) commented on Nov 27, 2022:

There is no need to allocate if we create the vector as Array<KeyPoints*> lev_corners(num_train_levels); (see the preallocation sketch after this loop).

//lev_corners[lev] = std::make_unique<KeyPoints>(i);
//lev_corners.push_back(std::unique_ptr<KeyPoints>(new KeyPoints(i)));
}
std::cout << "Num. of level: " << lev << std::endl;
kalwalt (Owner, Author) commented:

This prints 4 levels in the console: "Num. of level: 0", and so on...

//pattern_descriptors.push_back(std::unique_ptr<Matrix_t>(new Matrix_t(32, max_per_level, ComboTypes::U8C1_t)));
}
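As referenced in the comment above, here is a hedged sketch of one way to preallocate the per-level containers without raw pointers. It assumes Array supports push_back like std::vector (the commented-out line above already uses it) and relies on the set_size()/allocate() helpers added in keypoints.h further down; if Array value-initializes its elements like std::vector does, then Array<KeyPoints*> lev_corners(num_train_levels) only creates null pointers, and dereferencing lev_corners[lev] before assigning real objects is undefined behavior.

// Sketch only (not part of this diff): owning per-level keypoint storage.
// <memory> is already included at the top of this file.
Array<std::unique_ptr<KeyPoints>> lev_corners_sketch;
for (int lv = 0; lv < num_train_levels; ++lv) {
    int capacity = (width * height) >> lv;   // same sizing rule as the loop above
    auto kp = std::make_unique<KeyPoints>();
    kp->set_size(capacity);
    kp->allocate();                          // default-fill kpoints so indexing is safe
    lev_corners_sketch.push_back(std::move(kp));
}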

//std::cout << "Size of first lev_corners: " << lev_corners[0]->kpoints.size() << std::endl;
kalwalt (Owner, Author) commented:

This is not printed...


imgproc.gaussian_blur_internal(lev0_img.get(), lev_img.get(), 5, 2); // this is more robust

kalwalt (Owner, Author) commented:

This is OK; it is printed...

JSLOGi("After Gaussian blur");

corners_num = detectors.detect_keypoints(lev_img.get(), lev_corners[0], max_per_level);

// orb.describe(lev_img.get(), lev_corners[0], corners_num, lev_descr.get());
// This will probably work in the near future
// orb.describe(lev_img.get(), lev_corners[0], corners_num, &pattern_descriptors[0]);
@kalwalt (Owner, Author) commented on Nov 20, 2022:

orb.describe cannot be used here yet, because it takes a uintptr_t as its first parameter and an emscripten::val as its second, which cannot be handled here. I should create a new method in the Orb class:

orb.describe_internal(Matrix_t* mat, KeyPoints* kp, int num_corners, Matrix_t* descr)
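A rough sketch of the split described in this comment; everything below is an assumption rather than the actual Orb API: the parameter kinds of describe() are taken from the comment, the conversion of the emscripten::val argument is only one possibility (it presupposes KeyPoints is registered with embind), and the descriptor computation itself is omitted.

// Sketch only (not part of this diff): hypothetical split between an
// embind-facing describe() and a plain-C++ describe_internal().
class OrbSketch {
 public:
  // Proposed internal variant: plain pointers, no emscripten types.
  void describe_internal(Matrix_t* mat, KeyPoints* kp, int num_corners, Matrix_t* descr) {
    // ... rotated-BRIEF descriptor computation would go here (omitted) ...
  }

  // Embind-facing wrapper; parameter kinds follow the comment above.
  void describe(uintptr_t mat_ptr, emscripten::val corners, int num_corners, uintptr_t descr_ptr) {
    auto* mat = reinterpret_cast<Matrix_t*>(mat_ptr);
    auto* descr = reinterpret_cast<Matrix_t*>(descr_ptr);
    KeyPoints kp = corners.as<KeyPoints>();  // assumes KeyPoints is registered with embind
    describe_internal(mat, &kp, num_corners, descr);
  }
};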


// console.log("train " + lev_img.cols + "x" + lev_img.rows + " points: " + corners_num);
kalwalt (Owner, Author) commented:

...continuing from below: these two print calls instead do nothing. I will open an issue as a reminder.

std::cout << "Corners num: " << corners_num << std::endl;
//JSLOGi("Corners num: %i", corners_num);
//JSLOGi("train %i x %i points: %i\n", lev_img.get()->get_cols(), lev_img.get()->get_rows(), corners_num);
//std::cout << "train " << lev_img.get()->get_cols() << " x " << lev_img.get()->get_rows() << " points: " << corners_num << std::endl;
free(ext);
free(jpegImage);
};

void train_orb_pattern(std::string filename) {
train_orb_pattern_internal(filename.c_str());
}

emscripten::val yape06_detect(emscripten::val inputSrc, int w, int h) {
auto src = emscripten::convertJSArrayToNumberVector<u_char>(inputSrc);
Imgproc imgproc;
35 changes: 33 additions & 2 deletions examples/js/loader.js
@@ -5,11 +5,9 @@ var jpeg_count = 0;
export function loadJpeg(url, callback, errorCallback) {
var filename = '/load_jpeg_' + jpeg_count++ + '.jpg';
var writeCallback = function (errorCode) {
//if (!Module._loadCamera) {
if (!jsfeat.load_jpeg_data) {
if (callback) callback(id); setTimeout(writeCallback, 10);
} else {
//var id = Module._loadCamera(filename);
var id = jsfeat.load_jpeg_data(filename)
if (callback) callback(id);
}
@@ -37,6 +35,39 @@ export function loadJpeg(url, callback, errorCallback) {
}
}

export function trainOrbPattern(url, callback, errorCallback) {
var filename = '/load_jpeg_' + jpeg_count++ + '.jpg';
var writeCallback = function (errorCode) {
if (!jsfeat.train_orb_pattern) {
// wasm module not ready yet: no id is available, so just retry shortly
setTimeout(writeCallback, 10);
} else {
var id = jsfeat.train_orb_pattern(filename)
if (callback) callback(id);
}
};
if (typeof url === 'object') { // Maybe it's a byte array
writeByteArrayToFS(filename, url, writeCallback);
} else if (url.indexOf("\n") > -1) { // Or a string with the jpeg path
writeStringToFS(filename, url, writeCallback);
} else {
fetch(url)
.then(response => {
if (!response.ok) {
throw new Error('Network response was not OK');
}
return response.arrayBuffer();
})
.then(buff => {
let buffer = new Uint8Array(buff)
writeByteArrayToFS(filename, buffer, writeCallback);
})

.catch(error => {
errorCallback(error)
});
}
}

// transfer image

function writeStringToFS(target, string, callback) {
Expand Down
17 changes: 17 additions & 0 deletions examples/train_orb_example.html
@@ -0,0 +1,17 @@
<html>

<body>

<script type="module">
import jsfeatCpp from "./../build/jsfeatES6cpp_debug.js"
import { trainOrbPattern } from "./js/loader.js"

const jsfeat = await jsfeatCpp();

trainOrbPattern("pinball.jpg", () => {}, ()=>{});

</script>

</body>

</html>
7 changes: 7 additions & 0 deletions run_docker.txt
@@ -0,0 +1,7 @@
// before all:
git submodule update --init
// Assuming that you have the emscripten engine installed under Docker, you may run:
// for the first time:
docker exec emscripten ./build.sh emscripten-all
// and then, once WebARKitLib is compiled:
docker exec emscripten ./build.sh emscripten
84 changes: 84 additions & 0 deletions src/feature_detection/detectors.h
@@ -0,0 +1,84 @@
#ifndef DETECTORS_H
#define DETECTORS_H

#include <keypoint_t/keypoint_t.h>
#include <keypoints/keypoints.h>
#include <keypoints_filter/keypoints_filter.h>
#include <math/math.h>
#include <matrix_t/matrix_t.h>
#include <types/types.h>
#include <yape06/yape06.h>

namespace jsfeat {

namespace detectors {

class Detectors : public Yape06, public Math, public KeyPointsFilter {
public:
Detectors() {}
~Detectors() {}

int detect_keypoints(Matrix_t* img, KeyPoints* corners, int max_allowed) {
// detect features
auto kpc = detect_internal(img, corners, 17);
auto count = kpc.count;
std::cout << "here" << std::endl;
//std::cout << count << std::endl;
// sort by score and reduce the count if needed
if (count > max_allowed) {
// qsort_internal<KeyPoint_t, bool>(corners.kpoints, 0, count - 1, [](KeyPoint_t i, KeyPoint_t j){return (i.score < j.score);});
kalwalt (Owner, Author) commented:

I'm not sure about this; maybe it's better to use a slightly different approach. I'm looking at the OpenCV code in the ORB implementation, and there is another possibility.

kalwalt (Owner, Author) commented:

retainBest is taken from OpenCV, but I need to figure out whether this is correct (see the sketch after this if block).

retainBest(corners->kpoints, count);
count = max_allowed;
}
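This is the sketch referenced in the comment above: OpenCV's KeyPointsFilter::retainBest keeps only the N highest-scoring keypoints. A self-contained version of that idea, using a stand-in Kp struct rather than the project's KeyPoint_t:

#include <algorithm>
#include <cstddef>
#include <vector>

struct Kp { float score; };  // stand-in for KeyPoint_t

// Keep only the n_points highest-scoring keypoints (retainBest-style).
void retain_best_sketch(std::vector<Kp>& pts, std::size_t n_points) {
  if (n_points == 0 || n_points >= pts.size()) return;
  // Partition so the n_points best scores come first, then drop the rest.
  std::nth_element(pts.begin(), pts.begin() + n_points, pts.end(),
                   [](const Kp& a, const Kp& b) { return a.score > b.score; });
  pts.resize(n_points);
}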

// calculate dominant orientation for each keypoint
for (auto i = 0; i < count; ++i) {
corners->kpoints[i].angle = ic_angle(img, corners->kpoints[i].x, corners->kpoints[i].y);
}

//std::cout << count << std::endl;

return count;
}

private:
// function(a, b) { return (b.score < a.score); }
// bool myfunction(KeyPoint_t i, KeyPoint_t j) { return (i.score < j.score); }
// central difference using image moments to find dominant orientation
// var u_max = new Int32Array([15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3, 0]);
float ic_angle(Matrix_t* img, int px, int py) {
Array<u_int> u_max{15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3, 0};
auto half_k = 15; // half patch size
auto m_01 = 0, m_10 = 0;
auto src = img->u8;
auto step = img->get_cols();
auto u = 0, v = 0, center_off = (py * step + px) | 0;
auto v_sum = 0, d = 0, val_plus = 0, val_minus = 0;

// Treat the center line differently, v=0
for (u = -half_k; u <= half_k; ++u)
m_10 += u * src[center_off + u];

// Go line by line in the circular patch
for (v = 1; v <= half_k; ++v) {
// Proceed over the two lines
v_sum = 0;
d = u_max[v];
for (u = -d; u <= d; ++u) {
val_plus = src[center_off + u + v * step];
val_minus = src[center_off + u - v * step];
v_sum += (val_plus - val_minus);
m_10 += u * (val_plus + val_minus);
}
m_01 += v * v_sum;
}

return std::atan2(m_01, m_10);
}
};

} // namespace detectors

} // namespace jsfeat

#endif
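The ic_angle function above implements the intensity-centroid orientation: with patch moments m_pq = sum over the patch of x^p * y^q * I(x, y), the dominant orientation is atan2(m_01, m_10). A tiny standalone illustration (toy values, not part of this diff) on a 3x3 patch that is brighter on its right side:

#include <cmath>
#include <cstdio>

int main() {
  // Brighter on the right: the intensity centroid lies to the right of the
  // center, so the dominant orientation should come out as ~0 radians.
  const int patch[3][3] = {{10, 10, 200},
                           {10, 10, 200},
                           {10, 10, 200}};
  double m01 = 0.0, m10 = 0.0;
  for (int y = -1; y <= 1; ++y) {
    for (int x = -1; x <= 1; ++x) {
      m10 += x * patch[y + 1][x + 1];
      m01 += y * patch[y + 1][x + 1];
    }
  }
  std::printf("dominant orientation: %f rad\n", std::atan2(m01, m10));  // ~0.0
  return 0;
}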
2 changes: 2 additions & 0 deletions src/jsfeat.h
@@ -1,6 +1,8 @@
#include <feature_detection/detectors.h>
#include <imgproc/imgproc.h>
#include <keypoint_t/keypoint_t.h>
#include <keypoints/keypoints.h>
#include <keypoints_filter/keypoints_filter.h>
#include <matrix_smart/matrix_smart.h>
#include <matrix_t/matrix_t.h>
#include <orb/orb.h>
6 changes: 6 additions & 0 deletions src/keypoints/keypoints.h
@@ -35,6 +35,12 @@ class KeyPoints {
this->size = kp.size;
this->kpoints = kp.kpoints;
}

kalwalt (Owner, Author) commented:

I added the allocate function because, if you initialize a KeyPoints with the default constructor, it will not init the kpoints (see the usage sketch below).

auto allocate() {
KeyPoint_t kpt(0, 0, 0, 0, -1);
kpoints.assign(this->size, kpt);
}

auto get_size() const {return size; };

auto set_size(int size) { this->size = size; };
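A short usage sketch for the comment above, assuming kpoints, score, and the helpers shown here are public (as the detectors.h code above suggests):

// Sketch only: a default-constructed KeyPoints needs set_size() + allocate()
// before kpoints can be indexed safely.
KeyPoints corners;
corners.set_size(300);
corners.allocate();               // fills kpoints with 300 default KeyPoint_t entries
corners.kpoints[0].score = 0.0f;  // now safe to index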