diff --git a/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 b/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 new file mode 100644 index 0000000000..662c7368dc --- /dev/null +++ b/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 @@ -0,0 +1 @@ +I"{"source"=>"D:/001.Warkshop/005.blog/ahmohamed1.github.io", "destination"=>"D:/001.Warkshop/005.blog/ahmohamed1.github.io/_site", "collections_dir"=>"", "cache_dir"=>".jekyll-cache", "plugins_dir"=>"_plugins", "layouts_dir"=>"_layouts", "data_dir"=>"_data", "includes_dir"=>"_includes", "collections"=>{"posts"=>{"output"=>true, "permalink"=>"/:categories/:year/:month/:day/:title:output_ext"}}, "safe"=>false, "include"=>[".htaccess"], "exclude"=>[".sass-cache", ".jekyll-cache", "gemfiles", "Gemfile", "Gemfile.lock", "node_modules", "vendor/bundle/", "vendor/cache/", "vendor/gems/", "vendor/ruby/"], "keep_files"=>[".git", ".svn"], "encoding"=>"utf-8", "markdown_ext"=>"markdown,mkdown,mkdn,mkd,md", "strict_front_matter"=>false, "show_drafts"=>true, "limit_posts"=>0, "future"=>false, "unpublished"=>false, "whitelist"=>[], "plugins"=>["jekyll-feed", "jekyll-paginate", "jekyll-sitemap"], "markdown"=>"kramdown", "highlighter"=>"rouge", "lsi"=>false, "excerpt_separator"=>"\n\n", "incremental"=>false, "detach"=>false, "port"=>"4000", "host"=>"127.0.0.1", "baseurl"=>"", "show_dir_listing"=>false, "permalink"=>"date", "paginate_path"=>"/posts/page:num/", "timezone"=>nil, "quiet"=>false, "verbose"=>false, "defaults"=>[], "liquid"=>{"error_mode"=>"warn", "strict_filters"=>false, "strict_variables"=>false}, "kramdown"=>{"auto_ids"=>true, "toc_levels"=>[1, 2, 3, 4, 5, 6], "entity_output"=>"as_char", "smart_quotes"=>"lsquo,rsquo,ldquo,rdquo", "input"=>"GFM", "hard_wrap"=>false, "guess_lang"=>true, "footnote_nr"=>1, "show_warnings"=>false}, "title"=>"Ahmohamed", "email"=>"abdll1@hotmail.com", "description"=>"In this blog I add my projects", "author"=>"A.Mohamed", "url"=>"http://localhost:4000", "twitter_username"=>nil, "github_username"=>nil, "facebook_username"=>nil, "instagram_username"=>nil, "linkedin_username"=>nil, "google_analytics"=>"UA-XXXXXXXXX-X", "paginate"=>5, "livereload_port"=>35729, "serving"=>true, "watch"=>true}:ET \ No newline at end of file diff --git a/Gemfile b/Gemfile index 8cb9fd69d2..574190aab2 100644 --- a/Gemfile +++ b/Gemfile @@ -13,3 +13,7 @@ gem "tzinfo-data", platforms: [:mingw, :mswin, :x64_mingw, :jruby] # Performance-booster for watching directories on Windows gem "wdm", "~> 0.1.0" if Gem.win_platform? 
+ +gem "webrick", "~> 1.7" + +gem "jekyll-mathjax-csp" diff --git a/_config.yml b/_config.yml index fb6e996934..eb0a5beff1 100644 --- a/_config.yml +++ b/_config.yml @@ -1,14 +1,14 @@ -title: Clean Blog -email: your-email@example.com -description: A Blog Theme by Start Bootstrap -author: Start Bootstrap -baseurl: "/startbootstrap-clean-blog-jekyll" -url: "https://startbootstrap.github.io" +title: Ahmohamed +email: abdll1@hotmail.com +description: In this blog I add my projects +author: A.Mohamed +baseurl: "" +url: "https://ahmohamed1.github.io" # Social Profiles -twitter_username: SBootstrap -github_username: StartBootstrap -facebook_username: StartBootstrap +twitter_username: +github_username: +facebook_username: instagram_username: linkedin_username: @@ -23,3 +23,4 @@ plugins: - jekyll-feed - jekyll-paginate - jekyll-sitemap ## Uncomment this line to silently generate a sitemaps.org compliant sitemap for your Jekyll site + diff --git a/_drafts/Kalman-filter.html b/_drafts/Kalman-filter.html new file mode 100644 index 0000000000..23968f8399 --- /dev/null +++ b/_drafts/Kalman-filter.html @@ -0,0 +1,128 @@ +--- +layout: post +title: "Kalman Filter" +subtitle: "implementation of Kalman filter on differential drive model in c++" +date: 2022-02-05 +background: '/img/posts/kalman-filter/2.PNG' +--- + +

+ I wrote a controller for a differential drive mobile robot with Arduino in the previous post.
+ The controller takes two inputs \(v\) and \(w\), and generates three outputs, which are the state of the robot \(x, y, \theta\).
+ The output of the odometry was not accurate due to several factors, the most important being wheel slip,
+ encoder resolution and measurement accuracy.
+ In this post, I am going to write an extended Kalman filter to correct the odometry using other information, such as LiDAR readings,
+ an IMU or radar.

+ +

+ The Kalman filter is a well-known set of mathematical equations for discrete-time estimation.
+ Its advantage is that it uses only the previous state to predict the current state.
+ To summarise, the Kalman filter is a set of mathematical equations used to estimate the future state
+ from an educated guess, and it can be used in any environment with uncertain information.

+ +

+ To estimate the current state \(x_t\) using the Kalman filter, three inputs are required: the control \(u_t\), the correction (measurement) \(z_t\) and the previous state \(x_{t-1}\).

+ + +

+We start with the model equations shown below, an improved version of the old model.
+ The new model has two cases: (1) \(D_l \neq D_r\), for which the equation below gives the output,

+
+
+$$ \begin{pmatrix}x^{'} \\\ y^{'} \\\ \theta^{'} \end{pmatrix} = \begin{pmatrix}x \\\ y \\\ \theta \end{pmatrix}
++ \begin{pmatrix} (R + \frac w 2)(\sin(\theta+\alpha) - \sin(\theta)) \\\ (R + \frac w 2)(-\cos(\theta+\alpha) + \cos(\theta)) \\\ \alpha \end{pmatrix} \tag{1} \label{1}
+$$
+

+ and (2) where \(D_l = D_r = D\), for which the model reduces to the form below, matching what we described in the first post.

+
+$$ \begin{pmatrix}x^{'} \\\ y^{'} \\\ \theta^{'} \end{pmatrix} = \begin{pmatrix}x \\\ y \\\ \theta \end{pmatrix}
++ \begin{pmatrix} D \cos(\theta) \\\ D \sin(\theta) \\\ 0 \end{pmatrix} \tag{2}
+$$
+

+ Note that the steps below apply to both cases, \(D_l \neq D_r \) and \(D_l = D_r \).

+ +

+Where in \eqref{1} \(\alpha = \frac{D_r - D_l} w\) and \(R = \frac {D_l} {\alpha} \). \(D_l\)
+and \(D_r \) are the distances the left and right wheels
+ moved, respectively. By close analysis of the above
+ equations we can see that the variables fall into two groups,
+ the state \(x\) and the control \(u\), and the model is a function \(g\) of both,
+ where \(x = (x,y,\theta)\) and \(u=(D_l , D_r)\).

+$$ \begin{pmatrix}x^{'} \\\ y^{'} \\\ \theta^{'} \end{pmatrix} = g(x,y,\theta, D_l , D_r) \tag{3} $$
+

+ The function below computes \(g\): the input is the state and the control \((D_l, D_r)\), and the output is the predicted state.

+
+{% highlight cpp %}
+// A minimal sketch of the state prediction g(x, u); Eigen and <cmath> are
+// assumed, and the function and parameter names are illustrative.
+Eigen::Vector3d g(const Eigen::Vector3d& state, double D_l, double D_r, double w)
+{
+    double theta = state(2);
+    Eigen::Vector3d next = state;
+    if (std::abs(D_r - D_l) > 1e-9)   // case (1): curved motion
+    {
+        double alpha = (D_r - D_l) / w;
+        double R = D_l / alpha;
+        next(0) += (R + w / 2.0) * (std::sin(theta + alpha) - std::sin(theta));
+        next(1) += (R + w / 2.0) * (-std::cos(theta + alpha) + std::cos(theta));
+        next(2) += alpha;
+    }
+    else                              // case (2): straight motion
+    {
+        next(0) += D_l * std::cos(theta);
+        next(1) += D_l * std::sin(theta);
+    }
+    return next;
+}
+{% endhighlight %}
+
+

+ Below are the linear Kalman filter equations, where \(A_t\) and \(B_t\) are the state-transition and control factors, respectively.

+$$ x_t = A_t x_{t-1} + B_t u_t$$
+$$ \overline \mu_t = A_t \mu_{t-1} + B_t u_t$$
+$$ \overline \Sigma_t = A_t \Sigma_{t-1} {A_t}^T + R_t$$
+

+Because our problem is nonlinear due to the presence of the rotation terms, we take the Jacobian of \(g\), which is the derivative \(\frac {\partial g} {\partial state}\) where the state is \([x,y,\theta]^T\). Therefore \(G : \frac {\partial g_1} {\partial x} , \frac {\partial g_1} {\partial y}, \frac {\partial g_1} {\partial \theta} \), and so on for \(g_2\) and \(g_3\).

+
+$$ x_t = g(x_{t-1}, u_t) $$
+$$ \overline \mu_t = g(\mu_{t-1}, u_t) $$
+$$
+G : \begin{pmatrix}\frac {\partial g_1} {\partial x} & \frac {\partial g_1} {\partial y} & \frac {\partial g_1} {\partial \theta} \\\
+ \frac {\partial g_2} {\partial x} & \frac {\partial g_2} {\partial y} & \frac {\partial g_2} {\partial \theta} \\\
+ \frac {\partial g_3} {\partial x} & \frac {\partial g_3} {\partial y} & \frac {\partial g_3} {\partial \theta} \end{pmatrix}
+$$
+

+ After taking the derivative with respect to the state, we end up with the \(G\) matrix shown below.

+
+$$ G : \begin{pmatrix} 1 & 0 & (R + \frac w 2)(\cos(\theta+\alpha) - \cos(\theta)) \\\
+0 & 1 & (R + \frac w 2)(\sin(\theta+\alpha) - \sin(\theta))\\\ 0 & 0 & 1\end{pmatrix}
+$$

+A sketch of the code for \(G\) is shown below, where the function takes the state (and the control) as input and returns a \(3 \times 3\) matrix; the helper name Compute_G is illustrative.
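+{% highlight cpp %}
+// A minimal sketch, assuming Eigen and <cmath>; only the third column differs
+// from identity because g_1 and g_2 depend on theta. Names are illustrative.
+Eigen::Matrix3d Compute_G(const Eigen::Vector3d& state, double D_l, double D_r, double w)
+{
+    double theta = state(2);
+    Eigen::Matrix3d G = Eigen::Matrix3d::Identity();
+    if (std::abs(D_r - D_l) > 1e-9)   // case (1): curved motion
+    {
+        double alpha = (D_r - D_l) / w;
+        double R = D_l / alpha;
+        G(0, 2) = (R + w / 2.0) * (std::cos(theta + alpha) - std::cos(theta));
+        G(1, 2) = (R + w / 2.0) * (std::sin(theta + alpha) - std::sin(theta));
+    }
+    else                              // case (2): straight motion
+    {
+        G(0, 2) = -D_l * std::sin(theta);
+        G(1, 2) =  D_l * std::cos(theta);
+    }
+    return G;
+}
+{% endhighlight %}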

+

+Next we need the control noise, where \(R_t = V_t \Sigma_{control} {V_t}^T\),
+ \(\Sigma_{control} = \begin{pmatrix} {\sigma_{D_l}}^2 & 0 \\\ 0 & {\sigma_{D_r}}^2 \end{pmatrix} \)
+ is the control covariance matrix (2x2), and \(V_t = \frac {\partial g} {\partial{control}} \) is a (3x2) matrix.
+ \(V_t\) is shown below, obtained by taking the derivative of \(g\) with respect to the control values. Note that \(\alpha = \frac{D_r - D_l} w\)
+ and \(R = \frac {D_l} {\alpha} \) while taking the derivative.

+$$ V_t : \begin{pmatrix} \frac{w D_r} {(D_r - D_l)^2} (\sin(\theta^{'}) - \sin(\theta)) - \frac {D_r + D_l}{2(D_r-D_l)}\cos(\theta^{'}) &
+ \frac{-w D_l} {(D_r - D_l)^2} (\sin(\theta^{'}) - \sin(\theta)) + \frac {D_r + D_l}{2(D_r-D_l)}\cos(\theta^{'}) \\\
+ \frac{w D_r} {(D_r - D_l)^2} (-\cos(\theta^{'}) + \cos(\theta)) - \frac {D_r + D_l}{2(D_r-D_l)}\sin(\theta^{'}) &
+ \frac{-w D_l} {(D_r - D_l)^2} (-\cos(\theta^{'}) + \cos(\theta)) + \frac {D_r + D_l}{2(D_r-D_l)}\sin(\theta^{'}) \\\
+ -\frac 1 w & \frac 1 w \end{pmatrix}
+$$

+To sum up the prediction step, we need to compute both covariance matrices, the state covariance \(\Sigma_{state} \) and the control covariance \( \Sigma_{control}\).

+$$ {\sigma_{D_l}}^2 = (\alpha_1 D_l)^2 + (\alpha_2 (D_l - D_r))^2$$
+$$ {\sigma_{D_r}}^2 = (\alpha_1 D_r)^2 + (\alpha_2 (D_l - D_r))^2$$
+

+ The code below sketches the prediction step built from the functions above;
+ the variable names are illustrative.

+
+{% highlight cpp %}
+// A minimal sketch of the EKF prediction step built from the pieces above,
+// assuming Eigen; alpha_1, alpha_2 and all names here are illustrative.
+Eigen::Matrix3d G = Compute_G(state, D_l, D_r, w);     // state Jacobian at the previous state
+Eigen::MatrixXd V_t = Eigen::MatrixXd::Zero(3, 2);     // control Jacobian, filled per the V_t formula above
+Eigen::Matrix2d control_cov = Eigen::Matrix2d::Zero();
+control_cov(0, 0) = pow(alpha_1 * D_l, 2) + pow(alpha_2 * (D_l - D_r), 2);
+control_cov(1, 1) = pow(alpha_1 * D_r, 2) + pow(alpha_2 * (D_l - D_r), 2);
+state = g(state, D_l, D_r, w);                         // predicted mean
+cov = G * cov * G.transpose() + V_t * control_cov * V_t.transpose();  // predicted covariance
+{% endhighlight %} diff --git a/_drafts/data-fusion.html b/_drafts/data-fusion.html new file mode 100644 index 0000000000..5bddb733ad --- /dev/null +++ b/_drafts/data-fusion.html @@ -0,0 +1,288 @@ +--- +layout: post +title: "Data Fusion" +subtitle: "What is data fusion and how does it work" +date: 2022-02-10 +background: '/img/posts/kalman-filter/1.PNG' +--- + +

+ Data fusion is a process used to combine multiple sensor readings to get a more accurate
+ reading. For example, suppose we have a robot that moves forward and backward. The robot uses a lidar
+ to detect the obstacle and estimate the distance. All sensors have noise in their readings
+ and accuracy (for example, if the lidar the robot uses has a std of 2 cm and the robot reads a distance of
+ 20 cm, we can say the robot position falls between 18 cm and 22 cm). The noise in the sensor can be expressed as Gaussian noise.
+ Going back to our robot: suppose the lidar shows the wall is 25 cm away from the robot, with the std shown by the green curve in the figure below.

+ + + +

+ We have another reading that comes from the odometry (wheel encoders), and it shows the robot position at 7 m with a std of 7 m.
+ If we keep reading both sensors independently, the error in the lidar stays the same, while the odometry error grows to the point
+ where it can no longer be relied on. Therefore we use data fusion to combine both readings into a more accurate value. The
+ figure below shows the output of combining both readings. As can be seen, the blue curve shows the position at 6 m with a std of 3 m,
+ which is much better than either sensor working independently.

+ + + + +

+ The process shown above consists of two steps: (1) a prediction step and (2) an update step.
+ In the first step, the dynamics of the robot update the current state \(x_t\) and the uncertainty grows, while
+ the second step updates the current state \(x_t\) with a measurement and reduces the uncertainty in the system.

+

+ The fusion process can be done with Bayes' theorem using arbitrary probability distributions, or we can use the Kalman filter.

+

+ The Kalman filter is considered a robust mathematical model for data fusion because it simplifies the problem.
+ The Kalman filter solves linear systems \( \hat x = f(x \mid z_1, \dots, z_n )\), where \(\hat x\) is the estimated state, \(x\)
+ is the uncertain true state and \(z_1, \dots, z_n \) are the noisy measurements. The Kalman filter uses
+ the noisy measurements to bring the estimate as close as possible to the true state, \(\hat x = \min J\). Therefore, the problem converts
+ to minimizing the error between the true state and the estimated state, \(\overline x = x - \hat x \),
+ where \(\overline x \) is the state estimation error. We have the state estimation covariance \(P = E[(x- \hat x)(x- \hat x)^T]\)
+ and the cost function \( J= E[(x- \hat x)^T(x- \hat x)] = Tr(P) \).
+ Therefore, we can minimise the cost function \(J\).

+ +

+ There are different types of Kalman filters (KF), which differ in computational efficiency.
+ The plain Kalman filter processes only linear equations, so the Extended Kalman Filter
+ (EKF) was designed to work with non-linear models by approximating the model, linearizing the
+ relationship and using a linear covariance.
+ Therefore, the EKF works only with models that can be linearized.
+ Since some models cannot be approximated by a linear model, the Unscented Kalman Filter (UKF)
+ was developed to work with non-linear models in both the prediction and update equations.
+ The UKF approximates the uncertainty covariance directly (via sigma points) instead of linearizing the model dynamics.

+ +

An implementation of KF in Arduino

+ +

+ Now I am going to implement a KF in Arduino to provide more accurate readings from an
+ IMU sensor (MPU-6050). We will correct the roll angle of the sensor using the Kalman gain.
+ The Kalman gain is used to weight the measurement in order to correct the new state estimate.

+$$
+ K = \frac {PH} {HPH+R}
+$$

+ where \(P\) is the error covariance, \(H\) the measurement map scalar, \(Q\) the process noise covariance, and
+ \(R\) the measurement noise covariance. The Kalman gain lies between 0 and 1 and reflects how much the filter
+ trusts the measurement: near 0 it relies on the estimate, near 1 it relies on the measurement.

+

+ The class below is the Kalman filter algorithm for a single input state.
+ The class defines several variables which are used in the update function to compute the Kalman gain.
+ The update function computes the updated Kalman gain and the corrected state.

+
+
+{% highlight cpp %}
+class Kalman_filter
+{
+  public:
+    const float R = 20;    // measurement noise covariance
+    const float H = 1.00;  // measurement map scalar
+    float Q = 2;           // process noise covariance
+    float P = 0;           // error covariance
+    float x_hat = 0;       // updated state
+    float K = 0;           // Kalman gain
+    float Update(int x)
+    {
+      K = P * H / (H * P * H + R);   // update Kalman gain
+      x_hat = x_hat + K * (x - H * x_hat);
+      // update error covariance
+      P = (1 - K * H) * P + Q;
+      return x_hat;
+    }
+};
+{% endhighlight %}
+

+ The code below is the setup and main loop, where we configure Serial and initialize the sensor
+ and the Kalman filter class. In the main loop we get the data from the sensor and pass it to the update function
+ to calculate the estimated state.

+
+{% highlight cpp %}
+Kalman_filter kalman_filter;   // note: no parentheses, otherwise C++ parses a function declaration
+void setup() {
+  Serial.begin(9600);
+
+  Serial.println("Initialize MPU6050");
+
+  while(!mpu.begin(MPU6050_SCALE_2000DPS, MPU6050_RANGE_2G))
+  {
+    Serial.println("Could not find a valid MPU6050 sensor, check wiring!");
+    delay(500);
+  }
+}
+
+void loop() {
+  // Read normalized values
+  Vector normAccel = mpu.readNormalizeAccel();
+
+  // Calculate roll
+  int roll = (atan2(normAccel.YAxis, normAccel.ZAxis)*180.0)/M_PI;
+
+  float roll_correct = kalman_filter.Update(roll);
+  // Output
+  Serial.print(" Roll = ");
+  Serial.print(roll);
+  Serial.print(" Roll_correct = ");
+  Serial.print(roll_correct);
+  Serial.println();
+
+  delay(10);
+}
+{% endhighlight %}
+

+ The figure below shows the raw data (green) and the output of the Kalman filter (gray).
+ As can be seen, the estimated roll has a smooth curve and less noise compared to the measured roll angle.

+ + + +

+ The complete code is below +

+
+
+{% highlight cpp %}
+#include <Wire.h>
+#include <MPU6050.h>   // jarzebski Arduino-MPU6050 library, assumed from the API used here
+
+MPU6050 mpu;
+
+class Kalman_filter
+{
+  public:
+    const float R = 20;    // measurement noise covariance
+    const float H = 1.00;  // measurement map scalar
+    float Q = 2;           // process noise covariance
+    float P = 0;           // error covariance
+    float x_hat = 0;       // updated state
+    float K = 0;           // Kalman gain
+    float Update(int x)
+    {
+      K = P * H / (H * P * H + R);   // update Kalman gain
+      x_hat = x_hat + K * (x - H * x_hat);
+      // update error covariance
+      P = (1 - K * H) * P + Q;
+      return x_hat;
+    }
+};
+
+// Initialize Kalman filter
+Kalman_filter kalman_filter;
+
+void setup() {
+  Serial.begin(9600);
+
+  Serial.println("Initialize MPU6050");
+
+  while(!mpu.begin(MPU6050_SCALE_2000DPS, MPU6050_RANGE_2G))
+  {
+    Serial.println("Could not find a valid MPU6050 sensor, check wiring!");
+    delay(500);
+  }
+}
+
+void loop() {
+  // Read normalized values
+  Vector normAccel = mpu.readNormalizeAccel();
+
+  // Calculate roll
+  int roll = (atan2(normAccel.YAxis, normAccel.ZAxis)*180.0)/M_PI;
+
+  float roll_correct = kalman_filter.Update(roll);
+  // Output
+  Serial.print(" Roll = ");
+  Serial.print(roll);
+  Serial.print(" Roll_correct = ");
+  Serial.print(roll_correct);
+  Serial.println();
+
+  delay(10);
+}
+{% endhighlight %}

+ + +

+ +

Example 2 of KF with Arduino

+ +

+ In this example I will use the MPU6050 sensor, which provides acceleration data and gyroscope data. We use the acceleration data to compute
+ the state \(x\) and the gyroscope data to correct the state.

+ + $$ + \theta = \theta_0 + \dot \theta t + \frac 1 2 \ddot \theta t^2 + $$ +

+ So the state will be \(\hat x = [\theta , \dot \theta]^T \), which describes the angle and the angular velocity.
+ These describe our best estimate \(\mu\). The covariance will be
+ \( P_k = \begin{pmatrix} \Sigma_{\theta \theta} & \Sigma_{\theta \dot \theta} \\\ \Sigma_{\dot \theta \theta} & \Sigma_{\dot \theta \dot \theta}\end{pmatrix} \),
+ and our A matrix is shown below.

+
+$$
+ A_k = \begin{pmatrix} 1 & \Delta t \\\ 0 & 1 \end{pmatrix}
+$$

+ Therefore, the result of multiplying \(A\) and \(x\) is a (2x1) matrix, shown below.

+$$
+ A_k x = \begin{pmatrix} \theta + \Delta t \dot \theta \\\ \dot \theta \end{pmatrix}
+$$

+ Now for the control part, which is the angular acceleration, where \(u=[\ddot \theta]\) and
+ \(B = \begin{pmatrix} \frac 1 2 \Delta t^2 \\\ \Delta t \end{pmatrix} \);
+ the result of the multiplication between \(B\) and \(u\) is

+
+$$
+ Bu = \begin{pmatrix} \ddot \theta \frac 1 2 \Delta t^2 \\\ \ddot \theta \Delta t \end{pmatrix}
+$$
+

+ Now we are going to update the state estimate using the actual measurements \(Y_k = CX_k + z_k\), where
+ \(C=\begin{pmatrix} 1 & 0 \\\ 0 & 1 \end{pmatrix}\) and \(Y_k=\begin{pmatrix} \theta \\\ \dot \theta \end{pmatrix}\).

+ +

+ The code below is implemented on Arduino using the MPU6050 sensor, and it is based on the work of
+ Lauszus ,
+ which I modified into class form.

+ +

+ The full code is sketched below.
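+ Since this draft omits the listing, here is a minimal sketch of the two-state filter described above,
+ with the angular acceleration as the control input and both the angle and rate as measurements,
+ as in the equations; the class, variable names and noise values are illustrative, not Lauszus' exact API.
+{% highlight cpp %}
+// Hypothetical sketch of the 2-state Kalman filter (state [theta, theta_dot]).
+class AngleKalman
+{
+  public:
+    float theta = 0, theta_dot = 0;              // state estimate
+    float P[2][2] = {{1, 0}, {0, 1}};            // state covariance
+    float Q_a = 0.001, Q_g = 0.003;              // assumed process noise tuning
+    float R0 = 0.03, R1 = 0.03;                  // assumed measurement noise tuning
+
+    void Predict(float theta_ddot, float dt)
+    {
+      // x = A x + B u with A = [1 dt; 0 1], B = [0.5 dt^2; dt]
+      theta += dt * theta_dot + 0.5f * dt * dt * theta_ddot;
+      theta_dot += dt * theta_ddot;
+      // P = A P A^T + Q (Q diagonal, assumed); P[0][0] uses the old entries
+      P[0][0] += dt * (P[1][0] + P[0][1]) + dt * dt * P[1][1] + Q_a;
+      P[0][1] += dt * P[1][1];
+      P[1][0] += dt * P[1][1];
+      P[1][1] += Q_g;
+    }
+
+    void Update(float theta_meas, float theta_dot_meas)
+    {
+      // C = I, so S = P + R; invert the 2x2 innovation covariance by hand
+      float S00 = P[0][0] + R0, S01 = P[0][1], S10 = P[1][0], S11 = P[1][1] + R1;
+      float det = S00 * S11 - S01 * S10;
+      // K = P S^{-1}
+      float K00 = (P[0][0]*S11 - P[0][1]*S10)/det, K01 = (-P[0][0]*S01 + P[0][1]*S00)/det;
+      float K10 = (P[1][0]*S11 - P[1][1]*S10)/det, K11 = (-P[1][0]*S01 + P[1][1]*S00)/det;
+      float y0 = theta_meas - theta, y1 = theta_dot_meas - theta_dot;  // innovation
+      theta += K00*y0 + K01*y1;
+      theta_dot += K10*y0 + K11*y1;
+      // P = (I - K) P
+      float P00 = P[0][0], P01 = P[0][1], P10 = P[1][0], P11 = P[1][1];
+      P[0][0] = (1-K00)*P00 - K01*P10;  P[0][1] = (1-K00)*P01 - K01*P11;
+      P[1][0] = -K10*P00 + (1-K11)*P10; P[1][1] = -K10*P01 + (1-K11)*P11;
+    }
+};
+{% endhighlight %}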

+ + +

+ The figure below shows the output of the code; as can be seen, the estimated roll angle is much smoother.

\ No newline at end of file diff --git a/_includes/scripts.html b/_includes/scripts.html index 2af510e4cd..d67e90671c 100644 --- a/_includes/scripts.html +++ b/_includes/scripts.html @@ -82,3 +82,4 @@ }); {% endif %} + diff --git a/_layouts/default.html b/_layouts/default.html index 6fc9a457ee..e2ad1c227a 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -18,4 +18,9 @@ + + + + + \ No newline at end of file diff --git a/_layouts/post.html b/_layouts/post.html index d4bf8fa7d8..8dd1d82034 100644 --- a/_layouts/post.html +++ b/_layouts/post.html @@ -52,3 +52,6 @@

{{ page.subtitle }}

+ + \ No newline at end of file diff --git a/_posts/2020-01-26-dinosaurs.html b/_posts/2020-01-26-dinosaurs.html deleted file mode 100644 index fc3c9f0b6a..0000000000 --- a/_posts/2020-01-26-dinosaurs.html +++ /dev/null @@ -1,40 +0,0 @@ ---- -layout: post -title: "Dinosaurs are extinct today" -subtitle: "because they lacked opposable thumbs and the brainpower to build a space program." -date: 2020-01-26 23:45:13 -0400 -background: '/img/posts/01.jpg' ---- - -

Never in all their history have men been able truly to conceive of the world as one: a single sphere, a globe, having the qualities of a globe, a round earth in which all the directions eventually meet, in which there is no center because every point, or none, is center — an equal earth which all men occupy as equals. The airman's earth, if free men make it, will be truly round: a globe in practice, not in theory.

- -

Science cuts two ways, of course; its products can be used for both good and evil. But there's no turning back from science. The early warnings about technological dangers also come from science.

- -

What was most significant about the lunar voyage was not that man set foot on the Moon but that they set eye on the earth.

- -

A Chinese tale tells of some men sent to harm a young girl who, upon seeing her beauty, become her protectors rather than her violators. That's how I felt seeing the Earth for the first time. I could not help but love and cherish her.

- -

For those who have seen the Earth from space, and for the hundreds and perhaps thousands more who will, the experience most certainly changes your perspective. The things that we share in our world are far more valuable than those which divide us.

- -

The Final Frontier

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -
The dreams of yesterday are the hopes of today and the reality of tomorrow. Science has not yet mastered prophecy. We predict too much for the next year and yet far too little for the next ten.
- -

Spaceflights cannot be stopped. This is not the work of any one man or even a group of men. It is a historical process which mankind is carrying out in accordance with the natural laws of human development.

- -

Reaching for the Stars

- -

As we got further and further away, it [the Earth] diminished in size. Finally it shrank to the size of a marble, the most beautiful you can imagine. That beautiful, warm, living object looked so fragile, so delicate, that if you touched it with a finger it would crumble and fall apart. Seeing this has to change a man.

- -Demo Image -To go places and do things that have never been done before – that’s what living is all about. - -

Space, the final frontier. These are the voyages of the Starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before.

- -

As I stand out here in the wonders of the unknown at Hadley, I sort of realize there’s a fundamental truth to our nature, Man must explore, and this is exploration at its greatest.

- -

Placeholder text by Space Ipsum. Photographs by Unsplash.

diff --git a/_posts/2020-01-27-dreams.html b/_posts/2020-01-27-dreams.html deleted file mode 100644 index f687623ea2..0000000000 --- a/_posts/2020-01-27-dreams.html +++ /dev/null @@ -1,39 +0,0 @@ ---- -layout: post -title: "The dreams of yesterday are the hopes of today and the reality of tomorrow." -date: 2020-01-27 23:45:13 -0400 -background: '/img/posts/02.jpg' ---- - -

Never in all their history have men been able truly to conceive of the world as one: a single sphere, a globe, having the qualities of a globe, a round earth in which all the directions eventually meet, in which there is no center because every point, or none, is center — an equal earth which all men occupy as equals. The airman's earth, if free men make it, will be truly round: a globe in practice, not in theory.

- -

Science cuts two ways, of course; its products can be used for both good and evil. But there's no turning back from science. The early warnings about technological dangers also come from science.

- -

What was most significant about the lunar voyage was not that man set foot on the Moon but that they set eye on the earth.

- -

A Chinese tale tells of some men sent to harm a young girl who, upon seeing her beauty, become her protectors rather than her violators. That's how I felt seeing the Earth for the first time. I could not help but love and cherish her.

- -

For those who have seen the Earth from space, and for the hundreds and perhaps thousands more who will, the experience most certainly changes your perspective. The things that we share in our world are far more valuable than those which divide us.

- -

The Final Frontier

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -
The dreams of yesterday are the hopes of today and the reality of tomorrow. Science has not yet mastered prophecy. We predict too much for the next year and yet far too little for the next ten.
- -

Spaceflights cannot be stopped. This is not the work of any one man or even a group of men. It is a historical process which mankind is carrying out in accordance with the natural laws of human development.

- -

Reaching for the Stars

- -

As we got further and further away, it [the Earth] diminished in size. Finally it shrank to the size of a marble, the most beautiful you can imagine. That beautiful, warm, living object looked so fragile, so delicate, that if you touched it with a finger it would crumble and fall apart. Seeing this has to change a man.

- -Demo Image -To go places and do things that have never been done before – that’s what living is all about. - -

Space, the final frontier. These are the voyages of the Starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before.

- -

As I stand out here in the wonders of the unknown at Hadley, I sort of realize there’s a fundamental truth to our nature, Man must explore, and this is exploration at its greatest.

- -

Placeholder text by Space Ipsum. Photographs by Unsplash.

diff --git a/_posts/2020-01-28-exploration.html b/_posts/2020-01-28-exploration.html deleted file mode 100644 index ec94119725..0000000000 --- a/_posts/2020-01-28-exploration.html +++ /dev/null @@ -1,40 +0,0 @@ ---- -layout: post -title: "Failure is not an option" -subtitle: "Many say exploration is part of our destiny, but it’s actually our duty to future generations." -date: 2020-01-28 23:45:13 -0400 -background: '/img/posts/03.jpg' ---- - -

Never in all their history have men been able truly to conceive of the world as one: a single sphere, a globe, having the qualities of a globe, a round earth in which all the directions eventually meet, in which there is no center because every point, or none, is center — an equal earth which all men occupy as equals. The airman's earth, if free men make it, will be truly round: a globe in practice, not in theory.

- -

Science cuts two ways, of course; its products can be used for both good and evil. But there's no turning back from science. The early warnings about technological dangers also come from science.

- -

What was most significant about the lunar voyage was not that man set foot on the Moon but that they set eye on the earth.

- -

A Chinese tale tells of some men sent to harm a young girl who, upon seeing her beauty, become her protectors rather than her violators. That's how I felt seeing the Earth for the first time. I could not help but love and cherish her.

- -

For those who have seen the Earth from space, and for the hundreds and perhaps thousands more who will, the experience most certainly changes your perspective. The things that we share in our world are far more valuable than those which divide us.

- -

The Final Frontier

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -
The dreams of yesterday are the hopes of today and the reality of tomorrow. Science has not yet mastered prophecy. We predict too much for the next year and yet far too little for the next ten.
- -

Spaceflights cannot be stopped. This is not the work of any one man or even a group of men. It is a historical process which mankind is carrying out in accordance with the natural laws of human development.

- -

Reaching for the Stars

- -

As we got further and further away, it [the Earth] diminished in size. Finally it shrank to the size of a marble, the most beautiful you can imagine. That beautiful, warm, living object looked so fragile, so delicate, that if you touched it with a finger it would crumble and fall apart. Seeing this has to change a man.

- -Demo Image -To go places and do things that have never been done before – that’s what living is all about. - -

Space, the final frontier. These are the voyages of the Starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before.

- -

As I stand out here in the wonders of the unknown at Hadley, I sort of realize there’s a fundamental truth to our nature, Man must explore, and this is exploration at its greatest.

- -

Placeholder text by Space Ipsum. Photographs by Unsplash.

diff --git a/_posts/2020-01-29-prophecy.html b/_posts/2020-01-29-prophecy.html deleted file mode 100644 index 65704ea439..0000000000 --- a/_posts/2020-01-29-prophecy.html +++ /dev/null @@ -1,40 +0,0 @@ ---- -layout: post -title: "Science has not yet mastered prophecy" -subtitle: "We predict too much for the next year and yet far too little for the next ten." -date: 2020-01-29 23:45:13 -0400 -background: '/img/posts/04.jpg' ---- - -

Never in all their history have men been able truly to conceive of the world as one: a single sphere, a globe, having the qualities of a globe, a round earth in which all the directions eventually meet, in which there is no center because every point, or none, is center — an equal earth which all men occupy as equals. The airman's earth, if free men make it, will be truly round: a globe in practice, not in theory.

- -

Science cuts two ways, of course; its products can be used for both good and evil. But there's no turning back from science. The early warnings about technological dangers also come from science.

- -

What was most significant about the lunar voyage was not that man set foot on the Moon but that they set eye on the earth.

- -

A Chinese tale tells of some men sent to harm a young girl who, upon seeing her beauty, become her protectors rather than her violators. That's how I felt seeing the Earth for the first time. I could not help but love and cherish her.

- -

For those who have seen the Earth from space, and for the hundreds and perhaps thousands more who will, the experience most certainly changes your perspective. The things that we share in our world are far more valuable than those which divide us.

- -

The Final Frontier

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -
The dreams of yesterday are the hopes of today and the reality of tomorrow. Science has not yet mastered prophecy. We predict too much for the next year and yet far too little for the next ten.
- -

Spaceflights cannot be stopped. This is not the work of any one man or even a group of men. It is a historical process which mankind is carrying out in accordance with the natural laws of human development.

- -

Reaching for the Stars

- -

As we got further and further away, it [the Earth] diminished in size. Finally it shrank to the size of a marble, the most beautiful you can imagine. That beautiful, warm, living object looked so fragile, so delicate, that if you touched it with a finger it would crumble and fall apart. Seeing this has to change a man.

- -Demo Image -To go places and do things that have never been done before – that’s what living is all about. - -

Space, the final frontier. These are the voyages of the Starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before.

- -

As I stand out here in the wonders of the unknown at Hadley, I sort of realize there’s a fundamental truth to our nature, Man must explore, and this is exploration at its greatest.

- -

Placeholder text by Space Ipsum. Photographs by Unsplash.

diff --git a/_posts/2020-01-30-heartbeats.html b/_posts/2020-01-30-heartbeats.html deleted file mode 100644 index a444e6cd18..0000000000 --- a/_posts/2020-01-30-heartbeats.html +++ /dev/null @@ -1,39 +0,0 @@ ---- -layout: post -title: "I believe every human has a finite number of heartbeats. I don't intend to waste any of mine." -date: 2020-01-30 23:45:13 -0400 -background: '/img/posts/05.jpg' ---- - -

Never in all their history have men been able truly to conceive of the world as one: a single sphere, a globe, having the qualities of a globe, a round earth in which all the directions eventually meet, in which there is no center because every point, or none, is center — an equal earth which all men occupy as equals. The airman's earth, if free men make it, will be truly round: a globe in practice, not in theory.

- -

Science cuts two ways, of course; its products can be used for both good and evil. But there's no turning back from science. The early warnings about technological dangers also come from science.

- -

What was most significant about the lunar voyage was not that man set foot on the Moon but that they set eye on the earth.

- -

A Chinese tale tells of some men sent to harm a young girl who, upon seeing her beauty, become her protectors rather than her violators. That's how I felt seeing the Earth for the first time. I could not help but love and cherish her.

- -

For those who have seen the Earth from space, and for the hundreds and perhaps thousands more who will, the experience most certainly changes your perspective. The things that we share in our world are far more valuable than those which divide us.

- -

The Final Frontier

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -
The dreams of yesterday are the hopes of today and the reality of tomorrow. Science has not yet mastered prophecy. We predict too much for the next year and yet far too little for the next ten.
- -

Spaceflights cannot be stopped. This is not the work of any one man or even a group of men. It is a historical process which mankind is carrying out in accordance with the natural laws of human development.

- -

Reaching for the Stars

- -

As we got further and further away, it [the Earth] diminished in size. Finally it shrank to the size of a marble, the most beautiful you can imagine. That beautiful, warm, living object looked so fragile, so delicate, that if you touched it with a finger it would crumble and fall apart. Seeing this has to change a man.

- -Demo Image -To go places and do things that have never been done before – that’s what living is all about. - -

Space, the final frontier. These are the voyages of the Starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before.

- -

As I stand out here in the wonders of the unknown at Hadley, I sort of realize there’s a fundamental truth to our nature, Man must explore, and this is exploration at its greatest.

- -

Placeholder text by Space Ipsum. Photographs by Unsplash.

diff --git a/_posts/2020-01-31-man-must-explore.html b/_posts/2020-01-31-man-must-explore.html deleted file mode 100644 index 4dd948b4d7..0000000000 --- a/_posts/2020-01-31-man-must-explore.html +++ /dev/null @@ -1,40 +0,0 @@ ---- -layout: post -title: "Man must explore, and this is exploration at its greatest" -subtitle: "Problems look mighty small from 150 miles up" -date: 2020-01-31 10:45:13 -0400 -background: '/img/posts/06.jpg' ---- - -

Never in all their history have men been able truly to conceive of the world as one: a single sphere, a globe, having the qualities of a globe, a round earth in which all the directions eventually meet, in which there is no center because every point, or none, is center — an equal earth which all men occupy as equals. The airman's earth, if free men make it, will be truly round: a globe in practice, not in theory.

- -

Science cuts two ways, of course; its products can be used for both good and evil. But there's no turning back from science. The early warnings about technological dangers also come from science.

- -

What was most significant about the lunar voyage was not that man set foot on the Moon but that they set eye on the earth.

- -

A Chinese tale tells of some men sent to harm a young girl who, upon seeing her beauty, become her protectors rather than her violators. That's how I felt seeing the Earth for the first time. I could not help but love and cherish her.

- -

For those who have seen the Earth from space, and for the hundreds and perhaps thousands more who will, the experience most certainly changes your perspective. The things that we share in our world are far more valuable than those which divide us.

- -

The Final Frontier

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -

There can be no thought of finishing for ‘aiming for the stars.’ Both figuratively and literally, it is a task to occupy the generations. And no matter how much progress one makes, there is always the thrill of just beginning.

- -
The dreams of yesterday are the hopes of today and the reality of tomorrow. Science has not yet mastered prophecy. We predict too much for the next year and yet far too little for the next ten.
- -

Spaceflights cannot be stopped. This is not the work of any one man or even a group of men. It is a historical process which mankind is carrying out in accordance with the natural laws of human development.

- -

Reaching for the Stars

- -

As we got further and further away, it [the Earth] diminished in size. Finally it shrank to the size of a marble, the most beautiful you can imagine. That beautiful, warm, living object looked so fragile, so delicate, that if you touched it with a finger it would crumble and fall apart. Seeing this has to change a man.

- -Demo Image -To go places and do things that have never been done before – that’s what living is all about. - -

Space, the final frontier. These are the voyages of the Starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before.

- -

As I stand out here in the wonders of the unknown at Hadley, I sort of realize there’s a fundamental truth to our nature, Man must explore, and this is exploration at its greatest.

- -

Placeholder text by Space Ipsum. Photographs by Unsplash.

diff --git a/_posts/2022-01-15-ICP.html b/_posts/2022-01-15-ICP.html new file mode 100644 index 0000000000..815b0d003c --- /dev/null +++ b/_posts/2022-01-15-ICP.html @@ -0,0 +1,522 @@ +--- +layout: post +title: "2D Iterative Closest Point (ICP)" +subtitle: "ICP using c++ and eigen library" +date: 2022-01-15 +background: '/img/posts/01.jpg' +--- + +

+This work is based on the work of Igor Bogoslavskyi .

+ +

+ ICP is an algorithm used to compute the transformation (rotation \(R\) and translation \(t\))
+ between two sets of points, the target points \(P = \{p_i\}\) and the reference points \(Q = \{q_i\}\),
+ by iteratively minimizing the difference between the two groups of points. For example, ICP is used in 3D point cloud
+ reconstruction, or in 2D to match laser scans (for robot localization).

+ +

In this project I wrote an ICP using C++ and the Eigen template library. I used CvPlot to visualize the points;
+ CvPlot is a library built purely on OpenCV that works in real time and is easy to integrate.

+ +

ICP algorithms

+ +

+ In this project, I used two different algorithms: (1) ICP based on SVD and (2) ICP based on non-linear least squares.

+ +

+ First, I will start with the SVD-based ICP algorithm, which consists of five steps:
+ (1) center the data and transform it, (2) compute the correspondences, (3) compute the cross covariance,
+ (4) find \(R\) and \(t\) from the SVD decomposition, and (5) apply the transformation and compute the error.

+ + + +

+ I will show the code and explain it for each step. However, before we start, it is essential to define the data types used.
+ Starting with the point, I use the Eigen type Vector2d to store the value of a point \((x,y)\),
+ and std::vector&lt;Eigen::Vector2d&gt; to store the list of points. Finally, I use Eigen::MatrixXd to handle the rotation matrix and translation.
+ Note: we need to keep the variable definitions consistent when using the Eigen library.

+ + +

Compute the center of the data and transform it

+ +

+ In this step we compute the mean center of all points in the target set \(\overline P=\frac{1}{n}\sum_{i=1}^n p_i\) and
+ the reference set \(\overline Q=\frac{1}{n}\sum_{i=1}^n q_i\).
+ After computing the center, we reduce the coordinates of each point:
+ \(p^{'} = p_i - \overline P\) and \(q^{'} = q_i - \overline Q\).

+ +

+ The data used to test this project is a hand-made set \(Q\); a transformation is then applied to it to create the target set \(P\), as sketched below.
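+ For example, a small sketch of how such test data can be generated; the curve, angle and offset values are illustrative:
+{% highlight cpp %}
+// Build Q by hand, then rotate/translate it to get P (assumed test setup).
+vector<Eigen::Vector2d> Q, P;
+for (int i = 0; i < 30; i++)
+{
+    double x = 0.1 * i;
+    Q.push_back(Eigen::Vector2d(x, 0.2 * sin(4.0 * x)));   // hand-made curve
+}
+double angle = 0.3;                                        // known rotation
+Eigen::Matrix2d R;
+R << cos(angle), -sin(angle), sin(angle), cos(angle);
+Eigen::Vector2d t(0.5, 0.3);                               // known translation
+for (int i = 0; i < Q.size(); i++)
+    P.push_back(R * Q[i] + t);
+{% endhighlight %}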

+ + + + +

+ The function for computing the centre of the points is shown below.
+ The input is a vector containing the list of points in Eigen::Vector2d format, and the output is an Eigen::Vector2d as well.

+{% highlight cpp %}
+Eigen::Vector2d Center_data(vector<Eigen::Vector2d> P)
+{
+    Eigen::Vector2d temp;
+    temp << 0, 0;
+    for (int i = 0; i < P.size(); i++)
+    {
+        temp += P[i];
+    }
+    temp = temp / P.size();
+    return temp;
+}
+{% endhighlight %}
+

+ After computing the centre (mean) of the data, we reduce the coordinates of the points using the following code.

+ +{% highlight ruby %} +for (int i = 0; i < Q.size(); i++) + { + Q_centered.push_back(Q[i] - center_of_Q); + } +{% endhighlight %} + +

The output of the transformation is shown below.

+ + + +

We can see how the target points \(P\) move to the center of the reference points \(Q\).

+ +

Compute correspondences

+

+ We need to compute the correspondences between the target and reference points. There are many algorithms we can use
+ to do the matching, such as k-d tree search and approximate nearest neighbour (ANN) search.
+ These are also known as correspondence algorithms or registration algorithms.
+ To score a candidate match, I use linalg_norm to calculate the Euclidean norm of a vector.
+ The function is shown below, where the input is a Vector2d and the output is its length.

+ +{% highlight ruby %} +double linalg_norm(Eigen::Vector2d p) { + return sqrt(p(0) * p(0) + p(1) * p(1)); +} +{% endhighlight %} + +

+ The correspondence algorithm consists of two loops: the outer loop goes through the target points \(P\),
+ and the inner loop goes through the reference points \(Q\). Each candidate pair is measured using linalg_norm and compared with the running minimum.
+ Then the indices of the target point and the matched reference point are stored in a vector. The function is shown below, where the two point sets are the input
+ and the output is a vector containing the matched index pairs.

+
+{% highlight cpp %}
+vector<Eigen::Vector2d> Get_correspondence_indices(vector<Eigen::Vector2d> P, vector<Eigen::Vector2d> Q)
+{
+    vector<Eigen::Vector2d> correspondences;
+    for (int i = 0; i < P.size(); i++)
+    {
+        double min_distance = 99999999.9;
+        int match_index = -1;
+        for (int j = 0; j < Q.size(); j++)
+        {
+            double distance = linalg_norm(Q[j] - P[i]);
+            if (distance < min_distance)
+            {
+                match_index = j;
+                min_distance = distance;
+            }
+        }
+        if (match_index != -1)
+        {
+            Eigen::Vector2d temp;
+            temp << i, match_index;
+            correspondences.push_back(temp);
+        }
+    }
+    return correspondences;
+}
+{% endhighlight %}
+

+ The figure below shows the output of the correspondence process, where each point from the reference set \(Q\) is linked to a target point in \(P\).
+ As can be seen, the matching is not perfect, but it does the job for a small 2D point set.

+ + + + +

Compute cross covariance matrix

+

+ Now we need to compute the cross covariance \(K\) between the corresponding points.

+
+$$K=E[(q_i - \overline Q)(p_i - \overline P)^T] $$
+$$ =\frac{1}{N} \sum_i (q_i - \overline Q)(p_i - \overline P)^T $$
+$$ \approx \sum_i (q_i - \overline Q)(p_i - \overline P)^T $$
+

+ The output of this equation, \(K\), will be a \(2 \times 2\) matrix, since \(p_i , q_j \in \Bbb R^2\).

+ +$$K = \begin{pmatrix}cov(p_{xi},q_{xj}) & cov(p_{xi},q_{yj})\\\ cov(p_{yi},q_{xj}) & cov(p_{yi},q_{yj})\end{pmatrix}$$ + +
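+ The later steps call a helper compute_cross_covariance that this post does not list; a minimal sketch consistent with the equation above and the call site (the implementation details are an assumption):
+{% highlight cpp %}
+// Accumulate K over the matched pairs; the points are assumed already centered.
+Eigen::MatrixXd compute_cross_covariance(vector<Eigen::Vector2d> P,
+                                         vector<Eigen::Vector2d> Q,
+                                         vector<Eigen::Vector2d> correspondences)
+{
+    Eigen::MatrixXd cov = Eigen::MatrixXd::Zero(2, 2);
+    for (int k = 0; k < correspondences.size(); k++)
+    {
+        int pi = (int)correspondences[k](0);
+        int qi = (int)correspondences[k](1);
+        cov += Q[qi] * P[pi].transpose();   // (q_i)(p_i)^T, summed over matches
+    }
+    return cov;
+}
+{% endhighlight %}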

+ After computing the covariance, we find \(R\) and \(t\) using the Singular Value Decomposition (SVD).
+ The SVD is a factorization of a matrix whose output is three matrices, \(U\), \(S\), and \(V^T\).
+ Therefore, \(R=UV^T\) and \(t=\mu_Q - R\mu_P\). Using the Eigen library, we compute the SVD of cov using Eigen::JacobiSVD;
+ then, we compute the rotation and translation as shown in the code snippet below.

+
+{% highlight cpp %}
+// Step 4: Find R and t from the SVD decomposition
+Eigen::JacobiSVD<Eigen::MatrixXd> svd(cov, Eigen::ComputeFullU | Eigen::ComputeFullV);
+auto R_found = svd.matrixU() * svd.matrixV().transpose();
+auto T_found = center_of_Q - R_found * center_of_P;
+{% endhighlight %}
+
+

Apply the transformation and compute the error

+ +

+ After getting the new \(R\) and \(t\), we need to apply the transformation to the target points \(P\)
+ using the function Apply_transform(); its output is a vector containing the list of
+ corrected points. After that, the error is computed as the squared difference \(E=\sum_i \|p^{'}_i - q_i\|^2\).
+ Finally, we repeat the same steps until the error reaches the threshold. The ICP_SVD driver is shown after the helper sketches below; it takes four inputs: the target points, the reference points,
+ a vector that collects the error at each iteration, and the number of iterations.
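+ The helpers Apply_transform and Compute_Square_difference are used below but not listed in the post; minimal sketches consistent with their call sites (their exact bodies are assumptions) could be:
+{% highlight cpp %}
+// Apply R and t to every target point (assumed helper).
+vector<Eigen::Vector2d> Apply_transform(vector<Eigen::Vector2d> P,
+                                        Eigen::MatrixXd R, Eigen::Vector2d t)
+{
+    vector<Eigen::Vector2d> out;
+    for (int i = 0; i < P.size(); i++)
+        out.push_back(R * P[i] + t);
+    return out;
+}
+
+// Sum of squared distances between same-index points (assumed helper).
+double Compute_Square_difference(vector<Eigen::Vector2d> P, vector<Eigen::Vector2d> Q)
+{
+    double sum = 0.0;
+    for (int i = 0; i < P.size() && i < Q.size(); i++)
+        sum += (P[i] - Q[i]).squaredNorm();
+    return sum;
+}
+{% endhighlight %}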

+
+{% highlight cpp %}
+vector<Eigen::Vector2d> ICP_svd(vector<Eigen::Vector2d> P,
+                                vector<Eigen::Vector2d> Q,
+                                vector<double>& error,
+                                int iterations = 10)
+{
+    // Step 1: compute the center of the data and transform it
+    Eigen::Vector2d center_of_Q = Center_data(Q);
+    vector<Eigen::Vector2d> Q_centered;
+    for (int i = 0; i < Q.size(); i++)
+    {
+        Q_centered.push_back(Q[i] - center_of_Q);
+    }
+    vector<Eigen::Vector2d> P_copy = P;
+    // Start the loop
+    for (int iter = 0; iter < iterations; iter++)
+    {
+        Plot_data(P_copy, Q);
+
+        vector<Eigen::Vector2d> P_centered;
+        Eigen::Vector2d center_of_P = Center_data(P_copy);
+        for (int i = 0; i < P.size(); i++)
+        {
+            P_centered.push_back(P_copy[i] - center_of_P);
+        }
+
+        // Step 2: compute correspondences
+        auto correspondences = Get_correspondence_indices(P_centered, Q_centered);
+
+        // Step 3: compute the cross covariance
+        auto cov = compute_cross_covariance(P_centered, Q_centered, correspondences);
+
+        // Step 4: find R and t from the SVD decomposition
+        Eigen::JacobiSVD<Eigen::MatrixXd> svd(cov, Eigen::ComputeFullU | Eigen::ComputeFullV);
+        auto R_found = svd.matrixU() * svd.matrixV().transpose();
+        auto T_found = center_of_Q - R_found * center_of_P;
+
+        // Step 5: apply the transformation and compute the error
+        P_copy = Apply_transform(P_copy, R_found, T_found);
+        double _error = Compute_Square_difference(P_copy, Q);
+        error.push_back(_error);
+        cout << "Squared diff: (P_corrected - Q) = " << _error << endl;
+    }
+    Plot_data(P_copy, Q, 400);
+    return P_copy;
+}
+{% endhighlight %}
+

+ The figures below show the output of the ICP_SVD algorithm and the error.
+ As can be seen, the algorithm took seven iterations to drop the error to zero and match the two point sets.

+ + + + + +

ICP based on non-linear least squares

+ +

+ In this problem, we are working to reduce the sum of squared distances between corresponding points.

+
+$$E = \sum_i [R p_i + t - q_i]^2$$
+

+ \(E\) is the value that should be minimised by updating \(R, t\).
+ Keep in mind that \(P\) is the only set that gets updated, and due to the presence of \(R\) in
+ the equation this function is non-linear.

+ +

+ The minimization starts with computing the corresponding points between both sets.
+ We assume the pose is described as \(\mathbf{x} = [x,y,\theta]^T\), where \(\theta\)
+ appears in the rotation matrix \(R = \begin{pmatrix} \cos(\theta) & -\sin(\theta)\\\ \sin(\theta) & \cos(\theta)\end{pmatrix}\).

+ + + +

+ As shown above, we solved the matching problem using the SVD,
+ but other methods are more efficient. Instead of solving the SVD,
+ we try to minimise the error between the corresponding points directly:

+ +$$ +E = \sum_{i}[Rp_i + t - q_i]^2 +$$ + +

+ For the minimization we update \(R, t\), which are represented in a vector \(x=[x,y,\theta]^T\),
+ where \(R=\begin{pmatrix}\cos(\theta) & -\sin(\theta)\\\ \sin(\theta) & \cos(\theta)\end{pmatrix}\)
+ and \(t = \begin{pmatrix} x \\\ y\end{pmatrix}\).
+ We keep updating the target points \(P\) until they overlap the reference points \(Q\).
+ Due to the presence of \(R\), the problem is non-linear. The small helper Get_R used in the later listings builds this rotation matrix.
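+ Get_R is referenced by the code below but not shown in the post; a minimal sketch following the \(R\) formula above (the helper body is an assumption):
+{% highlight cpp %}
+// Build the 2D rotation matrix for a given angle (assumed helper).
+Eigen::Matrix2d Get_R(double theta)
+{
+    Eigen::Matrix2d R;
+    R << cos(theta), -sin(theta),
+         sin(theta),  cos(theta);
+    return R;
+}
+{% endhighlight %}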

+ +

+ To solve this problem, we will use the Gauss-Newton method to minimise the error between the corresponding points.
+ The total error is the sum of the errors over the corresponding points, shown in the equation below.

+$$ + e(x) = \sum_{i,j} e_{i,j}(x) +$$ +

+ Keep in mind that \(i,j\) refer to the corresponding points between the \(P\) and \(Q\) sets.

+$$ + e_{i,j}(x) = R_{\theta}p_i + t - q_j +$$ + +

+ Therefore, we need to minimize the error function \(E(x)\). +

+ + +$$ + {min}_x \to {\sum_{i,j} ||R_{\theta}p_i + t - q_j||^2} +$$ + +

+ Let \(h_i(x) = R_{\theta}p_i + t\). The error function becomes

+ +$$ + {min}_x \to {\sum_{i,j} ||h(x) - q_j||^2} +$$ + +

+ To compute the local minimum we will use the Hessian matrix, which is a square \(n \times n\) matrix of second-order partial derivatives of a function
+ (see the linked reference for more information).

+$$ + H\Delta x = -E^{'} (x) +$$ +

+ And \(\Delta x = [\Delta x, \Delta y, \Delta \theta]^T\) represents the small change in \(x\).
+ Therefore the derivative of the error function becomes

+ +$$ + E^{'}(x) = J(x)e(x) +$$ + +

+ \(J(x)\) is the Jacobian, and it is shown below.

+
+$$
+ J = \begin{pmatrix}1 & 0 & -\sin(\theta){p_i}^x-\cos(\theta){p_i}^y\\\ 0 & 1 & \cos(\theta){p_i}^x-\sin(\theta){p_i}^y\end{pmatrix}
+$$
+

+ The code below shows a function that creates the Jacobian using Eigen::MatrixXd, generating a matrix of size (2x3), together with the error function for a single correspondence.

+
+
+{% highlight cpp %}
+Eigen::MatrixXd Jacobian(Eigen::Vector3d _x, Eigen::Vector2d point)
+{
+    double angle = _x(2);
+    double x = point(0);
+    double y = point(1);
+    Eigen::MatrixXd J(2, 3);
+    J << 1, 0, -sin(angle) * x - cos(angle) * y,
+         0, 1, cos(angle) * x - sin(angle) * y;
+
+    return J;
+}
+
+Eigen::Vector2d Error(Eigen::Vector3d x, Eigen::Vector2d p, Eigen::Vector2d q)
+{
+    auto rotation = Get_R(x(2));
+    Eigen::Vector2d translation;
+    translation << x(0), x(1);
+    // prediction = R p + t, matching e = R p + t - q above
+    auto prediction = rotation * p + translation;
+    return (prediction - q);
+}
+{% endhighlight %}
+

+ Now we initialize the Hessian \(H = \begin{pmatrix} 0 & 0& 0\\\ 0 & 0& 0\\\ 0 & 0& 0 \end{pmatrix}\)
+ and \(g =\begin{pmatrix} 0 \\\ 0 \\\ 0 \end{pmatrix}\) with zeros. To build the system we create a
+ function called Prepare_system that accumulates the Hessian matrix.

+ +

+ The function Prepare_system takes seven inputs: \(x\),
+ the point sets \(P\) and \(Q\), the correspondence index vector, the H matrix,
+ the g vector and the error chi. The last three variables are updated and returned by reference.

+

+ The function loops over all corresponding points to compute the Jacobian, then accumulates H and g,
+ where \(H = \sum w J^T J\) and \(g = \sum w J^T e\). Note that the weight (from the Kernal function, sketched after the listing) suppresses outlier points whose error is above the threshold.

+{% highlight cpp %}
+void Prepare_system(Eigen::Vector3d x,
+                    vector<Eigen::Vector2d> P,
+                    vector<Eigen::Vector2d> Q,
+                    vector<Eigen::Vector2d> correspondences,
+                    Eigen::MatrixXd& H,
+                    Eigen::Vector3d& g,
+                    double& chi)
+{
+    for (int i = 0; i < correspondences.size(); i++)
+    {
+        auto p_point = P[correspondences[i](0)];
+        auto q_point = Q[correspondences[i](1)];
+        auto e = Error(x, p_point, q_point);
+        double weight = Kernal(10, e);
+        auto J = Jacobian(x, p_point);
+        H += weight * J.transpose() * J;
+        g += weight * J.transpose() * e;
+        chi += e.transpose() * e;
+    }
+}
+{% endhighlight %}
+
+
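+ The Kernal function is used above but not listed in the post; a plausible minimal sketch, keeping the name from the call site (threshold-based weighting is an assumption):
+{% highlight cpp %}
+// Give full weight to inliers and zero weight to outliers (assumed helper).
+double Kernal(double threshold, Eigen::Vector2d e)
+{
+    if (e.norm() < threshold)
+        return 1.0;
+    return 0.0;
+}
+{% endhighlight %}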

ICP with non-linear least squares

+ +

+ ICP based on non-linear least squares has four steps, which are shown in the code below.
+ The first step is to set up the loop for a limited number of iterations.
+ Then we initialize the x vector that contains the translation and \(\theta\), and
+ the Hessian matrix \(H\) and \(g\) are initialized with zeros.

+

+ We start processing the points by computing the correspondences using the function defined earlier.
+ Next, we pass the data to the Prepare_system function to compute the Hessian matrix \(H\) and \(g\).
+ Using \(H\) and \(g\), we solve the linear system with Eigen's built-in
+ bdcSvd solver.
+ The output of bdcSvd, \(\Delta x\), is the small motion computed between the two point groups.

+

+ We use \(\Delta x\) to update the overall \(x\) and then compute \(R\) and \(t\).
+ The target points are then updated using the previously described function Apply_transform, and the final
+ translation and rotation are updated as well, using \(R_{final} = R * R_{final}\) and \(T_{final} += t\).

+ +

+ Note that we use chi as a threshold: the loop stops when it drops below a certain value.

+{% highlight cpp %}
+vector<Eigen::Vector2d> ICP_least_squares(vector<Eigen::Vector2d> P,
+                                          vector<Eigen::Vector2d> Q,
+                                          vector<double>& error,
+                                          int iterations = 10)
+{
+    Eigen::MatrixXd R_final = Eigen::MatrixXd::Identity(2, 2);
+    Eigen::Vector2d T_final = Eigen::Vector2d::Zero();
+    vector<Eigen::Vector2d> P_copy = P;
+    for (int i = 0; i < iterations; i++)
+    {
+        Plot_data(P_copy, Q, 80);
+        Eigen::Vector3d x = Eigen::Vector3d::Zero();
+        Eigen::MatrixXd rot = Get_R(x(2));
+        Eigen::Vector2d t;
+        t(0) = x(0);
+        t(1) = x(1);
+        Eigen::MatrixXd H = Eigen::MatrixXd::Zero(3, 3);
+        Eigen::Vector3d g = Eigen::Vector3d::Zero(3);
+        double chi = 0.0;
+        // step 1: compute correspondences
+        auto correspondences = Get_correspondence_indices(P_copy, Q);
+
+        // step 2: compute H, g and chi
+        Prepare_system(x, P_copy, Q, correspondences, H, g, chi);
+        error.push_back(chi);
+        // step 3: solve the linear equation
+        Eigen::Vector3d dx = -1 * H.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(g);
+
+        // step 4: update x and apply the transformation
+        x += dx;
+        x(2) = atan2(sin(x(2)), cos(x(2)));
+        rot = Get_R(x(2));
+        t(0) = x(0);
+        t(1) = x(1);
+        P_copy = Apply_transform(P_copy, rot, t);
+        R_final = rot * R_final;
+        T_final += t;
+        if (chi < 0.0001)
+        {
+            break;
+        }
+    }
+    return P_copy;
+}
+{% endhighlight %}
+

+ The figure below shows the output of ICP least square and the error output. +

+ + + + + + + + +

+ For the full code, check this link: GitHub code .

+ + + + +

References

+

+ Srinath Rajagopalan +

+

+ Link_1 +

+

+ and the explanation of Cyrill Stachniss

+

+ Jyoti Sunkara +

+

M. Greenspan and M. Yurick, "Approximate k-d tree search for efficient ICP," + Fourth International Conference on 3-D Digital Imaging and Modeling, 2003. 3DIM 2003. + Proceedings., 2003, pp. 442-448, doi: 10.1109/IM.2003.1240280.

\ No newline at end of file diff --git a/_posts/2022-01-20-search-algorithm.html b/_posts/2022-01-20-search-algorithm.html new file mode 100644 index 0000000000..fb0331528f --- /dev/null +++ b/_posts/2022-01-20-search-algorithm.html @@ -0,0 +1,433 @@ +--- +layout: post +title: "Search Algorithm" +subtitle: "implementation of depth-first, breadth-first and A star in c++" +date: 2022-01-20 +background: '/img/posts/04.jpg' +--- +

+ In this post I am going to go through my implementation of different search algorithms
+ for path finding on a grid map. First, I will start with the uninformed search
+ algorithms, breadth-first and depth-first. Then I will present the A* algorithm
+ and how we can optimize the code. As usual, I am going to use C++ for the programming.
+

+ +

+ There are different types of graphs, which are shown in the image below.
+

+ + + +

+ In this project I define the map as a 2D vector, std::vector<std::vector<Node>>.
+ The node is a class designed to hold the required information such as the X, Y position,
+ the state (clear or wall), the parent position,
+ and a flag to check whether it has been visited or not. Below is the code defining the class.
+

+
+{% highlight ruby %}
+class Node {
+  public:
+    int x, y;            // grid position
+    State state;         // empty, wall, visited, path, ...
+    bool visited;
+
+    int parent_x;        // position of the node we came from
+    int parent_y;
+
+    int G;               // accumulated move cost from the start (used by A*)
+    int H;               // heuristic distance to the goal (used by A*)
+    int cost;            // terrain cost of entering this cell
+
+    Node(int _x, int _y, State _state, int px, int py, int g = 0, int h = 0) {
+        x = _x;
+        y = _y;
+        state = _state;
+        parent_x = px;
+        parent_y = py;
+        visited = false;
+        G = g;
+        H = h;
+        if (state == State::kObstecal)
+        {
+            cost = 255;
+        }
+        else
+        {
+            cost = 0;
+        }
+    }
+};
+{% endhighlight %}
+
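+
+ The State enum used by the class is not repeated here; a minimal sketch covering the values that appear
+ in this post would look like the following (the names match the code, including the kObstecal spelling).
+
+{% highlight ruby %}
+// Sketch of the cell states assumed by the Node class and the search functions
+enum class State { kEmpty, kObstecal, kClose, kVisited, kPath, kStart, kFinish };
+{% endhighlight %}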

+ So, let’s start talking about the search algorithms. A search algorithm is
+ used to find the shortest path between two points.
+ There are two types of search algorithms: uninformed search and informed (guided) search.
+

+

+ An uninformed search algorithm has no information besides the start position and the goal
+ position; it searches all adjacent nodes until it finds the goal. There are two
+ approaches we will present in this work: the breadth-first and depth-first
+ algorithms.
+

+

Breadth-first search

+

+ The figure below explains how breadth-first search works: it
+ explores all the adjacent nodes before it moves on to the children of those nodes.
+

+ + + +

+ Every time the algorithm opens a new node it checks whether the goal has been reached;
+ if not, it continues exploring the remaining nodes. The figure below shows the algorithm's steps.
+

+ + + +

+ The code is shown below; it takes three inputs and returns a string.
+ The inputs are a 2D grid, the start position, and the goal position.
+ I used a queue to store the open nodes. The reason for using a queue is that this data
+ structure provides first-in, first-out (FIFO) ordering, which saves time in organizing
+ the data.
+

+
+{% highlight ruby %}
+string BreadthFirst(vector<vector<Node>>& grid, int start[2], int goal[2]) {
+    // step 1: initialize the open list and add the start node to it
+    queue<Node> openList;
+    int x = start[0];
+    int y = start[1];
+    AddToOpenList(x, y, 0, 0, grid, openList);
+    // step 2: loop while the list is not empty
+    while (!openList.empty())
+    {
+        // step 3: dequeue the front node as currentNode and remove it from the list
+        Node currentNode = openList.front();
+        openList.pop();
+
+        x = currentNode.x;
+        y = currentNode.y;
+        grid[x][y].visited = true;
+        grid[x][y].state = State::kVisited;
+
+        // step 4: check if the current node is the goal
+        if (x == goal[0] && y == goal[1])
+        {
+            RetracePath(start, goal, grid);
+            grid[start[0]][start[1]].state = State::kStart;
+            grid[goal[0]][goal[1]].state = State::kFinish;
+            return "Path found";
+        }
+        // step 5: expand the neighbors of the current node
+        ExpandNeighbor(currentNode, openList, grid);
+    }
+    return "No path found";
+}
+{% endhighlight %}
+

+ I wrote a function to expand the neighbors of the current node; it checks the four
+ directions around the node: top, right, bottom, and left. Note that this function is written as
+ a template so I can swap the type of the open list. The neighbor offsets delta that it iterates over
+ are sketched right after the function.
+

+
+{% highlight ruby %}
+// This function is used to expand the neighbors of the current Node
+template <typename listTT>
+void ExpandNeighbor(Node currentNode, listTT& openList, vector<vector<Node>>& grid) {
+    int x = currentNode.x;
+    int y = currentNode.y;
+
+    for (auto neighbor : delta)
+    {
+        int x2 = x + neighbor[0];
+        int y2 = y + neighbor[1];
+        if (CheckValidCell(x2, y2, grid) == 1)
+        {
+            AddToOpenList(x2, y2, x, y, grid, openList);
+            grid[x2][y2].state = State::kVisited;
+        }
+    }
+}
+{% endhighlight %}
+
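+
+ The delta array itself is not listed in this post; a minimal sketch of the four-connected
+ offsets it is assumed to contain (top, right, bottom, left) is below.
+
+{% highlight ruby %}
+// Assumed four-connected neighbor offsets: up, right, down, left
+const int delta[4][2]{{-1, 0}, {0, 1}, {1, 0}, {0, -1}};
+{% endhighlight %}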

+ CheckValidCell is another function used to test if the node is accessible. +

+
+{% highlight ruby %}
+bool CheckValidCell(int x, int y, std::vector<std::vector<Node>>& grid)
+{
+    bool on_grid_x = (x >= 0 && x < grid.size());
+    bool on_grid_y = (y >= 0 && y < grid[0].size());
+    if (on_grid_x && on_grid_y)
+    {
+        State nodeState = grid[x][y].state;
+        if (nodeState != State::kObstecal && grid[x][y].visited == false)
+        {
+            return grid[x][y].state == State::kEmpty;
+        }
+    }
+    return false;
+}
+{% endhighlight %}
+

+ AddToOpenList is a function designed
+ to add a node to the open list; it exists because constructing the Node class takes several arguments.
+

+{% highlight ruby %}
+template <typename listT>
+void AddToOpenList(int x, int y, int p_x, int p_y, vector<vector<Node>>& grid, listT& openList) {
+    Node currentNode{ x, y, State::kClose, p_x, p_y };
+    openList.push(currentNode);
+    grid[x][y].parent_x = p_x;
+    grid[x][y].parent_y = p_y;
+    grid[x][y].state = State::kClose;
+}
+{% endhighlight %}
+

+ And finally, we have the function to trace the path from finish to start.
+ Here we see why we define a parent for each node:
+ every current node expands up to four new nodes, and the current node becomes the
+ parent of those nodes, so following the parents from the goal leads back to the start.
+

+
+{% highlight ruby %}
+vector<Node> RetracePath(int startNode[2], int goal[2], vector<vector<Node>>& grid)
+{
+    vector<Node> path;
+    Node currentNode = grid[goal[0]][goal[1]];
+    int x = currentNode.x;
+    int y = currentNode.y;
+    // walk back from the goal through the parents until the start is reached
+    while (x != startNode[0] || y != startNode[1])
+    {
+        path.push_back(currentNode);
+        grid[currentNode.x][currentNode.y].state = State::kPath;
+        currentNode = grid[currentNode.parent_x][currentNode.parent_y];
+        x = currentNode.x;
+        y = currentNode.y;
+    }
+
+    reverse(path.begin(), path.end());
+    cout << "Path length: " << path.size() << "\n";
+    return path;
+}
+{% endhighlight %}


+ +

Depth-first search

+ +

+ The second uninformed search is depth-first, which explores the full depth of
+ a branch before it moves to the neighbors. The figure below shows how depth-first
+ search explores the grid.
+

+ + + +

+ The figure below shows the steps of the depth-first algorithm, which follows the same overall procedure.
+

+ + + +

+ The code below shows the depth-first function; it has the same inputs as
+ breadth-first and the same output. But this algorithm uses a stack to store
+ the open list, because a stack provides last-in, first-out (LIFO) ordering.
+

+
+{% highlight ruby %}
+string DepthFirst(vector<vector<Node>>& grid, int start[2], int goal[2]) {
+    // step 1: initialize the open list and add the start node
+    stack<Node> openList;
+    int x = start[0];
+    int y = start[1];
+    AddToOpenList(x, y, 0, 0, grid, openList);
+
+    // step 2: loop while the open list is not empty
+    while (!openList.empty())
+    {
+        // step 3: get the currentNode from the top of the stack
+        Node currentNode = openList.top();
+        openList.pop();
+        x = currentNode.x;
+        y = currentNode.y;
+        // step 4: mark the node as visited
+        grid[x][y].visited = true;
+        grid[x][y].state = State::kVisited;
+        // step 5: check if the node is the goal
+        if (x == goal[0] && y == goal[1])
+        {
+            RetracePath(start, goal, grid);
+            grid[start[0]][start[1]].state = State::kStart;
+            grid[goal[0]][goal[1]].state = State::kFinish;
+            return "[INFO] path was found...";
+        }
+
+        // step 6: expand the neighbors of the node
+        ExpandNeighbor(currentNode, openList, grid);
+    }
+    return "[INFO] No path was found...";
+}
+{% endhighlight %}
+

+ The output of both breadth-first and depth-first is shown below.
+ The explored nodes are shown in purple, the path is shown in green, and yellow marks the walls.
+

+ + + +

+ As can be seen in the output, the exploration patterns of the two methods are different
+ due to the way each one explores. Both algorithms are able to find a path, but the
+ final path lengths differ, as does the number of nodes explored.
+

+ +

A* Algorithm

+

+ Now we are going to use a heuristic-based algorithm that uses a measure to choose the shortest path while exploring a minimum number of nodes.
+ What we mean by a heuristic function is that the algorithm uses information, such as the movement cost
+ and the distance from the current node to the goal, and then decides which node to move to. Nodes are expanded
+ in order of their cost, from smallest to largest.
+

+ +

+ Revisiting the Node class, there are two variables that we did not use in the uninformed searches:
+ H and G. H represents the distance between the node and the goal, measured with the Euclidean distance,
+ and G is the move cost accumulated from the start node to the current node. A node's total cost is the sum \(G + H\).
+

+

+ We update the ExpandNeighbors function to compute the heuristic measurement and the movement cost for each neighbor.
+

+
+{% highlight ruby %}
+/**
+* Expand the current node's neighbors and add them to the open list.
+*/
+void ExpandNeighbors(Node& currentNode,
+                     std::priority_queue<Node, std::vector<Node>, CompareDis>& openList,
+                     std::vector<std::vector<Node>>& grid, int goal[2])
+{
+    // Get the current node's data.
+    int x = currentNode.x;
+    int y = currentNode.y;
+    int g = currentNode.G;
+    // Loop through the current node's potential neighbors.
+    for (auto neighbor : delta)
+    {
+        // Check that the potential neighbor's x2 and y2 values are on the grid and not closed.
+        int x2 = x + neighbor[0];
+        int y2 = y + neighbor[1];
+        // Increment the g value, compute the h value, and add the neighbor to the open list.
+        if (CheckValidCell(x2, y2, grid) == 1 && grid[x2][y2].visited == false)
+        {
+            int cost = grid[x2][y2].cost;
+            int g2 = g + 1;
+            int h2 = Heuristic(x2, y2, goal[0], goal[1], cost);
+            AddToOpen(x2, y2, g2, h2, x, y, openList, grid);
+            grid[x2][y2].visited = true;
+        }
+    }
+}
+{% endhighlight %}
+

+ The heuristic function is shown below.
+

+
+{% highlight ruby %}
+int Heuristic(int x1, int y1, int x2, int y2, int cost = 0)
+{
+    // Euclidean distance to the goal plus the terrain cost of the cell
+    // (the cost is 0 for free cells, so it only matters on weighted maps)
+    return std::sqrt(std::pow(x2 - x1, 2) + std::pow(y2 - y1, 2)) + cost;
+}
+{% endhighlight %}
+
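+
+ Two helpers appear in the A* code but are not listed in this post: the priority-queue comparator
+ CompareDis and the A* version of AddToOpen. Minimal sketches, assuming the queue is ordered by the
+ total cost \(G + H\) (smallest on top), are shown below; the actual definitions are in the full code on GitHub.
+
+{% highlight ruby %}
+// Sketch: order nodes so the one with the smallest total cost G + H is on top
+struct CompareDis {
+    bool operator()(const Node& a, const Node& b) const {
+        return (a.G + a.H) > (b.G + b.H);
+    }
+};
+
+// Sketch: push a node with its g and h values and record its parent in the grid
+void AddToOpen(int x, int y, int g, int h, int p_x, int p_y,
+               std::priority_queue<Node, std::vector<Node>, CompareDis>& openList,
+               std::vector<std::vector<Node>>& grid) {
+    Node node{ x, y, State::kClose, p_x, p_y, g, h };
+    openList.push(node);
+    grid[x][y].parent_x = p_x;
+    grid[x][y].parent_y = p_y;
+    grid[x][y].state = State::kClose;
+}
+{% endhighlight %}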

+ The steps of the search algorithm are shown below.
+

+ +

+ Now let's check out the Search function itself.
+

+{% highlight ruby %}
+std::vector<Node> Search(std::vector<std::vector<Node>>& grid, int init[2], int goal[2]) {
+
+    std::priority_queue<Node, std::vector<Node>, CompareDis> openList;
+    std::vector<Node> path;
+    int x = init[0];
+    int y = init[1];
+    int h = Heuristic(x, y, goal[0], goal[1]);
+    AddToOpen(x, y, 0, h, 0, 0, openList, grid);
+    // while the open list is not empty
+    while (!openList.empty()) {
+        // get the node with the smallest total cost
+        auto currentNode = openList.top();
+        openList.pop();
+        x = currentNode.x;
+        y = currentNode.y;
+        grid[x][y].state = State::kVisited;
+
+        // Check if we've reached the goal. If so, retrace and return the path.
+        if (x == goal[0] && y == goal[1])
+        {
+            int _goal[2]{ x, y };
+            auto path = RetracePath(init, _goal, grid);
+
+            grid[init[0]][init[1]].state = State::kStart;
+            grid[goal[0]][goal[1]].state = State::kFinish;
+            std::cout << "Path found!\n";
+            return path;
+        }
+        // If we're not done, expand the search to the current node's neighbors.
+        ExpandNeighbors(currentNode, openList, grid, goal);
+    }
+    std::cout << "No path found!" << "\n";
+    return std::vector<Node>{};
+}
+{% endhighlight %}
+

+ The result is shown in the figure below:
+ the number of explored nodes is smaller and the path is the shortest compared to the other two algorithms.
+

+ + + +

Conclusion

+

+ As can be seen, there are two types of search algorithms: uninformed search and informed search.
+ In uninformed search the exploration is unguided and depends only on the order in which nodes are expanded,
+ while an informed search algorithm uses extra information through a heuristic function.
+

+ +

+ Overall, the steps for applying the algorithms are the same, but using the right type of data structure
+ simplifies the implementation process.
+

+ +

+ For the full code, check this link: GitHub_code
+

\ No newline at end of file diff --git a/_posts/2022-01-31-diffrential-drive.html b/_posts/2022-01-31-diffrential-drive.html new file mode 100644 index 0000000000..3d8976007a --- /dev/null +++ b/_posts/2022-01-31-diffrential-drive.html @@ -0,0 +1,344 @@ +--- +layout: post +title: "Differential Drive Arduino" +subtitle: "Code the mathimatical model of differential drive robot" +date: 2022-01-31 +background: '/img/posts/01.jpg' +--- + +

+ In this post I am going to present my work on developing the base code for a differential drive mobile robot. The code is written to run on an Arduino, so we are going to use C/C++.
+ Let's start with the picture below: the mobile robot has two wheels, and each wheel rotates independently. The size of the robot is known, so we have the distance between the centers of
+ the wheels, which we call \(w\), and the wheel radius \(R\). Each wheel has a velocity, \(v_l\) and \(v_r\) for the left and right wheel respectively.
+ Using this information we can write the model equations for the velocity, giving the following:
+

+ +
+
+$$\dot x = \frac{R}{2} (v_r + v_l)\cos\theta \qquad (1)$$
+$$\dot y = \frac{R}{2} (v_r + v_l)\sin\theta \qquad (2)$$
+$$\dot \theta = \frac{R}{w} (v_r - v_l) \qquad (3)$$
+

+ However, the standard control input for a mobile base is a velocity \(v\) and an angular velocity \(\omega\); therefore, we need to map these inputs to wheel velocities. Using the unicycle model shown in the figure below, we get three equations:
+

+
+$$\dot x = v\cos\theta \qquad (4)$$
+$$\dot y = v\sin\theta \qquad (5)$$
+$$\dot \theta = \omega \qquad (6)$$
+
+Now we can equate these with the first three equations by substituting \(\dot x, \dot y, \dot\theta\), which gives
+(equations 7 and 8 are the same relation, coming from the \(x\) and \(y\) rows):
+$$\frac{2v}{R} = v_r + v_l \qquad (7)$$
+$$\frac{2v}{R} = v_r + v_l \qquad (8)$$
+$$\frac{\omega w}{R} = v_r - v_l \qquad (9)$$
+

+ Now we combine equations 8 and 9, once by subtracting 9 from 8 and once by adding 8 to 9; this gives:
+

+$$v_l = \frac{2v - \omega w}{2R} \qquad (10)$$
+$$v_r = \frac{2v + \omega w}{2R} \qquad (11)$$
+
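+
+ As a quick sanity check, take the base parameters used later in the code (\(w = 0.418\) m, \(R = 0.1254/2 = 0.0627\) m)
+ and command \(v = 0.5\) m/s with \(\omega = 1\) rad/s:
+
+$$v_l = \frac{2(0.5) - (1)(0.418)}{2(0.0627)} \approx 4.64 \ \text{rad/s}, \qquad v_r = \frac{2(0.5) + (1)(0.418)}{2(0.0627)} \approx 11.31 \ \text{rad/s}$$
+
+ The robot turns toward the slower (left) wheel, as expected for a positive \(\omega\).
+
+Odometry
+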

+ So far, we have mapped the inputs \(v\) and \(\omega\) to wheel velocities for the differential drive base.
+ Now we need to track the position of the base (the state \([x, y, \theta]\)) relative to the world.
+ We will use encoders to measure the distance moved. Looking at the figure below, we see there are three arcs that the robot generates during
+ movement: \(D_l\), \(D_r\), and \(D_c\) for the movement of the left wheel, the right wheel, and the center of the base respectively.
+ The center distance is computed by taking the sum of the left and right distances divided by two, as in the equation below.
+

+ +
+
+$$D_c = \frac{D_r + D_l}{2} \qquad (12)$$
+

+ And to compute \(D_r\) and \(D_l\) +

+
+$$D_r = 2\pi R \frac{\Delta tick_r}{N} \qquad (13)$$
+$$D_l = 2\pi R \frac{\Delta tick_l}{N} \qquad (14)$$
+
+Where N is the number of ticks in one revolution. Now, to update the state of the robot we use the following equations:
+$$x' = x + D_c \cos\theta \qquad (15)$$
+$$y' = y + D_c \sin\theta \qquad (16)$$
+$$\theta' = \theta + \frac{D_r - D_l}{w} \qquad (17)$$
+
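+
+ For example, with \(R = 0.0627\) m and \(N = 72\) ticks per revolution (the values used in the code below), 10 ticks on a wheel correspond to
+
+$$D = 2\pi (0.0627) \frac{10}{72} \approx 0.0547 \ \text{m}$$
+
+ so each tick is roughly 5.5 mm of travel for this robot.
+
+PID controller
+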

+ To control the speed of the wheels we apply a PID controller to each wheel's velocity.
+ The figure below shows the implementation: the input is the velocity \(v_i\), where \(i \in \{r, l\}\), and the PID computes the output PWM that drives the motor.
+

+Arduino Code + +

+ Above we presented the mathematical model of the base, where we have two inputs to control the base and we get the output state \(x = [x, y, \theta]\).
+ First we include the PID library, then we define the pins used to control the motors and read the encoders.
+

+
+{% highlight ruby %}
+// include libraries (the Arduino PID_v1 library is assumed here)
+#include <PID_v1.h>
+// Define the encoder pins
+#define leftEncoderPinA 2
+#define leftEncoderPinB 4
+#define rightEncoderPinA 3
+#define rightEncoderPinB 5
+// Define the motor driver pins
+#define leftSpeedPin 6
+#define rightSpeedPin 9
+#define leftDir1 7
+#define leftDir2 8
+#define rightDir1 10
+#define rightDir2 11
+{% endhighlight %}

+Here I define the period of the main loop; this helps us update the state at a fixed rate.
+

+
+{% highlight ruby %}
+// define the loop timing
+unsigned long currentMillis;
+long previousMillis = 0;   // set up timers
+int LoopSpeed = 30;        // loop period in milliseconds (about 33 Hz)
+float deltaTime = 0;
+{% endhighlight %}
+

+ Now I define the PID controller parameters and set up the PID objects; note that these values need to be tuned for your own robot.
+

+ +{% highlight ruby %} +// pid parameters:- +double leftSetpoint , leftCurrentVelocity , leftOutput; +double rightSetpoint,rightCurrentVelocity,rightOutput; +double Kp=0.00206812507453938; +double Ki=0.0339782106923304; +double Kd=3.14697363162208e-05; +// define object :- +PID leftWheelPID(&leftCurrentVelocity, &leftOutput, &leftSetpoint,Kp,Ki,Kd,DIRECT); +PID rightWheelPID(&rightCurrentVelocity, &rightOutput, &rightSetpoint,Kp,Ki,Kd,DIRECT); +{% endhighlight %} + +

+ Here I define the base parameters and measurements, such as the wheel size and the distance between the wheels.
+

+ +{% highlight ruby %} +// All parameter should be in meter +#define WHEEL_DIAMETER 0.1254 +#define WHEEL_SEPRATION 0.418 +#define ENCODER_TICK_PERREVOLUTION 72 +#define pi 3.14159265359 +{% endhighlight %} + +

+ Now I define the variables used to track the left and right wheel ticks, and the target velocity \(v\) and angular velocity \(\omega\).
+

+
+{% highlight ruby %}
+// define the global parameters
+volatile int leftTick = 0;      // running encoder count, left wheel
+volatile int rightTick = 0;     // running encoder count, right wheel
+volatile int leftTickOld = 0;   // count at the previous update
+volatile int rightTickOld = 0;  // count at the previous update
+double targetVelocity;
+double targetAngularVelocity;
+{% endhighlight %}
+

+ Here I create structures for the wheel velocities and the robot state to simplify creating functions later.
+

+
+{% highlight ruby %}
+// This structure is used to return the left and right wheel velocities
+struct WheelsVelocity{
+    double leftVelocity;
+    double rightVelocity;
+};
+// This structure holds the robot state
+struct Coordinate{
+    float x;
+    float y;
+    float theta;
+};
+{% endhighlight %}
+

+ Now I create the function that converts the input velocity and angular velocity into separate velocities for the left and right wheels.
+

+
+{% highlight ruby %}
+// This function computes the turning velocity for each wheel
+// baseVelocity: the required forward speed
+// angularVelocity: the rotating speed
+// the return value holds the left and right wheel velocities
+// (which sign maps to which wheel depends on the convention chosen for positive omega)
+struct WheelsVelocity GetWheelVelocity(float baseVelocity, double angularVelocity){
+    WheelsVelocity velocity;
+    velocity.leftVelocity = (2*baseVelocity + angularVelocity*WHEEL_SEPRATION)/WHEEL_DIAMETER;
+    velocity.rightVelocity = (2*baseVelocity - angularVelocity*WHEEL_SEPRATION)/WHEEL_DIAMETER;
+    return velocity;
+}
+{% endhighlight %}

+ This function returns the state of the robot given the encoder readings, implementing equations 12 through 17.
+

+
+{% highlight ruby %}
+// This function computes the coordinates of the base (equations 12-17)
+void GetCordinate(double leftTick, double rightTick, struct Coordinate *currentPosition){
+    double leftDeltaTick = leftTick - leftTickOld;
+    double rightDeltaTick = rightTick - rightTickOld;
+    // assign the current encoder values to the old encoder values
+    leftTickOld = leftTick;
+    rightTickOld = rightTick;
+    // distance per wheel: pi * D * (delta ticks / N), equations 13 and 14
+    double leftDistance = pi * WHEEL_DIAMETER * (leftDeltaTick/ENCODER_TICK_PERREVOLUTION);
+    double rightDistance = pi * WHEEL_DIAMETER * (rightDeltaTick/ENCODER_TICK_PERREVOLUTION);
+    // equation 12: the center distance is the average of the two wheel distances
+    double centerDistance = (leftDistance + rightDistance) / 2;
+    currentPosition->x = currentPosition->x + centerDistance*cos(currentPosition->theta);
+    currentPosition->y = currentPosition->y + centerDistance*sin(currentPosition->theta);
+    // equation 17
+    currentPosition->theta = currentPosition->theta + ((rightDistance - leftDistance) / WHEEL_SEPRATION);
+}
+{% endhighlight %}
+

+ Now we define the setup function and configure the pins.
+

+{% highlight ruby %} +void setup() { + //------- Setup encoder pins + pinMode(leftEncoderPinA , INPUT); + pinMode(leftEncoderPinB , INPUT); + pinMode(rightEncoderPinA , INPUT); + pinMode(rightEncoderPinB , INPUT); + // interrupt 0 digital pin 2 positive edge trigger + attachInterrupt(0, LeftFlag, RISING); + // interrupt 1 digital pin 3 positive edge trigger + attachInterrupt(1, RightFlag, RISING); + + //------- Setup Motor pin to output + pinMode(leftDir1, OUTPUT); + pinMode(leftDir2, OUTPUT); + pinMode(leftSpeedPin, OUTPUT); + + pinMode(rightDir1, OUTPUT); + pinMode(rightDir2, OUTPUT); + pinMode(rightSpeedPin, OUTPUT); + + //------- Setup PID and initialize + rightWheelPID.SetMode(AUTOMATIC); //start calculation. + rightWheelPID.SetOutputLimits(-255,255); + rightWheelPID.SetSampleTime(LoopSpeed); + + leftWheelPID.SetMode(AUTOMATIC); //start calculation. + leftWheelPID.SetOutputLimits(-255,255); + leftWheelPID.SetSampleTime(LoopSpeed); + leftSetpoint=0; + rightSetpoint=0; + // start serial port at 9600 bps: + Serial.begin(9600); +} +{% endhighlight %} + +

+ The code below is the main loop of the base. The loop runs once every LoopSpeed milliseconds (30 ms, roughly 33 Hz), and the velocity command is read from the serial port.
+

+
+{% highlight ruby %}
+void loop() {
+    currentMillis = millis();
+    if (currentMillis - previousMillis >= LoopSpeed) {   // start timed event
+        deltaTime = (currentMillis - previousMillis)/1000.0;   // divide by 1000.0 to get seconds
+        previousMillis = currentMillis;
+        //-------step 1: read the serial port for incoming data (velocity, angular velocity)
+        if (Serial.available() > 0){
+            // the 'c' prefix makes sure we are reading a valid command
+            if (Serial.read() == 'c'){
+                targetVelocity = Serial.parseFloat();
+                targetAngularVelocity = Serial.parseFloat();
+            }
+        }
+
+        //-------step 2: compute the velocity of the left and right wheel
+        struct WheelsVelocity targetWheelsVelocity = GetWheelVelocity(targetVelocity, targetAngularVelocity);
+
+        //-------step 3: apply the PID controllers to get the PWM values
+        leftSetpoint = targetWheelsVelocity.leftVelocity;
+        rightSetpoint = targetWheelsVelocity.rightVelocity;
+        // note: the measured speed and the setpoint must use the same units
+        leftCurrentVelocity = Get_speed(leftTick, leftTickOld, deltaTime);
+        rightCurrentVelocity = Get_speed(rightTick, rightTickOld, deltaTime);
+        rightWheelPID.Compute();
+        leftWheelPID.Compute();
+
+        //-------step 4: move the motors
+        int leftWheelPWM = leftOutput;
+        int rightWheelPWM = rightOutput;
+        MoveMotor(leftDir1, leftDir2, leftSpeedPin, leftWheelPWM);
+        MoveMotor(rightDir1, rightDir2, rightSpeedPin, rightWheelPWM);
+        debugPID();
+        //-------step 5: update the coordinates of the base
+        // static so the pose is kept between loop iterations
+        static struct Coordinate currentPosition = {0, 0, 0};
+        GetCordinate(leftTick, rightTick, &currentPosition);
+
+        //-------step 6: publish these coordinates
+        Serial.print(currentPosition.x); Serial.print(',');
+        Serial.print(currentPosition.y); Serial.print(',');
+        Serial.print(currentPosition.theta);
+        Serial.print('\n');
+        Serial.println(leftTick);
+    }
+}
+{% endhighlight %}
+
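+
+ For example, sending the characters c0.25 0.5 over the serial monitor commands a forward velocity of 0.25 m/s and an angular velocity of 0.5 rad/s, following the 'c'-prefixed protocol parsed in step 1.
+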

+ This function is used to drive a motor: the sign of the PWM value selects the direction, and its magnitude sets the speed.
+

+
+{% highlight ruby %}
+void MoveMotor(int directionPin1, int directionPin2, int PWMPin, int PWMValue){
+    // the sign selects the rotation direction
+    if (PWMValue > 0){
+        digitalWrite(directionPin1, HIGH);
+        digitalWrite(directionPin2, LOW);
+    }else{
+        digitalWrite(directionPin1, LOW);
+        digitalWrite(directionPin2, HIGH);
+    }
+    // analogWrite expects 0-255, so use the magnitude only
+    analogWrite(PWMPin, abs(PWMValue));
+}
+{% endhighlight %}
+

+ Finally, the helper functions: the encoder interrupt handlers and one function to compute the wheel speed.
+

+
+{% highlight ruby %}
+void LeftFlag() {
+    // add 1 to the count for CW, subtract 1 for CCW
+    if (!digitalRead(leftEncoderPinB)) {
+        leftTick++;
+    } else {
+        leftTick--;
+    }
+}
+
+void RightFlag() {
+    // add 1 to the count for CW, subtract 1 for CCW
+    // (note: this handler must update rightTick, not leftTick)
+    if (!digitalRead(rightEncoderPinB)) {
+        rightTick++;
+    } else {
+        rightTick--;
+    }
+}
+
+float Get_speed(int tick, int oldTick, float time){
+    // ticks -> revolutions, then divide by the time in minutes to get RPM
+    float rpm = ((tick - oldTick) / (float)ENCODER_TICK_PERREVOLUTION) / (time / 60.0);
+    return rpm;
+}
+{% endhighlight %}
+
diff --git a/about.html b/about.html index c661ef64e4..50b4e192f2 100644 --- a/about.html +++ b/about.html @@ -5,8 +5,8 @@ background: '/img/bg-about.jpg' --- -

Lorem ipsum dolor sit amet, consectetur adipisicing elit. Sed quisquam ut perspiciatis, repudiandae nulla animi iste vel, praesentium repellendus molestias aliquid consequatur, earum rem qui error voluptates eius enim consequuntur!

- -

Lorem ipsum dolor sit amet, consectetur adipisicing elit. Ex alias, earum consectetur quia natus ducimus voluptate explicabo, hic porro reprehenderit, quasi? Tenetur ipsum distinctio laboriosam perspiciatis officiis dolore, architecto id.

- -

Lorem ipsum dolor sit amet, consectetur adipisicing elit. Totam inventore aspernatur repellendus incidunt adipisci modi voluptates recusandae iste eligendi, repudiandae corporis quod aut, optio! Explicabo quaerat unde voluptatem! Itaque, eum!

+

+ Dedicated tech professional with advanced engineering and research skills who enjoys problem solving and critical thinking.
+ Skilled in application development, particularly AI for agriculture and industry, and seeking to expand this expertise
+ by conducting more scientific research and creating a unique and enjoyable student experience at the graduate level.
+

diff --git a/assets/icp/CPI_SVD_ERROR.jpg b/assets/icp/CPI_SVD_ERROR.jpg new file mode 100644 index 0000000000..8ad67ea281 Binary files /dev/null and b/assets/icp/CPI_SVD_ERROR.jpg differ diff --git a/assets/icp/Data.PNG b/assets/icp/Data.PNG new file mode 100644 index 0000000000..14de109315 Binary files /dev/null and b/assets/icp/Data.PNG differ diff --git a/assets/icp/ICP_LSQ_ERROR.jpg b/assets/icp/ICP_LSQ_ERROR.jpg new file mode 100644 index 0000000000..9c4acc5464 Binary files /dev/null and b/assets/icp/ICP_LSQ_ERROR.jpg differ diff --git a/assets/icp/ICP_least_square.gif b/assets/icp/ICP_least_square.gif new file mode 100644 index 0000000000..ed3ff6d635 Binary files /dev/null and b/assets/icp/ICP_least_square.gif differ diff --git a/assets/icp/correspondeces.jpg b/assets/icp/correspondeces.jpg new file mode 100644 index 0000000000..820937b644 Binary files /dev/null and b/assets/icp/correspondeces.jpg differ diff --git a/assets/icp/datas.jpg b/assets/icp/datas.jpg new file mode 100644 index 0000000000..4198b65ab8 Binary files /dev/null and b/assets/icp/datas.jpg differ diff --git a/assets/icp/icp_svd.gif b/assets/icp/icp_svd.gif new file mode 100644 index 0000000000..d9fad8103e Binary files /dev/null and b/assets/icp/icp_svd.gif differ diff --git a/assets/icp/reduce coordinate.PNG b/assets/icp/reduce coordinate.PNG new file mode 100644 index 0000000000..b38408c8f6 Binary files /dev/null and b/assets/icp/reduce coordinate.PNG differ diff --git a/img/posts/Data fusion/1.PNG b/img/posts/Data fusion/1.PNG new file mode 100644 index 0000000000..a396e20266 Binary files /dev/null and b/img/posts/Data fusion/1.PNG differ diff --git a/img/posts/Data fusion/2.PNG b/img/posts/Data fusion/2.PNG new file mode 100644 index 0000000000..62f1e52316 Binary files /dev/null and b/img/posts/Data fusion/2.PNG differ diff --git a/img/posts/Data fusion/3.PNG b/img/posts/Data fusion/3.PNG new file mode 100644 index 0000000000..eea9472e78 Binary files /dev/null and b/img/posts/Data fusion/3.PNG differ diff --git a/img/posts/Data fusion/4.PNG b/img/posts/Data fusion/4.PNG new file mode 100644 index 0000000000..bf7ab42d3d Binary files /dev/null and b/img/posts/Data fusion/4.PNG differ diff --git a/img/posts/Data fusion/5.PNG b/img/posts/Data fusion/5.PNG new file mode 100644 index 0000000000..9096a4d562 Binary files /dev/null and b/img/posts/Data fusion/5.PNG differ diff --git a/img/posts/Data fusion/6.PNG b/img/posts/Data fusion/6.PNG new file mode 100644 index 0000000000..188f3eb227 Binary files /dev/null and b/img/posts/Data fusion/6.PNG differ diff --git a/img/posts/Data fusion/after combine edited.PNG b/img/posts/Data fusion/after combine edited.PNG new file mode 100644 index 0000000000..e967c715f9 Binary files /dev/null and b/img/posts/Data fusion/after combine edited.PNG differ diff --git a/img/posts/Data fusion/before combine edited.PNG b/img/posts/Data fusion/before combine edited.PNG new file mode 100644 index 0000000000..2661735930 Binary files /dev/null and b/img/posts/Data fusion/before combine edited.PNG differ diff --git a/img/posts/Data fusion/kalman output.PNG b/img/posts/Data fusion/kalman output.PNG new file mode 100644 index 0000000000..480a5cac40 Binary files /dev/null and b/img/posts/Data fusion/kalman output.PNG differ diff --git a/img/posts/differential-drive/dynamic model.PNG b/img/posts/differential-drive/dynamic model.PNG new file mode 100644 index 0000000000..cf058bf5bb Binary files /dev/null and b/img/posts/differential-drive/dynamic model.PNG differ diff --git 
a/img/posts/differential-drive/odom.PNG b/img/posts/differential-drive/odom.PNG new file mode 100644 index 0000000000..68ef1dd840 Binary files /dev/null and b/img/posts/differential-drive/odom.PNG differ diff --git a/img/posts/kalman-filter/2.PNG b/img/posts/kalman-filter/2.PNG new file mode 100644 index 0000000000..c2b82272cd Binary files /dev/null and b/img/posts/kalman-filter/2.PNG differ diff --git a/img/posts/kalman-filter/Figure_1.png b/img/posts/kalman-filter/Figure_1.png new file mode 100644 index 0000000000..fcea54802e Binary files /dev/null and b/img/posts/kalman-filter/Figure_1.png differ diff --git a/img/posts/search algorithm/A-start output.PNG b/img/posts/search algorithm/A-start output.PNG new file mode 100644 index 0000000000..9c0941cdf9 Binary files /dev/null and b/img/posts/search algorithm/A-start output.PNG differ diff --git a/img/posts/search algorithm/Depth First algorithm.gif b/img/posts/search algorithm/Depth First algorithm.gif new file mode 100644 index 0000000000..ef296d6240 Binary files /dev/null and b/img/posts/search algorithm/Depth First algorithm.gif differ diff --git a/img/posts/search algorithm/Depth First.gif b/img/posts/search algorithm/Depth First.gif new file mode 100644 index 0000000000..6dccb14b9a Binary files /dev/null and b/img/posts/search algorithm/Depth First.gif differ diff --git a/img/posts/search algorithm/breadth-first algorithm.gif b/img/posts/search algorithm/breadth-first algorithm.gif new file mode 100644 index 0000000000..4c351266e3 Binary files /dev/null and b/img/posts/search algorithm/breadth-first algorithm.gif differ diff --git a/img/posts/search algorithm/breadth-first.gif b/img/posts/search algorithm/breadth-first.gif new file mode 100644 index 0000000000..6c9dc0f2af Binary files /dev/null and b/img/posts/search algorithm/breadth-first.gif differ diff --git a/img/posts/search algorithm/graphs.png b/img/posts/search algorithm/graphs.png new file mode 100644 index 0000000000..0498c1b0fd Binary files /dev/null and b/img/posts/search algorithm/graphs.png differ diff --git a/img/posts/search algorithm/output_breadth-depth.PNG b/img/posts/search algorithm/output_breadth-depth.PNG new file mode 100644 index 0000000000..dca24606ca Binary files /dev/null and b/img/posts/search algorithm/output_breadth-depth.PNG differ