// Markus Buchholz, 2023
// g++ x_mpc_1_cpp.cpp -o t -I/usr/include/python3.8 -lpython3.8
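//
// Scalar MPC example: the plant x_{k+1} = A*x_k + B*u_k is driven towards a
// constant reference. At every closed-loop step the model is predicted over a
// fixed horizon, an unconstrained quadratic cost (Q on the tracking error, R on
// the control terms) is minimized by solving the linear system H*z = -f, and
// the first element of the solution is applied as the control increment.
//
// Note: depending on the system, the Eigen include path (often
// /usr/include/eigen3) may also have to be passed to the compiler with -I.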
#include <iostream>
#include <vector>
#include <Eigen/Dense>
#include <tuple>
#include "matplotlibcpp.h"
namespace plt = matplotlibcpp;
using Eigen::MatrixXd;
using Eigen::MatrixXf;
using Eigen::VectorXd;
// MPC parameters
int horizon = 10; // Prediction horizon
float Q = 1.0f; // State cost
float R = 0.5f; // Control cost
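// Q and R fill the diagonal blocks of the QP Hessian assembled in run_mpc():
// Q weights the predicted tracking errors, R weights the control terms.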
//------------------------------------------------------------------------------------
std::tuple<std::vector<float>, std::vector<float>, std::vector<float>, std::vector<float>> run_mpc()
{
    // Simulation parameters
    int mpc_loop = 50;             // number of closed-loop MPC steps
    double initial_state = 20.0;
    double desired_state = 25.0;   // constant reference to track
    std::vector<float> time;
    // Model parameters: scalar plant x_{k+1} = A * x_k + B * u_k
    VectorXd A(1); // state transition matrix
    VectorXd B(1); // control matrix
    A << 0.9;
    B << 0.5;

    std::vector<float> states;
    std::vector<float> controls;
    double state = initial_state;
    double control_input = 0.0;
    for (int ii = 0; ii < mpc_loop; ii++)
    {
        time.push_back(static_cast<float>(ii));

        VectorXd predicted_states = VectorXd::Zero(horizon + 1);
        predicted_states[0] = state;
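        // Predict the plant response over the horizon, holding the current
        // control input constant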
        for (int jj = 1; jj <= horizon; jj++)
        {
            predicted_states[jj] = A[0] * predicted_states[jj - 1] + B[0] * control_input;
        }
        VectorXd control_errors = VectorXd::Zero(horizon);
        VectorXd state_errors = VectorXd::Zero(horizon + 1);

        for (int kk = 0; kk < predicted_states.size(); kk++)
        {
            state_errors[kk] = predicted_states[kk] - desired_state;
        }
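        // Linear term of the QP: predicted tracking errors stacked on top of
        // the (zero) control-error entries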
        VectorXd f = VectorXd::Zero(state_errors.size() + control_errors.size());
        f << state_errors, control_errors;
        // Diagonal weighting blocks built from the scalar costs Q and R
        MatrixXd Q_matrix = MatrixXd::Zero(horizon + 1, horizon + 1);
        Q_matrix.diagonal() = VectorXd::Constant(horizon + 1, Q);

        MatrixXd R_matrix = MatrixXd::Zero(horizon, horizon);
        R_matrix.diagonal() = VectorXd::Constant(horizon, R);

        // Block-diagonal Hessian H = blkdiag(Q_matrix, R_matrix)
        MatrixXd H = MatrixXd::Zero(2 * horizon + 1, 2 * horizon + 1);
        H.block(0, 0, Q_matrix.rows(), Q_matrix.cols()) = Q_matrix;
        H.block(horizon + 1, horizon + 1, R_matrix.rows(), R_matrix.cols()) = R_matrix;
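        // Unconstrained QP: minimizing 0.5 * z' * H * z + f' * z over the stacked
        // decision vector z is equivalent to solving the linear system H * z = -f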
        // Solve for the stacked decision vector
        VectorXd control_input_delta = H.colPivHouseholderQr().solve(-f);
        control_input += control_input_delta[0]; // receding horizon: apply only the first increment
        // Apply the control to the plant and log the closed-loop trajectory
        state = A[0] * state + B[0] * control_input;
        states.push_back((float)state);
        controls.push_back((float)control_input);
    }

    std::cout << time.size() << " : " << states.size() << " : " << controls.size() << "\n";

    return std::make_tuple(time, states, controls, std::vector<float>(time.size(), (float)desired_state));
}
//------------------------------------------------------------------------------------
void plot(const std::vector<float> &time, const std::vector<float> &states,
          const std::vector<float> &controls, const std::vector<float> &desired)
{
    plt::title("Model Predictive Control (MPC)");
    plt::named_plot("controls", time, controls);
    plt::named_plot("states", time, states);
    plt::named_plot("desired", time, desired, "r--");
    plt::xlabel("time");
    plt::ylabel("Y");
    plt::legend();
    plt::show();
}
//------------------------------------------------------------------------------------
int main()
{
    auto mpc = run_mpc();
    plot(std::get<0>(mpc), std::get<1>(mpc), std::get<2>(mpc), std::get<3>(mpc));

    return 0;
}