Professional Documents
Culture Documents
TUGAS 4
Mahasiswa:
Ahmad Hasyim A
Dosen:
Mr. Endra Pitowarno
December 4, 2012
0.1 examp 9.1.m

$\lim_{n\to\infty}\sum_{k=1}^{n}\frac{1}{k^{2}}=\frac{\pi^{2}}{6}$
0.2
examp 9.2.m
%Example 9.2
%Discrete Solution of Riccati Equation
%Optimal Tracking Control Problem
%NOTE(review): transpose operators (') were lost in the text extraction;
%they are restored below wherever the original products are dimensionally
%invalid without them (e.g. BD is 2x1, so BD*P cannot be formed).
A=[0 1; -1 -1];                  %continuous-time plant matrix
B=[0;1];                         %plant input vector
Q=[10 0;0 1];                    %state weighting matrix
R=[1];                           %control weighting
F=[0.9859 -0.27;0.08808 0.76677];    %reference-model state matrix
G=[-0.9952 0.01409; -0.04598 -0.08808];  %reference-model input matrix
S=[0;0]; %Initialize
T=0;
V=0;
Ts=0.1;                          %sampling period (s)
[AD,BD]=c2d(A,B,Ts);             %discretize the plant
P=[0 0;0 0];                     %terminal Riccati matrix P(N)=0
H=BD'*P;                         %BD' restored: BD*P is 2x1 * 2x2
X=Ts*R+H*BD;
Y=H*AD;
K=X\Y;                           %initial (terminal-time) feedback gain
%Discrete reverse-time solution of the Riccati equations (9.29)
%and (9.30)
%Discrete reverse-time solution of the state tracking equations
%(9.53) and (9.54)
for i=1:200
    L=Ts*Q+K'*Ts*R*K;            %K' restored: K is 1x2, K*R*K invalid
    M=AD-BD*K;                   %closed-loop state matrix
    PP1=L+M'*P*M; %Value of Riccati matrix at time (N-(k+1))T
    RIN=[-sin(0.6284*T);0.6*cos(0.6284*T)];  %reference input vector
    SP1=F*S+G*RIN;
    V=-B'*SP1;                   %B' restored: B*SP1 is 2x1 * 2x1
    %Value of command vector at time (N-(k+1))T
    S=SP1;
    T=T+Ts;
    P=PP1;
    H=BD'*P;
    X=Ts*R+H*BD;
    Y=H*AD;
    K=X\Y;
    %Value of feedback gain matrix at time
    %(N-(k+1))T
end
0.3
examp Calfilc.m
%Calfilc.m - continuous-time Kalman filter (estimator) design via lqe.
%Semicolons are deliberately omitted so each matrix echoes to the
%command window (the echoed output is reproduced in the text below).
A=[0 1; -1 -2]                 %plant state matrix
B=[0;1]                        %plant input matrix
C=[1 0;0 1]                    %measurement matrix (both states measured)
D=[0]                          %direct transmission term
R=[0.01 0;0 1]                 %measurement noise covariance
Cd=[0.1 0;0 0.01]              %process (disturbance) noise input matrix
Q=[0.1 0;0 0.1]                %process noise covariance
%K: estimator gain, P: error covariance, E: estimator poles
[K,P,E]=lqe(A,Cd,C,Q,R)
The command window text is
A =
0
1
-1
-2
B =
0
1
C =
1
0
0
1
D =
0
R =
0.0100
0
0
1.0000
Cd =
    0.1000         0
         0    0.0100
Q =
    0.1000         0
         0    0.1000
K =
0.1136
-0.0004
-0.0435
0.0002
P =
0.0011
-0.0004
-0.0004
0.0002
E =
  -1.0569 + 0.2589i
  -1.0569 - 0.2589i
0.4
examp Calfild.m
0
1
Ts =
0.1000
AT =
0.9909
-0.1722
CD =
0.0100
-0.0009
R =
0.0100
0
Q =
0.1000
0
0.5
0.0861
0.7326
0.0005
0.0086
0
1.0000
0
0.1000
examp 9.3.m
%examp 9.3.m (fragment) - recursive discrete Kalman filter covariance
%and gain equations.
%NOTE(review): P2, Re, Qe, ID, AT and CD are initialized earlier in the
%full listing (not shown in this excerpt) - confirm against the book.
%Transpose operators (') were lost in the text extraction and are
%restored below ("ATH" read as AT').
X=P2*C';                         %measurement-update numerator
Y=(C*P2*C')+Re;                  %innovation covariance
Ke=X/Y;                          %Kalman gain
P3=(ID-(Ke*C))*P2;               %updated (a posteriori) covariance
%Solve recursive equations
for i=1:19                       %"1X19" in the extraction: colon restored
    P1=P3;
    P2=(AT*P1*AT')+(CD*Qe*CD');  %time update (a priori covariance)
    X=P2*C';
    Y=(C*P2*C')+Re;
    Ke=X/Y;
    P3=(ID-(Ke*C))*P2;
end                              %"End" is not valid MATLAB; lowercased
Ke
P3
A =
Columns 1 through 2
-0.0213
0
0.0006
-0.0050
0
-0.0004
Column 3
0
0
-0.0023
B =
8.9362
0
0
Cd =
Columns 1 through 2
0.1000
0
0
0.1000
0
0
Column 3
0
0
0.0013
C =
1
0
0
0
1
0
0
0
1
6
D =
0
AT =
Columns 1 through 2
0.9583
0
0.0012
0.9900
-0.0000
-0.0008
Column 3
0
0
0.9955
BT =
17.4974
0.0105
-0.0000
Q =
Columns 1 through 2
0
0
0
0.5000
0
0
Column 3
0
0
20.0000
R =
1
K =
Columns 1 through 2
0.0072
0.6442
Column 3
-1.8265
P =
1.0e+003 *
Columns 1 through 2
0.0000
0.0001
0.0001
0.0108
-0.0002
-0.0300
Column 3
-0.0002
-0.0300
3.6704
7
E =
  -0.0449 + 0.0422i
  -0.0449 - 0.0422i
  -0.0033
Re =
    0.0100         0         0
         0    0.0100         0
         0         0    7.4600
Qe =
    0.1000         0         0
         0    0.1000         0
         0         0    0.1000
AT =
    0.9583         0         0
    0.0012    0.9900         0
   -0.0000   -0.0008    0.9955
CD =
    0.1958         0         0
    0.0001    0.1990         0
   -0.0000   -0.0001    0.0026
0.6
%Example 9.6
%Multivariable robust control using H infinity
%Singular value loop shaping using the weighted mixed
%sensitivity approach
%NOTE(review): quote characters were lost in the text extraction;
%they are restored in the printsys calls below.
nug=200;
dng=[1 3 102 200];               %plant G(s)=200/(s^3+3s^2+102s+200)
[ag,bg,cg,dg]=tf2ss(nug,dng);
ss_g=mksys(ag,bg,cg,dg);
w1=[1 100;100 1];                %sensitivity weight W1 [num;den]
nw1neg=[100 1];
dw1neg=[1 100];
w2=[1;1];                        %control weight W2 (unity)
w3=[100 1;1 100];                %complementary sensitivity weight W3
nw3neg=[1 100];
dw3neg=[100 1];
%Augmented plant; the extraction shows "[TSS_1]" here but every later
%line uses TSS_, so the name is made consistent.
[TSS_]=augtf(ss_g,w1,w2,w3);
[ap,bp,cp,dp]=branch(TSS_);
[gamopt,ss_f,ss_cl]=hinfopt(TSS_,1);  %gamma iteration on channel 1
[acp,bcp,ccp,dcp]=branch(ss_f);       %H-infinity controller
[acl,bcl,ccl,dcl]=branch(ss_cl);      %closed-loop system
[numcp,dencp]=ss2tf(acp,bcp,ccp,dcp,1);
printsys(nug,dng,'s')
printsys(nw1neg,dw1neg,'s')
printsys(nw3neg,dw3neg,'s')
w=logspace(-3,3,200);
[mag,phase,w]=bode(nw1neg,dw1neg,w);
[mag1,phase1,w]=bode(nw3neg,dw3neg,w);
semilogx(w,20*log10(mag),w,20*log10(mag1)),grid;
[sv,w]=sigma(ss_g);
[sv,w]=sigma(ss_cl);
[sv,w]=sigma(ss_f);
semilogx(w,20*log10(sv)),grid;
%Echo results to the command window
ag
bg
cg
dg
acp
bcp
ccp
dcp
printsys(numcp,dencp,'s')
Examp 10.3
%Example 10.3
%Regulator design using pole placement
%Inverted pendulum problem
%States: [pendulum angle; angular rate; cart position; cart velocity]
% -- assumed from the standard inverted-pendulum model; confirm with text
A=[0 1 0 0;9.81 0 0 0;0 0 0 1; -3.27 0 0 0]   %linearized plant matrix
B=[0; -0.667;0;0.889]                          %input (force) vector
C=[1 0 0 0]                                    %measured output: angle
D=0
%Check for controllability;
rank_of_M=rank(ctrb(A,B))     %rank 4 = number of states => controllable
%Enter desired characteristic equation
chareqn=[1 12 72 192 256]     %s^4+12s^3+72s^2+192s+256
%Calculate desired closed-loop poles
desiredpoles=roots(chareqn)   %-2+-2i and -4+-4i
%Calculate feedback gain matrix using Ackermanns formula
K=acker(A,B,desiredpoles)
A =
0
9.8100
0
-3.2700
1.0000
0
0
0
0
0
0
0
0
0
1.0000
0
B =
0
-0.6670
0
0.8890
C =
1
D =
0
rank_of_M =
     4
chareqn =
1
12
72
192
256
desiredpoles =
  -4.0000 + 4.0000i
  -4.0000 - 4.0000i
  -2.0000 + 2.0000i
  -2.0000 - 2.0000i
K =
-174.8258
-57.1201
-39.1437
-29.3578
%Filename: neural1.m
%Feedforward Back-propagation Neural Network
%Network structure: 1:10(tansig):1(purelin)
%NOTE(review): quote characters were lost in the text extraction;
%they are restored in the plot and newff calls below.
%P=Input values
P=[0 1 2 3 4 5 6 7 8];
%T=Target values
T=[0 0.84 0.91 0.14 -0.77 -0.96 -0.28 0.66 0.99];
plot(P,T,'o');                   %scatter of the target data
%Use Levenberg-Marquardt back-propagation training
net=newff([0 8],[10 1],{'tansig','purelin'},'trainlm');
Y1=sim(net,P);                   %untrained network response
plot(P,T,P,Y1,'o');
net.trainParam.epochs=50;        %train for 50 epochs
net=train(net,P,T);
Y2=sim(net,P);                   %trained network response
plot(P,T,P,Y2,'o');
11
Figure 1: Neural network
12