--------------------------- opt_steep.m ---------------------------
function [P,eps] = opt_steep(P0,w,Vout_bar,Vo_node)
% The vector P0 must be a column vector.
P = P0;
eps = get_error(P,w,Vout_bar,Vo_node);   % initialize eps

% Iterative loop
for iter = 1:10
    % Compute the gradient of the error with respect to the parameters (grad.m)
    G = grad(P,w,Vout_bar,Vo_node);

    % Compute the magnitude of the gradient
    G_mag = 0;
    for i = 1:length(G)
        G_mag = G_mag + G(i)^2;
    end
    G_mag = sqrt(G_mag);

    % Find direction vector S (unit vector along the negative gradient)
    S = -G'/G_mag;

    % Start guessing with alpha and compute the P's and eps
    alpha0 = 1e-4;
    alpha = alpha0;
    while (1)
        % Solve for the new P values
        Pnew = P + alpha.*S;

        % Stop if any parameter would go negative
        zero = 0;
        for k = 1:length(Pnew)
            if (Pnew(k) < 0)
                zero = 1;
            end
        end
        if (zero == 1)
            % Back off to the last nonnegative point
            Pnew = P + (alpha-alpha0).*S;
            break;
        end

        epsnew = get_error(Pnew,w,Vout_bar,Vo_node);
        if (eps < epsnew)
            disp('eps < epsnew')
            % Error increased; back off to the previous step
            Pnew = P + (alpha-alpha0).*S;
            break;
        end
        eps = epsnew;
        alpha = alpha + alpha0;
    end
    P = Pnew;
end

--------------------------- get_error.m ---------------------------
function eps = get_error(P,w,Vout_bar,Vo_node)
% Initialize error
eps = 0;
% Calculate performance function
for idx = 1:length(w)
    % Solve original network
    [V,Y,I] = solve_net(P,w(idx));
    eps = eps + (abs(V(Vo_node)) - abs(Vout_bar(idx)))^2;
end
eps = 0.5*eps;

--------------------------- solve_net.m ---------------------------
function [V,Y,I] = solve_net(P,w)
% Initialize element values
R1 = P(1);
R2 = P(2);
C1 = P(3);
% Nodal admittance matrix and current-source excitation
Y = [(1/R1+1/R2)  (-1/R2);
     (-1/R2)      (1/R2+1j*w*C1)];
I = [1; 0];
V = Y\I;

--------------------------- single_xtr_ex.m ---------------------------
diary eec214_ac.dat
% Jackie Xue
% Kelvin Yuk
% AC optimization of a bipolar transistor example

% Set initial element values
fprintf('BEGINNING OF THE PROGRAM\n');
rb = 1000; rx = 100; rpi = 1000; cpi = 200e-12; cmu = 2e-12;
gm = 0.1; ro = 18000; rl = 1000; re = 50;

% Set parameters
P = [rb rpi re cpi gm rl rx cmu ro];

% Set number of adjustable parameters
nparm = 6;
N = nparm;

% Set number of frequency points, inputs, desired outputs, and weight
nf = 21;
freq = [1 2 4 6 8 10 20 40 60 80 100 200 400 600 800 1000 2000 4000 6000 8000 10000];
freq = freq.*100;
V1 = 1;
desout = [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 .707];
desout = desout.*40;
weight = 1;
PR = 1;   % a logical variable that, when set to 1, causes the value of F to be
          % printed each time the performance function is evaluated

% Set an estimate of the minimum value of F
EST = 10e-9;

% Set an estimate of how many significant digits of accuracy are required
% for convergence; a good value is 1e-9
EPS = 10e-5;

% Set the maximum number of iterations
LIMIT = 100;

% Call function CNJGRD
% Function CNJGRD is an implementation of the conjugate gradient minimization
% algorithm.
% [P,F,G,S,IER]=CNJGRD(N,P,EST,EPS,LIMIT,PR,nf,freq,desout);
[P,F,G,S,IER] = CNJGRD(N,P,EST,EPS,LIMIT,PR,nf,freq,desout);

% Print final network description and response
fprintf('\nFinal Network Values: rb = %.4f rpi = %.4f re = %.4f \n', P(1), P(2), P(3))
fprintf('                      cpi = %.15f gm = %.4f rl = %.4f \n', P(4), P(5), P(6))
fprintf('                      rx = %.0f cmu = %.12f ro = %.0f \n', P(7), P(8), P(9))
[F,G] = SOLVE_NET(N,P,-1,nf,freq,desout);
final_error = F
diary
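For reference, the simpler opt_steep/get_error/solve_net chain above can be exercised with a short driver along the following lines. This is only a sketch: the frequency points, target magnitudes, and starting values are hypothetical, the output is taken at node 2 of solve_net.m, and a companion grad.m (not listed here) is assumed to return the gradient of the error with respect to P.

% driver_rc.m -- illustrative driver for opt_steep.m (hypothetical values)
w        = 2*pi*[1e3 1e4 1e5];   % angular frequencies, passed straight into solve_net.m
Vout_bar = [0.5 0.5 0.35];       % hypothetical desired |Vout| at each frequency
Vo_node  = 2;                    % output taken at node 2 (across C1) in solve_net.m
P0       = [1e3; 1e3; 1e-9];     % starting [R1; R2; C1], column vector as required
[P,eps]  = opt_steep(P0,w,Vout_bar,Vo_node);
fprintf('final error = %.6e  R1 = %.1f  R2 = %.1f  C1 = %.3e\n', eps, P(1), P(2), P(3));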
--------------------------- CNJGRD.m ---------------------------
function [P,F,G,S,IER] = CNJGRD(N,P,EST,EPS,LIMIT,PR,nf,freq,desout)
% This subroutine minimizes a function of N parameters using
% the conjugate gradient method.

% Initialize variables
icount = 0;
IER = 0;
n1 = N+1;

% Calculate initial error and gradient
[F,G] = SOLVE_NET(N,P,icount,nf,freq,desout);
if (PR == 1)
    fprintf('\ninitial error = %.12f \n', F)
end

% Begin iteration loop
for i = 1:n1
    icount = icount + 1;
    oldf = F;

    % Compute the square of the gradient and check for a zero value
    gg = 0;
    for j = 1:N
        gg = gg + G(j)*G(j);
    end
    if (gg == 0)
        IER = 1;
        return
    end

    % Use steepest descent at the beginning of every N+1 iterations
    if (i > 1)
        beta = gg/oldgg;
        for j = 1:N
            S(j) = -1*G(j) + beta*S(j);
        end
    else
        for j = 1:N
            S(j) = -1*G(j);
        end
    end

    % Begin linear search.
    % The method of steepest descent reduces the search for the
    % minimum of the performance function e(p) to a series of
    % searches for the minimum of e along a given direction s^j.
    % This search is one dimensional since only the step size
    % a^j for which e(p^j + a^j*s^j) is minimized must be found.
    % ya is the function value at the lower-bound step
    % yb is the function value at the upper-bound step
    yb = F;   % F is the error
    vb = 0;
    ss = 0;

    % Save old parameter values and compute directional derivatives
    for j = 1:N
        S(j+N) = P(j);
        vb = vb + G(j)*S(j);
        ss = ss + S(j)*S(j);
    end

    % Check that the direction of search will decrease the error
    if (vb >= 0)
        % If vb is positive, the search direction increases the error
        oldgg = gg;
        if (icount > LIMIT)
            IER = 2;
            return
        end
    else
        % vb is negative; estimate the initial step size
        alfa = 2*(EST-F)/vb;
        ambda = 1/sqrt(ss);
        if (alfa <= 0)
            alfa = 0;
        else
            if (((alfa)^2)*ss >= 1)
                alfa = 0;
            else
                ambda = alfa;
                alfa = 0;
            end
        end

        % Double the step size until the minimum is bounded
        temp2 = 1;
        check = 0;
        %-------------------------------------------------------------
        % Extrapolate: take log-scaled parameter steps along the search
        % direction until the minimum is bracketed
        while (temp2 == 1)
            temp2 = 0;
            ya = yb;
            % va is df/da at the lower-bound step
            % vb is df/da at the upper-bound step
            va = vb;

            % Step parameters along the search direction assuming log scaling
            % Call function PRMCHG_AC
            [P] = PRMCHG_AC(P,ambda,S,N);

            % Calculate function and gradient for the new parameters
            [F,G] = SOLVE_NET(N,P,icount,nf,freq,desout);
            if (PR == 1)
                fprintf('EXTRAPOLATION -- ITERATION NO. %2.0f ERROR = %.12f\n', icount, F);
            end
            yb = F;

            % Calculate the new directional derivative -- if positive, the minimum
            % has been passed -- if zero, the minimum has been found
            vb = 0;
            for j = 1:N
                vb = vb + G(j)*S(j);
            end
            if (vb < 0)
                if (yb > ya)
                    % If the new error > old error, quit
                    t = 0;
                    temp2 = 0;
                else
                    % Double the step size and repeat the above procedure
                    ambda = ambda + alfa;
                    alfa = ambda;
                    % Terminate if the step size is too large
                    if (ss*ambda < 1e10)   % 1e8
                        temp2 = 1;
                    else
                        temp2 = 0;
                        IER = 3;
                        return
                    end
                end
            elseif (vb == 0)
                temp2 = 0;
            else
                t = 0;
                temp2 = 0;
            end
        end % while (temp2 == 1)
        %--------------------------------------------
        temp = 1;
        while (temp == 1)
            if (ambda == 0 | vb == 0)
                % Compute length of change vector
                temp = 0;
                t = 0;
                for j = 1:N
                    k = j+N;
                    S(k) = P(j) - S(k);
                    t = t + abs(S(k));
                end
                % Check whether the length of the change vector is insignificant
                % if at least N+1 iterations have been taken
                if (icount < n1)
                    if (F > (oldf+EPS))
                        IER = 3;
                        return;
                    else
                        oldgg = gg;
                        if (icount > LIMIT)
                            IER = 2;
                            return;
                        end;
                        temp = 0;
                        break;   % break the while loop, then go back to the outer for loop
                    end
                else
                    if (t < EPS)
                        if (gg <= EPS)
                            IER = 1;
                            return;
                        else
                            IER = 3;
                            return;
                        end
                    else
                        % Terminate if F has not decreased during the last iteration
                        if (F > (oldf+EPS))
                            IER = 3;
                            return;
                        else
                            oldgg = gg;
                            if (icount > LIMIT)
                                IER = 2;
                                return;
                            end
                            temp = 0;
                            break;
                        end
                    end
                end
            else   % ambda ~= 0 and vb ~= 0
                % Perform cubic interpolation
                z = 3*((ya-yb)/ambda) + va + vb;
                w = sqrt((z^2) - va*vb);
                alfa = ambda*(vb+w-z)/(vb+2*w-va);
                % Call function PRMCHG_AC
                P = PRMCHG_AC(P,t-alfa,S,N);
                % Terminate if F is less than the values at each end; otherwise
                % reduce the interval
                [F,G] = SOLVE_NET(N,P,icount,nf,freq,desout);
                if (PR == 1)
                    fprintf('Interpolation -- Iteration No. %2.0f Error = %.12f\n', icount, F)
                end
                SCALE = 1e12;   % check up to 12 decimal digits
                if ((round(F*SCALE) <= round(ya*SCALE)) & (round(F*SCALE) <= round(yb*SCALE)))
                    % Compute length of change vector
                    temp = 0;
                    t = 0;
                    for j = 1:N
                        k = j+N;
                        S(k) = P(j) - S(k);
                        t = t + abs(S(k));
                    end
                    % Check whether the length of the change vector is insignificant
                    % if at least N+1 iterations have been taken
                    if (icount < n1)
                        % Terminate if F has not decreased during the last iteration
                        if (F > (oldf+EPS))
                            IER = 3;
                            return;
                        else
                            oldgg = gg;
                            if (icount > LIMIT)
                                IER = 2;
                                return;
                            end;
                            temp = 0;
                            break;   % break the while loop, then go back to the outer for loop
                        end
                    else
                        if (t < EPS)
                            if (gg <= EPS)
                                IER = 1;
                                return;
                            else
                                IER = 3;
                                return;
                            end
                        else
                            % Terminate if F has not decreased during the last iteration
                            if (F > (oldf+EPS))
                                IER = 3;
                                return;
                            else
                                oldgg = gg;
                                if (icount > LIMIT)
                                    IER = 2;
                                    return;
                                end;
                                temp = 0;
                                break;   % break the while loop, then go back to the outer for loop
                            end % if (F > (oldf+EPS))
                        end % if (t < EPS)
                    end % if (icount < n1)
                else
                    vc = 0;
                    for j = 1:N
                        vc = vc + G(j)*S(j);
                    end
                    % Check that the direction of search will decrease the error
                    if (vc >= 0)
                        yb = F;
                        vb = vc;
                        ambda = ambda - alfa;
                        t = 0;
                        temp = 1;
                    else
                        ya = F;
                        va = vc;
                        ambda = alfa;
                        t = ambda;
                        temp = 1;
                    end % if (vc >= 0)
                end % if (F <= ya & F <= yb)
            end % if (ambda == 0 | vb == 0)
        end % while (temp == 1)
    end % if (vb >= 0)
end % for loop
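As a quick sanity check on the interpolation step above, which appears to follow the classic cubic fit used in Fletcher-Powell-style routines, the same three formulas recover the exact minimizer of a simple quadratic. The numbers below are only an illustration and are not part of the original program.

% cubic_check.m -- illustrative check of the cubic line-search formulas in CNJGRD.m
% f(a) = (a - 0.7)^2 sampled at a = 0 and a = ambda = 1
ambda = 1;
ya = 0.49;   va = -1.4;   % f(0)  and  df/da at a = 0
yb = 0.09;   vb =  0.6;   % f(1)  and  df/da at a = 1
z     = 3*((ya-yb)/ambda) + va + vb;       % = 0.4
w     = sqrt((z^2) - va*vb);               % = 1.0
alfa  = ambda*(vb+w-z)/(vb+2*w-va);        % = 0.3, measured back from the upper end
a_min = ambda - alfa                       % = 0.7, the true minimizer of f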
--------------------------- PRMCHG_AC.m ---------------------------
function [P] = PRMCHG_AC(P,ambda,S,N)
% The function PRMCHG, called by CNJGRD, alters the parameters
% along the search direction.
% It uses a log scale because the spread between the values of the
% resistors and the capacitors is too large for a linear step.
for j = 1:N
    P(j) = P(j)*exp(ambda*S(j));
end
return;

--------------------------- SOLVE_NET.m ---------------------------
function [F,G] = SOLVE_NET(N,P,icount,nf,freq,desout)
% Initialize element values
RB  = P(1);  RPI = P(2);  RE = P(3);
CPI = P(4);  GM  = P(5);  RL = P(6);
RX  = P(7);  CMU = P(8);  RO = P(9);
V1 = 1;
weight = 1;

% Initialize gradient and error
for j = 1:N
    G(j) = 0;
end
F = 0;

% Calculate performance function and gradient
% Run over each frequency point
for k = 1:nf
    ang = 2*pi*freq(k);   % angular frequency

    % Solve original network
    Y = [ 1/RB,  -1/RB,   0,                                  0,                           0,                                      1;
         -1/RB,  (1/RB)+(1/RX),  -1/RX,                       0,                           0,                                      0;
          0,     -1/RX,   (1/RX)+(1/RPI)+(i*ang*(CMU+CPI)),  (-i*ang*CMU),                -(1/RPI)-(i*ang*CPI),                    0;
          0,      0,       GM-(i*ang*CMU),                   (i*ang*CMU)+(1/RO)+(1/RL),   -(1/RO)-GM,                              0;
          0,      0,      -(1/RPI)-(i*ang*CPI)-GM,           -1/RO,                        GM+(1/RPI)+(1/RE)+(1/RO)+(i*ang*CPI),   0;
          1,      0,       0,                                  0,                           0,                                      0];
    CIN = [0 0 0 0 0 1]';

    % Solve using LU factorization
    [L,U] = lu(Y);
    b = L\CIN;
    V = U\b;

    %fprintf('Updated Network values: freq = %.1f RB=%.8f RPI=%.8f RE=%.8f \n', freq(k),RB,RPI,RE)
    %fprintf('                        CPI=%.15f GM=%.8f RL=%.8f \n', CPI,GM,RL)
    %fprintf('                        RX=%.0f CMU=%.12f RO=%.0f \n', RX,CMU,RO)
    %fprintf('Updated Vout = %.4f Desired Vout = %.4f\n', abs(V(4)),desout(k))

    % Store output for plotting
    py(k) = abs(V(4));

    % Calculate branch voltages and currents needed for gradient computation
    IRB  = (V(2)-V(1))/RB;
    IRPI = (V(3)-V(5))/RPI;
    IRE  = V(5)/RE;
    VCPI = V(3)-V(5);
    VGM  = V(3)-V(5);
    IRL  = V(4)/RL;
    ERR  = abs(V(4)) - desout(k);
    T    = ERR*conj(V(4))/abs(V(4));

    % Set up adjoint network excitation
    CIN = [0 0 0 -1 0 0]';

    % Compute adjoint network voltages and currents
    V = L'\(U'\CIN);

    G(1) = G(1) - P(1)*real(T*IRB*(V(2)-V(1))/RB);
    G(2) = G(2) - P(2)*real(T*IRPI*(V(3)-V(5))/RPI);
    G(3) = G(3) - P(3)*real(T*IRE*V(5)/RE);
    G(4) = G(4) + P(4)*real(T*i*ang*VCPI*(V(3)-V(5)));
    G(5) = G(5) + P(5)*real(T*VGM*(V(4)-V(5)));
    G(6) = G(6) - P(6)*real(T*IRL*V(4)/RL);

    F = F + 0.5*((ERR)^2);
end

% Plot response of initial and final networks
if (icount == -1)
    fprintf('\nFinal network response\n')
    fprintf('freq      Actual    desired\n')
    for k = 1:nf
        fprintf('%.0f   %.2f   %.2f\n', freq(k),py(k),desout(k))
    end
    figure;
    semilogx(freq,py)
    title('Output magnitude |Vout| vs. frequency in Hz');
    ylabel('|Vout|');
    xlabel('frequency (Hz)');
    grid;
end
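Because PRMCHG_AC.m steps the parameters multiplicatively, the gradient G accumulated in SOLVE_NET.m is, in effect, the derivative of F with respect to ln P(j) (note the extra P(j) factor in each G term). A purely illustrative sanity check is to compare that adjoint gradient against central differences taken in the same log scale; the loop below, the step size h, and the assumption that N, P, nf, freq, and desout are already in the workspace (as set up by single_xtr_ex.m) are all additions, not part of the original program.

% grad_check.m -- illustrative finite-difference check of the adjoint gradient
[F0,G0] = SOLVE_NET(N,P,0,nf,freq,desout);
h = 1e-6;                              % assumed relative (log-domain) step
for jj = 1:N
    Pp = P;  Pp(jj) = P(jj)*exp(h);    % perturb the same way PRMCHG_AC.m steps parameters
    Pm = P;  Pm(jj) = P(jj)*exp(-h);
    Fp = SOLVE_NET(N,Pp,0,nf,freq,desout);
    Fm = SOLVE_NET(N,Pm,0,nf,freq,desout);
    fprintf('G(%d): adjoint = %.6e   finite difference = %.6e\n', jj, G0(jj), (Fp-Fm)/(2*h));
end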