From 2fb947d5c425b2dad61b6400ccfcd3102fce2141 Mon Sep 17 00:00:00 2001 From: Charon77 Date: Mon, 4 May 2015 00:48:23 +0700 Subject: [PATCH 1/5] Initial FindCircles Implementation Barely "user-friendly", but sort of work for now. --- .gitignore | 2 + examples/FindCircles/FindCircles.pde | 54 + examples/FindCircles/sample.jpg | Bin 0 -> 15699 bytes src/gab/opencv/Flow.java | 2 +- src/gab/opencv/OpenCV.java | 97 +- src/gab/opencv/package.bluej | 95 + src/gab/package.bluej | 15 + src/org/opencv/calib3d/Calib3d.java | 3010 ++++++ src/org/opencv/calib3d/StereoBM.java | 261 + src/org/opencv/calib3d/StereoSGBM.java | 590 + src/org/opencv/calib3d/package.bluej | 0 src/org/opencv/contrib/Contrib.java | 144 + src/org/opencv/contrib/FaceRecognizer.java | 406 + src/org/opencv/contrib/StereoVar.java | 601 + src/org/opencv/contrib/package.bluej | 0 src/org/opencv/core/Algorithm.java | 361 + src/org/opencv/core/Core.java | 8198 ++++++++++++++ src/org/opencv/core/CvException.java | 15 + src/org/opencv/core/CvType.java | 136 + src/org/opencv/core/Mat.java | 2843 +++++ src/org/opencv/core/MatOfByte.java | 79 + src/org/opencv/core/MatOfDMatch.java | 83 + src/org/opencv/core/MatOfDouble.java | 79 + src/org/opencv/core/MatOfFloat.java | 79 + src/org/opencv/core/MatOfFloat4.java | 79 + src/org/opencv/core/MatOfFloat6.java | 79 + src/org/opencv/core/MatOfInt.java | 80 + src/org/opencv/core/MatOfInt4.java | 80 + src/org/opencv/core/MatOfKeyPoint.java | 86 + src/org/opencv/core/MatOfPoint.java | 78 + src/org/opencv/core/MatOfPoint2f.java | 78 + src/org/opencv/core/MatOfPoint3.java | 79 + src/org/opencv/core/MatOfPoint3f.java | 79 + src/org/opencv/core/MatOfRect.java | 81 + src/org/opencv/core/Point.java | 120 + src/org/opencv/core/Point3.java | 98 + src/org/opencv/core/Range.java | 129 + src/org/opencv/core/Rect.java | 164 + src/org/opencv/core/RotatedRect.java | 112 + src/org/opencv/core/Scalar.java | 106 + src/org/opencv/core/Size.java | 87 + src/org/opencv/core/TermCriteria.java | 100 + 
src/org/opencv/core/package.bluej | 0 src/org/opencv/features2d/DMatch.java | 57 + .../features2d/DescriptorExtractor.java | 278 + .../opencv/features2d/DescriptorMatcher.java | 742 ++ .../opencv/features2d/FeatureDetector.java | 303 + src/org/opencv/features2d/Features2d.java | 402 + .../features2d/GenericDescriptorMatcher.java | 861 ++ src/org/opencv/features2d/KeyPoint.java | 161 + src/org/opencv/features2d/package.bluej | 0 src/org/opencv/highgui/Highgui.java | 584 + src/org/opencv/highgui/VideoCapture.java | 411 + src/org/opencv/highgui/package.bluej | 0 src/org/opencv/imgproc/Imgproc.java | 9630 +++++++++++++++++ src/org/opencv/imgproc/Moments.java | 810 ++ src/org/opencv/imgproc/Subdiv2D.java | 362 + src/org/opencv/imgproc/package.bluej | 0 src/org/opencv/ml/CvANN_MLP.java | 297 + src/org/opencv/ml/CvANN_MLP_TrainParams.java | 390 + src/org/opencv/ml/CvBoost.java | 278 + src/org/opencv/ml/CvBoostParams.java | 230 + src/org/opencv/ml/CvDTree.java | 183 + src/org/opencv/ml/CvDTreeParams.java | 326 + src/org/opencv/ml/CvERTrees.java | 75 + src/org/opencv/ml/CvGBTrees.java | 296 + src/org/opencv/ml/CvGBTreesParams.java | 189 + src/org/opencv/ml/CvKNearest.java | 224 + .../opencv/ml/CvNormalBayesClassifier.java | 243 + src/org/opencv/ml/CvParamGrid.java | 192 + src/org/opencv/ml/CvRTParams.java | 147 + src/org/opencv/ml/CvRTrees.java | 256 + src/org/opencv/ml/CvSVM.java | 442 + src/org/opencv/ml/CvSVMParams.java | 360 + src/org/opencv/ml/CvStatModel.java | 176 + src/org/opencv/ml/EM.java | 356 + src/org/opencv/ml/Ml.java | 13 + src/org/opencv/ml/package.bluej | 0 .../opencv/objdetect/CascadeClassifier.java | 258 + src/org/opencv/objdetect/HOGDescriptor.java | 538 + src/org/opencv/objdetect/Objdetect.java | 105 + src/org/opencv/objdetect/package.bluej | 0 src/org/opencv/package.bluej | 0 src/org/opencv/photo/Photo.java | 347 + src/org/opencv/photo/package.bluej | 0 src/org/opencv/utils/Converters.java | 724 ++ src/org/opencv/utils/package.bluej | 0 
.../opencv/video/BackgroundSubtractor.java | 93 + .../opencv/video/BackgroundSubtractorMOG.java | 106 + src/org/opencv/video/KalmanFilter.java | 176 + src/org/opencv/video/Video.java | 731 ++ src/org/opencv/video/package.bluej | 0 src/org/package.bluej | 0 src/package.bluej | 22 + 94 files changed, 41208 insertions(+), 31 deletions(-) create mode 100644 examples/FindCircles/FindCircles.pde create mode 100644 examples/FindCircles/sample.jpg create mode 100644 src/gab/opencv/package.bluej create mode 100644 src/gab/package.bluej create mode 100644 src/org/opencv/calib3d/Calib3d.java create mode 100644 src/org/opencv/calib3d/StereoBM.java create mode 100644 src/org/opencv/calib3d/StereoSGBM.java create mode 100644 src/org/opencv/calib3d/package.bluej create mode 100644 src/org/opencv/contrib/Contrib.java create mode 100644 src/org/opencv/contrib/FaceRecognizer.java create mode 100644 src/org/opencv/contrib/StereoVar.java create mode 100644 src/org/opencv/contrib/package.bluej create mode 100644 src/org/opencv/core/Algorithm.java create mode 100644 src/org/opencv/core/Core.java create mode 100644 src/org/opencv/core/CvException.java create mode 100644 src/org/opencv/core/CvType.java create mode 100644 src/org/opencv/core/Mat.java create mode 100644 src/org/opencv/core/MatOfByte.java create mode 100644 src/org/opencv/core/MatOfDMatch.java create mode 100644 src/org/opencv/core/MatOfDouble.java create mode 100644 src/org/opencv/core/MatOfFloat.java create mode 100644 src/org/opencv/core/MatOfFloat4.java create mode 100644 src/org/opencv/core/MatOfFloat6.java create mode 100644 src/org/opencv/core/MatOfInt.java create mode 100644 src/org/opencv/core/MatOfInt4.java create mode 100644 src/org/opencv/core/MatOfKeyPoint.java create mode 100644 src/org/opencv/core/MatOfPoint.java create mode 100644 src/org/opencv/core/MatOfPoint2f.java create mode 100644 src/org/opencv/core/MatOfPoint3.java create mode 100644 src/org/opencv/core/MatOfPoint3f.java create mode 100644 
src/org/opencv/core/MatOfRect.java create mode 100644 src/org/opencv/core/Point.java create mode 100644 src/org/opencv/core/Point3.java create mode 100644 src/org/opencv/core/Range.java create mode 100644 src/org/opencv/core/Rect.java create mode 100644 src/org/opencv/core/RotatedRect.java create mode 100644 src/org/opencv/core/Scalar.java create mode 100644 src/org/opencv/core/Size.java create mode 100644 src/org/opencv/core/TermCriteria.java create mode 100644 src/org/opencv/core/package.bluej create mode 100644 src/org/opencv/features2d/DMatch.java create mode 100644 src/org/opencv/features2d/DescriptorExtractor.java create mode 100644 src/org/opencv/features2d/DescriptorMatcher.java create mode 100644 src/org/opencv/features2d/FeatureDetector.java create mode 100644 src/org/opencv/features2d/Features2d.java create mode 100644 src/org/opencv/features2d/GenericDescriptorMatcher.java create mode 100644 src/org/opencv/features2d/KeyPoint.java create mode 100644 src/org/opencv/features2d/package.bluej create mode 100644 src/org/opencv/highgui/Highgui.java create mode 100644 src/org/opencv/highgui/VideoCapture.java create mode 100644 src/org/opencv/highgui/package.bluej create mode 100644 src/org/opencv/imgproc/Imgproc.java create mode 100644 src/org/opencv/imgproc/Moments.java create mode 100644 src/org/opencv/imgproc/Subdiv2D.java create mode 100644 src/org/opencv/imgproc/package.bluej create mode 100644 src/org/opencv/ml/CvANN_MLP.java create mode 100644 src/org/opencv/ml/CvANN_MLP_TrainParams.java create mode 100644 src/org/opencv/ml/CvBoost.java create mode 100644 src/org/opencv/ml/CvBoostParams.java create mode 100644 src/org/opencv/ml/CvDTree.java create mode 100644 src/org/opencv/ml/CvDTreeParams.java create mode 100644 src/org/opencv/ml/CvERTrees.java create mode 100644 src/org/opencv/ml/CvGBTrees.java create mode 100644 src/org/opencv/ml/CvGBTreesParams.java create mode 100644 src/org/opencv/ml/CvKNearest.java create mode 100644 
src/org/opencv/ml/CvNormalBayesClassifier.java create mode 100644 src/org/opencv/ml/CvParamGrid.java create mode 100644 src/org/opencv/ml/CvRTParams.java create mode 100644 src/org/opencv/ml/CvRTrees.java create mode 100644 src/org/opencv/ml/CvSVM.java create mode 100644 src/org/opencv/ml/CvSVMParams.java create mode 100644 src/org/opencv/ml/CvStatModel.java create mode 100644 src/org/opencv/ml/EM.java create mode 100644 src/org/opencv/ml/Ml.java create mode 100644 src/org/opencv/ml/package.bluej create mode 100644 src/org/opencv/objdetect/CascadeClassifier.java create mode 100644 src/org/opencv/objdetect/HOGDescriptor.java create mode 100644 src/org/opencv/objdetect/Objdetect.java create mode 100644 src/org/opencv/objdetect/package.bluej create mode 100644 src/org/opencv/package.bluej create mode 100644 src/org/opencv/photo/Photo.java create mode 100644 src/org/opencv/photo/package.bluej create mode 100644 src/org/opencv/utils/Converters.java create mode 100644 src/org/opencv/utils/package.bluej create mode 100644 src/org/opencv/video/BackgroundSubtractor.java create mode 100644 src/org/opencv/video/BackgroundSubtractorMOG.java create mode 100644 src/org/opencv/video/KalmanFilter.java create mode 100644 src/org/opencv/video/Video.java create mode 100644 src/org/opencv/video/package.bluej create mode 100644 src/org/package.bluej create mode 100644 src/package.bluej diff --git a/.gitignore b/.gitignore index 3bcd892..ff5859f 100755 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ lib/ .DS_Store *.DS_Store /bin +*.ctxt +*.class diff --git a/examples/FindCircles/FindCircles.pde b/examples/FindCircles/FindCircles.pde new file mode 100644 index 0000000..5f00b49 --- /dev/null +++ b/examples/FindCircles/FindCircles.pde @@ -0,0 +1,54 @@ +import gab.opencv.*; +//import processing.video.*; +import java.awt.*; +import org.opencv.core.*; + +//Capture video; +OpenCV opencv; + +void setup() { + size(300, 400); + + //video = new Capture(this, 640, 480); + //opencv = new 
OpenCV(this, 640, 480); + //video.start(); + + opencv = new OpenCV(this, 300, 400); +} + +void draw() { + //opencv.loadImage(video); + opencv.loadImage("sample.jpg"); + float foo[]={0,0,0}; + Mat m;// = new Mat(0,0,0); + m=opencv.HoughCircles(); + image(opencv.getSnapshot(),0,0); + for(int x=0;x<20;x++) + { + for(int y=0;y<20;y++) + { + m.get(x,y,foo); + for (int i=0;i<3;i++) + { + if(foo[i]!=0) + { + //System.out.print("X: " + x +" Y: " + y + " foo["+i+"]: "+ foo[i] + "\n"); + ellipseMode(RADIUS); + ellipse(foo[0],foo[1],foo[2],foo[2]); + } + } + } + } + noFill(); + stroke(0, 255, 0); + strokeWeight(3); + + + +} + +/* +void captureEvent(Capture c) { + c.read(); +} +*/ \ No newline at end of file diff --git a/examples/FindCircles/sample.jpg b/examples/FindCircles/sample.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6be518d982deb8cf9e340216f73b1434ed3f809a GIT binary patch literal 15699 zcmajG1ymf})-BouP0+@j1PKZ5)<__@OK=PB4vl+ohoC`1@ZjzQch{zIYl1ZHdil=z z|8xI0-n;khTBF7uyT`7vYgg4;YtA+M8S=aiz>$@dkpv*T0su^34#4vv04VNeV&e%w z03ZPX0QQ%mBLJ4Dse`c@z_jya1mSrJ@C|^5@=tp?qhX+-qG4d6qoJW=;b3B7VPfI{ zF|b}51`r1ip8y{ZhlGTTjD&=amX?<8<9`~$Yc#aiZ(gInd4rCH@p9n)GX)nP3kw?y z3!fAh=cVD25)l&+5D*iQlarE?lhe>p)6o29{>SG|01yo!9)SrFfg11%h=2%0c+7UKO3HyF$&-RJCAHs8XPbzrmbN zeP4$8+Qq057r$Hp|1wYTO6=rA?wV(T;=n^&&*yh7BQh)omrJG@JL5a_M=qqXMa{!{ zKkbAAd3?&%{nZ>8{R<5JCVG7s`Idc#I~zWu2s4&N=Q##ZBm0jsxhIt7ALvWGeZM5L z(&YVKN?73b>zzhFbbfyP9cDgosh3v@kF-Cod-Bzhyk?kH=J`cb;+|)VZFTkIxb|o7 zj8UB>EhQ{cxpWqzW0nn*K{gpl%EPJHiUnY6P{^aT-810GBWgtSvYBCxDTupNB4fCM zNt=}ctB;`!b>SeneAXz~s%t3?tacgQQXmmp+-2&i>)_EpxUb4@Llz;3>Z#~Jpx*L( z(5mgdT-x$tZ5#O3gktGn9;VvCEq%!HtJ8ZFd856~{=v8JHDYdPIo0>Z{#@8TJWAG)32&00<4#dU*yoQ}SF#o=bfROBq_6{wNOZB`qVIq|kOSkkYv9z5J65^bM zS7pU)wP!*wR$PTeLn(v3E`G`0ofNUS1V5U%c=-R1Dc3B@$@2)?#vFu)0u40aE}06X z{a7qc5L`F)^q^we>Q02VbUc-P5CyNvsisu66~mIQMuxCv95X2+)E%ql6aT0|%AZ*x z_TBn)RwuZQ@dsUUdC;eXOvb(+6Aengh4*ZQ#=+Nj6UvPlR+dg0VK(o=?P5P+JziSu 
zu1c}&NoVVYuE6YTy7Eyn6nPFCrdlI4+>P3fbBO!<=c{8dO+H~-;sY=P07SF;yu2D$ z3D=DrXp2pFZP@`R(>`xN=C7BdKrSVjGIp6{d@`)pWcTW>>1FxJviy{8aO48K|)8o;?a%VGTZSD%X)$AqLpLdBxk_6W_a zlV_S@xxY*#?rKW+ce?DhBwkG(Z;>shQ}Bl)dx+53k`OqdPSc%JbYnXg`ny_t>CkM; zQ6R~*KWO?Hu=rgg2Ij7W<$%`!2UlyFSIwul((C9&f~-QnGW=bW{o)#%(96^?`Pp;! z+ICgw2P(?my7_Mh)n7kOt*@!9Yx}(TB8;Qh3u_ismgR~#>7Q~fKVsxBbcEY;3s)oB zxhMRwbe<`-s<->p18fSe*Rn(Xt?tIG_T3iiM6zG4I!yGNxQ1*l5cl2x#lCXHfNST< zyWg~;hY0OFGLFy=)UITBEUq65q_m185!-Sf$xpVda+shxC9H%)eOTWz#2`k3K}vm2 zBG$*Wek_|(c237U18UcnO4dLshpX{n`)7z>pr^Nk*LXW^MeXCdI@wev_u3npWQ%gr}3u=L>MYc_e{53X* zH2BLAVs2i5;rk5Th#bLz3~fw34>yEaJ{EK{w-5@@Xf!o-rWiG?SarV{Y#;&efVqhH zx;e}C={P}?dIJ!Rq41zOAZV9CkJ^8#(PaO&!scV(;18QXi+9&}Ez$QJ6sz=JJ~kRP z0?wHr+CmG>ExN>2!IdzRcQHa+1}F>SGo$NXp6)Zh@*-RMOpS-P*8hYn%R`%!D;WpO z;HP{zg*a%r%QZTdesMpB_GW5c`7sPSZo45^R(@ip`c}pyST0+2va;fE4LHh?9=1{2 zwMxg1j6Hh>G$?ucqVkuEOH`QtKb-FWbin`Wr{$y-EI4hPV9@%Nym%L>8{2mkXC#Pi za@OJvI|?C#ul);LJ(q|=wO02j%vF**=+BI`Qk|I^zx=5TSeG=oZE z>6CFHGE1Em7fD$}UaK0lrAI!w(G(d+dRI4E6&``}e>OaJ-}G!uhnVgt4+`Y5+HJR< zGB1BmSy6}N|Yc$WB9a88SfnjvcJ$(;bU2k_U67xhyV7<%roO)L!$HZScVGf?vxvQ-L7)=a=*=L!f>w* zq395F2nOSqv@Yi-bP3E( z66$#NenlgYeIow}K=k6V*GnKWi~m1w%gc`8(VUa^%a!vTG%?T*$c_}^@C2;(#CSM| zxp<_+5&2W9k=Z0fW~Q=p$RZl3ZfNp$&s0_1u1CUN+Z9>*aI#Mg_cV_=?l_(XRqNpH z>*=pbNg4{Km(jYW*t<6(4muUVu-D*j%+`V{G#^ftm$x1phgz{HYRRY=waT1z7m}XD z$GjzllALRyDUSCe$3# zeC~0%cUnHOS9fQO=rWb>FW)(^BYJ`|rslV*7<@lkG1{3A>-+CJD{-_QeMwO(o=un) zD^zj!8{Q}PT-wmjDzpR=b$^!7vqU*lwaihG^xZ<~e|6N}Yt3L$c-gm{)5xAfYLx#y`QM!gN21K z#*ONPzCVk_AMrmzxXLjrbxl-Tk`;)!t9{@n3obscB0!W+1}Wh6`v3<9{B&C%(_l6y zDrKTuD4O4LJ(bLHXvJ}Z$|gXqc=Bz>^TcsLzG+T=a?e=@_9vPXCTI{tKmsa*anxmu3=+Dnx;^)q?Z$Hk z#^v%*R(2maB^QoJld=9L@C1l$+cB?-dtB`un!DRkU`oKskuMKl7RLCz+3uYfH3Z2&%vB0Ij>7m{|tzD zTw@h$t{c#XJOlPNF`e)^C}$=%!E+x~ukjLhW;)!_#I@V{jbZz{7fd9(ZN3tHom{re zWE4=c@J$bUiURqN6lR-<3Gx6ktrNUF>T7HHT6V(ir!4a$?>)&~KJ~Vrw>kp*zRACB zboxWtQ0jlhSV92guRMDr~egLR^l|H$vy zA-b6Qm2mO#>P=BoU+kPmCk!yzM{0!ipXgliy;|Uk#1UP6(`~Rf+W)=P&rnLkhIeiu 
z%_4FA(-zJu*uKcp5k`^RF5xB+R-DOZW}8mNZ8R<1tZ?UA_sU5_Z*Jke+N57e_RQS~ zcFrr}wUfpz(-clh&2t1OhVaS5xX7D_NX4RTX}`~DF%5IqW*Iwp71UY5Z3Q3yfh_e1QZx)5%W9I9fJQpE)LI|a z z3&|pLbBV3nPbhW!JT9T`JSB{(tR|BWLQhc^A zg{5la2BL_^$Dg*Z!nAAxL|S$h9SU^0ejV^TKYZJAdCeTt{$)+EJh+5b5a#o{ET?T{ z?h71@0K~v5a<5nLoSZhj&E<^sAFAe~AJu7Wfq@KZcSk~pKaKor+o%f^=`W&!c zI6&^iUfny{&!tjk2>|c`h-eclGRd+gK&Q(B`1T>xHzw_ zGPTk2rf3kIMs!!E3eYt9pTwf7yRP-^v`~s*4yRwTL8T+lPW6!M#zW5t)r$T376V)$ zrQQpReu_4j5O)JeRKg|3a9H6$5f+zyCM(ok7E;Fn>RkCDFy1Gwc_(}X{2bs zA2@2g7l|-$)CgbILA1->G?(fHe>jM@0q|j+t_wGN#*b~Dx)C)pnRHgVf>iw^zx;OC zZykZB$;&S=gOJX^B!+0E{vIE19vMRirZ%y|e|Tni2`=Pgf1v_s%93Y{aC#F49?kQw zO~X@-$Bm4jViy<_R_T8FP98ih(|tvYMV#%pA%qGP@oj<&3k~CbXA<9O7mfkl4bu57 zY&s(SwYBj1P7*Dp!pVAQtMwd@-~fG(z(Kcy<`TknsY0eGE=wl%r@WeVOEWvED@`u=YA*7 zJ2t=R>e3qo5+RUfD%EwnkVm(YqHnkYmcbgzA+tr_6-7^B#W`{Ao85w=98M!M_1@;` zt}DD5sBX%)Z{Ri=S7B9Q+?|9?q(NAlZ*b7w!cmQMYz@2?eRpUYbg+;paZ8BktNPD+ z4)6F_76EtFm2bcj3Tpf#$6`-Ugt;}}zgnkqbspKV%F{#Qer|z_Cif))B)yWIxz!ibmqC(1GvlaF<%8BUpf>fXl*LaFsq}Z z3#H9+NIBg};)+BL{i%_L4z|cOeQ`S^_rkaz%}iM7GchC1a2EZJ_i5e<%1g?!grO)x zZ0`1*nqKkj77c0*p44=aLTFZ=&-yH+fTV;dE%Kztd2BM#!F6mbstOgD|dzhf)h~LkxUZOuDg<^v5(8qE31CU zSAiBn3k${`TU)FSn1wPg8H2a$&BIh*J_ArCd(%4pk{2uZpoIVOooKOWySG4?rS#p zy!4J%cemIO#NtC54}FOIyoj<1^BH6aG-4AvR}XB&D zO%c(}&|d|vJ=R?K864PO%5LjdHl}A!uWQYt zY;Cw9vSEh#!Omh+ZJz-;)SQ_>IX8^2@jN>MeEgx!O+MCOK{Qk}w)gXN@kVkyk#6=k z>^P&))O)~rD6P%0CcHy!!j0G3^L0td(um>3@9dV{Y(~q`+BWuJPvOAnVwT!ej>ILS zWvR;;^oe}rN`(ShYn|T)>Y@R6L!hBG&b4BMDE_`!kJot;Q9d)WHcXYC_liz;Dv2W` zWzIrjhs`~c8gcZ6=C@r(Ua%=b`vqRN*RQtL26_*%DG{rvsFDqrmV4ZAGEiOE4i8X& z#v+TxS1kIv)BWH%0a;j6e;E1SY>&@dKEkF~Xt}7CZ+~MPHBGnJHmBQWswl2pmA%)# z{-W_AI1_r&1gKHCElC+8wS?1qCq&F)v^8C`g<&;>)sDQ?VAf#kWzkdoTsJ~kN^DHT zLjMkG`tb+-u)gZhmdi8XSMBuK2m!+qV}=8T=&yELB;~XvLmea>v=`W7pp@M2vpi3H z>*T2`D9JPY6*QQ=%m0`(I!!@VVYx2Z{h`B(tX6-dYU&t-PrTcHJzRa+32uqJ6c@U_fFMx|KQS!C8XdaPiy-K z`Vy(93443BPdx{hcf1%Apk|SG{D0wZ+)@x|$##U#KMTA;Cbf9g{?=pm-ovD}!IYWH zR3X=CvDG^+%&W}e0p@$WMv?SEfPdvByFX)jSng-U?av^|q&cOWY3q)fn2ZxqFP_cH 
zdxa2idS_U{JDii@L`00;ruH0;JQ zqiNz2i-W36l~g8t>Ge!pcx>JZk9k3{QQwT&IDf{Ak^p(iyiH9EdPWxjI#M{`kRkPT za}MnmYH;`2)U}eOmt{Gj1~D#uam;9LQ@FUq&nT1mog})k(t(Tb z=ELps>z}k=P}Dy?1o&ot9%@SGf%F?R zx(>jSWV2`4Q-XU?T`emS-+X6!ny(X2Q=VdR;_J4=es8jq=8JQiL& zPid*j$~?WtGayInRo7|70A)wIN7Oj8%Od=>3ni;Ys^LbyKV7y}?zoHS@}iVge}Lxq zxgI<8{$ue-t)hse&Z`lOa_m@199VM&flbCnRS+-1P>x!G7r)}W1E1->F=AhF2NV}^ z@$`-Vu+y&#XPWW7cdx;ktYk}ggD0WM=u`<56Ey{`oLMRnh!NeIdncx$u+!D{-f{X) zo;y8Fiq8geez6vwo?pty_}Rf*`Fyu}vRj$oMvM)n)X=|Wv#PI}sOMa`ZckY`LFd9m z2(q>g*ej38s+OfGB$@j0o zr#Q99xEHFU__8b7#T?LDu1Dm&f}>yt&Ss5;6hg$|n+#Y&t4p!9lBsGejZL9zKfSDp zc1)kd@JguXisF?;prR)^;1sjR5t3)wPGgkkIJH&&$#>-M{XwO*m6e!R+RCT{!h))+ ztl}1y_(|NVpnOtOPl6!LbsQdAcxmzrs(!%?N*uQ)dK!82UdCC=JPY0X)0E-6tOQg$ z4Rm{@W9&bn5Aad044!)e@&MNq9}sE=M?15wpDo#{x^RizZIy%Pm%Cgbfoie1cm(BL z{{+1icgO>C_B{A(1n(O7ryC6=%F3%%Z-y>T@t8aPRd{rY*!R+V| z-Gr?L{sDzqUCfcai>n)6REp~eSFm$jl_B=|8QeAYa)H>{jEk0BE$ebajvsRt05K;8R^D#zW&-`=y$;A>xxLH{msHZl)g7#V} zpd1qw^^3g=QEq-{siI|VDy8dJ(nc~0te&e_w(tnfe{Y9aSn@NVQQyDEJy%|(1`}4* z;vlb7w8IdiHK5=@cHnehidB}L~=Thyc+!r-<=x_#F7#9&~j9d0!d{Q#|GV*eFo7+2#o zFgo$GG-NkBw61lMAW4|4h=6*0TyC$IsrYN|%WbItsXI|EcWiWt-+S!!u@O68x8K-0 zW4yPqw4=u9T-AsvilEQWrV~9xG&i>_RUCCP3c7%ER!7^uH1e{qn3m)K2obGBwnKc) zt*#&?%Q36pk**Ej3e)!j1zzrY>WUW}@5G3F)&q1)%D#rZcMI4)XK}1CLWze45A!p2 zE$5btsQg^0(=;3%GN09MBN}PtZ4P=8^-T6wK)3tFQn<3(v+^0v-Of1uGl)!!x}hPa zS`?j(T!l-e+ys?TXQ~rGu==LO_6gx?q>aFJ@qq@O6&_yl{lA{n%e%|da`*tR-nlv% z$T=Uoe|3=zYp5W~0Xh76@OxK&+B_wYEt6WnXn!S)z}yJAL87Mqt^N-qRGL4ro*UoC zqB_lw(mkMraL=oqp6H<)_V9v6N~OtO&w+*WztaD3y={5u@FP7x-5yZ z2c7}@{_OhLB~f!2tPUs*@cg8@1uK`f8my4Tmf&+z8?Ig6VzQF0fQQ+O*pvP2Z{bl% z6aTLEgq)6H+oZwAk*E+1RnU;Wy7v~(REi+oyWgG2!hll)GwSvwYpaLv zM_!Pc0K?&7c_V_Q93ENnrEs<-aX{WA1`gqnJs+~Mo{<`%V;p8|!aQlFDhVm!*mINWZoi-GMorn0Li~9v;TBe{-||-uq?lMK;%T1*`4f4|6L%o z&Ra+-w&)eUt4ek&ptBUv2}fn~>b>#>RxB2a?4>J8a~vX%GA~k$khM<*H^Z;ex5yYr zMqprJxfuSttP-a7UImJkwwTKd#9+h5pPD%*m83zNo8ul8hifa#OYJpX^q(QtD-V2> z1$VfsHoi(l+RXf4hoLI)ayjwSv-S?cbDT0YVYN$UQoo$CJWc8D2EVf-RYAGgZkgU~ 
zn&B;`Gp9EkN5nA9a!*evx$Y(izc1oe-JSMMr6KV!iOPw)HjHA^f!9-_LA6JaKK<;a zR90}7J&`pOw+vglf8kM+UOu^hKy2|i`^kf_;yX=fn@eC{ELYLgmPYm9kcnw^M)f*> zQ9e+)Lk{nK#K|DT5UI|_ESRB;YoZ}hPonEO9@j`jJIsE9*~eFIjDDy7CbM)Nz`o3l zCeoi>pZa7b;#IjWVsvNLP0cE+@?UH(@_FL!#s!LKraf(Axc@GP8q9dLlIwO`ddevl zSW7CID{McMUkFFg)h_NKfXrn zTq$FL(+t-A@MoYcs%)i)wv8ETQXhWq!rx9l#`+9ksub^NukT-+i0}CR*~X5nAX`%- z#srl!b~uOnJSXLxd%L60V{I=W>008QNpQ2NQC`}S&eYf@ zffvZ$%=!{d0~)Sj3rfKp4=eImp|k#Q{H{;YmvQn1WHURzWJTd$yqdHzn34>)%Fp7v zuLO1Vxhvz+TH}S|Z2F{l#oE-pV%Ihu-A^$$=>@Tx9@n3SOO^F4>r46gBpLipNTjRZ zX_a)YJ96}sAWaUitrH)xGl5_Bk^K@RZ{z5+Cb1V%j_R3MByoBFmDFls8H7!AQzDHi8l%;+xxpsDL z3Y(g2{E%T&h&JG_#I$4=Ay8j*L^bCs@DM^o6p^ZS;B5R~zm3DPP<`@Y~Ph@LO!wRILgyC|7)5KkW& zm^&JwoDzV~?`2VK@tEc5pIRii=GmH8WwD<&2F#_Ic?Mhx=Fr}{2hF3vV;5srAw#Rz z-b$yK$*?8rn_ZN;e@cxdLZa?(>Bsk9SS>pq=<=vHU>UH%z>fOk-FN@{N$BJBNd zh+C0H4f-G{VCXfm3+)Kfnt#{zH7QaXiL^$Fk8-(6^Bmn?FG*p-g`}+0n%OvN63gBsBp`hiXN|Qn zm>zgj-~Em9ZDny%5_?|efw|#^DR*M`Mpyk7*)?#3nZCd7fH6@d&pJxJ@IWuhk97M4 z8$$Y0fOQBb_^ovzH0y;0B`#X$pZ7P{)c91{;&@^IO2l+!8*u1%QiFDm_1g0@_zn~Hu|crZFGzURXk;g#^!YK*JnHUCIQUb zlp7!7ySt4Ak}$y9ltY>LHpvEh=gn?Y<9XW!B$}Fa(YXsiR$}G0X*9VmwYmuD7nAdm z{CKKuxj8?6y(CbxENwNmMl{=gNrwI#Wo#S3b47f>q+iN(dcw21l3hwRbD62QfY+N< zs(Oq6tJTOYn*H@5{Xp-9yiS&WYUuPHqE+2*?<2?aOAOwvst+$bSg0A3u%Ye<+n{w+ zGspGOWpL&Oy2n|dfic1f?_s%HeR=tJF?2qf#&32&@&4*xf6RU&FGrTT5iV^xe}c5) zyL}jxwVF{=+WCnDw=464z>I(d9Dn~+ws-#~9+5HfR{@k#7PPir;oh)0pAhdpTZQ$x zF@|xzMBlsg!eBYsrAab-Zpr2p$ZJLS(3>+VeXpQhec z|339P$>=84M4C8y9K>(Av~1h0H9pYj0y!?Faj$y@{Dc&hDXK}WZaD*YxpdQQzy@S4 zU$;i=Xvn=KgIy%7VK*RyuZ?dOh31puO)yB(CvB_rqK2 zSQAvXdnuc?9T{;QVS_)Zw?3e3!{o4)p47j|=Zzx5PxcOq5OE3D7scIw6);{pEO?(W zdzFGVc}6gTQ>0fXVNH}+HVp)0xClM$?EF+I9-bDw5XOiT%_UE>I`3cx|CRGssA}Ys zzrs7LzuvRUSQRt(_n%LH43<)m2l?YI{G>kObbc!EL$H5ctP)Se%1@z`gtZtz#qOM< z%bB6^G?!;m5K^eIP~4u-c%AzGVP(30Cj&Hy4Wv~len{kZ&>Uc|#P|LjCQ2aRBsFTO z6eTHj53Zt!CiU!ahp|MBY?;+?Dy#~`hOCUVa$tXf7mlNr$Vb75>uTy7!93^mUVr;& z>rG5!Wo>PmgQI>+ygn=+v4cacA!*psmgww1B7LwFG9rfx=BE=VWl@f}55q3LJOgZw z38K^NIQm@_W3|2U 
z!;^nkd2uwYA;dUuHUdY1T-dX+{t6OYCuRojkFEUes;Q{?C#0Tv7p?E5*zkE-;iF)? z>$noY#T%>weP7y^A0M1}CYbtl_yo?(?;TL*P`f_}>>puj=)CNupGYFK@C?2(*5pvG z>EPlufYtJmPu_%`?{7IWXu`l9ivx$hP3${`DA?2MhwbdL>VU-{cXV%}@9aR4PdXCL zlGunh7;Q7!xjF_PfDDP+c=t2W5@lL;yAFVY$V5Mfb@0*3T8{kaOtgc?j;5#6V^04` z|2&DHgefdK*d%NHFQuCXfDENGF|5`3YDC+mkwti)M^bps!2jEaUFHN4c*!jBT0Q|L zuBxpIoJ1)7$CZN%)hz58;2Oe#abX&|+^c=H2&H8IGhG6tm#E;q+T{T%;2+|Oom^3j zTm6RB;3#p|WgTMexC7fO_8m9tz$9qLT3~1SH3N}+*Ker{mbS5y@>=@p5zU>3F%|)4 zR6u%W>y_vfn))*wUVHdWPS^5M!71DZ3KPm5b$mBQ{WG`?M^m(O%*z}4gFgVK%CC<# z@VQ>TB8=C}B`l89e)GUndyGtRz(q5)6@16X=bRnV^V;_6)BWPr1lW6kss`LRqEjY` zOJ-|dm?WPr)3YLdP-Y2=>hVui)6Xn-@w)ZWYiIweGj`WiLd(rf|J00%1OIxw`1b@` zeyX(G^=3XT!5-`N;H}^A<>^ZlDJ1?+?EQB~Ldw6`B%+`3eS{tliT*w|vXGhb!?*&h zaJ{jl+>$b6WB(a&m{YX3Jddu|L$ed`g(^H@KjUC>c?&NnuD!9wcFNRf!t|G$A&t4i zocE{mn#gAWMRMcdtNHup42Hj%;F*G=a7{}Q{mBEvk@9r%LpjusLR!WcAy7GM^4?c` zW!WzzYx;&$A|EW~WO(vs+sX#Ap{%nZhIXKrz_V>^l9%F3=6!O0E*I3|Ym{n}g+4xw zJ|pNRt+`_QJ;B=vKe={rDiVNLS}qUP`_NZRo8+ zIi)!NvtT$}H^QH{#Zuea9v{p-H?n-LbF^_A!6@07PQl}Lp4EW6J5xLNCXU<7V9N}6 z80o&7e~fV_+t`w+t2@NMG&ucfPY^Y_EP};}#j`Y1#%Yh<>LbW|onP{?UHOH5d|_h$ zv0eNcQAYFdsY7lG)O^ZcP1NG^&}EK&6veT?9vhD!-r#k0&1>gvbG|mu!>m-!n65V6 zdoOp_V#2~BYlyBawOvX(2_yVrNlrJMIo6#jrne599QiTQzb*Cq`$y5<}SB!mkaeEmlvww@*+Z2O;Y?9zX=>xsq zz+K4@MqWf>cFAJF>HhBFfBbc_S@l#gS(9uZ(&NrhQJ8G00& zK&Zx2XnWhVA5&9!JkeTzK(smeP~XIR>9_~4Yf~v0gS3lk+$GdP=U#YmWVw+AN@#hn!1n5`ARuSMHHFxQ37>=3Vf_?<~j;xH%^S4^B zf^7Or3oMJ9KiMjmw;_%Sp)%LBzGiYF{s%{*{kN}(ShAsab3;llVu~r*r~iH%xxn;w z^PGDLt32gY>l~W zW8mWFT`Rs)kuTDC-N}YVu}k_G>1tF${ndEs46epE!S7^QF7}Osjqo&Oyd^L+jt8z0}x(k0b_AajWnRTpG zj|Kp!(;lyh+$5L=0CcddfPt@Oj=AYOJx7sTUIZ;S{X9JXfGSIbaqm3PXTZgZL{eX?35%7BLnmoQ6ZV)=cio_wfZ9WWT0Q&c8|P$C z$VfTKTdI+GT(3-YUs?v_isq0;HW|c5p?uAsC&F}z%r4|wr&4t(O|HPI{Fp5i)bJc2 zknW|%f%7lJYnkR`UIqs03{Fd_DY2g;mw({{`FZBJZY)MSa~@MvoMFp$$P@+!ZS{_` zdefNX$A?x2G>?M<$YiwEQleecyhpzW#SI#aaC=FRiqwymal_yN+DCWKWld z+33rTGrjd6Y;`MuK+E`lJE8P36{Lpox(S`gv9Bj|h!$LmT)Ag6otx^H^}U+lQ@ze3 
z%K}SpY!w2%oml=nAC5X-SDrfi5Y|A%LC;9N6cD-M%SENCB|^=3E144C0g>fI$B%lT($NZ2a&F|`Gw@y zR7VaAYk>3J&uKwJcc`jzjAhk-w_g67XI94)B%&VdxvF@k^sQo6yWFHi5qByjh4Pxy z_vrAaCCrgUFl)m>h07AOGN(7FtvXEcfZBEPAvMLvQa&cn&s6xziy2aP_^ zw;^6z~SkC|)zw{Hpm|LmyWMk{m}PtDJO4S0G!5HPv%UIzCvBFPY{n=$m@ zI8&=EncF#Cr`&nJbD~Y{eoqyd+?FtSjf<$W)pkJ%`mI+<05(B4`O$FW-EU_nuf~&k zNbmoV8{!dMRd&GP+5SPI!<^S}vnyX4?W#REz@Pn`?}i*c!l-xAd@jkIwBkld8AheI zV36HvqaxGYM@$NVK^!F4<{E5gb<5n!7hwtClO7}w-R~Z(j1sGd`qm(dix44uHK^5J zZw2z1jW?>(WyC0|Xdtvv`SEYK6-~f8VCJt>L^ffVd6TOkVGg6sUwu^Uc+^>=1MJ72 z%+xHyF(0SjyQRr3c=hvOHRAeoDFh=UV%k&gDa}!Y-ZtFM{igj}fO6=yODH**Mnd+P zn3wxkfF}`MzYI-CY>H+BAB5q$DYOouH?ig#T7RwDN>%vP?QxOJqNq9A9=0TVBZGMW zO4eA@|KyT6-bYgF#&TPj!Bn>%6%QStQ<+{W=r9zC?GArp-yZsi5Z}+g{We?mNu|@j z_{*U(dy5Xp_7e81t;r2OdnAHlYKH-AuG>M(f2G6ohPSMxj3tdQsr6%O>Q+q+r3qt0 z4QFe$GS5jlGD_#GDLQ5+jCnv{8zUUyrTon7jT>*1I|MV(rTS`gRBmnFKJTnWAijoCaRUPEM+^??jF#PjS`=ysc(EQSY@_B zmQ|go?7stl7}1GhpVo}99{Hj;qrHP#?ju}M;Fj{0u(J`NN{c^y-+`#Y45g;&#Y;DD zurlB2^`|xtU-jqQ2ZmhbwpjgTuYCrj)mt|2SWAw55+V4Sqk`7~{tll3o^Wf*XR@JKKynj@rMv&ZR4KW_GUNfgB`)J>o@b%ofh|cDHcmd!4RpIl0Ui@DtE?3$B literal 0 HcmV?d00001 diff --git a/src/gab/opencv/Flow.java b/src/gab/opencv/Flow.java index a9998fd..b05b72d 100644 --- a/src/gab/opencv/Flow.java +++ b/src/gab/opencv/Flow.java @@ -59,7 +59,7 @@ public PVector getTotalFlowInRegion(int x, int y, int w, int h) { public PVector getAverageFlowInRegion(int x, int y, int w, int h) { PVector total = getTotalFlowInRegion(x, y, w, h); - return new PVector(total.x/(w*h), total.y/(w*h)); + return new PVector(total.x/(flow.width() * flow.height()), total.y/(flow.width()*flow.height())); } public PVector getTotalFlow() { diff --git a/src/gab/opencv/OpenCV.java b/src/gab/opencv/OpenCV.java index e1e4ebb..aece822 100644 --- a/src/gab/opencv/OpenCV.java +++ b/src/gab/opencv/OpenCV.java @@ -1,9 +1,9 @@ /** - * ##library.name## - * ##library.sentence## - * ##library.url## + * OpenCV for Processing + * Computer vision with OpenCV. 
+ * https://github.com/atduskgreg/opencv-processing * - * Copyright ##copyright## ##author## + * Copyright (c) 2013 Greg Borenstein http://gregborenstein.com * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -20,9 +20,9 @@ * Free Software Foundation, Inc., 59 Temple Place, Suite 330, * Boston, MA 02111-1307 USA * - * @author ##author## - * @modified ##date## - * @version ##library.prettyVersion## (##library.version##) + * @author Greg Borenstein http://gregborenstein.com + * @modified 12/08/2014 + * @version 0.5.2 (13) */ @@ -53,6 +53,7 @@ import org.opencv.core.MatOfRect; import org.opencv.core.MatOfPoint; import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfPoint3f; import org.opencv.core.MatOfInt; import org.opencv.core.MatOfFloat; import org.opencv.core.Rect; @@ -69,19 +70,15 @@ import processing.core.*; /** - * OpenCV is the main class for using OpenCV for Processing. Most of the documentation is found here. + * This is a template class and can be used to start a new processing library or tool. + * Make sure you rename this class as well as the name of the example package 'template' + * to your own library or tool naming convention. * - * OpenCV for Processing is a computer vision library for the Processing creative coding toolkit. - * It's based on OpenCV, which is widely used throughout industry and academic research. OpenCV for - * Processing provides friendly, Processing-style functions for doing all of the most common tasks - * in computer vision: loading images, filtering them, detecting faces, finding contours, background - * subtraction, optical flow, calculating histograms etc. OpenCV also provides access to all native - * OpenCV data types and functions. 
So advanced users can do anything described in the OpenCV java - * documentation: http://docs.opencv.org/java/ - * - * A text is also underway to provide a narrative introduction to computer vision for beginners using - * OpenCV for Processing: https://github.com/atduskgreg/opencv-processing-book/blob/master/book/toc.md + * @example Hello * + * (the tag @example followed by the name of an example included in folder 'examples' will + * automatically include the example in the javadoc.) + * */ public class OpenCV { @@ -116,7 +113,7 @@ public class OpenCV { BackgroundSubtractorMOG backgroundSubtractor; public Flow flow; - public final static String VERSION = "##library.prettyVersion##"; + public final static String VERSION = "0.5.2"; public final static String CASCADE_FRONTALFACE = "haarcascade_frontalface_alt.xml"; public final static String CASCADE_PEDESTRIANS = "hogcascade_pedestrians.xml"; public final static String CASCADE_EYE = "haarcascade_eye.xml"; @@ -844,6 +841,40 @@ public void threshold(int threshold){ Imgproc.threshold(getCurrentMat(), getCurrentMat(), threshold, 255, Imgproc.THRESH_BINARY); } + + /** + *

Finds circles in a grayscale image using the Hough transform.

+ * + *

The function finds circles in a grayscale image using a modification of the + * Hough transform. + * @param image 8-bit, single-channel, grayscale input image. + * @param circles Output vector of found circles. Each vector is encoded as a + * 3-element floating-point vector (x, y, radius). + * @param method Detection method to use. Currently, the only implemented method + * is CV_HOUGH_GRADIENT, which is basically *21HT*, described in + * [Yuen90]. + * @param dp Inverse ratio of the accumulator resolution to the image + * resolution. For example, if dp=1, the accumulator has the same + * resolution as the input image. If dp=2, the accumulator has half + * as big width and height. + * @param minDist Minimum distance between the centers of the detected circles. + * If the parameter is too small, multiple neighbor circles may be falsely + * detected in addition to a true one. If it is too large, some circles may be + * missed. + * @param param1 First method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the higher threshold of the two passed to the "Canny" edge detector + * (the lower one is twice smaller). + * @param param2 Second method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the accumulator threshold for the circle centers at the detection + * stage. The smaller it is, the more false circles may be detected. Circles, + * corresponding to the larger accumulator values, will be returned first. + * @param minRadius Minimum circle radius. + * @param maxRadius Maximum circle radius. + * + * @see org.opencv.imgproc.Imgproc.HoughCircles + * @see org.opencv.imgproc.Imgproc#minEnclosingCircle + * @see org.opencv.imgproc.Imgproc#fitEllipse + */ /** * Apply an adaptive threshold to an image. Produces a binary image * with white pixels where the original image was above the threshold @@ -857,6 +888,15 @@ public void threshold(int threshold){ * @param c * A constant subtracted from the mean of each neighborhood. 
*/ + + public Mat HoughCircles() + { + Mat circles=new Mat(10,10,CvType.CV_32FC3); + Imgproc.HoughCircles(getCurrentMat(), circles, Imgproc.CV_HOUGH_GRADIENT,2,getCurrentMat().width()/4, 200, 100,0,0); + return circles; + } + + public void adaptiveThreshold(int blockSize, int c){ try{ Imgproc.adaptiveThreshold(getCurrentMat(), getCurrentMat(), 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, blockSize, c); @@ -1288,19 +1328,16 @@ public PImage getOutput(){ public PImage getSnapshot(){ PImage result; - if(useROI){ - result = getSnapshot(matROI); - } else { - if(useColor){ - if(colorSpace == PApplet.HSB){ - result = getSnapshot(matHSV); - } else { - result = getSnapshot(matBGRA); - } + if(useColor){ + if(colorSpace == PApplet.HSB){ + result = getSnapshot(matHSV); } else { - result = getSnapshot(matGray); + result = getSnapshot(matBGRA); } + } else { + result = getSnapshot(matGray); } + return result; } @@ -1361,7 +1398,7 @@ public Mat getROI(){ } private void welcome() { - System.out.println("##library.name## ##library.prettyVersion## by ##author##"); + System.out.println("OpenCV for Processing 0.5.2 by Greg Borenstein http://gregborenstein.com"); System.out.println("Using Java OpenCV " + Core.VERSION); } diff --git a/src/gab/opencv/package.bluej b/src/gab/opencv/package.bluej new file mode 100644 index 0000000..40d314d --- /dev/null +++ b/src/gab/opencv/package.bluej @@ -0,0 +1,95 @@ +#BlueJ package file +dependency1.from=OpenCV +dependency1.to=Flow +dependency1.type=UsesDependency +dependency2.from=OpenCV +dependency2.to=Contour +dependency2.type=UsesDependency +dependency3.from=OpenCV +dependency3.to=ContourComparator +dependency3.type=UsesDependency +dependency4.from=OpenCV +dependency4.to=Line +dependency4.type=UsesDependency +dependency5.from=OpenCV +dependency5.to=Histogram +dependency5.type=UsesDependency +dependency6.from=ContourComparator +dependency6.to=Contour +dependency6.type=UsesDependency +package.editor.height=400 
+package.editor.width=553 +package.editor.x=546 +package.editor.y=118 +package.numDependencies=6 +package.numTargets=6 +package.showExtends=true +package.showUses=true +target1.editor.height=700 +target1.editor.width=900 +target1.editor.x=110 +target1.editor.y=90 +target1.height=50 +target1.name=Flow +target1.showInterface=false +target1.type=ClassTarget +target1.width=80 +target1.x=160 +target1.y=70 +target2.editor.height=700 +target2.editor.width=900 +target2.editor.x=329 +target2.editor.y=11 +target2.height=50 +target2.name=OpenCV +target2.naviview.expanded=true +target2.showInterface=false +target2.type=ClassTarget +target2.width=80 +target2.x=160 +target2.y=10 +target3.editor.height=700 +target3.editor.width=900 +target3.editor.x=110 +target3.editor.y=90 +target3.height=50 +target3.name=ContourComparator +target3.naviview.expanded=true +target3.showInterface=false +target3.type=ClassTarget +target3.width=140 +target3.x=10 +target3.y=90 +target4.editor.height=700 +target4.editor.width=900 +target4.editor.x=110 +target4.editor.y=90 +target4.height=50 +target4.name=Line +target4.showInterface=false +target4.type=ClassTarget +target4.width=80 +target4.x=160 +target4.y=130 +target5.editor.height=700 +target5.editor.width=900 +target5.editor.x=110 +target5.editor.y=90 +target5.height=50 +target5.name=Contour +target5.showInterface=false +target5.type=ClassTarget +target5.width=80 +target5.x=100 +target5.y=190 +target6.editor.height=700 +target6.editor.width=900 +target6.editor.x=110 +target6.editor.y=90 +target6.height=50 +target6.name=Histogram +target6.showInterface=false +target6.type=ClassTarget +target6.width=80 +target6.x=10 +target6.y=150 diff --git a/src/gab/package.bluej b/src/gab/package.bluej new file mode 100644 index 0000000..6c644ae --- /dev/null +++ b/src/gab/package.bluej @@ -0,0 +1,15 @@ +#BlueJ package file +package.editor.height=400 +package.editor.width=560 +package.editor.x=285 +package.editor.y=141 +package.numDependencies=0 
+package.numTargets=1 +package.showExtends=true +package.showUses=true +target1.height=62 +target1.name=opencv +target1.type=PackageTarget +target1.width=80 +target1.x=160 +target1.y=10 diff --git a/src/org/opencv/calib3d/Calib3d.java b/src/org/opencv/calib3d/Calib3d.java new file mode 100644 index 0000000..4ef2ac3 --- /dev/null +++ b/src/org/opencv/calib3d/Calib3d.java @@ -0,0 +1,3010 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.calib3d; + +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDouble; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfPoint3f; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.core.Size; +import org.opencv.core.TermCriteria; +import org.opencv.utils.Converters; + +public class Calib3d { + + private static final int + CV_LMEDS = 4, + CV_RANSAC = 8, + CV_FM_LMEDS = CV_LMEDS, + CV_FM_RANSAC = CV_RANSAC, + CV_FM_7POINT = 1, + CV_FM_8POINT = 2, + CV_CALIB_USE_INTRINSIC_GUESS = 1, + CV_CALIB_FIX_ASPECT_RATIO = 2, + CV_CALIB_FIX_PRINCIPAL_POINT = 4, + CV_CALIB_ZERO_TANGENT_DIST = 8, + CV_CALIB_FIX_FOCAL_LENGTH = 16, + CV_CALIB_FIX_K1 = 32, + CV_CALIB_FIX_K2 = 64, + CV_CALIB_FIX_K3 = 128, + CV_CALIB_FIX_K4 = 2048, + CV_CALIB_FIX_K5 = 4096, + CV_CALIB_FIX_K6 = 8192, + CV_CALIB_RATIONAL_MODEL = 16384, + CV_CALIB_FIX_INTRINSIC = 256, + CV_CALIB_SAME_FOCAL_LENGTH = 512, + CV_CALIB_ZERO_DISPARITY = 1024; + + + public static final int + CV_ITERATIVE = 0, + CV_EPNP = 1, + CV_P3P = 2, + LMEDS = CV_LMEDS, + RANSAC = CV_RANSAC, + ITERATIVE = CV_ITERATIVE, + EPNP = CV_EPNP, + P3P = CV_P3P, + CALIB_CB_ADAPTIVE_THRESH = 1, + CALIB_CB_NORMALIZE_IMAGE = 2, + CALIB_CB_FILTER_QUADS = 4, + CALIB_CB_FAST_CHECK = 8, + CALIB_CB_SYMMETRIC_GRID = 1, + CALIB_CB_ASYMMETRIC_GRID = 2, + CALIB_CB_CLUSTERING = 4, + CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS, + CALIB_FIX_ASPECT_RATIO = 
CV_CALIB_FIX_ASPECT_RATIO, + CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT, + CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST, + CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH, + CALIB_FIX_K1 = CV_CALIB_FIX_K1, + CALIB_FIX_K2 = CV_CALIB_FIX_K2, + CALIB_FIX_K3 = CV_CALIB_FIX_K3, + CALIB_FIX_K4 = CV_CALIB_FIX_K4, + CALIB_FIX_K5 = CV_CALIB_FIX_K5, + CALIB_FIX_K6 = CV_CALIB_FIX_K6, + CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL, + CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC, + CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH, + CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY, + FM_7POINT = CV_FM_7POINT, + FM_8POINT = CV_FM_8POINT, + FM_LMEDS = CV_FM_LMEDS, + FM_RANSAC = CV_FM_RANSAC; + + + // + // C++: Vec3d RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat()) + // + +/** + *

Computes an RQ decomposition of 3x3 matrices.

+ * + *

The function computes an RQ decomposition using the given rotations. This + * function is used in "decomposeProjectionMatrix" to decompose the left 3x3 + * submatrix of a projection matrix into a camera and a rotation matrix.

+ * + *

It optionally returns three rotation matrices, one for each axis, and the + * three Euler angles in degrees (as the return value) that could be used in + * OpenGL. Note, there is always more than one sequence of rotations about the + * three principal axes that results in the same orientation of an object, e.g. + * see [Slabaugh]. Returned three rotation matrices and corresponding three Euler + * angles are only one of the possible solutions.

+ * + * @param src 3x3 input matrix. + * @param mtxR Output 3x3 upper-triangular matrix. + * @param mtxQ Output 3x3 orthogonal matrix. + * @param Qx Optional output 3x3 rotation matrix around x-axis. + * @param Qy Optional output 3x3 rotation matrix around y-axis. + * @param Qz Optional output 3x3 rotation matrix around z-axis. + * + * @see org.opencv.calib3d.Calib3d.RQDecomp3x3 + */ + public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy, Mat Qz) + { + + double[] retVal = RQDecomp3x3_0(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj, Qz.nativeObj); + + return retVal; + } + +/** + *

Computes an RQ decomposition of 3x3 matrices.

+ * + *

The function computes an RQ decomposition using the given rotations. This + * function is used in "decomposeProjectionMatrix" to decompose the left 3x3 + * submatrix of a projection matrix into a camera and a rotation matrix.

+ * + *

It optionally returns three rotation matrices, one for each axis, and the + * three Euler angles in degrees (as the return value) that could be used in + * OpenGL. Note, there is always more than one sequence of rotations about the + * three principal axes that results in the same orientation of an object, e.g. + * see [Slabaugh]. Returned three rotation matrices and corresponding three Euler + * angles are only one of the possible solutions.

+ * + * @param src 3x3 input matrix. + * @param mtxR Output 3x3 upper-triangular matrix. + * @param mtxQ Output 3x3 orthogonal matrix. + * + * @see org.opencv.calib3d.Calib3d.RQDecomp3x3 + */ + public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ) + { + + double[] retVal = RQDecomp3x3_1(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj); + + return retVal; + } + + + // + // C++: void Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat()) + // + +/** + *

Converts a rotation matrix to a rotation vector or vice versa.

+ * + *

theta <- norm(r) + * r <- r/ theta + * R = cos(theta) I + (1- cos(theta)) r r^T + sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + *

+ * + *

Inverse transformation can be also done easily, since

+ * + *

sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + * = (R - R^T)/2

+ * + *

A rotation vector is a convenient and most compact representation of a + * rotation matrix (since any rotation matrix has just 3 degrees of freedom). + * The representation is used in the global 3D geometry optimization procedures + * like "calibrateCamera", "stereoCalibrate", or "solvePnP".

+ * + * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). + * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), + * respectively. + * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a + * matrix of partial derivatives of the output array components with respect to + * the input array components. + * + * @see org.opencv.calib3d.Calib3d.Rodrigues + */ + public static void Rodrigues(Mat src, Mat dst, Mat jacobian) + { + + Rodrigues_0(src.nativeObj, dst.nativeObj, jacobian.nativeObj); + + return; + } + +/** + *

Converts a rotation matrix to a rotation vector or vice versa.

+ * + *

theta <- norm(r) + * r <- r/ theta + * R = cos(theta) I + (1- cos(theta)) r r^T + sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + *

+ * + *

Inverse transformation can be also done easily, since

+ * + *

sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + * = (R - R^T)/2

+ * + *

A rotation vector is a convenient and most compact representation of a + * rotation matrix (since any rotation matrix has just 3 degrees of freedom). + * The representation is used in the global 3D geometry optimization procedures + * like "calibrateCamera", "stereoCalibrate", or "solvePnP".

+ * + * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). + * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), + * respectively. + * + * @see org.opencv.calib3d.Calib3d.Rodrigues + */ + public static void Rodrigues(Mat src, Mat dst) + { + + Rodrigues_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: double calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON)) + // + +/** + *

Finds the camera intrinsic and extrinsic parameters from several views of a + * calibration pattern.

+ * + *

The function estimates the intrinsic camera parameters and extrinsic + * parameters for each of the views. The algorithm is based on [Zhang2000] and + * [BouguetMCT]. The coordinates of 3D object points and their corresponding 2D + * projections in each view must be specified. That may be achieved by using an + * object with a known geometry and easily detectable feature points. + * Such an object is called a calibration rig or calibration pattern, and OpenCV + * has built-in support for a chessboard as a calibration rig (see + * "findChessboardCorners"). Currently, initialization of intrinsic parameters + * (when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only + * implemented for planar calibration patterns (where Z-coordinates of the + * object points must be all zeros). 3D calibration rigs can also be used as + * long as initial cameraMatrix is provided.

+ * + *

The algorithm performs the following steps:

+ *
    + *
  • Compute the initial intrinsic parameters (the option only available + * for planar calibration patterns) or read them from the input parameters. The + * distortion coefficients are all set to zeros initially unless some of + * CV_CALIB_FIX_K? are specified. + *
  • Estimate the initial camera pose as if the intrinsic parameters have + * been already known. This is done using "solvePnP". + *
  • Run the global Levenberg-Marquardt optimization algorithm to minimize + * the reprojection error, that is, the total sum of squared distances between + * the observed feature points imagePoints and the projected (using + * the current estimates for camera parameters and the poses) object points + * objectPoints. See "projectPoints" for details. + *
+ * + *

The function returns the final re-projection error.

+ * + *

Note:

+ * + *

If you use a non-square (=non-NxN) grid and "findChessboardCorners" for + * calibration, and calibrateCamera returns bad values (zero + * distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), + * and/or large differences between f_x and f_y (ratios of + * 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) + * instead of using patternSize=cvSize(cols,rows) in + * "findChessboardCorners".

+ * + * @param objectPoints In the new interface it is a vector of vectors of + * calibration pattern points in the calibration pattern coordinate space. The + * outer vector contains as many elements as the number of the pattern views. If + * the same calibration pattern is shown in each view and it is fully visible, + * all the vectors will be the same. Although, it is possible to use partially + * occluded patterns, or even different patterns in different views. Then, the + * vectors will be different. The points are 3D, but since they are in a pattern + * coordinate system, then, if the rig is planar, it may make sense to put the + * model to a XY coordinate plane so that Z-coordinate of each input object + * point is 0. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imagePoints In the new interface it is a vector of vectors of the + * projections of calibration pattern points. imagePoints.size() + * and objectPoints.size() and imagePoints[i].size() + * must be equal to objectPoints[i].size() for each i. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imageSize Size of the image used only to initialize the intrinsic + * camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * . If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO + * are specified, some or all of fx, fy, cx, cy must be initialized + * before calling the function.

+ * @param distCoeffs Output vector of distortion coefficients (k_1, k_2, + * p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. + * @param rvecs Output vector of rotation vectors (see "Rodrigues") estimated + * for each pattern view. That is, each k-th rotation vector together with the + * corresponding k-th translation vector (see the next output parameter + * description) brings the calibration pattern from the model coordinate space + * (in which object points are specified) to the world coordinate space, that + * is, a real position of the calibration pattern in the k-th pattern view + * (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern + * view. + * @param flags Different flags that may be zero or a combination of the + * following values: + *
    + *
  • CV_CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid + * initial values of fx, fy, cx, cy that are optimized further. + * Otherwise, (cx, cy) is initially set to the image center + * (imageSize is used), and focal distances are computed in a + * least-squares fashion. Note, that if intrinsic parameters are known, there is + * no need to use this function just to estimate extrinsic parameters. Use + * "solvePnP" instead. + *
  • CV_CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during + * the global optimization. It stays at the center or at a different location + * specified when CV_CALIB_USE_INTRINSIC_GUESS is set too. + *
  • CV_CALIB_FIX_ASPECT_RATIO The function considers only fy + * as a free parameter. The ratio fx/fy stays the same as in the + * input cameraMatrix. When CV_CALIB_USE_INTRINSIC_GUESS + * is not set, the actual input values of fx and fy + * are ignored, only their ratio is computed and used further.
  • CV_CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients + * (p_1, p_2) are set to zeros and stay zero. + *
  • CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 The corresponding radial + * distortion coefficient is not changed during the optimization. If + * CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the + * supplied distCoeffs matrix is used. Otherwise, it is set to 0. + *
  • CV_CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To + * provide the backward compatibility, this extra flag should be explicitly + * specified to make the calibration function use the rational model and return + * 8 coefficients. If the flag is not set, the function computes and returns + * only 5 distortion coefficients. + *
+ * @param criteria Termination criteria for the iterative optimization + * algorithm. + * + * @see org.opencv.calib3d.Calib3d.calibrateCamera + * @see org.opencv.calib3d.Calib3d#initCameraMatrix2D + * @see org.opencv.calib3d.Calib3d#stereoCalibrate + * @see org.opencv.calib3d.Calib3d#findChessboardCorners + * @see org.opencv.calib3d.Calib3d#solvePnP + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static double calibrateCamera(List objectPoints, List imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List rvecs, List tvecs, int flags, TermCriteria criteria) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); + Mat rvecs_mat = new Mat(); + Mat tvecs_mat = new Mat(); + double retVal = calibrateCamera_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon); + Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); + Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); + return retVal; + } + +/** + *

Finds the camera intrinsic and extrinsic parameters from several views of a + * calibration pattern.

+ * + *

The function estimates the intrinsic camera parameters and extrinsic + * parameters for each of the views. The algorithm is based on [Zhang2000] and + * [BouguetMCT]. The coordinates of 3D object points and their corresponding 2D + * projections in each view must be specified. That may be achieved by using an + * object with a known geometry and easily detectable feature points. + * Such an object is called a calibration rig or calibration pattern, and OpenCV + * has built-in support for a chessboard as a calibration rig (see + * "findChessboardCorners"). Currently, initialization of intrinsic parameters + * (when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only + * implemented for planar calibration patterns (where Z-coordinates of the + * object points must be all zeros). 3D calibration rigs can also be used as + * long as initial cameraMatrix is provided.

+ * + *

The algorithm performs the following steps:

+ *
    + *
  • Compute the initial intrinsic parameters (the option only available + * for planar calibration patterns) or read them from the input parameters. The + * distortion coefficients are all set to zeros initially unless some of + * CV_CALIB_FIX_K? are specified. + *
  • Estimate the initial camera pose as if the intrinsic parameters have + * been already known. This is done using "solvePnP". + *
  • Run the global Levenberg-Marquardt optimization algorithm to minimize + * the reprojection error, that is, the total sum of squared distances between + * the observed feature points imagePoints and the projected (using + * the current estimates for camera parameters and the poses) object points + * objectPoints. See "projectPoints" for details. + *
+ * + *

The function returns the final re-projection error.

+ * + *

Note:

+ * + *

If you use a non-square (=non-NxN) grid and "findChessboardCorners" for + * calibration, and calibrateCamera returns bad values (zero + * distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), + * and/or large differences between f_x and f_y (ratios of + * 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) + * instead of using patternSize=cvSize(cols,rows) in + * "findChessboardCorners".

+ * + * @param objectPoints In the new interface it is a vector of vectors of + * calibration pattern points in the calibration pattern coordinate space. The + * outer vector contains as many elements as the number of the pattern views. If + * the same calibration pattern is shown in each view and it is fully visible, + * all the vectors will be the same. Although, it is possible to use partially + * occluded patterns, or even different patterns in different views. Then, the + * vectors will be different. The points are 3D, but since they are in a pattern + * coordinate system, then, if the rig is planar, it may make sense to put the + * model to a XY coordinate plane so that Z-coordinate of each input object + * point is 0. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imagePoints In the new interface it is a vector of vectors of the + * projections of calibration pattern points. imagePoints.size() + * and objectPoints.size() and imagePoints[i].size() + * must be equal to objectPoints[i].size() for each i. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imageSize Size of the image used only to initialize the intrinsic + * camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * . If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO + * are specified, some or all of fx, fy, cx, cy must be initialized + * before calling the function.

+ * @param distCoeffs Output vector of distortion coefficients (k_1, k_2, + * p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. + * @param rvecs Output vector of rotation vectors (see "Rodrigues") estimated + * for each pattern view. That is, each k-th rotation vector together with the + * corresponding k-th translation vector (see the next output parameter + * description) brings the calibration pattern from the model coordinate space + * (in which object points are specified) to the world coordinate space, that + * is, a real position of the calibration pattern in the k-th pattern view + * (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern + * view. + * @param flags Different flags that may be zero or a combination of the + * following values: + *
    + *
  • CV_CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid + * initial values of fx, fy, cx, cy that are optimized further. + * Otherwise, (cx, cy) is initially set to the image center + * (imageSize is used), and focal distances are computed in a + * least-squares fashion. Note, that if intrinsic parameters are known, there is + * no need to use this function just to estimate extrinsic parameters. Use + * "solvePnP" instead. + *
  • CV_CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during + * the global optimization. It stays at the center or at a different location + * specified when CV_CALIB_USE_INTRINSIC_GUESS is set too. + *
  • CV_CALIB_FIX_ASPECT_RATIO The function considers only fy + * as a free parameter. The ratio fx/fy stays the same as in the + * input cameraMatrix. When CV_CALIB_USE_INTRINSIC_GUESS + * is not set, the actual input values of fx and fy + * are ignored, only their ratio is computed and used further.
  • CV_CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients + * (p_1, p_2) are set to zeros and stay zero. + *
  • CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 The corresponding radial + * distortion coefficient is not changed during the optimization. If + * CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the + * supplied distCoeffs matrix is used. Otherwise, it is set to 0. + *
  • CV_CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To + * provide the backward compatibility, this extra flag should be explicitly + * specified to make the calibration function use the rational model and return + * 8 coefficients. If the flag is not set, the function computes and returns + * only 5 distortion coefficients. + *
+ * + * @see org.opencv.calib3d.Calib3d.calibrateCamera + * @see org.opencv.calib3d.Calib3d#initCameraMatrix2D + * @see org.opencv.calib3d.Calib3d#stereoCalibrate + * @see org.opencv.calib3d.Calib3d#findChessboardCorners + * @see org.opencv.calib3d.Calib3d#solvePnP + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static double calibrateCamera(List objectPoints, List imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List rvecs, List tvecs, int flags) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); + Mat rvecs_mat = new Mat(); + Mat tvecs_mat = new Mat(); + double retVal = calibrateCamera_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags); + Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); + Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); + return retVal; + } + +/** + *

Finds the camera intrinsic and extrinsic parameters from several views of a + * calibration pattern.

+ * + *

The function estimates the intrinsic camera parameters and extrinsic + * parameters for each of the views. The algorithm is based on [Zhang2000] and + * [BouguetMCT]. The coordinates of 3D object points and their corresponding 2D + * projections in each view must be specified. That may be achieved by using an + * object with a known geometry and easily detectable feature points. + * Such an object is called a calibration rig or calibration pattern, and OpenCV + * has built-in support for a chessboard as a calibration rig (see + * "findChessboardCorners"). Currently, initialization of intrinsic parameters + * (when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only + * implemented for planar calibration patterns (where Z-coordinates of the + * object points must be all zeros). 3D calibration rigs can also be used as + * long as initial cameraMatrix is provided.

+ * + *

The algorithm performs the following steps:

+ *
    + *
  • Compute the initial intrinsic parameters (the option only available + * for planar calibration patterns) or read them from the input parameters. The + * distortion coefficients are all set to zeros initially unless some of + * CV_CALIB_FIX_K? are specified. + *
  • Estimate the initial camera pose as if the intrinsic parameters have + * been already known. This is done using "solvePnP". + *
  • Run the global Levenberg-Marquardt optimization algorithm to minimize + * the reprojection error, that is, the total sum of squared distances between + * the observed feature points imagePoints and the projected (using + * the current estimates for camera parameters and the poses) object points + * objectPoints. See "projectPoints" for details. + *
+ * + *

The function returns the final re-projection error.

+ * + *

Note:

+ * + *

If you use a non-square (=non-NxN) grid and "findChessboardCorners" for + * calibration, and calibrateCamera returns bad values (zero + * distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), + * and/or large differences between f_x and f_y (ratios of + * 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) + * instead of using patternSize=cvSize(cols,rows) in + * "findChessboardCorners".

+ * + * @param objectPoints In the new interface it is a vector of vectors of + * calibration pattern points in the calibration pattern coordinate space. The + * outer vector contains as many elements as the number of the pattern views. If + * the same calibration pattern is shown in each view and it is fully visible, + * all the vectors will be the same. Although, it is possible to use partially + * occluded patterns, or even different patterns in different views. Then, the + * vectors will be different. The points are 3D, but since they are in a pattern + * coordinate system, then, if the rig is planar, it may make sense to put the + * model to a XY coordinate plane so that Z-coordinate of each input object + * point is 0. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imagePoints In the new interface it is a vector of vectors of the + * projections of calibration pattern points. imagePoints.size() + * and objectPoints.size() and imagePoints[i].size() + * must be equal to objectPoints[i].size() for each i. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imageSize Size of the image used only to initialize the intrinsic + * camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * . If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO + * are specified, some or all of fx, fy, cx, cy must be initialized + * before calling the function.

+ * @param distCoeffs Output vector of distortion coefficients (k_1, k_2, + * p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. + * @param rvecs Output vector of rotation vectors (see "Rodrigues") estimated + * for each pattern view. That is, each k-th rotation vector together with the + * corresponding k-th translation vector (see the next output parameter + * description) brings the calibration pattern from the model coordinate space + * (in which object points are specified) to the world coordinate space, that + * is, a real position of the calibration pattern in the k-th pattern view + * (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern + * view. + * + * @see org.opencv.calib3d.Calib3d.calibrateCamera + * @see org.opencv.calib3d.Calib3d#initCameraMatrix2D + * @see org.opencv.calib3d.Calib3d#stereoCalibrate + * @see org.opencv.calib3d.Calib3d#findChessboardCorners + * @see org.opencv.calib3d.Calib3d#solvePnP + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static double calibrateCamera(List objectPoints, List imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List rvecs, List tvecs) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); + Mat rvecs_mat = new Mat(); + Mat tvecs_mat = new Mat(); + double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj); + Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); + Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); + return retVal; + } + + + // + // C++: void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio) + // + +/** + *

Computes useful camera characteristics from the camera matrix.

+ * + *

The function computes various useful camera characteristics from the + * previously estimated camera matrix.

+ * + * @param cameraMatrix Input camera matrix that can be estimated by + * "calibrateCamera" or "stereoCalibrate". + * @param imageSize Input image size in pixels. + * @param apertureWidth Physical width of the sensor. + * @param apertureHeight Physical height of the sensor. + * @param fovx Output field of view in degrees along the horizontal sensor axis. + * @param fovy Output field of view in degrees along the vertical sensor axis. + * @param focalLength Focal length of the lens in mm. + * @param principalPoint Principal point in pixels. + * @param aspectRatio f_y/f_x + * + * @see org.opencv.calib3d.Calib3d.calibrationMatrixValues + */ + public static void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double[] fovx, double[] fovy, double[] focalLength, Point principalPoint, double[] aspectRatio) + { + double[] fovx_out = new double[1]; + double[] fovy_out = new double[1]; + double[] focalLength_out = new double[1]; + double[] principalPoint_out = new double[2]; + double[] aspectRatio_out = new double[1]; + calibrationMatrixValues_0(cameraMatrix.nativeObj, imageSize.width, imageSize.height, apertureWidth, apertureHeight, fovx_out, fovy_out, focalLength_out, principalPoint_out, aspectRatio_out); + if(fovx!=null) fovx[0] = (double)fovx_out[0]; + if(fovy!=null) fovy[0] = (double)fovy_out[0]; + if(focalLength!=null) focalLength[0] = (double)focalLength_out[0]; + if(principalPoint!=null){ principalPoint.x = principalPoint_out[0]; principalPoint.y = principalPoint_out[1]; } + if(aspectRatio!=null) aspectRatio[0] = (double)aspectRatio_out[0]; + return; + } + + + // + // C++: void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat()) + // + +/** + *

Combines two rotation-and-shift transformations.

+ * + *

The functions compute:

+ * + *

rvec3 = rodrigues ^(-1)(rodrigues(rvec2) * rodrigues(rvec1)) + * tvec3 = rodrigues(rvec2) * tvec1 + tvec2,

+ * + *

where rodrigues denotes a rotation vector to a rotation matrix + * transformation, and rodrigues^(-1) denotes the inverse + * transformation. See "Rodrigues" for details.

+ * + *

Also, the functions can compute the derivatives of the output vectors with + * regards to the input vectors (see "matMulDeriv"). + * The functions are used inside "stereoCalibrate" but can also be used in your + * own code where Levenberg-Marquardt or another gradient-based solver is used + * to optimize a function that contains a matrix multiplication.

+ * + * @param rvec1 First rotation vector. + * @param tvec1 First translation vector. + * @param rvec2 Second rotation vector. + * @param tvec2 Second translation vector. + * @param rvec3 Output rotation vector of the superposition. + * @param tvec3 Output translation vector of the superposition. + * @param dr3dr1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dr3dt1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dr3dr2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dr3dt2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dr1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dt1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dr2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dt2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * + * @see org.opencv.calib3d.Calib3d.composeRT + */ + public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2, Mat dt3dt2) + { + + composeRT_0(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj, dt3dt2.nativeObj); + + return; + } + +/** + *

Combines two rotation-and-shift transformations.

+ * + *

The functions compute:

+ * + *

rvec3 = rodrigues ^(-1)(rodrigues(rvec2) * rodrigues(rvec1)) + * tvec3 = rodrigues(rvec2) * tvec1 + tvec2,

+ * + *

where rodrigues denotes a rotation vector to a rotation matrix + * transformation, and rodrigues^(-1) denotes the inverse + * transformation. See "Rodrigues" for details.

+ * + *

Also, the functions can compute the derivatives of the output vectors with + * regards to the input vectors (see "matMulDeriv"). + * The functions are used inside "stereoCalibrate" but can also be used in your + * own code where Levenberg-Marquardt or another gradient-based solver is used + * to optimize a function that contains a matrix multiplication.

+ * + * @param rvec1 First rotation vector. + * @param tvec1 First translation vector. + * @param rvec2 Second rotation vector. + * @param tvec2 Second translation vector. + * @param rvec3 Output rotation vector of the superposition. + * @param tvec3 Output translation vector of the superposition. + * + * @see org.opencv.calib3d.Calib3d.composeRT + */ + public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3) + { + + composeRT_1(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj); + + return; + } + + + // + // C++: void convertPointsFromHomogeneous(Mat src, Mat& dst) + // + +/** + *

Converts points from homogeneous to Euclidean space.

+ * + *

The function converts points homogeneous to Euclidean space using perspective + * projection. That is, each point (x1, x2,... x(n-1), xn) is + * converted to (x1/xn, x2/xn,..., x(n-1)/xn). When + * xn=0, the output point coordinates will be (0,0,0,...).

+ * + * @param src Input vector of N-dimensional points. + * @param dst Output vector of N-1-dimensional points. + * + * @see org.opencv.calib3d.Calib3d.convertPointsFromHomogeneous + */ + public static void convertPointsFromHomogeneous(Mat src, Mat dst) + { + + convertPointsFromHomogeneous_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void convertPointsToHomogeneous(Mat src, Mat& dst) + // + +/** + *

Converts points from Euclidean to homogeneous space.

+ * + *

The function converts points from Euclidean to homogeneous space by appending + * 1's to the tuple of point coordinates. That is, each point (x1, x2,..., + * xn) is converted to (x1, x2,..., xn, 1).

+ * + * @param src Input vector of N-dimensional points. + * @param dst Output vector of N+1-dimensional points. + * + * @see org.opencv.calib3d.Calib3d.convertPointsToHomogeneous + */ + public static void convertPointsToHomogeneous(Mat src, Mat dst) + { + + convertPointsToHomogeneous_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2) + // + +/** + *

Refines coordinates of corresponding points.

+ * + *

The function implements the Optimal Triangulation Method (see Multiple View + * Geometry for details). For each given point correspondence points1[i] <-> + * points2[i], and a fundamental matrix F, it computes the corrected + * correspondences newPoints1[i] <-> newPoints2[i] that minimize the geometric + * error d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2 + * (where d(a,b) is the geometric distance between points a + * and b) subject to the epipolar constraint newPoints2^T * F * + * newPoints1 = 0.

+ * + * @param F 3x3 fundamental matrix. + * @param points1 1xN array containing the first set of points. + * @param points2 1xN array containing the second set of points. + * @param newPoints1 The optimized points1. + * @param newPoints2 The optimized points2. + * + * @see org.opencv.calib3d.Calib3d.correctMatches + */ + public static void correctMatches(Mat F, Mat points1, Mat points2, Mat newPoints1, Mat newPoints2) + { + + correctMatches_0(F.nativeObj, points1.nativeObj, points2.nativeObj, newPoints1.nativeObj, newPoints2.nativeObj); + + return; + } + + + // + // C++: void decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat()) + // + +/** + *

Decomposes a projection matrix into a rotation matrix and a camera matrix.

+ * + *

The function computes a decomposition of a projection matrix into a + * calibration and a rotation matrix and the position of a camera.

+ * + *

It optionally returns three rotation matrices, one for each axis, and three + * Euler angles that could be used in OpenGL. Note, there is always more than + * one sequence of rotations about the three principal axes that results in the + * same orientation of an object, e.g. see [Slabaugh]. Returned three rotation + * matrices and corresponding three Euler angles are only one of the possible + * solutions.

+ * + *

The function is based on "RQDecomp3x3".

+ * + * @param projMatrix 3x4 input projection matrix P. + * @param cameraMatrix Output 3x3 camera matrix K. + * @param rotMatrix Output 3x3 external rotation matrix R. + * @param transVect Output 4x1 translation vector T. + * @param rotMatrixX a rotMatrixX + * @param rotMatrixY a rotMatrixY + * @param rotMatrixZ a rotMatrixZ + * @param eulerAngles Optional three-element vector containing three Euler + * angles of rotation in degrees. + * + * @see org.opencv.calib3d.Calib3d.decomposeProjectionMatrix + */ + public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ, Mat eulerAngles) + { + + decomposeProjectionMatrix_0(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj, eulerAngles.nativeObj); + + return; + } + +/** + *

Decomposes a projection matrix into a rotation matrix and a camera matrix.

+ * + *

The function computes a decomposition of a projection matrix into a + * calibration and a rotation matrix and the position of a camera.

+ * + *

It optionally returns three rotation matrices, one for each axis, and three + * Euler angles that could be used in OpenGL. Note, there is always more than + * one sequence of rotations about the three principal axes that results in the + * same orientation of an object, e.g. see [Slabaugh]. Returned three rotation + * matrices and corresponding three Euler angles are only one of the possible + * solutions.

+ * + *

The function is based on "RQDecomp3x3".

+ * + * @param projMatrix 3x4 input projection matrix P. + * @param cameraMatrix Output 3x3 camera matrix K. + * @param rotMatrix Output 3x3 external rotation matrix R. + * @param transVect Output 4x1 translation vector T. + * + * @see org.opencv.calib3d.Calib3d.decomposeProjectionMatrix + */ + public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect) + { + + decomposeProjectionMatrix_1(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj); + + return; + } + + + // + // C++: void drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound) + // + +/** + *

Renders the detected chessboard corners.

+ * + *

The function draws individual chessboard corners detected either as red + * circles if the board was not found, or as colored corners connected with + * lines if the board was found.

+ * + * @param image Destination image. It must be an 8-bit color image. + * @param patternSize Number of inner corners per a chessboard row and column + * (patternSize = cv.Size(points_per_row,points_per_column)). + * @param corners Array of detected corners, the output of findChessboardCorners. + * @param patternWasFound Parameter indicating whether the complete board was + * found or not. The return value of "findChessboardCorners" should be passed + * here. + * + * @see org.opencv.calib3d.Calib3d.drawChessboardCorners + */ + public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound) + { + Mat corners_mat = corners; + drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound); + + return; + } + + + // + // C++: int estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99) + // + +/** + *

Computes an optimal affine transformation between two 3D point sets.

+ * + *

The function estimates an optimal 3D affine transformation between two 3D + * point sets using the RANSAC algorithm.

+ * + * @param src First input 3D point set. + * @param dst Second input 3D point set. + * @param out Output 3D affine transformation matrix 3 x 4. + * @param inliers Output vector indicating which points are inliers. + * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to + * consider a point as an inlier. + * @param confidence Confidence level, between 0 and 1, for the estimated + * transformation. Anything between 0.95 and 0.99 is usually good enough. Values + * too close to 1 can slow down the estimation significantly. Values lower than + * 0.8-0.9 can result in an incorrectly estimated transformation. + * + * @see org.opencv.calib3d.Calib3d.estimateAffine3D + */ + public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) + { + + int retVal = estimateAffine3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence); + + return retVal; + } + +/** + *

Computes an optimal affine transformation between two 3D point sets.

+ * + *

The function estimates an optimal 3D affine transformation between two 3D + * point sets using the RANSAC algorithm.

+ * + * @param src First input 3D point set. + * @param dst Second input 3D point set. + * @param out Output 3D affine transformation matrix 3 x 4. + * @param inliers Output vector indicating which points are inliers. + * + * @see org.opencv.calib3d.Calib3d.estimateAffine3D + */ + public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers) + { + + int retVal = estimateAffine3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj); + + return retVal; + } + + + // + // C++: void filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat()) + // + +/** + *

Filters off small noise blobs (speckles) in the disparity map

+ * + * @param img The input 16-bit signed disparity image + * @param newVal The disparity value used to paint-off the speckles + * @param maxSpeckleSize The maximum speckle size to consider it a speckle. + * Larger blobs are not affected by the algorithm + * @param maxDiff Maximum difference between neighbor disparity pixels to put + * them into the same blob. Note that since StereoBM, StereoSGBM and may be + * other algorithms return a fixed-point disparity map, where disparity values + * are multiplied by 16, this scale factor should be taken into account when + * specifying this parameter value. + * @param buf The optional temporary buffer to avoid memory allocation within + * the function. + * + * @see org.opencv.calib3d.Calib3d.filterSpeckles + */ + public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff, Mat buf) + { + + filterSpeckles_0(img.nativeObj, newVal, maxSpeckleSize, maxDiff, buf.nativeObj); + + return; + } + +/** + *

Filters off small noise blobs (speckles) in the disparity map

+ * + * @param img The input 16-bit signed disparity image + * @param newVal The disparity value used to paint-off the speckles + * @param maxSpeckleSize The maximum speckle size to consider it a speckle. + * Larger blobs are not affected by the algorithm + * @param maxDiff Maximum difference between neighbor disparity pixels to put + * them into the same blob. Note that since StereoBM, StereoSGBM and may be + * other algorithms return a fixed-point disparity map, where disparity values + * are multiplied by 16, this scale factor should be taken into account when + * specifying this parameter value. + * + * @see org.opencv.calib3d.Calib3d.filterSpeckles + */ + public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff) + { + + filterSpeckles_1(img.nativeObj, newVal, maxSpeckleSize, maxDiff); + + return; + } + + + // + // C++: bool findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE) + // + +/** + *

Finds the positions of internal corners of the chessboard.

+ * + *

The function attempts to determine whether the input image is a view of the + * chessboard pattern and locate the internal chessboard corners. The function + * returns a non-zero value if all of the corners are found and they are placed + * in a certain order (row by row, left to right in every row). Otherwise, if + * the function fails to find all the corners or reorder them, it returns 0. For + * example, a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, + * that is, points where the black squares touch each other. + * The detected coordinates are approximate, and to determine their positions + * more accurately, the function calls "cornerSubPix". + * You also may use the function "cornerSubPix" with different parameters if + * returned coordinates are not accurate enough. + * Sample usage of detecting and drawing chessboard corners:

+ * + *

// C++ code:

+ * + *

Size patternsize(8,6); //interior number of corners

+ * + *

Mat gray =....; //source image

+ * + *

vector corners; //this will be filled by the detected corners

+ * + *

//CALIB_CB_FAST_CHECK saves a lot of time on images

+ * + *

//that do not contain any chessboard corners

+ * + *

bool patternfound = findChessboardCorners(gray, patternsize, corners,

+ * + *

CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE

+ * + *

+ CALIB_CB_FAST_CHECK);

+ * + *

if(patternfound)

+ * + *

cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),

+ * + *

TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

+ * + *

drawChessboardCorners(img, patternsize, Mat(corners), patternfound);

+ * + *

Note: The function requires white space (like a square-thick border, the + * wider the better) around the board to make the detection more robust in + * various environments. Otherwise, if there is no border and the background is + * dark, the outer black squares cannot be segmented properly and so the square + * grouping and ordering algorithm fails. + *

+ * + * @param image Source chessboard view. It must be an 8-bit grayscale or color + * image. + * @param patternSize Number of inner corners per a chessboard row and column + * (patternSize = cvSize(points_per_row,points_per_colum) = + * cvSize(columns,rows)). + * @param corners Output array of detected corners. + * @param flags Various operation flags that can be zero or a combination of the + * following values: + *
    + *
  • CV_CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the + * image to black and white, rather than a fixed threshold level (computed from + * the average image brightness). + *
  • CV_CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with + * "equalizeHist" before applying fixed or adaptive thresholding. + *
  • CV_CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, + * perimeter, square-like shape) to filter out false quads extracted at the + * contour retrieval stage. + *
  • CALIB_CB_FAST_CHECK Run a fast check on the image that looks for + * chessboard corners, and shortcut the call if none is found. This can + * drastically speed up the call in the degenerate condition when no chessboard + * is observed. + *
+ * + * @see org.opencv.calib3d.Calib3d.findChessboardCorners + */ + public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, int flags) + { + Mat corners_mat = corners; + boolean retVal = findChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, flags); + + return retVal; + } + +/** + *

Finds the positions of internal corners of the chessboard.

+ * + *

The function attempts to determine whether the input image is a view of the + * chessboard pattern and locate the internal chessboard corners. The function + * returns a non-zero value if all of the corners are found and they are placed + * in a certain order (row by row, left to right in every row). Otherwise, if + * the function fails to find all the corners or reorder them, it returns 0. For + * example, a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, + * that is, points where the black squares touch each other. + * The detected coordinates are approximate, and to determine their positions + * more accurately, the function calls "cornerSubPix". + * You also may use the function "cornerSubPix" with different parameters if + * returned coordinates are not accurate enough. + * Sample usage of detecting and drawing chessboard corners:

+ * + *

// C++ code:

+ * + *

Size patternsize(8,6); //interior number of corners

+ * + *

Mat gray =....; //source image

+ * + *

vector corners; //this will be filled by the detected corners

+ * + *

//CALIB_CB_FAST_CHECK saves a lot of time on images

+ * + *

//that do not contain any chessboard corners

+ * + *

bool patternfound = findChessboardCorners(gray, patternsize, corners,

+ * + *

CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE

+ * + *

+ CALIB_CB_FAST_CHECK);

+ * + *

if(patternfound)

+ * + *

cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),

+ * + *

TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

+ * + *

drawChessboardCorners(img, patternsize, Mat(corners), patternfound);

+ * + *

Note: The function requires white space (like a square-thick border, the + * wider the better) around the board to make the detection more robust in + * various environments. Otherwise, if there is no border and the background is + * dark, the outer black squares cannot be segmented properly and so the square + * grouping and ordering algorithm fails. + *

+ * + * @param image Source chessboard view. It must be an 8-bit grayscale or color + * image. + * @param patternSize Number of inner corners per a chessboard row and column + * (patternSize = cvSize(points_per_row,points_per_colum) = + * cvSize(columns,rows)). + * @param corners Output array of detected corners. + * + * @see org.opencv.calib3d.Calib3d.findChessboardCorners + */ + public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners) + { + Mat corners_mat = corners; + boolean retVal = findChessboardCorners_1(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj); + + return retVal; + } + + + // + // C++: bool findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = new SimpleBlobDetector()) + // + + // Unknown type 'Ptr_FeatureDetector' (I), skipping the function + + + // + // C++: bool findCirclesGridDefault(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID) + // + + public static boolean findCirclesGridDefault(Mat image, Size patternSize, Mat centers, int flags) + { + + boolean retVal = findCirclesGridDefault_0(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj, flags); + + return retVal; + } + + public static boolean findCirclesGridDefault(Mat image, Size patternSize, Mat centers) + { + + boolean retVal = findCirclesGridDefault_1(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj); + + return retVal; + } + + + // + // C++: Mat findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double param1 = 3., double param2 = 0.99, Mat& mask = Mat()) + // + +/** + *

Calculates a fundamental matrix from the corresponding points in two images.

+ * + *

The epipolar geometry is described by the following equation:

+ * + *

[p_2; 1]^T F [p_1; 1] = 0

+ * + *

where F is a fundamental matrix, p_1 and p_2 are + * corresponding points in the first and the second images, respectively.

+ * + *

The function calculates the fundamental matrix using one of four methods + * listed above and returns the found fundamental matrix. Normally just one + * matrix is found. But in case of the 7-point algorithm, the function may + * return up to 3 solutions (9 x 3 matrix that stores all 3 matrices + * sequentially).

+ * + *

The calculated fundamental matrix may be passed further to "computeCorrespondEpilines" + * that finds the epipolar lines corresponding to the specified points. It can + * also be passed to "stereoRectifyUncalibrated" to compute the rectification + * transformation. + *

+ * + *

// C++ code:

+ * + *

// Example. Estimation of fundamental matrix using the RANSAC algorithm

+ * + *

int point_count = 100;

+ * + *

vector points1(point_count);

+ * + *

vector points2(point_count);

+ * + *

// initialize the points here... * /

+ * + *

for(int i = 0; i < point_count; i++)

+ * + * + *

points1[i] =...;

+ * + *

points2[i] =...;

+ * + * + *

Mat fundamental_matrix =

+ * + *

findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);

+ * + * @param points1 Array of N points from the first image. The point + * coordinates should be floating-point (single or double precision). + * @param points2 Array of the second image points of the same size and format + * as points1. + * @param method Method for computing a fundamental matrix. + *
    + *
  • CV_FM_7POINT for a 7-point algorithm. N = 7 + *
  • CV_FM_8POINT for an 8-point algorithm. N >= 8 + *
  • CV_FM_RANSAC for the RANSAC algorithm. N >= 8 + *
  • CV_FM_LMEDS for the LMedS algorithm. N >= 8 + *
+ * @param param1 Parameter used for RANSAC. It is the maximum distance from a + * point to an epipolar line in pixels, beyond which the point is considered an + * outlier and is not used for computing the final fundamental matrix. It can be + * set to something like 1-3, depending on the accuracy of the point + * localization, image resolution, and the image noise. + * @param param2 Parameter used for the RANSAC or LMedS methods only. It + * specifies a desirable level of confidence (probability) that the estimated + * matrix is correct. + * @param mask a mask + * + * @see org.opencv.calib3d.Calib3d.findFundamentalMat + */ + public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double param1, double param2, Mat mask) + { + Mat points1_mat = points1; + Mat points2_mat = points2; + Mat retVal = new Mat(findFundamentalMat_0(points1_mat.nativeObj, points2_mat.nativeObj, method, param1, param2, mask.nativeObj)); + + return retVal; + } + +/** + *

Calculates a fundamental matrix from the corresponding points in two images.

+ * + *

The epipolar geometry is described by the following equation:

+ * + *

[p_2; 1]^T F [p_1; 1] = 0

+ * + *

where F is a fundamental matrix, p_1 and p_2 are + * corresponding points in the first and the second images, respectively.

+ * + *

The function calculates the fundamental matrix using one of four methods + * listed above and returns the found fundamental matrix. Normally just one + * matrix is found. But in case of the 7-point algorithm, the function may + * return up to 3 solutions (9 x 3 matrix that stores all 3 matrices + * sequentially).

+ * + *

The calculated fundamental matrix may be passed further to "computeCorrespondEpilines" + * that finds the epipolar lines corresponding to the specified points. It can + * also be passed to "stereoRectifyUncalibrated" to compute the rectification + * transformation. + *

+ * + *

// C++ code:

+ * + *

// Example. Estimation of fundamental matrix using the RANSAC algorithm

+ * + *

int point_count = 100;

+ * + *

vector points1(point_count);

+ * + *

vector points2(point_count);

+ * + *

// initialize the points here... * /

+ * + *

for(int i = 0; i < point_count; i++)

+ * + * + *

points1[i] =...;

+ * + *

points2[i] =...;

+ * + * + *

Mat fundamental_matrix =

+ * + *

findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);

+ * + * @param points1 Array of N points from the first image. The point + * coordinates should be floating-point (single or double precision). + * @param points2 Array of the second image points of the same size and format + * as points1. + * @param method Method for computing a fundamental matrix. + *
    + *
  • CV_FM_7POINT for a 7-point algorithm. N = 7 + *
  • CV_FM_8POINT for an 8-point algorithm. N >= 8 + *
  • CV_FM_RANSAC for the RANSAC algorithm. N >= 8 + *
  • CV_FM_LMEDS for the LMedS algorithm. N >= 8 + *
+ * @param param1 Parameter used for RANSAC. It is the maximum distance from a + * point to an epipolar line in pixels, beyond which the point is considered an + * outlier and is not used for computing the final fundamental matrix. It can be + * set to something like 1-3, depending on the accuracy of the point + * localization, image resolution, and the image noise. + * @param param2 Parameter used for the RANSAC or LMedS methods only. It + * specifies a desirable level of confidence (probability) that the estimated + * matrix is correct. + * + * @see org.opencv.calib3d.Calib3d.findFundamentalMat + */ + public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double param1, double param2) + { + Mat points1_mat = points1; + Mat points2_mat = points2; + Mat retVal = new Mat(findFundamentalMat_1(points1_mat.nativeObj, points2_mat.nativeObj, method, param1, param2)); + + return retVal; + } + +/** + *

Calculates a fundamental matrix from the corresponding points in two images.

+ * + *

The epipolar geometry is described by the following equation:

+ * + *

[p_2; 1]^T F [p_1; 1] = 0

+ * + *

where F is a fundamental matrix, p_1 and p_2 are + * corresponding points in the first and the second images, respectively.

+ * + *

The function calculates the fundamental matrix using one of four methods + * listed above and returns the found fundamental matrix. Normally just one + * matrix is found. But in case of the 7-point algorithm, the function may + * return up to 3 solutions (9 x 3 matrix that stores all 3 matrices + * sequentially).

+ * + *

The calculated fundamental matrix may be passed further to "computeCorrespondEpilines" + * that finds the epipolar lines corresponding to the specified points. It can + * also be passed to "stereoRectifyUncalibrated" to compute the rectification + * transformation. + *

+ * + *

// C++ code:

+ * + *

// Example. Estimation of fundamental matrix using the RANSAC algorithm

+ * + *

int point_count = 100;

+ * + *

vector points1(point_count);

+ * + *

vector points2(point_count);

+ * + *

// initialize the points here... * /

+ * + *

for(int i = 0; i < point_count; i++)

+ * + * + *

points1[i] =...;

+ * + *

points2[i] =...;

+ * + * + *

Mat fundamental_matrix =

+ * + *

findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);

+ * + * @param points1 Array of N points from the first image. The point + * coordinates should be floating-point (single or double precision). + * @param points2 Array of the second image points of the same size and format + * as points1. + * + * @see org.opencv.calib3d.Calib3d.findFundamentalMat + */ + public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2) + { + Mat points1_mat = points1; + Mat points2_mat = points2; + Mat retVal = new Mat(findFundamentalMat_2(points1_mat.nativeObj, points2_mat.nativeObj)); + + return retVal; + } + + + // + // C++: Mat findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat()) + // + +/** + *

Finds a perspective transformation between two planes.

+ * + *

The functions find and return the perspective transformation H + * between the source and the destination planes:

+ * + *

s_i [x'_i y'_i 1] ~ H [x_i y_i 1]

+ * + *

so that the back-projection error

+ * + *

sum _i(x'_i- (h_11 x_i + h_12 y_i + h_13)/(h_(31) x_i + h_32 y_i + + * h_33))^2+ (y'_i- (h_21 x_i + h_22 y_i + h_23)/(h_(31) x_i + h_32 y_i + + * h_33))^2

+ * + *

is minimized. If the parameter method is set to the default + * value 0, the function uses all the point pairs to compute an initial + * homography estimate with a simple least-squares scheme.

+ * + *

However, if not all of the point pairs (srcPoints_i,dstPoints_i) + * fit the rigid perspective transformation (that is, there are some outliers), + * this initial estimate will be poor. + * In this case, you can use one of the two robust methods. Both methods, + * RANSAC and LMeDS, try many different random subsets + * of the corresponding point pairs (of four pairs each), estimate the + * homography matrix using this subset and a simple least-square algorithm, and + * then compute the quality/goodness of the computed homography (which is the + * number of inliers for RANSAC or the median re-projection error for LMeDs). + * The best subset is then used to produce the initial estimate of the + * homography matrix and the mask of inliers/outliers.

+ * + *

Regardless of the method, robust or not, the computed homography matrix is + * refined further (using inliers only in case of a robust method) with the + * Levenberg-Marquardt method to reduce the re-projection error even more.

+ * + *

The method RANSAC can handle practically any ratio of outliers + * but it needs a threshold to distinguish inliers from outliers. + * The method LMeDS does not need any threshold but it works + * correctly only when there are more than 50% of inliers. Finally, if there are + * no outliers and the noise is rather small, use the default method + * (method=0).

+ * + *

The function is used to find initial intrinsic and extrinsic matrices. + * Homography matrix is determined up to a scale. Thus, it is normalized so that + * h_33=1.

+ * + * @param srcPoints Coordinates of the points in the original plane, a matrix of + * the type CV_32FC2 or vector. + * @param dstPoints Coordinates of the points in the target plane, a matrix of + * the type CV_32FC2 or a vector. + * @param method Method used to computed a homography matrix. The following + * methods are possible: + *
    + *
  • 0 - a regular method using all the points + *
  • CV_RANSAC - RANSAC-based robust method + *
  • CV_LMEDS - Least-Median robust method + *
+ * @param ransacReprojThreshold Maximum allowed reprojection error to treat a + * point pair as an inlier (used in the RANSAC method only). That is, if + * + *

| dstPoints _i - convertPointsHomogeneous(H * srcPoints _i)| > + * ransacReprojThreshold

+ * + *

then the point i is considered an outlier. If srcPoints + * and dstPoints are measured in pixels, it usually makes sense to + * set this parameter somewhere in the range of 1 to 10.

+ * @param mask Optional output mask set by a robust method (CV_RANSAC + * or CV_LMEDS). Note that the input mask values are ignored. + * + * @see org.opencv.calib3d.Calib3d.findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask) + { + Mat srcPoints_mat = srcPoints; + Mat dstPoints_mat = dstPoints; + Mat retVal = new Mat(findHomography_0(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj)); + + return retVal; + } + +/** + *

Finds a perspective transformation between two planes.

+ * + *

The functions find and return the perspective transformation H + * between the source and the destination planes:

+ * + *

s_i [x'_i y'_i 1] ~ H [x_i y_i 1]

+ * + *

so that the back-projection error

+ * + *

sum _i(x'_i- (h_11 x_i + h_12 y_i + h_13)/(h_(31) x_i + h_32 y_i + + * h_33))^2+ (y'_i- (h_21 x_i + h_22 y_i + h_23)/(h_(31) x_i + h_32 y_i + + * h_33))^2

+ * + *

is minimized. If the parameter method is set to the default + * value 0, the function uses all the point pairs to compute an initial + * homography estimate with a simple least-squares scheme.

+ * + *

However, if not all of the point pairs (srcPoints_i,dstPoints_i) + * fit the rigid perspective transformation (that is, there are some outliers), + * this initial estimate will be poor. + * In this case, you can use one of the two robust methods. Both methods, + * RANSAC and LMeDS, try many different random subsets + * of the corresponding point pairs (of four pairs each), estimate the + * homography matrix using this subset and a simple least-square algorithm, and + * then compute the quality/goodness of the computed homography (which is the + * number of inliers for RANSAC or the median re-projection error for LMeDs). + * The best subset is then used to produce the initial estimate of the + * homography matrix and the mask of inliers/outliers.

+ * + *

Regardless of the method, robust or not, the computed homography matrix is + * refined further (using inliers only in case of a robust method) with the + * Levenberg-Marquardt method to reduce the re-projection error even more.

+ * + *

The method RANSAC can handle practically any ratio of outliers + * but it needs a threshold to distinguish inliers from outliers. + * The method LMeDS does not need any threshold but it works + * correctly only when there are more than 50% of inliers. Finally, if there are + * no outliers and the noise is rather small, use the default method + * (method=0).

+ * + *

The function is used to find initial intrinsic and extrinsic matrices. + * Homography matrix is determined up to a scale. Thus, it is normalized so that + * h_33=1.

+ * + * @param srcPoints Coordinates of the points in the original plane, a matrix of + * the type CV_32FC2 or vector. + * @param dstPoints Coordinates of the points in the target plane, a matrix of + * the type CV_32FC2 or a vector. + * @param method Method used to compute a homography matrix. The following + * methods are possible: + *
    + *
  • 0 - a regular method using all the points + *
  • CV_RANSAC - RANSAC-based robust method + *
  • CV_LMEDS - Least-Median robust method + *
+ * @param ransacReprojThreshold Maximum allowed reprojection error to treat a + * point pair as an inlier (used in the RANSAC method only). That is, if + * + *

| dstPoints _i - convertPointsHomogeneous(H * srcPoints _i)| > + * ransacReprojThreshold

+ * + *

then the point i is considered an outlier. If srcPoints + * and dstPoints are measured in pixels, it usually makes sense to + * set this parameter somewhere in the range of 1 to 10.

+ * + * @see org.opencv.calib3d.Calib3d.findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold) + { + Mat srcPoints_mat = srcPoints; + Mat dstPoints_mat = dstPoints; + Mat retVal = new Mat(findHomography_1(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold)); + + return retVal; + } + +/** + *

Finds a perspective transformation between two planes.

+ * + *

The functions find and return the perspective transformation H + * between the source and the destination planes:

+ * + *

s_i [x'_i y'_i 1] ~ H [x_i y_i 1]

+ * + *

so that the back-projection error

+ * + *

sum _i(x'_i- (h_11 x_i + h_12 y_i + h_13)/(h_(31) x_i + h_32 y_i + + * h_33))^2+ (y'_i- (h_21 x_i + h_22 y_i + h_23)/(h_(31) x_i + h_32 y_i + + * h_33))^2

+ * + *

is minimized. If the parameter method is set to the default + * value 0, the function uses all the point pairs to compute an initial + * homography estimate with a simple least-squares scheme.

+ * + *

However, if not all of the point pairs (srcPoints_i,dstPoints_i) + * fit the rigid perspective transformation (that is, there are some outliers), + * this initial estimate will be poor. + * In this case, you can use one of the two robust methods. Both methods, + * RANSAC and LMeDS, try many different random subsets + * of the corresponding point pairs (of four pairs each), estimate the + * homography matrix using this subset and a simple least-square algorithm, and + * then compute the quality/goodness of the computed homography (which is the + * number of inliers for RANSAC or the median re-projection error for LMeDs). + * The best subset is then used to produce the initial estimate of the + * homography matrix and the mask of inliers/outliers.

+ * + *

Regardless of the method, robust or not, the computed homography matrix is + * refined further (using inliers only in case of a robust method) with the + * Levenberg-Marquardt method to reduce the re-projection error even more.

+ * + *

The method RANSAC can handle practically any ratio of outliers + * but it needs a threshold to distinguish inliers from outliers. + * The method LMeDS does not need any threshold but it works + * correctly only when there are more than 50% of inliers. Finally, if there are + * no outliers and the noise is rather small, use the default method + * (method=0).

+ * + *

The function is used to find initial intrinsic and extrinsic matrices. + * Homography matrix is determined up to a scale. Thus, it is normalized so that + * h_33=1.

+ * + * @param srcPoints Coordinates of the points in the original plane, a matrix of + * the type CV_32FC2 or vector. + * @param dstPoints Coordinates of the points in the target plane, a matrix of + * the type CV_32FC2 or a vector. + * + * @see org.opencv.calib3d.Calib3d.findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints) + { + Mat srcPoints_mat = srcPoints; + Mat dstPoints_mat = dstPoints; + Mat retVal = new Mat(findHomography_2(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj)); + + return retVal; + } + + + // + // C++: Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false) + // + +/** + *

Returns the new camera matrix based on the free scaling parameter.

+ * + *

The function computes and returns the optimal new camera matrix based on the + * free scaling parameter. By varying this parameter, you may retrieve only + * sensible pixels alpha=0, keep all the original image pixels if + * there is valuable information in the corners alpha=1, or get + * something in between. When alpha>0, the undistortion result is + * likely to have some black pixels corresponding to "virtual" pixels outside of + * the captured distorted image. The original camera matrix, distortion + * coefficients, the computed new camera matrix, and newImageSize + * should be passed to "initUndistortRectifyMap" to produce the maps for + * "remap".

+ * + * @param cameraMatrix Input camera matrix. + * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imageSize Original image size. + * @param alpha Free scaling parameter between 0 (when all the pixels in the + * undistorted image are valid) and 1 (when all the source image pixels are + * retained in the undistorted image). See "stereoRectify" for details. + * @param newImgSize a newImgSize + * @param validPixROI Optional output rectangle that outlines all-good-pixels + * region in the undistorted image. See roi1, roi2 description in + * "stereoRectify". + * @param centerPrincipalPoint Optional flag that indicates whether in the new + * camera matrix the principal point should be at the image center or not. By + * default, the principal point is chosen to best fit a subset of the source + * image (determined by alpha) to the corrected image. + * + * @see org.opencv.calib3d.Calib3d.getOptimalNewCameraMatrix + */ + public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint) + { + double[] validPixROI_out = new double[4]; + Mat retVal = new Mat(getOptimalNewCameraMatrix_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out, centerPrincipalPoint)); + if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } + return retVal; + } + +/** + *

Returns the new camera matrix based on the free scaling parameter.

+ * + *

The function computes and returns the optimal new camera matrix based on the + * free scaling parameter. By varying this parameter, you may retrieve only + * sensible pixels alpha=0, keep all the original image pixels if + * there is valuable information in the corners alpha=1, or get + * something in between. When alpha>0, the undistortion result is + * likely to have some black pixels corresponding to "virtual" pixels outside of + * the captured distorted image. The original camera matrix, distortion + * coefficients, the computed new camera matrix, and newImageSize + * should be passed to "initUndistortRectifyMap" to produce the maps for + * "remap".

+ * + * @param cameraMatrix Input camera matrix. + * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imageSize Original image size. + * @param alpha Free scaling parameter between 0 (when all the pixels in the + * undistorted image are valid) and 1 (when all the source image pixels are + * retained in the undistorted image). See "stereoRectify" for details. + * + * @see org.opencv.calib3d.Calib3d.getOptimalNewCameraMatrix + */ + public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha) + { + + Mat retVal = new Mat(getOptimalNewCameraMatrix_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha)); + + return retVal; + } + + + // + // C++: Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize) + // + + public static Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize) + { + + Rect retVal = new Rect(getValidDisparityROI_0(roi1.x, roi1.y, roi1.width, roi1.height, roi2.x, roi2.y, roi2.width, roi2.height, minDisparity, numberOfDisparities, SADWindowSize)); + + return retVal; + } + + + // + // C++: Mat initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.) + // + +/** + *

Finds an initial camera matrix from 3D-2D point correspondences.

+ * + *

The function estimates and returns an initial camera matrix for the camera + * calibration process. + * Currently, the function only supports planar calibration patterns, which are + * patterns where each object point has z-coordinate =0.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points in + * the calibration pattern coordinate space. In the old interface all the + * per-view vectors are concatenated. See "calibrateCamera" for details. + * @param imagePoints Vector of vectors of the projections of the calibration + * pattern points. In the old interface all the per-view vectors are + * concatenated. + * @param imageSize Image size in pixels used to initialize the principal point. + * @param aspectRatio If it is zero or negative, both f_x and + * f_y are estimated independently. Otherwise, f_x = f_y * + * aspectRatio. + * + * @see org.opencv.calib3d.Calib3d.initCameraMatrix2D + */ + public static Mat initCameraMatrix2D(List objectPoints, List imagePoints, Size imageSize, double aspectRatio) + { + List objectPoints_tmplm = new ArrayList((objectPoints != null) ? objectPoints.size() : 0); + Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm); + List imagePoints_tmplm = new ArrayList((imagePoints != null) ? imagePoints.size() : 0); + Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm); + Mat retVal = new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio)); + + return retVal; + } + +/** + *

Finds an initial camera matrix from 3D-2D point correspondences.

+ * + *

The function estimates and returns an initial camera matrix for the camera + * calibration process. + * Currently, the function only supports planar calibration patterns, which are + * patterns where each object point has z-coordinate =0.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points in + * the calibration pattern coordinate space. In the old interface all the + * per-view vectors are concatenated. See "calibrateCamera" for details. + * @param imagePoints Vector of vectors of the projections of the calibration + * pattern points. In the old interface all the per-view vectors are + * concatenated. + * @param imageSize Image size in pixels used to initialize the principal point. + * + * @see org.opencv.calib3d.Calib3d.initCameraMatrix2D + */ + public static Mat initCameraMatrix2D(List objectPoints, List imagePoints, Size imageSize) + { + List objectPoints_tmplm = new ArrayList((objectPoints != null) ? objectPoints.size() : 0); + Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm); + List imagePoints_tmplm = new ArrayList((imagePoints != null) ? imagePoints.size() : 0); + Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm); + Mat retVal = new Mat(initCameraMatrix2D_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height)); + + return retVal; + } + + + // + // C++: void matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB) + // + +/** + *

Computes partial derivatives of the matrix product for each multiplied + * matrix.

+ * + *

The function computes partial derivatives of the elements of the matrix + * product A*B with regard to the elements of each of the two input + * matrices. The function is used to compute the Jacobian matrices in + * "stereoCalibrate" but can also be used in any other similar optimization + * function.

+ * + * @param A First multiplied matrix. + * @param B Second multiplied matrix. + * @param dABdA First output derivative matrix d(A*B)/dA of size + * A.rows*B.cols x (A.rows*A.cols). + * @param dABdB Second output derivative matrix d(A*B)/dB of size + * A.rows*B.cols x (B.rows*B.cols). + * + * @see org.opencv.calib3d.Calib3d.matMulDeriv + */ + public static void matMulDeriv(Mat A, Mat B, Mat dABdA, Mat dABdB) + { + + matMulDeriv_0(A.nativeObj, B.nativeObj, dABdA.nativeObj, dABdB.nativeObj); + + return; + } + + + // + // C++: void projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0) + // + +/** + *

Projects 3D points to an image plane.

+ * + *

The function computes projections of 3D points to the image plane given + * intrinsic and extrinsic camera parameters. Optionally, the function computes + * Jacobians - matrices of partial derivatives of image points coordinates (as + * functions of all the input parameters) with respect to the particular + * parameters, intrinsic and/or extrinsic. The Jacobians are used during the + * global optimization in "calibrateCamera", "solvePnP", and "stereoCalibrate". + * The function itself can also be used to compute a re-projection error given + * the current intrinsic and extrinsic parameters.

+ * + *

Note: By setting rvec=tvec=(0,0,0) or by setting + * cameraMatrix to a 3x3 identity matrix, or by passing zero + * distortion coefficients, you can get various useful partial cases of the + * function. This means that you can compute the distorted coordinates for a + * sparse set of points or apply a perspective transformation (and also compute + * the derivatives) in the ideal zero-distortion setup.

+ * + * @param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 + * 3-channel (or vector), where N is the number of points + * in the view. + * @param rvec Rotation vector. See "Rodrigues" for details. + * @param tvec Translation vector. + * @param cameraMatrix Camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 _1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 + * 2-channel, or vector. + * @param jacobian Optional output 2Nx(10+) jacobian matrix of + * derivatives of image points with respect to components of the rotation + * vector, translation vector, focal lengths, coordinates of the principal point + * and the distortion coefficients. In the old interface different components of + * the jacobian are returned via different output parameters. + * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter + * is not 0, the function assumes that the aspect ratio (*fx/fy*) is fixed and + * correspondingly adjusts the jacobian matrix. + * + * @see org.opencv.calib3d.Calib3d.projectPoints + */ + public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian, double aspectRatio) + { + Mat objectPoints_mat = objectPoints; + Mat distCoeffs_mat = distCoeffs; + Mat imagePoints_mat = imagePoints; + projectPoints_0(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj, aspectRatio); + + return; + } + +/** + *

Projects 3D points to an image plane.

+ * + *

The function computes projections of 3D points to the image plane given + * intrinsic and extrinsic camera parameters. Optionally, the function computes + * Jacobians - matrices of partial derivatives of image points coordinates (as + * functions of all the input parameters) with respect to the particular + * parameters, intrinsic and/or extrinsic. The Jacobians are used during the + * global optimization in "calibrateCamera", "solvePnP", and "stereoCalibrate". + * The function itself can also be used to compute a re-projection error given + * the current intrinsic and extrinsic parameters.

+ * + *

Note: By setting rvec=tvec=(0,0,0) or by setting + * cameraMatrix to a 3x3 identity matrix, or by passing zero + * distortion coefficients, you can get various useful partial cases of the + * function. This means that you can compute the distorted coordinates for a + * sparse set of points or apply a perspective transformation (and also compute + * the derivatives) in the ideal zero-distortion setup.

+ * + * @param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 + * 3-channel (or vector), where N is the number of points + * in the view. + * @param rvec Rotation vector. See "Rodrigues" for details. + * @param tvec Translation vector. + * @param cameraMatrix Camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 _1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 + * 2-channel, or vector. + * + * @see org.opencv.calib3d.Calib3d.projectPoints + */ + public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints) + { + Mat objectPoints_mat = objectPoints; + Mat distCoeffs_mat = distCoeffs; + Mat imagePoints_mat = imagePoints; + projectPoints_1(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj); + + return; + } + + + // + // C++: float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags) + // + + public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List imgpt1, List imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags) + { + Mat imgpt1_mat = Converters.vector_Mat_to_Mat(imgpt1); + Mat imgpt3_mat = Converters.vector_Mat_to_Mat(imgpt3); + double[] roi1_out = new double[4]; + double[] roi2_out = new double[4]; + float retVal = rectify3Collinear_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, cameraMatrix3.nativeObj, distCoeffs3.nativeObj, imgpt1_mat.nativeObj, imgpt3_mat.nativeObj, imageSize.width, imageSize.height, 
R12.nativeObj, T12.nativeObj, R13.nativeObj, T13.nativeObj, R1.nativeObj, R2.nativeObj, R3.nativeObj, P1.nativeObj, P2.nativeObj, P3.nativeObj, Q.nativeObj, alpha, newImgSize.width, newImgSize.height, roi1_out, roi2_out, flags); + if(roi1!=null){ roi1.x = (int)roi1_out[0]; roi1.y = (int)roi1_out[1]; roi1.width = (int)roi1_out[2]; roi1.height = (int)roi1_out[3]; } + if(roi2!=null){ roi2.x = (int)roi2_out[0]; roi2.y = (int)roi2_out[1]; roi2.width = (int)roi2_out[2]; roi2.height = (int)roi2_out[3]; } + return retVal; + } + + + // + // C++: void reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1) + // + +/** + *

Reprojects a disparity image to 3D space.

+ * + *

The function transforms a single-channel disparity map to a 3-channel image + * representing a 3D surface. That is, for each pixel (x,y) and the + * corresponding disparity d=disparity(x,y), it computes:

+ * + *

[X Y Z W]^T = Q *[x y disparity(x,y) 1]^T + * _3dImage(x,y) = (X/W, Y/W, Z/W)

+ * + *

The matrix Q can be an arbitrary 4 x 4 matrix (for + * example, the one computed by "stereoRectify"). To reproject a sparse set of + * points {(x,y,d),...} to 3D space, use "perspectiveTransform".

+ * + * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit + * signed or 32-bit floating-point disparity image. + * @param _3dImage Output 3-channel floating-point image of the same size as + * disparity. Each element of _3dImage(x,y) contains + * 3D coordinates of the point (x,y) computed from the disparity + * map. + * @param Q 4 x 4 perspective transformation matrix that can be + * obtained with "stereoRectify". + * @param handleMissingValues Indicates, whether the function should handle + * missing values (i.e. points where the disparity was not computed). If + * handleMissingValues=true, then pixels with the minimal disparity + * that corresponds to the outliers (see :ocv:funcx:"StereoBM.operator()") are + * transformed to 3D points with a very large Z value (currently set to 10000). + * @param ddepth The optional output array depth. If it is -1, the + * output image will have CV_32F depth. ddepth can + * also be set to CV_16S, CV_32S or CV_32F. + * + * @see org.opencv.calib3d.Calib3d.reprojectImageTo3D + */ + public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues, int ddepth) + { + + reprojectImageTo3D_0(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues, ddepth); + + return; + } + +/** + *

Reprojects a disparity image to 3D space.

+ * + *

The function transforms a single-channel disparity map to a 3-channel image + * representing a 3D surface. That is, for each pixel (x,y) and the + * corresponding disparity d=disparity(x,y), it computes:

+ * + *

[X Y Z W]^T = Q *[x y disparity(x,y) 1]^T + * _3dImage(x,y) = (X/W, Y/W, Z/W)

+ * + *

The matrix Q can be an arbitrary 4 x 4 matrix (for + * example, the one computed by "stereoRectify"). To reproject a sparse set of + * points {(x,y,d),...} to 3D space, use "perspectiveTransform".

+ * + * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit + * signed or 32-bit floating-point disparity image. + * @param _3dImage Output 3-channel floating-point image of the same size as + * disparity. Each element of _3dImage(x,y) contains + * 3D coordinates of the point (x,y) computed from the disparity + * map. + * @param Q 4 x 4 perspective transformation matrix that can be + * obtained with "stereoRectify". + * @param handleMissingValues Indicates, whether the function should handle + * missing values (i.e. points where the disparity was not computed). If + * handleMissingValues=true, then pixels with the minimal disparity + * that corresponds to the outliers (see :ocv:funcx:"StereoBM.operator()") are + * transformed to 3D points with a very large Z value (currently set to 10000). + * + * @see org.opencv.calib3d.Calib3d.reprojectImageTo3D + */ + public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues) + { + + reprojectImageTo3D_1(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues); + + return; + } + +/** + *

Reprojects a disparity image to 3D space.

+ * + *

The function transforms a single-channel disparity map to a 3-channel image + * representing a 3D surface. That is, for each pixel (x,y) and the + * corresponding disparity d=disparity(x,y), it computes:

+ * + *

[X Y Z W]^T = Q *[x y disparity(x,y) 1]^T + * _3dImage(x,y) = (X/W, Y/W, Z/W)

+ * + *

The matrix Q can be an arbitrary 4 x 4 matrix (for + * example, the one computed by "stereoRectify"). To reproject a sparse set of + * points {(x,y,d),...} to 3D space, use "perspectiveTransform".

+ * + * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit + * signed or 32-bit floating-point disparity image. + * @param _3dImage Output 3-channel floating-point image of the same size as + * disparity. Each element of _3dImage(x,y) contains + * 3D coordinates of the point (x,y) computed from the disparity + * map. + * @param Q 4 x 4 perspective transformation matrix that can be + * obtained with "stereoRectify". + * + * @see org.opencv.calib3d.Calib3d.reprojectImageTo3D + */ + public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q) + { + + reprojectImageTo3D_2(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj); + + return; + } + + + // + // C++: bool solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = ITERATIVE) + // + +/** + *

Finds an object pose from 3D-2D point correspondences.

+ * + *

The function estimates the object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * @param useExtrinsicGuess If true (1), the function uses the provided + * rvec and tvec values as initial approximations of + * the rotation and translation vectors, respectively, and further optimizes + * them. + * @param flags Method for solving a PnP problem: + *
    + *
  • CV_ITERATIVE Iterative method is based on Levenberg-Marquardt + * optimization. In this case the function finds such a pose that minimizes + * reprojection error, that is the sum of squared distances between the observed + * projections imagePoints and the projected (using + * "projectPoints") objectPoints. + *
  • CV_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, + * H.-F. Chang "Complete Solution Classification for the Perspective-Three-Point + * Problem". In this case the function requires exactly four object and image + * points. + *
  • CV_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and + * P.Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose + * Estimation". + *
+ * + * @see org.opencv.calib3d.Calib3d.solvePnP + */ + public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int flags) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + boolean retVal = solvePnP_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, flags); + + return retVal; + } + +/** + *

Finds an object pose from 3D-2D point correspondences.

+ * + *

The function estimates the object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * + * @see org.opencv.calib3d.Calib3d.solvePnP + */ + public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + boolean retVal = solvePnP_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj); + + return retVal; + } + + + // + // C++: void solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, Mat& inliers = Mat(), int flags = ITERATIVE) + // + +/** + *

Finds an object pose from 3D-2D point correspondences using the RANSAC + * scheme.

+ * + *

The function estimates an object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients. This function finds such a pose that minimizes + * reprojection error, that is, the sum of squared distances between the + * observed projections imagePoints and the projected (using + * "projectPoints") objectPoints. The use of RANSAC makes the + * function resistant to outliers. The function is parallelized with the TBB + * library.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * @param useExtrinsicGuess If true (1), the function uses the provided + * rvec and tvec values as initial approximations of + * the rotation and translation vectors, respectively, and further optimizes + * them. + * @param iterationsCount Number of iterations. + * @param reprojectionError Inlier threshold value used by the RANSAC procedure. + * The parameter value is the maximum allowed distance between the observed and + * computed point projections to consider it an inlier. + * @param minInliersCount Number of inliers. If the algorithm at some stage + * finds more inliers than minInliersCount, it finishes. + * @param inliers Output vector that contains indices of inliers in + * objectPoints and imagePoints. + * @param flags Method for solving a PnP problem (see "solvePnP"). + * + * @see org.opencv.calib3d.Calib3d.solvePnPRansac + */ + public static void solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, int minInliersCount, Mat inliers, int flags) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + solvePnPRansac_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, minInliersCount, inliers.nativeObj, flags); + + return; + } + +/** + *

Finds an object pose from 3D-2D point correspondences using the RANSAC + * scheme.

+ * + *

The function estimates an object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients. This function finds such a pose that minimizes + * reprojection error, that is, the sum of squared distances between the + * observed projections imagePoints and the projected (using + * "projectPoints") objectPoints. The use of RANSAC makes the + * function resistant to outliers. The function is parallelized with the TBB + * library.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * + * @see org.opencv.calib3d.Calib3d.solvePnPRansac + */ + public static void solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + solvePnPRansac_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj); + + return; + } + + + // + // C++: double stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags = CALIB_FIX_INTRINSIC) + // + +/** + *

Calibrates the stereo camera.

+ * + *

The function estimates transformation between two cameras making a stereo + * pair. If you have a stereo camera where the relative position and orientation + * of two cameras is fixed, and if you computed poses of an object relative to + * the first camera and to the second camera, (R1, T1) and (R2, T2), + * respectively (this can be done with "solvePnP"), then those poses definitely + * relate to each other. This means that, given (R_1,T_1), it + * should be possible to compute (R_2,T_2). You only need to + * know the position and orientation of the second camera relative to the first + * camera. This is what the described function does. It computes + * (R,T) so that:

+ * + *

R_2=R*R_1<BR>T_2=R*T_1 + T,

+ * + *

Optionally, it computes the essential matrix E:

+ * + *

E= + * |0 -T_2 T_1| + * |T_2 0 -T_0| + * |-T_1 T_0 0|

+ *
    + *
  • R + *
+ * + *

where T_i are components of the translation vector T : + * T=[T_0, T_1, T_2]^T. And the function can also compute the + * fundamental matrix F:

+ * + *

F = cameraMatrix2^(-T) E cameraMatrix1^(-1)

+ * + *

Besides the stereo-related information, the function can also perform a full + * calibration of each of two cameras. However, due to the high dimensionality + * of the parameter space and noise in the input data, the function can diverge + * from the correct solution. If the intrinsic parameters can be estimated with + * high accuracy for each of the cameras individually (for example, using + * "calibrateCamera"), you are recommended to do so and then pass + * CV_CALIB_FIX_INTRINSIC flag to the function along with the + * computed intrinsic parameters. Otherwise, if all the parameters are estimated + * at once, it makes sense to restrict some parameters, for example, pass + * CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST + * flags, which is usually a reasonable assumption.

+ * + *

Similarly to "calibrateCamera", the function minimizes the total + * re-projection error for all the points in all the available views from both + * cameras. The function returns the final value of the re-projection error.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points. + * @param imagePoints1 Vector of vectors of the projections of the calibration + * pattern points, observed by the first camera. + * @param imagePoints2 Vector of vectors of the projections of the calibration + * pattern points, observed by the second camera. + * @param cameraMatrix1 Input/output first camera matrix: + *

|f_x^j 0 c_x^j| + * |0 f_y^j c_y^j| + * |0 0 1| + * , j = 0, 1. If any of CV_CALIB_USE_INTRINSIC_GUESS, + * CV_CALIB_FIX_ASPECT_RATIO, CV_CALIB_FIX_INTRINSIC, + * or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the + * matrix components must be initialized. See the flags description for details.

+ * @param distCoeffs1 Input/output vector of distortion coefficients (k_1, + * k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. The + * output vector length depends on the flags. + * @param cameraMatrix2 Input/output second camera matrix. The parameter is + * similar to cameraMatrix1. + * @param distCoeffs2 Input/output lens distortion coefficients for the second + * camera. The parameter is similar to distCoeffs1. + * @param imageSize Size of the image used only to initialize intrinsic camera + * matrix. + * @param R Output rotation matrix between the 1st and the 2nd camera coordinate + * systems. + * @param T Output translation vector between the coordinate systems of the + * cameras. + * @param E Output essential matrix. + * @param F Output fundamental matrix. + * @param criteria a criteria + * @param flags Different flags that may be zero or a combination of the + * following values: + *
    + *
  • CV_CALIB_FIX_INTRINSIC Fix cameraMatrix? and + * distCoeffs? so that only R, T, E, and + * F matrices are estimated. + *
  • CV_CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic + * parameters according to the specified flags. Initial values are provided by + * the user. + *
  • CV_CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the + * optimization. + *
  • CV_CALIB_FIX_FOCAL_LENGTH Fix f^j_x and f^j_y. + *
  • CV_CALIB_FIX_ASPECT_RATIO Optimize f^j_y. Fix the ratio + * f^j_x/f^j_y. + *
  • CV_CALIB_SAME_FOCAL_LENGTH Enforce f^0_x=f^1_x and + * f^0_y=f^1_y. + *
  • CV_CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for + * each camera to zeros and fix there. + *
  • CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 Do not change the corresponding + * radial distortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS + * is set, the coefficient from the supplied distCoeffs matrix is + * used. Otherwise, it is set to 0. + *
  • CV_CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide + * the backward compatibility, this extra flag should be explicitly specified to + * make the calibration function use the rational model and return 8 + * coefficients. If the flag is not set, the function computes and returns only + * 5 distortion coefficients. + *
+ * + * @see org.opencv.calib3d.Calib3d.stereoCalibrate + */ + public static double stereoCalibrate(List objectPoints, List imagePoints1, List imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, TermCriteria criteria, int flags) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1); + Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2); + double retVal = stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, flags); + + return retVal; + } + +/** + *

Calibrates the stereo camera.

+ * + *

The function estimates transformation between two cameras making a stereo + * pair. If you have a stereo camera where the relative position and orientation + * of two cameras is fixed, and if you computed poses of an object relative to + * the first camera and to the second camera, (R1, T1) and (R2, T2), + * respectively (this can be done with "solvePnP"), then those poses definitely + * relate to each other. This means that, given (R_1,T_1), it + * should be possible to compute (R_2,T_2). You only need to + * know the position and orientation of the second camera relative to the first + * camera. This is what the described function does. It computes + * (R,T) so that:

+ * + *

R_2=R*R_1<BR>T_2=R*T_1 + T,

+ * + *

Optionally, it computes the essential matrix E:

+ * + *

E= + * |0 -T_2 T_1| + * |T_2 0 -T_0| + * |-T_1 T_0 0|

+ *
    + *
  • R + *
+ * + *

where T_i are components of the translation vector T : + * T=[T_0, T_1, T_2]^T. And the function can also compute the + * fundamental matrix F:

+ * + *

F = cameraMatrix2^(-T) E cameraMatrix1^(-1)

+ * + *

Besides the stereo-related information, the function can also perform a full + * calibration of each of two cameras. However, due to the high dimensionality + * of the parameter space and noise in the input data, the function can diverge + * from the correct solution. If the intrinsic parameters can be estimated with + * high accuracy for each of the cameras individually (for example, using + * "calibrateCamera"), you are recommended to do so and then pass + * CV_CALIB_FIX_INTRINSIC flag to the function along with the + * computed intrinsic parameters. Otherwise, if all the parameters are estimated + * at once, it makes sense to restrict some parameters, for example, pass + * CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST + * flags, which is usually a reasonable assumption.

+ * + *

Similarly to "calibrateCamera", the function minimizes the total + * re-projection error for all the points in all the available views from both + * cameras. The function returns the final value of the re-projection error.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points. + * @param imagePoints1 Vector of vectors of the projections of the calibration + * pattern points, observed by the first camera. + * @param imagePoints2 Vector of vectors of the projections of the calibration + * pattern points, observed by the second camera. + * @param cameraMatrix1 Input/output first camera matrix: + *

|f_x^j 0 c_x^j| + * |0 f_y^j c_y^j| + * |0 0 1| + * , j = 0, 1. If any of CV_CALIB_USE_INTRINSIC_GUESS, + * CV_CALIB_FIX_ASPECT_RATIO, CV_CALIB_FIX_INTRINSIC, + * or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the + * matrix components must be initialized. See the flags description for details.

+ * @param distCoeffs1 Input/output vector of distortion coefficients (k_1, + * k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. The + * output vector length depends on the flags. + * @param cameraMatrix2 Input/output second camera matrix. The parameter is + * similar to cameraMatrix1. + * @param distCoeffs2 Input/output lens distortion coefficients for the second + * camera. The parameter is similar to distCoeffs1. + * @param imageSize Size of the image used only to initialize intrinsic camera + * matrix. + * @param R Output rotation matrix between the 1st and the 2nd camera coordinate + * systems. + * @param T Output translation vector between the coordinate systems of the + * cameras. + * @param E Output essential matrix. + * @param F Output fundamental matrix. + * + * @see org.opencv.calib3d.Calib3d.stereoCalibrate + */ + public static double stereoCalibrate(List objectPoints, List imagePoints1, List imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1); + Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2); + double retVal = stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj); + + return retVal; + } + + + // + // C++: void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0) + // + +/** + *

Computes rectification transforms for each head of a calibrated stereo + * camera.

+ * + *

The function computes the rotation matrices for each camera that (virtually) + * make both camera image planes the same plane. Consequently, this makes all + * the epipolar lines parallel and thus simplifies the dense stereo + * correspondence problem. The function takes the matrices computed by + * "stereoCalibrate" as input. As output, it provides two rotation matrices and + * also two projection matrices in the new coordinates. The function + * distinguishes the following two cases:

+ *
    + *
  • Horizontal stereo: the first and the second camera views are shifted + * relative to each other mainly along the x axis (with possible small vertical + * shift). In the rectified images, the corresponding epipolar lines in the left + * and right cameras are horizontal and have the same y-coordinate. P1 and P2 + * look like: + *
+ * + *

P1 = f 0 cx_1 0 + * 0 f cy 0 + * 0 0 1 0

+ * + * + * + *

P2 = f 0 cx_2 T_x*f + * 0 f cy 0 + * 0 0 1 0,

+ * + *

where T_x is a horizontal shift between the cameras and + * cx_1=cx_2 if CV_CALIB_ZERO_DISPARITY is set.

+ *
    + *
  • Vertical stereo: the first and the second camera views are shifted + * relative to each other mainly in vertical direction (and probably a bit in + * the horizontal direction too). The epipolar lines in the rectified images are + * vertical and have the same x-coordinate. P1 and P2 look like: + *
+ * + *

P1 = f 0 cx 0 + * 0 f cy_1 0 + * 0 0 1 0

+ * + * + * + *

P2 = f 0 cx 0 + * 0 f cy_2 T_y*f + * 0 0 1 0,

+ * + *

where T_y is a vertical shift between the cameras and + * cy_1=cy_2 if CALIB_ZERO_DISPARITY is set.

+ * + *

As you can see, the first three columns of P1 and + * P2 will effectively be the new "rectified" camera matrices. + * The matrices, together with R1 and R2, can then be + * passed to "initUndistortRectifyMap" to initialize the rectification map for + * each camera.

+ * + *

See below the screenshot from the stereo_calib.cpp sample. Some + * red horizontal lines pass through the corresponding image regions. This means + * that the images are well rectified, which is what most stereo correspondence + * algorithms rely on. The green rectangles are roi1 and + * roi2. You see that their interiors are all valid pixels.

+ * + * @param cameraMatrix1 First camera matrix. + * @param distCoeffs1 First camera distortion parameters. + * @param cameraMatrix2 Second camera matrix. + * @param distCoeffs2 Second camera distortion parameters. + * @param imageSize Size of the image used for stereo calibration. + * @param R Rotation matrix between the coordinate systems of the first and the + * second cameras. + * @param T Translation vector between coordinate systems of the cameras. + * @param R1 Output 3x3 rectification transform (rotation matrix) for the first + * camera. + * @param R2 Output 3x3 rectification transform (rotation matrix) for the second + * camera. + * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the first camera. + * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the second camera. + * @param Q Output 4 x 4 disparity-to-depth mapping matrix (see + * "reprojectImageTo3D"). + * @param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY. + * If the flag is set, the function makes the principal points of each camera + * have the same pixel coordinates in the rectified views. And if the flag is + * not set, the function may still shift the images in the horizontal or + * vertical direction (depending on the orientation of epipolar lines) to + * maximize the useful image area. + * @param alpha Free scaling parameter. If it is -1 or absent, the function + * performs the default scaling. Otherwise, the parameter should be between 0 + * and 1. alpha=0 means that the rectified images are zoomed and + * shifted so that only valid pixels are visible (no black areas after + * rectification). alpha=1 means that the rectified image is + * decimated and shifted so that all the pixels from the original images from + * the cameras are retained in the rectified images (no source image pixels are + * lost). 
Obviously, any intermediate value yields an intermediate result + * between those two extreme cases. + * @param newImageSize New image resolution after rectification. The same size + * should be passed to "initUndistortRectifyMap" (see the stereo_calib.cpp + * sample in OpenCV samples directory). When (0,0) is passed (default), it is + * set to the original imageSize. Setting it to larger value can + * help you preserve details in the original image, especially when there is a + * big radial distortion. + * @param validPixROI1 Optional output rectangles inside the rectified images + * where all the pixels are valid. If alpha=0, the ROIs cover the + * whole images. Otherwise, they are likely to be smaller (see the picture + * below). + * @param validPixROI2 Optional output rectangles inside the rectified images + * where all the pixels are valid. If alpha=0, the ROIs cover the + * whole images. Otherwise, they are likely to be smaller (see the picture + * below). + * + * @see org.opencv.calib3d.Calib3d.stereoRectify + */ + public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2) + { + double[] validPixROI1_out = new double[4]; + double[] validPixROI2_out = new double[4]; + stereoRectify_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out, validPixROI2_out); + if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } + if(validPixROI2!=null){ validPixROI2.x = 
(int)validPixROI2_out[0]; validPixROI2.y = (int)validPixROI2_out[1]; validPixROI2.width = (int)validPixROI2_out[2]; validPixROI2.height = (int)validPixROI2_out[3]; } + return; + } + +/** + *

Computes rectification transforms for each head of a calibrated stereo + * camera.

+ * + *

The function computes the rotation matrices for each camera that (virtually) + * make both camera image planes the same plane. Consequently, this makes all + * the epipolar lines parallel and thus simplifies the dense stereo + * correspondence problem. The function takes the matrices computed by + * "stereoCalibrate" as input. As output, it provides two rotation matrices and + * also two projection matrices in the new coordinates. The function + * distinguishes the following two cases:

+ *
    + *
  • Horizontal stereo: the first and the second camera views are shifted + * relative to each other mainly along the x axis (with possible small vertical + * shift). In the rectified images, the corresponding epipolar lines in the left + * and right cameras are horizontal and have the same y-coordinate. P1 and P2 + * look like: + *
+ * + *

P1 = f 0 cx_1 0 + * 0 f cy 0 + * 0 0 1 0

+ * + * + * + *

P2 = f 0 cx_2 T_x*f + * 0 f cy 0 + * 0 0 1 0,

+ * + *

where T_x is a horizontal shift between the cameras and + * cx_1=cx_2 if CV_CALIB_ZERO_DISPARITY is set.

+ *
    + *
  • Vertical stereo: the first and the second camera views are shifted + * relative to each other mainly in vertical direction (and probably a bit in + * the horizontal direction too). The epipolar lines in the rectified images are + * vertical and have the same x-coordinate. P1 and P2 look like: + *
+ * + *

P1 = f 0 cx 0 + * 0 f cy_1 0 + * 0 0 1 0

+ * + * + * + *

P2 = f 0 cx 0 + * 0 f cy_2 T_y*f + * 0 0 1 0,

+ * + *

where T_y is a vertical shift between the cameras and + * cy_1=cy_2 if CALIB_ZERO_DISPARITY is set.

+ * + *

As you can see, the first three columns of P1 and + * P2 will effectively be the new "rectified" camera matrices. + * The matrices, together with R1 and R2, can then be + * passed to "initUndistortRectifyMap" to initialize the rectification map for + * each camera.

+ * + *

See below the screenshot from the stereo_calib.cpp sample. Some + * red horizontal lines pass through the corresponding image regions. This means + * that the images are well rectified, which is what most stereo correspondence + * algorithms rely on. The green rectangles are roi1 and + * roi2. You see that their interiors are all valid pixels.

+ * + * @param cameraMatrix1 First camera matrix. + * @param distCoeffs1 First camera distortion parameters. + * @param cameraMatrix2 Second camera matrix. + * @param distCoeffs2 Second camera distortion parameters. + * @param imageSize Size of the image used for stereo calibration. + * @param R Rotation matrix between the coordinate systems of the first and the + * second cameras. + * @param T Translation vector between coordinate systems of the cameras. + * @param R1 Output 3x3 rectification transform (rotation matrix) for the first + * camera. + * @param R2 Output 3x3 rectification transform (rotation matrix) for the second + * camera. + * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the first camera. + * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the second camera. + * @param Q Output 4 x 4 disparity-to-depth mapping matrix (see + * "reprojectImageTo3D"). + * + * @see org.opencv.calib3d.Calib3d.stereoRectify + */ + public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q) + { + + stereoRectify_1(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj); + + return; + } + + + // + // C++: bool stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5) + // + +/** + *

Computes a rectification transform for an uncalibrated stereo camera.

+ * + *

The function computes the rectification transformations without knowing + * intrinsic parameters of the cameras and their relative position in the space, + * which explains the suffix "uncalibrated". Another related difference from + * "stereoRectify" is that the function outputs not the rectification + * transformations in the object (3D) space, but the planar perspective + * transformations encoded by the homography matrices H1 and + * H2. The function implements the algorithm [Hartley99].

+ * + *

Note:

+ * + *

While the algorithm does not need to know the intrinsic parameters of the + * cameras, it heavily depends on the epipolar geometry. Therefore, if the + * camera lenses have a significant distortion, it would be better to correct it + * before computing the fundamental matrix and calling this function. For + * example, distortion coefficients can be estimated for each head of stereo + * camera separately by using "calibrateCamera". Then, the images can be + * corrected using "undistort", or just the point coordinates can be corrected + * with "undistortPoints".

+ * + * @param points1 Array of feature points in the first image. + * @param points2 The corresponding points in the second image. The same formats + * as in "findFundamentalMat" are supported. + * @param F Input fundamental matrix. It can be computed from the same set of + * point pairs using "findFundamentalMat". + * @param imgSize Size of the image. + * @param H1 Output rectification homography matrix for the first image. + * @param H2 Output rectification homography matrix for the second image. + * @param threshold Optional threshold used to filter out the outliers. If the + * parameter is greater than zero, all the point pairs that do not comply with + * the epipolar geometry (that is, the points for which |points2[i]^T*F*points1[i]|>threshold) + * are rejected prior to computing the homographies. Otherwise,all the points + * are considered inliers. + * + * @see org.opencv.calib3d.Calib3d.stereoRectifyUncalibrated + */ + public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2, double threshold) + { + + boolean retVal = stereoRectifyUncalibrated_0(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj, threshold); + + return retVal; + } + +/** + *

Computes a rectification transform for an uncalibrated stereo camera.

+ * + *

The function computes the rectification transformations without knowing + * intrinsic parameters of the cameras and their relative position in the space, + * which explains the suffix "uncalibrated". Another related difference from + * "stereoRectify" is that the function outputs not the rectification + * transformations in the object (3D) space, but the planar perspective + * transformations encoded by the homography matrices H1 and + * H2. The function implements the algorithm [Hartley99].

+ * + *

Note:

+ * + *

While the algorithm does not need to know the intrinsic parameters of the + * cameras, it heavily depends on the epipolar geometry. Therefore, if the + * camera lenses have a significant distortion, it would be better to correct it + * before computing the fundamental matrix and calling this function. For + * example, distortion coefficients can be estimated for each head of stereo + * camera separately by using "calibrateCamera". Then, the images can be + * corrected using "undistort", or just the point coordinates can be corrected + * with "undistortPoints".

+ * + * @param points1 Array of feature points in the first image. + * @param points2 The corresponding points in the second image. The same formats + * as in "findFundamentalMat" are supported. + * @param F Input fundamental matrix. It can be computed from the same set of + * point pairs using "findFundamentalMat". + * @param imgSize Size of the image. + * @param H1 Output rectification homography matrix for the first image. + * @param H2 Output rectification homography matrix for the second image. + * + * @see org.opencv.calib3d.Calib3d.stereoRectifyUncalibrated + */ + public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2) + { + + boolean retVal = stereoRectifyUncalibrated_1(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj); + + return retVal; + } + + + // + // C++: void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D) + // + +/** + *

Reconstructs points by triangulation.

+ * + *

The function reconstructs 3-dimensional points (in homogeneous coordinates) + * by using their observations with a stereo camera. Projections matrices can be + * obtained from "stereoRectify".

+ * + * @param projMatr1 3x4 projection matrix of the first camera. + * @param projMatr2 3x4 projection matrix of the second camera. + * @param projPoints1 2xN array of feature points in the first image. In case of + * c++ version it can be also a vector of feature points or two-channel matrix + * of size 1xN or Nx1. + * @param projPoints2 2xN array of corresponding points in the second image. In + * case of c++ version it can be also a vector of feature points or two-channel + * matrix of size 1xN or Nx1. + * @param points4D 4xN array of reconstructed points in homogeneous coordinates. + * + * @see org.opencv.calib3d.Calib3d.triangulatePoints + * @see org.opencv.calib3d.Calib3d#reprojectImageTo3D + */ + public static void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat points4D) + { + + triangulatePoints_0(projMatr1.nativeObj, projMatr2.nativeObj, projPoints1.nativeObj, projPoints2.nativeObj, points4D.nativeObj); + + return; + } + + + // + // C++: void validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1) + // + + public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp) + { + + validateDisparity_0(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities, disp12MaxDisp); + + return; + } + + public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities) + { + + validateDisparity_1(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities); + + return; + } + + + + + // C++: Vec3d RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat()) + private static native double[] RQDecomp3x3_0(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj, long Qz_nativeObj); + private static native double[] RQDecomp3x3_1(long src_nativeObj, long mtxR_nativeObj, long 
mtxQ_nativeObj); + + // C++: void Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat()) + private static native void Rodrigues_0(long src_nativeObj, long dst_nativeObj, long jacobian_nativeObj); + private static native void Rodrigues_1(long src_nativeObj, long dst_nativeObj); + + // C++: double calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON)) + private static native double calibrateCamera_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); + private static native double calibrateCamera_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags); + private static native double calibrateCamera_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj); + + // C++: void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio) + private static native void calibrationMatrixValues_0(long cameraMatrix_nativeObj, double imageSize_width, double imageSize_height, double apertureWidth, double apertureHeight, double[] fovx_out, double[] fovy_out, double[] focalLength_out, double[] principalPoint_out, double[] aspectRatio_out); + + // C++: void 
composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat()) + private static native void composeRT_0(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj, long dt3dt2_nativeObj); + private static native void composeRT_1(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj); + + // C++: void convertPointsFromHomogeneous(Mat src, Mat& dst) + private static native void convertPointsFromHomogeneous_0(long src_nativeObj, long dst_nativeObj); + + // C++: void convertPointsToHomogeneous(Mat src, Mat& dst) + private static native void convertPointsToHomogeneous_0(long src_nativeObj, long dst_nativeObj); + + // C++: void correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2) + private static native void correctMatches_0(long F_nativeObj, long points1_nativeObj, long points2_nativeObj, long newPoints1_nativeObj, long newPoints2_nativeObj); + + // C++: void decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat()) + private static native void decomposeProjectionMatrix_0(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj, long eulerAngles_nativeObj); + private static native void decomposeProjectionMatrix_1(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, 
long transVect_nativeObj); + + // C++: void drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound) + private static native void drawChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, boolean patternWasFound); + + // C++: int estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99) + private static native int estimateAffine3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence); + private static native int estimateAffine3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj); + + // C++: void filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat()) + private static native void filterSpeckles_0(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff, long buf_nativeObj); + private static native void filterSpeckles_1(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff); + + // C++: bool findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE) + private static native boolean findChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, int flags); + private static native boolean findChessboardCorners_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj); + + // C++: bool findCirclesGridDefault(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID) + private static native boolean findCirclesGridDefault_0(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj, int flags); + private static native boolean findCirclesGridDefault_1(long image_nativeObj, double 
patternSize_width, double patternSize_height, long centers_nativeObj); + + // C++: Mat findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double param1 = 3., double param2 = 0.99, Mat& mask = Mat()) + private static native long findFundamentalMat_0(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double param1, double param2, long mask_nativeObj); + private static native long findFundamentalMat_1(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double param1, double param2); + private static native long findFundamentalMat_2(long points1_mat_nativeObj, long points2_mat_nativeObj); + + // C++: Mat findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat()) + private static native long findHomography_0(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj); + private static native long findHomography_1(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold); + private static native long findHomography_2(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj); + + // C++: Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false) + private static native long getOptimalNewCameraMatrix_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out, boolean centerPrincipalPoint); + private static native long getOptimalNewCameraMatrix_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha); + + // C++: Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, 
int SADWindowSize) + private static native double[] getValidDisparityROI_0(int roi1_x, int roi1_y, int roi1_width, int roi1_height, int roi2_x, int roi2_y, int roi2_width, int roi2_height, int minDisparity, int numberOfDisparities, int SADWindowSize); + + // C++: Mat initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.) + private static native long initCameraMatrix2D_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, double aspectRatio); + private static native long initCameraMatrix2D_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height); + + // C++: void matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB) + private static native void matMulDeriv_0(long A_nativeObj, long B_nativeObj, long dABdA_nativeObj, long dABdB_nativeObj); + + // C++: void projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0) + private static native void projectPoints_0(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj, double aspectRatio); + private static native void projectPoints_1(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj); + + // C++: float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags) + private static native 
float rectify3Collinear_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long cameraMatrix3_nativeObj, long distCoeffs3_nativeObj, long imgpt1_mat_nativeObj, long imgpt3_mat_nativeObj, double imageSize_width, double imageSize_height, long R12_nativeObj, long T12_nativeObj, long R13_nativeObj, long T13_nativeObj, long R1_nativeObj, long R2_nativeObj, long R3_nativeObj, long P1_nativeObj, long P2_nativeObj, long P3_nativeObj, long Q_nativeObj, double alpha, double newImgSize_width, double newImgSize_height, double[] roi1_out, double[] roi2_out, int flags); + + // C++: void reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1) + private static native void reprojectImageTo3D_0(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues, int ddepth); + private static native void reprojectImageTo3D_1(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues); + private static native void reprojectImageTo3D_2(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj); + + // C++: bool solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = ITERATIVE) + private static native boolean solvePnP_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int flags); + private static native boolean solvePnP_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj); + + // C++: void solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& 
tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, Mat& inliers = Mat(), int flags = ITERATIVE) + private static native void solvePnPRansac_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, int minInliersCount, long inliers_nativeObj, int flags); + private static native void solvePnPRansac_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj); + + // C++: double stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags = CALIB_FIX_INTRINSIC) + private static native double stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int flags); + private static native double stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj); + + // C++: void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat 
cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0) + private static native void stereoRectify_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out, double[] validPixROI2_out); + private static native void stereoRectify_1(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj); + + // C++: bool stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5) + private static native boolean stereoRectifyUncalibrated_0(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj, double threshold); + private static native boolean stereoRectifyUncalibrated_1(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj); + + // C++: void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D) + private static native void triangulatePoints_0(long projMatr1_nativeObj, long projMatr2_nativeObj, long projPoints1_nativeObj, long projPoints2_nativeObj, long points4D_nativeObj); + + // C++: void validateDisparity(Mat& disparity, Mat cost, int minDisparity, int 
numberOfDisparities, int disp12MaxDisp = 1) + private static native void validateDisparity_0(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities, int disp12MaxDisp); + private static native void validateDisparity_1(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities); + +} diff --git a/src/org/opencv/calib3d/StereoBM.java b/src/org/opencv/calib3d/StereoBM.java new file mode 100644 index 0000000..a7dc0c9 --- /dev/null +++ b/src/org/opencv/calib3d/StereoBM.java @@ -0,0 +1,261 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.calib3d; + +import org.opencv.core.Mat; + +// C++: class StereoBM +/** + *

Class for computing stereo correspondence using the block matching algorithm.

+ * + *

// Block matching stereo correspondence algorithm class StereoBM

+ * + *

// C++ code:

+ * + * + *

enum { NORMALIZED_RESPONSE = CV_STEREO_BM_NORMALIZED_RESPONSE,

+ * + *

BASIC_PRESET=CV_STEREO_BM_BASIC,

+ * + *

FISH_EYE_PRESET=CV_STEREO_BM_FISH_EYE,

+ * + *

NARROW_PRESET=CV_STEREO_BM_NARROW };

+ * + *

StereoBM();

+ * + *

// the preset is one of..._PRESET above.

+ * + *

// ndisparities is the size of disparity range,

+ * + *

// in which the optimal disparity at each pixel is searched for.

+ * + *

// SADWindowSize is the size of averaging window used to match pixel blocks

+ * + *

// (larger values mean better robustness to noise, but yield blurry disparity + * maps)

+ * + *

StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);

+ * + *

// separate initialization function

+ * + *

void init(int preset, int ndisparities=0, int SADWindowSize=21);

+ * + *

// computes the disparity for the two rectified 8-bit single-channel images.

+ * + *

// the disparity will be 16-bit signed (fixed-point) or 32-bit floating-point + * image of the same size as left.

+ * + *

void operator()(InputArray left, InputArray right, OutputArray disparity, int + * disptype=CV_16S);

+ * + *

Ptr state;

+ * + *

};

+ * + *

The class is a C++ wrapper for the associated functions. In particular, + * :ocv:funcx:"StereoBM.operator()" is the wrapper for

+ * + *

"cvFindStereoCorrespondenceBM".

+ * + * @see org.opencv.calib3d.StereoBM + */ +public class StereoBM { + + protected final long nativeObj; + protected StereoBM(long addr) { nativeObj = addr; } + + + public static final int + PREFILTER_NORMALIZED_RESPONSE = 0, + PREFILTER_XSOBEL = 1, + BASIC_PRESET = 0, + FISH_EYE_PRESET = 1, + NARROW_PRESET = 2; + + + // + // C++: StereoBM::StereoBM() + // + +/** + *

The constructors.

+ * + *

The constructors initialize StereoBM state. You can then call + * StereoBM.operator() to compute disparity for a specific stereo + * pair.

+ * + *

Note: In the C API you need to deallocate CvStereoBM state when + * it is not needed anymore using cvReleaseStereoBMState(&stereobm).

+ * + * @see org.opencv.calib3d.StereoBM.StereoBM + */ + public StereoBM() + { + + nativeObj = StereoBM_0(); + + return; + } + + + // + // C++: StereoBM::StereoBM(int preset, int ndisparities = 0, int SADWindowSize = 21) + // + +/** + *

The constructors.

+ * + *

The constructors initialize StereoBM state. You can then call + * StereoBM.operator() to compute disparity for a specific stereo + * pair.

+ * + *

Note: In the C API you need to deallocate CvStereoBM state when + * it is not needed anymore using cvReleaseStereoBMState(&stereobm).

+ * + * @param preset specifies the whole set of algorithm parameters, one of: + *
    + *
  • BASIC_PRESET - parameters suitable for general cameras + *
  • FISH_EYE_PRESET - parameters suitable for wide-angle cameras + *
  • NARROW_PRESET - parameters suitable for narrow-angle cameras + *
+ * + *

After constructing the class, you can override any parameters set by the + * preset.

+ * @param ndisparities the disparity search range. For each pixel algorithm will + * find the best disparity from 0 (default minimum disparity) to + * ndisparities. The search range can then be shifted by changing + * the minimum disparity. + * @param SADWindowSize the linear size of the blocks compared by the algorithm. + * The size should be odd (as the block is centered at the current pixel). + * Larger block size implies smoother, though less accurate disparity map. + * Smaller block size gives more detailed disparity map, but there is higher + * chance for algorithm to find a wrong correspondence. + * + * @see org.opencv.calib3d.StereoBM.StereoBM + */ + public StereoBM(int preset, int ndisparities, int SADWindowSize) + { + + nativeObj = StereoBM_1(preset, ndisparities, SADWindowSize); + + return; + } + +/** + *

The constructors.

+ * + *

The constructors initialize StereoBM state. You can then call + * StereoBM.operator() to compute disparity for a specific stereo + * pair.

+ * + *

Note: In the C API you need to deallocate CvStereoBM state when + * it is not needed anymore using cvReleaseStereoBMState(&stereobm).

+ * + * @param preset specifies the whole set of algorithm parameters, one of: + *
    + *
  • BASIC_PRESET - parameters suitable for general cameras + *
  • FISH_EYE_PRESET - parameters suitable for wide-angle cameras + *
  • NARROW_PRESET - parameters suitable for narrow-angle cameras + *
+ * + *

After constructing the class, you can override any parameters set by the + * preset.

+ * + * @see org.opencv.calib3d.StereoBM.StereoBM + */ + public StereoBM(int preset) + { + + nativeObj = StereoBM_2(preset); + + return; + } + + + // + // C++: void StereoBM::operator ()(Mat left, Mat right, Mat& disparity, int disptype = CV_16S) + // + +/** + *

Computes disparity using the BM algorithm for a rectified stereo pair.

+ * + *

The method executes the BM algorithm on a rectified stereo pair. See the + * stereo_match.cpp OpenCV sample on how to prepare images and call + * the method. Note that the method is not constant, thus you should not use the + * same StereoBM instance from within different threads + * simultaneously. The function is parallelized with the TBB library.

+ * + * @param left Left 8-bit single-channel image. + * @param right Right image of the same size and the same type as the left one. + * @param disparity Output disparity map. It has the same size as the input + * images. When disptype==CV_16S, the map is a 16-bit signed + * single-channel image, containing disparity values scaled by 16. To get the + * true disparity values from such fixed-point representation, you will need to + * divide each disp element by 16. If disptype==CV_32F, + * the disparity map will already contain the real disparity values on output. + * @param disptype Type of the output disparity map, CV_16S + * (default) or CV_32F. + * + * @see org.opencv.calib3d.StereoBM.operator() + */ + public void compute(Mat left, Mat right, Mat disparity, int disptype) + { + + compute_0(nativeObj, left.nativeObj, right.nativeObj, disparity.nativeObj, disptype); + + return; + } + +/** + *

Computes disparity using the BM algorithm for a rectified stereo pair.

+ * + *

The method executes the BM algorithm on a rectified stereo pair. See the + * stereo_match.cpp OpenCV sample on how to prepare images and call + * the method. Note that the method is not constant, thus you should not use the + * same StereoBM instance from within different threads + * simultaneously. The function is parallelized with the TBB library.

+ * + * @param left Left 8-bit single-channel image. + * @param right Right image of the same size and the same type as the left one. + * @param disparity Output disparity map. It has the same size as the input + * images. When disptype==CV_16S, the map is a 16-bit signed + * single-channel image, containing disparity values scaled by 16. To get the + * true disparity values from such fixed-point representation, you will need to + * divide each disp element by 16. If disptype==CV_32F, + * the disparity map will already contain the real disparity values on output. + * + * @see org.opencv.calib3d.StereoBM.operator() + */ + public void compute(Mat left, Mat right, Mat disparity) + { + + compute_1(nativeObj, left.nativeObj, right.nativeObj, disparity.nativeObj); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: StereoBM::StereoBM() + private static native long StereoBM_0(); + + // C++: StereoBM::StereoBM(int preset, int ndisparities = 0, int SADWindowSize = 21) + private static native long StereoBM_1(int preset, int ndisparities, int SADWindowSize); + private static native long StereoBM_2(int preset); + + // C++: void StereoBM::operator ()(Mat left, Mat right, Mat& disparity, int disptype = CV_16S) + private static native void compute_0(long nativeObj, long left_nativeObj, long right_nativeObj, long disparity_nativeObj, int disptype); + private static native void compute_1(long nativeObj, long left_nativeObj, long right_nativeObj, long disparity_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/calib3d/StereoSGBM.java b/src/org/opencv/calib3d/StereoSGBM.java new file mode 100644 index 0000000..84354fa --- /dev/null +++ b/src/org/opencv/calib3d/StereoSGBM.java @@ -0,0 +1,590 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.calib3d; + +import org.opencv.core.Mat; + +// C++: class StereoSGBM +/** + *

Class for computing stereo correspondence using the semi-global block + * matching algorithm.

+ * + *

class StereoSGBM

+ * + *

// C++ code:

+ * + * + *

StereoSGBM();

+ * + *

StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,

+ * + *

int P1=0, int P2=0, int disp12MaxDiff=0,

+ * + *

int preFilterCap=0, int uniquenessRatio=0,

+ * + *

int speckleWindowSize=0, int speckleRange=0,

+ * + *

bool fullDP=false);

+ * + *

virtual ~StereoSGBM();

+ * + *

virtual void operator()(InputArray left, InputArray right, OutputArray disp);

+ * + *

int minDisparity;

+ * + *

int numberOfDisparities;

+ * + *

int SADWindowSize;

+ * + *

int preFilterCap;

+ * + *

int uniquenessRatio;

+ * + *

int P1, P2;

+ * + *

int speckleWindowSize;

+ * + *

int speckleRange;

+ * + *

int disp12MaxDiff;

+ * + *

bool fullDP;...

+ * + *

};

+ * + *

The class implements the modified H. Hirschmuller algorithm [HH08] that + * differs from the original one as follows:

+ *
    + *
  • By default, the algorithm is single-pass, which means that you + * consider only 5 directions instead of 8. Set fullDP=true to run + * the full variant of the algorithm but beware that it may consume a lot of + * memory. + *
  • The algorithm matches blocks, not individual pixels. Though, setting + * SADWindowSize=1 reduces the blocks to single pixels. + *
  • Mutual information cost function is not implemented. Instead, a + * simpler Birchfield-Tomasi sub-pixel metric from [BT98] is used. Though, the + * color images are supported as well. + *
  • Some pre- and post- processing steps from K. Konolige algorithm + * :ocv:funcx:"StereoBM.operator()" are included, for example: pre-filtering + * (CV_STEREO_BM_XSOBEL type) and post-filtering (uniqueness check, + * quadratic interpolation and speckle filtering). + *
+ * + * @see org.opencv.calib3d.StereoSGBM + */ +public class StereoSGBM { + + protected final long nativeObj; + protected StereoSGBM(long addr) { nativeObj = addr; } + + + public static final int + DISP_SHIFT = 4, + DISP_SCALE = (1<Initializes StereoSGBM and sets parameters to custom values.??

+ * + *

The first constructor initializes StereoSGBM with all the + * default parameters. So, you only have to set StereoSGBM.numberOfDisparities + * at minimum. The second constructor enables you to set each parameter to a + * custom value.

+ * + * @see org.opencv.calib3d.StereoSGBM.StereoSGBM + */ + public StereoSGBM() + { + + nativeObj = StereoSGBM_0(); + + return; + } + + + // + // C++: StereoSGBM::StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1 = 0, int P2 = 0, int disp12MaxDiff = 0, int preFilterCap = 0, int uniquenessRatio = 0, int speckleWindowSize = 0, int speckleRange = 0, bool fullDP = false) + // + +/** + *

Initializes StereoSGBM and sets parameters to custom values.??

+ * + *

The first constructor initializes StereoSGBM with all the + * default parameters. So, you only have to set StereoSGBM.numberOfDisparities + * at minimum. The second constructor enables you to set each parameter to a + * custom value.

+ * + * @param minDisparity Minimum possible disparity value. Normally, it is zero + * but sometimes rectification algorithms can shift images, so this parameter + * needs to be adjusted accordingly. + * @param numDisparities Maximum disparity minus minimum disparity. The value is + * always greater than zero. In the current implementation, this parameter must + * be divisible by 16. + * @param SADWindowSize Matched block size. It must be an odd number + * >=1. Normally, it should be somewhere in the 3..11 + * range. + * @param P1 The first parameter controlling the disparity smoothness. See + * below. + * @param P2 The second parameter controlling the disparity smoothness. The + * larger the values are, the smoother the disparity is. P1 is the + * penalty on the disparity change by plus or minus 1 between neighbor pixels. + * P2 is the penalty on the disparity change by more than 1 between + * neighbor pixels. The algorithm requires P2 > P1. See + * stereo_match.cpp sample where some reasonably good + * P1 and P2 values are shown (like 8*number_of_image_channels*SADWindowSize*SADWindowSize + * and 32*number_of_image_channels*SADWindowSize*SADWindowSize, + * respectively). + * @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in + * the left-right disparity check. Set it to a non-positive value to disable the + * check. + * @param preFilterCap Truncation value for the prefiltered image pixels. The + * algorithm first computes x-derivative at each pixel and clips its value by + * [-preFilterCap, preFilterCap] interval. The result values are + * passed to the Birchfield-Tomasi pixel cost function. + * @param uniquenessRatio Margin in percentage by which the best (minimum) + * computed cost function value should "win" the second best value to consider + * the found match correct. Normally, a value within the 5-15 range is good + * enough. 
+ * @param speckleWindowSize Maximum size of smooth disparity regions to consider + * their noise speckles and invalidate. Set it to 0 to disable speckle + * filtering. Otherwise, set it somewhere in the 50-200 range. + * @param speckleRange Maximum disparity variation within each connected + * component. If you do speckle filtering, set the parameter to a positive + * value, it will be implicitly multiplied by 16. Normally, 1 or 2 is good + * enough. + * @param fullDP Set it to true to run the full-scale two-pass + * dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, + * which is large for 640x480 stereo and huge for HD-size pictures. By default, + * it is set to false. + * + * @see org.opencv.calib3d.StereoSGBM.StereoSGBM + */ + public StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2, int disp12MaxDiff, int preFilterCap, int uniquenessRatio, int speckleWindowSize, int speckleRange, boolean fullDP) + { + + nativeObj = StereoSGBM_1(minDisparity, numDisparities, SADWindowSize, P1, P2, disp12MaxDiff, preFilterCap, uniquenessRatio, speckleWindowSize, speckleRange, fullDP); + + return; + } + +/** + *

Initializes StereoSGBM and sets parameters to custom values.??

+ * + *

The first constructor initializes StereoSGBM with all the + * default parameters. So, you only have to set StereoSGBM.numberOfDisparities + * at minimum. The second constructor enables you to set each parameter to a + * custom value.

+ * + * @param minDisparity Minimum possible disparity value. Normally, it is zero + * but sometimes rectification algorithms can shift images, so this parameter + * needs to be adjusted accordingly. + * @param numDisparities Maximum disparity minus minimum disparity. The value is + * always greater than zero. In the current implementation, this parameter must + * be divisible by 16. + * @param SADWindowSize Matched block size. It must be an odd number + * >=1. Normally, it should be somewhere in the 3..11 + * range. + * + * @see org.opencv.calib3d.StereoSGBM.StereoSGBM + */ + public StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize) + { + + nativeObj = StereoSGBM_2(minDisparity, numDisparities, SADWindowSize); + + return; + } + + + // + // C++: void StereoSGBM::operator ()(Mat left, Mat right, Mat& disp) + // + + public void compute(Mat left, Mat right, Mat disp) + { + + compute_0(nativeObj, left.nativeObj, right.nativeObj, disp.nativeObj); + + return; + } + + + // + // C++: int StereoSGBM::minDisparity + // + + public int get_minDisparity() + { + + int retVal = get_minDisparity_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::minDisparity + // + + public void set_minDisparity(int minDisparity) + { + + set_minDisparity_0(nativeObj, minDisparity); + + return; + } + + + // + // C++: int StereoSGBM::numberOfDisparities + // + + public int get_numberOfDisparities() + { + + int retVal = get_numberOfDisparities_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::numberOfDisparities + // + + public void set_numberOfDisparities(int numberOfDisparities) + { + + set_numberOfDisparities_0(nativeObj, numberOfDisparities); + + return; + } + + + // + // C++: int StereoSGBM::SADWindowSize + // + + public int get_SADWindowSize() + { + + int retVal = get_SADWindowSize_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::SADWindowSize + // + + public void set_SADWindowSize(int SADWindowSize) + { + + 
set_SADWindowSize_0(nativeObj, SADWindowSize); + + return; + } + + + // + // C++: int StereoSGBM::preFilterCap + // + + public int get_preFilterCap() + { + + int retVal = get_preFilterCap_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::preFilterCap + // + + public void set_preFilterCap(int preFilterCap) + { + + set_preFilterCap_0(nativeObj, preFilterCap); + + return; + } + + + // + // C++: int StereoSGBM::uniquenessRatio + // + + public int get_uniquenessRatio() + { + + int retVal = get_uniquenessRatio_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::uniquenessRatio + // + + public void set_uniquenessRatio(int uniquenessRatio) + { + + set_uniquenessRatio_0(nativeObj, uniquenessRatio); + + return; + } + + + // + // C++: int StereoSGBM::P1 + // + + public int get_P1() + { + + int retVal = get_P1_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::P1 + // + + public void set_P1(int P1) + { + + set_P1_0(nativeObj, P1); + + return; + } + + + // + // C++: int StereoSGBM::P2 + // + + public int get_P2() + { + + int retVal = get_P2_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::P2 + // + + public void set_P2(int P2) + { + + set_P2_0(nativeObj, P2); + + return; + } + + + // + // C++: int StereoSGBM::speckleWindowSize + // + + public int get_speckleWindowSize() + { + + int retVal = get_speckleWindowSize_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::speckleWindowSize + // + + public void set_speckleWindowSize(int speckleWindowSize) + { + + set_speckleWindowSize_0(nativeObj, speckleWindowSize); + + return; + } + + + // + // C++: int StereoSGBM::speckleRange + // + + public int get_speckleRange() + { + + int retVal = get_speckleRange_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::speckleRange + // + + public void set_speckleRange(int speckleRange) + { + + set_speckleRange_0(nativeObj, speckleRange); + + return; + } + + + // + // 
C++: int StereoSGBM::disp12MaxDiff + // + + public int get_disp12MaxDiff() + { + + int retVal = get_disp12MaxDiff_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::disp12MaxDiff + // + + public void set_disp12MaxDiff(int disp12MaxDiff) + { + + set_disp12MaxDiff_0(nativeObj, disp12MaxDiff); + + return; + } + + + // + // C++: bool StereoSGBM::fullDP + // + + public boolean get_fullDP() + { + + boolean retVal = get_fullDP_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::fullDP + // + + public void set_fullDP(boolean fullDP) + { + + set_fullDP_0(nativeObj, fullDP); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: StereoSGBM::StereoSGBM() + private static native long StereoSGBM_0(); + + // C++: StereoSGBM::StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1 = 0, int P2 = 0, int disp12MaxDiff = 0, int preFilterCap = 0, int uniquenessRatio = 0, int speckleWindowSize = 0, int speckleRange = 0, bool fullDP = false) + private static native long StereoSGBM_1(int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2, int disp12MaxDiff, int preFilterCap, int uniquenessRatio, int speckleWindowSize, int speckleRange, boolean fullDP); + private static native long StereoSGBM_2(int minDisparity, int numDisparities, int SADWindowSize); + + // C++: void StereoSGBM::operator ()(Mat left, Mat right, Mat& disp) + private static native void compute_0(long nativeObj, long left_nativeObj, long right_nativeObj, long disp_nativeObj); + + // C++: int StereoSGBM::minDisparity + private static native int get_minDisparity_0(long nativeObj); + + // C++: void StereoSGBM::minDisparity + private static native void set_minDisparity_0(long nativeObj, int minDisparity); + + // C++: int StereoSGBM::numberOfDisparities + private static native int get_numberOfDisparities_0(long nativeObj); + + // C++: void StereoSGBM::numberOfDisparities + private static 
native void set_numberOfDisparities_0(long nativeObj, int numberOfDisparities); + + // C++: int StereoSGBM::SADWindowSize + private static native int get_SADWindowSize_0(long nativeObj); + + // C++: void StereoSGBM::SADWindowSize + private static native void set_SADWindowSize_0(long nativeObj, int SADWindowSize); + + // C++: int StereoSGBM::preFilterCap + private static native int get_preFilterCap_0(long nativeObj); + + // C++: void StereoSGBM::preFilterCap + private static native void set_preFilterCap_0(long nativeObj, int preFilterCap); + + // C++: int StereoSGBM::uniquenessRatio + private static native int get_uniquenessRatio_0(long nativeObj); + + // C++: void StereoSGBM::uniquenessRatio + private static native void set_uniquenessRatio_0(long nativeObj, int uniquenessRatio); + + // C++: int StereoSGBM::P1 + private static native int get_P1_0(long nativeObj); + + // C++: void StereoSGBM::P1 + private static native void set_P1_0(long nativeObj, int P1); + + // C++: int StereoSGBM::P2 + private static native int get_P2_0(long nativeObj); + + // C++: void StereoSGBM::P2 + private static native void set_P2_0(long nativeObj, int P2); + + // C++: int StereoSGBM::speckleWindowSize + private static native int get_speckleWindowSize_0(long nativeObj); + + // C++: void StereoSGBM::speckleWindowSize + private static native void set_speckleWindowSize_0(long nativeObj, int speckleWindowSize); + + // C++: int StereoSGBM::speckleRange + private static native int get_speckleRange_0(long nativeObj); + + // C++: void StereoSGBM::speckleRange + private static native void set_speckleRange_0(long nativeObj, int speckleRange); + + // C++: int StereoSGBM::disp12MaxDiff + private static native int get_disp12MaxDiff_0(long nativeObj); + + // C++: void StereoSGBM::disp12MaxDiff + private static native void set_disp12MaxDiff_0(long nativeObj, int disp12MaxDiff); + + // C++: bool StereoSGBM::fullDP + private static native boolean get_fullDP_0(long nativeObj); + + // C++: void 
StereoSGBM::fullDP + private static native void set_fullDP_0(long nativeObj, boolean fullDP); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/calib3d/package.bluej b/src/org/opencv/calib3d/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/contrib/Contrib.java b/src/org/opencv/contrib/Contrib.java new file mode 100644 index 0000000..1119797 --- /dev/null +++ b/src/org/opencv/contrib/Contrib.java @@ -0,0 +1,144 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.contrib; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfPoint; +import org.opencv.utils.Converters; + +public class Contrib { + + public static final int + ROTATION = 1, + TRANSLATION = 2, + RIGID_BODY_MOTION = 4, + COLORMAP_AUTUMN = 0, + COLORMAP_BONE = 1, + COLORMAP_JET = 2, + COLORMAP_WINTER = 3, + COLORMAP_RAINBOW = 4, + COLORMAP_OCEAN = 5, + COLORMAP_SUMMER = 6, + COLORMAP_SPRING = 7, + COLORMAP_COOL = 8, + COLORMAP_HSV = 9, + COLORMAP_PINK = 10, + COLORMAP_HOT = 11, + RETINA_COLOR_RANDOM = 0, + RETINA_COLOR_DIAGONAL = 1, + RETINA_COLOR_BAYER = 2; + + + // + // C++: void applyColorMap(Mat src, Mat& dst, int colormap) + // + +/** + *

Applies a GNU Octave/MATLAB equivalent colormap on a given image.

+ * + *

Currently the following GNU Octave/MATLAB equivalent colormaps are + * implemented: enum

+ * + *

// C++ code:

+ * + * + *

COLORMAP_AUTUMN = 0,

+ * + *

COLORMAP_BONE = 1,

+ * + *

COLORMAP_JET = 2,

+ * + *

COLORMAP_WINTER = 3,

+ * + *

COLORMAP_RAINBOW = 4,

+ * + *

COLORMAP_OCEAN = 5,

+ * + *

COLORMAP_SUMMER = 6,

+ * + *

COLORMAP_SPRING = 7,

+ * + *

COLORMAP_COOL = 8,

+ * + *

COLORMAP_HSV = 9,

+ * + *

COLORMAP_PINK = 10,

+ * + *

COLORMAP_HOT = 11

+ * + * + * @param src The source image, grayscale or colored does not matter. + * @param dst The result is the colormapped source image. Note: "Mat.create" is + * called on dst. + * @param colormap The colormap to apply, see the list of available colormaps + * below. + * + * @see org.opencv.contrib.Contrib.applyColorMap + */ + public static void applyColorMap(Mat src, Mat dst, int colormap) + { + + applyColorMap_0(src.nativeObj, dst.nativeObj, colormap); + + return; + } + + + // + // C++: int chamerMatching(Mat img, Mat templ, vector_vector_Point& results, vector_float& cost, double templScale = 1, int maxMatches = 20, double minMatchDistance = 1.0, int padX = 3, int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, double orientationWeight = 0.5, double truncate = 20) + // + + public static int chamerMatching(Mat img, Mat templ, List results, MatOfFloat cost, double templScale, int maxMatches, double minMatchDistance, int padX, int padY, int scales, double minScale, double maxScale, double orientationWeight, double truncate) + { + Mat results_mat = new Mat(); + Mat cost_mat = cost; + int retVal = chamerMatching_0(img.nativeObj, templ.nativeObj, results_mat.nativeObj, cost_mat.nativeObj, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate); + Converters.Mat_to_vector_vector_Point(results_mat, results); + return retVal; + } + + public static int chamerMatching(Mat img, Mat templ, List results, MatOfFloat cost) + { + Mat results_mat = new Mat(); + Mat cost_mat = cost; + int retVal = chamerMatching_1(img.nativeObj, templ.nativeObj, results_mat.nativeObj, cost_mat.nativeObj); + Converters.Mat_to_vector_vector_Point(results_mat, results); + return retVal; + } + + + // + // C++: Ptr_FaceRecognizer createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX) + // + + // Return type 'Ptr_FaceRecognizer' is not supported, skipping the function + + + // + // C++: 
Ptr_FaceRecognizer createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX) + // + + // Return type 'Ptr_FaceRecognizer' is not supported, skipping the function + + + // + // C++: Ptr_FaceRecognizer createLBPHFaceRecognizer(int radius = 1, int neighbors = 8, int grid_x = 8, int grid_y = 8, double threshold = DBL_MAX) + // + + // Return type 'Ptr_FaceRecognizer' is not supported, skipping the function + + + + + // C++: void applyColorMap(Mat src, Mat& dst, int colormap) + private static native void applyColorMap_0(long src_nativeObj, long dst_nativeObj, int colormap); + + // C++: int chamerMatching(Mat img, Mat templ, vector_vector_Point& results, vector_float& cost, double templScale = 1, int maxMatches = 20, double minMatchDistance = 1.0, int padX = 3, int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, double orientationWeight = 0.5, double truncate = 20) + private static native int chamerMatching_0(long img_nativeObj, long templ_nativeObj, long results_mat_nativeObj, long cost_mat_nativeObj, double templScale, int maxMatches, double minMatchDistance, int padX, int padY, int scales, double minScale, double maxScale, double orientationWeight, double truncate); + private static native int chamerMatching_1(long img_nativeObj, long templ_nativeObj, long results_mat_nativeObj, long cost_mat_nativeObj); + +} diff --git a/src/org/opencv/contrib/FaceRecognizer.java b/src/org/opencv/contrib/FaceRecognizer.java new file mode 100644 index 0000000..7cdf086 --- /dev/null +++ b/src/org/opencv/contrib/FaceRecognizer.java @@ -0,0 +1,406 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.contrib; + +import java.lang.String; +import java.util.List; +import org.opencv.core.Algorithm; +import org.opencv.core.Mat; +import org.opencv.utils.Converters; + +// C++: class FaceRecognizer +/** + *

All face recognition models in OpenCV are derived from the abstract base + * class "FaceRecognizer", which provides a unified access to all face + * recongition algorithms in OpenCV.

+ * + *

class FaceRecognizer : public Algorithm

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

//! virtual destructor

+ * + *

virtual ~FaceRecognizer() {}

+ * + *

// Trains a FaceRecognizer.

+ * + *

virtual void train(InputArray src, InputArray labels) = 0;

+ * + *

// Updates a FaceRecognizer.

+ * + *

virtual void update(InputArrayOfArrays src, InputArray labels);

+ * + *

// Gets a prediction from a FaceRecognizer.

+ * + *

virtual int predict(InputArray src) const = 0;

+ * + *

// Predicts the label and confidence for a given sample.

+ * + *

virtual void predict(InputArray src, int &label, double &confidence) const = + * 0;

+ * + *

// Serializes this object to a given filename.

+ * + *

virtual void save(const string& filename) const;

+ * + *

// Deserializes this object from a given filename.

+ * + *

virtual void load(const string& filename);

+ * + *

// Serializes this object to a given cv.FileStorage.

+ * + *

virtual void save(FileStorage& fs) const = 0;

+ * + *

// Deserializes this object from a given cv.FileStorage.

+ * + *

virtual void load(const FileStorage& fs) = 0;

+ * + *

};

+ * + * @see org.opencv.contrib.FaceRecognizer : public Algorithm + */ +public class FaceRecognizer extends Algorithm { + + protected FaceRecognizer(long addr) { super(addr); } + + + // + // C++: void FaceRecognizer::load(string filename) + // + +/** + *

Loads a "FaceRecognizer" and its model state.

+ * + *

Loads a persisted model and state from a given XML or YAML file. Every + * "FaceRecognizer" has to overwrite FaceRecognizer.load(FileStorage& + * fs) to enable loading the model state. FaceRecognizer.load(FileStorage& + * fs) in turn gets called by FaceRecognizer.load(const string& + * filename), to ease saving a model.

+ * + * @param filename a filename + * + * @see org.opencv.contrib.FaceRecognizer.load + */ + public void load(String filename) + { + + load_0(nativeObj, filename); + + return; + } + + + // + // C++: void FaceRecognizer::predict(Mat src, int& label, double& confidence) + // + +/** + *

Predicts a label and associated confidence (e.g. distance) for a given input + * image.

+ * + *

The suffix const means that prediction does not affect the + * internal model state, so the method can be safely called from within + * different threads.

+ * + *

The following example shows how to get a prediction from a trained model: + * using namespace cv;

+ * + *

// C++ code:

+ * + *

// Do your initialization here (create the cv.FaceRecognizer model)...

+ * + *

//...

+ * + *

// Read in a sample image:

+ * + *

Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);

+ * + *

// And get a prediction from the cv.FaceRecognizer:

+ * + *

int predicted = model->predict(img);

+ * + *

Or to get a prediction and the associated confidence (e.g. distance):

+ * + *

using namespace cv;

+ * + *

// C++ code:

+ * + *

// Do your initialization here (create the cv.FaceRecognizer model)...

+ * + *

//...

+ * + *

Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);

+ * + *

// Some variables for the predicted label and associated confidence (e.g. + * distance):

+ * + *

int predicted_label = -1;

+ * + *

double predicted_confidence = 0.0;

+ * + *

// Get the prediction and associated confidence from the model

+ * + *

model->predict(img, predicted_label, predicted_confidence);

+ * + * @param src Sample image to get a prediction from. + * @param label The predicted label for the given image. + * @param confidence Associated confidence (e.g. distance) for the predicted + * label. + * + * @see org.opencv.contrib.FaceRecognizer.predict + */ + public void predict(Mat src, int[] label, double[] confidence) + { + double[] label_out = new double[1]; + double[] confidence_out = new double[1]; + predict_0(nativeObj, src.nativeObj, label_out, confidence_out); + if(label!=null) label[0] = (int)label_out[0]; + if(confidence!=null) confidence[0] = (double)confidence_out[0]; + return; + } + + + // + // C++: void FaceRecognizer::save(string filename) + // + +/** + *

Saves a "FaceRecognizer" and its model state.

+ * + *

Saves this model to a given filename, either as XML or YAML.

+ * + *

Saves this model to a given "FileStorage".

+ * + *

Every "FaceRecognizer" overwrites FaceRecognizer.save(FileStorage& + * fs) to save the internal model state. FaceRecognizer.save(const + * string& filename) saves the state of a model to the given filename.

+ * + *

The suffix const means that prediction does not affect the + * internal model state, so the method can be safely called from within + * different threads.

+ * + * @param filename The filename to store this "FaceRecognizer" to (either + * XML/YAML). + * + * @see org.opencv.contrib.FaceRecognizer.save + */ + public void save(String filename) + { + + save_0(nativeObj, filename); + + return; + } + + + // + // C++: void FaceRecognizer::train(vector_Mat src, Mat labels) + // + +/** + *

Trains a FaceRecognizer with given data and associated labels.

+ * + *

The following source code snippet shows you how to learn a Fisherfaces model + * on a given set of images. The images are read with "imread" and pushed into a + * std.vector. The labels of each image are stored within a + * std.vector (you could also use a "Mat" of type + * "CV_32SC1"). Think of the label as the subject (the person) this image + * belongs to, so same subjects (persons) should have the same label. For the + * available "FaceRecognizer" you don't have to pay any attention to the order + * of the labels, just make sure same persons have the same label: // holds + * images and labels

+ * + *

// C++ code:

+ * + *

vector images;

+ * + *

vector labels;

+ * + *

// images for first person

+ * + *

images.push_back(imread("person0/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(0);

+ * + *

images.push_back(imread("person0/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(0);

+ * + *

images.push_back(imread("person0/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(0);

+ * + *

// images for second person

+ * + *

images.push_back(imread("person1/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(1);

+ * + *

images.push_back(imread("person1/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(1);

+ * + *

images.push_back(imread("person1/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(1);

+ * + *

Now that you have read some images, we can create a new "FaceRecognizer". In + * this example I'll create a Fisherfaces model and decide to keep all of the + * possible Fisherfaces:

+ * + *

// Create a new Fisherfaces model and retain all available Fisherfaces, + *

+ * + *

// C++ code:

+ * + *

// this is the most common usage of this specific FaceRecognizer:

+ * + *

//

+ * + *

Ptr model = createFisherFaceRecognizer();

+ * + *

And finally train it on the given dataset (the face images and labels): + *

+ * + *

// This is the common interface to train all of the available + * cv.FaceRecognizer

+ * + *

// C++ code:

+ * + *

// implementations:

+ * + *

//

+ * + *

model->train(images, labels);

+ * + * @param src The training images, that means the faces you want to learn. The + * data has to be given as a vector. + * @param labels The labels corresponding to the images have to be given either + * as a vector or a + * + * @see org.opencv.contrib.FaceRecognizer.train + */ + public void train(List src, Mat labels) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + train_0(nativeObj, src_mat.nativeObj, labels.nativeObj); + + return; + } + + + // + // C++: void FaceRecognizer::update(vector_Mat src, Mat labels) + // + +/** + *

Updates a FaceRecognizer with given data and associated labels.

+ * + *

This method updates a (probably trained) "FaceRecognizer", but only if the + * algorithm supports it. The Local Binary Patterns Histograms (LBPH) recognizer + * (see "createLBPHFaceRecognizer") can be updated. For the Eigenfaces and + * Fisherfaces method, this is algorithmically not possible and you have to + * re-estimate the model with "FaceRecognizer.train". In any case, a call to + * train empties the existing model and learns a new model, while update does + * not delete any model data. + * // Create a new LBPH model (it can be updated) and use the default + * parameters,

+ * + *

// C++ code:

+ * + *

// this is the most common usage of this specific FaceRecognizer:

+ * + *

//

+ * + *

Ptr model = createLBPHFaceRecognizer();

+ * + *

// This is the common interface to train all of the available + * cv.FaceRecognizer

+ * + *

// implementations:

+ * + *

//

+ * + *

model->train(images, labels);

+ * + *

// Some containers to hold new image:

+ * + *

vector newImages;

+ * + *

vector newLabels;

+ * + *

// You should add some images to the containers:

+ * + *

//

+ * + *

//...

+ * + *

//

+ * + *

// Now updating the model is as easy as calling:

+ * + *

model->update(newImages,newLabels);

+ * + *

// This will preserve the old model data and extend the existing model

+ * + *

// with the new features extracted from newImages!

+ * + *

Calling update on an Eigenfaces model (see "createEigenFaceRecognizer"), + * which doesn't support updating, will throw an error similar to:

+ * + *

OpenCV Error: The function/feature is not implemented (This FaceRecognizer + * (FaceRecognizer.Eigenfaces) does not support updating, you have to use + * FaceRecognizer.train to update it.) in update, file /home/philipp/git/opencv/modules/contrib/src/facerec.cpp, + * line 305

+ * + *

// C++ code:

+ * + *

terminate called after throwing an instance of 'cv.Exception'

+ * + *

Please note: The "FaceRecognizer" does not store your training images, + * because this would be very memory intense and it's not the responsibility of + * te "FaceRecognizer" to do so. The caller is responsible for maintaining the + * dataset, he want to work with. + *

+ * + * @param src The training images, that means the faces you want to learn. The + * data has to be given as a vector. + * @param labels The labels corresponding to the images have to be given either + * as a vector or a + * + * @see org.opencv.contrib.FaceRecognizer.update + */ + public void update(List src, Mat labels) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + update_0(nativeObj, src_mat.nativeObj, labels.nativeObj); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void FaceRecognizer::load(string filename) + private static native void load_0(long nativeObj, String filename); + + // C++: void FaceRecognizer::predict(Mat src, int& label, double& confidence) + private static native void predict_0(long nativeObj, long src_nativeObj, double[] label_out, double[] confidence_out); + + // C++: void FaceRecognizer::save(string filename) + private static native void save_0(long nativeObj, String filename); + + // C++: void FaceRecognizer::train(vector_Mat src, Mat labels) + private static native void train_0(long nativeObj, long src_mat_nativeObj, long labels_nativeObj); + + // C++: void FaceRecognizer::update(vector_Mat src, Mat labels) + private static native void update_0(long nativeObj, long src_mat_nativeObj, long labels_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/contrib/StereoVar.java b/src/org/opencv/contrib/StereoVar.java new file mode 100644 index 0000000..642a8a6 --- /dev/null +++ b/src/org/opencv/contrib/StereoVar.java @@ -0,0 +1,601 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.contrib; + +import org.opencv.core.Mat; + +// C++: class StereoVar +/** + *

Class for computing stereo correspondence using the variational matching + * algorithm

+ * + *

class StereoVar

+ * + *

// C++ code:

+ * + * + *

StereoVar();

+ * + *

StereoVar(int levels, double pyrScale,

+ * + *

int nIt, int minDisp, int maxDisp,

+ * + *

int poly_n, double poly_sigma, float fi,

+ * + *

float lambda, int penalization, int cycle,

+ * + *

int flags);

+ * + *

virtual ~StereoVar();

+ * + *

virtual void operator()(InputArray left, InputArray right, OutputArray disp);

+ * + *

int levels;

+ * + *

double pyrScale;

+ * + *

int nIt;

+ * + *

int minDisp;

+ * + *

int maxDisp;

+ * + *

int poly_n;

+ * + *

double poly_sigma;

+ * + *

float fi;

+ * + *

float lambda;

+ * + *

int penalization;

+ * + *

int cycle;

+ * + *

int flags;...

+ * + *

};

+ * + *

The class implements the modified S. G. Kosov algorithm [Publication] that + * differs from the original one as follows:

+ *
    + *
  • The automatic initialization of method's parameters is added. + *
  • The method of Smart Iteration Distribution (SID) is implemented. + *
  • The support of Multi-Level Adaptation Technique (MLAT) is not + * included. + *
  • The method of dynamic adaptation of method's parameters is not + * included. + *
+ * + * @see org.opencv.contrib.StereoVar + */ +public class StereoVar { + + protected final long nativeObj; + protected StereoVar(long addr) { nativeObj = addr; } + + + public static final int + USE_INITIAL_DISPARITY = 1, + USE_EQUALIZE_HIST = 2, + USE_SMART_ID = 4, + USE_AUTO_PARAMS = 8, + USE_MEDIAN_FILTERING = 16, + CYCLE_O = 0, + CYCLE_V = 1, + PENALIZATION_TICHONOV = 0, + PENALIZATION_CHARBONNIER = 1, + PENALIZATION_PERONA_MALIK = 2; + + + // + // C++: StereoVar::StereoVar() + // + +/** + *

The constructor

+ * + *

The first constructor initializes StereoVar with all the default + * parameters. So, you only have to set StereoVar.maxDisp and / or + * StereoVar.minDisp at minimum. The second constructor enables + * you to set each parameter to a custom value.

+ * + * @see org.opencv.contrib.StereoVar.StereoVar + */ + public StereoVar() + { + + nativeObj = StereoVar_0(); + + return; + } + + + // + // C++: StereoVar::StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags) + // + +/** + *

The constructor

+ * + *

The first constructor initializes StereoVar with all the default + * parameters. So, you only have to set StereoVar.maxDisp and / or + * StereoVar.minDisp at minimum. The second constructor enables + * you to set each parameter to a custom value.

+ * + * @param levels The number of pyramid layers, including the initial image. + * levels=1 means that no extra layers are created and only the original images + * are used. This parameter is ignored if flag USE_AUTO_PARAMS is set. + * @param pyrScale Specifies the image scale (<1) to build the pyramids for each + * image. pyrScale=0.5 means the classical pyramid, where each next layer is + * twice smaller than the previous. (This parameter is ignored if flag + * USE_AUTO_PARAMS is set). + * @param nIt The number of iterations the algorithm does at each pyramid level. + * (If the flag USE_SMART_ID is set, the number of iterations will be + * redistributed in such a way, that more iterations will be done on more + * coarser levels.) + * @param minDisp Minimum possible disparity value. Could be negative in case + * the left and right input images change places. + * @param maxDisp Maximum possible disparity value. + * @param poly_n Size of the pixel neighbourhood used to find polynomial + * expansion in each pixel. The larger values mean that the image will be + * approximated with smoother surfaces, yielding more robust algorithm and more + * blurred motion field. Typically, poly_n = 3, 5 or 7 + * @param poly_sigma Standard deviation of the Gaussian that is used to smooth + * derivatives that are used as a basis for the polynomial expansion. For + * poly_n=5 you can set poly_sigma=1.1, for poly_n=7 a good value would be + * poly_sigma=1.5 + * @param fi The smoothness parameter, ot the weight coefficient for the + * smoothness term. + * @param lambda The threshold parameter for edge-preserving smoothness. (This + * parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK + * is used.) + * @param penalization Possible values: PENALIZATION_TICHONOV - linear + * smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; + * PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. 
(This + * parameter is ignored if flag USE_AUTO_PARAMS is set). + * @param cycle Type of the multigrid cycle. Possible values: CYCLE_O and + * CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if + * flag USE_AUTO_PARAMS is set). + * @param flags The operation flags; can be a combination of the following: + *
    + *
  • USE_INITIAL_DISPARITY: Use the input flow as the initial flow + * approximation. + *
  • USE_EQUALIZE_HIST: Use the histogram equalization in the + * pre-processing phase. + *
  • USE_SMART_ID: Use the smart iteration distribution (SID). + *
  • USE_AUTO_PARAMS: Allow the method to initialize the main parameters. + *
  • USE_MEDIAN_FILTERING: Use the median filer of the solution in the post + * processing phase. + *
+ * + * @see org.opencv.contrib.StereoVar.StereoVar + */ + public StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags) + { + + nativeObj = StereoVar_1(levels, pyrScale, nIt, minDisp, maxDisp, poly_n, poly_sigma, fi, lambda, penalization, cycle, flags); + + return; + } + + + // + // C++: void StereoVar::operator ()(Mat left, Mat right, Mat& disp) + // + + public void compute(Mat left, Mat right, Mat disp) + { + + compute_0(nativeObj, left.nativeObj, right.nativeObj, disp.nativeObj); + + return; + } + + + // + // C++: int StereoVar::levels + // + + public int get_levels() + { + + int retVal = get_levels_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::levels + // + + public void set_levels(int levels) + { + + set_levels_0(nativeObj, levels); + + return; + } + + + // + // C++: double StereoVar::pyrScale + // + + public double get_pyrScale() + { + + double retVal = get_pyrScale_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::pyrScale + // + + public void set_pyrScale(double pyrScale) + { + + set_pyrScale_0(nativeObj, pyrScale); + + return; + } + + + // + // C++: int StereoVar::nIt + // + + public int get_nIt() + { + + int retVal = get_nIt_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::nIt + // + + public void set_nIt(int nIt) + { + + set_nIt_0(nativeObj, nIt); + + return; + } + + + // + // C++: int StereoVar::minDisp + // + + public int get_minDisp() + { + + int retVal = get_minDisp_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::minDisp + // + + public void set_minDisp(int minDisp) + { + + set_minDisp_0(nativeObj, minDisp); + + return; + } + + + // + // C++: int StereoVar::maxDisp + // + + public int get_maxDisp() + { + + int retVal = get_maxDisp_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::maxDisp + // + + public void 
set_maxDisp(int maxDisp) + { + + set_maxDisp_0(nativeObj, maxDisp); + + return; + } + + + // + // C++: int StereoVar::poly_n + // + + public int get_poly_n() + { + + int retVal = get_poly_n_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::poly_n + // + + public void set_poly_n(int poly_n) + { + + set_poly_n_0(nativeObj, poly_n); + + return; + } + + + // + // C++: double StereoVar::poly_sigma + // + + public double get_poly_sigma() + { + + double retVal = get_poly_sigma_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::poly_sigma + // + + public void set_poly_sigma(double poly_sigma) + { + + set_poly_sigma_0(nativeObj, poly_sigma); + + return; + } + + + // + // C++: float StereoVar::fi + // + + public float get_fi() + { + + float retVal = get_fi_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::fi + // + + public void set_fi(float fi) + { + + set_fi_0(nativeObj, fi); + + return; + } + + + // + // C++: float StereoVar::lambda + // + + public float get_lambda() + { + + float retVal = get_lambda_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::lambda + // + + public void set_lambda(float lambda) + { + + set_lambda_0(nativeObj, lambda); + + return; + } + + + // + // C++: int StereoVar::penalization + // + + public int get_penalization() + { + + int retVal = get_penalization_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::penalization + // + + public void set_penalization(int penalization) + { + + set_penalization_0(nativeObj, penalization); + + return; + } + + + // + // C++: int StereoVar::cycle + // + + public int get_cycle() + { + + int retVal = get_cycle_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::cycle + // + + public void set_cycle(int cycle) + { + + set_cycle_0(nativeObj, cycle); + + return; + } + + + // + // C++: int StereoVar::flags + // + + public int get_flags() + { + + int retVal = get_flags_0(nativeObj); + + return 
retVal; + } + + + // + // C++: void StereoVar::flags + // + + public void set_flags(int flags) + { + + set_flags_0(nativeObj, flags); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: StereoVar::StereoVar() + private static native long StereoVar_0(); + + // C++: StereoVar::StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags) + private static native long StereoVar_1(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); + + // C++: void StereoVar::operator ()(Mat left, Mat right, Mat& disp) + private static native void compute_0(long nativeObj, long left_nativeObj, long right_nativeObj, long disp_nativeObj); + + // C++: int StereoVar::levels + private static native int get_levels_0(long nativeObj); + + // C++: void StereoVar::levels + private static native void set_levels_0(long nativeObj, int levels); + + // C++: double StereoVar::pyrScale + private static native double get_pyrScale_0(long nativeObj); + + // C++: void StereoVar::pyrScale + private static native void set_pyrScale_0(long nativeObj, double pyrScale); + + // C++: int StereoVar::nIt + private static native int get_nIt_0(long nativeObj); + + // C++: void StereoVar::nIt + private static native void set_nIt_0(long nativeObj, int nIt); + + // C++: int StereoVar::minDisp + private static native int get_minDisp_0(long nativeObj); + + // C++: void StereoVar::minDisp + private static native void set_minDisp_0(long nativeObj, int minDisp); + + // C++: int StereoVar::maxDisp + private static native int get_maxDisp_0(long nativeObj); + + // C++: void StereoVar::maxDisp + private static native void set_maxDisp_0(long nativeObj, int maxDisp); + + // C++: int StereoVar::poly_n + private static native int get_poly_n_0(long 
nativeObj); + + // C++: void StereoVar::poly_n + private static native void set_poly_n_0(long nativeObj, int poly_n); + + // C++: double StereoVar::poly_sigma + private static native double get_poly_sigma_0(long nativeObj); + + // C++: void StereoVar::poly_sigma + private static native void set_poly_sigma_0(long nativeObj, double poly_sigma); + + // C++: float StereoVar::fi + private static native float get_fi_0(long nativeObj); + + // C++: void StereoVar::fi + private static native void set_fi_0(long nativeObj, float fi); + + // C++: float StereoVar::lambda + private static native float get_lambda_0(long nativeObj); + + // C++: void StereoVar::lambda + private static native void set_lambda_0(long nativeObj, float lambda); + + // C++: int StereoVar::penalization + private static native int get_penalization_0(long nativeObj); + + // C++: void StereoVar::penalization + private static native void set_penalization_0(long nativeObj, int penalization); + + // C++: int StereoVar::cycle + private static native int get_cycle_0(long nativeObj); + + // C++: void StereoVar::cycle + private static native void set_cycle_0(long nativeObj, int cycle); + + // C++: int StereoVar::flags + private static native int get_flags_0(long nativeObj); + + // C++: void StereoVar::flags + private static native void set_flags_0(long nativeObj, int flags); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/contrib/package.bluej b/src/org/opencv/contrib/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/core/Algorithm.java b/src/org/opencv/core/Algorithm.java new file mode 100644 index 0000000..bd855b9 --- /dev/null +++ b/src/org/opencv/core/Algorithm.java @@ -0,0 +1,361 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.core; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.utils.Converters; + +// C++: class Algorithm +/** + *

This is a base class for all more or less complex algorithms in OpenCV, + * especially for classes of algorithms, for which there can be multiple + * implementations. The examples are stereo correspondence (for which there are + * algorithms like block matching, semi-global block matching, graph-cut etc.), + * background subtraction (which can be done using mixture-of-gaussians models, + * codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, + * Horn-Schunck etc.).

+ * + *

The class provides the following features for all derived classes:

+ *
    + *
  • so called "virtual constructor". That is, each Algorithm derivative is + * registered at program start and you can get the list of registered algorithms + * and create instance of a particular algorithm by its name (see + * Algorithm.create). If you plan to add your own algorithms, it + * is good practice to add a unique prefix to your algorithms to distinguish + * them from other algorithms. + *
  • setting/retrieving algorithm parameters by name. If you used video + * capturing functionality from OpenCV highgui module, you are probably familar + * with cvSetCaptureProperty(), cvGetCaptureProperty(), + * VideoCapture.set() and VideoCapture.get(). + * Algorithm provides similar method where instead of integer id's + * you specify the parameter names as text strings. See Algorithm.set + * and Algorithm.get for details. + *
  • reading and writing parameters from/to XML or YAML files. Every + * Algorithm derivative can store all its parameters and then read them back. + * There is no need to re-implement it each time. + *
+ * + *

Here is example of SIFT use in your application via Algorithm interface:

+ * + *

#include "opencv2/opencv.hpp"

+ * + *

// C++ code:

+ * + *

#include "opencv2/nonfree/nonfree.hpp"...

+ * + *

initModule_nonfree(); // to load SURF/SIFT etc.

+ * + *

Ptr sift = Algorithm.create("Feature2D.SIFT");

+ * + *

FileStorage fs("sift_params.xml", FileStorage.READ);

+ * + *

if(fs.isOpened()) // if we have file with parameters, read them

+ * + * + *

sift->read(fs["sift_params"]);

+ * + *

fs.release();

+ * + * + *

else // else modify the parameters and store them; user can later edit the + * file to use different parameters

+ * + * + *

sift->set("contrastThreshold", 0.01f); // lower the contrast threshold, + * compared to the default value

+ * + * + *

WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);

+ * + *

sift->write(fs);

+ * + * + * + *

Mat image = imread("myimage.png", 0), descriptors;

+ * + *

vector keypoints;

+ * + *

(*sift)(image, noArray(), keypoints, descriptors);

+ * + * @see org.opencv.core.Algorithm + */ +public class Algorithm { + + protected final long nativeObj; + protected Algorithm(long addr) { nativeObj = addr; } + + + // + // C++: static Ptr_Algorithm Algorithm::_create(string name) + // + + // Return type 'Ptr_Algorithm' is not supported, skipping the function + + + // + // C++: Ptr_Algorithm Algorithm::getAlgorithm(string name) + // + + // Return type 'Ptr_Algorithm' is not supported, skipping the function + + + // + // C++: bool Algorithm::getBool(string name) + // + + public boolean getBool(String name) + { + + boolean retVal = getBool_0(nativeObj, name); + + return retVal; + } + + + // + // C++: double Algorithm::getDouble(string name) + // + + public double getDouble(String name) + { + + double retVal = getDouble_0(nativeObj, name); + + return retVal; + } + + + // + // C++: int Algorithm::getInt(string name) + // + + public int getInt(String name) + { + + int retVal = getInt_0(nativeObj, name); + + return retVal; + } + + + // + // C++: static void Algorithm::getList(vector_string& algorithms) + // + + // Unknown type 'vector_string' (O), skipping the function + + + // + // C++: Mat Algorithm::getMat(string name) + // + + public Mat getMat(String name) + { + + Mat retVal = new Mat(getMat_0(nativeObj, name)); + + return retVal; + } + + + // + // C++: vector_Mat Algorithm::getMatVector(string name) + // + + public List getMatVector(String name) + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getMatVector_0(nativeObj, name)); + Converters.Mat_to_vector_Mat(retValMat, retVal); + return retVal; + } + + + // + // C++: void Algorithm::getParams(vector_string& names) + // + + // Unknown type 'vector_string' (O), skipping the function + + + // + // C++: string Algorithm::getString(string name) + // + + public String getString(String name) + { + + String retVal = getString_0(nativeObj, name); + + return retVal; + } + + + // + // C++: string Algorithm::paramHelp(string name) + // + + public String 
paramHelp(String name) + { + + String retVal = paramHelp_0(nativeObj, name); + + return retVal; + } + + + // + // C++: int Algorithm::paramType(string name) + // + + public int paramType(String name) + { + + int retVal = paramType_0(nativeObj, name); + + return retVal; + } + + + // + // C++: void Algorithm::setAlgorithm(string name, Ptr_Algorithm value) + // + + // Unknown type 'Ptr_Algorithm' (I), skipping the function + + + // + // C++: void Algorithm::setBool(string name, bool value) + // + + public void setBool(String name, boolean value) + { + + setBool_0(nativeObj, name, value); + + return; + } + + + // + // C++: void Algorithm::setDouble(string name, double value) + // + + public void setDouble(String name, double value) + { + + setDouble_0(nativeObj, name, value); + + return; + } + + + // + // C++: void Algorithm::setInt(string name, int value) + // + + public void setInt(String name, int value) + { + + setInt_0(nativeObj, name, value); + + return; + } + + + // + // C++: void Algorithm::setMat(string name, Mat value) + // + + public void setMat(String name, Mat value) + { + + setMat_0(nativeObj, name, value.nativeObj); + + return; + } + + + // + // C++: void Algorithm::setMatVector(string name, vector_Mat value) + // + + public void setMatVector(String name, List value) + { + Mat value_mat = Converters.vector_Mat_to_Mat(value); + setMatVector_0(nativeObj, name, value_mat.nativeObj); + + return; + } + + + // + // C++: void Algorithm::setString(string name, string value) + // + + public void setString(String name, String value) + { + + setString_0(nativeObj, name, value); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: bool Algorithm::getBool(string name) + private static native boolean getBool_0(long nativeObj, String name); + + // C++: double Algorithm::getDouble(string name) + private static native double getDouble_0(long nativeObj, String name); + + // C++: int 
Algorithm::getInt(string name) + private static native int getInt_0(long nativeObj, String name); + + // C++: Mat Algorithm::getMat(string name) + private static native long getMat_0(long nativeObj, String name); + + // C++: vector_Mat Algorithm::getMatVector(string name) + private static native long getMatVector_0(long nativeObj, String name); + + // C++: string Algorithm::getString(string name) + private static native String getString_0(long nativeObj, String name); + + // C++: string Algorithm::paramHelp(string name) + private static native String paramHelp_0(long nativeObj, String name); + + // C++: int Algorithm::paramType(string name) + private static native int paramType_0(long nativeObj, String name); + + // C++: void Algorithm::setBool(string name, bool value) + private static native void setBool_0(long nativeObj, String name, boolean value); + + // C++: void Algorithm::setDouble(string name, double value) + private static native void setDouble_0(long nativeObj, String name, double value); + + // C++: void Algorithm::setInt(string name, int value) + private static native void setInt_0(long nativeObj, String name, int value); + + // C++: void Algorithm::setMat(string name, Mat value) + private static native void setMat_0(long nativeObj, String name, long value_nativeObj); + + // C++: void Algorithm::setMatVector(string name, vector_Mat value) + private static native void setMatVector_0(long nativeObj, String name, long value_mat_nativeObj); + + // C++: void Algorithm::setString(string name, string value) + private static native void setString_0(long nativeObj, String name, String value); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/core/Core.java b/src/org/opencv/core/Core.java new file mode 100644 index 0000000..193797b --- /dev/null +++ b/src/org/opencv/core/Core.java @@ -0,0 +1,8198 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.core; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.utils.Converters; + +public class Core { + + public static final String VERSION = "2.4.5.0", NATIVE_LIBRARY_NAME = "opencv_java245"; + public static final int VERSION_EPOCH = 2, VERSION_MAJOR = 4, VERSION_MINOR = 5, VERSION_REVISION = 0; + + private static final int + CV_8U = 0, + CV_8S = 1, + CV_16U = 2, + CV_16S = 3, + CV_32S = 4, + CV_32F = 5, + CV_64F = 6, + CV_USRTYPE1 = 7; + + + public static final int + SVD_MODIFY_A = 1, + SVD_NO_UV = 2, + SVD_FULL_UV = 4, + FILLED = -1, + LINE_AA = 16, + LINE_8 = 8, + LINE_4 = 4, + REDUCE_SUM = 0, + REDUCE_AVG = 1, + REDUCE_MAX = 2, + REDUCE_MIN = 3, + DECOMP_LU = 0, + DECOMP_SVD = 1, + DECOMP_EIG = 2, + DECOMP_CHOLESKY = 3, + DECOMP_QR = 4, + DECOMP_NORMAL = 16, + NORM_INF = 1, + NORM_L1 = 2, + NORM_L2 = 4, + NORM_L2SQR = 5, + NORM_HAMMING = 6, + NORM_HAMMING2 = 7, + NORM_TYPE_MASK = 7, + NORM_RELATIVE = 8, + NORM_MINMAX = 32, + CMP_EQ = 0, + CMP_GT = 1, + CMP_GE = 2, + CMP_LT = 3, + CMP_LE = 4, + CMP_NE = 5, + GEMM_1_T = 1, + GEMM_2_T = 2, + GEMM_3_T = 4, + DFT_INVERSE = 1, + DFT_SCALE = 2, + DFT_ROWS = 4, + DFT_COMPLEX_OUTPUT = 16, + DFT_REAL_OUTPUT = 32, + DCT_INVERSE = DFT_INVERSE, + DCT_ROWS = DFT_ROWS, + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F, + MAGIC_MASK = 0xFFFF0000, + TYPE_MASK = 0x00000FFF, + DEPTH_MASK = 7, + SORT_EVERY_ROW = 0, + SORT_EVERY_COLUMN = 1, + SORT_ASCENDING = 0, + SORT_DESCENDING = 16, + COVAR_SCRAMBLED = 0, + COVAR_NORMAL = 1, + COVAR_USE_AVG = 2, + COVAR_SCALE = 4, + COVAR_ROWS = 8, + COVAR_COLS = 16, + KMEANS_RANDOM_CENTERS = 0, + 
KMEANS_PP_CENTERS = 2, + KMEANS_USE_INITIAL_LABELS = 1, + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16; + + + // + // C++: void LUT(Mat src, Mat lut, Mat& dst, int interpolation = 0) + // + +/** + *

Performs a look-up table transform of an array.

+ * + *

The function LUT fills the output array with values from the + * look-up table. Indices of the entries are taken from the input array. That + * is, the function processes each element of src as follows:

+ * + *

dst(I) <- lut(src(I) + d)

+ * + *

where

+ * + *

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

+ * + * @param src input array of 8-bit elements. + * @param lut look-up table of 256 elements; in case of multi-channel input + * array, the table should either have a single channel (in this case the same + * table is used for all channels) or the same number of channels as in the + * input array. + * @param dst output array of the same size and number of channels as + * src, and the same depth as lut. + * @param interpolation a interpolation + * + * @see org.opencv.core.Core.LUT + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#convertScaleAbs + */ + public static void LUT(Mat src, Mat lut, Mat dst, int interpolation) + { + + LUT_0(src.nativeObj, lut.nativeObj, dst.nativeObj, interpolation); + + return; + } + +/** + *

Performs a look-up table transform of an array.

+ * + *

The function LUT fills the output array with values from the + * look-up table. Indices of the entries are taken from the input array. That + * is, the function processes each element of src as follows:

+ * + *

dst(I) <- lut(src(I) + d)

+ * + *

where

+ * + *

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

+ * + * @param src input array of 8-bit elements. + * @param lut look-up table of 256 elements; in case of multi-channel input + * array, the table should either have a single channel (in this case the same + * table is used for all channels) or the same number of channels as in the + * input array. + * @param dst output array of the same size and number of channels as + * src, and the same depth as lut. + * + * @see org.opencv.core.Core.LUT + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#convertScaleAbs + */ + public static void LUT(Mat src, Mat lut, Mat dst) + { + + LUT_1(src.nativeObj, lut.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: double Mahalanobis(Mat v1, Mat v2, Mat icovar) + // + +/** + *

Calculates the Mahalanobis distance between two vectors.

+ * + *

The function Mahalanobis calculates and returns the weighted + * distance between two vectors:

+ * + *

d(vec1, vec2)= sqrt(sum_(i,j)(icovar(i,j)*(vec1(I)-vec2(I))*(vec1(j)-vec2(j))))

+ * + *

The covariance matrix may be calculated using the "calcCovarMatrix" function + * and then inverted using the "invert" function (preferably using the + * DECOMP_SVD method, as the most accurate).

+ * + * @param v1 a v1 + * @param v2 a v2 + * @param icovar inverse covariance matrix. + * + * @see org.opencv.core.Core.Mahalanobis + */ + public static double Mahalanobis(Mat v1, Mat v2, Mat icovar) + { + + double retVal = Mahalanobis_0(v1.nativeObj, v2.nativeObj, icovar.nativeObj); + + return retVal; + } + + + // + // C++: void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + // + + public static void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat result) + { + + PCABackProject_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, result.nativeObj); + + return; + } + + + // + // C++: void PCACompute(Mat data, Mat& mean, Mat& eigenvectors, int maxComponents = 0) + // + + public static void PCACompute(Mat data, Mat mean, Mat eigenvectors, int maxComponents) + { + + PCACompute_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, maxComponents); + + return; + } + + public static void PCACompute(Mat data, Mat mean, Mat eigenvectors) + { + + PCACompute_1(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj); + + return; + } + + + // + // C++: void PCAComputeVar(Mat data, Mat& mean, Mat& eigenvectors, double retainedVariance) + // + + public static void PCAComputeVar(Mat data, Mat mean, Mat eigenvectors, double retainedVariance) + { + + PCAComputeVar_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, retainedVariance); + + return; + } + + + // + // C++: void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + // + + public static void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat result) + { + + PCAProject_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, result.nativeObj); + + return; + } + + + // + // C++: void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat& dst) + // + + public static void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat dst) + { + + SVBackSubst_0(w.nativeObj, u.nativeObj, vt.nativeObj, rhs.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void SVDecomp(Mat src, Mat& 
w, Mat& u, Mat& vt, int flags = 0) + // + + public static void SVDecomp(Mat src, Mat w, Mat u, Mat vt, int flags) + { + + SVDecomp_0(src.nativeObj, w.nativeObj, u.nativeObj, vt.nativeObj, flags); + + return; + } + + public static void SVDecomp(Mat src, Mat w, Mat u, Mat vt) + { + + SVDecomp_1(src.nativeObj, w.nativeObj, u.nativeObj, vt.nativeObj); + + return; + } + + + // + // C++: void absdiff(Mat src1, Mat src2, Mat& dst) + // + +/** + *

Calculates the per-element absolute difference between two arrays or between + * an array and a scalar.

+ * + *

The function absdiff calculates:

+ *
    + *
  • Absolute difference between two arrays when they have the same size + * and type: + *
+ * + *

dst(I) = saturate(| src1(I) - src2(I)|)

+ * + *
    + *
  • Absolute difference between an array and a scalar when the second + * array is constructed from Scalar or has as many elements as the + * number of channels in src1: + *
+ * + *

dst(I) = saturate(| src1(I) - src2|)

+ * + *
    + *
  • Absolute difference between a scalar and an array when the first array + * is constructed from Scalar or has as many elements as the number + * of channels in src2: + *
+ * + *

dst(I) = saturate(| src1 - src2(I)|)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently.

+ * + *

Note: Saturation is not applied when the arrays have the depth + * CV_32S. You may even get a negative value in the case of + * overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as input arrays. + * + * @see org.opencv.core.Core.absdiff + */ + public static void absdiff(Mat src1, Mat src2, Mat dst) + { + + absdiff_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void absdiff(Mat src1, Scalar src2, Mat& dst) + // + +/** + *

Calculates the per-element absolute difference between two arrays or between + * an array and a scalar.

+ * + *

The function absdiff calculates:

+ *
    + *
  • Absolute difference between two arrays when they have the same size + * and type: + *
+ * + *

dst(I) = saturate(| src1(I) - src2(I)|)

+ * + *
    + *
  • Absolute difference between an array and a scalar when the second + * array is constructed from Scalar or has as many elements as the + * number of channels in src1: + *
+ * + *

dst(I) = saturate(| src1(I) - src2|)

+ * + *
    + *
  • Absolute difference between a scalar and an array when the first array + * is constructed from Scalar or has as many elements as the number + * of channels in src2: + *
+ * + *

dst(I) = saturate(| src1 - src2(I)|)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently.

+ * + *

Note: Saturation is not applied when the arrays have the depth + * CV_32S. You may even get a negative value in the case of + * overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as input arrays. + * + * @see org.opencv.core.Core.absdiff + */ + public static void absdiff(Mat src1, Scalar src2, Mat dst) + { + + absdiff_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed + * array and store the sum as a 32-bit floating-point array. Depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the discussion below). + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Mat src2, Mat dst, Mat mask, int dtype) + { + + add_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed + * array and store the sum as a 32-bit floating-point array. Depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Mat src2, Mat dst, Mat mask) + { + + add_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed + * array and store the sum as a 32-bit floating-point array. Depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Mat src2, Mat dst) + { + + add_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void add(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed + * array and store the sum as a 32-bit floating-point array. Depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the discussion below). + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype) + { + + add_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed + * array and store the sum as a 32-bit floating-point array. Depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Scalar src2, Mat dst, Mat mask) + { + + add_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed + * array and store the sum as a 32-bit floating-point array. Depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Scalar src2, Mat dst) + { + + add_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat& dst, int dtype = -1) + // + +/** + *

Calculates the weighted sum of two arrays.

+ * + *

The function addWeighted calculates the weighted sum of two + * arrays as follows:

+ * + *

dst(I)= saturate(src1(I)* alpha + src2(I)* beta + gamma)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The function can be replaced with a matrix expression:

+ * + *

// C++ code:

+ * + *

dst = src1*alpha + src2*beta + gamma;

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow. + *

+ * + * @param src1 first input array. + * @param alpha weight of the first array elements. + * @param src2 second input array of the same size and channel number as + * src1. + * @param beta weight of the second array elements. + * @param gamma scalar added to each sum. + * @param dst output array that has the same size and number of channels as the + * input arrays. + * @param dtype optional depth of the output array; when both input arrays have + * the same depth, dtype can be set to -1, which will + * be equivalent to src1.depth(). + * + * @see org.opencv.core.Core.addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.core.Mat#convertTo + */ + public static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst, int dtype) + { + + addWeighted_0(src1.nativeObj, alpha, src2.nativeObj, beta, gamma, dst.nativeObj, dtype); + + return; + } + +/** + *

Calculates the weighted sum of two arrays.

+ * + *

The function addWeighted calculates the weighted sum of two + * arrays as follows:

+ * + *

dst(I)= saturate(src1(I)* alpha + src2(I)* beta + gamma)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The function can be replaced with a matrix expression:

+ * + *

// C++ code:

+ * + *

dst = src1*alpha + src2*beta + gamma;

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow. + *

+ * + * @param src1 first input array. + * @param alpha weight of the first array elements. + * @param src2 second input array of the same size and channel number as + * src1. + * @param beta weight of the second array elements. + * @param gamma scalar added to each sum. + * @param dst output array that has the same size and number of channels as the + * input arrays. + * + * @see org.opencv.core.Core.addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.core.Mat#convertTo + */ + public static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst) + { + + addWeighted_1(src1.nativeObj, alpha, src2.nativeObj, beta, gamma, dst.nativeObj); + + return; + } + + + // + // C++: void batchDistance(Mat src1, Mat src2, Mat& dist, int dtype, Mat& nidx, int normType = NORM_L2, int K = 0, Mat mask = Mat(), int update = 0, bool crosscheck = false) + // + + public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K, Mat mask, int update, boolean crosscheck) + { + + batchDistance_0(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj, normType, K, mask.nativeObj, update, crosscheck); + + return; + } + + public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K) + { + + batchDistance_1(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj, normType, K); + + return; + } + + public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx) + { + + batchDistance_2(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj); + + return; + } + + + // + // C++: void bitwise_and(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Calculates the per-element bit-wise conjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical conjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) / src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) / src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 / src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_and + */ + public static void bitwise_and(Mat src1, Mat src2, Mat dst, Mat mask) + { + + bitwise_and_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element bit-wise conjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical conjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) / src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) / src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 / src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * + * @see org.opencv.core.Core.bitwise_and + */ + public static void bitwise_and(Mat src1, Mat src2, Mat dst) + { + + bitwise_and_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void bitwise_not(Mat src, Mat& dst, Mat mask = Mat()) + // + +/** + *

Inverts every bit of an array.

+ * + *

The function calculates per-element bit-wise inversion of the input array:

+ * + *

dst(I) = !src(I)

+ * + *

In case of a floating-point input array, its machine-specific bit + * representation (usually IEEE754-compliant) is used for the operation. In case + * of multi-channel arrays, each channel is processed independently.

+ * + * @param src input array. + * @param dst output array that has the same size and type as the input array. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_not + */ + public static void bitwise_not(Mat src, Mat dst, Mat mask) + { + + bitwise_not_0(src.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Inverts every bit of an array.

+ * + *

The function calculates per-element bit-wise inversion of the input array:

+ * + *

dst(I) = !src(I)

+ * + *

In case of a floating-point input array, its machine-specific bit + * representation (usually IEEE754-compliant) is used for the operation. In case + * of multi-channel arrays, each channel is processed independently.

+ * + * @param src input array. + * @param dst output array that has the same size and type as the input array. + * + * @see org.opencv.core.Core.bitwise_not + */ + public static void bitwise_not(Mat src, Mat dst) + { + + bitwise_not_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void bitwise_or(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Calculates the per-element bit-wise disjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical disjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) V src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) V src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 V src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_or + */ + public static void bitwise_or(Mat src1, Mat src2, Mat dst, Mat mask) + { + + bitwise_or_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element bit-wise disjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical disjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) V src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) V src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 V src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * + * @see org.opencv.core.Core.bitwise_or + */ + public static void bitwise_or(Mat src1, Mat src2, Mat dst) + { + + bitwise_or_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void bitwise_xor(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Calculates the per-element bit-wise "exclusive or" operation on two arrays or + * an array and a scalar.

+ * + *

The function calculates the per-element bit-wise logical "exclusive-or" + * operation for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I)(+) src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1(+) src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the 2nd and + * 3rd cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_xor + */ + public static void bitwise_xor(Mat src1, Mat src2, Mat dst, Mat mask) + { + + bitwise_xor_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element bit-wise "exclusive or" operation on two arrays or + * an array and a scalar.

+ * + *

The function calculates the per-element bit-wise logical "exclusive-or" + * operation for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I)(+) src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1(+) src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the 2nd and + * 3rd cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * + * @see org.opencv.core.Core.bitwise_xor + */ + public static void bitwise_xor(Mat src1, Mat src2, Mat dst) + { + + bitwise_xor_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void calcCovarMatrix(Mat samples, Mat& covar, Mat& mean, int flags, int ctype = CV_64F) + // + +/** + *

Calculates the covariance matrix of a set of vectors.

+ * + *

The functions calcCovarMatrix calculate the covariance matrix + * and, optionally, the mean vector of the set of input vectors.

+ * + * @param samples samples stored either as separate matrices or as rows/columns + * of a single matrix. + * @param covar output covariance matrix of the type ctype and + * square size. + * @param mean input or output (depending on the flags) array as the average + * value of the input vectors. + * @param flags operation flags as a combination of the following values: + *
    + *
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...]^T * [ vects [0]- mean, + * vects [1]- mean,...],

+ * + *

The covariance matrix will be nsamples x nsamples. Such an + * unusual covariance matrix is used for fast PCA of a set of very large vectors + * (see, for example, the EigenFaces technique for face recognition). + * Eigenvalues of this "scrambled" matrix match the eigenvalues of the true + * covariance matrix. The "true" eigenvectors can be easily calculated from the + * eigenvectors of the "scrambled" covariance matrix.

+ *
    + *
  • CV_COVAR_NORMAL The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...] * [ vects [0]- mean, + * vects [1]- mean,...]^T,

+ * + *

covar will be a square matrix of the same size as the total + * number of elements in each input vector. One and only one of + * CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be + * specified.

+ *
    + *
  • CV_COVAR_USE_AVG If the flag is specified, the function does not + * calculate mean from the input vectors but, instead, uses the + * passed mean vector. This is useful if mean has been + * pre-calculated or known in advance, or if the covariance matrix is calculated + * by parts. In this case, mean is not a mean vector of the input + * sub-set of vectors but rather the mean vector of the whole set. + *
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is + * scaled. In the "normal" mode, scale is 1./nsamples. + * In the "scrambled" mode, scale is the reciprocal of the total + * number of elements in each input vector. By default (if the flag is not + * specified), the covariance matrix is not scaled (scale=1). + *
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as rows of the + * samples matrix. mean should be a single-row vector + * in this case. + *
  • CV_COVAR_COLS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as columns of the + * samples matrix. mean should be a single-column + * vector in this case. + *
+ * @param ctype type of the matrixl; it equals 'CV_64F' by default. + * + * @see org.opencv.core.Core.calcCovarMatrix + * @see org.opencv.core.Core#Mahalanobis + * @see org.opencv.core.Core#mulTransposed + */ + public static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype) + { + + calcCovarMatrix_0(samples.nativeObj, covar.nativeObj, mean.nativeObj, flags, ctype); + + return; + } + +/** + *

Calculates the covariance matrix of a set of vectors.

+ * + *

The functions calcCovarMatrix calculate the covariance matrix + * and, optionally, the mean vector of the set of input vectors.

+ * + * @param samples samples stored either as separate matrices or as rows/columns + * of a single matrix. + * @param covar output covariance matrix of the type ctype and + * square size. + * @param mean input or output (depending on the flags) array as the average + * value of the input vectors. + * @param flags operation flags as a combination of the following values: + *
    + *
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...]^T * [ vects [0]- mean, + * vects [1]- mean,...],

+ * + *

The covariance matrix will be nsamples x nsamples. Such an + * unusual covariance matrix is used for fast PCA of a set of very large vectors + * (see, for example, the EigenFaces technique for face recognition). + * Eigenvalues of this "scrambled" matrix match the eigenvalues of the true + * covariance matrix. The "true" eigenvectors can be easily calculated from the + * eigenvectors of the "scrambled" covariance matrix.

+ *
    + *
  • CV_COVAR_NORMAL The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...] * [ vects [0]- mean, + * vects [1]- mean,...]^T,

+ * + *

covar will be a square matrix of the same size as the total + * number of elements in each input vector. One and only one of + * CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be + * specified.

+ *
    + *
  • CV_COVAR_USE_AVG If the flag is specified, the function does not + * calculate mean from the input vectors but, instead, uses the + * passed mean vector. This is useful if mean has been + * pre-calculated or known in advance, or if the covariance matrix is calculated + * by parts. In this case, mean is not a mean vector of the input + * sub-set of vectors but rather the mean vector of the whole set. + *
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is + * scaled. In the "normal" mode, scale is 1./nsamples. + * In the "scrambled" mode, scale is the reciprocal of the total + * number of elements in each input vector. By default (if the flag is not + * specified), the covariance matrix is not scaled (scale=1). + *
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as rows of the + * samples matrix. mean should be a single-row vector + * in this case. + *
  • CV_COVAR_COLS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as columns of the + * samples matrix. mean should be a single-column + * vector in this case. + *
+ * + * @see org.opencv.core.Core.calcCovarMatrix + * @see org.opencv.core.Core#Mahalanobis + * @see org.opencv.core.Core#mulTransposed + */ + public static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags) + { + + calcCovarMatrix_1(samples.nativeObj, covar.nativeObj, mean.nativeObj, flags); + + return; + } + + + // + // C++: void cartToPolar(Mat x, Mat y, Mat& magnitude, Mat& angle, bool angleInDegrees = false) + // + +/** + *

Calculates the magnitude and angle of 2D vectors.

+ * + *

The function cartToPolar calculates either the magnitude, angle, + * or both for every 2D vector (x(I),y(I)):

+ * + *

magnitude(I)= sqrt(x(I)^2+y(I)^2), + * angle(I)= atan2(y(I), x(I))[ *180 / pi ]

+ * + *

The angles are calculated with accuracy about 0.3 degrees. For the point + * (0,0), the angle is set to 0.

+ * + * @param x array of x-coordinates; this must be a single-precision or + * double-precision floating-point array. + * @param y array of y-coordinates, that must have the same size and same type + * as x. + * @param magnitude output array of magnitudes of the same size and type as + * x. + * @param angle output array of angles that has the same size and type as + * x; the angles are measured in radians (from 0 to 2*Pi) or in + * degrees (0 to 360 degrees). + * @param angleInDegrees a flag, indicating whether the angles are measured in + * radians (which is by default), or in degrees. + * + * @see org.opencv.core.Core.cartToPolar + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, boolean angleInDegrees) + { + + cartToPolar_0(x.nativeObj, y.nativeObj, magnitude.nativeObj, angle.nativeObj, angleInDegrees); + + return; + } + +/** + *

Calculates the magnitude and angle of 2D vectors.

+ * + *

The function cartToPolar calculates either the magnitude, angle, + * or both for every 2D vector (x(I),y(I)):

+ * + *

magnitude(I)= sqrt(x(I)^2+y(I)^2), + * angle(I)= atan2(y(I), x(I))[ *180 / pi ]

+ * + *

The angles are calculated with accuracy about 0.3 degrees. For the point + * (0,0), the angle is set to 0.

+ * + * @param x array of x-coordinates; this must be a single-precision or + * double-precision floating-point array. + * @param y array of y-coordinates, that must have the same size and same type + * as x. + * @param magnitude output array of magnitudes of the same size and type as + * x. + * @param angle output array of angles that has the same size and type as + * x; the angles are measured in radians (from 0 to 2*Pi) or in + * degrees (0 to 360 degrees). + * + * @see org.opencv.core.Core.cartToPolar + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle) + { + + cartToPolar_1(x.nativeObj, y.nativeObj, magnitude.nativeObj, angle.nativeObj); + + return; + } + + + // + // C++: bool checkRange(Mat a, bool quiet = true, _hidden_ * pos = 0, double minVal = -DBL_MAX, double maxVal = DBL_MAX) + // + +/** + *

Checks every element of an input array for invalid values.

+ * + *

The functions checkRange check that every array element is + * neither NaN nor infinite. When minVal < -DBL_MAX and + * maxVal < DBL_MAX, the functions also check that each value is + * between minVal and maxVal. In case of multi-channel + * arrays, each channel is processed independently. + * If some values are out of range, position of the first outlier is stored in + * pos (when pos != NULL). Then, the functions either + * return false (when quiet=true) or throw an exception.

+ * + * @param a input array. + * @param quiet a flag, indicating whether the functions quietly return false + * when the array elements are out of range or they throw an exception. + * @param minVal inclusive lower boundary of valid values range. + * @param maxVal exclusive upper boundary of valid values range. + * + * @see org.opencv.core.Core.checkRange + */ + public static boolean checkRange(Mat a, boolean quiet, double minVal, double maxVal) + { + + boolean retVal = checkRange_0(a.nativeObj, quiet, minVal, maxVal); + + return retVal; + } + +/** + *

Checks every element of an input array for invalid values.

+ * + *

The functions checkRange check that every array element is + * neither NaN nor infinite. When minVal < -DBL_MAX and + * maxVal < DBL_MAX, the functions also check that each value is + * between minVal and maxVal. In case of multi-channel + * arrays, each channel is processed independently. + * If some values are out of range, position of the first outlier is stored in + * pos (when pos != NULL). Then, the functions either + * return false (when quiet=true) or throw an exception.

+ * + * @param a input array. + * + * @see org.opencv.core.Core.checkRange + */ + public static boolean checkRange(Mat a) + { + + boolean retVal = checkRange_1(a.nativeObj); + + return retVal; + } + + + // + // C++: void circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a circle.

+ * + *

The function circle draws a simple or filled circle with a given + * center and radius.

+ * + * @param img Image where the circle is drawn. + * @param center Center of the circle. + * @param radius Radius of the circle. + * @param color Circle color. + * @param thickness Thickness of the circle outline, if positive. Negative + * thickness means that a filled circle is to be drawn. + * @param lineType Type of the circle boundary. See the "line" description. + * @param shift Number of fractional bits in the coordinates of the center and + * in the radius value. + * + * @see org.opencv.core.Core.circle + */ + public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) + { + + circle_0(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a circle.

+ * + *

The function circle draws a simple or filled circle with a given + * center and radius.

+ * + * @param img Image where the circle is drawn. + * @param center Center of the circle. + * @param radius Radius of the circle. + * @param color Circle color. + * @param thickness Thickness of the circle outline, if positive. Negative + * thickness means that a filled circle is to be drawn. + * + * @see org.opencv.core.Core.circle + */ + public static void circle(Mat img, Point center, int radius, Scalar color, int thickness) + { + + circle_1(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a circle.

+ * + *

The function circle draws a simple or filled circle with a given + * center and radius.

+ * + * @param img Image where the circle is drawn. + * @param center Center of the circle. + * @param radius Radius of the circle. + * @param color Circle color. + * + * @see org.opencv.core.Core.circle + */ + public static void circle(Mat img, Point center, int radius, Scalar color) + { + + circle_2(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: bool clipLine(Rect imgRect, Point& pt1, Point& pt2) + // + +/** + *

Clips the line against the image rectangle.

+ * + *

The functions clipLine calculate a part of the line segment that + * is entirely within the specified rectangle. + * They return false if the line segment is completely outside the + * rectangle. Otherwise, they return true.

+ * + * @param imgRect Image rectangle. + * @param pt1 First line point. + * @param pt2 Second line point. + * + * @see org.opencv.core.Core.clipLine + */ + public static boolean clipLine(Rect imgRect, Point pt1, Point pt2) + { + double[] pt1_out = new double[2]; + double[] pt2_out = new double[2]; + boolean retVal = clipLine_0(imgRect.x, imgRect.y, imgRect.width, imgRect.height, pt1.x, pt1.y, pt1_out, pt2.x, pt2.y, pt2_out); + if(pt1!=null){ pt1.x = pt1_out[0]; pt1.y = pt1_out[1]; } + if(pt2!=null){ pt2.x = pt2_out[0]; pt2.y = pt2_out[1]; } + return retVal; + } + + + // + // C++: void compare(Mat src1, Mat src2, Mat& dst, int cmpop) + // + +/** + *

Performs the per-element comparison of two arrays or an array and scalar + * value.

+ * + *

The function compares:

+ *
    + *
  • Elements of two arrays when src1 and src2 + * have the same size: + *
+ * + *

dst(I) = src1(I) cmpop src2(I)

+ * + *
    + *
  • Elements of src1 with a scalar src2 when + * src2 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1(I) cmpop src2

+ * + *
    + *
  • src1 with elements of src2 when + * src1 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1 cmpop src2(I)

+ * + *

When the comparison result is true, the corresponding element of output array + * is set to 255.The comparison operations can be replaced with the equivalent + * matrix expressions:

+ * + *

// C++ code:

+ * + *

Mat dst1 = src1 >= src2;

+ * + *

Mat dst2 = src1 < 8;...

+ * + * @param src1 first input array or a scalar (in the case of cvCmp, + * cv.Cmp, cvCmpS, cv.CmpS it is always + * an array); when it is an array, it must have a single channel. + * @param src2 second input array or a scalar (in the case of cvCmp + * and cv.Cmp it is always an array; in the case of + * cvCmpS, cv.CmpS it is always a scalar); when it is + * an array, it must have a single channel. + * @param dst output array that has the same size as the input arrays and type= + * CV_8UC1. + * @param cmpop a flag, that specifies correspondence between the arrays: + *
    + *
  • CMP_EQ src1 is equal to src2. + *
  • CMP_GT src1 is greater than src2. + *
  • CMP_GE src1 is greater than or equal to src2. + *
  • CMP_LT src1 is less than src2. + *
  • CMP_LE src1 is less than or equal to src2. + *
  • CMP_NE src1 is unequal to src2. + *
+ * + * @see org.opencv.core.Core.compare + * @see org.opencv.imgproc.Imgproc#threshold + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#checkRange + * @see org.opencv.core.Core#min + */ + public static void compare(Mat src1, Mat src2, Mat dst, int cmpop) + { + + compare_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, cmpop); + + return; + } + + + // + // C++: void compare(Mat src1, Scalar src2, Mat& dst, int cmpop) + // + +/** + *

Performs the per-element comparison of two arrays or an array and scalar + * value.

+ * + *

The function compares:

+ *
    + *
  • Elements of two arrays when src1 and src2 + * have the same size: + *
+ * + *

dst(I) = src1(I) cmpop src2(I)

+ * + *
    + *
  • Elements of src1 with a scalar src2 when + * src2 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1(I) cmpop src2

+ * + *
    + *
  • src1 with elements of src2 when + * src1 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1 cmpop src2(I)

+ * + *

When the comparison result is true, the corresponding element of output array + * is set to 255.The comparison operations can be replaced with the equivalent + * matrix expressions:

+ * + *

// C++ code:

+ * + *

Mat dst1 = src1 >= src2;

+ * + *

Mat dst2 = src1 < 8;...

+ * + * @param src1 first input array or a scalar (in the case of cvCmp, + * cv.Cmp, cvCmpS, cv.CmpS it is always + * an array); when it is an array, it must have a single channel. + * @param src2 second input array or a scalar (in the case of cvCmp + * and cv.Cmp it is always an array; in the case of + * cvCmpS, cv.CmpS it is always a scalar); when it is + * an array, it must have a single channel. + * @param dst output array that has the same size as the input arrays and type= + * CV_8UC1. + * @param cmpop a flag, that specifies correspondence between the arrays: + *
    + *
  • CMP_EQ src1 is equal to src2. + *
  • CMP_GT src1 is greater than src2. + *
  • CMP_GE src1 is greater than or equal to src2. + *
  • CMP_LT src1 is less than src2. + *
  • CMP_LE src1 is less than or equal to src2. + *
  • CMP_NE src1 is unequal to src2. + *
+ * + * @see org.opencv.core.Core.compare + * @see org.opencv.imgproc.Imgproc#threshold + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#checkRange + * @see org.opencv.core.Core#min + */ + public static void compare(Mat src1, Scalar src2, Mat dst, int cmpop) + { + + compare_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, cmpop); + + return; + } + + + // + // C++: void completeSymm(Mat& mtx, bool lowerToUpper = false) + // + +/** + *

Copies the lower or the upper half of a square matrix to another half.

+ * + *

The function completeSymm copies the lower half of a square + * matrix to its another half. The matrix diagonal remains unchanged:

+ *
    + *
  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false + *
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true + *
+ * + * @param mtx input-output floating-point square matrix. + * @param lowerToUpper operation flag; if true, the lower half is copied to the + * upper half. Otherwise, the upper half is copied to the lower half. + * + * @see org.opencv.core.Core.completeSymm + * @see org.opencv.core.Core#transpose + * @see org.opencv.core.Core#flip + */ + public static void completeSymm(Mat mtx, boolean lowerToUpper) + { + + completeSymm_0(mtx.nativeObj, lowerToUpper); + + return; + } + +/** + *

Copies the lower or the upper half of a square matrix to another half.

+ * + *

The function completeSymm copies the lower half of a square + * matrix to its another half. The matrix diagonal remains unchanged:

+ *
    + *
  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false + *
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true + *
+ * + * @param mtx input-output floating-point square matrix. + * + * @see org.opencv.core.Core.completeSymm + * @see org.opencv.core.Core#transpose + * @see org.opencv.core.Core#flip + */ + public static void completeSymm(Mat mtx) + { + + completeSymm_1(mtx.nativeObj); + + return; + } + + + // + // C++: void convertScaleAbs(Mat src, Mat& dst, double alpha = 1, double beta = 0) + // + +/** + *

Scales, calculates absolute values, and converts the result to 8-bit.

+ * + *

On each element of the input array, the function convertScaleAbs + * performs three operations sequentially: scaling, taking an absolute value, + * conversion to an unsigned 8-bit type:

+ * + *

dst(I)= saturate_cast<uchar>(| src(I)* alpha + beta|)<BR>In case + * of multi-channel arrays, the function processes each channel independently. + * When the output is not 8-bit, the operation can be emulated by calling the + * Mat.convertTo method(or by using matrix expressions) and then + * by calculating an absolute value of the result. For example: + * <BR><code>

+ * + *

// C++ code:

+ * + *

Mat_ A(30,30);

+ * + *

randu(A, Scalar(-100), Scalar(100));

+ * + *

Mat_ B = A*5 + 3;

+ * + *

B = abs(B);

+ * + *

// Mat_ B = abs(A*5+3) will also do the job,

+ * + *

// but it will allocate a temporary matrix

+ * + * @param src input array. + * @param dst output array. + * @param alpha optional scale factor. + * @param beta optional delta added to the scaled values. + * + * @see org.opencv.core.Core.convertScaleAbs + * @see org.opencv.core.Mat#convertTo + */ + public static void convertScaleAbs(Mat src, Mat dst, double alpha, double beta) + { + + convertScaleAbs_0(src.nativeObj, dst.nativeObj, alpha, beta); + + return; + } + +/** + *

Scales, calculates absolute values, and converts the result to 8-bit.

+ * + *

On each element of the input array, the function convertScaleAbs + * performs three operations sequentially: scaling, taking an absolute value, + * conversion to an unsigned 8-bit type:

+ * + *

dst(I)= saturate_cast<uchar>(| src(I)* alpha + beta|)<BR>In case + * of multi-channel arrays, the function processes each channel independently. + * When the output is not 8-bit, the operation can be emulated by calling the + * Mat.convertTo method(or by using matrix expressions) and then + * by calculating an absolute value of the result. For example: + * <BR><code>

+ * + *

// C++ code:

+ * + *

Mat_ A(30,30);

+ * + *

randu(A, Scalar(-100), Scalar(100));

+ * + *

Mat_ B = A*5 + 3;

+ * + *

B = abs(B);

+ * + *

// Mat_ B = abs(A*5+3) will also do the job,

+ * + *

// but it will allocate a temporary matrix

+ * + * @param src input array. + * @param dst output array. + * + * @see org.opencv.core.Core.convertScaleAbs + * @see org.opencv.core.Mat#convertTo + */ + public static void convertScaleAbs(Mat src, Mat dst) + { + + convertScaleAbs_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: int countNonZero(Mat src) + // + +/** + *

Counts non-zero array elements.

+ * + *

The function returns the number of non-zero elements in src :

+ * + *

sum(by: I: src(I) != 0) 1

+ * + * @param src single-channel array. + * + * @see org.opencv.core.Core.countNonZero + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static int countNonZero(Mat src) + { + + int retVal = countNonZero_0(src.nativeObj); + + return retVal; + } + + + // + // C++: float cubeRoot(float val) + // + +/** + *

Computes the cube root of an argument.

+ * + *

The function cubeRoot computes sqrt3(val). Negative + * arguments are handled correctly. NaN and Inf are not handled. The accuracy + * approaches the maximum possible accuracy for single-precision data.

+ * + * @param val A function argument. + * + * @see org.opencv.core.Core.cubeRoot + */ + public static float cubeRoot(float val) + { + + float retVal = cubeRoot_0(val); + + return retVal; + } + + + // + // C++: void dct(Mat src, Mat& dst, int flags = 0) + // + +/** + *

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

+ * + *

The function dct performs a forward or inverse discrete Cosine + * transform (DCT) of a 1D or 2D floating-point array:

+ *
    + *
  • Forward Cosine transform of a 1D vector of N elements: + *
+ * + *

Y = C^N * X

+ * + *

where

+ * + *

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

+ * + *

and

+ * + *

alpha_0=1, alpha_j=2 for *j > 0*.

+ *
    + *
  • Inverse Cosine transform of a 1D vector of N elements: + *
+ * + *

X = (C^N)^(-1) * Y = (C^N)^T * Y

+ * + *

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

+ *
    + *
  • Forward 2D Cosine transform of M x N matrix: + *
+ * + *

Y = C^N * X * (C^N)^T

+ * + *
    + *
  • Inverse 2D Cosine transform of M x N matrix: + *
+ * + *

X = (C^N)^T * X * C^N

+ * + *

The function chooses the mode of operation by looking at the flags and size + * of the input array:

+ *
    + *
  • If (flags & DCT_INVERSE) == 0, the function does a + * forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. + *
  • If (flags & DCT_ROWS) != 0, the function performs a 1D + * transform of each row. + *
  • If the array is a single column or a single row, the function performs + * a 1D transform. + *
  • If none of the above is true, the function performs a 2D transform. + *
+ * + *

Note:

+ * + *

Currently dct supports even-size arrays (2, 4, 6...). For data + * analysis and approximation, you can pad the array when necessary.

+ * + *

Also, the function performance depends very much, and not monotonically, on + * the array size (see"getOptimalDFTSize"). In the current implementation DCT of + * a vector of size N is calculated via DFT of a vector of size + * N/2. Thus, the optimal DCT size N1 >= N can be + * calculated as:

+ * + *

// C++ code:

+ * + *

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

+ * + *

N1 = getOptimalDCTSize(N);

+ * + *

+ * + * @param src input floating-point array. + * @param dst output array of the same size and type as src. + * @param flags transformation flags as a combination of the following values: + *
    + *
  • DCT_INVERSE performs an inverse 1D or 2D transform instead of the + * default forward transform. + *
  • DCT_ROWS performs a forward or inverse transform of every individual + * row of the input matrix. This flag enables you to transform multiple vectors + * simultaneously and can be used to decrease the overhead (which is sometimes + * several times larger than the processing itself) to perform 3D and + * higher-dimensional transforms and so forth. + *
+ * + * @see org.opencv.core.Core.dct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dct(Mat src, Mat dst, int flags) + { + + dct_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + +/** + *

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

+ * + *

The function dct performs a forward or inverse discrete Cosine + * transform (DCT) of a 1D or 2D floating-point array:

+ *
    + *
  • Forward Cosine transform of a 1D vector of N elements: + *
+ * + *

Y = C^N * X

+ * + *

where

+ * + *

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

+ * + *

and

+ * + *

alpha_0=1, alpha_j=2 for *j > 0*.

+ *
    + *
  • Inverse Cosine transform of a 1D vector of N elements: + *
+ * + *

X = (C^N)^(-1) * Y = (C^N)^T * Y

+ * + *

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

+ *
    + *
  • Forward 2D Cosine transform of M x N matrix: + *
+ * + *

Y = C^N * X * (C^N)^T

+ * + *
    + *
  • Inverse 2D Cosine transform of M x N matrix: + *
+ * + *

X = (C^N)^T * X * C^N

+ * + *

The function chooses the mode of operation by looking at the flags and size + * of the input array:

+ *
    + *
  • If (flags & DCT_INVERSE) == 0, the function does a + * forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. + *
  • If (flags & DCT_ROWS) != 0, the function performs a 1D + * transform of each row. + *
  • If the array is a single column or a single row, the function performs + * a 1D transform. + *
  • If none of the above is true, the function performs a 2D transform. + *
+ * + *

Note:

+ * + *

Currently dct supports even-size arrays (2, 4, 6...). For data + * analysis and approximation, you can pad the array when necessary.

+ * + *

Also, the function performance depends very much, and not monotonically, on + * the array size (see"getOptimalDFTSize"). In the current implementation DCT of + * a vector of size N is calculated via DFT of a vector of size + * N/2. Thus, the optimal DCT size N1 >= N can be + * calculated as:

+ * + *

// C++ code:

+ * + *

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

+ * + *

N1 = getOptimalDCTSize(N);

+ * + *

+ * + * @param src input floating-point array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.dct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dct(Mat src, Mat dst) + { + + dct_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: double determinant(Mat mtx) + // + +/** + *

Returns the determinant of a square floating-point matrix.

+ * + *

The function determinant calculates and returns the determinant + * of the specified matrix. For small matrices (mtx.cols=mtx.rows<=3), + * the direct method is used. For larger matrices, the function uses LU + * factorization with partial pivoting.

+ * + *

For symmetric positively-determined matrices, it is also possible to use + * "eigen" decomposition to calculate the determinant.

+ * + * @param mtx input matrix that must have CV_32FC1 or + * CV_64FC1 type and square size. + * + * @see org.opencv.core.Core.determinant + * @see org.opencv.core.Core#invert + * @see org.opencv.core.Core#solve + * @see org.opencv.core.Core#eigen + * @see org.opencv.core.Core#trace + */ + public static double determinant(Mat mtx) + { + + double retVal = determinant_0(mtx.nativeObj); + + return retVal; + } + + + // + // C++: void dft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + // + +/** + *

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D + * floating-point array.

+ * + *

The function performs one of the following:

+ *
    + *
  • Forward the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

Y = F^N * X,

+ * + *

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

+ *
    + *
  • Inverse the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

X'= (F^N)^(-1) * Y = (F^N)^* * y + * X = (1/N) * X,

+ * + *

where F^*=(Re(F^N)-Im(F^N))^T

+ *
    + *
  • Forward the 2D Fourier transform of a M x N matrix: + *
+ * + *

Y = F^M * X * F^N

+ * + *
    + *
  • Inverse the 2D Fourier transform of a M x N matrix: + *
+ * + *

X'= (F^M)^* * Y * (F^N)^* + * X = 1/(M * N) * X'

+ * + *

In case of real (single-channel) data, the output spectrum of the forward + * Fourier transform or input spectrum of the inverse Fourier transform can be + * represented in a packed format called *CCS* (complex-conjugate-symmetrical). + * It was borrowed from IPL (Intel* Image Processing Library). Here is how 2D + * *CCS* spectrum looks:

+ * + *

Re Y_(0,0) Re Y_(0,1) Im Y_(0,1) Re Y_(0,2) Im Y_(0,2) *s Re Y_(0,N/2-1) + * Im Y_(0,N/2-1) Re Y_(0,N/2) + * Re Y_(1,0) Re Y_(1,1) Im Y_(1,1) Re Y_(1,2) Im Y_(1,2) *s Re Y_(1,N/2-1) Im + * Y_(1,N/2-1) Re Y_(1,N/2) + * Im Y_(1,0) Re Y_(2,1) Im Y_(2,1) Re Y_(2,2) Im Y_(2,2) *s Re Y_(2,N/2-1) Im + * Y_(2,N/2-1) Im Y_(1,N/2)........................... + * Re Y_(M/2-1,0) Re Y_(M-3,1) Im Y_(M-3,1)......... Re Y_(M-3,N/2-1) Im + * Y_(M-3,N/2-1) Re Y_(M/2-1,N/2) + * Im Y_(M/2-1,0) Re Y_(M-2,1) Im Y_(M-2,1)......... Re Y_(M-2,N/2-1) Im + * Y_(M-2,N/2-1) Im Y_(M/2-1,N/2) + * Re Y_(M/2,0) Re Y_(M-1,1) Im Y_(M-1,1)......... Re Y_(M-1,N/2-1) Im + * Y_(M-1,N/2-1) Re Y_(M/2,N/2)

+ * + *

In case of 1D transform of a real vector, the output looks like the first row + * of the matrix above.

+ * + *

So, the function chooses an operation mode depending on the flags and size of + * the input array:

+ *
    + *
  • If DFT_ROWS is set or the input array has a single row or + * single column, the function performs a 1D forward or inverse transform of + * each row of a matrix when DFT_ROWS is set. Otherwise, it + * performs a 2D transform. + *
  • If the input array is real and DFT_INVERSE is not set, + * the function performs a forward 1D or 2D transform: + *
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex + * matrix of the same size as input. + *
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real + * matrix of the same size as input. In case of 2D transform, it uses the packed + * format as shown above. In case of a single 1D transform, it looks like the + * first row of the matrix above. In case of multiple 1D transforms (when using + * the DCT_ROWS flag), each row of the output matrix looks like the + * first row of the matrix above. + *
  • If the input array is complex and either DFT_INVERSE or + * DFT_REAL_OUTPUT are not set, the output is a complex array of + * the same size as input. The function performs a forward or inverse 1D or 2D + * transform of the whole input array or each row of the input array + * independently, depending on the flags DFT_INVERSE and + * DFT_ROWS. + *
  • When DFT_INVERSE is set and the input array is real, or + * it is complex but DFT_REAL_OUTPUT is set, the output is a real + * array of the same size as input. The function performs a 1D or 2D inverse + * transformation of the whole input array or each individual row, depending on + * the flags DFT_INVERSE and DFT_ROWS. + *
+ * + *

If DFT_SCALE is set, the scaling is done after the + * transformation.

+ * + *

Unlike "dct", the function supports arrays of arbitrary size. But only those + * arrays are processed efficiently, whose sizes can be factorized in a product + * of small prime numbers (2, 3, and 5 in the current implementation). Such an + * efficient DFT size can be calculated using the "getOptimalDFTSize" method. + * The sample below illustrates how to calculate a DFT-based convolution of two + * 2D real arrays:

+ * + *

// C++ code:

+ * + *

void convolveDFT(InputArray A, InputArray B, OutputArray C)

+ * + * + *

// reallocate the output array if needed

+ * + *

C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());

+ * + *

Size dftSize;

+ * + *

// calculate the size of DFT transform

+ * + *

dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);

+ * + *

dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

+ * + *

// allocate temporary buffers and initialize them with 0's

+ * + *

Mat tempA(dftSize, A.type(), Scalar.all(0));

+ * + *

Mat tempB(dftSize, B.type(), Scalar.all(0));

+ * + *

// copy A and B to the top-left corners of tempA and tempB, respectively

+ * + *

Mat roiA(tempA, Rect(0,0,A.cols,A.rows));

+ * + *

A.copyTo(roiA);

+ * + *

Mat roiB(tempB, Rect(0,0,B.cols,B.rows));

+ * + *

B.copyTo(roiB);

+ * + *

// now transform the padded A & B in-place;

+ * + *

// use "nonzeroRows" hint for faster processing

+ * + *

dft(tempA, tempA, 0, A.rows);

+ * + *

dft(tempB, tempB, 0, B.rows);

+ * + *

// multiply the spectrums;

+ * + *

// the function handles packed spectrum representations well

+ * + *

mulSpectrums(tempA, tempB, tempA);

+ * + *

// transform the product back from the frequency domain.

+ * + *

// Even though all the result rows will be non-zero,

+ * + *

// you need only the first C.rows of them, and thus you

+ * + *

// pass nonzeroRows == C.rows

+ * + *

dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

+ * + *

// now copy the result back to C.

+ * + *

tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

+ * + *

// all the temporary buffers will be deallocated automatically

+ * + * + *

To optimize this sample, consider the following approaches:

+ *
    + *
  • Since nonzeroRows != 0 is passed to the forward transform + * calls and since A and B are copied to the top-left + * corners of tempA and tempB, respectively, it is not + * necessary to clear the whole tempA and tempB. It is + * only necessary to clear the tempA.cols - A.cols + * (tempB.cols - B.cols) rightmost columns of the matrices. + *
  • This DFT-based convolution does not have to be applied to the whole + * big arrays, especially if B is significantly smaller than + * A or vice versa. Instead, you can calculate convolution by + * parts. To do this, you need to split the output array C into + * multiple tiles. For each tile, estimate which parts of A and + * B are required to calculate convolution in this tile. If the + * tiles in C are too small, the speed will decrease a lot because + * of repeated work. In the ultimate case, when each tile in C is a + * single pixel, the algorithm becomes equivalent to the naive convolution + * algorithm. If the tiles are too big, the temporary arrays tempA + * and tempB become too big and there is also a slowdown because of + * bad cache locality. So, there is an optimal tile size somewhere in the + * middle. + *
  • If different tiles in C can be calculated in parallel + * and, thus, the convolution is done by parts, the loop can be threaded. + *
+ * + *

All of the above improvements have been implemented in "matchTemplate" and + * "filter2D". Therefore, by using them, you can get the performance even better + * than with the above theoretically optimal implementation. Though, those two + * functions actually calculate cross-correlation, not convolution, so you need + * to "flip" the second convolution operand B vertically and + * horizontally using "flip".

+ * + * @param src input array that could be real or complex. + * @param dst output array whose size and type depends on the flags. + * @param flags transformation flags, representing a combination of the + * following values: + *
    + *
  • DFT_INVERSE performs an inverse 1D or 2D transform instead of the + * default forward transform. + *
  • DFT_SCALE scales the result: divide it by the number of array + * elements. Normally, it is combined with DFT_INVERSE. + *
  • DFT_ROWS performs a forward or inverse transform of every individual + * row of the input matrix; this flag enables you to transform multiple vectors + * simultaneously and can be used to decrease the overhead (which is sometimes + * several times larger than the processing itself) to perform 3D and + * higher-dimensional transformations and so forth. + *
  • DFT_COMPLEX_OUTPUT performs a forward transformation of 1D or 2D real + * array; the result, though being a complex array, has complex-conjugate + * symmetry (*CCS*, see the function description below for details), and such an + * array can be packed into a real array of the same size as input, which is the + * fastest option and which is what the function does by default; however, you + * may wish to get a full complex array (for simpler spectrum analysis, and so + * on) - pass the flag to enable the function to produce a full-size complex + * output array. + *
  • DFT_REAL_OUTPUT performs an inverse transformation of a 1D or 2D + * complex array; the result is normally a complex array of the same size, + * however, if the input array has conjugate-complex symmetry (for example, it + * is a result of forward transformation with DFT_COMPLEX_OUTPUT + * flag), the output is a real array; while the function itself does not check + * whether the input is symmetrical or not, you can pass the flag and then the + * function will assume the symmetry and produce the real output array (note + * that when the input is packed into a real array and inverse transformation is + * executed, the function treats the input as a packed complex-conjugate + * symmetrical array, and the output will also be a real array). + *
+ * @param nonzeroRows when the parameter is not zero, the function assumes that + * only the first nonzeroRows rows of the input array + * (DFT_INVERSE is not set) or only the first nonzeroRows + * of the output array (DFT_INVERSE is set) contain non-zeros, + * thus, the function can handle the rest of the rows more efficiently and save + * some time; this technique is very useful for calculating array + * cross-correlation or convolution using DFT. + * + * @see org.opencv.core.Core.dft + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#flip + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#phase + * @see org.opencv.core.Core#dct + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dft(Mat src, Mat dst, int flags, int nonzeroRows) + { + + dft_0(src.nativeObj, dst.nativeObj, flags, nonzeroRows); + + return; + } + +/** + *

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D + * floating-point array.

+ * + *

The function performs one of the following:

+ *
    + *
  • Forward the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

Y = F^N * X,

+ * + *

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

+ *
    + *
  • Inverse the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

X'= (F^N)^(-1) * Y = (F^N)^* * y + * X = (1/N) * X,

+ * + *

where F^*=(Re(F^N)-Im(F^N))^T

+ *
    + *
  • Forward the 2D Fourier transform of a M x N matrix: + *
+ * + *

Y = F^M * X * F^N

+ * + *
    + *
  • Inverse the 2D Fourier transform of a M x N matrix: + *
+ * + *

X'= (F^M)^* * Y * (F^N)^* + * X = 1/(M * N) * X'

+ * + *

In case of real (single-channel) data, the output spectrum of the forward + * Fourier transform or input spectrum of the inverse Fourier transform can be + * represented in a packed format called *CCS* (complex-conjugate-symmetrical). + * It was borrowed from IPL (Intel* Image Processing Library). Here is how 2D + * *CCS* spectrum looks:

+ * + *

Re Y_(0,0) Re Y_(0,1) Im Y_(0,1) Re Y_(0,2) Im Y_(0,2) *s Re Y_(0,N/2-1) + * Im Y_(0,N/2-1) Re Y_(0,N/2) + * Re Y_(1,0) Re Y_(1,1) Im Y_(1,1) Re Y_(1,2) Im Y_(1,2) *s Re Y_(1,N/2-1) Im + * Y_(1,N/2-1) Re Y_(1,N/2) + * Im Y_(1,0) Re Y_(2,1) Im Y_(2,1) Re Y_(2,2) Im Y_(2,2) *s Re Y_(2,N/2-1) Im + * Y_(2,N/2-1) Im Y_(1,N/2)........................... + * Re Y_(M/2-1,0) Re Y_(M-3,1) Im Y_(M-3,1)......... Re Y_(M-3,N/2-1) Im + * Y_(M-3,N/2-1) Re Y_(M/2-1,N/2) + * Im Y_(M/2-1,0) Re Y_(M-2,1) Im Y_(M-2,1)......... Re Y_(M-2,N/2-1) Im + * Y_(M-2,N/2-1) Im Y_(M/2-1,N/2) + * Re Y_(M/2,0) Re Y_(M-1,1) Im Y_(M-1,1)......... Re Y_(M-1,N/2-1) Im + * Y_(M-1,N/2-1) Re Y_(M/2,N/2)

+ * + *

In case of 1D transform of a real vector, the output looks like the first row + * of the matrix above.

+ * + *

So, the function chooses an operation mode depending on the flags and size of + * the input array:

+ *
    + *
  • If DFT_ROWS is set or the input array has a single row or + * single column, the function performs a 1D forward or inverse transform of + * each row of a matrix when DFT_ROWS is set. Otherwise, it + * performs a 2D transform. + *
  • If the input array is real and DFT_INVERSE is not set, + * the function performs a forward 1D or 2D transform: + *
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex + * matrix of the same size as input. + *
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real + * matrix of the same size as input. In case of 2D transform, it uses the packed + * format as shown above. In case of a single 1D transform, it looks like the + * first row of the matrix above. In case of multiple 1D transforms (when using + * the DCT_ROWS flag), each row of the output matrix looks like the + * first row of the matrix above. + *
  • If the input array is complex and either DFT_INVERSE or + * DFT_REAL_OUTPUT are not set, the output is a complex array of + * the same size as input. The function performs a forward or inverse 1D or 2D + * transform of the whole input array or each row of the input array + * independently, depending on the flags DFT_INVERSE and + * DFT_ROWS. + *
  • When DFT_INVERSE is set and the input array is real, or + * it is complex but DFT_REAL_OUTPUT is set, the output is a real + * array of the same size as input. The function performs a 1D or 2D inverse + * transformation of the whole input array or each individual row, depending on + * the flags DFT_INVERSE and DFT_ROWS. + *
+ * + *

If DFT_SCALE is set, the scaling is done after the + * transformation.

+ * + *

Unlike "dct", the function supports arrays of arbitrary size. But only those + * arrays are processed efficiently, whose sizes can be factorized in a product + * of small prime numbers (2, 3, and 5 in the current implementation). Such an + * efficient DFT size can be calculated using the "getOptimalDFTSize" method. + * The sample below illustrates how to calculate a DFT-based convolution of two + * 2D real arrays:

+ * + *

// C++ code:

+ * + *

void convolveDFT(InputArray A, InputArray B, OutputArray C)

+ * + * + *

// reallocate the output array if needed

+ * + *

C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());

+ * + *

Size dftSize;

+ * + *

// calculate the size of DFT transform

+ * + *

dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);

+ * + *

dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

+ * + *

// allocate temporary buffers and initialize them with 0's

+ * + *

Mat tempA(dftSize, A.type(), Scalar.all(0));

+ * + *

Mat tempB(dftSize, B.type(), Scalar.all(0));

+ * + *

// copy A and B to the top-left corners of tempA and tempB, respectively

+ * + *

Mat roiA(tempA, Rect(0,0,A.cols,A.rows));

+ * + *

A.copyTo(roiA);

+ * + *

Mat roiB(tempB, Rect(0,0,B.cols,B.rows));

+ * + *

B.copyTo(roiB);

+ * + *

// now transform the padded A & B in-place;

+ * + *

// use "nonzeroRows" hint for faster processing

+ * + *

dft(tempA, tempA, 0, A.rows);

+ * + *

dft(tempB, tempB, 0, B.rows);

+ * + *

// multiply the spectrums;

+ * + *

// the function handles packed spectrum representations well

+ * + *

mulSpectrums(tempA, tempB, tempA);

+ * + *

// transform the product back from the frequency domain.

+ * + *

// Even though all the result rows will be non-zero,

+ * + *

// you need only the first C.rows of them, and thus you

+ * + *

// pass nonzeroRows == C.rows

+ * + *

dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

+ * + *

// now copy the result back to C.

+ * + *

tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

+ * + *

// all the temporary buffers will be deallocated automatically

+ * + * + *

To optimize this sample, consider the following approaches:

+ *
    + *
  • Since nonzeroRows != 0 is passed to the forward transform + * calls and since A and B are copied to the top-left + * corners of tempA and tempB, respectively, it is not + * necessary to clear the whole tempA and tempB. It is + * only necessary to clear the tempA.cols - A.cols + * (tempB.cols - B.cols) rightmost columns of the matrices. + *
  • This DFT-based convolution does not have to be applied to the whole + * big arrays, especially if B is significantly smaller than + * A or vice versa. Instead, you can calculate convolution by + * parts. To do this, you need to split the output array C into + * multiple tiles. For each tile, estimate which parts of A and + * B are required to calculate convolution in this tile. If the + * tiles in C are too small, the speed will decrease a lot because + * of repeated work. In the ultimate case, when each tile in C is a + * single pixel, the algorithm becomes equivalent to the naive convolution + * algorithm. If the tiles are too big, the temporary arrays tempA + * and tempB become too big and there is also a slowdown because of + * bad cache locality. So, there is an optimal tile size somewhere in the + * middle. + *
  • If different tiles in C can be calculated in parallel + * and, thus, the convolution is done by parts, the loop can be threaded. + *
+ * + *

All of the above improvements have been implemented in "matchTemplate" and + * "filter2D". Therefore, by using them, you can get the performance even better + * than with the above theoretically optimal implementation. Though, those two + * functions actually calculate cross-correlation, not convolution, so you need + * to "flip" the second convolution operand B vertically and + * horizontally using "flip".

+ * + * @param src input array that could be real or complex. + * @param dst output array whose size and type depends on the flags. + * + * @see org.opencv.core.Core.dft + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#flip + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#phase + * @see org.opencv.core.Core#dct + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dft(Mat src, Mat dst) + { + + dft_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void divide(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * @param dtype optional depth of the output array; if -1, + * dst will have depth src2.depth(), but in case of an + * array-by-array division, you can only pass -1 when + * src1.depth()==src2.depth(). + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Mat src2, Mat dst, double scale, int dtype) + { + + divide_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Mat src2, Mat dst, double scale) + { + + divide_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Mat src2, Mat dst) + { + + divide_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void divide(double scale, Mat src2, Mat& dst, int dtype = -1) + // + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param scale scalar factor. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param dtype optional depth of the output array; if -1, + * dst will have depth src2.depth(), but in case of an + * array-by-array division, you can only pass -1 when + * src1.depth()==src2.depth(). + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(double scale, Mat src2, Mat dst, int dtype) + { + + divide_3(scale, src2.nativeObj, dst.nativeObj, dtype); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param scale scalar factor. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(double scale, Mat src2, Mat dst) + { + + divide_4(scale, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void divide(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * @param dtype optional depth of the output array; if -1, + * dst will have depth src2.depth(), but in case of an + * array-by-array division, you can only pass -1 when + * src1.depth()==src2.depth(). + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Scalar src2, Mat dst, double scale, int dtype) + { + + divide_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Scalar src2, Mat dst, double scale) + { + + divide_6(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Scalar src2, Mat dst) + { + + divide_7(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: bool eigen(Mat src, bool computeEigenvectors, Mat& eigenvalues, Mat& eigenvectors) + // + +/** + *

Calculates eigenvalues and eigenvectors of a symmetric matrix.

+ * + *

The functions eigen calculate just eigenvalues, or eigenvalues + * and eigenvectors of the symmetric matrix src :

+ * + *

// C++ code:

+ * + *

src*eigenvectors.row(i).t() = eigenvalues.at(i)*eigenvectors.row(i).t()

+ * + *

Note: in the new and the old interfaces different ordering of eigenvalues and + * eigenvectors parameters is used. + *

+ * + * @param src input matrix that must have CV_32FC1 or + * CV_64FC1 type, square size and be symmetrical (src^"T" + * == src). + * @param computeEigenvectors a computeEigenvectors + * @param eigenvalues output vector of eigenvalues of the same type as + * src; the eigenvalues are stored in the descending order. + * @param eigenvectors output matrix of eigenvectors; it has the same size and + * type as src; the eigenvectors are stored as subsequent matrix + * rows, in the same order as the corresponding eigenvalues. + * + * @see org.opencv.core.Core.eigen + * @see org.opencv.core.Core#completeSymm + */ + public static boolean eigen(Mat src, boolean computeEigenvectors, Mat eigenvalues, Mat eigenvectors) + { + + boolean retVal = eigen_0(src.nativeObj, computeEigenvectors, eigenvalues.nativeObj, eigenvectors.nativeObj); + + return retVal; + } + + + // + // C++: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with less parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param center Center of the ellipse. + * @param axes Length of the ellipse axes. + * @param angle Ellipse rotation angle in degrees. + * @param startAngle Starting angle of the elliptic arc in degrees. + * @param endAngle Ending angle of the elliptic arc in degrees. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * @param lineType Type of the ellipse boundary. See the "line" description. + * @param shift Number of fractional bits in the coordinates of the center and + * values of axes. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) + { + + ellipse_0(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with less parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param center Center of the ellipse. + * @param axes Length of the ellipse axes. + * @param angle Ellipse rotation angle in degrees. + * @param startAngle Starting angle of the elliptic arc in degrees. + * @param endAngle Ending angle of the elliptic arc in degrees. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness) + { + + ellipse_1(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with less parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param center Center of the ellipse. + * @param axes Length of the ellipse axes. + * @param angle Ellipse rotation angle in degrees. + * @param startAngle Starting angle of the elliptic arc in degrees. + * @param endAngle Ending angle of the elliptic arc in degrees. + * @param color Ellipse color. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color) + { + + ellipse_2(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = 8) + // + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with less parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param box Alternative ellipse representation via "RotatedRect" or + * CvBox2D. This means that the function draws an ellipse inscribed + * in the rotated rectangle. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * @param lineType Type of the ellipse boundary. See the "line" description. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType) + { + + ellipse_3(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with less parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param box Alternative ellipse representation via "RotatedRect" or + * CvBox2D. This means that the function draws an ellipse inscribed + * in the rotated rectangle. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness) + { + + ellipse_4(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with less parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param box Alternative ellipse representation via "RotatedRect" or + * CvBox2D. This means that the function draws an ellipse inscribed + * in the rotated rectangle. + * @param color Ellipse color. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, RotatedRect box, Scalar color) + { + + ellipse_5(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts) + // + +/** + *

Approximates an elliptic arc with a polyline.

+ * + *

The function ellipse2Poly computes the vertices of a polyline + * that approximates the specified elliptic arc. It is used by "ellipse".

+ * + * @param center Center of the arc. + * @param axes Half-sizes of the arc. See the "ellipse" for details. + * @param angle Rotation angle of the ellipse in degrees. See the "ellipse" for + * details. + * @param arcStart Starting angle of the elliptic arc in degrees. + * @param arcEnd Ending angle of the elliptic arc in degrees. + * @param delta Angle between the subsequent polyline vertices. It defines the + * approximation accuracy. + * @param pts Output vector of polyline vertices. + * + * @see org.opencv.core.Core.ellipse2Poly + */ + public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts) + { + Mat pts_mat = pts; + ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj); + + return; + } + + + // + // C++: void exp(Mat src, Mat& dst) + // + +/** + *

Calculates the exponent of every array element.

+ * + *

The function exp calculates the exponent of every element of the + * input array:

+ * + *

dst [I] = e^(src(I))

+ * + *

The maximum relative error is about 7e-6 for single-precision + * input and less than 1e-10 for double-precision input. Currently, + * the function converts denormalized values to zeros on output. Special values + * (NaN, Inf) are not handled.

+ * + * @param src input array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.exp + * @see org.opencv.core.Core#log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#polarToCart + * @see org.opencv.core.Core#phase + */ + public static void exp(Mat src, Mat dst) + { + + exp_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void extractChannel(Mat src, Mat& dst, int coi) + // + + public static void extractChannel(Mat src, Mat dst, int coi) + { + + extractChannel_0(src.nativeObj, dst.nativeObj, coi); + + return; + } + + + // + // C++: float fastAtan2(float y, float x) + // + +/** + *

Calculates the angle of a 2D vector in degrees.

+ * + *

The function fastAtan2 calculates the full-range angle of an + * input 2D vector. The angle is measured in degrees and varies from 0 to 360 + * degrees. The accuracy is about 0.3 degrees.

+ * + * @param y y-coordinate of the vector. + * @param x x-coordinate of the vector. + * + * @see org.opencv.core.Core.fastAtan2 + */ + public static float fastAtan2(float y, float x) + { + + float retVal = fastAtan2_0(y, x); + + return retVal; + } + + + // + // C++: void fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = 8, int shift = 0) + // + +/** + *

Fills a convex polygon.

+ * + *

The function fillConvexPoly draws a filled convex polygon. + * This function is much faster than the function fillPoly. It can + * fill not only convex polygons but any monotonic polygon without + * self-intersections, that is, a polygon whose contour intersects every + * horizontal line (scan line) twice at the most (though, its top-most and/or + * the bottom edge could be horizontal).

+ * + * @param img Image. + * @param points a points + * @param color Polygon color. + * @param lineType Type of the polygon boundaries. See the "line" description. + * @param shift Number of fractional bits in the vertex coordinates. + * + * @see org.opencv.core.Core.fillConvexPoly + */ + public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift) + { + Mat points_mat = points; + fillConvexPoly_0(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift); + + return; + } + +/** + *

Fills a convex polygon.

+ * + *

The function fillConvexPoly draws a filled convex polygon. + * This function is much faster than the function fillPoly. It can + * fill not only convex polygons but any monotonic polygon without + * self-intersections, that is, a polygon whose contour intersects every + * horizontal line (scan line) twice at the most (though, its top-most and/or + * the bottom edge could be horizontal).

+ * + * @param img Image. + * @param points a points + * @param color Polygon color. + * + * @see org.opencv.core.Core.fillConvexPoly + */ + public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color) + { + Mat points_mat = points; + fillConvexPoly_1(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = 8, int shift = 0, Point offset = Point()) + // + +/** + *

Fills the area bounded by one or more polygons.

+ * + *

The function fillPoly fills an area bounded by several polygonal + * contours. The function can fill complex areas, for example, areas with holes, + * contours with self-intersections (some of their parts), and so forth.

+ * + * @param img Image. + * @param pts Array of polygons where each polygon is represented as an array of + * points. + * @param color Polygon color. + * @param lineType Type of the polygon boundaries. See the "line" description. + * @param shift Number of fractional bits in the vertex coordinates. + * @param offset Optional offset of all points of the contours. + * + * @see org.opencv.core.Core.fillPoly + */ + public static void fillPoly(Mat img, List pts, Scalar color, int lineType, int shift, Point offset) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + fillPoly_0(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift, offset.x, offset.y); + + return; + } + +/** + *

Fills the area bounded by one or more polygons.

+ * + *

The function fillPoly fills an area bounded by several polygonal + * contours. The function can fill complex areas, for example, areas with holes, + * contours with self-intersections (some of their parts), and so forth.

+ * + * @param img Image. + * @param pts Array of polygons where each polygon is represented as an array of + * points. + * @param color Polygon color. + * + * @see org.opencv.core.Core.fillPoly + */ + public static void fillPoly(Mat img, List pts, Scalar color) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + fillPoly_1(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void findNonZero(Mat src, Mat& idx) + // + + public static void findNonZero(Mat src, Mat idx) + { + + findNonZero_0(src.nativeObj, idx.nativeObj); + + return; + } + + + // + // C++: void flip(Mat src, Mat& dst, int flipCode) + // + +/** + *

Flips a 2D array around vertical, horizontal, or both axes.

+ * + *

The function flip flips the array in one of three different ways + * (row and column indices are 0-based):

+ * + *

dst _(ij) =<BR> <= ft(<BR> ltBR gtsrc _(src.rows-i-1,j) if + * flipCode = 0 + * ltBR gtsrc _(i, src.cols -j-1) if flipCode gt 0 + * ltBR gtsrc _(src.rows -i-1, src.cols -j-1) if flipCode lt 0 + * ltBR gt<BR>right.

+ * + *

The example scenarios of using the function are the following:

+ *
    + *
  • Vertical flipping of the image (flipCode == 0) to switch + * between top-left and bottom-left image origin. This is a typical operation in + * video processing on Microsoft Windows* OS. + *
  • Horizontal flipping of the image with the subsequent horizontal shift + * and absolute difference calculation to check for a vertical-axis symmetry + * (flipCode > 0). + *
  • Simultaneous horizontal and vertical flipping of the image with the + * subsequent shift and absolute difference calculation to check for a central + * symmetry (flipCode < 0). + *
  • Reversing the order of point arrays (flipCode > 0 or + * flipCode == 0). + *
+ * + * @param src input array. + * @param dst output array of the same size and type as src. + * @param flipCode a flag to specify how to flip the array; 0 means flipping + * around the x-axis and positive value (for example, 1) means flipping around + * y-axis. Negative value (for example, -1) means flipping around both axes (see + * the discussion below for the formulas). + * + * @see org.opencv.core.Core.flip + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#transpose + * @see org.opencv.core.Core#completeSymm + */ + public static void flip(Mat src, Mat dst, int flipCode) + { + + flip_0(src.nativeObj, dst.nativeObj, flipCode); + + return; + } + + + // + // C++: void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat& dst, int flags = 0) + // + +/** + *

Performs generalized matrix multiplication.

+ * + *

The function performs generalized matrix multiplication similar to the + * gemm functions in BLAS level 3. For example, gemm(src1, + * src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T) corresponds to

+ * + *

dst = alpha * src1 ^T * src2 + beta * src3 ^T<BR>The function can be + * replaced with a matrix expression. For example, the above call can be + * replaced with: <BR><code>

+ * + *

// C++ code:

+ * + *

dst = alpha*src1.t()*src2 + beta*src3.t();

+ * + *

+ * + * @param src1 first multiplied input matrix that should have CV_32FC1, + * CV_64FC1, CV_32FC2, or CV_64FC2 type. + * @param src2 second multiplied input matrix of the same type as + * src1. + * @param alpha weight of the matrix product. + * @param src3 third optional delta matrix added to the matrix product; it + * should have the same type as src1 and src2. + * @param gamma a gamma + * @param dst output matrix; it has the proper size and the same type as input + * matrices. + * @param flags operation flags: + *
    + *
  • GEMM_1_T transposes src1. + *
  • GEMM_2_T transposes src2. + *
  • GEMM_3_T transposes src3. + *
+ * + * @see org.opencv.core.Core.gemm + * @see org.opencv.core.Core#mulTransposed + * @see org.opencv.core.Core#transform + */ + public static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst, int flags) + { + + gemm_0(src1.nativeObj, src2.nativeObj, alpha, src3.nativeObj, gamma, dst.nativeObj, flags); + + return; + } + +/** + *

Performs generalized matrix multiplication.

+ * + *

The function performs generalized matrix multiplication similar to the + * gemm functions in BLAS level 3. For example, gemm(src1, + * src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T) corresponds to

+ * + *

dst = alpha * src1 ^T * src2 + beta * src3 ^T<BR>The function can be + * replaced with a matrix expression. For example, the above call can be + * replaced with: <BR><code>

+ * + *

// C++ code:

+ * + *

dst = alpha*src1.t()*src2 + beta*src3.t();

+ * + *

+ * + * @param src1 first multiplied input matrix that should have CV_32FC1, + * CV_64FC1, CV_32FC2, or CV_64FC2 type. + * @param src2 second multiplied input matrix of the same type as + * src1. + * @param alpha weight of the matrix product. + * @param src3 third optional delta matrix added to the matrix product; it + * should have the same type as src1 and src2. + * @param gamma a gamma + * @param dst output matrix; it has the proper size and the same type as input + * matrices. + * + * @see org.opencv.core.Core.gemm + * @see org.opencv.core.Core#mulTransposed + * @see org.opencv.core.Core#transform + */ + public static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst) + { + + gemm_1(src1.nativeObj, src2.nativeObj, alpha, src3.nativeObj, gamma, dst.nativeObj); + + return; + } + + + // + // C++: string getBuildInformation() + // + +/** + *

Returns full configuration time cmake output.

+ * + *

Returned value is raw cmake output including version control system revision, + * compiler version, compiler flags, enabled modules and third party libraries, + * etc. Output format depends on target architecture.

+ * + * @see org.opencv.core.Core.getBuildInformation + */ + public static String getBuildInformation() + { + + String retVal = getBuildInformation_0(); + + return retVal; + } + + + // + // C++: int64 getCPUTickCount() + // + +/** + *

Returns the number of CPU ticks.

+ * + *

The function returns the current number of CPU ticks on some architectures + * (such as x86, x64, PowerPC). On other platforms the function is equivalent to + * getTickCount. It can also be used for very accurate time + * measurements, as well as for RNG initialization. Note that in case of + * multi-CPU systems a thread, from which getCPUTickCount is + * called, can be suspended and resumed at another CPU with its own counter. So, + * theoretically (and practically) the subsequent calls to the function do not + * necessary return the monotonously increasing values. Also, since a modern CPU + * varies the CPU frequency depending on the load, the number of CPU clocks + * spent in some code cannot be directly converted to time units. Therefore, + * getTickCount is generally a preferable solution for measuring + * execution time.

+ * + * @see org.opencv.core.Core.getCPUTickCount + */ + public static long getCPUTickCount() + { + + long retVal = getCPUTickCount_0(); + + return retVal; + } + + + // + // C++: int getNumberOfCPUs() + // + +/** + *

Returns the number of logical CPUs available for the process.

+ * + * @see org.opencv.core.Core.getNumberOfCPUs + */ + public static int getNumberOfCPUs() + { + + int retVal = getNumberOfCPUs_0(); + + return retVal; + } + + + // + // C++: int getOptimalDFTSize(int vecsize) + // + +/** + *

Returns the optimal DFT size for a given vector size.

+ * + *

DFT performance is not a monotonic function of a vector size. Therefore, when + * you calculate convolution of two arrays or perform the spectral analysis of + * an array, it usually makes sense to pad the input data with zeros to get a + * bit larger array that can be transformed much faster than the original one. + * Arrays whose size is a power-of-two (2, 4, 8, 16, 32,...) are the fastest to + * process. Though, the arrays whose size is a product of 2's, 3's, and 5's (for + * example, 300 = 5*5*3*2*2) are also processed quite efficiently.

+ * + *

The function getOptimalDFTSize returns the minimum number + * N that is greater than or equal to vecsize so that + * the DFT of a vector of size N can be processed efficiently. In + * the current implementation N = 2^"p" * 3^"q" * 5^"r" for some + * integer p, q, r.

+ * + *

The function returns a negative number if vecsize is too large + * (very close to INT_MAX).

+ * + *

While the function cannot be used directly to estimate the optimal vector + * size for DCT transform (since the current DCT implementation supports only + * even-size vectors), it can be easily processed as getOptimalDFTSize((vecsize+1)/2)*2.

+ * + * @param vecsize vector size. + * + * @see org.opencv.core.Core.getOptimalDFTSize + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#idft + */ + public static int getOptimalDFTSize(int vecsize) + { + + int retVal = getOptimalDFTSize_0(vecsize); + + return retVal; + } + + + // + // C++: int64 getTickCount() + // + +/** + *

Returns the number of ticks.

+ * + *

The function returns the number of ticks after the certain event (for + * example, when the machine was turned on). + * It can be used to initialize "RNG" or to measure a function execution time by + * reading the tick count before and after the function call. See also the tick + * frequency.

+ * + * @see org.opencv.core.Core.getTickCount + */ + public static long getTickCount() + { + + long retVal = getTickCount_0(); + + return retVal; + } + + + // + // C++: double getTickFrequency() + // + +/** + *

Returns the number of ticks per second.

+ * + *

The function returns the number of ticks per second.That is, the following + * code computes the execution time in seconds:

+ * + *

// C++ code:

+ * + *

double t = (double)getTickCount();

+ * + *

// do something...

+ * + *

t = ((double)getTickCount() - t)/getTickFrequency();

+ * + * @see org.opencv.core.Core.getTickFrequency + */ + public static double getTickFrequency() + { + + double retVal = getTickFrequency_0(); + + return retVal; + } + + + // + // C++: void hconcat(vector_Mat src, Mat& dst) + // + + public static void hconcat(List src, Mat dst) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + hconcat_0(src_mat.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void idct(Mat src, Mat& dst, int flags = 0) + // + +/** + *

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

+ * + *

idct(src, dst, flags) is equivalent to dct(src, dst, flags + * | DCT_INVERSE).

+ * + * @param src input floating-point single-channel array. + * @param dst output array of the same size and type as src. + * @param flags operation flags. + * + * @see org.opencv.core.Core.idct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static void idct(Mat src, Mat dst, int flags) + { + + idct_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + +/** + *

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

+ * + *

idct(src, dst, flags) is equivalent to dct(src, dst, flags + * | DCT_INVERSE).

+ * + * @param src input floating-point single-channel array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.idct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static void idct(Mat src, Mat dst) + { + + idct_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void idft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + // + +/** + *

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

+ * + *

idft(src, dst, flags) is equivalent to dft(src, dst, flags + * | DFT_INVERSE).

+ * + *

See "dft" for details.

+ * + *

Note: None of dft and idft scales the result by + * default. So, you should pass DFT_SCALE to one of + * dft or idft explicitly to make these transforms + * mutually inverse.

+ * + * @param src input floating-point real or complex array. + * @param dst output array whose size and type depend on the flags. + * @param flags operation flags (see "dft"). + * @param nonzeroRows number of dst rows to process; the rest of + * the rows have undefined content (see the convolution sample in "dft" + * description. + * + * @see org.opencv.core.Core.idft + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#mulSpectrums + */ + public static void idft(Mat src, Mat dst, int flags, int nonzeroRows) + { + + idft_0(src.nativeObj, dst.nativeObj, flags, nonzeroRows); + + return; + } + +/** + *

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

+ * + *

idft(src, dst, flags) is equivalent to dft(src, dst, flags + * | DFT_INVERSE).

+ * + *

See "dft" for details.

+ * + *

Note: None of dft and idft scales the result by + * default. So, you should pass DFT_SCALE to one of + * dft or idft explicitly to make these transforms + * mutually inverse.

+ * + * @param src input floating-point real or complex array. + * @param dst output array whose size and type depend on the flags. + * + * @see org.opencv.core.Core.idft + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#mulSpectrums + */ + public static void idft(Mat src, Mat dst) + { + + idft_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat& dst) + // + +/** + *

Checks if array elements lie between the elements of two other arrays.

+ * + *

The function checks the range as follows:

+ *
    + *
  • For every element of a single-channel input array: + *
+ * + *

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0

+ * + *
    + *
  • For two-channel arrays: + *
+ * + *

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0 land lowerb(I)_1 <= + * src(I)_1 <= upperb(I)_1

+ * + *
    + *
  • and so forth. + *
+ * + *

That is, dst (I) is set to 255 (all 1 -bits) if + * src (I) is within the specified 1D, 2D, 3D,... box and 0 + * otherwise.

+ * + *

When the lower and/or upper boundary parameters are scalars, the indexes + * (I) at lowerb and upperb in the above + * formulas should be omitted.

+ * + * @param src first input array. + * @param lowerb inclusive lower boundary array or a scalar. + * @param upperb inclusive upper boundary array or a scalar. + * @param dst output array of the same size as src and + * CV_8U type. + * + * @see org.opencv.core.Core.inRange + */ + public static void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat dst) + { + + inRange_0(src.nativeObj, lowerb.val[0], lowerb.val[1], lowerb.val[2], lowerb.val[3], upperb.val[0], upperb.val[1], upperb.val[2], upperb.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void insertChannel(Mat src, Mat& dst, int coi) + // + + public static void insertChannel(Mat src, Mat dst, int coi) + { + + insertChannel_0(src.nativeObj, dst.nativeObj, coi); + + return; + } + + + // + // C++: double invert(Mat src, Mat& dst, int flags = DECOMP_LU) + // + +/** + *

Finds the inverse or pseudo-inverse of a matrix.

+ * + *

The function invert inverts the matrix src and + * stores the result in dst. + * When the matrix src is singular or non-square, the function + * calculates the pseudo-inverse matrix (the dst matrix) so that + * norm(src*dst - I) is minimal, where I is an identity matrix.

+ * + *

In case of the DECOMP_LU method, the function returns non-zero + * value if the inverse has been successfully calculated and 0 if + * src is singular.

+ * + *

In case of the DECOMP_SVD method, the function returns the + * inverse condition number of src (the ratio of the smallest + * singular value to the largest singular value) and 0 if src is + * singular. The SVD method calculates a pseudo-inverse matrix if + * src is singular.

+ * + *

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY + * works only with non-singular square matrices that should also be symmetrical + * and positively defined. In this case, the function stores the inverted matrix + * in dst and returns non-zero. Otherwise, it returns 0.

+ * + * @param src input floating-point M x N matrix. + * @param dst output matrix of N x M size and the same type as + * src. + * @param flags inversion method : + *
    + *
  • DECOMP_LU Gaussian elimination with the optimal pivot element chosen. + *
  • DECOMP_SVD singular value decomposition (SVD) method. + *
  • DECOMP_CHOLESKY Cholesky decomposition; the matrix must be symmetrical + * and positively defined. + *
+ * + * @see org.opencv.core.Core.invert + * @see org.opencv.core.Core#solve + */ + public static double invert(Mat src, Mat dst, int flags) + { + + double retVal = invert_0(src.nativeObj, dst.nativeObj, flags); + + return retVal; + } + +/** + *

Finds the inverse or pseudo-inverse of a matrix.

+ * + *

The function invert inverts the matrix src and + * stores the result in dst. + * When the matrix src is singular or non-square, the function + * calculates the pseudo-inverse matrix (the dst matrix) so that + * norm(src*dst - I) is minimal, where I is an identity matrix.

+ * + *

In case of the DECOMP_LU method, the function returns non-zero + * value if the inverse has been successfully calculated and 0 if + * src is singular.

+ * + *

In case of the DECOMP_SVD method, the function returns the + * inverse condition number of src (the ratio of the smallest + * singular value to the largest singular value) and 0 if src is + * singular. The SVD method calculates a pseudo-inverse matrix if + * src is singular.

+ * + *

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY + * works only with non-singular square matrices that should also be symmetrical + * and positively defined. In this case, the function stores the inverted matrix + * in dst and returns non-zero. Otherwise, it returns 0.

+ * + * @param src input floating-point M x N matrix. + * @param dst output matrix of N x M size and the same type as + * src. + * + * @see org.opencv.core.Core.invert + * @see org.opencv.core.Core#solve + */ + public static double invert(Mat src, Mat dst) + { + + double retVal = invert_1(src.nativeObj, dst.nativeObj); + + return retVal; + } + + + // + // C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int flags, Mat& centers = Mat()) + // + +/** + *

Finds centers of clusters and groups input samples around the clusters.

+ * + *

The function kmeans implements a k-means algorithm that finds + * the centers of cluster_count clusters and groups the input + * samples around the clusters. As an output, labels_i contains a + * 0-based cluster index for the sample stored in the i^(th) row of the + * samples matrix.

+ * + *

The function returns the compactness measure that is computed as

+ * + *

sum _i|samples _i - centers _(labels _i)| ^2

+ * + *

after every attempt. The best (minimum) value is chosen and the corresponding + * labels and the compactness value are returned by the function. + * Basically, you can use only the core of the function, set the number of + * attempts to 1, initialize labels each time using a custom algorithm, pass + * them with the (flags = KMEANS_USE_INITIAL_LABELS) + * flag, and then choose the best (most-compact) clustering.

+ * + * @param data a data + * @param K a K + * @param bestLabels a bestLabels + * @param criteria The algorithm termination criteria, that is, the maximum + * number of iterations and/or the desired accuracy. The accuracy is specified + * as criteria.epsilon. As soon as each of the cluster centers + * moves by less than criteria.epsilon on some iteration, the + * algorithm stops. + * @param attempts Flag to specify the number of times the algorithm is executed + * using different initial labellings. The algorithm returns the labels that + * yield the best compactness (see the last function parameter). + * @param flags Flag that can take the following values: + *
    + *
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt. + *
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by + * Arthur and Vassilvitskii [Arthur2007]. + *
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) + * attempt, use the user-supplied labels instead of computing them from the + * initial centers. For the second and further attempts, use the random or + * semi-random centers. Use one of KMEANS_*_CENTERS flag to specify + * the exact method. + *
+ * @param centers Output matrix of the cluster centers, one row per each cluster + * center. + * + * @see org.opencv.core.Core.kmeans + */ + public static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) + { + + double retVal = kmeans_0(data.nativeObj, K, bestLabels.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, attempts, flags, centers.nativeObj); + + return retVal; + } + +/** + *

Finds centers of clusters and groups input samples around the clusters.

+ * + *

The function kmeans implements a k-means algorithm that finds + * the centers of cluster_count clusters and groups the input + * samples around the clusters. As an output, labels_i contains a + * 0-based cluster index for the sample stored in the i^(th) row of the + * samples matrix.

+ * + *

The function returns the compactness measure that is computed as

+ * + *

sum _i|samples _i - centers _(labels _i)| ^2

+ * + *

after every attempt. The best (minimum) value is chosen and the corresponding + * labels and the compactness value are returned by the function. + * Basically, you can use only the core of the function, set the number of + * attempts to 1, initialize labels each time using a custom algorithm, pass + * them with the (flags = KMEANS_USE_INITIAL_LABELS) + * flag, and then choose the best (most-compact) clustering.

+ * + * @param data a data + * @param K a K + * @param bestLabels a bestLabels + * @param criteria The algorithm termination criteria, that is, the maximum + * number of iterations and/or the desired accuracy. The accuracy is specified + * as criteria.epsilon. As soon as each of the cluster centers + * moves by less than criteria.epsilon on some iteration, the + * algorithm stops. + * @param attempts Flag to specify the number of times the algorithm is executed + * using different initial labellings. The algorithm returns the labels that + * yield the best compactness (see the last function parameter). + * @param flags Flag that can take the following values: + *
    + *
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt. + *
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by + * Arthur and Vassilvitskii [Arthur2007]. + *
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) + * attempt, use the user-supplied labels instead of computing them from the + * initial centers. For the second and further attempts, use the random or + * semi-random centers. Use one of KMEANS_*_CENTERS flag to specify + * the exact method. + *
+ * + * @see org.opencv.core.Core.kmeans + */ + public static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags) + { + + double retVal = kmeans_1(data.nativeObj, K, bestLabels.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, attempts, flags); + + return retVal; + } + + + // + // C++: void line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a line segment connecting two points.

+ * + *

The function line draws the line segment between + * pt1 and pt2 points in the image. The line is + * clipped by the image boundaries. For non-antialiased lines with integer + * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. + * Thick lines are drawn with rounding endings. + * Antialiased lines are drawn using Gaussian filtering. To specify the line + * color, you may use the macro CV_RGB(r, g, b).

+ * + * @param img Image. + * @param pt1 First point of the line segment. + * @param pt2 Second point of the line segment. + * @param color Line color. + * @param thickness Line thickness. + * @param lineType Type of the line: + *
    + *
  • 8 (or omitted) - 8-connected line. + *
  • 4 - 4-connected line. + *
  • CV_AA - antialiased line. + *
+ * @param shift Number of fractional bits in the point coordinates. + * + * @see org.opencv.core.Core.line + */ + public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) + { + + line_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a line segment connecting two points.

+ * + *

The function line draws the line segment between + * pt1 and pt2 points in the image. The line is + * clipped by the image boundaries. For non-antialiased lines with integer + * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. + * Thick lines are drawn with rounding endings. + * Antialiased lines are drawn using Gaussian filtering. To specify the line + * color, you may use the macro CV_RGB(r, g, b).

+ * + * @param img Image. + * @param pt1 First point of the line segment. + * @param pt2 Second point of the line segment. + * @param color Line color. + * @param thickness Line thickness. + * + * @see org.opencv.core.Core.line + */ + public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) + { + + line_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a line segment connecting two points.

+ * + *

The function line draws the line segment between + * pt1 and pt2 points in the image. The line is + * clipped by the image boundaries. For non-antialiased lines with integer + * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. + * Thick lines are drawn with rounding endings. + * Antialiased lines are drawn using Gaussian filtering. To specify the line + * color, you may use the macro CV_RGB(r, g, b).

+ * + * @param img Image. + * @param pt1 First point of the line segment. + * @param pt2 Second point of the line segment. + * @param color Line color. + * + * @see org.opencv.core.Core.line + */ + public static void line(Mat img, Point pt1, Point pt2, Scalar color) + { + + line_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void log(Mat src, Mat& dst) + // + +/** + *

Calculates the natural logarithm of every array element.

+ * + *

The function log calculates the natural logarithm of the + * absolute value of every element of the input array:

+ * + *

dst(I) = log|src(I)| if src(I) != 0 ; C otherwise

+ * + *

where C is a large negative number (about -700 in the current + * implementation). + * The maximum relative error is about 7e-6 for single-precision + * input and less than 1e-10 for double-precision input. Special + * values (NaN, Inf) are not handled.

+ * + * @param src input array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#polarToCart + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#phase + */ + public static void log(Mat src, Mat dst) + { + + log_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void magnitude(Mat x, Mat y, Mat& magnitude) + // + +/** + *

Calculates the magnitude of 2D vectors.

+ * + *

The function magnitude calculates the magnitude of 2D vectors + * formed from the corresponding elements of x and y + * arrays:

+ * + *

dst(I) = sqrt(x(I)^2 + y(I)^2)

+ * + * @param x floating-point array of x-coordinates of the vectors. + * @param y floating-point array of y-coordinates of the vectors; it must have + * the same size as x. + * @param magnitude output array of the same size and type as x. + * + * @see org.opencv.core.Core.magnitude + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#phase + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#polarToCart + */ + public static void magnitude(Mat x, Mat y, Mat magnitude) + { + + magnitude_0(x.nativeObj, y.nativeObj, magnitude.nativeObj); + + return; + } + + + // + // C++: void max(Mat src1, Mat src2, Mat& dst) + // + +/** + *

Calculates per-element maximum of two arrays or an array and a scalar.

+ * + *

The functions max calculate the per-element maximum of two + * arrays:

+ * + *

dst(I)= max(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= max(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first 3 variants of the function listed above are actually a part of + * "MatrixExpressions". They return an expression object that can be further + * either transformed/ assigned to a matrix, or passed to a function, and so on.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#min + */ + public static void max(Mat src1, Mat src2, Mat dst) + { + + max_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void max(Mat src1, Scalar src2, Mat& dst) + // + +/** + *

Calculates per-element maximum of two arrays or an array and a scalar.

+ * + *

The functions max calculate the per-element maximum of two + * arrays:

+ * + *

dst(I)= max(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= max(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first 3 variants of the function listed above are actually a part of + * "MatrixExpressions". They return an expression object that can be further + * either transformed/ assigned to a matrix, or passed to a function, and so on.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#min + */ + public static void max(Mat src1, Scalar src2, Mat dst) + { + + max_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: Scalar mean(Mat src, Mat mask = Mat()) + // + +/** + *

Calculates an average (mean) of array elements.

+ * + *

The function mean calculates the mean value M of + * array elements, independently for each channel, and return it:

+ * + *

N = sum(by: I: mask(I) != 0) 1 + * M_c = (sum(by: I: mask(I) != 0)(mtx(I)_c))/N

+ * + *

When all the mask elements are 0's, the functions return Scalar.all(0).

+ * + * @param src input array that should have from 1 to 4 channels so that the + * result can be stored in "Scalar_". + * @param mask optional operation mask. + * + * @see org.opencv.core.Core.mean + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#minMaxLoc + */ + public static Scalar mean(Mat src, Mat mask) + { + + Scalar retVal = new Scalar(mean_0(src.nativeObj, mask.nativeObj)); + + return retVal; + } + +/** + *

Calculates an average (mean) of array elements.

+ * + *

The function mean calculates the mean value M of + * array elements, independently for each channel, and return it:

+ * + *

N = sum(by: I: mask(I) != 0) 1 + * M_c = (sum(by: I: mask(I) != 0)(mtx(I)_c))/N

+ * + *

When all the mask elements are 0's, the functions return Scalar.all(0).

+ * + * @param src input array that should have from 1 to 4 channels so that the + * result can be stored in "Scalar_". + * + * @see org.opencv.core.Core.mean + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#minMaxLoc + */ + public static Scalar mean(Mat src) + { + + Scalar retVal = new Scalar(mean_1(src.nativeObj)); + + return retVal; + } + + + // + // C++: void meanStdDev(Mat src, vector_double& mean, vector_double& stddev, Mat mask = Mat()) + // + +/** + *

Calculates a mean and standard deviation of array elements.

+ * + *

The function meanStdDev calculates the mean and the standard + * deviation M of array elements independently for each channel and + * returns it via the output parameters:

+ * + *

N = sum(by: I, mask(I) != 0) 1 + * mean _c = (sum_(I: mask(I) != 0) src(I)_c)/(N) + * stddev _c = sqrt((sum_(I: mask(I) != 0)(src(I)_c - mean _c)^2)/(N))

+ * + *

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

+ * + *

Note: The calculated standard deviation is only the diagonal of the complete + * normalized covariance matrix. If the full matrix is needed, you can reshape + * the multi-channel array M x N to the single-channel array + * M*N x mtx.channels() (only possible when the matrix is + * continuous) and then pass the matrix to "calcCovarMatrix".

+ * + * @param src input array that should have from 1 to 4 channels so that the + * results can be stored in "Scalar_" 's. + * @param mean output parameter: calculated mean value. + * @param stddev output parameter: calculateded standard deviation. + * @param mask optional operation mask. + * + * @see org.opencv.core.Core.meanStdDev + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev, Mat mask) + { + Mat mean_mat = mean; + Mat stddev_mat = stddev; + meanStdDev_0(src.nativeObj, mean_mat.nativeObj, stddev_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates a mean and standard deviation of array elements.

+ * + *

The function meanStdDev calculates the mean and the standard + * deviation M of array elements independently for each channel and + * returns it via the output parameters:

+ * + *

N = sum(by: I, mask(I) != 0) 1 + * mean _c = (sum_(I: mask(I) != 0) src(I)_c)/(N) + * stddev _c = sqrt((sum_(I: mask(I) != 0)(src(I)_c - mean _c)^2)/(N))

+ * + *

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

+ * + *

Note: The calculated standard deviation is only the diagonal of the complete + * normalized covariance matrix. If the full matrix is needed, you can reshape + * the multi-channel array M x N to the single-channel array + * M*N x mtx.channels() (only possible when the matrix is + * continuous) and then pass the matrix to "calcCovarMatrix".

+ * + * @param src input array that should have from 1 to 4 channels so that the + * results can be stored in "Scalar_" 's. + * @param mean output parameter: calculated mean value. + * @param stddev output parameter: calculated standard deviation. + * + * @see org.opencv.core.Core.meanStdDev + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev) + { + Mat mean_mat = mean; + Mat stddev_mat = stddev; + meanStdDev_1(src.nativeObj, mean_mat.nativeObj, stddev_mat.nativeObj); + + return; + } + + + // + // C++: void merge(vector_Mat mv, Mat& dst) + // + +/** + *

Creates one multichannel array out of several single-channel ones.

+ * + *

The functions merge merge several arrays to make a single + * multi-channel array. That is, each element of the output array will be a + * concatenation of the elements of the input arrays, where elements of i-th + * input array are treated as mv[i].channels()-element vectors.

+ * + *

The function "split" does the reverse operation. If you need to shuffle + * channels in some other advanced way, use "mixChannels".

+ * + * @param mv input array or vector of matrices to be merged; all the matrices in + * mv must have the same size and the same depth. + * @param dst output array of the same size and the same depth as + * mv[0]; The number of channels will be the total number of + * channels in the matrix array. + * + * @see org.opencv.core.Core.merge + * @see org.opencv.core.Mat#reshape + * @see org.opencv.core.Core#mixChannels + * @see org.opencv.core.Core#split + */ + public static void merge(List mv, Mat dst) + { + Mat mv_mat = Converters.vector_Mat_to_Mat(mv); + merge_0(mv_mat.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void min(Mat src1, Mat src2, Mat& dst) + // + +/** + *

Calculates per-element minimum of two arrays or an array and a scalar.

+ * + *

The functions min calculate the per-element minimum of two + * arrays:

+ * + *

dst(I)= min(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= min(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first three variants of the function listed above are actually a part of + * "MatrixExpressions". They return the expression object that can be further + * either transformed/assigned to a matrix, or passed to a function, and so on.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.min + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + */ + public static void min(Mat src1, Mat src2, Mat dst) + { + + min_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void min(Mat src1, Scalar src2, Mat& dst) + // + +/** + *

Calculates per-element minimum of two arrays or an array and a scalar.

+ * + *

The functions min calculate the per-element minimum of two + * arrays:

+ * + *

dst(I)= min(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= min(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first three variants of the function listed above are actually a part of + * "MatrixExpressions". They return the expression object that can be further + * either transformed/assigned to a matrix, or passed to a function, and so on.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.min + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + */ + public static void min(Mat src1, Scalar src2, Mat dst) + { + + min_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void mixChannels(vector_Mat src, vector_Mat dst, vector_int fromTo) + // + +/** + *

Copies specified channels from input arrays to the specified channels of + * output arrays.

+ * + *

The functions mixChannels provide an advanced mechanism for + * shuffling image channels.

+ * + *

"split" and "merge" and some forms of "cvtColor" are partial cases of + * mixChannels. + * In the example below, the code splits a 4-channel RGBA image into a 3-channel + * BGR (with R and B channels swapped) and a separate alpha-channel image: + *

+ * + *

// C++ code:

+ * + *

Mat rgba(100, 100, CV_8UC4, Scalar(1,2,3,4));

+ * + *

Mat bgr(rgba.rows, rgba.cols, CV_8UC3);

+ * + *

Mat alpha(rgba.rows, rgba.cols, CV_8UC1);

+ * + *

// forming an array of matrices is a quite efficient operation,

+ * + *

// because the matrix data is not copied, only the headers

+ * + *

Mat out[] = { bgr, alpha };

+ * + *

// rgba[0] -> bgr[2], rgba[1] -> bgr[1],

+ * + *

// rgba[2] -> bgr[0], rgba[3] -> alpha[0]

+ * + *

int from_to[] = { 0,2, 1,1, 2,0, 3,3 };

+ * + *

mixChannels(&rgba, 1, out, 2, from_to, 4);

+ * + *

Note: Unlike many other new-style C++ functions in OpenCV (see the + * introduction section and "Mat.create"), mixChannels requires + * the output arrays to be pre-allocated before calling the function. + *

+ * + * @param src input array or vector of matrices; all of the matrices must have + * the same size and the same depth. + * @param dst output array or vector of matrices; all the matrices *must be + * allocated*; their size and depth must be the same as in src[0]. + * @param fromTo array of index pairs specifying which channels are copied and + * where; fromTo[k*2] is a 0-based index of the input channel in + * src, fromTo[k*2+1] is an index of the output + * channel in dst; the continuous channel numbering is used: the + * first input image channels are indexed from 0 to + * src[0].channels()-1, the second input image channels are indexed + * from src[0].channels() to src[0].channels() + + * src[1].channels()-1, and so on, the same scheme is used for the output + * image channels; as a special case, when fromTo[k*2] is negative, + * the corresponding output channel is filled with zero. + * + * @see org.opencv.core.Core.mixChannels + * @see org.opencv.core.Core#merge + * @see org.opencv.core.Core#split + * @see org.opencv.imgproc.Imgproc#cvtColor + */ + public static void mixChannels(List src, List dst, MatOfInt fromTo) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + Mat dst_mat = Converters.vector_Mat_to_Mat(dst); + Mat fromTo_mat = fromTo; + mixChannels_0(src_mat.nativeObj, dst_mat.nativeObj, fromTo_mat.nativeObj); + + return; + } + + + // + // C++: void mulSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) + // + +/** + *

Performs the per-element multiplication of two Fourier spectrums.

+ * + *

The function mulSpectrums performs the per-element + * multiplication of the two CCS-packed or complex matrices that are results of + * a real or complex Fourier transform.

+ * + *

The function, together with "dft" and "idft", may be used to calculate + * convolution (pass conjB=false) or correlation (pass + * conjB=true) of two arrays rapidly. When the arrays are complex, + * they are simply multiplied (per element) with an optional conjugation of the + * second-array elements. When the arrays are real, they are assumed to be + * CCS-packed (see "dft" for details).

+ * + * @param a a a + * @param b a b + * @param c a c + * @param flags operation flags; currently, the only supported flag is + * DFT_ROWS, which indicates that each row of src1 and + * src2 is an independent 1D Fourier spectrum. + * @param conjB optional flag that conjugates the second input array before the + * multiplication (true) or not (false). + * + * @see org.opencv.core.Core.mulSpectrums + */ + public static void mulSpectrums(Mat a, Mat b, Mat c, int flags, boolean conjB) + { + + mulSpectrums_0(a.nativeObj, b.nativeObj, c.nativeObj, flags, conjB); + + return; + } + +/** + *

Performs the per-element multiplication of two Fourier spectrums.

+ * + *

The function mulSpectrums performs the per-element + * multiplication of the two CCS-packed or complex matrices that are results of + * a real or complex Fourier transform.

+ * + *

The function, together with "dft" and "idft", may be used to calculate + * convolution (pass conjB=false) or correlation (pass + * conjB=true) of two arrays rapidly. When the arrays are complex, + * they are simply multiplied (per element) with an optional conjugation of the + * second-array elements. When the arrays are real, they are assumed to be + * CCS-packed (see "dft" for details).

+ * + * @param a a a + * @param b a b + * @param c a c + * @param flags operation flags; currently, the only supported flag is + * DFT_ROWS, which indicates that each row of src1 and + * src2 is an independent 1D Fourier spectrum. + * + * @see org.opencv.core.Core.mulSpectrums + */ + public static void mulSpectrums(Mat a, Mat b, Mat c, int flags) + { + + mulSpectrums_1(a.nativeObj, b.nativeObj, c.nativeObj, flags); + + return; + } + + + // + // C++: void mulTransposed(Mat src, Mat& dst, bool aTa, Mat delta = Mat(), double scale = 1, int dtype = -1) + // + +/** + *

Calculates the product of a matrix and its transposition.

+ * + *

The function mulTransposed calculates the product of + * src and its transposition:

+ * + *

dst = scale(src - delta)^T(src - delta)

+ * + *

if aTa=true, and

+ * + *

dst = scale(src - delta)(src - delta)^T

+ * + *

otherwise. The function is used to calculate the covariance matrix. With zero + * delta, it can be used as a faster substitute for general matrix product + * A*B when B=A'

+ * + * @param src input single-channel matrix. Note that unlike "gemm", the function + * can multiply not only floating-point matrices. + * @param dst output square matrix. + * @param aTa Flag specifying the multiplication ordering. See the description + * below. + * @param delta Optional delta matrix subtracted from src before + * the multiplication. When the matrix is empty (delta=noArray()), + * it is assumed to be zero, that is, nothing is subtracted. If it has the same + * size as src, it is simply subtracted. Otherwise, it is + * "repeated" (see "repeat") to cover the full src and then + * subtracted. Type of the delta matrix, when it is not empty, must be the same + * as the type of created output matrix. See the dtype parameter + * description below. + * @param scale Optional scale factor for the matrix product. + * @param dtype Optional type of the output matrix. When it is negative, the + * output matrix will have the same type as src. Otherwise, it will + * be type=CV_MAT_DEPTH(dtype) that should be either + * CV_32F or CV_64F. + * + * @see org.opencv.core.Core.mulTransposed + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#gemm + */ + public static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale, int dtype) + { + + mulTransposed_0(src.nativeObj, dst.nativeObj, aTa, delta.nativeObj, scale, dtype); + + return; + } + +/** + *

Calculates the product of a matrix and its transposition.

+ * + *

The function mulTransposed calculates the product of + * src and its transposition:

+ * + *

dst = scale(src - delta)^T(src - delta)

+ * + *

if aTa=true, and

+ * + *

dst = scale(src - delta)(src - delta)^T

+ * + *

otherwise. The function is used to calculate the covariance matrix. With zero + * delta, it can be used as a faster substitute for general matrix product + * A*B when B=A'

+ * + * @param src input single-channel matrix. Note that unlike "gemm", the function + * can multiply not only floating-point matrices. + * @param dst output square matrix. + * @param aTa Flag specifying the multiplication ordering. See the description + * below. + * @param delta Optional delta matrix subtracted from src before + * the multiplication. When the matrix is empty (delta=noArray()), + * it is assumed to be zero, that is, nothing is subtracted. If it has the same + * size as src, it is simply subtracted. Otherwise, it is + * "repeated" (see "repeat") to cover the full src and then + * subtracted. Type of the delta matrix, when it is not empty, must be the same + * as the type of created output matrix. See the dtype parameter + * description below. + * @param scale Optional scale factor for the matrix product. + * + * @see org.opencv.core.Core.mulTransposed + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#gemm + */ + public static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale) + { + + mulTransposed_1(src.nativeObj, dst.nativeObj, aTa, delta.nativeObj, scale); + + return; + } + +/** + *

Calculates the product of a matrix and its transposition.

+ * + *

The function mulTransposed calculates the product of + * src and its transposition:

+ * + *

dst = scale(src - delta)^T(src - delta)

+ * + *

if aTa=true, and

+ * + *

dst = scale(src - delta)(src - delta)^T

+ * + *

otherwise. The function is used to calculate the covariance matrix. With zero + * delta, it can be used as a faster substitute for general matrix product + * A*B when B=A'

+ * + * @param src input single-channel matrix. Note that unlike "gemm", the function + * can multiply not only floating-point matrices. + * @param dst output square matrix. + * @param aTa Flag specifying the multiplication ordering. See the description + * below. + * + * @see org.opencv.core.Core.mulTransposed + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#gemm + */ + public static void mulTransposed(Mat src, Mat dst, boolean aTa) + { + + mulTransposed_2(src.nativeObj, dst.nativeObj, aTa); + + return; + } + + + // + // C++: void multiply(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * @param dtype a dtype + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Mat src2, Mat dst, double scale, int dtype) + { + + multiply_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Mat src2, Mat dst, double scale) + { + + multiply_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Mat src2, Mat dst) + { + + multiply_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void multiply(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * @param dtype a dtype + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Scalar src2, Mat dst, double scale, int dtype) + { + + multiply_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Scalar src2, Mat dst, double scale) + { + + multiply_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Scalar src2, Mat dst) + { + + multiply_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: double norm(Mat src1, int normType = NORM_L2, Mat mask = Mat()) + // + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = + * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = + * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = + * NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if + * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - + * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = + * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

+ * + *

or

+ * + *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if + * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if + * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if + * normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

A multi-channel input arrays are treated as a single-channel, that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param normType type of the norm (see the details below). + * @param mask optional operation mask; it must have the same size as + * src1 and CV_8UC1 type. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, int normType, Mat mask) + { + + double retVal = norm_0(src1.nativeObj, normType, mask.nativeObj); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = + * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = + * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = + * NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if + * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - + * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = + * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

+ * + *

or

+ * + *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if + * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if + * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if + * normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

A multi-channel input arrays are treated as a single-channel, that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param normType type of the norm (see the details below). + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, int normType) + { + + double retVal = norm_1(src1.nativeObj, normType); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = + * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = + * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = + * NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if + * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - + * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = + * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

+ * + *

or

+ * + *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if + * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if + * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if + * normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

A multi-channel input arrays are treated as a single-channel, that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1) + { + + double retVal = norm_2(src1.nativeObj); + + return retVal; + } + + + // + // C++: double norm(Mat src1, Mat src2, int normType = NORM_L2, Mat mask = Mat()) + // + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = + * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = + * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = + * NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if + * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - + * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = + * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

+ * + *

or

+ * + *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if + * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if + * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if + * normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

A multi-channel input arrays are treated as a single-channel, that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param normType type of the norm (see the details below). + * @param mask optional operation mask; it must have the same size as + * src1 and CV_8UC1 type. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, Mat src2, int normType, Mat mask) + { + + double retVal = norm_3(src1.nativeObj, src2.nativeObj, normType, mask.nativeObj); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = + * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = + * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = + * NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if + * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - + * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = + * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

+ * + *

or

+ * + *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if + * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if + * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if + * normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

A multi-channel input arrays are treated as a single-channel, that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param normType type of the norm (see the details below). + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, Mat src2, int normType) + { + + double retVal = norm_4(src1.nativeObj, src2.nativeObj, normType); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = + * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = + * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = + * NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if + * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - + * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = + * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

+ * + *

or

+ * + *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if + * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if + * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if + * normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

A multi-channel input arrays are treated as a single-channel, that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, Mat src2) + { + + double retVal = norm_5(src1.nativeObj, src2.nativeObj); + + return retVal; + } + + + // + // C++: void normalize(Mat src, Mat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, Mat mask = Mat()) + // + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

| dst|_(L_p)= alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * @param alpha norm value to normalize to or the lower range boundary in case + * of the range normalization. + * @param beta upper range boundary in case of the range normalization; it is + * not used for the norm normalization. + * @param norm_type a norm_type + * @param dtype when negative, the output array has the same type as + * src; otherwise, it has the same number of channels as + * src and the depth =CV_MAT_DEPTH(dtype). + * @param mask optional operation mask. + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype, Mat mask) + { + + normalize_0(src.nativeObj, dst.nativeObj, alpha, beta, norm_type, dtype, mask.nativeObj); + + return; + } + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

| dst|_(L_p)= alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * @param alpha norm value to normalize to or the lower range boundary in case + * of the range normalization. + * @param beta upper range boundary in case of the range normalization; it is + * not used for the norm normalization. + * @param norm_type a norm_type + * @param dtype when negative, the output array has the same type as + * src; otherwise, it has the same number of channels as + * src and the depth =CV_MAT_DEPTH(dtype). + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype) + { + + normalize_1(src.nativeObj, dst.nativeObj, alpha, beta, norm_type, dtype); + + return; + } + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

| dst|_(L_p)= alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * @param alpha norm value to normalize to or the lower range boundary in case + * of the range normalization. + * @param beta upper range boundary in case of the range normalization; it is + * not used for the norm normalization. + * @param norm_type a norm_type + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type) + { + + normalize_2(src.nativeObj, dst.nativeObj, alpha, beta, norm_type); + + return; + } + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

| dst|_(L_p)= alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst) + { + + normalize_3(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void patchNaNs(Mat& a, double val = 0) + // + + public static void patchNaNs(Mat a, double val) + { + + patchNaNs_0(a.nativeObj, val); + + return; + } + + public static void patchNaNs(Mat a) + { + + patchNaNs_1(a.nativeObj); + + return; + } + + + // + // C++: void perspectiveTransform(Mat src, Mat& dst, Mat m) + // + +/** + *

Performs the perspective matrix transformation of vectors.

+ * + *

The function perspectiveTransform transforms every element of + * src by treating it as a 2D or 3D vector, in the following way:

+ * + *

(x, y, z) -> (x'/w, y'/w, z'/w)

+ * + *

where

+ * + *

(x', y', z', w') = mat * x y z 1

+ * + *

and

+ * + *

w = w' if w' != 0; infty otherwise

+ * + *

Here a 3D vector transformation is shown. In case of a 2D vector + * transformation, the z component is omitted.

+ * + *

Note: The function transforms a sparse set of 2D or 3D vectors. If you want + * to transform an image using perspective transformation, use "warpPerspective". + * If you have an inverse problem, that is, you want to compute the most + * probable perspective transformation out of several pairs of corresponding + * points, you can use "getPerspectiveTransform" or "findHomography".

+ * + * @param src input two-channel or three-channel floating-point array; each + * element is a 2D/3D vector to be transformed. + * @param dst output array of the same size and type as src. + * @param m 3x3 or 4x4 floating-point transformation + * matrix. + * + * @see org.opencv.core.Core.perspectiveTransform + * @see org.opencv.calib3d.Calib3d#findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#transform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static void perspectiveTransform(Mat src, Mat dst, Mat m) + { + + perspectiveTransform_0(src.nativeObj, dst.nativeObj, m.nativeObj); + + return; + } + + + // + // C++: void phase(Mat x, Mat y, Mat& angle, bool angleInDegrees = false) + // + +/** + *

Calculates the rotation angle of 2D vectors.

+ * + *

The function phase calculates the rotation angle of each 2D + * vector that is formed from the corresponding elements of x and + * y :

+ * + *

angle(I) = atan2(y(I), x(I))

+ * + *

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, + * the corresponding angle(I) is set to 0.

+ * + * @param x input floating-point array of x-coordinates of 2D vectors. + * @param y input array of y-coordinates of 2D vectors; it must have the same + * size and the same type as x. + * @param angle output array of vector angles; it has the same size and same + * type as x. + * @param angleInDegrees when true, the function calculates the angle in + * degrees, otherwise, they are measured in radians. + * + * @see org.opencv.core.Core.phase + */ + public static void phase(Mat x, Mat y, Mat angle, boolean angleInDegrees) + { + + phase_0(x.nativeObj, y.nativeObj, angle.nativeObj, angleInDegrees); + + return; + } + +/** + *

Calculates the rotation angle of 2D vectors.

+ * + *

The function phase calculates the rotation angle of each 2D + * vector that is formed from the corresponding elements of x and + * y :

+ * + *

angle(I) = atan2(y(I), x(I))

+ * + *

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, + * the corresponding angle(I) is set to 0.

+ * + * @param x input floating-point array of x-coordinates of 2D vectors. + * @param y input array of y-coordinates of 2D vectors; it must have the same + * size and the same type as x. + * @param angle output array of vector angles; it has the same size and same + * type as x. + * + * @see org.opencv.core.Core.phase + */ + public static void phase(Mat x, Mat y, Mat angle) + { + + phase_1(x.nativeObj, y.nativeObj, angle.nativeObj); + + return; + } + + + // + // C++: void polarToCart(Mat magnitude, Mat angle, Mat& x, Mat& y, bool angleInDegrees = false) + // + +/** + *

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

+ * + *

The function polarToCart calculates the Cartesian coordinates of + * each 2D vector represented by the corresponding elements of magnitude + * and angle :

+ * + *

x(I) = magnitude(I) cos(angle(I)) + * y(I) = magnitude(I) sin(angle(I)) + *

+ * + *

The relative accuracy of the estimated coordinates is about 1e-6.

+ * + * @param magnitude input floating-point array of magnitudes of 2D vectors; it + * can be an empty matrix (=Mat()), in this case, the function + * assumes that all the magnitudes are =1; if it is not empty, it must have the + * same size and type as angle. + * @param angle input floating-point array of angles of 2D vectors. + * @param x output array of x-coordinates of 2D vectors; it has the same size + * and type as angle. + * @param y output array of y-coordinates of 2D vectors; it has the same size + * and type as angle. + * @param angleInDegrees when true, the input angles are measured in degrees, + * otherwise, they are measured in radians. + * + * @see org.opencv.core.Core.polarToCart + * @see org.opencv.core.Core#log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#phase + */ + public static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y, boolean angleInDegrees) + { + + polarToCart_0(magnitude.nativeObj, angle.nativeObj, x.nativeObj, y.nativeObj, angleInDegrees); + + return; + } + +/** + *

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

+ * + *

The function polarToCart calculates the Cartesian coordinates of + * each 2D vector represented by the corresponding elements of magnitude + * and angle :

+ * + *

x(I) = magnitude(I) cos(angle(I)) + * y(I) = magnitude(I) sin(angle(I)) + *

+ * + *

The relative accuracy of the estimated coordinates is about 1e-6.

+ * + * @param magnitude input floating-point array of magnitudes of 2D vectors; it + * can be an empty matrix (=Mat()), in this case, the function + * assumes that all the magnitudes are =1; if it is not empty, it must have the + * same size and type as angle. + * @param angle input floating-point array of angles of 2D vectors. + * @param x output array of x-coordinates of 2D vectors; it has the same size + * and type as angle. + * @param y output array of y-coordinates of 2D vectors; it has the same size + * and type as angle. + * + * @see org.opencv.core.Core.polarToCart + * @see org.opencv.core.Core#log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#phase + */ + public static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y) + { + + polarToCart_1(magnitude.nativeObj, angle.nativeObj, x.nativeObj, y.nativeObj); + + return; + } + + + // + // C++: void polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws several polygonal curves.

+ * + *

The function polylines draws one or more polygonal curves.

+ * + * @param img Image. + * @param pts Array of polygonal curves. + * @param isClosed Flag indicating whether the drawn polylines are closed or + * not. If they are closed, the function draws a line from the last vertex of + * each curve to its first vertex. + * @param color Polyline color. + * @param thickness Thickness of the polyline edges. + * @param lineType Type of the line segments. See the "line" description. + * @param shift Number of fractional bits in the vertex coordinates. + * + * @see org.opencv.core.Core.polylines + */ + public static void polylines(Mat img, List pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + polylines_0(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws several polygonal curves.

+ * + *

The function polylines draws one or more polygonal curves.

+ * + * @param img Image. + * @param pts Array of polygonal curves. + * @param isClosed Flag indicating whether the drawn polylines are closed or + * not. If they are closed, the function draws a line from the last vertex of + * each curve to its first vertex. + * @param color Polyline color. + * @param thickness Thickness of the polyline edges. + * + * @see org.opencv.core.Core.polylines + */ + public static void polylines(Mat img, List pts, boolean isClosed, Scalar color, int thickness) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + polylines_1(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws several polygonal curves.

+ * + *

The function polylines draws one or more polygonal curves.

+ * + * @param img Image. + * @param pts Array of polygonal curves. + * @param isClosed Flag indicating whether the drawn polylines are closed or + * not. If they are closed, the function draws a line from the last vertex of + * each curve to its first vertex. + * @param color Polyline color. + * + * @see org.opencv.core.Core.polylines + */ + public static void polylines(Mat img, List pts, boolean isClosed, Scalar color) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + polylines_2(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void pow(Mat src, double power, Mat& dst) + // + +/** + *

Raises every array element to a power.

+ * + *

The function pow raises every element of the input array to + * power :

+ * + *

dst(I) = src(I)^power if power is integer; |src(I)|^power + * otherwise<BR>So, for a non-integer power exponent, the absolute values of + * input array elements are used. However, it is possible to get true values for + * negative values using some extra operations. In the example below, computing + * the 5th root of array src shows: <BR><code>

+ * + *

// C++ code:

+ * + *

Mat mask = src < 0;

+ * + *

pow(src, 1./5, dst);

+ * + *

subtract(Scalar.all(0), dst, dst, mask);

+ * + *

For some values of power, such as integer values, 0.5 and -0.5, + * specialized faster algorithms are used. + *

+ * + *

Special values (NaN, Inf) are not handled.

+ * + * @param src input array. + * @param power exponent of power. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.pow + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#polarToCart + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#log + */ + public static void pow(Mat src, double power, Mat dst) + { + + pow_0(src.nativeObj, power, dst.nativeObj); + + return; + } + + + // + // C++: void putText(Mat img, string text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = 8, bool bottomLeftOrigin = false) + // + +/** + *

Draws a text string.

+ * + *

The function putText renders the specified text string in the + * image. + * Symbols that cannot be rendered using the specified font are replaced by + * question marks. See "getTextSize" for a text rendering code example.

+ * + * @param img Image. + * @param text Text string to be drawn. + * @param org Bottom-left corner of the text string in the image. + * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, + * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, + * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, + * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, + * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can + * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. + * @param fontScale Font scale factor that is multiplied by the font-specific + * base size. + * @param color Text color. + * @param thickness Thickness of the lines used to draw a text. + * @param lineType Line type. See the line for details. + * @param bottomLeftOrigin When true, the image data origin is at the + * bottom-left corner. Otherwise, it is at the top-left corner. + * + * @see org.opencv.core.Core.putText + */ + public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin) + { + + putText_0(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, bottomLeftOrigin); + + return; + } + +/** + *

Draws a text string.

+ * + *

The function putText renders the specified text string in the + * image. + * Symbols that cannot be rendered using the specified font are replaced by + * question marks. See "getTextSize" for a text rendering code example.

+ * + * @param img Image. + * @param text Text string to be drawn. + * @param org Bottom-left corner of the text string in the image. + * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, + * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, + * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, + * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, + * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can + * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. + * @param fontScale Font scale factor that is multiplied by the font-specific + * base size. + * @param color Text color. + * @param thickness Thickness of the lines used to draw a text. + * + * @see org.opencv.core.Core.putText + */ + public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness) + { + + putText_1(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a text string.

+ * + *

The function putText renders the specified text string in the + * image. + * Symbols that cannot be rendered using the specified font are replaced by + * question marks. See "getTextSize" for a text rendering code example.

+ * + * @param img Image. + * @param text Text string to be drawn. + * @param org Bottom-left corner of the text string in the image. + * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, + * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, + * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, + * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, + * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can + * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. + * @param fontScale Font scale factor that is multiplied by the font-specific + * base size. + * @param color Text color. + * + * @see org.opencv.core.Core.putText + */ + public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color) + { + + putText_2(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void randShuffle_(Mat& dst, double iterFactor = 1.) + // + + public static void randShuffle(Mat dst, double iterFactor) + { + + randShuffle_0(dst.nativeObj, iterFactor); + + return; + } + + public static void randShuffle(Mat dst) + { + + randShuffle_1(dst.nativeObj); + + return; + } + + + // + // C++: void randn(Mat& dst, double mean, double stddev) + // + +/** + *

Fills the array with normally distributed random numbers.

+ * + *

The function randn fills the matrix dst with + * normally distributed random numbers with the specified mean vector and the + * standard deviation matrix. The generated random numbers are clipped to fit + * the value range of the output array data type.

+ * + * @param dst output array of random numbers; the array must be pre-allocated + * and have 1 to 4 channels. + * @param mean mean value (expectation) of the generated random numbers. + * @param stddev standard deviation of the generated random numbers; it can be + * either a vector (in which case a diagonal standard deviation matrix is + * assumed) or a square matrix. + * + * @see org.opencv.core.Core.randn + * @see org.opencv.core.Core#randu + */ + public static void randn(Mat dst, double mean, double stddev) + { + + randn_0(dst.nativeObj, mean, stddev); + + return; + } + + + // + // C++: void randu(Mat& dst, double low, double high) + // + +/** + *

Generates a single uniformly-distributed random number or an array of random + * numbers.

+ * + *

The template functions randu generate and return the next + * uniformly-distributed random value of the specified type. randu() + * is an equivalent to (int)theRNG();, and so on. See "RNG" + * description.

+ * + *

The second non-template variant of the function fills the matrix + * dst with uniformly-distributed random numbers from the specified + * range:

+ * + *

low _c <= dst(I)_c < high _c

+ * + * @param dst output array of random numbers; the array must be pre-allocated. + * @param low inclusive lower boundary of the generated random numbers. + * @param high exclusive upper boundary of the generated random numbers. + * + * @see org.opencv.core.Core.randu + * @see org.opencv.core.Core#randn + */ + public static void randu(Mat dst, double low, double high) + { + + randu_0(dst.nativeObj, low, high); + + return; + } + + + // + // C++: void rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a simple, thick, or filled up-right rectangle.

+ * + *

The function rectangle draws a rectangle outline or a filled + * rectangle whose two opposite corners are pt1 and + * pt2, or r.tl() and r.br()-Point(1,1).

+ * + * @param img Image. + * @param pt1 Vertex of the rectangle. + * @param pt2 Vertex of the rectangle opposite to pt1. + * @param color Rectangle color or brightness (grayscale image). + * @param thickness Thickness of lines that make up the rectangle. Negative + * values, like CV_FILLED, mean that the function has to draw a + * filled rectangle. + * @param lineType Type of the line. See the "line" description. + * @param shift Number of fractional bits in the point coordinates. + * + * @see org.opencv.core.Core.rectangle + */ + public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) + { + + rectangle_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a simple, thick, or filled up-right rectangle.

+ * + *

The function rectangle draws a rectangle outline or a filled + * rectangle whose two opposite corners are pt1 and + * pt2, or r.tl() and r.br()-Point(1,1).

+ * + * @param img Image. + * @param pt1 Vertex of the rectangle. + * @param pt2 Vertex of the rectangle opposite to pt1. + * @param color Rectangle color or brightness (grayscale image). + * @param thickness Thickness of lines that make up the rectangle. Negative + * values, like CV_FILLED, mean that the function has to draw a + * filled rectangle. + * + * @see org.opencv.core.Core.rectangle + */ + public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness) + { + + rectangle_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a simple, thick, or filled up-right rectangle.

+ * + *

The function rectangle draws a rectangle outline or a filled + * rectangle whose two opposite corners are pt1 and + * pt2, or r.tl() and r.br()-Point(1,1).

+ * + * @param img Image. + * @param pt1 Vertex of the rectangle. + * @param pt2 Vertex of the rectangle opposite to pt1. + * @param color Rectangle color or brightness (grayscale image). + * + * @see org.opencv.core.Core.rectangle + */ + public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color) + { + + rectangle_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void reduce(Mat src, Mat& dst, int dim, int rtype, int dtype = -1) + // + +/** + *

Reduces a matrix to a vector.

+ * + *

The function reduce reduces the matrix to a vector by treating + * the matrix rows/columns as a set of 1D vectors and performing the specified + * operation on the vectors until a single row/column is obtained. For example, + * the function can be used to compute horizontal and vertical projections of a + * raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, + * the output may have a larger element bit-depth to preserve accuracy. And + * multi-channel arrays are also supported in these two reduction modes.

+ * + * @param src input 2D matrix. + * @param dst output vector. Its size and type is defined by dim + * and dtype parameters. + * @param dim dimension index along which the matrix is reduced. 0 means that + * the matrix is reduced to a single row. 1 means that the matrix is reduced to + * a single column. + * @param rtype reduction operation that could be one of the following: + *
    + *
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the + * matrix. + *
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of + * the matrix. + *
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all + * rows/columns of the matrix. + *
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all + * rows/columns of the matrix. + *
+ * @param dtype when negative, the output vector will have the same type as the + * input matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), + * src.channels()). + * + * @see org.opencv.core.Core.reduce + * @see org.opencv.core.Core#repeat + */ + public static void reduce(Mat src, Mat dst, int dim, int rtype, int dtype) + { + + reduce_0(src.nativeObj, dst.nativeObj, dim, rtype, dtype); + + return; + } + +/** + *

Reduces a matrix to a vector.

+ * + *

The function reduce reduces the matrix to a vector by treating + * the matrix rows/columns as a set of 1D vectors and performing the specified + * operation on the vectors until a single row/column is obtained. For example, + * the function can be used to compute horizontal and vertical projections of a + * raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, + * the output may have a larger element bit-depth to preserve accuracy. And + * multi-channel arrays are also supported in these two reduction modes.

+ * + * @param src input 2D matrix. + * @param dst output vector. Its size and type is defined by dim + * and dtype parameters. + * @param dim dimension index along which the matrix is reduced. 0 means that + * the matrix is reduced to a single row. 1 means that the matrix is reduced to + * a single column. + * @param rtype reduction operation that could be one of the following: + *
    + *
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the + * matrix. + *
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of + * the matrix. + *
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all + * rows/columns of the matrix. + *
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all + * rows/columns of the matrix. + *
+ * + * @see org.opencv.core.Core.reduce + * @see org.opencv.core.Core#repeat + */ + public static void reduce(Mat src, Mat dst, int dim, int rtype) + { + + reduce_1(src.nativeObj, dst.nativeObj, dim, rtype); + + return; + } + + + // + // C++: void repeat(Mat src, int ny, int nx, Mat& dst) + // + +/** + *

Fills the output array with repeated copies of the input array.

+ * + *

The functions "repeat" duplicate the input array one or more times along each + * of the two axes:

+ * + *

dst _(ij)= src _(i mod src.rows, j mod src.cols)

+ * + *

The second variant of the function is more convenient to use with + * "MatrixExpressions".

+ * + * @param src input array to replicate. + * @param ny Flag to specify how many times the src is repeated + * along the vertical axis. + * @param nx Flag to specify how many times the src is repeated + * along the horizontal axis. + * @param dst output array of the same type as src. + * + * @see org.opencv.core.Core.repeat + * @see org.opencv.core.Core#reduce + */ + public static void repeat(Mat src, int ny, int nx, Mat dst) + { + + repeat_0(src.nativeObj, ny, nx, dst.nativeObj); + + return; + } + + + // + // C++: void scaleAdd(Mat src1, double alpha, Mat src2, Mat& dst) + // + +/** + *

Calculates the sum of a scaled array and another array.

+ * + *

The function scaleAdd is one of the classical primitive linear + * algebra operations, known as DAXPY or SAXPY in BLAS + * (http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It + * calculates the sum of a scaled array and another array:

+ * + *

dst(I)= scale * src1(I) + src2(I)<BR>The function can also be + * emulated with a matrix expression, for example: <BR><code>

+ * + *

// C++ code:

+ * + *

Mat A(3, 3, CV_64F);...

+ * + *

A.row(0) = A.row(1)*2 + A.row(2);

+ * + * @param src1 first input array. + * @param alpha a alpha + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.scaleAdd + * @see org.opencv.core.Mat#dot + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void scaleAdd(Mat src1, double alpha, Mat src2, Mat dst) + { + + scaleAdd_0(src1.nativeObj, alpha, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void setErrorVerbosity(bool verbose) + // + + public static void setErrorVerbosity(boolean verbose) + { + + setErrorVerbosity_0(verbose); + + return; + } + + + // + // C++: void setIdentity(Mat& mtx, Scalar s = Scalar(1)) + // + +/** + *

Initializes a scaled identity matrix.

+ * + *

The function "setIdentity" initializes a scaled identity matrix:

+ * + *

mtx(i,j)= value if i=j; 0 otherwise<BR>The function can also be + * emulated using the matrix initializers and the matrix expressions: + * <BR><code>

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(4, 3, CV_32F)*5;

+ * + *

// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]

+ * + * @param mtx matrix to initialize (not necessarily square). + * @param s a s + * + * @see org.opencv.core.Core.setIdentity + * @see org.opencv.core.Mat#setTo + * @see org.opencv.core.Mat#ones + * @see org.opencv.core.Mat#zeros + */ + public static void setIdentity(Mat mtx, Scalar s) + { + + setIdentity_0(mtx.nativeObj, s.val[0], s.val[1], s.val[2], s.val[3]); + + return; + } + +/** + *

Initializes a scaled identity matrix.

+ * + *

The function "setIdentity" initializes a scaled identity matrix:

+ * + *

mtx(i,j)= value if i=j; 0 otherwise<BR>The function can also be + * emulated using the matrix initializers and the matrix expressions: + * <BR><code>

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(4, 3, CV_32F)*5;

+ * + *

// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]

+ * + * @param mtx matrix to initialize (not necessarily square). + * + * @see org.opencv.core.Core.setIdentity + * @see org.opencv.core.Mat#setTo + * @see org.opencv.core.Mat#ones + * @see org.opencv.core.Mat#zeros + */ + public static void setIdentity(Mat mtx) + { + + setIdentity_1(mtx.nativeObj); + + return; + } + + + // + // C++: bool solve(Mat src1, Mat src2, Mat& dst, int flags = DECOMP_LU) + // + +/** + *

Solves one or more linear systems or least-squares problems.

+ * + *

The function solve solves a linear system or least-squares + * problem (the latter is possible with SVD or QR methods, or by specifying the + * flag DECOMP_NORMAL):

+ * + *

dst = arg min _X|src1 * X - src2|

+ * + *

If DECOMP_LU or DECOMP_CHOLESKY method is used, the + * function returns 1 if src1 (or src1^Tsrc1) is + * non-singular. Otherwise, it returns 0. In the latter case, dst + * is not valid. Other methods find a pseudo-solution in case of a singular + * left-hand side part.

+ * + *

Note: If you want to find a unity-norm solution of an under-defined singular + * system src1*dst=0, the function solve will not do the + * work. Use "SVD.solveZ" instead.

+ * + * @param src1 input matrix on the left-hand side of the system. + * @param src2 input matrix on the right-hand side of the system. + * @param dst output solution. + * @param flags solution (matrix inversion) method. + *
    + *
  • DECOMP_LU Gaussian elimination with optimal pivot element chosen. + *
  • DECOMP_CHOLESKY Cholesky LL^T factorization; the matrix + * src1 must be symmetrical and positively defined. + *
  • DECOMP_EIG eigenvalue decomposition; the matrix src1 must + * be symmetrical. + *
  • DECOMP_SVD singular value decomposition (SVD) method; the system can + * be over-defined and/or the matrix src1 can be singular. + *
  • DECOMP_QR QR factorization; the system can be over-defined and/or the + * matrix src1 can be singular. + *
  • DECOMP_NORMAL while all the previous flags are mutually exclusive, + * this flag can be used together with any of the previous; it means that the + * normal equations src1^T*src1*dst=src1^Tsrc2 are solved instead of + * the original system src1*dst=src2. + *
+ * + * @see org.opencv.core.Core.solve + * @see org.opencv.core.Core#invert + * @see org.opencv.core.Core#eigen + */ + public static boolean solve(Mat src1, Mat src2, Mat dst, int flags) + { + + boolean retVal = solve_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, flags); + + return retVal; + } + +/** + *

Solves one or more linear systems or least-squares problems.

+ * + *

The function solve solves a linear system or least-squares + * problem (the latter is possible with SVD or QR methods, or by specifying the + * flag DECOMP_NORMAL):

+ * + *

dst = arg min _X|src1 * X - src2|

+ * + *

If DECOMP_LU or DECOMP_CHOLESKY method is used, the + * function returns 1 if src1 (or src1^Tsrc1) is + * non-singular. Otherwise, it returns 0. In the latter case, dst + * is not valid. Other methods find a pseudo-solution in case of a singular + * left-hand side part.

+ * + *

Note: If you want to find a unity-norm solution of an under-defined singular + * system src1*dst=0, the function solve will not do the + * work. Use "SVD.solveZ" instead.

+ * + * @param src1 input matrix on the left-hand side of the system. + * @param src2 input matrix on the right-hand side of the system. + * @param dst output solution. + * + * @see org.opencv.core.Core.solve + * @see org.opencv.core.Core#invert + * @see org.opencv.core.Core#eigen + */ + public static boolean solve(Mat src1, Mat src2, Mat dst) + { + + boolean retVal = solve_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return retVal; + } + + + // + // C++: int solveCubic(Mat coeffs, Mat& roots) + // + +/** + *

Finds the real roots of a cubic equation.

+ * + *

The function solveCubic finds the real roots of a cubic + * equation:

+ *
    + *
  • if coeffs is a 4-element vector: + *
+ * + *

coeffs [0] x^3 + coeffs [1] x^2 + coeffs [2] x + coeffs [3] = 0

+ * + *
    + *
  • if coeffs is a 3-element vector: + *
+ * + *

x^3 + coeffs [0] x^2 + coeffs [1] x + coeffs [2] = 0

+ * + *

The roots are stored in the roots array.

+ * + * @param coeffs equation coefficients, an array of 3 or 4 elements. + * @param roots output array of real roots that has 1 or 3 elements. + * + * @see org.opencv.core.Core.solveCubic + */ + public static int solveCubic(Mat coeffs, Mat roots) + { + + int retVal = solveCubic_0(coeffs.nativeObj, roots.nativeObj); + + return retVal; + } + + + // + // C++: double solvePoly(Mat coeffs, Mat& roots, int maxIters = 300) + // + +/** + *

Finds the real or complex roots of a polynomial equation.

+ * + *

The function solvePoly finds real and complex roots of a + * polynomial equation:

+ * + *

coeffs [n] x^(n) + coeffs [n-1] x^(n-1) +... + coeffs [1] x + coeffs [0] + * = 0

+ * + * @param coeffs array of polynomial coefficients. + * @param roots output (complex) array of roots. + * @param maxIters maximum number of iterations the algorithm does. + * + * @see org.opencv.core.Core.solvePoly + */ + public static double solvePoly(Mat coeffs, Mat roots, int maxIters) + { + + double retVal = solvePoly_0(coeffs.nativeObj, roots.nativeObj, maxIters); + + return retVal; + } + +/** + *

Finds the real or complex roots of a polynomial equation.

+ * + *

The function solvePoly finds real and complex roots of a + * polynomial equation:

+ * + *

coeffs [n] x^(n) + coeffs [n-1] x^(n-1) +... + coeffs [1] x + coeffs [0] + * = 0

+ * + * @param coeffs array of polynomial coefficients. + * @param roots output (complex) array of roots. + * + * @see org.opencv.core.Core.solvePoly + */ + public static double solvePoly(Mat coeffs, Mat roots) + { + + double retVal = solvePoly_1(coeffs.nativeObj, roots.nativeObj); + + return retVal; + } + + + // + // C++: void sort(Mat src, Mat& dst, int flags) + // + +/** + *

Sorts each row or each column of a matrix.

+ * + *

The function sort sorts each matrix row or each matrix column in + * ascending or descending order. So you should pass two operation flags to get + * desired behaviour. If you want to sort matrix rows or columns + * lexicographically, you can use STL std.sort generic function + * with the proper comparison predicate.

+ * + * @param src input single-channel array. + * @param dst output array of the same size and type as src. + * @param flags operation flags, a combination of the following values: + *
    + *
  • CV_SORT_EVERY_ROW each matrix row is sorted independently. + *
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this + * flag and the previous one are mutually exclusive. + *
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order. + *
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; + * this flag and the previous one are also mutually exclusive. + *
+ * + * @see org.opencv.core.Core.sort + * @see org.opencv.core.Core#randShuffle + * @see org.opencv.core.Core#sortIdx + */ + public static void sort(Mat src, Mat dst, int flags) + { + + sort_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + + + // + // C++: void sortIdx(Mat src, Mat& dst, int flags) + // + +/** + *

Sorts each row or each column of a matrix.

+ * + *

The function sortIdx sorts each matrix row or each matrix column + * in the ascending or descending order. So you should pass two operation flags + * to get desired behaviour. Instead of reordering the elements themselves, it + * stores the indices of sorted elements in the output array. For example: + *

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(3,3,CV_32F), B;

+ * + *

sortIdx(A, B, CV_SORT_EVERY_ROW + CV_SORT_ASCENDING);

+ * + *

// B will probably contain

+ * + *

// (because of equal elements in A some permutations are possible):

+ * + *

// [[1, 2, 0], [0, 2, 1], [0, 1, 2]]

+ * + * @param src input single-channel array. + * @param dst output integer array of the same size as src. + * @param flags operation flags that could be a combination of the following + * values: + *
    + *
  • CV_SORT_EVERY_ROW each matrix row is sorted independently. + *
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this + * flag and the previous one are mutually exclusive. + *
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order. + *
• CV_SORT_DESCENDING each matrix row is sorted in the descending order; + * this flag and the previous one are also mutually exclusive. + *
+ * + * @see org.opencv.core.Core.sortIdx + * @see org.opencv.core.Core#sort + * @see org.opencv.core.Core#randShuffle + */ + public static void sortIdx(Mat src, Mat dst, int flags) + { + + sortIdx_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + + + // + // C++: void split(Mat m, vector_Mat& mv) + // + +/** + *

Divides a multi-channel array into several single-channel arrays.

+ * + *

The functions split split a multi-channel array into separate + * single-channel arrays:

+ * + *

mv [c](I) = src(I)_c

+ * + *

If you need to extract a single channel or do some other sophisticated + * channel permutation, use "mixChannels".

+ * + * @param m a m + * @param mv output array or vector of arrays; in the first variant of the + * function the number of arrays must match src.channels(); the + * arrays themselves are reallocated, if needed. + * + * @see org.opencv.core.Core.split + * @see org.opencv.core.Core#merge + * @see org.opencv.imgproc.Imgproc#cvtColor + * @see org.opencv.core.Core#mixChannels + */ + public static void split(Mat m, List mv) + { + Mat mv_mat = new Mat(); + split_0(m.nativeObj, mv_mat.nativeObj); + Converters.Mat_to_vector_Mat(mv_mat, mv); + return; + } + + + // + // C++: void sqrt(Mat src, Mat& dst) + // + +/** + *

Calculates a square root of array elements.

+ * + *

The functions sqrt calculate a square root of each input array + * element. In case of multi-channel arrays, each channel is processed + * independently. The accuracy is approximately the same as of the built-in + * std.sqrt.

+ * + * @param src input floating-point array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.sqrt + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#magnitude + */ + public static void sqrt(Mat src, Mat dst) + { + + sqrt_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void subtract(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. Depth of the output array is determined + * by dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the details below). + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Mat src2, Mat dst, Mat mask, int dtype) + { + + subtract_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. Depth of the output array is determined + * by dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Mat src2, Mat dst, Mat mask) + { + + subtract_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. Depth of the output array is determined + * by dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Mat src2, Mat dst) + { + + subtract_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void subtract(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. Depth of the output array is determined + * by dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the details below). + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype) + { + + subtract_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. Depth of the output array is determined + * by dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask) + { + + subtract_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. Depth of the output array is determined + * by dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Scalar src2, Mat dst) + { + + subtract_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: Scalar sum(Mat src) + // + +/** + *

Calculates the sum of array elements.

+ * + *

The functions sum calculate and return the sum of array + * elements, independently for each channel.

+ * + * @param src a src + * + * @see org.opencv.core.Core.sum + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static Scalar sumElems(Mat src) + { + + Scalar retVal = new Scalar(sumElems_0(src.nativeObj)); + + return retVal; + } + + + // + // C++: Scalar trace(Mat mtx) + // + +/** + *

Returns the trace of a matrix.

+ * + *

The function trace returns the sum of the diagonal elements of + * the matrix mtx.

+ * + *

tr(mtx) = sum _i mtx(i,i)

+ * + * @param mtx a mtx + * + * @see org.opencv.core.Core.trace + */ + public static Scalar trace(Mat mtx) + { + + Scalar retVal = new Scalar(trace_0(mtx.nativeObj)); + + return retVal; + } + + + // + // C++: void transform(Mat src, Mat& dst, Mat m) + // + +/** + *

Performs the matrix transformation of every array element.

+ * + *

The function transform performs the matrix transformation of + * every element of the array src and stores the results in + * dst :

+ * + *

dst(I) = m * src(I)

+ * + *

(when m.cols=src.channels()), or

+ * + *

dst(I) = m * [ src(I); 1]

+ * + *

(when m.cols=src.channels()+1)

+ * + *

Every element of the N -channel array src is + * interpreted as N -element vector that is transformed using the + * M x N or M x (N+1) matrix m to + * M-element vector - the corresponding element of the output array + * dst.

+ * + *

The function may be used for geometrical transformation of N + * -dimensional points, arbitrary linear color space transformation (such as + * various kinds of RGB to YUV transforms), shuffling the image channels, and so + * forth.

+ * + * @param src input array that must have as many channels (1 to 4) as + * m.cols or m.cols-1. + * @param dst output array of the same size and depth as src; it + * has as many channels as m.rows. + * @param m transformation 2x2 or 2x3 floating-point + * matrix. + * + * @see org.opencv.core.Core.transform + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.video.Video#estimateRigidTransform + */ + public static void transform(Mat src, Mat dst, Mat m) + { + + transform_0(src.nativeObj, dst.nativeObj, m.nativeObj); + + return; + } + + + // + // C++: void transpose(Mat src, Mat& dst) + // + +/** + *

Transposes a matrix.

+ * + *

The function "transpose" transposes the matrix src :

+ * + *

dst(i,j) = src(j,i)

+ * + *

Note: No complex conjugation is done in case of a complex matrix. It + * should be done separately if needed.

+ * + * @param src input array. + * @param dst output array of the same type as src. + * + * @see org.opencv.core.Core.transpose + */ + public static void transpose(Mat src, Mat dst) + { + + transpose_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void vconcat(vector_Mat src, Mat& dst) + // + + public static void vconcat(List src, Mat dst) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + vconcat_0(src_mat.nativeObj, dst.nativeObj); + + return; + } + + + // manual port + public static class MinMaxLocResult { + public double minVal; + public double maxVal; + public Point minLoc; + public Point maxLoc; + + public MinMaxLocResult() { + minVal=0; maxVal=0; + minLoc=new Point(); + maxLoc=new Point(); + } + } + + // C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray()) + +/** + *

Finds the global minimum and maximum in an array.

+ * + *

The functions minMaxLoc find the minimum and maximum element + * values and their positions. The extremums are searched across the whole array + * or, if mask is not an empty array, in the specified array + * region.

+ * + *

The functions do not work with multi-channel arrays. If you need to find + * minimum or maximum elements across all the channels, use "Mat.reshape" first + * to reinterpret the array as single-channel. Or you may extract the particular + * channel using either "extractImageCOI", or "mixChannels", or "split".

+ * + * @param src input single-channel array. + * @param mask optional mask used to select a sub-array. + * + * @see org.opencv.core.Core.minMaxLoc + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#min + * @see org.opencv.core.Core#mixChannels + * @see org.opencv.core.Mat#reshape + * @see org.opencv.core.Core#split + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#inRange + */ + public static MinMaxLocResult minMaxLoc(Mat src, Mat mask) { + MinMaxLocResult res = new MinMaxLocResult(); + long maskNativeObj=0; + if (mask != null) { + maskNativeObj=mask.nativeObj; + } + double resarr[] = n_minMaxLocManual(src.nativeObj, maskNativeObj); + res.minVal=resarr[0]; + res.maxVal=resarr[1]; + res.minLoc.x=resarr[2]; + res.minLoc.y=resarr[3]; + res.maxLoc.x=resarr[4]; + res.maxLoc.y=resarr[5]; + return res; + } + +/** + *

Finds the global minimum and maximum in an array.

+ * + *

The functions minMaxLoc find the minimum and maximum element + * values and their positions. The extremums are searched across the whole array + * or, if mask is not an empty array, in the specified array + * region.

+ * + *

The functions do not work with multi-channel arrays. If you need to find + * minimum or maximum elements across all the channels, use "Mat.reshape" first + * to reinterpret the array as single-channel. Or you may extract the particular + * channel using either "extractImageCOI", or "mixChannels", or "split".

+ * + * @param src input single-channel array. + * + * @see org.opencv.core.Core.minMaxLoc + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#min + * @see org.opencv.core.Core#mixChannels + * @see org.opencv.core.Mat#reshape + * @see org.opencv.core.Core#split + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#inRange + */ + public static MinMaxLocResult minMaxLoc(Mat src) { + return minMaxLoc(src, null); + } + + + // C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine); +/** + *

Calculates the width and height of a text string.

+ * + *

The function getTextSize calculates and returns the size of a + * box that contains the specified text. That is, the following code renders some + * text, the tight box surrounding it, and the baseline:

+ * + *

// C++ code:

+ * + *

string text = "Funny text inside the box";

+ * + *

int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;

+ * + *

double fontScale = 2;

+ * + *

int thickness = 3;

+ * + *

Mat img(600, 800, CV_8UC3, Scalar.all(0));

+ * + *

int baseline=0;

+ * + *

Size textSize = getTextSize(text, fontFace,

+ * + *

fontScale, thickness, &baseline);

+ * + *

baseline += thickness;

+ * + *

// center the text

+ * + *

Point textOrg((img.cols - textSize.width)/2,

+ * + *

(img.rows + textSize.height)/2);

+ * + *

// draw the box

+ * + *

rectangle(img, textOrg + Point(0, baseline),

+ * + *

textOrg + Point(textSize.width, -textSize.height),

+ * + *

Scalar(0,0,255));

+ * + *

//... and the baseline first

+ * + *

line(img, textOrg + Point(0, thickness),

+ * + *

textOrg + Point(textSize.width, thickness),

+ * + *

Scalar(0, 0, 255));

+ * + *

// then put the text itself

+ * + *

putText(img, text, textOrg, fontFace, fontScale,

+ * + *

Scalar.all(255), thickness, 8);

+ * + * @param text Input text string. + * @param fontFace Font to use. See the "putText" for details. + * @param fontScale Font scale. See the "putText" for details. + * @param thickness Thickness of lines used to render the text. See "putText" + * for details. + * @param baseLine Output parameter - y-coordinate of the baseline relative to + * the bottom-most text point. + * + * @see org.opencv.core.Core.getTextSize + */ + public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) { + if(baseLine != null && baseLine.length != 1) + throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'."); + Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine)); + return retVal; + } + + + + // C++: void LUT(Mat src, Mat lut, Mat& dst, int interpolation = 0) + private static native void LUT_0(long src_nativeObj, long lut_nativeObj, long dst_nativeObj, int interpolation); + private static native void LUT_1(long src_nativeObj, long lut_nativeObj, long dst_nativeObj); + + // C++: double Mahalanobis(Mat v1, Mat v2, Mat icovar) + private static native double Mahalanobis_0(long v1_nativeObj, long v2_nativeObj, long icovar_nativeObj); + + // C++: void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + private static native void PCABackProject_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, long result_nativeObj); + + // C++: void PCACompute(Mat data, Mat& mean, Mat& eigenvectors, int maxComponents = 0) + private static native void PCACompute_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, int maxComponents); + private static native void PCACompute_1(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj); + + // C++: void PCAComputeVar(Mat data, Mat& mean, Mat& eigenvectors, double retainedVariance) + private static native void PCAComputeVar_0(long data_nativeObj, long mean_nativeObj, long 
eigenvectors_nativeObj, double retainedVariance); + + // C++: void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + private static native void PCAProject_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, long result_nativeObj); + + // C++: void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat& dst) + private static native void SVBackSubst_0(long w_nativeObj, long u_nativeObj, long vt_nativeObj, long rhs_nativeObj, long dst_nativeObj); + + // C++: void SVDecomp(Mat src, Mat& w, Mat& u, Mat& vt, int flags = 0) + private static native void SVDecomp_0(long src_nativeObj, long w_nativeObj, long u_nativeObj, long vt_nativeObj, int flags); + private static native void SVDecomp_1(long src_nativeObj, long w_nativeObj, long u_nativeObj, long vt_nativeObj); + + // C++: void absdiff(Mat src1, Mat src2, Mat& dst) + private static native void absdiff_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void absdiff(Mat src1, Scalar src2, Mat& dst) + private static native void absdiff_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void add_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void add_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void add_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void add(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void add_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void add_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long 
dst_nativeObj, long mask_nativeObj); + private static native void add_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat& dst, int dtype = -1) + private static native void addWeighted_0(long src1_nativeObj, double alpha, long src2_nativeObj, double beta, double gamma, long dst_nativeObj, int dtype); + private static native void addWeighted_1(long src1_nativeObj, double alpha, long src2_nativeObj, double beta, double gamma, long dst_nativeObj); + + // C++: void batchDistance(Mat src1, Mat src2, Mat& dist, int dtype, Mat& nidx, int normType = NORM_L2, int K = 0, Mat mask = Mat(), int update = 0, bool crosscheck = false) + private static native void batchDistance_0(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj, int normType, int K, long mask_nativeObj, int update, boolean crosscheck); + private static native void batchDistance_1(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj, int normType, int K); + private static native void batchDistance_2(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj); + + // C++: void bitwise_and(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void bitwise_and_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_and_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void bitwise_not(Mat src, Mat& dst, Mat mask = Mat()) + private static native void bitwise_not_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_not_1(long src_nativeObj, long dst_nativeObj); + + // C++: void bitwise_or(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void bitwise_or_0(long 
src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_or_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void bitwise_xor(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void bitwise_xor_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_xor_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void calcCovarMatrix(Mat samples, Mat& covar, Mat& mean, int flags, int ctype = CV_64F) + private static native void calcCovarMatrix_0(long samples_nativeObj, long covar_nativeObj, long mean_nativeObj, int flags, int ctype); + private static native void calcCovarMatrix_1(long samples_nativeObj, long covar_nativeObj, long mean_nativeObj, int flags); + + // C++: void cartToPolar(Mat x, Mat y, Mat& magnitude, Mat& angle, bool angleInDegrees = false) + private static native void cartToPolar_0(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj, long angle_nativeObj, boolean angleInDegrees); + private static native void cartToPolar_1(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj, long angle_nativeObj); + + // C++: bool checkRange(Mat a, bool quiet = true, _hidden_ * pos = 0, double minVal = -DBL_MAX, double maxVal = DBL_MAX) + private static native boolean checkRange_0(long a_nativeObj, boolean quiet, double minVal, double maxVal); + private static native boolean checkRange_1(long a_nativeObj); + + // C++: void circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void circle_0(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void circle_1(long img_nativeObj, double center_x, double center_y, int radius, double 
color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void circle_2(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: bool clipLine(Rect imgRect, Point& pt1, Point& pt2) + private static native boolean clipLine_0(int imgRect_x, int imgRect_y, int imgRect_width, int imgRect_height, double pt1_x, double pt1_y, double[] pt1_out, double pt2_x, double pt2_y, double[] pt2_out); + + // C++: void compare(Mat src1, Mat src2, Mat& dst, int cmpop) + private static native void compare_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int cmpop); + + // C++: void compare(Mat src1, Scalar src2, Mat& dst, int cmpop) + private static native void compare_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, int cmpop); + + // C++: void completeSymm(Mat& mtx, bool lowerToUpper = false) + private static native void completeSymm_0(long mtx_nativeObj, boolean lowerToUpper); + private static native void completeSymm_1(long mtx_nativeObj); + + // C++: void convertScaleAbs(Mat src, Mat& dst, double alpha = 1, double beta = 0) + private static native void convertScaleAbs_0(long src_nativeObj, long dst_nativeObj, double alpha, double beta); + private static native void convertScaleAbs_1(long src_nativeObj, long dst_nativeObj); + + // C++: int countNonZero(Mat src) + private static native int countNonZero_0(long src_nativeObj); + + // C++: float cubeRoot(float val) + private static native float cubeRoot_0(float val); + + // C++: void dct(Mat src, Mat& dst, int flags = 0) + private static native void dct_0(long src_nativeObj, long dst_nativeObj, int flags); + private static native void dct_1(long src_nativeObj, long dst_nativeObj); + + // C++: double determinant(Mat mtx) + private static native double determinant_0(long mtx_nativeObj); + + // C++: void dft(Mat 
src, Mat& dst, int flags = 0, int nonzeroRows = 0) + private static native void dft_0(long src_nativeObj, long dst_nativeObj, int flags, int nonzeroRows); + private static native void dft_1(long src_nativeObj, long dst_nativeObj); + + // C++: void divide(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void divide_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale, int dtype); + private static native void divide_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale); + private static native void divide_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void divide(double scale, Mat src2, Mat& dst, int dtype = -1) + private static native void divide_3(double scale, long src2_nativeObj, long dst_nativeObj, int dtype); + private static native void divide_4(double scale, long src2_nativeObj, long dst_nativeObj); + + // C++: void divide(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void divide_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale, int dtype); + private static native void divide_6(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale); + private static native void divide_7(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: bool eigen(Mat src, bool computeEigenvectors, Mat& eigenvalues, Mat& eigenvectors) + private static native boolean eigen_0(long src_nativeObj, boolean computeEigenvectors, long eigenvalues_nativeObj, long eigenvectors_nativeObj); + + // C++: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void ellipse_0(long img_nativeObj, 
double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void ellipse_1(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void ellipse_2(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = 8) + private static native void ellipse_3(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); + private static native void ellipse_4(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void ellipse_5(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts) + private static native void ellipse2Poly_0(double center_x, double center_y, double axes_width, double axes_height, int angle, int arcStart, int arcEnd, int delta, long pts_mat_nativeObj); + + // C++: void exp(Mat src, Mat& 
dst) + private static native void exp_0(long src_nativeObj, long dst_nativeObj); + + // C++: void extractChannel(Mat src, Mat& dst, int coi) + private static native void extractChannel_0(long src_nativeObj, long dst_nativeObj, int coi); + + // C++: float fastAtan2(float y, float x) + private static native float fastAtan2_0(float y, float x); + + // C++: void fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = 8, int shift = 0) + private static native void fillConvexPoly_0(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift); + private static native void fillConvexPoly_1(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = 8, int shift = 0, Point offset = Point()) + private static native void fillPoly_0(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift, double offset_x, double offset_y); + private static native void fillPoly_1(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void findNonZero(Mat src, Mat& idx) + private static native void findNonZero_0(long src_nativeObj, long idx_nativeObj); + + // C++: void flip(Mat src, Mat& dst, int flipCode) + private static native void flip_0(long src_nativeObj, long dst_nativeObj, int flipCode); + + // C++: void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat& dst, int flags = 0) + private static native void gemm_0(long src1_nativeObj, long src2_nativeObj, double alpha, long src3_nativeObj, double gamma, long dst_nativeObj, int flags); + private static native void gemm_1(long src1_nativeObj, long src2_nativeObj, double alpha, long src3_nativeObj, double 
gamma, long dst_nativeObj); + + // C++: string getBuildInformation() + private static native String getBuildInformation_0(); + + // C++: int64 getCPUTickCount() + private static native long getCPUTickCount_0(); + + // C++: int getNumberOfCPUs() + private static native int getNumberOfCPUs_0(); + + // C++: int getOptimalDFTSize(int vecsize) + private static native int getOptimalDFTSize_0(int vecsize); + + // C++: int64 getTickCount() + private static native long getTickCount_0(); + + // C++: double getTickFrequency() + private static native double getTickFrequency_0(); + + // C++: void hconcat(vector_Mat src, Mat& dst) + private static native void hconcat_0(long src_mat_nativeObj, long dst_nativeObj); + + // C++: void idct(Mat src, Mat& dst, int flags = 0) + private static native void idct_0(long src_nativeObj, long dst_nativeObj, int flags); + private static native void idct_1(long src_nativeObj, long dst_nativeObj); + + // C++: void idft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + private static native void idft_0(long src_nativeObj, long dst_nativeObj, int flags, int nonzeroRows); + private static native void idft_1(long src_nativeObj, long dst_nativeObj); + + // C++: void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat& dst) + private static native void inRange_0(long src_nativeObj, double lowerb_val0, double lowerb_val1, double lowerb_val2, double lowerb_val3, double upperb_val0, double upperb_val1, double upperb_val2, double upperb_val3, long dst_nativeObj); + + // C++: void insertChannel(Mat src, Mat& dst, int coi) + private static native void insertChannel_0(long src_nativeObj, long dst_nativeObj, int coi); + + // C++: double invert(Mat src, Mat& dst, int flags = DECOMP_LU) + private static native double invert_0(long src_nativeObj, long dst_nativeObj, int flags); + private static native double invert_1(long src_nativeObj, long dst_nativeObj); + + // C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int 
flags, Mat& centers = Mat()) + private static native double kmeans_0(long data_nativeObj, int K, long bestLabels_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int attempts, int flags, long centers_nativeObj); + private static native double kmeans_1(long data_nativeObj, int K, long bestLabels_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int attempts, int flags); + + // C++: void line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void line_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void line_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void line_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void log(Mat src, Mat& dst) + private static native void log_0(long src_nativeObj, long dst_nativeObj); + + // C++: void magnitude(Mat x, Mat y, Mat& magnitude) + private static native void magnitude_0(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj); + + // C++: void max(Mat src1, Mat src2, Mat& dst) + private static native void max_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void max(Mat src1, Scalar src2, Mat& dst) + private static native void max_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: Scalar mean(Mat src, Mat mask = Mat()) + private static native double[] mean_0(long src_nativeObj, long mask_nativeObj); + private static native double[] mean_1(long src_nativeObj); + + 
// C++: void meanStdDev(Mat src, vector_double& mean, vector_double& stddev, Mat mask = Mat()) + private static native void meanStdDev_0(long src_nativeObj, long mean_mat_nativeObj, long stddev_mat_nativeObj, long mask_nativeObj); + private static native void meanStdDev_1(long src_nativeObj, long mean_mat_nativeObj, long stddev_mat_nativeObj); + + // C++: void merge(vector_Mat mv, Mat& dst) + private static native void merge_0(long mv_mat_nativeObj, long dst_nativeObj); + + // C++: void min(Mat src1, Mat src2, Mat& dst) + private static native void min_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void min(Mat src1, Scalar src2, Mat& dst) + private static native void min_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: void mixChannels(vector_Mat src, vector_Mat dst, vector_int fromTo) + private static native void mixChannels_0(long src_mat_nativeObj, long dst_mat_nativeObj, long fromTo_mat_nativeObj); + + // C++: void mulSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) + private static native void mulSpectrums_0(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags, boolean conjB); + private static native void mulSpectrums_1(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags); + + // C++: void mulTransposed(Mat src, Mat& dst, bool aTa, Mat delta = Mat(), double scale = 1, int dtype = -1) + private static native void mulTransposed_0(long src_nativeObj, long dst_nativeObj, boolean aTa, long delta_nativeObj, double scale, int dtype); + private static native void mulTransposed_1(long src_nativeObj, long dst_nativeObj, boolean aTa, long delta_nativeObj, double scale); + private static native void mulTransposed_2(long src_nativeObj, long dst_nativeObj, boolean aTa); + + // C++: void multiply(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void multiply_0(long src1_nativeObj, long 
src2_nativeObj, long dst_nativeObj, double scale, int dtype); + private static native void multiply_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale); + private static native void multiply_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void multiply(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void multiply_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale, int dtype); + private static native void multiply_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale); + private static native void multiply_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: double norm(Mat src1, int normType = NORM_L2, Mat mask = Mat()) + private static native double norm_0(long src1_nativeObj, int normType, long mask_nativeObj); + private static native double norm_1(long src1_nativeObj, int normType); + private static native double norm_2(long src1_nativeObj); + + // C++: double norm(Mat src1, Mat src2, int normType = NORM_L2, Mat mask = Mat()) + private static native double norm_3(long src1_nativeObj, long src2_nativeObj, int normType, long mask_nativeObj); + private static native double norm_4(long src1_nativeObj, long src2_nativeObj, int normType); + private static native double norm_5(long src1_nativeObj, long src2_nativeObj); + + // C++: void normalize(Mat src, Mat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, Mat mask = Mat()) + private static native void normalize_0(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type, int dtype, long mask_nativeObj); + private static native void normalize_1(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type, int dtype); + private 
static native void normalize_2(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type); + private static native void normalize_3(long src_nativeObj, long dst_nativeObj); + + // C++: void patchNaNs(Mat& a, double val = 0) + private static native void patchNaNs_0(long a_nativeObj, double val); + private static native void patchNaNs_1(long a_nativeObj); + + // C++: void perspectiveTransform(Mat src, Mat& dst, Mat m) + private static native void perspectiveTransform_0(long src_nativeObj, long dst_nativeObj, long m_nativeObj); + + // C++: void phase(Mat x, Mat y, Mat& angle, bool angleInDegrees = false) + private static native void phase_0(long x_nativeObj, long y_nativeObj, long angle_nativeObj, boolean angleInDegrees); + private static native void phase_1(long x_nativeObj, long y_nativeObj, long angle_nativeObj); + + // C++: void polarToCart(Mat magnitude, Mat angle, Mat& x, Mat& y, bool angleInDegrees = false) + private static native void polarToCart_0(long magnitude_nativeObj, long angle_nativeObj, long x_nativeObj, long y_nativeObj, boolean angleInDegrees); + private static native void polarToCart_1(long magnitude_nativeObj, long angle_nativeObj, long x_nativeObj, long y_nativeObj); + + // C++: void polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void polylines_0(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void polylines_1(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void polylines_2(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void pow(Mat src, 
double power, Mat& dst) + private static native void pow_0(long src_nativeObj, double power, long dst_nativeObj); + + // C++: void putText(Mat img, string text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = 8, bool bottomLeftOrigin = false) + private static native void putText_0(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, boolean bottomLeftOrigin); + private static native void putText_1(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void putText_2(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void randShuffle_(Mat& dst, double iterFactor = 1.) 
+ private static native void randShuffle_0(long dst_nativeObj, double iterFactor); + private static native void randShuffle_1(long dst_nativeObj); + + // C++: void randn(Mat& dst, double mean, double stddev) + private static native void randn_0(long dst_nativeObj, double mean, double stddev); + + // C++: void randu(Mat& dst, double low, double high) + private static native void randu_0(long dst_nativeObj, double low, double high); + + // C++: void rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void rectangle_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void rectangle_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void rectangle_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void reduce(Mat src, Mat& dst, int dim, int rtype, int dtype = -1) + private static native void reduce_0(long src_nativeObj, long dst_nativeObj, int dim, int rtype, int dtype); + private static native void reduce_1(long src_nativeObj, long dst_nativeObj, int dim, int rtype); + + // C++: void repeat(Mat src, int ny, int nx, Mat& dst) + private static native void repeat_0(long src_nativeObj, int ny, int nx, long dst_nativeObj); + + // C++: void scaleAdd(Mat src1, double alpha, Mat src2, Mat& dst) + private static native void scaleAdd_0(long src1_nativeObj, double alpha, long src2_nativeObj, long dst_nativeObj); + + // C++: void setErrorVerbosity(bool verbose) + private static native void setErrorVerbosity_0(boolean verbose); + + // C++: void setIdentity(Mat& mtx, Scalar s = 
Scalar(1)) + private static native void setIdentity_0(long mtx_nativeObj, double s_val0, double s_val1, double s_val2, double s_val3); + private static native void setIdentity_1(long mtx_nativeObj); + + // C++: bool solve(Mat src1, Mat src2, Mat& dst, int flags = DECOMP_LU) + private static native boolean solve_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int flags); + private static native boolean solve_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: int solveCubic(Mat coeffs, Mat& roots) + private static native int solveCubic_0(long coeffs_nativeObj, long roots_nativeObj); + + // C++: double solvePoly(Mat coeffs, Mat& roots, int maxIters = 300) + private static native double solvePoly_0(long coeffs_nativeObj, long roots_nativeObj, int maxIters); + private static native double solvePoly_1(long coeffs_nativeObj, long roots_nativeObj); + + // C++: void sort(Mat src, Mat& dst, int flags) + private static native void sort_0(long src_nativeObj, long dst_nativeObj, int flags); + + // C++: void sortIdx(Mat src, Mat& dst, int flags) + private static native void sortIdx_0(long src_nativeObj, long dst_nativeObj, int flags); + + // C++: void split(Mat m, vector_Mat& mv) + private static native void split_0(long m_nativeObj, long mv_mat_nativeObj); + + // C++: void sqrt(Mat src, Mat& dst) + private static native void sqrt_0(long src_nativeObj, long dst_nativeObj); + + // C++: void subtract(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void subtract_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void subtract_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void subtract_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void subtract(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void 
subtract_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void subtract_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj); + private static native void subtract_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: Scalar sum(Mat src) + private static native double[] sumElems_0(long src_nativeObj); + + // C++: Scalar trace(Mat mtx) + private static native double[] trace_0(long mtx_nativeObj); + + // C++: void transform(Mat src, Mat& dst, Mat m) + private static native void transform_0(long src_nativeObj, long dst_nativeObj, long m_nativeObj); + + // C++: void transpose(Mat src, Mat& dst) + private static native void transpose_0(long src_nativeObj, long dst_nativeObj); + + // C++: void vconcat(vector_Mat src, Mat& dst) + private static native void vconcat_0(long src_mat_nativeObj, long dst_nativeObj); + private static native double[] n_minMaxLocManual(long src_nativeObj, long mask_nativeObj); + private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine); + +} diff --git a/src/org/opencv/core/CvException.java b/src/org/opencv/core/CvException.java new file mode 100644 index 0000000..e9241e6 --- /dev/null +++ b/src/org/opencv/core/CvException.java @@ -0,0 +1,15 @@ +package org.opencv.core; + +public class CvException extends RuntimeException { + + private static final long serialVersionUID = 1L; + + public CvException(String msg) { + super(msg); + } + + @Override + public String toString() { + return "CvException [" + super.toString() + "]"; + } +} diff --git a/src/org/opencv/core/CvType.java b/src/org/opencv/core/CvType.java new file mode 100644 index 0000000..748c1cd --- /dev/null +++ b/src/org/opencv/core/CvType.java @@ -0,0 
/**
 * Helpers for OpenCV's packed matrix type codes.
 *
 * A type code packs a primitive depth identifier (low CV_CN_SHIFT bits)
 * together with the channel count minus one (remaining bits), e.g.
 * CV_8UC3 = CV_8U depth with 3 channels.
 */
public final class CvType {

    // Primitive depth identifiers (occupy the low 3 bits of a type code).
    public static final int
            CV_8U = 0, CV_8S = 1,
            CV_16U = 2, CV_16S = 3,
            CV_32S = 4,
            CV_32F = 5,
            CV_64F = 6,
            CV_USRTYPE1 = 7;

    // Precomputed codes for the common 1..4-channel types.
    public static final int
            CV_8UC1 = CV_8UC(1), CV_8UC2 = CV_8UC(2), CV_8UC3 = CV_8UC(3), CV_8UC4 = CV_8UC(4),
            CV_8SC1 = CV_8SC(1), CV_8SC2 = CV_8SC(2), CV_8SC3 = CV_8SC(3), CV_8SC4 = CV_8SC(4),
            CV_16UC1 = CV_16UC(1), CV_16UC2 = CV_16UC(2), CV_16UC3 = CV_16UC(3), CV_16UC4 = CV_16UC(4),
            CV_16SC1 = CV_16SC(1), CV_16SC2 = CV_16SC(2), CV_16SC3 = CV_16SC(3), CV_16SC4 = CV_16SC(4),
            CV_32SC1 = CV_32SC(1), CV_32SC2 = CV_32SC(2), CV_32SC3 = CV_32SC(3), CV_32SC4 = CV_32SC(4),
            CV_32FC1 = CV_32FC(1), CV_32FC2 = CV_32FC(2), CV_32FC3 = CV_32FC(3), CV_32FC4 = CV_32FC(4),
            CV_64FC1 = CV_64FC(1), CV_64FC2 = CV_64FC(2), CV_64FC3 = CV_64FC(3), CV_64FC4 = CV_64FC(4);

    private static final int CV_CN_MAX = 512, CV_CN_SHIFT = 3, CV_DEPTH_MAX = (1 << CV_CN_SHIFT);

    /**
     * Packs a depth identifier and a channel count into one type code.
     *
     * @param depth one of CV_8U..CV_USRTYPE1
     * @param channels channel count, 1..CV_CN_MAX-1
     * @throws UnsupportedOperationException if either argument is out of range
     */
    public static final int makeType(int depth, int channels) {
        if (channels <= 0 || channels >= CV_CN_MAX) {
            throw new java.lang.UnsupportedOperationException(
                    "Channels count should be 1.." + (CV_CN_MAX - 1));
        }
        if (depth < 0 || depth >= CV_DEPTH_MAX) {
            throw new java.lang.UnsupportedOperationException(
                    "Data type depth should be 0.." + (CV_DEPTH_MAX - 1));
        }
        return (depth & (CV_DEPTH_MAX - 1)) + ((channels - 1) << CV_CN_SHIFT);
    }

    /** n-channel 8-bit unsigned type code. */
    public static final int CV_8UC(int ch) { return makeType(CV_8U, ch); }

    /** n-channel 8-bit signed type code. */
    public static final int CV_8SC(int ch) { return makeType(CV_8S, ch); }

    /** n-channel 16-bit unsigned type code. */
    public static final int CV_16UC(int ch) { return makeType(CV_16U, ch); }

    /** n-channel 16-bit signed type code. */
    public static final int CV_16SC(int ch) { return makeType(CV_16S, ch); }

    /** n-channel 32-bit signed integer type code. */
    public static final int CV_32SC(int ch) { return makeType(CV_32S, ch); }

    /** n-channel 32-bit float type code. */
    public static final int CV_32FC(int ch) { return makeType(CV_32F, ch); }

    /** n-channel 64-bit float type code. */
    public static final int CV_64FC(int ch) { return makeType(CV_64F, ch); }

    /** Extracts the channel count encoded in a type code. */
    public static final int channels(int type) {
        return (type >> CV_CN_SHIFT) + 1;
    }

    /** Extracts the depth identifier encoded in a type code. */
    public static final int depth(int type) {
        return type & (CV_DEPTH_MAX - 1);
    }

    /** True for integer depths (anything below CV_32F). */
    public static final boolean isInteger(int type) {
        return depth(type) < CV_32F;
    }

    /**
     * Size in bytes of one matrix element of the given type
     * (bytes per channel times channel count).
     *
     * @throws UnsupportedOperationException for depths with no fixed size
     *         (e.g. CV_USRTYPE1)
     */
    public static final int ELEM_SIZE(int type) {
        final int bytesPerChannel;
        switch (depth(type)) {
            case CV_8U:
            case CV_8S:
                bytesPerChannel = 1;
                break;
            case CV_16U:
            case CV_16S:
                bytesPerChannel = 2;
                break;
            case CV_32S:
            case CV_32F:
                bytesPerChannel = 4;
                break;
            case CV_64F:
                bytesPerChannel = 8;
                break;
            default:
                throw new java.lang.UnsupportedOperationException(
                        "Unsupported CvType value: " + type);
        }
        return bytesPerChannel * channels(type);
    }

    /**
     * Human-readable name of a type code, e.g. "CV_8UC3" or "CV_8UC(15)"
     * for channel counts above 4.
     *
     * @throws UnsupportedOperationException for an unknown depth value
     */
    public static final String typeToString(int type) {
        final String base;
        switch (depth(type)) {
            case CV_8U:
                base = "CV_8U";
                break;
            case CV_8S:
                base = "CV_8S";
                break;
            case CV_16U:
                base = "CV_16U";
                break;
            case CV_16S:
                base = "CV_16S";
                break;
            case CV_32S:
                base = "CV_32S";
                break;
            case CV_32F:
                base = "CV_32F";
                break;
            case CV_64F:
                base = "CV_64F";
                break;
            case CV_USRTYPE1:
                base = "CV_USRTYPE1";
                break;
            default:
                throw new java.lang.UnsupportedOperationException(
                        "Unsupported CvType value: " + type);
        }

        int cn = channels(type);
        return cn <= 4 ? base + "C" + cn : base + "C(" + cn + ")";
    }

}
a/src/org/opencv/core/Mat.java b/src/org/opencv/core/Mat.java new file mode 100644 index 0000000..f381e61 --- /dev/null +++ b/src/org/opencv/core/Mat.java @@ -0,0 +1,2843 @@ +package org.opencv.core; + +// C++: class Mat +/** + *

OpenCV C++ n-dimensional dense array class

+ * + *

class CV_EXPORTS Mat

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

//... a lot of methods......

+ * + *

/ *! includes several bit-fields:

+ * + *

- the magic signature

+ * + *

- continuity flag

+ * + *

- depth

+ * + *

- number of channels

+ *
    + *
  • / + *
+ * + *

int flags;

+ * + *

//! the array dimensionality, >= 2

+ * + *

int dims;

+ * + *

//! the number of rows and columns or (-1, -1) when the array has more than 2 + * dimensions

+ * + *

int rows, cols;

+ * + *

//! pointer to the data

+ * + *

uchar* data;

+ * + *

//! pointer to the reference counter;

+ * + *

// when array points to user-allocated data, the pointer is NULL

+ * + *

int* refcount;

+ * + *

// other members...

+ * + *

};

+ * + *

The class Mat represents an n-dimensional dense numerical + * single-channel or multi-channel array. It can be used to store real or + * complex-valued vectors and matrices, grayscale or color images, voxel + * volumes, vector fields, point clouds, tensors, histograms (though, very + * high-dimensional histograms may be better stored in a SparseMat). + * The data layout of the array

+ * + *

M is defined by the array M.step[], so that the address + * of element (i_0,...,i_(M.dims-1)), where 0 <= i_k<M.size[k], + * is computed as:

+ * + *

addr(M_(i_0,...,i_(M.dims-1))) = M.data + M.step[0]*i_0 + M.step[1]*i_1 + * +... + M.step[M.dims-1]*i_(M.dims-1)

+ * + *

In case of a 2-dimensional array, the above formula is reduced to:

+ * + *

addr(M_(i,j)) = M.data + M.step[0]*i + M.step[1]*j

+ * + *

Note that M.step[i] >= M.step[i+1] (in fact, M.step[i] >= + * M.step[i+1]*M.size[i+1]). This means that 2-dimensional matrices are + * stored row-by-row, 3-dimensional matrices are stored plane-by-plane, and so + * on. M.step[M.dims-1] is minimal and always equal to the element + * size M.elemSize().

+ * + *

So, the data layout in Mat is fully compatible with + * CvMat, IplImage, and CvMatND types + * from OpenCV 1.x. It is also compatible with the majority of dense array types + * from the standard toolkits and SDKs, such as Numpy (ndarray), Win32 + * (independent device bitmaps), and others, that is, with any array that uses + * *steps* (or *strides*) to compute the position of a pixel. Due to this + * compatibility, it is possible to make a Mat header for + * user-allocated data and process it in-place using OpenCV functions.

+ * + *

There are many different ways to create a Mat object. The most + * popular options are listed below:

+ *
    + *
  • Use the create(nrows, ncols, type) method or the similar + * Mat(nrows, ncols, type[, fillValue]) constructor. A new array of + * the specified size and type is allocated. type has the same + * meaning as in the cvCreateMat method. + *
+ *

For example, CV_8UC1 means a 8-bit single-channel array, + * CV_32FC2 means a 2-channel (complex) floating-point array, and + * so on.

+ * + *

+ * + *

// C++ code:

+ * + *

// make a 7x7 complex matrix filled with 1+3j.

+ * + *

Mat M(7,7,CV_32FC2,Scalar(1,3));

+ * + *

// and now turn M to a 100x60 15-channel 8-bit matrix.

+ * + *

// The old content will be deallocated

+ * + *

M.create(100,60,CV_8UC(15));

+ * + *

+ * + *

As noted in the introduction to this chapter, create() allocates + * only a new array when the shape or type of the current array are different + * from the specified ones.

+ *
    + *
  • Create a multi-dimensional array: + *
+ * + *

+ * + *

// C++ code:

+ * + *

// create a 100x100x100 8-bit array

+ * + *

int sz[] = {100, 100, 100};

+ * + *

Mat bigCube(3, sz, CV_8U, Scalar.all(0));

+ * + *

+ * + *

It passes the number of dimensions =1 to the Mat constructor but + * the created array will be 2-dimensional with the number of columns set to 1. + * So, Mat.dims is always >= 2 (can also be 0 when the array is + * empty).

+ *
    + *
  • Use a copy constructor or assignment operator where there can be an + * array or expression on the right side (see below). As noted in the + * introduction, the array assignment is an O(1) operation because it only + * copies the header and increases the reference counter. The Mat.clone() + * method can be used to get a full (deep) copy of the array when you need it. + *
  • Construct a header for a part of another array. It can be a single + * row, single column, several rows, several columns, rectangular region in the + * array (called a *minor* in algebra) or a diagonal. Such operations are also + * O(1) because the new header references the same data. You can actually modify + * a part of the array using this feature, for example: + *
+ * + *

+ * + *

// C++ code:

+ * + *

// add the 5-th row, multiplied by 3 to the 3rd row

+ * + *

M.row(3) = M.row(3) + M.row(5)*3;

+ * + *

// now copy the 7-th column to the 1-st column

+ * + *

// M.col(1) = M.col(7); // this will not work

+ * + *

Mat M1 = M.col(1);

+ * + *

M.col(7).copyTo(M1);

+ * + *

// create a new 320x240 image

+ * + *

Mat img(Size(320,240),CV_8UC3);

+ * + *

// select a ROI

+ * + *

Mat roi(img, Rect(10,10,100,100));

+ * + *

// fill the ROI with (0,255,0) (which is green in RGB space);

+ * + *

// the original 320x240 image will be modified

+ * + *

roi = Scalar(0,255,0);

+ * + *

+ * + *

Due to the additional datastart and dataend + * members, it is possible to compute a relative sub-array position in the main + * *container* array using locateROI():

+ * + *

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(10, 10, CV_32S);

+ * + *

// extracts A columns, 1 (inclusive) to 3 (exclusive).

+ * + *

Mat B = A(Range.all(), Range(1, 3));

+ * + *

// extracts B rows, 5 (inclusive) to 9 (exclusive).

+ * + *

// that is, C ~ A(Range(5, 9), Range(1, 3))

+ * + *

Mat C = B(Range(5, 9), Range.all());

+ * + *

Size size; Point ofs;

+ * + *

C.locateROI(size, ofs);

+ * + *

// size will be (width=10,height=10) and the ofs will be (x=1, y=5)

+ * + *

+ * + *

As in case of whole matrices, if you need a deep copy, use the + * clone() method of the extracted sub-matrices.

+ *
    + *
  • Make a header for user-allocated data. It can be useful to do the + * following: + *
  • Process "foreign" data using OpenCV (for example, when you implement a + * DirectShow* filter or a processing module for gstreamer, and so + * on). For example: + *
+ * + *

+ * + *

// C++ code:

+ * + *

void process_video_frame(const unsigned char* pixels,

+ * + *

int width, int height, int step)

+ * + * + *

Mat img(height, width, CV_8UC3, pixels, step);

+ * + *

GaussianBlur(img, img, Size(7,7), 1.5, 1.5);

+ * + * + *

+ *
    + *
  • Quickly initialize small matrices and/or get a super-fast element + * access. + *
+ * + *

+ * + *

// C++ code:

+ * + *

double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}};

+ * + *

Mat M = Mat(3, 3, CV_64F, m).inv();

+ * + *

+ * + *

Partial yet very common cases of this *user-allocated data* case are + * conversions from CvMat and IplImage to + * Mat. For this purpose, there are special constructors taking + * pointers to CvMat or IplImage and the optional flag + * indicating whether to copy the data or not.

+ * + *

Backward conversion from Mat to CvMat or + * IplImage is provided via cast operators Mat.operator + * CvMat() const and Mat.operator IplImage(). The operators + * do NOT copy the data.

+ * + *

+ * + *

// C++ code:

+ * + *

IplImage* img = cvLoadImage("greatwave.jpg", 1);

+ * + *

Mat mtx(img); // convert IplImage* -> Mat

+ * + *

CvMat oldmat = mtx; // convert Mat -> CvMat

+ * + *

CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height &&

+ * + *

oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep);

+ * + *

+ *
    + *
  • Use MATLAB-style array initializers, zeros(), ones(), + * eye(), for example: + *
+ * + *

+ * + *

// C++ code:

+ * + *

// create a double-precision identity martix and add it to M.

+ * + *

M += Mat.eye(M.rows, M.cols, CV_64F);

+ * + *

+ *
    + *
  • Use a comma-separated initializer: + *
+ * + *

+ * + *

// C++ code:

+ * + *

// create a 3x3 double-precision identity matrix

+ * + *

Mat M = (Mat_(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);

+ * + *

+ * + *

With this approach, you first call a constructor of the "Mat_" class with the + * proper parameters, and then you just put << operator followed by + * comma-separated values that can be constants, variables, expressions, and so + * on. Also, note the extra parentheses required to avoid compilation errors.

+ * + *

Once the array is created, it is automatically managed via a + * reference-counting mechanism. If the array header is built on top of + * user-allocated data, you should handle the data by yourself. + * The array data is deallocated when no one points to it. If you want to + * release the data pointed by a array header before the array destructor is + * called, use Mat.release().

+ * + *

The next important thing to learn about the array class is element access. + * This manual already described how to compute an address of each array + * element. Normally, you are not required to use the formula directly in the + * code. If you know the array element type (which can be retrieved using the + * method Mat.type()), you can access the element M_(ij) + * of a 2-dimensional array as:

+ * + *

// C++ code:

+ * + *

M.at(i,j) += 1.f;

+ * + *

assuming that M is a double-precision floating-point array. There are several + * variants of the method at for a different number of dimensions. + *

+ * + *

If you need to process a whole row of a 2D array, the most efficient way is + * to get the pointer to the row first, and then just use the plain C operator + * [] :

+ * + *

// C++ code:

+ * + *

// compute sum of positive matrix elements

+ * + *

// (assuming that M is a double-precision matrix)

+ * + *

double sum=0;

+ * + *

for(int i = 0; i < M.rows; i++)

+ * + * + *

const double* Mi = M.ptr(i);

+ * + *

for(int j = 0; j < M.cols; j++)

+ * + *

sum += std.max(Mi[j], 0.);

+ * + * + *

Some operations, like the one above, do not actually depend on the array + * shape. They just process elements of an array one by one (or elements from + * multiple arrays that have the same coordinates, for example, array addition). + * Such operations are called *element-wise*. It makes sense to check whether + * all the input/output arrays are continuous, namely, have no gaps at the end + * of each row. If yes, process them as a long single row:

+ * + *

// compute the sum of positive matrix elements, optimized variant

+ * + *

double sum=0;

+ * + *

int cols = M.cols, rows = M.rows;

+ * + *

if(M.isContinuous())

+ * + * + *

cols *= rows;

+ * + *

rows = 1;

+ * + * + *

for(int i = 0; i < rows; i++)

+ * + * + *

const double* Mi = M.ptr(i);

+ * + *

for(int j = 0; j < cols; j++)

+ * + *

sum += std.max(Mi[j], 0.);

+ * + * + *

In case of the continuous matrix, the outer loop body is executed just once. + * So, the overhead is smaller, which is especially noticeable in case of small + * matrices. + *

+ * + *

Finally, there are STL-style iterators that are smart enough to skip gaps + * between successive rows:

+ * + *

// C++ code:

+ * + *

// compute sum of positive matrix elements, iterator-based variant

+ * + *

double sum=0;

+ * + *

MatConstIterator_ it = M.begin(), it_end = M.end();

+ * + *

for(; it != it_end; ++it)

+ * + *

sum += std.max(*it, 0.);

+ * + *

The matrix iterators are random-access iterators, so they can be passed to + * any STL algorithm, including std.sort(). + *

+ * + * @see org.opencv.core.Mat + */ +public class Mat { + + public final long nativeObj; + + public Mat(long addr) + { + if (addr == 0) + throw new java.lang.UnsupportedOperationException("Native object address is NULL"); + nativeObj = addr; + } + + // + // C++: Mat::Mat() + // + +/** + *

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Creates an empty matrix header; storage is allocated later, e.g. by
 * Mat.create() or by an OpenCV function that writes into it.
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat() {
    nativeObj = n_Mat();
}

// C++: Mat::Mat(int rows, int cols, int type)

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Allocates a new rows x cols matrix of the given type.
 *
 * @param rows number of rows in the 2D array
 * @param cols number of columns in the 2D array
 * @param type array type: CV_8UC1..CV_64FC4 or CV_8UC(n)..CV_64FC(n)
 *             for multi-channel matrices
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(int rows, int cols, int type) {
    nativeObj = n_Mat(rows, cols, type);
}

// C++: Mat::Mat(Size size, int type)

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Allocates a new matrix of the given type and 2D size.
 *
 * @param size 2D array size as Size(cols, rows) — note the reversed
 *             (width, height) order relative to (rows, cols)
 * @param type array type: CV_8UC1..CV_64FC4 or CV_8UC(n)..CV_64FC(n)
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(Size size, int type) {
    nativeObj = n_Mat(size.width, size.height, type);
}

// C++: Mat::Mat(int rows, int cols, int type, Scalar s)

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Allocates a new rows x cols matrix of the given type and fills every
 * element with the scalar s.
 *
 * @param rows number of rows in the 2D array
 * @param cols number of columns in the 2D array
 * @param type array type: CV_8UC1..CV_64FC4 or CV_8UC(n)..CV_64FC(n)
 * @param s initial value for every matrix element
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(int rows, int cols, int type, Scalar s) {
    nativeObj = n_Mat(rows, cols, type, s.val[0], s.val[1], s.val[2], s.val[3]);
}

// C++: Mat::Mat(Size size, int type, Scalar s)

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Allocates a new matrix of the given type and 2D size and fills every
 * element with the scalar s.
 *
 * @param size 2D array size as Size(cols, rows) — note the reversed
 *             (width, height) order relative to (rows, cols)
 * @param type array type: CV_8UC1..CV_64FC4 or CV_8UC(n)..CV_64FC(n)
 * @param s initial value for every matrix element
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(Size size, int type, Scalar s) {
    nativeObj = n_Mat(size.width, size.height, type, s.val[0], s.val[1], s.val[2], s.val[3]);
}

// C++: Mat::Mat(Mat m, Range rowRange, Range colRange = Range::all())

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Creates a header for the submatrix of m spanning the given row and
 * column ranges. No data is copied: the new header shares m's data, so
 * modifications are visible in both; use Mat.clone() for a deep copy.
 *
 * @param m parent matrix the new header refers to
 * @param rowRange rows to take (start inclusive, end exclusive);
 *                 Range.all() takes every row
 * @param colRange columns to take; Range.all() takes every column
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(Mat m, Range rowRange, Range colRange) {
    nativeObj = n_Mat(m.nativeObj, rowRange.start, rowRange.end, colRange.start, colRange.end);
}

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Creates a header for the given row range of m, taking all columns.
 * No data is copied: the new header shares m's data, so modifications
 * are visible in both; use Mat.clone() for a deep copy.
 *
 * @param m parent matrix the new header refers to
 * @param rowRange rows to take (start inclusive, end exclusive);
 *                 Range.all() takes every row
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(Mat m, Range rowRange) {
    nativeObj = n_Mat(m.nativeObj, rowRange.start, rowRange.end);
}

// C++: Mat::Mat(Mat m, Rect roi)

Various Mat constructors

+ * + *

These are various constructors that form a matrix. As noted in the + * "AutomaticAllocation", often the default constructor is enough, and the + * proper matrix will be allocated by an OpenCV function. The constructed matrix + * can further be assigned to another matrix or matrix expression or can be + * allocated with "Mat.create". In the former case, the old content is + * de-referenced.

/**
 * Creates a header for the rectangular region of interest of m.
 * No data is copied: the new header shares m's data, so modifications
 * are visible in both; use Mat.clone() for a deep copy.
 *
 * @param m parent matrix the new header refers to
 * @param roi region of interest, translated to row range
 *            [roi.y, roi.y + roi.height) and column range
 *            [roi.x, roi.x + roi.width)
 *
 * @see org.opencv.core.Mat.Mat
 */
public Mat(Mat m, Rect roi) {
    nativeObj = n_Mat(m.nativeObj, roi.y, roi.y + roi.height, roi.x, roi.x + roi.width);
}

// C++: Mat Mat::adjustROI(int dtop, int dbottom, int dleft, int dright)

Adjusts a submatrix size and position within the parent matrix.

+ * + *

The method is complimentary to"Mat.locateROI". The typical use of these + * functions is to determine the submatrix position within the parent matrix and + * then shift the position somehow. Typically, it can be required for filtering + * operations when pixels outside of the ROI should be taken into account. When + * all the method parameters are positive, the ROI needs to grow in all + * directions by the specified amount, for example:

+ * + *

// C++ code:

+ * + *

A.adjustROI(2, 2, 2, 2);

+ * + *

In this example, the matrix size is increased by 4 elements in each + * direction. The matrix is shifted by 2 elements to the left and 2 elements up, + * which brings in all the necessary pixels for the filtering with the 5x5 + * kernel. + *

+ * + *

adjustROI forces the adjusted ROI to be inside of the parent + * matrix that is boundaries of the adjusted ROI are constrained by boundaries + * of the parent matrix. For example, if the submatrix A is located + * in the first row of a parent matrix and you called A.adjustROI(2, 2, 2, + * 2) then A will not be increased in the upward direction.

+ * + *

The function is used internally by the OpenCV filtering functions, like + * "filter2D", morphological operations, and so on.

+ * + * @param dtop Shift of the top submatrix boundary upwards. + * @param dbottom Shift of the bottom submatrix boundary downwards. + * @param dleft Shift of the left submatrix boundary to the left. + * @param dright Shift of the right submatrix boundary to the right. + * + * @see org.opencv.core.Mat.adjustROI + * @see org.opencv.imgproc.Imgproc#copyMakeBorder + */ + public Mat adjustROI(int dtop, int dbottom, int dleft, int dright) + { + + Mat retVal = new Mat(n_adjustROI(nativeObj, dtop, dbottom, dleft, dright)); + + return retVal; + } + + // + // C++: void Mat::assignTo(Mat m, int type = -1) + // + +/** + *

Provides a functional form of convertTo.

+ * + *

This is an internally used method called by the "MatrixExpressions" engine.

+ * + * @param m Destination array. + * @param type Desired destination array depth (or -1 if it should be the same + * as the source type). + * + * @see org.opencv.core.Mat.assignTo + */ + public void assignTo(Mat m, int type) + { + + n_assignTo(nativeObj, m.nativeObj, type); + + return; + } + +/** + *

Provides a functional form of convertTo.

+ * + *

This is an internally used method called by the "MatrixExpressions" engine.

+ * + * @param m Destination array. + * + * @see org.opencv.core.Mat.assignTo + */ + public void assignTo(Mat m) + { + + n_assignTo(nativeObj, m.nativeObj); + + return; + } + + // + // C++: int Mat::channels() + // + +/** + *

Returns the number of matrix channels.

+ * + *

The method returns the number of matrix channels.

+ * + * @see org.opencv.core.Mat.channels + */ + public int channels() + { + + int retVal = n_channels(nativeObj); + + return retVal; + } + + // + // C++: int Mat::checkVector(int elemChannels, int depth = -1, bool + // requireContinuous = true) + // + + public int checkVector(int elemChannels, int depth, boolean requireContinuous) + { + + int retVal = n_checkVector(nativeObj, elemChannels, depth, requireContinuous); + + return retVal; + } + + public int checkVector(int elemChannels, int depth) + { + + int retVal = n_checkVector(nativeObj, elemChannels, depth); + + return retVal; + } + + public int checkVector(int elemChannels) + { + + int retVal = n_checkVector(nativeObj, elemChannels); + + return retVal; + } + + // + // C++: Mat Mat::clone() + // + +/** + *

Creates a full copy of the array and the underlying data.

+ * + *

The method creates a full copy of the array. The original step[] + * is not taken into account. So, the array copy is a continuous array occupying + * total()*elemSize() bytes.

+ * + * @see org.opencv.core.Mat.clone + */ + public Mat clone() + { + + Mat retVal = new Mat(n_clone(nativeObj)); + + return retVal; + } + + // + // C++: Mat Mat::col(int x) + // + +/** + *

Creates a matrix header for the specified matrix column.

+ * + *

The method makes a new header for the specified matrix column and returns it. + * This is an O(1) operation, regardless of the matrix size. The underlying data + * of the new matrix is shared with the original matrix. See also the "Mat.row" + * description.

+ * + * @param x A 0-based column index. + * + * @see org.opencv.core.Mat.col + */ + public Mat col(int x) + { + + Mat retVal = new Mat(n_col(nativeObj, x)); + + return retVal; + } + + // + // C++: Mat Mat::colRange(int startcol, int endcol) + // + +/** + *

Creates a matrix header for the specified row span.

+ * + *

The method makes a new header for the specified column span of the matrix. + * Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param startcol An inclusive 0-based start index of the column span. + * @param endcol An exclusive 0-based ending index of the column span. + * + * @see org.opencv.core.Mat.colRange + */ + public Mat colRange(int startcol, int endcol) + { + + Mat retVal = new Mat(n_colRange(nativeObj, startcol, endcol)); + + return retVal; + } + + // + // C++: Mat Mat::colRange(Range r) + // + +/** + *

Creates a matrix header for the specified row span.

+ * + *

The method makes a new header for the specified column span of the matrix. + * Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param r "Range" structure containing both the start and the end indices. + * + * @see org.opencv.core.Mat.colRange + */ + public Mat colRange(Range r) + { + + Mat retVal = new Mat(n_colRange(nativeObj, r.start, r.end)); + + return retVal; + } + + // + // C++: int Mat::cols() + // + + public int cols() + { + + int retVal = n_cols(nativeObj); + + return retVal; + } + + // + // C++: void Mat::convertTo(Mat& m, int rtype, double alpha = 1, double beta + // = 0) + // + +/** + *

Converts an array to another data type with optional scaling.

+ * + *

The method converts source pixel values to the target data type. + * saturate_cast<> is applied at the end to avoid possible + * overflows:

+ * + *

m(x,y) = saturate _ cast<rType>(alpha(*this)(x,y) + beta)

+ * + * @param m output matrix; if it does not have a proper size or type before the + * operation, it is reallocated. + * @param rtype desired output matrix type or, rather, the depth since the + * number of channels are the same as the input has; if rtype is + * negative, the output matrix will have the same type as the input. + * @param alpha optional scale factor. + * @param beta optional delta added to the scaled values. + * + * @see org.opencv.core.Mat.convertTo + */ + public void convertTo(Mat m, int rtype, double alpha, double beta) + { + + n_convertTo(nativeObj, m.nativeObj, rtype, alpha, beta); + + return; + } + +/** + *

Converts an array to another data type with optional scaling.

+ * + *

The method converts source pixel values to the target data type. + * saturate_cast<> is applied at the end to avoid possible + * overflows:

+ * + *

m(x,y) = saturate _ cast<rType>(alpha(*this)(x,y) + beta)

+ * + * @param m output matrix; if it does not have a proper size or type before the + * operation, it is reallocated. + * @param rtype desired output matrix type or, rather, the depth since the + * number of channels are the same as the input has; if rtype is + * negative, the output matrix will have the same type as the input. + * @param alpha optional scale factor. + * + * @see org.opencv.core.Mat.convertTo + */ + public void convertTo(Mat m, int rtype, double alpha) + { + + n_convertTo(nativeObj, m.nativeObj, rtype, alpha); + + return; + } + +/** + *

Converts an array to another data type with optional scaling.

+ * + *

The method converts source pixel values to the target data type. + * saturate_cast<> is applied at the end to avoid possible + * overflows:

+ * + *

m(x,y) = saturate _ cast<rType>(alpha(*this)(x,y) + beta)

+ * + * @param m output matrix; if it does not have a proper size or type before the + * operation, it is reallocated. + * @param rtype desired output matrix type or, rather, the depth since the + * number of channels are the same as the input has; if rtype is + * negative, the output matrix will have the same type as the input. + * + * @see org.opencv.core.Mat.convertTo + */ + public void convertTo(Mat m, int rtype) + { + + n_convertTo(nativeObj, m.nativeObj, rtype); + + return; + } + + // + // C++: void Mat::copyTo(Mat& m) + // + +/** + *

Copies the matrix to another one.

+ * + *

The method copies the matrix data to another matrix. Before copying the data, + * the method invokes

+ * + *

// C++ code:

+ * + *

m.create(this->size(), this->type);

+ * + *

so that the destination matrix is reallocated if needed. While + * m.copyTo(m); works flawlessly, the function does not handle the + * case of a partial overlap between the source and the destination matrices. + *

+ * + *

When the operation mask is specified, and the Mat.create call + * shown above reallocated the matrix, the newly allocated matrix is initialized + * with all zeros before copying the data.

+ * + * @param m Destination matrix. If it does not have a proper size or type before + * the operation, it is reallocated. + * + * @see org.opencv.core.Mat.copyTo + */ + public void copyTo(Mat m) + { + + n_copyTo(nativeObj, m.nativeObj); + + return; + } + + // + // C++: void Mat::copyTo(Mat& m, Mat mask) + // + +/** + *

Copies the matrix to another one.

+ * + *

The method copies the matrix data to another matrix. Before copying the data, + * the method invokes

+ * + *

// C++ code:

+ * + *

m.create(this->size(), this->type);

+ * + *

so that the destination matrix is reallocated if needed. While + * m.copyTo(m); works flawlessly, the function does not handle the + * case of a partial overlap between the source and the destination matrices. + *

+ * + *

When the operation mask is specified, and the Mat.create call + * shown above reallocated the matrix, the newly allocated matrix is initialized + * with all zeros before copying the data.

+ * + * @param m Destination matrix. If it does not have a proper size or type before + * the operation, it is reallocated. + * @param mask Operation mask. Its non-zero elements indicate which matrix + * elements need to be copied. + * + * @see org.opencv.core.Mat.copyTo + */ + public void copyTo(Mat m, Mat mask) + { + + n_copyTo(nativeObj, m.nativeObj, mask.nativeObj); + + return; + } + + // + // C++: void Mat::create(int rows, int cols, int type) + // + +/** + *

Allocates new array data if needed.

+ * + *

This is one of the key Mat methods. Most new-style OpenCV + * functions and methods that produce arrays call this method for each output + * array. The method uses the following algorithm:

+ *
    + *
  • If the current array shape and the type match the new ones, return + * immediately. Otherwise, de-reference the previous data by calling + * "Mat.release". + *
  • Initialize the new header. + *
  • Allocate the new data of total()*elemSize() bytes. + *
  • Allocate the new, associated with the data, reference counter and set + * it to 1. + *
+ *

Such a scheme makes the memory management robust and efficient at the same + * time and helps avoid extra typing for you. This means that usually there is + * no need to explicitly allocate output arrays. That is, instead of writing: + *

+ * + *

// C++ code:

+ * + *

Mat color;...

+ * + *

Mat gray(color.rows, color.cols, color.depth());

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

you can simply write:

+ * + *

Mat color;...

+ * + *

Mat gray;

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

because cvtColor, as well as the most of OpenCV functions, calls + * Mat.create() for the output array internally. + *

+ * + * @param rows New number of rows. + * @param cols New number of columns. + * @param type New matrix type. + * + * @see org.opencv.core.Mat.create + */ + public void create(int rows, int cols, int type) + { + + n_create(nativeObj, rows, cols, type); + + return; + } + + // + // C++: void Mat::create(Size size, int type) + // + +/** + *

Allocates new array data if needed.

+ * + *

This is one of the key Mat methods. Most new-style OpenCV + * functions and methods that produce arrays call this method for each output + * array. The method uses the following algorithm:

+ *
    + *
  • If the current array shape and the type match the new ones, return + * immediately. Otherwise, de-reference the previous data by calling + * "Mat.release". + *
  • Initialize the new header. + *
  • Allocate the new data of total()*elemSize() bytes. + *
  • Allocate the new, associated with the data, reference counter and set + * it to 1. + *
+ *

Such a scheme makes the memory management robust and efficient at the same + * time and helps avoid extra typing for you. This means that usually there is + * no need to explicitly allocate output arrays. That is, instead of writing: + *

+ * + *

// C++ code:

+ * + *

Mat color;...

+ * + *

Mat gray(color.rows, color.cols, color.depth());

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

you can simply write:

+ * + *

Mat color;...

+ * + *

Mat gray;

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

because cvtColor, as well as the most of OpenCV functions, calls + * Mat.create() for the output array internally. + *

+ * + * @param size Alternative new matrix size specification: Size(cols, + * rows) + * @param type New matrix type. + * + * @see org.opencv.core.Mat.create + */ + public void create(Size size, int type) + { + + n_create(nativeObj, size.width, size.height, type); + + return; + } + + // + // C++: Mat Mat::cross(Mat m) + // + +/** + *

Computes a cross-product of two 3-element vectors.

+ * + *

The method computes a cross-product of two 3-element vectors. The vectors + * must be 3-element floating-point vectors of the same shape and size. The + * result is another 3-element vector of the same shape and type as operands.

+ * + * @param m Another cross-product operand. + * + * @see org.opencv.core.Mat.cross + */ + public Mat cross(Mat m) + { + + Mat retVal = new Mat(n_cross(nativeObj, m.nativeObj)); + + return retVal; + } + + // + // C++: long Mat::dataAddr() + // + + public long dataAddr() + { + + long retVal = n_dataAddr(nativeObj); + + return retVal; + } + + // + // C++: int Mat::depth() + // + +/** + *

Returns the depth of a matrix element.

+ * + *

The method returns the identifier of the matrix element depth (the type of + * each individual channel). For example, for a 16-bit signed 3-channel array, + * the method returns CV_16S. A complete list of matrix types + * contains the following values:

+ *
    + *
  • CV_8U - 8-bit unsigned integers (0..255) + *
  • CV_8S - 8-bit signed integers (-128..127) + *
  • CV_16U - 16-bit unsigned integers (0..65535) + *
  • CV_16S - 16-bit signed integers (-32768..32767) + *
  • CV_32S - 32-bit signed integers (-2147483648..2147483647) + *
  • CV_32F - 32-bit floating-point numbers (-FLT_MAX..FLT_MAX, + * INF, NAN) + *
  • CV_64F - 64-bit floating-point numbers (-DBL_MAX..DBL_MAX, + * INF, NAN) + *
+ * + * @see org.opencv.core.Mat.depth + */ + public int depth() + { + + int retVal = n_depth(nativeObj); + + return retVal; + } + + // + // C++: Mat Mat::diag(int d = 0) + // + +/** + *

Extracts a diagonal from a matrix, or creates a diagonal matrix.

+ * + *

The method makes a new header for the specified matrix diagonal. The new + * matrix is represented as a single-column matrix. Similarly to "Mat.row" and + * "Mat.col", this is an O(1) operation.

+ * + * @param d Single-column matrix that forms a diagonal matrix or index of the + * diagonal, with the following values: + *
    + *
  • d=0 is the main diagonal. + *
  • d>0 is a diagonal from the lower half. For example, d=1 + * means the diagonal is set immediately below the main one. + *
  • d<0 is a diagonal from the upper half. For example, d=1 + * means the diagonal is set immediately above the main one. + *
+ * + * @see org.opencv.core.Mat.diag + */ + public Mat diag(int d) + { + + Mat retVal = new Mat(n_diag(nativeObj, d)); + + return retVal; + } + +/** + *

Extracts a diagonal from a matrix, or creates a diagonal matrix.

+ * + *

The method makes a new header for the specified matrix diagonal. The new + * matrix is represented as a single-column matrix. Similarly to "Mat.row" and + * "Mat.col", this is an O(1) operation.

+ * + * @see org.opencv.core.Mat.diag + */ + public Mat diag() + { + + Mat retVal = new Mat(n_diag(nativeObj, 0)); + + return retVal; + } + + // + // C++: static Mat Mat::diag(Mat d) + // + +/** + *

Extracts a diagonal from a matrix, or creates a diagonal matrix.

+ * + *

The method makes a new header for the specified matrix diagonal. The new + * matrix is represented as a single-column matrix. Similarly to "Mat.row" and + * "Mat.col", this is an O(1) operation.

+ * + * @param d Single-column matrix that forms a diagonal matrix or index of the + * diagonal, with the following values: + *
    + *
  • d=0 is the main diagonal. + *
  • d>0 is a diagonal from the lower half. For example, d=1 + * means the diagonal is set immediately below the main one. + *
  • d<0 is a diagonal from the upper half. For example, d=1 + * means the diagonal is set immediately above the main one. + *
+ * + * @see org.opencv.core.Mat.diag + */ + public static Mat diag(Mat d) + { + + Mat retVal = new Mat(n_diag(d.nativeObj)); + + return retVal; + } + + // + // C++: double Mat::dot(Mat m) + // + +/** + *

Computes a dot-product of two vectors.

+ * + *

The method computes a dot-product of two matrices. If the matrices are not + * single-column or single-row vectors, the top-to-bottom left-to-right scan + * ordering is used to treat them as 1D vectors. The vectors must have the same + * size and type. If the matrices have more than one channel, the dot products + * from all the channels are summed together.

+ * + * @param m another dot-product operand. + * + * @see org.opencv.core.Mat.dot + */ + public double dot(Mat m) + { + + double retVal = n_dot(nativeObj, m.nativeObj); + + return retVal; + } + + // + // C++: size_t Mat::elemSize() + // + +/** + *

Returns the matrix element size in bytes.

+ * + *

The method returns the matrix element size in bytes. For example, if the + * matrix type is CV_16SC3, the method returns 3*sizeof(short) + * or 6.

+ * + * @see org.opencv.core.Mat.elemSize + */ + public long elemSize() + { + + long retVal = n_elemSize(nativeObj); + + return retVal; + } + + // + // C++: size_t Mat::elemSize1() + // + +/** + *

Returns the size of each matrix element channel in bytes.

+ * + *

The method returns the matrix element channel size in bytes, that is, it + * ignores the number of channels. For example, if the matrix type is + * CV_16SC3, the method returns sizeof(short) or 2.

+ * + * @see org.opencv.core.Mat.elemSize1 + */ + public long elemSize1() + { + + long retVal = n_elemSize1(nativeObj); + + return retVal; + } + + // + // C++: bool Mat::empty() + // + +/** + *

Returns true if the array has no elements.

+ * + *

The method returns true if Mat.total() is 0 or if + * Mat.data is NULL. Because of pop_back() and + * resize() methods M.total() == 0 does not imply that + * M.data == NULL.

+ * + * @see org.opencv.core.Mat.empty + */ + public boolean empty() + { + + boolean retVal = n_empty(nativeObj); + + return retVal; + } + + // + // C++: static Mat Mat::eye(int rows, int cols, int type) + // + +/** + *

Returns an identity matrix of the specified size and type.

+ * + *

The method returns a Matlab-style identity matrix initializer, similarly to + * "Mat.zeros". Similarly to"Mat.ones", you can use a scale operation to + * create a scaled identity matrix efficiently:

+ * + *

// C++ code:

+ * + *

// make a 4x4 diagonal matrix with 0.1's on the diagonal.

+ * + *

Mat A = Mat.eye(4, 4, CV_32F)*0.1;

+ * + * @param rows Number of rows. + * @param cols Number of columns. + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.eye + */ + public static Mat eye(int rows, int cols, int type) + { + + Mat retVal = new Mat(n_eye(rows, cols, type)); + + return retVal; + } + + // + // C++: static Mat Mat::eye(Size size, int type) + // + +/** + *

Returns an identity matrix of the specified size and type.

+ * + *

The method returns a Matlab-style identity matrix initializer, similarly to + * "Mat.zeros". Similarly to"Mat.ones", you can use a scale operation to + * create a scaled identity matrix efficiently:

+ * + *

// C++ code:

+ * + *

// make a 4x4 diagonal matrix with 0.1's on the diagonal.

+ * + *

Mat A = Mat.eye(4, 4, CV_32F)*0.1;

+ * + * @param size Alternative matrix size specification as Size(cols, + * rows). + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.eye + */ + public static Mat eye(Size size, int type) + { + + Mat retVal = new Mat(n_eye(size.width, size.height, type)); + + return retVal; + } + + // + // C++: Mat Mat::inv(int method = DECOMP_LU) + // + +/** + *

Inverses a matrix.

+ * + *

The method performs a matrix inversion by means of matrix expressions. This + * means that a temporary matrix inversion object is returned by the method and + * can be used further as a part of more complex matrix expressions or can be + * assigned to a matrix.

+ * + * @param method Matrix inversion method. Possible values are the following: + *
    + *
  • DECOMP_LU is the LU decomposition. The matrix must be non-singular. + *
  • DECOMP_CHOLESKY is the Cholesky LL^T decomposition for + * symmetrical positively defined matrices only. This type is about twice faster + * than LU on big matrices. + *
  • DECOMP_SVD is the SVD decomposition. If the matrix is singular or even + * non-square, the pseudo inversion is computed. + *
+ * + * @see org.opencv.core.Mat.inv + */ + public Mat inv(int method) + { + + Mat retVal = new Mat(n_inv(nativeObj, method)); + + return retVal; + } + +/** + *

Inverses a matrix.

+ * + *

The method performs a matrix inversion by means of matrix expressions. This + * means that a temporary matrix inversion object is returned by the method and + * can be used further as a part of more complex matrix expressions or can be + * assigned to a matrix.

+ * + * @see org.opencv.core.Mat.inv + */ + public Mat inv() + { + + Mat retVal = new Mat(n_inv(nativeObj)); + + return retVal; + } + + // + // C++: bool Mat::isContinuous() + // + +/** + *

Reports whether the matrix is continuous or not.

+ * + *

The method returns true if the matrix elements are stored + * continuously without gaps at the end of each row. Otherwise, it returns + * false. Obviously, 1x1 or 1xN matrices + * are always continuous. Matrices created with "Mat.create" are always + * continuous. But if you extract a part of the matrix using "Mat.col", + * "Mat.diag", and so on, or constructed a matrix header for externally + * allocated data, such matrices may no longer have this property. + * The continuity flag is stored as a bit in the Mat.flags field + * and is computed automatically when you construct a matrix header. Thus, the + * continuity check is a very fast operation, though theoretically it could be + * done as follows:

+ * + *

// C++ code:

+ * + *

// alternative implementation of Mat.isContinuous()

+ * + *

bool myCheckMatContinuity(const Mat& m)

+ * + * + *

//return (m.flags & Mat.CONTINUOUS_FLAG) != 0;

+ * + *

return m.rows == 1 || m.step == m.cols*m.elemSize();

+ * + * + *

The method is used in quite a few of OpenCV functions. The point is that + * element-wise operations (such as arithmetic and logical operations, math + * functions, alpha blending, color space transformations, and others) do not + * depend on the image geometry. Thus, if all the input and output arrays are + * continuous, the functions can process them as very long single-row vectors. + * The example below illustrates how an alpha-blending function can be + * implemented.

+ * + *

template

+ * + *

void alphaBlendRGBA(const Mat& src1, const Mat& src2, Mat& dst)

+ * + * + *

const float alpha_scale = (float)std.numeric_limits.max(),

+ * + *

inv_scale = 1.f/alpha_scale;

+ * + *

CV_Assert(src1.type() == src2.type() &&

+ * + *

src1.type() == CV_MAKETYPE(DataType.depth, 4) &&

+ * + *

src1.size() == src2.size());

+ * + *

Size size = src1.size();

+ * + *

dst.create(size, src1.type());

+ * + *

// here is the idiom: check the arrays for continuity and,

+ * + *

// if this is the case,

+ * + *

// treat the arrays as 1D vectors

+ * + *

if(src1.isContinuous() && src2.isContinuous() && dst.isContinuous())

+ * + * + *

size.width *= size.height;

+ * + *

size.height = 1;

+ * + * + *

size.width *= 4;

+ * + *

for(int i = 0; i < size.height; i++)

+ * + * + *

// when the arrays are continuous,

+ * + *

// the outer loop is executed only once

+ * + *

const T* ptr1 = src1.ptr(i);

+ * + *

const T* ptr2 = src2.ptr(i);

+ * + *

T* dptr = dst.ptr(i);

+ * + *

for(int j = 0; j < size.width; j += 4)

+ * + * + *

float alpha = ptr1[j+3]*inv_scale, beta = ptr2[j+3]*inv_scale;

+ * + *

dptr[j] = saturate_cast(ptr1[j]*alpha + ptr2[j]*beta);

+ * + *

dptr[j+1] = saturate_cast(ptr1[j+1]*alpha + ptr2[j+1]*beta);

+ * + *

dptr[j+2] = saturate_cast(ptr1[j+2]*alpha + ptr2[j+2]*beta);

+ * + *

dptr[j+3] = saturate_cast((1 - (1-alpha)*(1-beta))*alpha_scale);

+ * + * + * + * + *

This approach, while being very simple, can boost the performance of a simple + * element-operation by 10-20 percents, especially if the image is rather small + * and the operation is quite simple. + *

+ * + *

Another OpenCV idiom in this function, a call of "Mat.create" for the + * destination array, that allocates the destination array unless it already has + * the proper size and type. And while the newly allocated arrays are always + * continuous, you still need to check the destination array because + * "Mat.create" does not always allocate a new matrix.

+ * + * @see org.opencv.core.Mat.isContinuous + */ + public boolean isContinuous() + { + + boolean retVal = n_isContinuous(nativeObj); + + return retVal; + } + + // + // C++: bool Mat::isSubmatrix() + // + + public boolean isSubmatrix() + { + + boolean retVal = n_isSubmatrix(nativeObj); + + return retVal; + } + + // + // C++: void Mat::locateROI(Size wholeSize, Point ofs) + // + +/** + *

Locates the matrix header within a parent matrix.

+ * + *

After you extracted a submatrix from a matrix using "Mat.row", "Mat.col", + * "Mat.rowRange", "Mat.colRange", and others, the resultant submatrix points + * just to the part of the original big matrix. However, each submatrix contains + * information (represented by datastart and dataend + * fields) that helps reconstruct the original matrix size and the position of + * the extracted submatrix within the original matrix. The method + * locateROI does exactly that.

+ * + * @param wholeSize Output parameter that contains the size of the whole matrix + * containing *this as a part. + * @param ofs Output parameter that contains an offset of *this + * inside the whole matrix. + * + * @see org.opencv.core.Mat.locateROI + */ + public void locateROI(Size wholeSize, Point ofs) + { + double[] wholeSize_out = new double[2]; + double[] ofs_out = new double[2]; + locateROI_0(nativeObj, wholeSize_out, ofs_out); + if(wholeSize!=null){ wholeSize.width = wholeSize_out[0]; wholeSize.height = wholeSize_out[1]; } + if(ofs!=null){ ofs.x = ofs_out[0]; ofs.y = ofs_out[1]; } + return; + } + + // + // C++: Mat Mat::mul(Mat m, double scale = 1) + // + +/** + *

Performs an element-wise multiplication or division of the two matrices.

+ * + *

The method returns a temporary object encoding per-element array + * multiplication, with optional scale. Note that this is not a matrix + * multiplication that corresponds to a simpler "*" operator. + * Example:

+ * + *

// C++ code:

+ * + *

Mat C = A.mul(5/B); // equivalent to divide(A, B, C, 5)

+ * + * @param m Another array of the same type and the same size as + * *this, or a matrix expression. + * @param scale Optional scale factor. + * + * @see org.opencv.core.Mat.mul + */ + public Mat mul(Mat m, double scale) + { + + Mat retVal = new Mat(n_mul(nativeObj, m.nativeObj, scale)); + + return retVal; + } + +/** + *

Performs an element-wise multiplication or division of the two matrices.

+ * + *

The method returns a temporary object encoding per-element array + * multiplication, with optional scale. Note that this is not a matrix + * multiplication that corresponds to a simpler "*" operator. + * Example:

+ * + *

// C++ code:

+ * + *

Mat C = A.mul(5/B); // equivalent to divide(A, B, C, 5)

+ * + * @param m Another array of the same type and the same size as + * *this, or a matrix expression. + * + * @see org.opencv.core.Mat.mul + */ + public Mat mul(Mat m) + { + + Mat retVal = new Mat(n_mul(nativeObj, m.nativeObj)); + + return retVal; + } + + // + // C++: static Mat Mat::ones(int rows, int cols, int type) + // + +/** + *

Returns an array of all 1's of the specified size and type.

+ * + *

The method returns a Matlab-style 1's array initializer, similarly + * to"Mat.zeros". Note that using this method you can initialize an array with + * an arbitrary value, using the following Matlab idiom:

+ * + *

// C++ code:

+ * + *

Mat A = Mat.ones(100, 100, CV_8U)*3; // make 100x100 matrix filled with 3.

+ * + *

The above operation does not form a 100x100 matrix of 1's and then multiply + * it by 3. Instead, it just remembers the scale factor (3 in this case) and use + * it when actually invoking the matrix initializer. + *

+ * + * @param rows Number of rows. + * @param cols Number of columns. + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.ones + */ + public static Mat ones(int rows, int cols, int type) + { + + Mat retVal = new Mat(n_ones(rows, cols, type)); + + return retVal; + } + + // + // C++: static Mat Mat::ones(Size size, int type) + // + +/** + *

Returns an array of all 1's of the specified size and type.

+ * + *

The method returns a Matlab-style 1's array initializer, similarly + * to"Mat.zeros". Note that using this method you can initialize an array with + * an arbitrary value, using the following Matlab idiom:

+ * + *

// C++ code:

+ * + *

Mat A = Mat.ones(100, 100, CV_8U)*3; // make 100x100 matrix filled with 3.

+ * + *

The above operation does not form a 100x100 matrix of 1's and then multiply + * it by 3. Instead, it just remembers the scale factor (3 in this case) and use + * it when actually invoking the matrix initializer. + *

+ * + * @param size Alternative to the matrix size specification Size(cols, + * rows). + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.ones + */ + public static Mat ones(Size size, int type) + { + + Mat retVal = new Mat(n_ones(size.width, size.height, type)); + + return retVal; + } + + // + // C++: void Mat::push_back(Mat m) + // + +/** + *

Adds elements to the bottom of the matrix.

+ * + *

The methods add one or more elements to the bottom of the matrix. They + * emulate the corresponding method of the STL vector class. When + * elem is Mat, its type and the number of columns + * must be the same as in the container matrix.

+ * + * @param m a m + * + * @see org.opencv.core.Mat.push_back + */ + public void push_back(Mat m) + { + + n_push_back(nativeObj, m.nativeObj); + + return; + } + + // + // C++: void Mat::release() + // + +/** + *

Decrements the reference counter and deallocates the matrix if needed.

+ * + *

The method decrements the reference counter associated with the matrix data. + * When the reference counter reaches 0, the matrix data is deallocated and the + * data and the reference counter pointers are set to NULL's. If the matrix + * header points to an external data set (see "Mat.Mat"), the reference counter + * is NULL, and the method has no effect in this case.

+ * + *

This method can be called manually to force the matrix data deallocation. But + * since this method is automatically called in the destructor, or by any other + * method that changes the data pointer, it is usually not needed. The reference + * counter decrement and check for 0 is an atomic operation on the platforms + * that support it. Thus, it is safe to operate on the same matrices + * asynchronously in different threads.

+ * + * @see org.opencv.core.Mat.release + */ + public void release() + { + + n_release(nativeObj); + + return; + } + + // + // C++: Mat Mat::reshape(int cn, int rows = 0) + // + +/** + *

Changes the shape and/or the number of channels of a 2D matrix without + * copying the data.

+ * + *

The method makes a new matrix header for *this elements. The new + * matrix may have a different size and/or different number of channels. Any + * combination is possible if:

+ *
    + *
  • No extra elements are included into the new matrix and no elements are + * excluded. Consequently, the product rows*cols*channels() must + * stay the same after the transformation. + *
  • No data is copied. That is, this is an O(1) operation. Consequently, + * if you change the number of rows, or the operation changes the indices of + * elements row in some other way, the matrix must be continuous. See + * "Mat.isContinuous". + *
+ *

For example, if there is a set of 3D points stored as an STL vector, and you + * want to represent the points as a 3xN matrix, do the following: + *

+ * + *

// C++ code:

+ * + *

std.vector vec;...

+ * + *

Mat pointMat = Mat(vec). // convert vector to Mat, O(1) operation

+ * + *

reshape(1). // make Nx3 1-channel matrix out of Nx1 3-channel.

+ * + *

// Also, an O(1) operation

+ * + *

t(); // finally, transpose the Nx3 matrix.

+ * + *

// This involves copying all the elements

+ * + * @param cn New number of channels. If the parameter is 0, the number of + * channels remains the same. + * @param rows New number of rows. If the parameter is 0, the number of rows + * remains the same. + * + * @see org.opencv.core.Mat.reshape + */ + public Mat reshape(int cn, int rows) + { + + Mat retVal = new Mat(n_reshape(nativeObj, cn, rows)); + + return retVal; + } + +/** + *

Changes the shape and/or the number of channels of a 2D matrix without + * copying the data.

+ * + *

The method makes a new matrix header for *this elements. The new + * matrix may have a different size and/or different number of channels. Any + * combination is possible if:

+ *
    + *
  • No extra elements are included into the new matrix and no elements are + * excluded. Consequently, the product rows*cols*channels() must + * stay the same after the transformation. + *
  • No data is copied. That is, this is an O(1) operation. Consequently, + * if you change the number of rows, or the operation changes the indices of + * elements row in some other way, the matrix must be continuous. See + * "Mat.isContinuous". + *
+ *

For example, if there is a set of 3D points stored as an STL vector, and you + * want to represent the points as a 3xN matrix, do the following: + *

+ * + *

// C++ code:

+ * + *

std.vector vec;...

+ * + *

Mat pointMat = Mat(vec). // convert vector to Mat, O(1) operation

+ * + *

reshape(1). // make Nx3 1-channel matrix out of Nx1 3-channel.

+ * + *

// Also, an O(1) operation

+ * + *

t(); // finally, transpose the Nx3 matrix.

+ * + *

// This involves copying all the elements

+ * + * @param cn New number of channels. If the parameter is 0, the number of + * channels remains the same. + * + * @see org.opencv.core.Mat.reshape + */ + public Mat reshape(int cn) + { + + Mat retVal = new Mat(n_reshape(nativeObj, cn)); + + return retVal; + } + + // + // C++: Mat Mat::row(int y) + // + +/** + *

Creates a matrix header for the specified matrix row.

+ * + *

The method makes a new header for the specified matrix row and returns it. + * This is an O(1) operation, regardless of the matrix size. The underlying data + * of the new matrix is shared with the original matrix. Here is the example of + * one of the classical basic matrix processing operations, axpy, + * used by LU and many other algorithms:

+ * + *

// C++ code:

+ * + *

inline void matrix_axpy(Mat& A, int i, int j, double alpha)

+ * + * + *

A.row(i) += A.row(j)*alpha;

+ * + * + *

Note:

+ * + *

In the current implementation, the following code does not work as expected: + *

+ * + *

// C++ code:

+ * + *

Mat A;...

+ * + *

A.row(i) = A.row(j); // will not work

+ * + *

This happens because A.row(i) forms a temporary header that is + * further assigned to another header. Remember that each of these operations is + * O(1), that is, no data is copied. Thus, the above assignment is not true if + * you may have expected the j-th row to be copied to the i-th row. To achieve + * that, you should either turn this simple assignment into an expression or use + * the "Mat.copyTo" method:

+ * + *

Mat A;...

+ * + *

// works, but looks a bit obscure.

+ * + *

A.row(i) = A.row(j) + 0;

+ * + *

// this is a bit longer, but the recommended method.

+ * + *

A.row(j).copyTo(A.row(i));

+ * + * @param y A 0-based row index. + * + * @see org.opencv.core.Mat.row + */ + public Mat row(int y) + { + + Mat retVal = new Mat(n_row(nativeObj, y)); + + return retVal; + } + + // + // C++: Mat Mat::rowRange(int startrow, int endrow) + // + +/** + *

Creates a matrix header for the specified row span.

+ * + *

The method makes a new header for the specified row span of the matrix. + * Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param startrow An inclusive 0-based start index of the row span. + * @param endrow An exclusive 0-based ending index of the row span. + * + * @see org.opencv.core.Mat.rowRange + */ + public Mat rowRange(int startrow, int endrow) + { + + Mat retVal = new Mat(n_rowRange(nativeObj, startrow, endrow)); + + return retVal; + } + + // + // C++: Mat Mat::rowRange(Range r) + // + +/** + *

Creates a matrix header for the specified row span.

+ * + *

The method makes a new header for the specified row span of the matrix. + * Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param r "Range" structure containing both the start and the end indices. + * + * @see org.opencv.core.Mat.rowRange + */ + public Mat rowRange(Range r) + { + + Mat retVal = new Mat(n_rowRange(nativeObj, r.start, r.end)); + + return retVal; + } + + // + // C++: int Mat::rows() + // + + public int rows() + { + + int retVal = n_rows(nativeObj); + + return retVal; + } + + // + // C++: Mat Mat::operator =(Scalar s) + // + + public Mat setTo(Scalar s) + { + + Mat retVal = new Mat(n_setTo(nativeObj, s.val[0], s.val[1], s.val[2], s.val[3])); + + return retVal; + } + + // + // C++: Mat Mat::setTo(Scalar value, Mat mask = Mat()) + // + +/** + *

Sets all or some of the array elements to the specified value.

+ * + * @param value Assigned scalar converted to the actual array type. + * @param mask Operation mask of the same size as *this. This is an + * advanced variant of the Mat.operator=(const Scalar& s) + * operator. + * + * @see org.opencv.core.Mat.setTo + */ + public Mat setTo(Scalar value, Mat mask) + { + + Mat retVal = new Mat(n_setTo(nativeObj, value.val[0], value.val[1], value.val[2], value.val[3], mask.nativeObj)); + + return retVal; + } + + // + // C++: Mat Mat::setTo(Mat value, Mat mask = Mat()) + // + +/** + *

Sets all or some of the array elements to the specified value.

+ * + * @param value Assigned scalar converted to the actual array type. + * @param mask Operation mask of the same size as *this. This is an + * advanced variant of the Mat.operator=(const Scalar& s) + * operator. + * + * @see org.opencv.core.Mat.setTo + */ + public Mat setTo(Mat value, Mat mask) + { + + Mat retVal = new Mat(n_setTo(nativeObj, value.nativeObj, mask.nativeObj)); + + return retVal; + } + +/** + *

Sets all or some of the array elements to the specified value.

+ * + * @param value Assigned scalar converted to the actual array type. + * + * @see org.opencv.core.Mat.setTo + */ + public Mat setTo(Mat value) + { + + Mat retVal = new Mat(n_setTo(nativeObj, value.nativeObj)); + + return retVal; + } + + // + // C++: Size Mat::size() + // + +/** + *

Returns a matrix size.

+ * + *

The method returns a matrix size: Size(cols, rows). When the + * matrix is more than 2-dimensional, the returned size is (-1, -1).

+ * + * @see org.opencv.core.Mat.size + */ + public Size size() + { + + Size retVal = new Size(n_size(nativeObj)); + + return retVal; + } + + // + // C++: size_t Mat::step1(int i = 0) + // + +/** + *

Returns a normalized step.

+ * + *

The method returns a matrix step divided by "Mat.elemSize1()". It can be + * useful to quickly access an arbitrary matrix element.

+ * + * @param i a i + * + * @see org.opencv.core.Mat.step1 + */ + public long step1(int i) + { + + long retVal = n_step1(nativeObj, i); + + return retVal; + } + +/** + *

Returns a normalized step.

+ * + *

The method returns a matrix step divided by "Mat.elemSize1()". It can be + * useful to quickly access an arbitrary matrix element.

+ * + * @see org.opencv.core.Mat.step1 + */ + public long step1() + { + + long retVal = n_step1(nativeObj); + + return retVal; + } + + // + // C++: Mat Mat::operator()(int rowStart, int rowEnd, int colStart, int + // colEnd) + // + +/** + *

Extracts a rectangular submatrix.

+ * + *

The operators make a new header for the specified sub-array of + * *this. They are the most generalized forms of "Mat.row", + * "Mat.col", "Mat.rowRange", and "Mat.colRange". For example, + * A(Range(0, 10), Range.all()) is equivalent to A.rowRange(0, + * 10). Similarly to all of the above, the operators are O(1) operations, + * that is, no matrix data is copied.

+ * + * @param rowStart a rowStart + * @param rowEnd a rowEnd + * @param colStart a colStart + * @param colEnd a colEnd + * + * @see org.opencv.core.Mat.operator() + */ + public Mat submat(int rowStart, int rowEnd, int colStart, int colEnd) + { + + Mat retVal = new Mat(n_submat_rr(nativeObj, rowStart, rowEnd, colStart, colEnd)); + + return retVal; + } + + // + // C++: Mat Mat::operator()(Range rowRange, Range colRange) + // + +/** + *

Extracts a rectangular submatrix.

+ * + *

The operators make a new header for the specified sub-array of + * *this. They are the most generalized forms of "Mat.row", + * "Mat.col", "Mat.rowRange", and "Mat.colRange". For example, + * A(Range(0, 10), Range.all()) is equivalent to A.rowRange(0, + * 10). Similarly to all of the above, the operators are O(1) operations, + * that is, no matrix data is copied.

+ * + * @param rowRange Start and end row of the extracted submatrix. The upper + * boundary is not included. To select all the rows, use Range.all(). + * @param colRange Start and end column of the extracted submatrix. The upper + * boundary is not included. To select all the columns, use Range.all(). + * + * @see org.opencv.core.Mat.operator() + */ + public Mat submat(Range rowRange, Range colRange) + { + + Mat retVal = new Mat(n_submat_rr(nativeObj, rowRange.start, rowRange.end, colRange.start, colRange.end)); + + return retVal; + } + + // + // C++: Mat Mat::operator()(Rect roi) + // + +/** + *

Extracts a rectangular submatrix.

+ * + *

The operators make a new header for the specified sub-array of + * *this. They are the most generalized forms of "Mat.row", + * "Mat.col", "Mat.rowRange", and "Mat.colRange". For example, + * A(Range(0, 10), Range.all()) is equivalent to A.rowRange(0, + * 10). Similarly to all of the above, the operators are O(1) operations, + * that is, no matrix data is copied.

+ * + * @param roi Extracted submatrix specified as a rectangle. + * + * @see org.opencv.core.Mat.operator() + */ + public Mat submat(Rect roi) + { + + Mat retVal = new Mat(n_submat(nativeObj, roi.x, roi.y, roi.width, roi.height)); + + return retVal; + } + + // + // C++: Mat Mat::t() + // + +/** + *

Transposes a matrix.

+ * + *

The method performs matrix transposition by means of matrix expressions. It + * does not perform the actual transposition but returns a temporary matrix + * transposition object that can be further used as a part of more complex + * matrix expressions or can be assigned to a matrix:

+ * + *

// C++ code:

+ * + *

Mat A1 = A + Mat.eye(A.size(), A.type)*lambda;

+ * + *

Mat C = A1.t()*A1; // compute (A + lambda*I)^t * (A + lamda*I)

+ * + * @see org.opencv.core.Mat.t + */ + public Mat t() + { + + Mat retVal = new Mat(n_t(nativeObj)); + + return retVal; + } + + // + // C++: size_t Mat::total() + // + +/** + *

Returns the total number of array elements.

+ * + *

The method returns the number of array elements (a number of pixels if the + * array represents an image).

+ * + * @see org.opencv.core.Mat.total + */ + public long total() + { + + long retVal = n_total(nativeObj); + + return retVal; + } + + // + // C++: int Mat::type() + // + +/** + *

Returns the type of a matrix element.

+ * + *

The method returns a matrix element type. This is an identifier compatible + * with the CvMat type system, like CV_16SC3 or 16-bit + * signed 3-channel array, and so on.

+ * + * @see org.opencv.core.Mat.type + */ + public int type() + { + + int retVal = n_type(nativeObj); + + return retVal; + } + + // + // C++: static Mat Mat::zeros(int rows, int cols, int type) + // + +/** + *

Returns a zero array of the specified size and type.

+ * + *

The method returns a Matlab-style zero array initializer. It can be used to + * quickly form a constant array as a function parameter, part of a matrix + * expression, or as a matrix initializer. + *

+ * + *

// C++ code:

+ * + *

Mat A;

+ * + *

A = Mat.zeros(3, 3, CV_32F);

+ * + *

In the example above, a new matrix is allocated only if A is not + * a 3x3 floating-point matrix. Otherwise, the existing matrix A is + * filled with zeros. + *

+ * + * @param rows Number of rows. + * @param cols Number of columns. + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.zeros + */ + public static Mat zeros(int rows, int cols, int type) + { + + Mat retVal = new Mat(n_zeros(rows, cols, type)); + + return retVal; + } + + // + // C++: static Mat Mat::zeros(Size size, int type) + // + +/** + *

Returns a zero array of the specified size and type.

+ * + *

The method returns a Matlab-style zero array initializer. It can be used to + * quickly form a constant array as a function parameter, part of a matrix + * expression, or as a matrix initializer. + *

+ * + *

// C++ code:

+ * + *

Mat A;

+ * + *

A = Mat.zeros(3, 3, CV_32F);

+ * + *

In the example above, a new matrix is allocated only if A is not + * a 3x3 floating-point matrix. Otherwise, the existing matrix A is + * filled with zeros. + *

+ * + * @param size Alternative to the matrix size specification Size(cols, + * rows). + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.zeros + */ + public static Mat zeros(Size size, int type) + { + + Mat retVal = new Mat(n_zeros(size.width, size.height, type)); + + return retVal; + } + + @Override + protected void finalize() throws Throwable { + n_delete(nativeObj); + super.finalize(); + } + + @Override + public String toString() { + return "Mat [ " + + rows() + "*" + cols() + "*" + CvType.typeToString(type()) + + ", isCont=" + isContinuous() + ", isSubmat=" + isSubmatrix() + + ", nativeObj=0x" + Long.toHexString(nativeObj) + + ", dataAddr=0x" + Long.toHexString(dataAddr()) + + " ]"; + } + + public String dump() { + return nDump(nativeObj); + } + + public int put(int row, int col, double... data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + return nPutD(nativeObj, row, col, data.length, data); + } + + public int put(int row, int col, float[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32F) { + return nPutF(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int put(int row, int col, int[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 
0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32S) { + return nPutI(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int put(int row, int col, short[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) { + return nPutS(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int put(int row, int col, byte[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) { + return nPutB(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, byte[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 
0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) { + return nGetB(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, short[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) { + return nGetS(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, int[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32S) { + return nGetI(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, float[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 
0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32F) { + return nGetF(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, double[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_64F) { + return nGetD(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public double[] get(int row, int col) { + return nGet(nativeObj, row, col); + } + + public int height() { + return rows(); + } + + public int width() { + return cols(); + } + + public long getNativeObjAddr() { + return nativeObj; + } + + // C++: Mat::Mat() + private static native long n_Mat(); + + // C++: Mat::Mat(int rows, int cols, int type) + private static native long n_Mat(int rows, int cols, int type); + + // C++: Mat::Mat(Size size, int type) + private static native long n_Mat(double size_width, double size_height, int type); + + // C++: Mat::Mat(int rows, int cols, int type, Scalar s) + private static native long n_Mat(int rows, int cols, int type, double s_val0, double s_val1, double s_val2, double s_val3); + + // C++: Mat::Mat(Size size, int type, Scalar s) + private static native long n_Mat(double size_width, double size_height, int type, double s_val0, double s_val1, double s_val2, double s_val3); + + // C++: Mat::Mat(Mat m, Range rowRange, Range colRange = Range::all()) + private static native long n_Mat(long m_nativeObj, int rowRange_start, int rowRange_end, int colRange_start, int colRange_end); + + private static 
native long n_Mat(long m_nativeObj, int rowRange_start, int rowRange_end); + + // C++: Mat Mat::adjustROI(int dtop, int dbottom, int dleft, int dright) + private static native long n_adjustROI(long nativeObj, int dtop, int dbottom, int dleft, int dright); + + // C++: void Mat::assignTo(Mat m, int type = -1) + private static native void n_assignTo(long nativeObj, long m_nativeObj, int type); + + private static native void n_assignTo(long nativeObj, long m_nativeObj); + + // C++: int Mat::channels() + private static native int n_channels(long nativeObj); + + // C++: int Mat::checkVector(int elemChannels, int depth = -1, bool + // requireContinuous = true) + private static native int n_checkVector(long nativeObj, int elemChannels, int depth, boolean requireContinuous); + + private static native int n_checkVector(long nativeObj, int elemChannels, int depth); + + private static native int n_checkVector(long nativeObj, int elemChannels); + + // C++: Mat Mat::clone() + private static native long n_clone(long nativeObj); + + // C++: Mat Mat::col(int x) + private static native long n_col(long nativeObj, int x); + + // C++: Mat Mat::colRange(int startcol, int endcol) + private static native long n_colRange(long nativeObj, int startcol, int endcol); + + // C++: int Mat::cols() + private static native int n_cols(long nativeObj); + + // C++: void Mat::convertTo(Mat& m, int rtype, double alpha = 1, double beta + // = 0) + private static native void n_convertTo(long nativeObj, long m_nativeObj, int rtype, double alpha, double beta); + + private static native void n_convertTo(long nativeObj, long m_nativeObj, int rtype, double alpha); + + private static native void n_convertTo(long nativeObj, long m_nativeObj, int rtype); + + // C++: void Mat::copyTo(Mat& m) + private static native void n_copyTo(long nativeObj, long m_nativeObj); + + // C++: void Mat::copyTo(Mat& m, Mat mask) + private static native void n_copyTo(long nativeObj, long m_nativeObj, long mask_nativeObj); + + // C++: 
void Mat::create(int rows, int cols, int type) + private static native void n_create(long nativeObj, int rows, int cols, int type); + + // C++: void Mat::create(Size size, int type) + private static native void n_create(long nativeObj, double size_width, double size_height, int type); + + // C++: Mat Mat::cross(Mat m) + private static native long n_cross(long nativeObj, long m_nativeObj); + + // C++: long Mat::dataAddr() + private static native long n_dataAddr(long nativeObj); + + // C++: int Mat::depth() + private static native int n_depth(long nativeObj); + + // C++: Mat Mat::diag(int d = 0) + private static native long n_diag(long nativeObj, int d); + + // C++: static Mat Mat::diag(Mat d) + private static native long n_diag(long d_nativeObj); + + // C++: double Mat::dot(Mat m) + private static native double n_dot(long nativeObj, long m_nativeObj); + + // C++: size_t Mat::elemSize() + private static native long n_elemSize(long nativeObj); + + // C++: size_t Mat::elemSize1() + private static native long n_elemSize1(long nativeObj); + + // C++: bool Mat::empty() + private static native boolean n_empty(long nativeObj); + + // C++: static Mat Mat::eye(int rows, int cols, int type) + private static native long n_eye(int rows, int cols, int type); + + // C++: static Mat Mat::eye(Size size, int type) + private static native long n_eye(double size_width, double size_height, int type); + + // C++: Mat Mat::inv(int method = DECOMP_LU) + private static native long n_inv(long nativeObj, int method); + + private static native long n_inv(long nativeObj); + + // C++: bool Mat::isContinuous() + private static native boolean n_isContinuous(long nativeObj); + + // C++: bool Mat::isSubmatrix() + private static native boolean n_isSubmatrix(long nativeObj); + + // C++: void Mat::locateROI(Size wholeSize, Point ofs) + private static native void locateROI_0(long nativeObj, double[] wholeSize_out, double[] ofs_out); + + // C++: Mat Mat::mul(Mat m, double scale = 1) + private static 
native long n_mul(long nativeObj, long m_nativeObj, double scale); + + private static native long n_mul(long nativeObj, long m_nativeObj); + + // C++: static Mat Mat::ones(int rows, int cols, int type) + private static native long n_ones(int rows, int cols, int type); + + // C++: static Mat Mat::ones(Size size, int type) + private static native long n_ones(double size_width, double size_height, int type); + + // C++: void Mat::push_back(Mat m) + private static native void n_push_back(long nativeObj, long m_nativeObj); + + // C++: void Mat::release() + private static native void n_release(long nativeObj); + + // C++: Mat Mat::reshape(int cn, int rows = 0) + private static native long n_reshape(long nativeObj, int cn, int rows); + + private static native long n_reshape(long nativeObj, int cn); + + // C++: Mat Mat::row(int y) + private static native long n_row(long nativeObj, int y); + + // C++: Mat Mat::rowRange(int startrow, int endrow) + private static native long n_rowRange(long nativeObj, int startrow, int endrow); + + // C++: int Mat::rows() + private static native int n_rows(long nativeObj); + + // C++: Mat Mat::operator =(Scalar s) + private static native long n_setTo(long nativeObj, double s_val0, double s_val1, double s_val2, double s_val3); + + // C++: Mat Mat::setTo(Scalar value, Mat mask = Mat()) + private static native long n_setTo(long nativeObj, double s_val0, double s_val1, double s_val2, double s_val3, long mask_nativeObj); + + // C++: Mat Mat::setTo(Mat value, Mat mask = Mat()) + private static native long n_setTo(long nativeObj, long value_nativeObj, long mask_nativeObj); + + private static native long n_setTo(long nativeObj, long value_nativeObj); + + // C++: Size Mat::size() + private static native double[] n_size(long nativeObj); + + // C++: size_t Mat::step1(int i = 0) + private static native long n_step1(long nativeObj, int i); + + private static native long n_step1(long nativeObj); + + // C++: Mat Mat::operator()(Range rowRange, Range 
colRange) + private static native long n_submat_rr(long nativeObj, int rowRange_start, int rowRange_end, int colRange_start, int colRange_end); + + // C++: Mat Mat::operator()(Rect roi) + private static native long n_submat(long nativeObj, int roi_x, int roi_y, int roi_width, int roi_height); + + // C++: Mat Mat::t() + private static native long n_t(long nativeObj); + + // C++: size_t Mat::total() + private static native long n_total(long nativeObj); + + // C++: int Mat::type() + private static native int n_type(long nativeObj); + + // C++: static Mat Mat::zeros(int rows, int cols, int type) + private static native long n_zeros(int rows, int cols, int type); + + // C++: static Mat Mat::zeros(Size size, int type) + private static native long n_zeros(double size_width, double size_height, int type); + + // native support for java finalize() + private static native void n_delete(long nativeObj); + + private static native int nPutD(long self, int row, int col, int count, double[] data); + + private static native int nPutF(long self, int row, int col, int count, float[] data); + + private static native int nPutI(long self, int row, int col, int count, int[] data); + + private static native int nPutS(long self, int row, int col, int count, short[] data); + + private static native int nPutB(long self, int row, int col, int count, byte[] data); + + private static native int nGetB(long self, int row, int col, int count, byte[] vals); + + private static native int nGetS(long self, int row, int col, int count, short[] vals); + + private static native int nGetI(long self, int row, int col, int count, int[] vals); + + private static native int nGetF(long self, int row, int col, int count, float[] vals); + + private static native int nGetD(long self, int row, int col, int count, double[] vals); + + private static native double[] nGet(long self, int row, int col); + + private static native String nDump(long self); +} diff --git a/src/org/opencv/core/MatOfByte.java 
b/src/org/opencv/core/MatOfByte.java new file mode 100644 index 0000000..0ebdb66 --- /dev/null +++ b/src/org/opencv/core/MatOfByte.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfByte extends Mat { + // 8UC(x) + private static final int _depth = CvType.CV_8U; + private static final int _channels = 1; + + public MatOfByte() { + super(); + } + + protected MatOfByte(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfByte fromNativeAddr(long addr) { + return new MatOfByte(addr); + } + + public MatOfByte(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public MatOfByte(byte...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(byte...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public byte[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + byte[] a = new byte[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! 
+ return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Byte ab[] = lb.toArray(new Byte[0]); + byte a[] = new byte[ab.length]; + for(int i=0; i toList() { + byte[] a = toArray(); + Byte ab[] = new Byte[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + + public void fromArray(DMatch...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i ldm) { + DMatch adm[] = ldm.toArray(new DMatch[0]); + fromArray(adm); + } + + public List toList() { + DMatch[] adm = toArray(); + return Arrays.asList(adm); + } +} diff --git a/src/org/opencv/core/MatOfDouble.java b/src/org/opencv/core/MatOfDouble.java new file mode 100644 index 0000000..cca5251 --- /dev/null +++ b/src/org/opencv/core/MatOfDouble.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfDouble extends Mat { + // 64FC(x) + private static final int _depth = CvType.CV_64F; + private static final int _channels = 1; + + public MatOfDouble() { + super(); + } + + protected MatOfDouble(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfDouble fromNativeAddr(long addr) { + return new MatOfDouble(addr); + } + + public MatOfDouble(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfDouble(double...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(double...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public double[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + double[] a = new double[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Double ab[] = lb.toArray(new Double[0]); + double a[] = new double[ab.length]; + for(int i=0; i toList() { + double[] a = toArray(); + Double ab[] = new Double[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(float...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public float[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + float[] a = new float[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Float ab[] = lb.toArray(new Float[0]); + float a[] = new float[ab.length]; + for(int i=0; i toList() { + float[] a = toArray(); + Float ab[] = new Float[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(float...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! 
+ } + + public float[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + float[] a = new float[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Float ab[] = lb.toArray(new Float[0]); + float a[] = new float[ab.length]; + for(int i=0; i toList() { + float[] a = toArray(); + Float ab[] = new Float[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(float...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public float[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + float[] a = new float[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Float ab[] = lb.toArray(new Float[0]); + float a[] = new float[ab.length]; + for(int i=0; i toList() { + float[] a = toArray(); + Float ab[] = new Float[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(int...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public int[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + int[] a = new int[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! 
+ return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Integer ab[] = lb.toArray(new Integer[0]); + int a[] = new int[ab.length]; + for(int i=0; i toList() { + int[] a = toArray(); + Integer ab[] = new Integer[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(int...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public int[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + int[] a = new int[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Integer ab[] = lb.toArray(new Integer[0]); + int a[] = new int[ab.length]; + for(int i=0; i toList() { + int[] a = toArray(); + Integer ab[] = new Integer[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(KeyPoint...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i lkp) { + KeyPoint akp[] = lkp.toArray(new KeyPoint[0]); + fromArray(akp); + } + + public List toList() { + KeyPoint[] akp = toArray(); + return Arrays.asList(akp); + } +} diff --git a/src/org/opencv/core/MatOfPoint.java b/src/org/opencv/core/MatOfPoint.java new file mode 100644 index 0000000..23eeed0 --- /dev/null +++ b/src/org/opencv/core/MatOfPoint.java @@ -0,0 +1,78 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint extends Mat { + // 32SC2 + private static final int _depth = CvType.CV_32S; + private static final int _channels = 2; + + public MatOfPoint() { + super(); + } + + protected MatOfPoint(long 
addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint fromNativeAddr(long addr) { + return new MatOfPoint(addr); + } + + public MatOfPoint(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public MatOfPoint(Point...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + int buff[] = new int[num * _channels]; + for(int i=0; i lp) { + Point ap[] = lp.toArray(new Point[0]); + fromArray(ap); + } + + public List toList() { + Point[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfPoint2f.java b/src/org/opencv/core/MatOfPoint2f.java new file mode 100644 index 0000000..ba4be4a --- /dev/null +++ b/src/org/opencv/core/MatOfPoint2f.java @@ -0,0 +1,78 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint2f extends Mat { + // 32FC2 + private static final int _depth = CvType.CV_32F; + private static final int _channels = 2; + + public MatOfPoint2f() { + super(); + } + + protected MatOfPoint2f(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint2f fromNativeAddr(long addr) { + return new MatOfPoint2f(addr); + } + + public MatOfPoint2f(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfPoint2f(Point...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i lp) { + Point ap[] = lp.toArray(new Point[0]); + fromArray(ap); + } + + public List toList() { + Point[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfPoint3.java b/src/org/opencv/core/MatOfPoint3.java new file mode 100644 index 0000000..16e2130 --- /dev/null +++ b/src/org/opencv/core/MatOfPoint3.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint3 extends Mat { + // 32SC3 + private static final int _depth = CvType.CV_32S; + private static final int _channels = 3; + + public MatOfPoint3() { + super(); + } + + protected MatOfPoint3(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint3 fromNativeAddr(long addr) { + return new MatOfPoint3(addr); + } + + public MatOfPoint3(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfPoint3(Point3...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point3...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + int buff[] = new int[num * _channels]; + for(int i=0; i lp) { + Point3 ap[] = lp.toArray(new Point3[0]); + fromArray(ap); + } + + public List toList() { + Point3[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfPoint3f.java b/src/org/opencv/core/MatOfPoint3f.java new file mode 100644 index 0000000..97e2a95 --- /dev/null +++ b/src/org/opencv/core/MatOfPoint3f.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint3f extends Mat { + // 32FC3 + private static final int _depth = CvType.CV_32F; + private static final int _channels = 3; + + public MatOfPoint3f() { + super(); + } + + protected MatOfPoint3f(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint3f fromNativeAddr(long addr) { + return new MatOfPoint3f(addr); + } + + public MatOfPoint3f(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfPoint3f(Point3...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point3...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i lp) { + Point3 ap[] = lp.toArray(new Point3[0]); + fromArray(ap); + } + + public List toList() { + Point3[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfRect.java b/src/org/opencv/core/MatOfRect.java new file mode 100644 index 0000000..2e58bfe --- /dev/null +++ b/src/org/opencv/core/MatOfRect.java @@ -0,0 +1,81 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + + +public class MatOfRect extends Mat { + // 32SC4 + private static final int _depth = CvType.CV_32S; + private static final int _channels = 4; + + public MatOfRect() { + super(); + } + + protected MatOfRect(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfRect fromNativeAddr(long addr) { + return new MatOfRect(addr); + } + + public MatOfRect(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfRect(Rect...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Rect...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + int buff[] = new int[num * _channels]; + for(int i=0; i lr) { + Rect ap[] = lr.toArray(new Rect[0]); + fromArray(ap); + } + + public List toList() { + Rect[] ar = toArray(); + return Arrays.asList(ar); + } +} diff --git a/src/org/opencv/core/Point.java b/src/org/opencv/core/Point.java new file mode 100644 index 0000000..cb19a1d --- /dev/null +++ b/src/org/opencv/core/Point.java @@ -0,0 +1,120 @@ +package org.opencv.core; + +/** + *

Template class for 2D points specified by its coordinates x and + * y. + * An instance of the class is interchangeable with C structures, + * CvPoint and CvPoint2D32f. There is also a cast + * operator to convert point coordinates to the specified type. The conversion + * from floating-point coordinates to integer coordinates is done by rounding. + * Commonly, the conversion uses this operation for each of the coordinates. + * Besides the class members listed in the declaration above, the following + * operations on points are implemented:

+ * + *

pt1 = pt2 + pt3;

+ * + *

// C++ code:

+ * + *

pt1 = pt2 - pt3;

+ * + *

pt1 = pt2 * a;

+ * + *

pt1 = a * pt2;

+ * + *

pt1 += pt2;

+ * + *

pt1 -= pt2;

+ * + *

pt1 *= a;

+ * + *

double value = norm(pt); // L2 norm

+ * + *

pt1 == pt2;

+ * + *

pt1 != pt2;

+ * + *

For your convenience, the following type aliases are defined:

+ * + *

typedef Point_<int> Point2i;

+ * + *

typedef Point2i Point;

+ * + *

typedef Point_<float> Point2f;

+ * + *

typedef Point_<double> Point2d;

+ * + *

Example:

+ * + *

Point2f a(0.3f, 0.f), b(0.f, 0.4f);

+ * + *

Point pt = (a + b)*10.f;

+ * + *

cout << pt.x << ", " << pt.y << endl;

+ * + * @see org.opencv.core.Point_ + */ +public class Point { + + public double x, y; + + public Point(double x, double y) { + this.x = x; + this.y = y; + } + + public Point() { + this(0, 0); + } + + public Point(double[] vals) { + this(); + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + x = vals.length > 0 ? vals[0] : 0; + y = vals.length > 1 ? vals[1] : 0; + } else { + x = 0; + y = 0; + } + } + + public Point clone() { + return new Point(x, y); + } + + public double dot(Point p) { + return x * p.x + y * p.y; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Point)) return false; + Point it = (Point) obj; + return x == it.x && y == it.y; + } + + public boolean inside(Rect r) { + return r.contains(this); + } + + @Override + public String toString() { + return "{" + x + ", " + y + "}"; + } +} diff --git a/src/org/opencv/core/Point3.java b/src/org/opencv/core/Point3.java new file mode 100644 index 0000000..711e073 --- /dev/null +++ b/src/org/opencv/core/Point3.java @@ -0,0 +1,98 @@ +package org.opencv.core; + +/** + *

Template class for 3D points specified by their coordinates x, + * y and z. + * An instance of the class is interchangeable with the C structure + * CvPoint3D32f. Similarly to Point_, the coordinates + * of 3D points can be converted to another type. The vector arithmetic and + * comparison operations are also supported.

+ * + *

The following Point3_<> aliases are available:

+ * + *

typedef Point3_<int> Point3i;

+ * + *

// C++ code:

+ * + *

typedef Point3_<float> Point3f;

+ * + *

typedef Point3_<double> Point3d;

+ * + * @see org.opencv.core.Point3_ + */ +public class Point3 { + + public double x, y, z; + + public Point3(double x, double y, double z) { + this.x = x; + this.y = y; + this.z = z; + } + + public Point3() { + this(0, 0, 0); + } + + public Point3(Point p) { + x = p.x; + y = p.y; + z = 0; + } + + public Point3(double[] vals) { + this(); + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + x = vals.length > 0 ? vals[0] : 0; + y = vals.length > 1 ? vals[1] : 0; + z = vals.length > 2 ? vals[2] : 0; + } else { + x = 0; + y = 0; + z = 0; + } + } + + public Point3 clone() { + return new Point3(x, y, z); + } + + public double dot(Point3 p) { + return x * p.x + y * p.y + z * p.z; + } + + public Point3 cross(Point3 p) { + return new Point3(y * p.z - z * p.y, z * p.x - x * p.z, x * p.y - y * p.x); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(z); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Point3)) return false; + Point3 it = (Point3) obj; + return x == it.x && y == it.y && z == it.z; + } + + @Override + public String toString() { + return "{" + x + ", " + y + ", " + z + "}"; + } +} diff --git a/src/org/opencv/core/Range.java b/src/org/opencv/core/Range.java new file mode 100644 index 0000000..e904510 --- /dev/null +++ b/src/org/opencv/core/Range.java @@ -0,0 +1,129 @@ +package org.opencv.core; + +/** + *

Template class specifying a continuous subsequence (slice) of a sequence.

+ * + *

class Range

+ * + *

// C++ code:

+ * + * + *

public:...

+ * + *

int start, end;

+ * + *

};

+ * + *

The class is used to specify a row or a column span in a matrix (

+ * + *

"Mat") and for many other purposes. Range(a,b) is basically the + * same as a:b in Matlab or a..b in Python. As in + * Python, start is an inclusive left boundary of the range and + * end is an exclusive right boundary of the range. Such a + * half-opened interval is usually denoted as [start,end). + * The static method Range.all() returns a special variable that + * means "the whole sequence" or "the whole range", just like " : " + * in Matlab or " ... " in Python. All the methods and functions in + * OpenCV that take Range support this special Range.all() + * value. But, of course, in case of your own custom processing, you will + * probably have to check and handle it explicitly:

+ * + *

// C++ code:

+ * + *

void my_function(..., const Range& r,....)

+ * + * + *

if(r == Range.all()) {

+ * + *

// process all the data

+ * + * + *

else {

+ * + *

// process [r.start, r.end)

+ * + * + * + *

+ * + * @see org.opencv.core.Range + */ +public class Range { + + public int start, end; + + public Range(int s, int e) { + this.start = s; + this.end = e; + } + + public Range() { + this(0, 0); + } + + public Range(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + start = vals.length > 0 ? (int) vals[0] : 0; + end = vals.length > 1 ? (int) vals[1] : 0; + } else { + start = 0; + end = 0; + } + + } + + public int size() { + return empty() ? 0 : end - start; + } + + public boolean empty() { + return end <= start; + } + + public static Range all() { + return new Range(Integer.MIN_VALUE, Integer.MAX_VALUE); + } + + public Range intersection(Range r1) { + Range r = new Range(Math.max(r1.start, this.start), Math.min(r1.end, this.end)); + r.end = Math.max(r.end, r.start); + return r; + } + + public Range shift(int delta) { + return new Range(start + delta, end + delta); + } + + public Range clone() { + return new Range(start, end); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(start); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(end); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Range)) return false; + Range it = (Range) obj; + return start == it.start && end == it.end; + } + + @Override + public String toString() { + return "[" + start + ", " + end + ")"; + } +} diff --git a/src/org/opencv/core/Rect.java b/src/org/opencv/core/Rect.java new file mode 100644 index 0000000..dd6677a --- /dev/null +++ b/src/org/opencv/core/Rect.java @@ -0,0 +1,164 @@ +package org.opencv.core; + +/** + *

Template class for 2D rectangles, described by the following parameters:

+ *
    + *
  • Coordinates of the top-left corner. This is a default interpretation + * of Rect_.x and Rect_.y in OpenCV. Though, in your + * algorithms you may count x and y from the + * bottom-left corner. + *
  • Rectangle width and height. + *
+ * + *

OpenCV typically assumes that the top and left boundary of the rectangle are + * inclusive, while the right and bottom boundaries are not. For example, the + * method Rect_.contains returns true if

+ * + *

x <= pt.x < x+width,<BR>y <= pt.y < y+height

+ * + *

Virtually every loop over an image ROI in OpenCV (where the ROI is specified by + * Rect_) is implemented as:

+ * + *

// C++ code:

+ * + *

for(int y = roi.y; y < roi.y + rect.height; y++)

+ * + *

for(int x = roi.x; x < roi.x + rect.width; x++)

+ * + * + *

//...

+ * + * + *

In addition to the class members, the following operations on rectangles are + * implemented:

+ *
    + *
  • rect = rect +- point (shifting a rectangle by a certain + * offset) + *
  • rect = rect +- size (expanding or shrinking a rectangle by a + * certain amount) + *
  • rect += point, rect -= point, rect += size, rect -= size + * (augmenting operations) + *
  • rect = rect1 & rect2 (rectangle intersection) + *
  • rect = rect1 | rect2 (minimum area rectangle containing + * rect1 and rect2) + *
  • rect &= rect1, rect |= rect1 (and the corresponding + * augmenting operations) + *
  • rect == rect1, rect != rect1 (rectangle comparison) + *
+ * + *

This is an example of how the partial ordering on rectangles can be established + * (rect1 ⊆ rect2):

+ * + *

// C++ code:

+ * + *

template<typename _Tp> inline bool

+ * + *

operator <= (const Rect_<_Tp>& r1, const Rect_<_Tp>& r2)

+ * + * + *

return (r1 & r2) == r1;

+ * + * + *

For your convenience, the Rect_<> alias is available:

+ * + *

typedef Rect_<int> Rect;

+ * + * @see org.opencv.core.Rect_ + */ +public class Rect { + + public int x, y, width, height; + + public Rect(int x, int y, int width, int height) { + this.x = x; + this.y = y; + this.width = width; + this.height = height; + } + + public Rect() { + this(0, 0, 0, 0); + } + + public Rect(Point p1, Point p2) { + x = (int) (p1.x < p2.x ? p1.x : p2.x); + y = (int) (p1.y < p2.y ? p1.y : p2.y); + width = (int) (p1.x > p2.x ? p1.x : p2.x) - x; + height = (int) (p1.y > p2.y ? p1.y : p2.y) - y; + } + + public Rect(Point p, Size s) { + this((int) p.x, (int) p.y, (int) s.width, (int) s.height); + } + + public Rect(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + x = vals.length > 0 ? (int) vals[0] : 0; + y = vals.length > 1 ? (int) vals[1] : 0; + width = vals.length > 2 ? (int) vals[2] : 0; + height = vals.length > 3 ? (int) vals[3] : 0; + } else { + x = 0; + y = 0; + width = 0; + height = 0; + } + } + + public Rect clone() { + return new Rect(x, y, width, height); + } + + public Point tl() { + return new Point(x, y); + } + + public Point br() { + return new Point(x + width, y + height); + } + + public Size size() { + return new Size(width, height); + } + + public double area() { + return width * height; + } + + public boolean contains(Point p) { + return x <= p.x && p.x < x + width && y <= p.y && p.y < y + height; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(height); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(width); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Rect)) 
return false; + Rect it = (Rect) obj; + return x == it.x && y == it.y && width == it.width && height == it.height; + } + + @Override + public String toString() { + return "{" + x + ", " + y + ", " + width + "x" + height + "}"; + } +} diff --git a/src/org/opencv/core/RotatedRect.java b/src/org/opencv/core/RotatedRect.java new file mode 100644 index 0000000..f905361 --- /dev/null +++ b/src/org/opencv/core/RotatedRect.java @@ -0,0 +1,112 @@ +package org.opencv.core; + +public class RotatedRect { + + public Point center; + public Size size; + public double angle; + + public RotatedRect() { + this.center = new Point(); + this.size = new Size(); + this.angle = 0; + } + + public RotatedRect(Point c, Size s, double a) { + this.center = c.clone(); + this.size = s.clone(); + this.angle = a; + } + + public RotatedRect(double[] vals) { + this(); + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + center.x = vals.length > 0 ? (double) vals[0] : 0; + center.y = vals.length > 1 ? (double) vals[1] : 0; + size.width = vals.length > 2 ? (double) vals[2] : 0; + size.height = vals.length > 3 ? (double) vals[3] : 0; + angle = vals.length > 4 ? 
(double) vals[4] : 0; + } else { + center.x = 0; + center.y = 0; + size.width = 0; + size.height = 0; + angle = 0; + } + } + + public void points(Point pt[]) + { + double _angle = angle * Math.PI / 180.0; + double b = (double) Math.cos(_angle) * 0.5f; + double a = (double) Math.sin(_angle) * 0.5f; + + pt[0] = new Point( + center.x - a * size.height - b * size.width, + center.y + b * size.height - a * size.width); + + pt[1] = new Point( + center.x + a * size.height - b * size.width, + center.y - b * size.height - a * size.width); + + pt[2] = new Point( + 2 * center.x - pt[0].x, + 2 * center.y - pt[0].y); + + pt[3] = new Point( + 2 * center.x - pt[1].x, + 2 * center.y - pt[1].y); + } + + public Rect boundingRect() + { + Point pt[] = new Point[4]; + points(pt); + Rect r = new Rect((int) Math.floor(Math.min(Math.min(Math.min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)), + (int) Math.floor(Math.min(Math.min(Math.min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)), + (int) Math.ceil(Math.max(Math.max(Math.max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)), + (int) Math.ceil(Math.max(Math.max(Math.max(pt[0].y, pt[1].y), pt[2].y), pt[3].y))); + r.width -= r.x - 1; + r.height -= r.y - 1; + return r; + } + + public RotatedRect clone() { + return new RotatedRect(center, size, angle); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(center.x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(center.y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(size.width); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(size.height); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(angle); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj 
instanceof RotatedRect)) return false; + RotatedRect it = (RotatedRect) obj; + return center.equals(it.center) && size.equals(it.size) && angle == it.angle; + } + + @Override + public String toString() { + return "{ " + center + " " + size + " * " + angle + " }"; + } +} diff --git a/src/org/opencv/core/Scalar.java b/src/org/opencv/core/Scalar.java new file mode 100644 index 0000000..ce87e7f --- /dev/null +++ b/src/org/opencv/core/Scalar.java @@ -0,0 +1,106 @@ +package org.opencv.core; + +/** + *

Template class for a 4-element vector derived from Vec.

+ * + *

template class Scalar_ : public Vec<_Tp, 4> {... };

+ * + *

// C++ code:

+ * + *

typedef Scalar_ Scalar;

+ * + *

Being derived from Vec<_Tp, 4>, Scalar_ and + * Scalar can be used just as typical 4-element vectors. In + * addition, they can be converted to/from CvScalar. The type + * Scalar is widely used in OpenCV to pass pixel values. + *

+ * + * @see org.opencv.core.Scalar_ + */ +public class Scalar { + + public double val[]; + + public Scalar(double v0, double v1, double v2, double v3) { + val = new double[] { v0, v1, v2, v3 }; + } + + public Scalar(double v0, double v1, double v2) { + val = new double[] { v0, v1, v2, 0 }; + } + + public Scalar(double v0, double v1) { + val = new double[] { v0, v1, 0, 0 }; + } + + public Scalar(double v0) { + val = new double[] { v0, 0, 0, 0 }; + } + + public Scalar(double[] vals) { + if (vals != null && vals.length == 4) + val = vals.clone(); + else { + val = new double[4]; + set(vals); + } + } + + public void set(double[] vals) { + if (vals != null) { + val[0] = vals.length > 0 ? vals[0] : 0; + val[1] = vals.length > 1 ? vals[1] : 0; + val[2] = vals.length > 2 ? vals[2] : 0; + val[3] = vals.length > 3 ? vals[3] : 0; + } else + val[0] = val[1] = val[2] = val[3] = 0; + } + + public static Scalar all(double v) { + return new Scalar(v, v, v, v); + } + + public Scalar clone() { + return new Scalar(val); + } + + public Scalar mul(Scalar it, double scale) { + return new Scalar(val[0] * it.val[0] * scale, val[1] * it.val[1] * scale, + val[2] * it.val[2] * scale, val[3] * it.val[3] * scale); + } + + public Scalar mul(Scalar it) { + return mul(it, 1); + } + + public Scalar conj() { + return new Scalar(val[0], -val[1], -val[2], -val[3]); + } + + public boolean isReal() { + return val[1] == 0 && val[2] == 0 && val[3] == 0; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + java.util.Arrays.hashCode(val); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Scalar)) return false; + Scalar it = (Scalar) obj; + if (!java.util.Arrays.equals(val, it.val)) return false; + return true; + } + + @Override + public String toString() { + return "[" + val[0] + ", " + val[1] + ", " + val[2] + ", " + val[3] + "]"; + } + +} diff --git 
a/src/org/opencv/core/Size.java b/src/org/opencv/core/Size.java new file mode 100644 index 0000000..cf84ff5 --- /dev/null +++ b/src/org/opencv/core/Size.java @@ -0,0 +1,87 @@ +package org.opencv.core; + +/** + *

Template class for specifying the size of an image or rectangle. The class + * includes two members called width and height. The + * structure can be converted to and from the old OpenCV structures + * CvSize and CvSize2D32f. The same set of arithmetic + * and comparison operations as for Point_ is available.

+ * + *

OpenCV defines the following Size_<> aliases:

+ * + *

typedef Size_ Size2i;

+ * + *

// C++ code:

+ * + *

typedef Size2i Size;

+ * + *

typedef Size_ Size2f;

+ * + * @see org.opencv.core.Size_ + */ +public class Size { + + public double width, height; + + public Size(double width, double height) { + this.width = width; + this.height = height; + } + + public Size() { + this(0, 0); + } + + public Size(Point p) { + width = p.x; + height = p.y; + } + + public Size(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + width = vals.length > 0 ? vals[0] : 0; + height = vals.length > 1 ? vals[1] : 0; + } else { + width = 0; + height = 0; + } + } + + public double area() { + return width * height; + } + + public Size clone() { + return new Size(width, height); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(height); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(width); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Size)) return false; + Size it = (Size) obj; + return width == it.width && height == it.height; + } + + @Override + public String toString() { + return (int)width + "x" + (int)height; + } + +} diff --git a/src/org/opencv/core/TermCriteria.java b/src/org/opencv/core/TermCriteria.java new file mode 100644 index 0000000..f601556 --- /dev/null +++ b/src/org/opencv/core/TermCriteria.java @@ -0,0 +1,100 @@ +package org.opencv.core; + +/** + *

The class defining termination criteria for iterative algorithms. You can + * initialize it by default constructor and then override any parameters, or the + * structure may be fully initialized using the advanced variant of the + * constructor.

+ * + * @see org.opencv.core.TermCriteria + */ +public class TermCriteria { + + /** + * The maximum number of iterations or elements to compute + */ + public static final int COUNT = 1; + /** + * The maximum number of iterations or elements to compute + */ + public static final int MAX_ITER = COUNT; + /** + * The desired accuracy threshold or change in parameters at which the iterative algorithm is terminated. + */ + public static final int EPS = 2; + + public int type; + public int maxCount; + public double epsilon; + + /** + * Termination criteria for iterative algorithms. + * + * @param type + * the type of termination criteria: COUNT, EPS or COUNT + EPS. + * @param maxCount + * the maximum number of iterations/elements. + * @param epsilon + * the desired accuracy. + */ + public TermCriteria(int type, int maxCount, double epsilon) { + this.type = type; + this.maxCount = maxCount; + this.epsilon = epsilon; + } + + /** + * Termination criteria for iterative algorithms. + */ + public TermCriteria() { + this(0, 0, 0.0); + } + + public TermCriteria(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + type = vals.length > 0 ? (int) vals[0] : 0; + maxCount = vals.length > 1 ? (int) vals[1] : 0; + epsilon = vals.length > 2 ? 
(double) vals[2] : 0; + } else { + type = 0; + maxCount = 0; + epsilon = 0; + } + } + + public TermCriteria clone() { + return new TermCriteria(type, maxCount, epsilon); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(type); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(maxCount); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(epsilon); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof TermCriteria)) return false; + TermCriteria it = (TermCriteria) obj; + return type == it.type && maxCount == it.maxCount && epsilon == it.epsilon; + } + + @Override + public String toString() { + // NOTE(review): removed dead code "if (this == null) return \"null\";" -- 'this' is never null in an instance method + return "{ type: " + type + ", maxCount: " + maxCount + ", epsilon: " + epsilon + "}"; + } +} diff --git a/src/org/opencv/core/package.bluej b/src/org/opencv/core/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/features2d/DMatch.java b/src/org/opencv/features2d/DMatch.java new file mode 100644 index 0000000..d520a2e --- /dev/null +++ b/src/org/opencv/features2d/DMatch.java @@ -0,0 +1,57 @@ +package org.opencv.features2d; + +//C++: class DMatch + +/** + * Structure for matching: query descriptor index, train descriptor index, train + * image index and distance between descriptors. + */ +public class DMatch { + + /** + * Query descriptor index. + */ + public int queryIdx; + /** + * Train descriptor index. + */ + public int trainIdx; + /** + * Train image index. 
+ */ + public int imgIdx; + + public float distance; + + public DMatch() { + this(-1, -1, Float.MAX_VALUE); + } + + public DMatch(int _queryIdx, int _trainIdx, float _distance) { + queryIdx = _queryIdx; + trainIdx = _trainIdx; + imgIdx = -1; + distance = _distance; + } + + public DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance) { + queryIdx = _queryIdx; + trainIdx = _trainIdx; + imgIdx = _imgIdx; + distance = _distance; + } + + /** + * Less is better. + */ + public boolean lessThan(DMatch it) { + return distance < it.distance; + } + + @Override + public String toString() { + return "DMatch [queryIdx=" + queryIdx + ", trainIdx=" + trainIdx + + ", imgIdx=" + imgIdx + ", distance=" + distance + "]"; + } + +} diff --git a/src/org/opencv/features2d/DescriptorExtractor.java b/src/org/opencv/features2d/DescriptorExtractor.java new file mode 100644 index 0000000..dff80f4 --- /dev/null +++ b/src/org/opencv/features2d/DescriptorExtractor.java @@ -0,0 +1,278 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.utils.Converters; + +// C++: class javaDescriptorExtractor +/** + *

Abstract base class for computing descriptors for image keypoints.

+ * + *

class CV_EXPORTS DescriptorExtractor

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~DescriptorExtractor();

+ * + *

void compute(const Mat& image, vector& keypoints,

+ * + *

Mat& descriptors) const;

+ * + *

void compute(const vector& images, vector >& keypoints,

+ * + *

vector& descriptors) const;

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

virtual int descriptorSize() const = 0;

+ * + *

virtual int descriptorType() const = 0;

+ * + *

static Ptr create(const string& descriptorExtractorType);

+ * + *

protected:...

+ * + *

};

+ * + *

In this interface, a keypoint descriptor can be represented as a

+ * + *

dense, fixed-dimension vector of a basic type. Most descriptors follow this + * pattern as it simplifies computing distances between descriptors. Therefore, + * a collection of descriptors is represented as "Mat", where each row is a + * keypoint descriptor.

+ * + * @see org.opencv.features2d.DescriptorExtractor : public Algorithm + */ +public class DescriptorExtractor { + + protected final long nativeObj; + protected DescriptorExtractor(long addr) { nativeObj = addr; } + + + private static final int + OPPONENTEXTRACTOR = 1000; + + + public static final int + SIFT = 1, + SURF = 2, + ORB = 3, + BRIEF = 4, + BRISK = 5, + FREAK = 6, + OPPONENT_SIFT = OPPONENTEXTRACTOR + SIFT, + OPPONENT_SURF = OPPONENTEXTRACTOR + SURF, + OPPONENT_ORB = OPPONENTEXTRACTOR + ORB, + OPPONENT_BRIEF = OPPONENTEXTRACTOR + BRIEF, + OPPONENT_BRISK = OPPONENTEXTRACTOR + BRISK, + OPPONENT_FREAK = OPPONENTEXTRACTOR + FREAK; + + + // + // C++: void javaDescriptorExtractor::compute(Mat image, vector_KeyPoint& keypoints, Mat descriptors) + // + +/** + *

Computes the descriptors for a set of keypoints detected in an image (first + * variant) or image set (second variant).

+ * + * @param image Image. + * @param keypoints Input collection of keypoints. Keypoints for which a + * descriptor cannot be computed are removed. Sometimes new keypoints can be + * added, for example: SIFT duplicates keypoint with several + * dominant orientations (for each orientation). + * @param descriptors Computed descriptors. In the second variant of the method + * descriptors[i] are descriptors computed for a keypoints[i]". + * Row j is the keypoints (or keypoints[i]) + * is the descriptor for keypoint j"-th keypoint. + * + * @see org.opencv.features2d.DescriptorExtractor.compute + */ + public void compute(Mat image, MatOfKeyPoint keypoints, Mat descriptors) + { + Mat keypoints_mat = keypoints; + compute_0(nativeObj, image.nativeObj, keypoints_mat.nativeObj, descriptors.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorExtractor::compute(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat& descriptors) + // + +/** + *

Computes the descriptors for a set of keypoints detected in an image (first + * variant) or image set (second variant).

+ * + * @param images Image set. + * @param keypoints Input collection of keypoints. Keypoints for which a + * descriptor cannot be computed are removed. Sometimes new keypoints can be + * added, for example: SIFT duplicates keypoint with several + * dominant orientations (for each orientation). + * @param descriptors Computed descriptors. In the second variant of the method + * descriptors[i] are descriptors computed for a keypoints[i]". + * Row j is the keypoints (or keypoints[i]) + * is the descriptor for keypoint j"-th keypoint. + * + * @see org.opencv.features2d.DescriptorExtractor.compute + */ + public void compute(List images, List keypoints, List descriptors) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + List keypoints_tmplm = new ArrayList((keypoints != null) ? keypoints.size() : 0); + Mat keypoints_mat = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypoints_tmplm); + Mat descriptors_mat = new Mat(); + compute_1(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj, descriptors_mat.nativeObj); + Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints); + Converters.Mat_to_vector_Mat(descriptors_mat, descriptors); + return; + } + + + // + // C++: static javaDescriptorExtractor* javaDescriptorExtractor::create(int extractorType) + // + +/** + *

Creates a descriptor extractor by name.

+ * + *

The current implementation supports the following types of a descriptor + * extractor:

+ *
    + *
  • "SIFT" -- "SIFT" + *
  • "SURF" -- "SURF" + *
  • "ORB" -- "ORB" + *
  • "BRISK" -- "BRISK" + *
  • "BRIEF" -- "BriefDescriptorExtractor" + *
+ * + *

A combined format is also supported: descriptor extractor adapter name + * ("Opponent" -- "OpponentColorDescriptorExtractor") + descriptor + * extractor name (see above), for example: "OpponentSIFT".

+ * + * @param extractorType a extractorType + * + * @see org.opencv.features2d.DescriptorExtractor.create + */ + public static DescriptorExtractor create(int extractorType) + { + + DescriptorExtractor retVal = new DescriptorExtractor(create_0(extractorType)); + + return retVal; + } + + + // + // C++: int javaDescriptorExtractor::descriptorSize() + // + + public int descriptorSize() + { + + int retVal = descriptorSize_0(nativeObj); + + return retVal; + } + + + // + // C++: int javaDescriptorExtractor::descriptorType() + // + + public int descriptorType() + { + + int retVal = descriptorType_0(nativeObj); + + return retVal; + } + + + // + // C++: bool javaDescriptorExtractor::empty() + // + + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaDescriptorExtractor::read(string fileName) + // + + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaDescriptorExtractor::write(string fileName) + // + + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void javaDescriptorExtractor::compute(Mat image, vector_KeyPoint& keypoints, Mat descriptors) + private static native void compute_0(long nativeObj, long image_nativeObj, long keypoints_mat_nativeObj, long descriptors_nativeObj); + + // C++: void javaDescriptorExtractor::compute(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat& descriptors) + private static native void compute_1(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj, long descriptors_mat_nativeObj); + + // C++: static javaDescriptorExtractor* javaDescriptorExtractor::create(int extractorType) + private static native long create_0(int extractorType); + + // C++: int javaDescriptorExtractor::descriptorSize() + private static native int descriptorSize_0(long 
nativeObj); + + // C++: int javaDescriptorExtractor::descriptorType() + private static native int descriptorType_0(long nativeObj); + + // C++: bool javaDescriptorExtractor::empty() + private static native boolean empty_0(long nativeObj); + + // C++: void javaDescriptorExtractor::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaDescriptorExtractor::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/DescriptorMatcher.java b/src/org/opencv/features2d/DescriptorMatcher.java new file mode 100644 index 0000000..40a7613 --- /dev/null +++ b/src/org/opencv/features2d/DescriptorMatcher.java @@ -0,0 +1,742 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDMatch; +import org.opencv.utils.Converters; + +// C++: class javaDescriptorMatcher +/** + *

Abstract base class for matching keypoint descriptors. It has two groups of + * match methods: for matching descriptors of an image with another image or + * with an image set.

+ * + *

class DescriptorMatcher

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~DescriptorMatcher();

+ * + *

virtual void add(const vector& descriptors);

+ * + *

const vector& getTrainDescriptors() const;

+ * + *

virtual void clear();

+ * + *

bool empty() const;

+ * + *

virtual bool isMaskSupported() const = 0;

+ * + *

virtual void train();

+ * + *

/ *

+ *
    + *
  • Group of methods to match descriptors from an image pair. + *
  • / + *
+ * + *

void match(const Mat& queryDescriptors, const Mat& trainDescriptors,

+ * + *

vector& matches, const Mat& mask=Mat()) const;

+ * + *

void knnMatch(const Mat& queryDescriptors, const Mat& trainDescriptors,

+ * + *

vector >& matches, int k,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

void radiusMatch(const Mat& queryDescriptors, const Mat& trainDescriptors,

+ * + *

vector >& matches, float maxDistance,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

/ *

+ *
    + *
  • Group of methods to match descriptors from one image to an image set. + *
  • / + *
+ * + *

void match(const Mat& queryDescriptors, vector& matches,

+ * + *

const vector& masks=vector());

+ * + *

void knnMatch(const Mat& queryDescriptors, vector >& matches,

+ * + *

int k, const vector& masks=vector(),

+ * + *

bool compactResult=false);

+ * + *

void radiusMatch(const Mat& queryDescriptors, vector >& + * matches,

+ * + *

float maxDistance, const vector& masks=vector(),

+ * + *

bool compactResult=false);

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

virtual Ptr clone(bool emptyTrainData=false) const = 0;

+ * + *

static Ptr create(const string& descriptorMatcherType);

+ * + *

protected:

+ * + *

vector trainDescCollection;...

+ * + *

};

+ * + * @see org.opencv.features2d.DescriptorMatcher : public Algorithm + */ +public class DescriptorMatcher { + + protected final long nativeObj; + protected DescriptorMatcher(long addr) { nativeObj = addr; } + + + public static final int + FLANNBASED = 1, + BRUTEFORCE = 2, + BRUTEFORCE_L1 = 3, + BRUTEFORCE_HAMMING = 4, + BRUTEFORCE_HAMMINGLUT = 5, + BRUTEFORCE_SL2 = 6; + + + // + // C++: void javaDescriptorMatcher::add(vector_Mat descriptors) + // + +/** + *

Adds descriptors to train a descriptor collection. If the collection + * trainDescCollectionis is not empty, the new descriptors are + * added to existing train descriptors.

+ * + * @param descriptors Descriptors to add. Each descriptors[i] is a + * set of descriptors from the same train image. + * + * @see org.opencv.features2d.DescriptorMatcher.add + */ + public void add(List descriptors) + { + Mat descriptors_mat = Converters.vector_Mat_to_Mat(descriptors); + add_0(nativeObj, descriptors_mat.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::clear() + // + +/** + *

Clears the train descriptor collection.

+ * + * @see org.opencv.features2d.DescriptorMatcher.clear + */ + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: javaDescriptorMatcher* javaDescriptorMatcher::jclone(bool emptyTrainData = false) + // + + public DescriptorMatcher clone(boolean emptyTrainData) + { + + DescriptorMatcher retVal = new DescriptorMatcher(clone_0(nativeObj, emptyTrainData)); + + return retVal; + } + + public DescriptorMatcher clone() + { + + DescriptorMatcher retVal = new DescriptorMatcher(clone_1(nativeObj)); + + return retVal; + } + + + // + // C++: static javaDescriptorMatcher* javaDescriptorMatcher::create(int matcherType) + // + +/** + *

Creates a descriptor matcher of a given type with the default parameters + * (using default constructor).

+ * + * @param matcherType a matcherType + * + * @see org.opencv.features2d.DescriptorMatcher.create + */ + public static DescriptorMatcher create(int matcherType) + { + + DescriptorMatcher retVal = new DescriptorMatcher(create_0(matcherType)); + + return retVal; + } + + + // + // C++: bool javaDescriptorMatcher::empty() + // + +/** + *

Returns true if there are no train descriptors in the collection.

+ * + * @see org.opencv.features2d.DescriptorMatcher.empty + */ + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: vector_Mat javaDescriptorMatcher::getTrainDescriptors() + // + +/** + *

Returns a constant link to the train descriptor collection trainDescCollection.

+ * + * @see org.opencv.features2d.DescriptorMatcher.getTrainDescriptors + */ + public List getTrainDescriptors() + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getTrainDescriptors_0(nativeObj)); + Converters.Mat_to_vector_Mat(retValMat, retVal); + return retVal; + } + + + // + // C++: bool javaDescriptorMatcher::isMaskSupported() + // + +/** + *

Returns true if the descriptor matcher supports masking permissible matches.

+ * + * @see org.opencv.features2d.DescriptorMatcher.isMaskSupported + */ + public boolean isMaskSupported() + { + + boolean retVal = isMaskSupported_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * @param mask Mask specifying permissible matches between an input query and + * train matrices of descriptors. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, int k, Mat mask, boolean compactResult) + { + Mat matches_mat = new Mat(); + knnMatch_0(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, k, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, int k) + { + Mat matches_mat = new Mat(); + knnMatch_1(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between the input query descriptors and stored train descriptors from + * the i-th image trainDescCollection[i]. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, List matches, int k, List masks, boolean compactResult) + { + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + knnMatch_2(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, k, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, List matches, int k) + { + Mat matches_mat = new Mat(); + knnMatch_3(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, Mat trainDescriptors, vector_DMatch& matches, Mat mask = Mat()) + // + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * @param mask Mask specifying permissible matches between an input query and + * train matrices of descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, Mat trainDescriptors, MatOfDMatch matches, Mat mask) + { + Mat matches_mat = matches; + match_0(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, Mat trainDescriptors, MatOfDMatch matches) + { + Mat matches_mat = matches; + match_1(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, vector_DMatch& matches, vector_Mat masks = vector()) + // + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between the input query descriptors and stored train descriptors from + * the i-th image trainDescCollection[i]. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, MatOfDMatch matches, List masks) + { + Mat matches_mat = matches; + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + match_2(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, masks_mat.nativeObj); + + return; + } + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, MatOfDMatch matches) + { + Mat matches_mat = matches; + match_3(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find such training descriptors that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * the distance increasing order.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * @param mask Mask specifying permissible matches between an input query and + * train matrices of descriptors. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, float maxDistance, Mat mask, boolean compactResult) + { + Mat matches_mat = new Mat(); + radiusMatch_0(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, maxDistance, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find such training descriptors that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * the distance increasing order.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, float maxDistance) + { + Mat matches_mat = new Mat(); + radiusMatch_1(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find such training descriptors that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * the distance increasing order.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between the input query descriptors and stored train descriptors from + * the i-th image trainDescCollection[i]. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, List matches, float maxDistance, List masks, boolean compactResult) + { + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + radiusMatch_2(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, maxDistance, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find such training descriptors that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * the distance increasing order.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, List matches, float maxDistance) + { + Mat matches_mat = new Mat(); + radiusMatch_3(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::read(string fileName) + // + + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaDescriptorMatcher::train() + // + +/** + *

Trains a descriptor matcher.

+ * + *

Trains a descriptor matcher (for example, the flann index). In all methods to + * match, the method train() is run every time before matching. + * Some descriptor matchers (for example, BruteForceMatcher) have + * an empty implementation of this method. Other matchers really train their + * inner structures (for example, FlannBasedMatcher trains + * flann.Index).

+ * + * @see org.opencv.features2d.DescriptorMatcher.train + */ + public void train() + { + + train_0(nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::write(string fileName) + // + + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void javaDescriptorMatcher::add(vector_Mat descriptors) + private static native void add_0(long nativeObj, long descriptors_mat_nativeObj); + + // C++: void javaDescriptorMatcher::clear() + private static native void clear_0(long nativeObj); + + // C++: javaDescriptorMatcher* javaDescriptorMatcher::jclone(bool emptyTrainData = false) + private static native long clone_0(long nativeObj, boolean emptyTrainData); + private static native long clone_1(long nativeObj); + + // C++: static javaDescriptorMatcher* javaDescriptorMatcher::create(int matcherType) + private static native long create_0(int matcherType); + + // C++: bool javaDescriptorMatcher::empty() + private static native boolean empty_0(long nativeObj); + + // C++: vector_Mat javaDescriptorMatcher::getTrainDescriptors() + private static native long getTrainDescriptors_0(long nativeObj); + + // C++: bool javaDescriptorMatcher::isMaskSupported() + private static native boolean isMaskSupported_0(long nativeObj); + + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + private static native void knnMatch_0(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, int k, long mask_nativeObj, boolean compactResult); + private static native void knnMatch_1(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, 
vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + private static native void knnMatch_2(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, int k, long masks_mat_nativeObj, boolean compactResult); + private static native void knnMatch_3(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, Mat trainDescriptors, vector_DMatch& matches, Mat mask = Mat()) + private static native void match_0(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, long mask_nativeObj); + private static native void match_1(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj); + + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, vector_DMatch& matches, vector_Mat masks = vector()) + private static native void match_2(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, long masks_mat_nativeObj); + private static native void match_3(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj); + + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + private static native void radiusMatch_0(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance, long mask_nativeObj, boolean compactResult); + private static native void radiusMatch_1(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + private static 
native void radiusMatch_2(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance, long masks_mat_nativeObj, boolean compactResult); + private static native void radiusMatch_3(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaDescriptorMatcher::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaDescriptorMatcher::train() + private static native void train_0(long nativeObj); + + // C++: void javaDescriptorMatcher::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/FeatureDetector.java b/src/org/opencv/features2d/FeatureDetector.java new file mode 100644 index 0000000..7f67e32 --- /dev/null +++ b/src/org/opencv/features2d/FeatureDetector.java @@ -0,0 +1,303 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.utils.Converters; + +// C++: class javaFeatureDetector +/** + *

Abstract base class for 2D image feature detectors.

+ * + *

class CV_EXPORTS FeatureDetector

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~FeatureDetector();

+ * + *

void detect(const Mat& image, vector& keypoints,

+ * + *

const Mat& mask=Mat()) const;

+ * + *

void detect(const vector& images,

+ * + *

vector >& keypoints,

+ * + *

const vector& masks=vector()) const;

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

static Ptr create(const string& detectorType);

+ * + *

protected:...

+ * + *

};

+ * + * @see org.opencv.features2d.FeatureDetector : public Algorithm + */ +public class FeatureDetector { + + protected final long nativeObj; + protected FeatureDetector(long addr) { nativeObj = addr; } + + + private static final int + GRIDDETECTOR = 1000, + PYRAMIDDETECTOR = 2000, + DYNAMICDETECTOR = 3000; + + + public static final int + FAST = 1, + STAR = 2, + SIFT = 3, + SURF = 4, + ORB = 5, + MSER = 6, + GFTT = 7, + HARRIS = 8, + SIMPLEBLOB = 9, + DENSE = 10, + BRISK = 11, + GRIDRETECTOR = 1000, + GRID_FAST = GRIDDETECTOR + FAST, + GRID_STAR = GRIDDETECTOR + STAR, + GRID_SIFT = GRIDDETECTOR + SIFT, + GRID_SURF = GRIDDETECTOR + SURF, + GRID_ORB = GRIDDETECTOR + ORB, + GRID_MSER = GRIDDETECTOR + MSER, + GRID_GFTT = GRIDDETECTOR + GFTT, + GRID_HARRIS = GRIDDETECTOR + HARRIS, + GRID_SIMPLEBLOB = GRIDDETECTOR + SIMPLEBLOB, + GRID_DENSE = GRIDDETECTOR + DENSE, + GRID_BRISK = GRIDDETECTOR + BRISK, + PYRAMID_FAST = PYRAMIDDETECTOR + FAST, + PYRAMID_STAR = PYRAMIDDETECTOR + STAR, + PYRAMID_SIFT = PYRAMIDDETECTOR + SIFT, + PYRAMID_SURF = PYRAMIDDETECTOR + SURF, + PYRAMID_ORB = PYRAMIDDETECTOR + ORB, + PYRAMID_MSER = PYRAMIDDETECTOR + MSER, + PYRAMID_GFTT = PYRAMIDDETECTOR + GFTT, + PYRAMID_HARRIS = PYRAMIDDETECTOR + HARRIS, + PYRAMID_SIMPLEBLOB = PYRAMIDDETECTOR + SIMPLEBLOB, + PYRAMID_DENSE = PYRAMIDDETECTOR + DENSE, + PYRAMID_BRISK = PYRAMIDDETECTOR + BRISK, + DYNAMIC_FAST = DYNAMICDETECTOR + FAST, + DYNAMIC_STAR = DYNAMICDETECTOR + STAR, + DYNAMIC_SIFT = DYNAMICDETECTOR + SIFT, + DYNAMIC_SURF = DYNAMICDETECTOR + SURF, + DYNAMIC_ORB = DYNAMICDETECTOR + ORB, + DYNAMIC_MSER = DYNAMICDETECTOR + MSER, + DYNAMIC_GFTT = DYNAMICDETECTOR + GFTT, + DYNAMIC_HARRIS = DYNAMICDETECTOR + HARRIS, + DYNAMIC_SIMPLEBLOB = DYNAMICDETECTOR + SIMPLEBLOB, + DYNAMIC_DENSE = DYNAMICDETECTOR + DENSE, + DYNAMIC_BRISK = DYNAMICDETECTOR + BRISK; + + + // + // C++: static javaFeatureDetector* javaFeatureDetector::create(int detectorType) + // + +/** + *

Creates a feature detector by its name.

+ * + *

The following detector types are supported:

+ *
    + *
  • "FAST" -- "FastFeatureDetector" + *
  • "STAR" -- "StarFeatureDetector" + *
  • "SIFT" -- "SIFT" (nonfree module) + *
  • "SURF" -- "SURF" (nonfree module) + *
  • "ORB" -- "ORB" + *
  • "BRISK" -- "BRISK" + *
  • "MSER" -- "MSER" + *
  • "GFTT" -- "GoodFeaturesToTrackDetector" + *
  • "HARRIS" -- "GoodFeaturesToTrackDetector" with Harris + * detector enabled + *
  • "Dense" -- "DenseFeatureDetector" + *
  • "SimpleBlob" -- "SimpleBlobDetector" + *
+ * + *

Also a combined format is supported: feature detector adapter name + * ("Grid" -- "GridAdaptedFeatureDetector", "Pyramid" + * -- "PyramidAdaptedFeatureDetector") + feature detector name (see above), for + * example: "GridFAST", "PyramidSTAR".

+ * + * @param detectorType Feature detector type. + * + * @see org.opencv.features2d.FeatureDetector.create + */ + public static FeatureDetector create(int detectorType) + { + + FeatureDetector retVal = new FeatureDetector(create_0(detectorType)); + + return retVal; + } + + + // + // C++: void javaFeatureDetector::detect(Mat image, vector_KeyPoint& keypoints, Mat mask = Mat()) + // + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param image Image. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * @param mask Mask specifying where to look for keypoints (optional). It must + * be a 8-bit integer matrix with non-zero values in the region of interest. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(Mat image, MatOfKeyPoint keypoints, Mat mask) + { + Mat keypoints_mat = keypoints; + detect_0(nativeObj, image.nativeObj, keypoints_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param image Image. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(Mat image, MatOfKeyPoint keypoints) + { + Mat keypoints_mat = keypoints; + detect_1(nativeObj, image.nativeObj, keypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaFeatureDetector::detect(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat masks = vector()) + // + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param images Image set. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * @param masks Masks for each input image specifying where to look for + * keypoints (optional). masks[i] is a mask for images[i]. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(List images, List keypoints, List masks) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat keypoints_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + detect_2(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj, masks_mat.nativeObj); + Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints); + return; + } + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param images Image set. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(List images, List keypoints) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat keypoints_mat = new Mat(); + detect_3(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj); + Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints); + return; + } + + + // + // C++: bool javaFeatureDetector::empty() + // + + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaFeatureDetector::read(string fileName) + // + + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaFeatureDetector::write(string fileName) + // + + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: static javaFeatureDetector* javaFeatureDetector::create(int detectorType) + private static native long create_0(int detectorType); + + // C++: void javaFeatureDetector::detect(Mat image, vector_KeyPoint& keypoints, Mat mask = Mat()) + private static native void detect_0(long nativeObj, long image_nativeObj, long keypoints_mat_nativeObj, long mask_nativeObj); + private static native void detect_1(long nativeObj, long image_nativeObj, long keypoints_mat_nativeObj); + + // C++: void javaFeatureDetector::detect(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat masks = vector()) + private static native void detect_2(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj, long masks_mat_nativeObj); + private static native void detect_3(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj); + + // C++: bool 
javaFeatureDetector::empty() + private static native boolean empty_0(long nativeObj); + + // C++: void javaFeatureDetector::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaFeatureDetector::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/Features2d.java b/src/org/opencv/features2d/Features2d.java new file mode 100644 index 0000000..0b17924 --- /dev/null +++ b/src/org/opencv/features2d/Features2d.java @@ -0,0 +1,402 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfDMatch; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.core.Scalar; +import org.opencv.utils.Converters; + +public class Features2d { + + public static final int + DRAW_OVER_OUTIMG = 1, + NOT_DRAW_SINGLE_POINTS = 2, + DRAW_RICH_KEYPOINTS = 4; + + + // + // C++: void drawKeypoints(Mat image, vector_KeyPoint keypoints, Mat outImage, Scalar color = Scalar::all(-1), int flags = 0) + // + +/** + *

Draws keypoints.

+ * + * @param image Source image. + * @param keypoints Keypoints from the source image. + * @param outImage Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * @param color Color of keypoints. + * @param flags Flags setting drawing features. Possible flags bit + * values are defined by DrawMatchesFlags. See details above in + * "drawMatches". + * + * @see org.opencv.features2d.Features2d.drawKeypoints + */ + public static void drawKeypoints(Mat image, MatOfKeyPoint keypoints, Mat outImage, Scalar color, int flags) + { + Mat keypoints_mat = keypoints; + drawKeypoints_0(image.nativeObj, keypoints_mat.nativeObj, outImage.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], flags); + + return; + } + +/** + *

Draws keypoints.

+ * + * @param image Source image. + * @param keypoints Keypoints from the source image. + * @param outImage Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * + * @see org.opencv.features2d.Features2d.drawKeypoints + */ + public static void drawKeypoints(Mat image, MatOfKeyPoint keypoints, Mat outImage) + { + Mat keypoints_mat = keypoints; + drawKeypoints_1(image.nativeObj, keypoints_mat.nativeObj, outImage.nativeObj); + + return; + } + + + // + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_char matchesMask = vector(), int flags = 0) + // + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Match is a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * @param matchColor Color of matches (lines and connected keypoints). If + * matchColor==Scalar.all(-1), the color is generated randomly. + * @param singlePointColor Color of single keypoints (circles), which means that + * keypoints do not have the matches. If singlePointColor==Scalar.all(-1), + * the color is generated randomly. + * @param matchesMask Mask determining which matches are drawn. If the mask is + * empty, all matches are drawn. + * @param flags Flags setting drawing features. Possible flags bit + * values are defined by DrawMatchesFlags. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, MatOfByte matchesMask, int flags) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + Mat matches1to2_mat = matches1to2; + Mat matchesMask_mat = matchesMask; + drawMatches_0(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj, flags); + + return; + } + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Match is a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + Mat matches1to2_mat = matches1to2; + drawMatches_1(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj); + + return; + } + + + // + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_vector_char matchesMask = vector >(), int flags = 0) + // + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Match is a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * @param matchColor Color of matches (lines and connected keypoints). If + * matchColor==Scalar.all(-1), the color is generated randomly. + * @param singlePointColor Color of single keypoints (circles), which means that + * keypoints do not have the matches. If singlePointColor==Scalar.all(-1), + * the color is generated randomly. + * @param matchesMask Mask determining which matches are drawn. If the mask is + * empty, all matches are drawn. + * @param flags Flags setting drawing features. Possible flags bit + * values are defined by DrawMatchesFlags. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches2(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, List matchesMask, int flags) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + List matches1to2_tmplm = new ArrayList((matches1to2 != null) ? matches1to2.size() : 0); + Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm); + List matchesMask_tmplm = new ArrayList((matchesMask != null) ? 
matchesMask.size() : 0); + Mat matchesMask_mat = Converters.vector_vector_char_to_Mat(matchesMask, matchesMask_tmplm); + drawMatches2_0(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj, flags); + + return; + } + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Match is a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches2(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List matches1to2, Mat outImg) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + List matches1to2_tmplm = new ArrayList((matches1to2 != null) ? matches1to2.size() : 0); + Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm); + drawMatches2_1(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj); + + return; + } + + + + + // C++: void drawKeypoints(Mat image, vector_KeyPoint keypoints, Mat outImage, Scalar color = Scalar::all(-1), int flags = 0) + private static native void drawKeypoints_0(long image_nativeObj, long keypoints_mat_nativeObj, long outImage_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int flags); + private static native void drawKeypoints_1(long image_nativeObj, long keypoints_mat_nativeObj, long outImage_nativeObj); + + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_char matchesMask = vector(), int flags = 0) + private static native void drawMatches_0(long img1_nativeObj, long keypoints1_mat_nativeObj, long 
img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj, double matchColor_val0, double matchColor_val1, double matchColor_val2, double matchColor_val3, double singlePointColor_val0, double singlePointColor_val1, double singlePointColor_val2, double singlePointColor_val3, long matchesMask_mat_nativeObj, int flags); + private static native void drawMatches_1(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj); + + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_vector_char matchesMask = vector >(), int flags = 0) + private static native void drawMatches2_0(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj, double matchColor_val0, double matchColor_val1, double matchColor_val2, double matchColor_val3, double singlePointColor_val0, double singlePointColor_val1, double singlePointColor_val2, double singlePointColor_val3, long matchesMask_mat_nativeObj, int flags); + private static native void drawMatches2_1(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj); + +} diff --git a/src/org/opencv/features2d/GenericDescriptorMatcher.java b/src/org/opencv/features2d/GenericDescriptorMatcher.java new file mode 100644 index 0000000..a5cfc53 --- /dev/null +++ b/src/org/opencv/features2d/GenericDescriptorMatcher.java @@ -0,0 +1,861 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.features2d; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDMatch; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.utils.Converters; + +// C++: class javaGenericDescriptorMatcher +/** + *

Abstract interface for extracting and matching a keypoint descriptor. There + * are also "DescriptorExtractor" and "DescriptorMatcher" for these purposes but + * their interfaces are intended for descriptors represented as vectors in a + * multidimensional space. GenericDescriptorMatcher is a more + * generic interface for descriptors. DescriptorMatcher and + * GenericDescriptorMatcher have two groups of match methods: for + * matching keypoints of an image with another image or with an image set.

+ * + *

class GenericDescriptorMatcher

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

GenericDescriptorMatcher();

+ * + *

virtual ~GenericDescriptorMatcher();

+ * + *

virtual void add(const vector& images,

+ * + *

vector >& keypoints);

+ * + *

const vector& getTrainImages() const;

+ * + *

const vector >& getTrainKeypoints() const;

+ * + *

virtual void clear();

+ * + *

virtual void train() = 0;

+ * + *

virtual bool isMaskSupported() = 0;

+ * + *

void classify(const Mat& queryImage,

+ * + *

vector& queryKeypoints,

+ * + *

const Mat& trainImage,

+ * + *

vector& trainKeypoints) const;

+ * + *

void classify(const Mat& queryImage,

+ * + *

vector& queryKeypoints);

+ * + *

/ *

+ *
    + *
  • Group of methods to match keypoints from an image pair. + *
  • / + *
+ * + *

void match(const Mat& queryImage, vector& queryKeypoints,

+ * + *

const Mat& trainImage, vector& trainKeypoints,

+ * + *

vector& matches, const Mat& mask=Mat()) const;

+ * + *

void knnMatch(const Mat& queryImage, vector& queryKeypoints,

+ * + *

const Mat& trainImage, vector& trainKeypoints,

+ * + *

vector >& matches, int k,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

void radiusMatch(const Mat& queryImage, vector& queryKeypoints,

+ * + *

const Mat& trainImage, vector& trainKeypoints,

+ * + *

vector >& matches, float maxDistance,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

/ *

+ *
    + *
  • Group of methods to match keypoints from one image to an image set. + *
  • / + *
+ * + *

void match(const Mat& queryImage, vector& queryKeypoints,

+ * + *

vector& matches, const vector& masks=vector());

+ * + *

void knnMatch(const Mat& queryImage, vector& queryKeypoints,

+ * + *

vector >& matches, int k,

+ * + *

const vector& masks=vector(), bool compactResult=false);

+ * + *

void radiusMatch(const Mat& queryImage, vector& queryKeypoints,

+ * + *

vector >& matches, float maxDistance,

+ * + *

const vector& masks=vector(), bool compactResult=false);

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

virtual Ptr clone(bool emptyTrainData=false) const + * = 0;

+ * + *

protected:...

+ * + *

};

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher + */ +public class GenericDescriptorMatcher { + + protected final long nativeObj; + protected GenericDescriptorMatcher(long addr) { nativeObj = addr; } + + + public static final int + ONEWAY = 1, + FERN = 2; + + + // + // C++: void javaGenericDescriptorMatcher::add(vector_Mat images, vector_vector_KeyPoint keypoints) + // + +/** + *

Adds images and their keypoints to the training collection stored in the + * class instance.

+ * + * @param images Image collection. + * @param keypoints Point collection. It is assumed that keypoints[i] + * are keypoints detected in the image images[i]. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.add + */ + public void add(List images, List keypoints) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + List keypoints_tmplm = new ArrayList((keypoints != null) ? keypoints.size() : 0); + Mat keypoints_mat = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypoints_tmplm); + add_0(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints) + // + +/** + *

Classifies keypoints from a query set.

+ * + *

The method classifies each keypoint from a query set. The first variant of + * the method takes a train image and its keypoints as an input argument. The + * second variant uses the internally stored training collection that can be + * built using the GenericDescriptorMatcher.add method.

+ * + *

The methods do the following:

+ *
    + *
  • Call the GenericDescriptorMatcher.match method to find + * correspondence between the query set and the training set. + *
  • Set the class_id field of each keypoint from the query + * set to class_id of the corresponding keypoint from the training + * set. + *
+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints from a query image. + * @param trainImage Train image. + * @param trainKeypoints Keypoints from a train image. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.classify + */ + public void classify(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + classify_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints) + // + +/** + *

Classifies keypoints from a query set.

+ * + *

The method classifies each keypoint from a query set. The first variant of + * the method takes a train image and its keypoints as an input argument. The + * second variant uses the internally stored training collection that can be + * built using the GenericDescriptorMatcher.add method.

+ * + *

The methods do the following:

+ *
    + *
  • Call the GenericDescriptorMatcher.match method to find + * correspondence between the query set and the training set. + *
  • Set the class_id field of each keypoint from the query + * set to class_id of the corresponding keypoint from the training + * set. + *
+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints from a query image. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.classify + */ + public void classify(Mat queryImage, MatOfKeyPoint queryKeypoints) + { + Mat queryKeypoints_mat = queryKeypoints; + classify_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::clear() + // + +/** + *

Clears a train collection (images and keypoints).

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.clear + */ + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::jclone(bool emptyTrainData = false) + // + + public GenericDescriptorMatcher clone(boolean emptyTrainData) + { + + GenericDescriptorMatcher retVal = new GenericDescriptorMatcher(clone_0(nativeObj, emptyTrainData)); + + return retVal; + } + + public GenericDescriptorMatcher clone() + { + + GenericDescriptorMatcher retVal = new GenericDescriptorMatcher(clone_1(nativeObj)); + + return retVal; + } + + + // + // C++: static javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::create(int matcherType) + // + + public static GenericDescriptorMatcher create(int matcherType) + { + + GenericDescriptorMatcher retVal = new GenericDescriptorMatcher(create_0(matcherType)); + + return retVal; + } + + + // + // C++: bool javaGenericDescriptorMatcher::empty() + // + + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: vector_Mat javaGenericDescriptorMatcher::getTrainImages() + // + +/** + *

Returns a train image collection.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.getTrainImages + */ + public List getTrainImages() + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getTrainImages_0(nativeObj)); + Converters.Mat_to_vector_Mat(retValMat, retVal); + return retVal; + } + + + // + // C++: vector_vector_KeyPoint javaGenericDescriptorMatcher::getTrainKeypoints() + // + +/** + *

Returns a train keypoints collection.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.getTrainKeypoints + */ + public List getTrainKeypoints() + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getTrainKeypoints_0(nativeObj)); + Converters.Mat_to_vector_vector_KeyPoint(retValMat, retVal); + return retVal; + } + + + // + // C++: bool javaGenericDescriptorMatcher::isMaskSupported() + // + +/** + *

Returns true if a generic descriptor matcher supports masking + * permissible matches.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.isMaskSupported + */ + public boolean isMaskSupported() + { + + boolean retVal = isMaskSupported_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatch.match. + * The parameters are similar, and the semantics is similar to DescriptorMatcher.knnMatch. + * But this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param k a k + * @param mask a mask + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, int k, Mat mask, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + knnMatch_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, k, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatch.match. + * The parameters are similar, and the semantics is similar to DescriptorMatcher.knnMatch. + * But this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param k a k + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, int k) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + knnMatch_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatch.match. + * The parameters are similar, and the semantics is similar to DescriptorMatcher.knnMatch. + * But this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param k a k + * @param masks a masks + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, int k, List masks, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + knnMatch_2(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, k, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatch.match. + * The parameters are similar, and the semantics is similar to DescriptorMatcher.knnMatch. + * But this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param k a k + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, int k) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + knnMatch_3(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_DMatch& matches, Mat mask = Mat()) + // + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param trainImage Train image. It is not added to a train image collection + * stored in the class object. + * @param trainKeypoints Keypoints detected in trainImage. They are + * not added to a train points collection stored in the class object. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * @param mask Mask specifying permissible matches between an input query and + * train keypoints. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, MatOfDMatch matches, Mat mask) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = matches; + match_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param trainImage Train image. It is not added to a train image collection + * stored in the class object. + * @param trainKeypoints Keypoints detected in trainImage. They are + * not added to a train points collection stored in the class object. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, MatOfDMatch matches) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = matches; + match_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, vector_DMatch& matches, vector_Mat masks = vector()) + // + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between input query keypoints and stored train keypoints from the + * i-th image. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, MatOfDMatch matches, List masks) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = matches; + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + match_2(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, masks_mat.nativeObj); + + return; + } + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, MatOfDMatch matches) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = matches; + match_3(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radius. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * @param mask a mask + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, float maxDistance, Mat mask, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + radiusMatch_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radius. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, float maxDistance) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + radiusMatch_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radius. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * @param masks a masks + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, float maxDistance, List masks, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + radiusMatch_2(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radius. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, float maxDistance) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + radiusMatch_3(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::read(string fileName) + // + +/** + *

Reads a matcher object from a file node.

+ * + * @param fileName a fileName + * + * @see org.opencv.features2d.GenericDescriptorMatcher.read + */ + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::train() + // + +/** + *

Trains descriptor matcher

+ * + *

Prepares descriptor matcher, for example, creates a tree-based structure, to + * extract descriptors or to optimize descriptors matching.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.train + */ + public void train() + { + + train_0(nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::write(string fileName) + // + +/** + *

Writes a match object to a file storage.

+ * + * @param fileName a fileName + * + * @see org.opencv.features2d.GenericDescriptorMatcher.write + */ + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void javaGenericDescriptorMatcher::add(vector_Mat images, vector_vector_KeyPoint keypoints) + private static native void add_0(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints) + private static native void classify_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints) + private static native void classify_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::clear() + private static native void clear_0(long nativeObj); + + // C++: javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::jclone(bool emptyTrainData = false) + private static native long clone_0(long nativeObj, boolean emptyTrainData); + private static native long clone_1(long nativeObj); + + // C++: static javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::create(int matcherType) + private static native long create_0(int matcherType); + + // C++: bool javaGenericDescriptorMatcher::empty() + private static native boolean empty_0(long nativeObj); + + // C++: vector_Mat javaGenericDescriptorMatcher::getTrainImages() + private static native long getTrainImages_0(long nativeObj); + + // C++: vector_vector_KeyPoint javaGenericDescriptorMatcher::getTrainKeypoints() + private static native long getTrainKeypoints_0(long nativeObj); + + // C++: bool 
javaGenericDescriptorMatcher::isMaskSupported() + private static native boolean isMaskSupported_0(long nativeObj); + + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + private static native void knnMatch_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k, long mask_nativeObj, boolean compactResult); + private static native void knnMatch_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + private static native void knnMatch_2(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k, long masks_mat_nativeObj, boolean compactResult); + private static native void knnMatch_3(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_DMatch& matches, Mat mask = Mat()) + private static native void match_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, long mask_nativeObj); + private static native void match_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long 
matches_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, vector_DMatch& matches, vector_Mat masks = vector()) + private static native void match_2(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, long masks_mat_nativeObj); + private static native void match_3(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + private static native void radiusMatch_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance, long mask_nativeObj, boolean compactResult); + private static native void radiusMatch_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + private static native void radiusMatch_2(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance, long masks_mat_nativeObj, boolean compactResult); + private static native void radiusMatch_3(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaGenericDescriptorMatcher::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // 
C++: void javaGenericDescriptorMatcher::train() + private static native void train_0(long nativeObj); + + // C++: void javaGenericDescriptorMatcher::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/KeyPoint.java b/src/org/opencv/features2d/KeyPoint.java new file mode 100644 index 0000000..d0f03ba --- /dev/null +++ b/src/org/opencv/features2d/KeyPoint.java @@ -0,0 +1,161 @@ +package org.opencv.features2d; + +import org.opencv.core.Point; + +/** + *

Data structure for salient point detectors.

+ * + *

coordinates of the keypoint

+ * + *

diameter of the meaningful keypoint neighborhood

+ * + *

// C++ code:

+ * + *

computed orientation of the keypoint (-1 if not applicable). Its possible + * values are in a range [0,360) degrees. It is measured relative to image + * coordinate system (y-axis is directed downward), ie in clockwise.

+ * + *

the response by which the most strong keypoints have been selected. Can be + * used for further sorting or subsampling

+ * + *

octave (pyramid layer) from which the keypoint has been extracted

+ * + *

object id that can be used to clustered keypoints by an object they belong to

+ * + * @see org.opencv.features2d.KeyPoint + */ +public class KeyPoint { + + /** + * Coordinates of the keypoint. + */ + public Point pt; + /** + * Diameter of the useful keypoint adjacent area. + */ + public float size; + /** + * Computed orientation of the keypoint (-1 if not applicable). + */ + public float angle; + /** + * The response, by which the strongest keypoints have been selected. Can + * be used for further sorting or subsampling. + */ + public float response; + /** + * Octave (pyramid layer), from which the keypoint has been extracted. + */ + public int octave; + /** + * Object ID, that can be used to cluster keypoints by an object they + * belong to. + */ + public int class_id; + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * @param _response keypoint detector response on the keypoint (that is, + * strength of the keypoint) + * @param _octave pyramid octave in which the keypoint has been detected + * @param _class_id object id + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave, int _class_id) + { + pt = new Point(x, y); + size = _size; + angle = _angle; + response = _response; + octave = _octave; + class_id = _class_id; + } + +/** + *

The keypoint constructors

+ * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint() + { + this(0, 0, 0, -1, 0, 0, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * @param _response keypoint detector response on the keypoint (that is, + * strength of the keypoint) + * @param _octave pyramid octave in which the keypoint has been detected + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave) + { + this(x, y, _size, _angle, _response, _octave, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * @param _response keypoint detector response on the keypoint (that is, + * strength of the keypoint) + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle, float _response) + { + this(x, y, _size, _angle, _response, 0, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle) + { + this(x, y, _size, _angle, 0, 0, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size) + { + this(x, y, _size, -1, 0, 0, -1); + } + + @Override + public String toString() { + return "KeyPoint [pt=" + pt + ", size=" + size + ", angle=" + angle + + ", response=" + response + ", octave=" + octave + + ", class_id=" + class_id + "]"; + } + +} diff --git a/src/org/opencv/features2d/package.bluej b/src/org/opencv/features2d/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/highgui/Highgui.java b/src/org/opencv/highgui/Highgui.java new file mode 100644 index 0000000..cdd2a6d --- /dev/null +++ b/src/org/opencv/highgui/Highgui.java @@ -0,0 +1,584 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.highgui; + +import java.lang.String; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfInt; + +public class Highgui { + + public static final int + CV_FONT_LIGHT = 25, + CV_FONT_NORMAL = 50, + CV_FONT_DEMIBOLD = 63, + CV_FONT_BOLD = 75, + CV_FONT_BLACK = 87, + CV_STYLE_NORMAL = 0, + CV_STYLE_ITALIC = 1, + CV_STYLE_OBLIQUE = 2, + CV_LOAD_IMAGE_UNCHANGED = -1, + CV_LOAD_IMAGE_GRAYSCALE = 0, + CV_LOAD_IMAGE_COLOR = 1, + CV_LOAD_IMAGE_ANYDEPTH = 2, + CV_LOAD_IMAGE_ANYCOLOR = 4, + CV_IMWRITE_JPEG_QUALITY = 1, + CV_IMWRITE_PNG_COMPRESSION = 16, + CV_IMWRITE_PNG_STRATEGY = 17, + CV_IMWRITE_PNG_BILEVEL = 18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT = 0, + CV_IMWRITE_PNG_STRATEGY_FILTERED = 1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, + CV_IMWRITE_PNG_STRATEGY_RLE = 3, + CV_IMWRITE_PNG_STRATEGY_FIXED = 4, + CV_IMWRITE_PXM_BINARY = 32, + CV_CVTIMG_FLIP = 1, + CV_CVTIMG_SWAP_RB = 2, + CV_CAP_MSMF = 1400, + CV_CAP_ANDROID = 1000, + CV_CAP_XIAPI = 1100, + CV_CAP_AVFOUNDATION = 1200, + CV_CAP_GIGANETIX = 1300, + CV_CAP_PROP_FRAME_WIDTH = 3, + 
CV_CAP_PROP_FRAME_HEIGHT = 4, + CV_CAP_PROP_ZOOM = 27, + CV_CAP_PROP_FOCUS = 28, + CV_CAP_PROP_GUID = 29, + CV_CAP_PROP_ISO_SPEED = 30, + CV_CAP_PROP_BACKLIGHT = 32, + CV_CAP_PROP_PAN = 33, + CV_CAP_PROP_TILT = 34, + CV_CAP_PROP_ROLL = 35, + CV_CAP_PROP_IRIS = 36, + CV_CAP_PROP_SETTINGS = 37, + CV_CAP_PROP_AUTOGRAB = 1024, + CV_CAP_PROP_PREVIEW_FORMAT = 1026, + CV_CAP_PROP_XI_DOWNSAMPLING = 400, + CV_CAP_PROP_XI_DATA_FORMAT = 401, + CV_CAP_PROP_XI_OFFSET_X = 402, + CV_CAP_PROP_XI_OFFSET_Y = 403, + CV_CAP_PROP_XI_TRG_SOURCE = 404, + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, + CV_CAP_PROP_XI_GPI_SELECTOR = 406, + CV_CAP_PROP_XI_GPI_MODE = 407, + CV_CAP_PROP_XI_GPI_LEVEL = 408, + CV_CAP_PROP_XI_GPO_SELECTOR = 409, + CV_CAP_PROP_XI_GPO_MODE = 410, + CV_CAP_PROP_XI_LED_SELECTOR = 411, + CV_CAP_PROP_XI_LED_MODE = 412, + CV_CAP_PROP_XI_MANUAL_WB = 413, + CV_CAP_PROP_XI_AUTO_WB = 414, + CV_CAP_PROP_XI_AEAG = 415, + CV_CAP_PROP_XI_EXP_PRIORITY = 416, + CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, + CV_CAP_PROP_XI_AEAG_LEVEL = 419, + CV_CAP_PROP_XI_TIMEOUT = 420, + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, + CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, + CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, + CV_CAP_ANDROID_COLOR_FRAME = 
CV_CAP_ANDROID_COLOR_FRAME_BGR, + CV_CAP_ANDROID_GREY_FRAME = 1, + CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, + CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4, + CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, + CV_CAP_ANDROID_FLASH_MODE_OFF = 0+1, + CV_CAP_ANDROID_FLASH_MODE_ON = 0+2, + CV_CAP_ANDROID_FLASH_MODE_RED_EYE = 0+3, + CV_CAP_ANDROID_FLASH_MODE_TORCH = 0+4, + CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 0+1, + CV_CAP_ANDROID_FOCUS_MODE_EDOF = 0+2, + CV_CAP_ANDROID_FOCUS_MODE_FIXED = 0+3, + CV_CAP_ANDROID_FOCUS_MODE_INFINITY = 0+4, + CV_CAP_ANDROID_FOCUS_MODE_MACRO = 0+5, + CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 0+1, + CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 0+2, + CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 0+3, + CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 0+4, + CV_CAP_ANDROID_WHITE_BALANCE_SHADE = 0+5, + CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 0+6, + CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 0+7, + CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, + CV_CAP_ANDROID_ANTIBANDING_60HZ = 0+1, + CV_CAP_ANDROID_ANTIBANDING_AUTO = 0+2, + CV_CAP_ANDROID_ANTIBANDING_OFF = 0+3, + IMREAD_UNCHANGED = -1, + IMREAD_GRAYSCALE = 0, + IMREAD_COLOR = 1, + IMREAD_ANYDEPTH = 2, + IMREAD_ANYCOLOR = 4, + IMWRITE_JPEG_QUALITY = 1, + IMWRITE_PNG_COMPRESSION = 16, + IMWRITE_PNG_STRATEGY = 17, + IMWRITE_PNG_BILEVEL = 18, + IMWRITE_PNG_STRATEGY_DEFAULT = 0, + IMWRITE_PNG_STRATEGY_FILTERED = 1, + IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, + IMWRITE_PNG_STRATEGY_RLE = 3, + IMWRITE_PNG_STRATEGY_FIXED = 4, + IMWRITE_PXM_BINARY = 32; + + + // + // C++: Mat imdecode(Mat buf, int flags) + // + +/** + *

Reads an image from a buffer in memory.

+ * + *

The function reads an image from the specified buffer in the memory. + * If the buffer is too short or contains invalid data, the empty matrix/image + * is returned.

+ * + *

See "imread" for the list of supported formats and flags description.

+ * + *

Note: In the case of color images, the decoded images will have the channels + * stored in B G R order.

+ * + * @param buf Input array or vector of bytes. + * @param flags The same flags as in "imread". + * + * @see org.opencv.highgui.Highgui.imdecode + */ + public static Mat imdecode(Mat buf, int flags) + { + + Mat retVal = new Mat(imdecode_0(buf.nativeObj, flags)); + + return retVal; + } + + + // + // C++: bool imencode(string ext, Mat img, vector_uchar& buf, vector_int params = vector()) + // + +/** + *

Encodes an image into a memory buffer.

+ * + *

The function compresses the image and stores it in the memory buffer that is + * resized to fit the result. + * See "imwrite" for the list of supported formats and flags description.

+ * + *

Note: cvEncodeImage returns single-row matrix of type + * CV_8UC1 that contains encoded image as array of bytes.

+ * + * @param ext File extension that defines the output format. + * @param img Image to be written. + * @param buf Output buffer resized to fit the compressed image. + * @param params Format-specific parameters. See "imwrite". + * + * @see org.opencv.highgui.Highgui.imencode + */ + public static boolean imencode(String ext, Mat img, MatOfByte buf, MatOfInt params) + { + Mat buf_mat = buf; + Mat params_mat = params; + boolean retVal = imencode_0(ext, img.nativeObj, buf_mat.nativeObj, params_mat.nativeObj); + + return retVal; + } + +/** + *

Encodes an image into a memory buffer.

+ * + *

The function compresses the image and stores it in the memory buffer that is + * resized to fit the result. + * See "imwrite" for the list of supported formats and flags description.

+ * + *

Note: cvEncodeImage returns single-row matrix of type + * CV_8UC1 that contains encoded image as array of bytes.

+ * + * @param ext File extension that defines the output format. + * @param img Image to be written. + * @param buf Output buffer resized to fit the compressed image. + * + * @see org.opencv.highgui.Highgui.imencode + */ + public static boolean imencode(String ext, Mat img, MatOfByte buf) + { + Mat buf_mat = buf; + boolean retVal = imencode_1(ext, img.nativeObj, buf_mat.nativeObj); + + return retVal; + } + + + // + // C++: Mat imread(string filename, int flags = 1) + // + +/** + *

Loads an image from a file.

+ * + *

The function imread loads an image from the specified file and + * returns it. If the image cannot be read (because of missing file, improper + * permissions, unsupported or invalid format), the function returns an empty + * matrix (Mat.data==NULL). Currently, the following file formats + * are supported:

+ *
    + *
  • Windows bitmaps - *.bmp, *.dib (always supported) + *
  • JPEG files - *.jpeg, *.jpg, *.jpe (see the *Notes* + * section) + *
  • JPEG 2000 files - *.jp2 (see the *Notes* section) + *
  • Portable Network Graphics - *.png (see the *Notes* + * section) + *
  • Portable image format - *.pbm, *.pgm, *.ppm (always + * supported) + *
  • Sun rasters - *.sr, *.ras (always supported) + *
  • TIFF files - *.tiff, *.tif (see the *Notes* section) + *
+ * + *

Note:

+ *
    + *
  • The function determines the type of an image by the content, not by + * the file extension. + *
  • On Microsoft Windows* OS and MacOSX*, the codecs shipped with an + * OpenCV image (libjpeg, libpng, libtiff, and libjasper) are used by default. + * So, OpenCV can always read JPEGs, PNGs, and TIFFs. On MacOSX, there is also + * an option to use native MacOSX image readers. But beware that currently these + * native image loaders give images with different pixel values because of the + * color management embedded into MacOSX. + *
  • On Linux*, BSD flavors and other Unix-like open-source operating + * systems, OpenCV looks for codecs supplied with an OS image. Install the + * relevant packages (do not forget the development files, for example, + * "libjpeg-dev", in Debian* and Ubuntu*) to get the codec support or turn on + * the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake. + *
+ * + *

Note: In the case of color images, the decoded images will have the channels + * stored in B G R order.

+ * + * @param filename Name of file to be loaded. + * @param flags Flags specifying the color type of a loaded image: + *
    + *
  • CV_LOAD_IMAGE_ANYDEPTH - If set, return 16-bit/32-bit image when the + * input has the corresponding depth, otherwise convert it to 8-bit. + *
  • CV_LOAD_IMAGE_COLOR - If set, always convert image to the color one + *
  • CV_LOAD_IMAGE_GRAYSCALE - If set, always convert image to the + * grayscale one + *
  • >0 Return a 3-channel color image. + *
+ *

Note: In the current implementation the alpha channel, if any, is stripped + * from the output image. Use negative value if you need the alpha channel.

+ *
    + *
  • =0 Return a grayscale image. + *
  • <0 Return the loaded image as is (with alpha channel). + *
+ * + * @see org.opencv.highgui.Highgui.imread + */ + public static Mat imread(String filename, int flags) + { + + Mat retVal = new Mat(imread_0(filename, flags)); + + return retVal; + } + +/** + *

Loads an image from a file.

+ * + *

The function imread loads an image from the specified file and + * returns it. If the image cannot be read (because of missing file, improper + * permissions, unsupported or invalid format), the function returns an empty + * matrix (Mat.data==NULL). Currently, the following file formats + * are supported:

+ *
    + *
  • Windows bitmaps - *.bmp, *.dib (always supported) + *
  • JPEG files - *.jpeg, *.jpg, *.jpe (see the *Notes* + * section) + *
  • JPEG 2000 files - *.jp2 (see the *Notes* section) + *
  • Portable Network Graphics - *.png (see the *Notes* + * section) + *
  • Portable image format - *.pbm, *.pgm, *.ppm (always + * supported) + *
  • Sun rasters - *.sr, *.ras (always supported) + *
  • TIFF files - *.tiff, *.tif (see the *Notes* section) + *
+ * + *

Note:

+ *
    + *
  • The function determines the type of an image by the content, not by + * the file extension. + *
  • On Microsoft Windows* OS and MacOSX*, the codecs shipped with an + * OpenCV image (libjpeg, libpng, libtiff, and libjasper) are used by default. + * So, OpenCV can always read JPEGs, PNGs, and TIFFs. On MacOSX, there is also + * an option to use native MacOSX image readers. But beware that currently these + * native image loaders give images with different pixel values because of the + * color management embedded into MacOSX. + *
  • On Linux*, BSD flavors and other Unix-like open-source operating + * systems, OpenCV looks for codecs supplied with an OS image. Install the + * relevant packages (do not forget the development files, for example, + * "libjpeg-dev", in Debian* and Ubuntu*) to get the codec support or turn on + * the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake. + *
+ * + *

Note: In the case of color images, the decoded images will have the channels + * stored in B G R order.

+ * + * @param filename Name of file to be loaded. + * + * @see org.opencv.highgui.Highgui.imread + */ + public static Mat imread(String filename) + { + + Mat retVal = new Mat(imread_1(filename)); + + return retVal; + } + + + // + // C++: bool imwrite(string filename, Mat img, vector_int params = vector()) + // + +/** + *

Saves an image to a specified file.

+ * + *

The function imwrite saves the image to the specified file. The + * image format is chosen based on the filename extension (see + * "imread" for the list of extensions). Only 8-bit (or 16-bit unsigned + * (CV_16U) in case of PNG, JPEG 2000, and TIFF) single-channel or + * 3-channel (with 'BGR' channel order) images can be saved using this function. + * If the format, depth or channel order is different, use "Mat.convertTo", and + * "cvtColor" to convert it before saving. Or, use the universal XML I/O + * functions to save the image to XML or YAML format. + * It is possible to store PNG images with an alpha channel using this function. + * To do this, create 8-bit (or 16-bit) 4-channel image BGRA, where the alpha + * channel goes last. Fully transparent pixels should have alpha set to 0, fully + * opaque pixels should have alpha set to 255/65535. The sample below shows how + * to create such a BGRA image and store to PNG file. It also demonstrates how + * to set custom compression parameters

+ * + *

// C++ code:

+ * + *

#include

+ * + *

#include

+ * + *

#include

+ * + *

using namespace cv;

+ * + *

using namespace std;

+ * + *

void createAlphaMat(Mat &mat)

+ * + * + *

for (int i = 0; i < mat.rows; ++i) {

+ * + *

for (int j = 0; j < mat.cols; ++j) {

+ * + *

Vec4b& rgba = mat.at(i, j);

+ * + *

rgba[0] = UCHAR_MAX;

+ * + *

rgba[1] = saturate_cast((float (mat.cols - j)) / ((float)mat.cols) * + * UCHAR_MAX);

+ * + *

rgba[2] = saturate_cast((float (mat.rows - i)) / ((float)mat.rows) * + * UCHAR_MAX);

+ * + *

rgba[3] = saturate_cast(0.5 * (rgba[1] + rgba[2]));

+ * + * + * + * + *

int main(int argv, char argc)

+ * + * + *

// Create mat with alpha channel

+ * + *

Mat mat(480, 640, CV_8UC4);

+ * + *

createAlphaMat(mat);

+ * + *

vector compression_params;

+ * + *

compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);

+ * + *

compression_params.push_back(9);

+ * + *

try {

+ * + *

imwrite("alpha.png", mat, compression_params);

+ * + * + *

catch (runtime_error& ex) {

+ * + *

fprintf(stderr, "Exception converting image to PNG format: %sn", ex.what());

+ * + *

return 1;

+ * + * + *

fprintf(stdout, "Saved PNG file with alpha data.n");

+ * + *

return 0;

+ * + * + * @param filename Name of the file. + * @param img a img + * @param params Format-specific save parameters encoded as pairs + * paramId_1, paramValue_1, paramId_2, paramValue_2,.... The + * following parameters are currently supported: + *
    + *
  • For JPEG, it can be a quality (CV_IMWRITE_JPEG_QUALITY) + * from 0 to 100 (the higher is the better). Default value is 95. + *
  • For PNG, it can be the compression level (CV_IMWRITE_PNG_COMPRESSION) + * from 0 to 9. A higher value means a smaller size and longer compression time. + * Default value is 3. + *
  • For PPM, PGM, or PBM, it can be a binary format flag (CV_IMWRITE_PXM_BINARY), + * 0 or 1. Default value is 1. + *
+ * + * @see org.opencv.highgui.Highgui.imwrite + */ + public static boolean imwrite(String filename, Mat img, MatOfInt params) + { + Mat params_mat = params; + boolean retVal = imwrite_0(filename, img.nativeObj, params_mat.nativeObj); + + return retVal; + } + +/** + *

Saves an image to a specified file.

+ * + *

The function imwrite saves the image to the specified file. The + * image format is chosen based on the filename extension (see + * "imread" for the list of extensions). Only 8-bit (or 16-bit unsigned + * (CV_16U) in case of PNG, JPEG 2000, and TIFF) single-channel or + * 3-channel (with 'BGR' channel order) images can be saved using this function. + * If the format, depth or channel order is different, use "Mat.convertTo", and + * "cvtColor" to convert it before saving. Or, use the universal XML I/O + * functions to save the image to XML or YAML format. + * It is possible to store PNG images with an alpha channel using this function. + * To do this, create 8-bit (or 16-bit) 4-channel image BGRA, where the alpha + * channel goes last. Fully transparent pixels should have alpha set to 0, fully + * opaque pixels should have alpha set to 255/65535. The sample below shows how + * to create such a BGRA image and store to PNG file. It also demonstrates how + * to set custom compression parameters

+ * + *

// C++ code:

+ * + *

#include

+ * + *

#include

+ * + *

#include

+ * + *

using namespace cv;

+ * + *

using namespace std;

+ * + *

void createAlphaMat(Mat &mat)

+ * + * + *

for (int i = 0; i < mat.rows; ++i) {

+ * + *

for (int j = 0; j < mat.cols; ++j) {

+ * + *

Vec4b& rgba = mat.at(i, j);

+ * + *

rgba[0] = UCHAR_MAX;

+ * + *

rgba[1] = saturate_cast((float (mat.cols - j)) / ((float)mat.cols) * + * UCHAR_MAX);

+ * + *

rgba[2] = saturate_cast((float (mat.rows - i)) / ((float)mat.rows) * + * UCHAR_MAX);

+ * + *

rgba[3] = saturate_cast(0.5 * (rgba[1] + rgba[2]));

+ * + * + * + * + *

int main(int argv, char argc)

+ * + * + *

// Create mat with alpha channel

+ * + *

Mat mat(480, 640, CV_8UC4);

+ * + *

createAlphaMat(mat);

+ * + *

vector compression_params;

+ * + *

compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);

+ * + *

compression_params.push_back(9);

+ * + *

try {

+ * + *

imwrite("alpha.png", mat, compression_params);

+ * + * + *

catch (runtime_error& ex) {

+ * + *

fprintf(stderr, "Exception converting image to PNG format: %sn", ex.what());

+ * + *

return 1;

+ * + * + *

fprintf(stdout, "Saved PNG file with alpha data.n");

+ * + *

return 0;

+ * + * + * @param filename Name of the file. + * @param img a img + * + * @see org.opencv.highgui.Highgui.imwrite + */ + public static boolean imwrite(String filename, Mat img) + { + + boolean retVal = imwrite_1(filename, img.nativeObj); + + return retVal; + } + + + + + // C++: Mat imdecode(Mat buf, int flags) + private static native long imdecode_0(long buf_nativeObj, int flags); + + // C++: bool imencode(string ext, Mat img, vector_uchar& buf, vector_int params = vector()) + private static native boolean imencode_0(String ext, long img_nativeObj, long buf_mat_nativeObj, long params_mat_nativeObj); + private static native boolean imencode_1(String ext, long img_nativeObj, long buf_mat_nativeObj); + + // C++: Mat imread(string filename, int flags = 1) + private static native long imread_0(String filename, int flags); + private static native long imread_1(String filename); + + // C++: bool imwrite(string filename, Mat img, vector_int params = vector()) + private static native boolean imwrite_0(String filename, long img_nativeObj, long params_mat_nativeObj); + private static native boolean imwrite_1(String filename, long img_nativeObj); + +} diff --git a/src/org/opencv/highgui/VideoCapture.java b/src/org/opencv/highgui/VideoCapture.java new file mode 100644 index 0000000..7dcf322 --- /dev/null +++ b/src/org/opencv/highgui/VideoCapture.java @@ -0,0 +1,411 @@ +package org.opencv.highgui; + +import java.util.List; +import java.util.LinkedList; + +import org.opencv.core.Mat; +import org.opencv.core.Size; + +// C++: class VideoCapture +/** + *

Class for video capturing from video files or cameras. + * The class provides C++ API for capturing video from cameras or for reading + * video files. Here is how the class can be used:

+ * + *

#include "opencv2/opencv.hpp"

+ * + *

// C++ code:

+ * + *

using namespace cv;

+ * + *

int main(int, char)

+ * + * + *

VideoCapture cap(0); // open the default camera

+ * + *

if(!cap.isOpened()) // check if we succeeded

+ * + *

return -1;

+ * + *

Mat edges;

+ * + *

namedWindow("edges",1);

+ * + *

for(;;)

+ * + * + *

Mat frame;

+ * + *

cap >> frame; // get a new frame from camera

+ * + *

cvtColor(frame, edges, CV_BGR2GRAY);

+ * + *

GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);

+ * + *

Canny(edges, edges, 0, 30, 3);

+ * + *

imshow("edges", edges);

+ * + *

if(waitKey(30) >= 0) break;

+ * + * + *

// the camera will be deinitialized automatically in VideoCapture destructor

+ * + *

return 0;

+ * + * + *

Note: In C API the black-box structure CvCapture is used instead + * of VideoCapture. + *

+ * + * @see org.opencv.highgui.VideoCapture + */ +public class VideoCapture { + + protected final long nativeObj; + + protected VideoCapture(long addr) { + nativeObj = addr; + } + + // + // C++: VideoCapture::VideoCapture() + // + +/** + *

VideoCapture constructors.

+ * + *

Note: In C API, when you finished working with video, release + * CvCapture structure with cvReleaseCapture(), or use + * Ptr that calls cvReleaseCapture() + * automatically in the destructor.

+ * + * @see org.opencv.highgui.VideoCapture.VideoCapture + */ + public VideoCapture() + { + + nativeObj = n_VideoCapture(); + + return; + } + + // + // C++: VideoCapture::VideoCapture(int device) + // + +/** + *

VideoCapture constructors.

+ * + *

Note: In C API, when you finished working with video, release + * CvCapture structure with cvReleaseCapture(), or use + * Ptr that calls cvReleaseCapture() + * automatically in the destructor.

+ * + * @param device id of the opened video capturing device (i.e. a camera index). + * If there is a single camera connected, just pass 0. + * + * @see org.opencv.highgui.VideoCapture.VideoCapture + */ + public VideoCapture(int device) + { + + nativeObj = n_VideoCapture(device); + + return; + } + + // + // C++: double VideoCapture::get(int propId) + // + +/** + * Returns the specified "VideoCapture" property. + * + * Note: When querying a property that is not supported by the backend used by + * the "VideoCapture" class, value 0 is returned. + * + * @param propId property identifier; it can be one of the following: + * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. + * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. + * + * @see org.opencv.highgui.VideoCapture.get + */ + public double get(int propId) + { + + double retVal = n_get(nativeObj, propId); + + return retVal; + } + + public List getSupportedPreviewSizes() + { + String[] sizes_str = n_getSupportedPreviewSizes(nativeObj).split(","); + List sizes = new LinkedList(); + + for (String str : sizes_str) { + String[] wh = str.split("x"); + sizes.add(new Size(Double.parseDouble(wh[0]), Double.parseDouble(wh[1]))); + } + + return sizes; + } + + // + // C++: bool VideoCapture::grab() + // + +/** + *

Grabs the next frame from video file or capturing device.

+ * + *

The methods/functions grab the next frame from video file or camera and + * return true (non-zero) in the case of success.

+ * + *

The primary use of the function is in multi-camera environments, especially + * when the cameras do not have hardware synchronization. That is, you call + * VideoCapture.grab() for each camera and after that call the + * slower method VideoCapture.retrieve() to decode and get frame + * from each camera. This way the overhead on demosaicing or motion jpeg + * decompression etc. is eliminated and the retrieved frames from different + * cameras will be closer in time.

+ * + *

Also, when a connected camera is multi-head (for example, a stereo camera or + * a Kinect device), the correct way of retrieving data from it is to call + * "VideoCapture.grab" first and then call "VideoCapture.retrieve" one or more + * times with different values of the channel parameter. See + * http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/kinect_maps.cpp

+ * + * @see org.opencv.highgui.VideoCapture.grab + */ + public boolean grab() + { + + boolean retVal = n_grab(nativeObj); + + return retVal; + } + + // + // C++: bool VideoCapture::isOpened() + // + +/** + *

Returns true if video capturing has been initialized already.

+ * + *

If the previous call to VideoCapture constructor or + * VideoCapture.open succeeded, the method returns true.

+ * + * @see org.opencv.highgui.VideoCapture.isOpened + */ + public boolean isOpened() + { + + boolean retVal = n_isOpened(nativeObj); + + return retVal; + } + + // + // C++: bool VideoCapture::open(int device) + // + +/** + *

Open video file or a capturing device for video capturing

+ * + *

The methods first call "VideoCapture.release" to close the already opened + * file or camera.

+ * + * @param device id of the opened video capturing device (i.e. a camera index). + * + * @see org.opencv.highgui.VideoCapture.open + */ + public boolean open(int device) + { + + boolean retVal = n_open(nativeObj, device); + + return retVal; + } + + // + // C++: bool VideoCapture::read(Mat image) + // + +/** + *

Grabs, decodes and returns the next video frame.

+ * + *

The methods/functions combine "VideoCapture.grab" and "VideoCapture.retrieve" + * in one call. This is the most convenient method for reading video files or + * capturing data from decode and return the just grabbed frame. If no frames + * has been grabbed (camera has been disconnected, or there are no more frames + * in video file), the methods return false and the functions return NULL + * pointer.

+ * + *

Note: OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame + * return image stored inside the video capturing structure. It is not allowed + * to modify or release the image! You can copy the frame using "cvCloneImage" + * and then do whatever you want with the copy.

+ * + * @param image a image + * + * @see org.opencv.highgui.VideoCapture.read + */ + public boolean read(Mat image) + { + + boolean retVal = n_read(nativeObj, image.nativeObj); + + return retVal; + } + + // + // C++: void VideoCapture::release() + // + +/** + *

Closes video file or capturing device.

+ * + *

The methods are automatically called by subsequent "VideoCapture.open" and + * by VideoCapture destructor.

+ * + *

The C function also deallocates memory and clears *capture + * pointer.

+ * + * @see org.opencv.highgui.VideoCapture.release + */ + public void release() + { + + n_release(nativeObj); + + return; + } + + // + // C++: bool VideoCapture::retrieve(Mat image, int channel = 0) + // + +/** + *

Decodes and returns the grabbed video frame.

+ * + *

The methods/functions decode and return the just grabbed frame. If no frames + * has been grabbed (camera has been disconnected, or there are no more frames + * in video file), the methods return false and the functions return NULL + * pointer.

+ * + *

Note: OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame + * return image stored inside the video capturing structure. It is not allowed + * to modify or release the image! You can copy the frame using "cvCloneImage" + * and then do whatever you want with the copy.

+ * + * @param image a image + * @param channel a channel + * + * @see org.opencv.highgui.VideoCapture.retrieve + */ + public boolean retrieve(Mat image, int channel) + { + + boolean retVal = n_retrieve(nativeObj, image.nativeObj, channel); + + return retVal; + } + +/** + *

Decodes and returns the grabbed video frame.

+ * + *

The methods/functions decode and return the just grabbed frame. If no frames + * has been grabbed (camera has been disconnected, or there are no more frames + * in video file), the methods return false and the functions return NULL + * pointer.

+ * + *

Note: OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame + * return image stored inside the video capturing structure. It is not allowed + * to modify or release the image! You can copy the frame using "cvCloneImage" + * and then do whatever you want with the copy.

+ * + * @param image a image + * + * @see org.opencv.highgui.VideoCapture.retrieve + */ + public boolean retrieve(Mat image) + { + + boolean retVal = n_retrieve(nativeObj, image.nativeObj); + + return retVal; + } + + // + // C++: bool VideoCapture::set(int propId, double value) + // + +/** + * Sets a property in the "VideoCapture". + * + * @param propId property identifier; it can be one of the following: + * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. + * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. + * @param value value of the property. + * + * @see org.opencv.highgui.VideoCapture.set + */ + public boolean set(int propId, double value) + { + + boolean retVal = n_set(nativeObj, propId, value); + + return retVal; + } + + @Override + protected void finalize() throws Throwable { + n_delete(nativeObj); + super.finalize(); + } + + // C++: VideoCapture::VideoCapture() + private static native long n_VideoCapture(); + + // C++: VideoCapture::VideoCapture(string filename) + private static native long n_VideoCapture(java.lang.String filename); + + // C++: VideoCapture::VideoCapture(int device) + private static native long n_VideoCapture(int device); + + // C++: double VideoCapture::get(int propId) + private static native double n_get(long nativeObj, int propId); + + // C++: bool VideoCapture::grab() + private static native boolean n_grab(long nativeObj); + + // C++: bool VideoCapture::isOpened() + private static native boolean n_isOpened(long nativeObj); + + // C++: bool VideoCapture::open(string filename) + private static native boolean n_open(long nativeObj, java.lang.String filename); + + // C++: bool VideoCapture::open(int device) + private static native boolean n_open(long nativeObj, int device); + + // C++: bool VideoCapture::read(Mat image) + private static native boolean n_read(long nativeObj, long image_nativeObj); + + // C++: void VideoCapture::release() + private static native void n_release(long nativeObj); + + // 
C++: bool VideoCapture::retrieve(Mat image, int channel = 0) + private static native boolean n_retrieve(long nativeObj, long image_nativeObj, int channel); + + private static native boolean n_retrieve(long nativeObj, long image_nativeObj); + + // C++: bool VideoCapture::set(int propId, double value) + private static native boolean n_set(long nativeObj, int propId, double value); + + private static native String n_getSupportedPreviewSizes(long nativeObj); + + // native support for java finalize() + private static native void n_delete(long nativeObj); + +} diff --git a/src/org/opencv/highgui/package.bluej b/src/org/opencv/highgui/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/imgproc/Imgproc.java b/src/org/opencv/imgproc/Imgproc.java new file mode 100644 index 0000000..aa51ea0 --- /dev/null +++ b/src/org/opencv/imgproc/Imgproc.java @@ -0,0 +1,9630 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.imgproc; + +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfInt4; +import org.opencv.core.MatOfPoint; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.core.RotatedRect; +import org.opencv.core.Scalar; +import org.opencv.core.Size; +import org.opencv.core.TermCriteria; +import org.opencv.utils.Converters; + +public class Imgproc { + + private static final int + IPL_BORDER_CONSTANT = 0, + IPL_BORDER_REPLICATE = 1, + IPL_BORDER_REFLECT = 2, + IPL_BORDER_WRAP = 3, + IPL_BORDER_REFLECT_101 = 4, + IPL_BORDER_TRANSPARENT = 5, + CV_INTER_NN = 0, + CV_INTER_LINEAR = 1, + CV_INTER_CUBIC = 2, + CV_INTER_AREA = 3, + CV_INTER_LANCZOS4 = 4, + CV_MOP_ERODE = 0, + CV_MOP_DILATE = 1, + CV_MOP_OPEN = 2, + CV_MOP_CLOSE = 3, + CV_MOP_GRADIENT = 4, + CV_MOP_TOPHAT = 5, + CV_MOP_BLACKHAT = 6, + CV_RETR_EXTERNAL = 0, + 
CV_RETR_LIST = 1, + CV_RETR_CCOMP = 2, + CV_RETR_TREE = 3, + CV_RETR_FLOODFILL = 4, + CV_CHAIN_APPROX_NONE = 1, + CV_CHAIN_APPROX_SIMPLE = 2, + CV_CHAIN_APPROX_TC89_L1 = 3, + CV_CHAIN_APPROX_TC89_KCOS = 4, + CV_THRESH_BINARY = 0, + CV_THRESH_BINARY_INV = 1, + CV_THRESH_TRUNC = 2, + CV_THRESH_TOZERO = 3, + CV_THRESH_TOZERO_INV = 4, + CV_THRESH_MASK = 7, + CV_THRESH_OTSU = 8; + + + public static final int + CV_BLUR_NO_SCALE = 0, + CV_BLUR = 1, + CV_GAUSSIAN = 2, + CV_MEDIAN = 3, + CV_BILATERAL = 4, + CV_GAUSSIAN_5x5 = 7, + CV_SCHARR = -1, + CV_MAX_SOBEL_KSIZE = 7, + CV_RGBA2mRGBA = 125, + CV_mRGBA2RGBA = 126, + CV_WARP_FILL_OUTLIERS = 8, + CV_WARP_INVERSE_MAP = 16, + CV_SHAPE_RECT = 0, + CV_SHAPE_CROSS = 1, + CV_SHAPE_ELLIPSE = 2, + CV_SHAPE_CUSTOM = 100, + CV_CHAIN_CODE = 0, + CV_LINK_RUNS = 5, + CV_POLY_APPROX_DP = 0, + CV_CONTOURS_MATCH_I1 = 1, + CV_CONTOURS_MATCH_I2 = 2, + CV_CONTOURS_MATCH_I3 = 3, + CV_CLOCKWISE = 1, + CV_COUNTER_CLOCKWISE = 2, + CV_COMP_CORREL = 0, + CV_COMP_CHISQR = 1, + CV_COMP_INTERSECT = 2, + CV_COMP_BHATTACHARYYA = 3, + CV_COMP_HELLINGER = CV_COMP_BHATTACHARYYA, + CV_DIST_MASK_3 = 3, + CV_DIST_MASK_5 = 5, + CV_DIST_MASK_PRECISE = 0, + CV_DIST_LABEL_CCOMP = 0, + CV_DIST_LABEL_PIXEL = 1, + CV_DIST_USER = -1, + CV_DIST_L1 = 1, + CV_DIST_L2 = 2, + CV_DIST_C = 3, + CV_DIST_L12 = 4, + CV_DIST_FAIR = 5, + CV_DIST_WELSCH = 6, + CV_DIST_HUBER = 7, + CV_CANNY_L2_GRADIENT = (1 << 31), + CV_HOUGH_STANDARD = 0, + CV_HOUGH_PROBABILISTIC = 1, + CV_HOUGH_MULTI_SCALE = 2, + CV_HOUGH_GRADIENT = 3, + BORDER_REPLICATE = IPL_BORDER_REPLICATE, + BORDER_CONSTANT = IPL_BORDER_CONSTANT, + BORDER_REFLECT = IPL_BORDER_REFLECT, + BORDER_WRAP = IPL_BORDER_WRAP, + BORDER_REFLECT_101 = IPL_BORDER_REFLECT_101, + BORDER_REFLECT101 = BORDER_REFLECT_101, + BORDER_TRANSPARENT = IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT = BORDER_REFLECT_101, + BORDER_ISOLATED = 16, + KERNEL_GENERAL = 0, + KERNEL_SYMMETRICAL = 1, + KERNEL_ASYMMETRICAL = 2, + KERNEL_SMOOTH = 4, + KERNEL_INTEGER 
= 8, + MORPH_ERODE = CV_MOP_ERODE, + MORPH_DILATE = CV_MOP_DILATE, + MORPH_OPEN = CV_MOP_OPEN, + MORPH_CLOSE = CV_MOP_CLOSE, + MORPH_GRADIENT = CV_MOP_GRADIENT, + MORPH_TOPHAT = CV_MOP_TOPHAT, + MORPH_BLACKHAT = CV_MOP_BLACKHAT, + MORPH_RECT = 0, + MORPH_CROSS = 1, + MORPH_ELLIPSE = 2, + GHT_POSITION = 0, + GHT_SCALE = 1, + GHT_ROTATION = 2, + INTER_NEAREST = CV_INTER_NN, + INTER_LINEAR = CV_INTER_LINEAR, + INTER_CUBIC = CV_INTER_CUBIC, + INTER_AREA = CV_INTER_AREA, + INTER_LANCZOS4 = CV_INTER_LANCZOS4, + INTER_MAX = 7, + WARP_INVERSE_MAP = CV_WARP_INVERSE_MAP, + INTER_BITS = 5, + INTER_BITS2 = INTER_BITS*2, + INTER_TAB_SIZE = (1<Finds edges in an image using the [Canny86] algorithm.

+ * + *

The function finds edges in the input image image and marks them + * in the output map edges using the Canny algorithm. The smallest + * value between threshold1 and threshold2 is used for + * edge linking. The largest value is used to find initial segments of strong + * edges. See http://en.wikipedia.org/wiki/Canny_edge_detector

+ * + * @param image single-channel 8-bit input image. + * @param edges output edge map; it has the same size and type as + * image. + * @param threshold1 first threshold for the hysteresis procedure. + * @param threshold2 second threshold for the hysteresis procedure. + * @param apertureSize aperture size for the "Sobel" operator. + * @param L2gradient a flag, indicating whether a more accurate L_2 + * norm =sqrt((dI/dx)^2 + (dI/dy)^2) should be used to calculate the + * image gradient magnitude (L2gradient=true), or whether the + * default L_1 norm =|dI/dx|+|dI/dy| is enough + * (L2gradient=false). + * + * @see org.opencv.imgproc.Imgproc.Canny + */ + public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize, boolean L2gradient) + { + + Canny_0(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize, L2gradient); + + return; + } + +/** + *

Finds edges in an image using the [Canny86] algorithm.

+ * + *

The function finds edges in the input image image and marks them + * in the output map edges using the Canny algorithm. The smallest + * value between threshold1 and threshold2 is used for + * edge linking. The largest value is used to find initial segments of strong + * edges. See http://en.wikipedia.org/wiki/Canny_edge_detector

+ * + * @param image single-channel 8-bit input image. + * @param edges output edge map; it has the same size and type as + * image. + * @param threshold1 first threshold for the hysteresis procedure. + * @param threshold2 second threshold for the hysteresis procedure. + * + * @see org.opencv.imgproc.Imgproc.Canny + */ + public static void Canny(Mat image, Mat edges, double threshold1, double threshold2) + { + + Canny_1(image.nativeObj, edges.nativeObj, threshold1, threshold2); + + return; + } + + + // + // C++: void GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image using a Gaussian filter.

+ * + *

The function convolves the source image with the specified Gaussian kernel. + * In-place filtering is supported.

+ * + * @param src input image; the image can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize Gaussian kernel size. ksize.width and + * ksize.height can differ but they both must be positive and odd. + * Or, they can be zero's and then they are computed from sigma*. + * @param sigmaX Gaussian kernel standard deviation in X direction. + * @param sigmaY Gaussian kernel standard deviation in Y direction; if + * sigmaY is zero, it is set to be equal to sigmaX, if + * both sigmas are zeros, they are computed from ksize.width and + * ksize.height, respectively (see "getGaussianKernel" for + * details); to fully control the result regardless of possible future + * modifications of all this semantics, it is recommended to specify all of + * ksize, sigmaX, and sigmaY. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.imgproc.Imgproc#bilateralFilter + */ + public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY, int borderType) + { + + GaussianBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY, borderType); + + return; + } + +/** + *

Blurs an image using a Gaussian filter.

+ * + *

The function convolves the source image with the specified Gaussian kernel. + * In-place filtering is supported.

+ * + * @param src input image; the image can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize Gaussian kernel size. ksize.width and + * ksize.height can differ but they both must be positive and odd. + * Or, they can be zero's and then they are computed from sigma*. + * @param sigmaX Gaussian kernel standard deviation in X direction. + * @param sigmaY Gaussian kernel standard deviation in Y direction; if + * sigmaY is zero, it is set to be equal to sigmaX, if + * both sigmas are zeros, they are computed from ksize.width and + * ksize.height, respectively (see "getGaussianKernel" for + * details); to fully control the result regardless of possible future + * modifications of all this semantics, it is recommended to specify all of + * ksize, sigmaX, and sigmaY. + * + * @see org.opencv.imgproc.Imgproc.GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.imgproc.Imgproc#bilateralFilter + */ + public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY) + { + + GaussianBlur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY); + + return; + } + +/** + *

Blurs an image using a Gaussian filter.

+ * + *

The function convolves the source image with the specified Gaussian kernel. + * In-place filtering is supported.

+ * + * @param src input image; the image can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize Gaussian kernel size. ksize.width and + * ksize.height can differ but they both must be positive and odd. + * Or, they can be zero's and then they are computed from sigma*. + * @param sigmaX Gaussian kernel standard deviation in X direction. + * + * @see org.opencv.imgproc.Imgproc.GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.imgproc.Imgproc#bilateralFilter + */ + public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX) + { + + GaussianBlur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX); + + return; + } + + + // + // C++: void HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0) + // + +/** + *

Finds circles in a grayscale image using the Hough transform.

+ * + *

The function finds circles in a grayscale image using a modification of the + * Hough transform. + * Example:

+ * + *

// C++ code:

+ * + *

#include

+ * + *

#include

+ * + *

#include

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat img, gray;

+ * + *

if(argc != 2 && !(img=imread(argv[1], 1)).data)

+ * + *

return -1;

+ * + *

cvtColor(img, gray, CV_BGR2GRAY);

+ * + *

// smooth it, otherwise a lot of false circles may be detected

+ * + *

GaussianBlur(gray, gray, Size(9, 9), 2, 2);

+ * + *

vector circles;

+ * + *

HoughCircles(gray, circles, CV_HOUGH_GRADIENT,

+ * + *

2, gray->rows/4, 200, 100);

+ * + *

for(size_t i = 0; i < circles.size(); i++)

+ * + * + *

Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));

+ * + *

int radius = cvRound(circles[i][2]);

+ * + *

// draw the circle center

+ * + *

circle(img, center, 3, Scalar(0,255,0), -1, 8, 0);

+ * + *

// draw the circle outline

+ * + *

circle(img, center, radius, Scalar(0,0,255), 3, 8, 0);

+ * + * + *

namedWindow("circles", 1);

+ * + *

imshow("circles", img);

+ * + *

return 0;

+ * + * + *

Note: Usually the function detects the centers of circles well. However, it + * may fail to find correct radii. You can assist to the function by specifying + * the radius range (minRadius and maxRadius) if you + * know it. Or, you may ignore the returned radius, use only the center, and + * find the correct radius using an additional procedure. + *

+ * + * @param image 8-bit, single-channel, grayscale input image. + * @param circles Output vector of found circles. Each vector is encoded as a + * 3-element floating-point vector (x, y, radius). + * @param method Detection method to use. Currently, the only implemented method + * is CV_HOUGH_GRADIENT, which is basically *21HT*, described in + * [Yuen90]. + * @param dp Inverse ratio of the accumulator resolution to the image + * resolution. For example, if dp=1, the accumulator has the same + * resolution as the input image. If dp=2, the accumulator has half + * as big width and height. + * @param minDist Minimum distance between the centers of the detected circles. + * If the parameter is too small, multiple neighbor circles may be falsely + * detected in addition to a true one. If it is too large, some circles may be + * missed. + * @param param1 First method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the higher threshold of the two passed to the "Canny" edge detector + * (the lower one is twice smaller). + * @param param2 Second method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the accumulator threshold for the circle centers at the detection + * stage. The smaller it is, the more false circles may be detected. Circles, + * corresponding to the larger accumulator values, will be returned first. + * @param minRadius Minimum circle radius. + * @param maxRadius Maximum circle radius. + * + * @see org.opencv.imgproc.Imgproc.HoughCircles + * @see org.opencv.imgproc.Imgproc#minEnclosingCircle + * @see org.opencv.imgproc.Imgproc#fitEllipse + */ + public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius) + { + + HoughCircles_0(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius, maxRadius); + + return; + } + +/** + *

Finds circles in a grayscale image using the Hough transform.

+ * + *

The function finds circles in a grayscale image using a modification of the + * Hough transform. + * Example:

+ * + *

// C++ code:

+ * + *

#include

+ * + *

#include

+ * + *

#include

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat img, gray;

+ * + *

if(argc != 2 && !(img=imread(argv[1], 1)).data)

+ * + *

return -1;

+ * + *

cvtColor(img, gray, CV_BGR2GRAY);

+ * + *

// smooth it, otherwise a lot of false circles may be detected

+ * + *

GaussianBlur(gray, gray, Size(9, 9), 2, 2);

+ * + *

vector circles;

+ * + *

HoughCircles(gray, circles, CV_HOUGH_GRADIENT,

+ * + *

2, gray->rows/4, 200, 100);

+ * + *

for(size_t i = 0; i < circles.size(); i++)

+ * + * + *

Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));

+ * + *

int radius = cvRound(circles[i][2]);

+ * + *

// draw the circle center

+ * + *

circle(img, center, 3, Scalar(0,255,0), -1, 8, 0);

+ * + *

// draw the circle outline

+ * + *

circle(img, center, radius, Scalar(0,0,255), 3, 8, 0);

+ * + * + *

namedWindow("circles", 1);

+ * + *

imshow("circles", img);

+ * + *

return 0;

+ * + * + *

Note: Usually the function detects the centers of circles well. However, it + * may fail to find correct radii. You can assist to the function by specifying + * the radius range (minRadius and maxRadius) if you + * know it. Or, you may ignore the returned radius, use only the center, and + * find the correct radius using an additional procedure. + *

+ * + * @param image 8-bit, single-channel, grayscale input image. + * @param circles Output vector of found circles. Each vector is encoded as a + * 3-element floating-point vector (x, y, radius). + * @param method Detection method to use. Currently, the only implemented method + * is CV_HOUGH_GRADIENT, which is basically *21HT*, described in + * [Yuen90]. + * @param dp Inverse ratio of the accumulator resolution to the image + * resolution. For example, if dp=1, the accumulator has the same + * resolution as the input image. If dp=2, the accumulator has half + * as big width and height. + * @param minDist Minimum distance between the centers of the detected circles. + * If the parameter is too small, multiple neighbor circles may be falsely + * detected in addition to a true one. If it is too large, some circles may be + * missed. + * + * @see org.opencv.imgproc.Imgproc.HoughCircles + * @see org.opencv.imgproc.Imgproc#minEnclosingCircle + * @see org.opencv.imgproc.Imgproc#fitEllipse + */ + public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist) + { + + HoughCircles_1(image.nativeObj, circles.nativeObj, method, dp, minDist); + + return; + } + + + // + // C++: void HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0) + // + +/** + *

Finds lines in a binary image using the standard Hough transform.

+ * + *

The function implements the standard or standard multi-scale Hough transform + * algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm + * for a good explanation of Hough transform. + * See also the example in "HoughLinesP" description.

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a + * two-element vector (rho, theta). rho is the distance from + * the coordinate origin (0,0) (top-left corner of the image). + * theta is the line rotation angle in radians (0 ~ vertical line, + * pi/2 ~ horizontal line). + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * @param srn For the multi-scale Hough transform, it is a divisor for the + * distance resolution rho. The coarse accumulator distance + * resolution is rho and the accurate accumulator resolution is + * rho/srn. If both srn=0 and stn=0, the + * classical Hough transform is used. Otherwise, both these parameters should be + * positive. + * @param stn For the multi-scale Hough transform, it is a divisor for the + * distance resolution theta. + * + * @see org.opencv.imgproc.Imgproc.HoughLines + */ + public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) + { + + HoughLines_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn); + + return; + } + +/** + *

Finds lines in a binary image using the standard Hough transform.

+ * + *

The function implements the standard or standard multi-scale Hough transform + * algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm + * for a good explanation of Hough transform. + * See also the example in "HoughLinesP" description.

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a + * two-element vector (rho, theta). rho is the distance from + * the coordinate origin (0,0) (top-left corner of the image). + * theta is the line rotation angle in radians (0 ~ vertical line, + * pi/2 ~ horizontal line). + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * + * @see org.opencv.imgproc.Imgproc.HoughLines + */ + public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold) + { + + HoughLines_1(image.nativeObj, lines.nativeObj, rho, theta, threshold); + + return; + } + + + // + // C++: void HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0) + // + +/** + *

Finds line segments in a binary image using the probabilistic Hough + * transform.

+ * + *

The function implements the probabilistic Hough transform algorithm for line + * detection, described in[Matas00]. See the line detection example below: + *

+ * + *

// C++ code:

+ * + *

/ * This is a standalone program. Pass an image name as the first parameter

+ * + *

of the program. Switch between standard and probabilistic Hough transform

+ * + *

by changing "#if 1" to "#if 0" and back * /

+ * + *

#include

+ * + *

#include

+ * + *

#include

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat src, dst, color_dst;

+ * + *

if(argc != 2 || !(src=imread(argv[1], 0)).data)

+ * + *

return -1;

+ * + *

Canny(src, dst, 50, 200, 3);

+ * + *

cvtColor(dst, color_dst, CV_GRAY2BGR);

+ * + *

#if 0

+ * + *

vector lines;

+ * + *

HoughLines(dst, lines, 1, CV_PI/180, 100);

+ * + *

for(size_t i = 0; i < lines.size(); i++)

+ * + * + *

float rho = lines[i][0];

+ * + *

float theta = lines[i][1];

+ * + *

double a = cos(theta), b = sin(theta);

+ * + *

double x0 = a*rho, y0 = b*rho;

+ * + *

Point pt1(cvRound(x0 + 1000*(-b)),

+ * + *

cvRound(y0 + 1000*(a)));

+ * + *

Point pt2(cvRound(x0 - 1000*(-b)),

+ * + *

cvRound(y0 - 1000*(a)));

+ * + *

line(color_dst, pt1, pt2, Scalar(0,0,255), 3, 8);

+ * + * + *

#else

+ * + *

vector lines;

+ * + *

HoughLinesP(dst, lines, 1, CV_PI/180, 80, 30, 10);

+ * + *

for(size_t i = 0; i < lines.size(); i++)

+ * + * + *

line(color_dst, Point(lines[i][0], lines[i][1]),

+ * + *

Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8);

+ * + * + *

#endif

+ * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

namedWindow("Detected Lines", 1);

+ * + *

imshow("Detected Lines", color_dst);

+ * + *

waitKey(0);

+ * + *

return 0;

+ * + * + *

This is a sample picture the function parameters have been tuned for:

+ * + *

And this is the output of the above program in case of the probabilistic + * Hough transform:

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a 4-element + * vector (x_1, y_1, x_2, y_2), where (x_1,y_1) and (x_2, + * y_2) are the ending points of each detected line segment. + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * @param minLineLength Minimum line length. Line segments shorter than that are + * rejected. + * @param maxLineGap Maximum allowed gap between points on the same line to link + * them. + * + * @see org.opencv.imgproc.Imgproc.HoughLinesP + */ + public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) + { + + HoughLinesP_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength, maxLineGap); + + return; + } + +/** + *

Finds line segments in a binary image using the probabilistic Hough + * transform.

+ * + *

The function implements the probabilistic Hough transform algorithm for line + * detection, described in[Matas00]. See the line detection example below: + *

+ * + *

// C++ code:

+ * + *

/ * This is a standalone program. Pass an image name as the first parameter

+ * + *

of the program. Switch between standard and probabilistic Hough transform

+ * + *

by changing "#if 1" to "#if 0" and back * /

+ * + *

#include

+ * + *

#include

+ * + *

#include

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat src, dst, color_dst;

+ * + *

if(argc != 2 || !(src=imread(argv[1], 0)).data)

+ * + *

return -1;

+ * + *

Canny(src, dst, 50, 200, 3);

+ * + *

cvtColor(dst, color_dst, CV_GRAY2BGR);

+ * + *

#if 0

+ * + *

vector lines;

+ * + *

HoughLines(dst, lines, 1, CV_PI/180, 100);

+ * + *

for(size_t i = 0; i < lines.size(); i++)

+ * + * + *

float rho = lines[i][0];

+ * + *

float theta = lines[i][1];

+ * + *

double a = cos(theta), b = sin(theta);

+ * + *

double x0 = a*rho, y0 = b*rho;

+ * + *

Point pt1(cvRound(x0 + 1000*(-b)),

+ * + *

cvRound(y0 + 1000*(a)));

+ * + *

Point pt2(cvRound(x0 - 1000*(-b)),

+ * + *

cvRound(y0 - 1000*(a)));

+ * + *

line(color_dst, pt1, pt2, Scalar(0,0,255), 3, 8);

+ * + * + *

#else

+ * + *

vector lines;

+ * + *

HoughLinesP(dst, lines, 1, CV_PI/180, 80, 30, 10);

+ * + *

for(size_t i = 0; i < lines.size(); i++)

+ * + * + *

line(color_dst, Point(lines[i][0], lines[i][1]),

+ * + *

Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8);

+ * + * + *

#endif

+ * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

namedWindow("Detected Lines", 1);

+ * + *

imshow("Detected Lines", color_dst);

+ * + *

waitKey(0);

+ * + *

return 0;

+ * + * + *

This is a sample picture the function parameters have been tuned for:

+ * + *

And this is the output of the above program in case of the probabilistic + * Hough transform:

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a 4-element + * vector (x_1, y_1, x_2, y_2), where (x_1,y_1) and (x_2, + * y_2) are the ending points of each detected line segment. + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * + * @see org.opencv.imgproc.Imgproc.HoughLinesP + */ + public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold) + { + + HoughLinesP_1(image.nativeObj, lines.nativeObj, rho, theta, threshold); + + return; + } + + + // + // C++: void HuMoments(Moments m, Mat& hu) + // + +/** + *

Calculates seven Hu invariants.

+ * + *

The function calculates seven Hu invariants (introduced in [Hu62]; see also + * http://en.wikipedia.org/wiki/Image_moment) defined as:

+ * + *

hu[0]= eta _20+ eta _02 + * hu[1]=(eta _20- eta _02)^2+4 eta _11^2 + * hu[2]=(eta _30-3 eta _12)^2+ (3 eta _21- eta _03)^2 + * hu[3]=(eta _30+ eta _12)^2+ (eta _21+ eta _03)^2 + * hu[4]=(eta _30-3 eta _12)(eta _30+ eta _12)[(eta _30+ eta _12)^2-3(eta _21+ + * eta _03)^2]+(3 eta _21- eta _03)(eta _21+ eta _03)[3(eta _30+ eta _12)^2-(eta + * _21+ eta _03)^2] + * hu[5]=(eta _20- eta _02)[(eta _30+ eta _12)^2- (eta _21+ eta _03)^2]+4 eta + * _11(eta _30+ eta _12)(eta _21+ eta _03) + * hu[6]=(3 eta _21- eta _03)(eta _21+ eta _03)[3(eta _30+ eta _12)^2-(eta _21+ + * eta _03)^2]-(eta _30-3 eta _12)(eta _21+ eta _03)[3(eta _30+ eta _12)^2-(eta + * _21+ eta _03)^2] + *

+ * + *

where eta_(ji) stands for Moments.nu_(ji).

+ * + *

These values are proved to be invariants to the image scale, rotation, and + * reflection except the seventh one, whose sign is changed by reflection. This + * invariance is proved with the assumption of infinite image resolution. In + * case of raster images, the computed Hu invariants for the original and + * transformed images are a bit different.

+ * + * @param m a m + * @param hu Output Hu invariants. + * + * @see org.opencv.imgproc.Imgproc.HuMoments + * @see org.opencv.imgproc.Imgproc#matchShapes + */ + public static void HuMoments(Moments m, Mat hu) + { + + HuMoments_0(m.nativeObj, hu.nativeObj); + + return; + } + + + // + // C++: void Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the Laplacian of an image.

+ * + *

The function calculates the Laplacian of the source image by adding up the + * second x and y derivatives calculated using the Sobel operator:

+ * + *

dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)

+ * + *

This is done when ksize > 1. When ksize == 1, the + * Laplacian is computed by filtering the image with the following 3 x + * 3 aperture:

+ * + *

vecthreethree 0101(-4)1010

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Desired depth of the destination image. + * @param ksize Aperture size used to compute the second-derivative filters. See + * "getDerivKernels" for details. The size must be positive and odd. + * @param scale Optional scale factor for the computed Laplacian values. By + * default, no scaling is applied. See "getDerivKernels" for details. + * @param delta Optional delta value that is added to the results prior to + * storing them in dst. + * @param borderType Pixel extrapolation method. See "borderInterpolate" for + * details. + * + * @see org.opencv.imgproc.Imgproc.Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta, int borderType) + { + + Laplacian_0(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta, borderType); + + return; + } + +/** + *

Calculates the Laplacian of an image.

+ * + *

The function calculates the Laplacian of the source image by adding up the + * second x and y derivatives calculated using the Sobel operator:

+ * + *

dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)

+ * + *

This is done when ksize > 1. When ksize == 1, the + * Laplacian is computed by filtering the image with the following 3 x + * 3 aperture:

+ * + *

vecthreethree 0101(-4)1010

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Desired depth of the destination image. + * @param ksize Aperture size used to compute the second-derivative filters. See + * "getDerivKernels" for details. The size must be positive and odd. + * @param scale Optional scale factor for the computed Laplacian values. By + * default, no scaling is applied. See "getDerivKernels" for details. + * @param delta Optional delta value that is added to the results prior to + * storing them in dst. + * + * @see org.opencv.imgproc.Imgproc.Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta) + { + + Laplacian_1(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta); + + return; + } + +/** + *

Calculates the Laplacian of an image.

+ * + *

The function calculates the Laplacian of the source image by adding up the + * second x and y derivatives calculated using the Sobel operator:

+ * + *

dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)

+ * + *

This is done when ksize > 1. When ksize == 1, the + * Laplacian is computed by filtering the image with the following 3 x + * 3 aperture:

+ * + *

vecthreethree 0101(-4)1010

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Desired depth of the destination image. + * + * @see org.opencv.imgproc.Imgproc.Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void Laplacian(Mat src, Mat dst, int ddepth) + { + + Laplacian_2(src.nativeObj, dst.nativeObj, ddepth); + + return; + } + + + // + // C++: double PSNR(Mat src1, Mat src2) + // + + public static double PSNR(Mat src1, Mat src2) + { + + double retVal = PSNR_0(src1.nativeObj, src2.nativeObj); + + return retVal; + } + + + // + // C++: void Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the first x- or y- image derivative using Scharr operator.

+ * + *

The function computes the first x- or y- spatial image derivative using the + * Scharr operator. The call

+ * + *

Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)

+ * + *

is equivalent to

+ * + *

Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, + * borderType).

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth (see "Sobel" for the list of supported + * combination of src.depth() and ddepth). + * @param dx order of the derivative x. + * @param dy order of the derivative y. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.Scharr + * @see org.opencv.core.Core#cartToPolar + */ + public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta, int borderType) + { + + Scharr_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta, borderType); + + return; + } + +/** + *

Calculates the first x- or y- image derivative using Scharr operator.

+ * + *

The function computes the first x- or y- spatial image derivative using the + * Scharr operator. The call

+ * + *

Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)

+ * + *

is equivalent to

+ * + *

Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, + * borderType).

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth (see "Sobel" for the list of supported + * combination of src.depth() and ddepth). + * @param dx order of the derivative x. + * @param dy order of the derivative y. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * + * @see org.opencv.imgproc.Imgproc.Scharr + * @see org.opencv.core.Core#cartToPolar + */ + public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta) + { + + Scharr_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta); + + return; + } + +/** + *

Calculates the first x- or y- image derivative using Scharr operator.

+ * + *

The function computes the first x- or y- spatial image derivative using the + * Scharr operator. The call

+ * + *

Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)

+ * + *

is equivalent to

+ * + *

Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, + * borderType).

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth (see "Sobel" for the list of supported + * combination of src.depth() and ddepth). + * @param dx order of the derivative x. + * @param dy order of the derivative y. + * + * @see org.opencv.imgproc.Imgproc.Scharr + * @see org.opencv.core.Core#cartToPolar + */ + public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy) + { + + Scharr_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy); + + return; + } + + + // + // C++: void Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the first, second, third, or mixed image derivatives using an + * extended Sobel operator.

+ * + *

In all cases except one, the ksize x<BR>ksize separable kernel + * is used to calculate the derivative. When ksize = 1, the 3 x + * 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is + * done). ksize = 1 can only be used for the first or the second x- + * or y- derivatives.

+ * + *

There is also the special value ksize = CV_SCHARR (-1) that + * corresponds to the 3x3 Scharr filter that may give more accurate + * results than the 3x3 Sobel. The Scharr aperture is

+ * + *

+ * |-3 0 3| + * |-10 0 10| + * |-3 0 3| + *

+ * + *

for the x-derivative, or transposed for the y-derivative.

+ * + *

The function calculates an image derivative by convolving the image with the + * appropriate kernel:

+ * + *

dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))

+ * + *

The Sobel operators combine Gaussian smoothing and differentiation, so the + * result is more or less resistant to the noise. Most often, the function is + * called with (xorder = 1, yorder = 0, + * ksize = 3) or (xorder = 0, yorder = 1, + * ksize = 3) to calculate the first x- or y- image derivative. The + * first case corresponds to a kernel of:

+ * + *

+ * |-1 0 1| + * |-2 0 2| + * |-1 0 1| + *

+ * + *

The second case corresponds to a kernel of:

+ * + *

+ * |-1 -2 -1| + * |0 0 0| + * |1 2 1| + *

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth; the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source; in the case of 8-bit input images it will result in truncated + * derivatives.

+ * @param dx a dx + * @param dy a dy + * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.Sobel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) + { + + Sobel_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta, borderType); + + return; + } + +/** + *

Calculates the first, second, third, or mixed image derivatives using an + * extended Sobel operator.

+ * + *

In all cases except one, the ksize x<BR>ksize separable kernel + * is used to calculate the derivative. When ksize = 1, the 3 x + * 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is + * done). ksize = 1 can only be used for the first or the second x- + * or y- derivatives.

+ * + *

There is also the special value ksize = CV_SCHARR (-1) that + * corresponds to the 3x3 Scharr filter that may give more accurate + * results than the 3x3 Sobel. The Scharr aperture is

+ * + *

+ * |-3 0 3| + * |-10 0 10| + * |-3 0 3| + *

+ * + *

for the x-derivative, or transposed for the y-derivative.

+ * + *

The function calculates an image derivative by convolving the image with the + * appropriate kernel:

+ * + *

dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))

+ * + *

The Sobel operators combine Gaussian smoothing and differentiation, so the + * result is more or less resistant to the noise. Most often, the function is + * called with (xorder = 1, yorder = 0, + * ksize = 3) or (xorder = 0, yorder = 1, + * ksize = 3) to calculate the first x- or y- image derivative. The + * first case corresponds to a kernel of:

+ * + *

+ * |-1 0 1| + * |-2 0 2| + * |-1 0 1| + *

+ * + *

The second case corresponds to a kernel of:

+ * + *

+ * |-1 -2 -1| + * |0 0 0| + * |1 2 1| + *

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth; the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source; in the case of 8-bit input images it will result in truncated + * derivatives.

+ * @param dx a dx + * @param dy a dy + * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * + * @see org.opencv.imgproc.Imgproc.Sobel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta) + { + + Sobel_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta); + + return; + } + +/** + *

Calculates the first, second, third, or mixed image derivatives using an + * extended Sobel operator.

+ * + *

In all cases except one, the ksize x<BR>ksize separable kernel + * is used to calculate the derivative. When ksize = 1, the 3 x + * 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is + * done). ksize = 1 can only be used for the first or the second x- + * or y- derivatives.

+ * + *

There is also the special value ksize = CV_SCHARR (-1) that + * corresponds to the 3x3 Scharr filter that may give more accurate + * results than the 3x3 Sobel. The Scharr aperture is

+ * + *

+ * |-3 0 3| + * |-10 0 10| + * |-3 0 3| + *

+ * + *

for the x-derivative, or transposed for the y-derivative.

+ * + *

The function calculates an image derivative by convolving the image with the + * appropriate kernel:

+ * + *

dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))

+ * + *

The Sobel operators combine Gaussian smoothing and differentiation, so the + * result is more or less resistant to the noise. Most often, the function is + * called with (xorder = 1, yorder = 0, + * ksize = 3) or (xorder = 0, yorder = 1, + * ksize = 3) to calculate the first x- or y- image derivative. The + * first case corresponds to a kernel of:

+ * + *

+ * |-1 0 1| + * |-2 0 2| + * |-1 0 1| + *

+ * + *

The second case corresponds to a kernel of:

+ * + *

+ * |-1 -2 -1| + * |0 0 0| + * |1 2 1| + *

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth; the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source; in the case of 8-bit input images it will result in truncated + * derivatives.

+ * @param dx a dx + * @param dy a dy + * + * @see org.opencv.imgproc.Imgproc.Sobel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy) + { + + Sobel_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy); + + return; + } + + + // + // C++: void accumulate(Mat src, Mat& dst, Mat mask = Mat()) + // + +/** + *

Adds an image to the accumulator.

+ * + *

The function adds src or some of its elements to + * dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + *

The functions accumulate* can be used, for example, to collect + * statistics of a scene background viewed by a still camera and for the further + * foreground-background segmentation.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulate(Mat src, Mat dst, Mat mask) + { + + accumulate_0(src.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Adds an image to the accumulator.

+ * + *

The function adds src or some of its elements to + * dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + *

The functions accumulate* can be used, for example, to collect + * statistics of a scene background viewed by a still camera and for the further + * foreground-background segmentation.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * + * @see org.opencv.imgproc.Imgproc.accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulate(Mat src, Mat dst) + { + + accumulate_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Adds the per-element product of two input images to the accumulator.

+ * + *

The function adds the product of two images or their selected regions to the + * accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src1(x,y) * src2(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating + * point. + * @param src2 Second input image of the same type and the same size as + * src1. + * @param dst Accumulator with the same number of channels as input images, + * 32-bit or 64-bit floating-point. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateProduct(Mat src1, Mat src2, Mat dst, Mat mask) + { + + accumulateProduct_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Adds the per-element product of two input images to the accumulator.

+ * + *

The function adds the product of two images or their selected regions to the + * accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src1(x,y) * src2(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating + * point. + * @param src2 Second input image of the same type and the same size as + * src1. + * @param dst Accumulator with the same number of channels as input images, + * 32-bit or 64-bit floating-point. + * + * @see org.opencv.imgproc.Imgproc.accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateProduct(Mat src1, Mat src2, Mat dst) + { + + accumulateProduct_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void accumulateSquare(Mat src, Mat& dst, Mat mask = Mat()) + // + +/** + *

Adds the square of a source image to the accumulator.

+ * + *

The function adds the input image src or its selected region, + * raised to a power of 2, to the accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y)^2 if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateSquare(Mat src, Mat dst, Mat mask) + { + + accumulateSquare_0(src.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Adds the square of a source image to the accumulator.

+ * + *

The function adds the input image src or its selected region, + * raised to a power of 2, to the accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y)^2 if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * + * @see org.opencv.imgproc.Imgproc.accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateSquare(Mat src, Mat dst) + { + + accumulateSquare_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) + // + +/** + *

Updates a running average.

+ * + *

The function calculates the weighted sum of the input image src + * and the accumulator dst so that dst becomes a + * running average of a frame sequence:

+ * + *

dst(x,y) <- (1- alpha) * dst(x,y) + alpha * src(x,y) if mask(x,y) != + * 0

+ * + *

That is, alpha regulates the update speed (how fast the + * accumulator "forgets" about earlier images). + * The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param alpha Weight of the input image. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateWeighted(Mat src, Mat dst, double alpha, Mat mask) + { + + accumulateWeighted_0(src.nativeObj, dst.nativeObj, alpha, mask.nativeObj); + + return; + } + +/** + *

Updates a running average.

+ * + *

The function calculates the weighted sum of the input image src + * and the accumulator dst so that dst becomes a + * running average of a frame sequence:

+ * + *

dst(x,y) <- (1- alpha) * dst(x,y) + alpha * src(x,y) if mask(x,y) != + * 0

+ * + *

That is, alpha regulates the update speed (how fast the + * accumulator "forgets" about earlier images). + * The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param alpha Weight of the input image. + * + * @see org.opencv.imgproc.Imgproc.accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateWeighted(Mat src, Mat dst, double alpha) + { + + accumulateWeighted_1(src.nativeObj, dst.nativeObj, alpha); + + return; + } + + + // + // C++: void adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) + // + +/** + *

Applies an adaptive threshold to an array.

+ * + *

The function transforms a grayscale image to a binary image according to the + * formulae:

+ *
    + *
  • THRESH_BINARY + *
+ * + *

dst(x,y) = maxValue if src(x,y) > T(x,y); 0 otherwise

+ * + *
    + *
  • THRESH_BINARY_INV + *
+ * + *

dst(x,y) = 0 if src(x,y) > T(x,y); maxValue otherwise

+ * + *

where T(x,y) is a threshold calculated individually for each pixel.

+ *
    + *
  • For the method ADAPTIVE_THRESH_MEAN_C, the threshold + * value T(x,y) is a mean of the blockSize x blockSize + * neighborhood of (x, y) minus C. + *
  • For the method ADAPTIVE_THRESH_GAUSSIAN_C, the threshold + * value T(x, y) is a weighted sum (cross-correlation with a Gaussian + * window) of the blockSize x blockSize neighborhood of (x, y) + * minus C. The default sigma (standard deviation) is used for the + * specified blockSize. See "getGaussianKernel". + *
+ * + *

The function can process the image in-place.

+ * + * @param src Source 8-bit single-channel image. + * @param dst Destination image of the same size and the same type as + * src. + * @param maxValue Non-zero value assigned to the pixels for which the condition + * is satisfied. See the details below. + * @param adaptiveMethod Adaptive thresholding algorithm to use, + * ADAPTIVE_THRESH_MEAN_C or ADAPTIVE_THRESH_GAUSSIAN_C. + * See the details below. + * @param thresholdType Thresholding type that must be either THRESH_BINARY + * or THRESH_BINARY_INV. + * @param blockSize Size of a pixel neighborhood that is used to calculate a + * threshold value for the pixel: 3, 5, 7, and so on. + * @param C Constant subtracted from the mean or weighted mean (see the details + * below). Normally, it is positive but may be zero or negative as well. + * + * @see org.opencv.imgproc.Imgproc.adaptiveThreshold + * @see org.opencv.imgproc.Imgproc#threshold + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void adaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) + { + + adaptiveThreshold_0(src.nativeObj, dst.nativeObj, maxValue, adaptiveMethod, thresholdType, blockSize, C); + + return; + } + + + // + // C++: void approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed) + // + +/** + *

Approximates a polygonal curve(s) with the specified precision.

+ * + *

The functions approxPolyDP approximate a curve or a polygon with + * another curve/polygon with less vertices so that the distance between them is + * less or equal to the specified precision. It uses the Douglas-Peucker + * algorithm http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm

+ * + *

See http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/contours.cpp + * for the function usage model.

+ * + * @param curve Input vector of a 2D point stored in: + *
    + *
  • std.vector or Mat (C++ interface) + *
  • Nx2 numpy array (Python interface) + *
  • CvSeq or CvMat" (C interface) + *
+ * @param approxCurve Result of the approximation. The type should match the + * type of the input curve. In case of C interface the approximated curve is + * stored in the memory storage and pointer to it is returned. + * @param epsilon Parameter specifying the approximation accuracy. This is the + * maximum distance between the original curve and its approximation. + * @param closed If true, the approximated curve is closed (its first and last + * vertices are connected). Otherwise, it is not closed. + * + * @see org.opencv.imgproc.Imgproc.approxPolyDP + */ + public static void approxPolyDP(MatOfPoint2f curve, MatOfPoint2f approxCurve, double epsilon, boolean closed) + { + Mat curve_mat = curve; + Mat approxCurve_mat = approxCurve; + approxPolyDP_0(curve_mat.nativeObj, approxCurve_mat.nativeObj, epsilon, closed); + + return; + } + + + // + // C++: double arcLength(vector_Point2f curve, bool closed) + // + +/** + *

Calculates a contour perimeter or a curve length.

+ * + *

The function computes a curve length or a closed contour perimeter.

+ * + * @param curve Input vector of 2D points, stored in std.vector or + * Mat. + * @param closed Flag indicating whether the curve is closed or not. + * + * @see org.opencv.imgproc.Imgproc.arcLength + */ + public static double arcLength(MatOfPoint2f curve, boolean closed) + { + Mat curve_mat = curve; + double retVal = arcLength_0(curve_mat.nativeObj, closed); + + return retVal; + } + + + // + // C++: void bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT) + // + +/** + *

Applies the bilateral filter to an image.

+ * + *

The function applies bilateral filtering to the input image, as described in + * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html + * bilateralFilter can reduce unwanted noise very well while + * keeping edges fairly sharp. However, it is very slow compared to most + * filters.

+ *
    + *
  • Sigma values*: For simplicity, you can set the 2 sigma values to be the + * same. If they are small (< 10), the filter will not have much effect, whereas + * if they are large (> 150), they will have a very strong effect, making the + * image look "cartoonish". + *
  • Filter size*: Large filters (d > 5) are very slow, so it is recommended + * to use d=5 for real-time applications, and perhaps d=9 for offline + * applications that need heavy noise filtering. + *
+ * + *

This filter does not work inplace.

+ * + * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. + * @param dst Destination image of the same size and type as src. + * @param d Diameter of each pixel neighborhood that is used during filtering. + * If it is non-positive, it is computed from sigmaSpace. + * @param sigmaColor Filter sigma in the color space. A larger value of the + * parameter means that farther colors within the pixel neighborhood (see + * sigmaSpace) will be mixed together, resulting in larger areas of + * semi-equal color. + * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the + * parameter means that farther pixels will influence each other as long as + * their colors are close enough (see sigmaColor). When + * d>0, it specifies the neighborhood size regardless of + * sigmaSpace. Otherwise, d is proportional to + * sigmaSpace. + * @param borderType a borderType + * + * @see org.opencv.imgproc.Imgproc.bilateralFilter + */ + public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType) + { + + bilateralFilter_0(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace, borderType); + + return; + } + +/** + *

Applies the bilateral filter to an image.

+ * + *

The function applies bilateral filtering to the input image, as described in + * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html + * bilateralFilter can reduce unwanted noise very well while + * keeping edges fairly sharp. However, it is very slow compared to most + * filters.

+ *
    + *
  • Sigma values*: For simplicity, you can set the 2 sigma values to be the + * same. If they are small (< 10), the filter will not have much effect, whereas + * if they are large (> 150), they will have a very strong effect, making the + * image look "cartoonish". + *
  • Filter size*: Large filters (d > 5) are very slow, so it is recommended + * to use d=5 for real-time applications, and perhaps d=9 for offline + * applications that need heavy noise filtering. + *
+ * + *

This filter does not work inplace.

+ * + * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. + * @param dst Destination image of the same size and type as src. + * @param d Diameter of each pixel neighborhood that is used during filtering. + * If it is non-positive, it is computed from sigmaSpace. + * @param sigmaColor Filter sigma in the color space. A larger value of the + * parameter means that farther colors within the pixel neighborhood (see + * sigmaSpace) will be mixed together, resulting in larger areas of + * semi-equal color. + * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the + * parameter means that farther pixels will influence each other as long as + * their colors are close enough (see sigmaColor). When + * d>0, it specifies the neighborhood size regardless of + * sigmaSpace. Otherwise, d is proportional to + * sigmaSpace. + * + * @see org.opencv.imgproc.Imgproc.bilateralFilter + */ + public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace) + { + + bilateralFilter_1(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace); + + return; + } + + + // + // C++: void blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image using the normalized box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = 1/(ksize.width*ksize.height) 1 1 1 *s 1 1 + * 1 1 1 *s 1 1.................. + * 1 1 1 *s 1 1 + *

+ * + *

The call blur(src, dst, ksize, anchor, borderType) is equivalent + * to boxFilter(src, dst, src.type(), anchor, true, borderType).

+ * + * @param src input image; it can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * @param borderType border mode used to extrapolate pixels outside of the + * image. + * + * @see org.opencv.imgproc.Imgproc.blur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#medianBlur + */ + public static void blur(Mat src, Mat dst, Size ksize, Point anchor, int borderType) + { + + blur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y, borderType); + + return; + } + +/** + *

Blurs an image using the normalized box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = 1/(ksize.width*ksize.height) 1 1 1 *s 1 1 + * 1 1 1 *s 1 1.................. + * 1 1 1 *s 1 1 + *

+ * + *

The call blur(src, dst, ksize, anchor, borderType) is equivalent + * to boxFilter(src, dst, src.type(), anchor, true, borderType).

+ * + * @param src input image; it can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * + * @see org.opencv.imgproc.Imgproc.blur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#medianBlur + */ + public static void blur(Mat src, Mat dst, Size ksize, Point anchor) + { + + blur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y); + + return; + } + +/** + *

Blurs an image using the normalized box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = 1/(ksize.width*ksize.height) 1 1 1 *s 1 1 + * 1 1 1 *s 1 1.................. + * 1 1 1 *s 1 1 + *

+ * + *

The call blur(src, dst, ksize, anchor, borderType) is equivalent + * to boxFilter(src, dst, src.type(), anchor, true, borderType).

+ * + * @param src input image; it can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize blurring kernel size. + * + * @see org.opencv.imgproc.Imgproc.blur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#medianBlur + */ + public static void blur(Mat src, Mat dst, Size ksize) + { + + blur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height); + + return; + } + + + // + // C++: int borderInterpolate(int p, int len, int borderType) + // + +/** + *

Computes the source location of an extrapolated pixel.

+ * + *

The function computes and returns the coordinate of a donor pixel + * corresponding to the specified extrapolated pixel when using the specified + * extrapolation border mode. For example, if you use BORDER_WRAP + * mode in the horizontal direction, BORDER_REFLECT_101 in the + * vertical direction and want to compute value of the "virtual" pixel + * Point(-5, 100) in a floating-point image img, it + * looks like:

+ * + *

// C++ code:

+ * + *

float val = img.at<float>(borderInterpolate(100, img.rows, BORDER_REFLECT_101),

+ * + *

borderInterpolate(-5, img.cols, BORDER_WRAP));

+ * + *

Normally, the function is not called directly. It is used inside

+ * + *

"FilterEngine" and "copyMakeBorder" to compute tables for quick + * extrapolation.

+ * + * @param p 0-based coordinate of the extrapolated pixel along one of the axes, + * likely <0 or >= len. + * @param len Length of the array along the corresponding axis. + * @param borderType Border type, one of the BORDER_*, except for + * BORDER_TRANSPARENT and BORDER_ISOLATED. When + * borderType==BORDER_CONSTANT, the function always returns -1, + * regardless of p and len. + * + * @see org.opencv.imgproc.Imgproc.borderInterpolate + * @see org.opencv.imgproc.Imgproc#copyMakeBorder + */ + public static int borderInterpolate(int p, int len, int borderType) + { + + int retVal = borderInterpolate_0(p, len, borderType); + + return retVal; + } + + + // + // C++: Rect boundingRect(vector_Point points) + // + +/** + *

Calculates the up-right bounding rectangle of a point set.

+ * + *

The function calculates and returns the minimal up-right bounding rectangle + * for the specified point set.

+ * + * @param points Input 2D point set, stored in std.vector or + * Mat. + * + * @see org.opencv.imgproc.Imgproc.boundingRect + */ + public static Rect boundingRect(MatOfPoint points) + { + Mat points_mat = points; + Rect retVal = new Rect(boundingRect_0(points_mat.nativeObj)); + + return retVal; + } + + + // + // C++: void boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image using the box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = alpha 1 1 1 *s 1 1 + * 1 1 1 *s 1 1.................. + * 1 1 1 *s 1 1

+ * + *

where

+ * + *

alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 + * otherwise

+ * + *

Unnormalized box filter is useful for computing various integral + * characteristics over each pixel neighborhood, such as covariance matrices of + * image derivatives (used in dense optical flow algorithms, and so on). If you + * need to compute pixel sums over variable-size windows, use "integral".

+ * + * @param src input image. + * @param dst output image of the same size and type as src. + * @param ddepth the output image depth (-1 to use src.depth()). + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * @param normalize flag, specifying whether the kernel is normalized by its + * area or not. + * @param borderType border mode used to extrapolate pixels outside of the + * image. + * + * @see org.opencv.imgproc.Imgproc.boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#integral + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) + { + + boxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType); + + return; + } + +/** + *

Blurs an image using the box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = alpha 1 1 1 *s 1 1 + * 1 1 1 *s 1 1.................. + * 1 1 1 *s 1 1

+ * + *

where

+ * + *

alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 + * otherwise

+ * + *

Unnormalized box filter is useful for computing various integral + * characteristics over each pixel neighborhood, such as covariance matrices of + * image derivatives (used in dense optical flow algorithms, and so on). If you + * need to compute pixel sums over variable-size windows, use "integral".

+ * + * @param src input image. + * @param dst output image of the same size and type as src. + * @param ddepth the output image depth (-1 to use src.depth()). + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * @param normalize flag, specifying whether the kernel is normalized by its + * area or not. + * + * @see org.opencv.imgproc.Imgproc.boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#integral + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) + { + + boxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize); + + return; + } + +/** + *

Blurs an image using the box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = alpha 1 1 1 *s 1 1 + * 1 1 1 *s 1 1.................. + * 1 1 1 *s 1 1

+ * + *

where

+ * + *

alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 + * otherwise

+ * + *

Unnormalized box filter is useful for computing various integral + * characteristics over each pixel neighborhood, such as covariance matrices of + * image derivatives (used in dense optical flow algorithms, and so on). If you + * need to compute pixel sums over variable-size windows, use "integral".

+ * + * @param src input image. + * @param dst output image of the same size and type as src. + * @param ddepth the output image depth (-1 to use src.depth()). + * @param ksize blurring kernel size. + * + * @see org.opencv.imgproc.Imgproc.boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#integral + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize) + { + + boxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height); + + return; + } + + + // + // C++: void calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale) + // + +/** + *

Calculates the back projection of a histogram.

+ * + *

The functions calcBackProject calculate the back project of the + * histogram. That is, similarly to calcHist, at each location + * (x, y) the function collects the values from the selected + * channels in the input images and finds the corresponding histogram bin. But + * instead of incrementing it, the function reads the bin value, scales it by + * scale, and stores in backProject(x,y). In terms of + * statistics, the function computes probability of each element value in + * respect with the empirical probability distribution represented by the + * histogram. See how, for example, you can find and track a bright-colored + * object in a scene:

+ *
    + *
  • Before tracking, show the object to the camera so that it covers + * almost the whole frame. Calculate a hue histogram. The histogram may have + * strong maximums, corresponding to the dominant colors in the object. + *
  • When tracking, calculate a back projection of a hue plane of each + * input video frame using that pre-computed histogram. Threshold the back + * projection to suppress weak colors. It may also make sense to suppress pixels + * with non-sufficient color saturation and too dark or too bright pixels. + *
  • Find connected components in the resulting picture and choose, for + * example, the largest component. + *
+ * + *

This is an approximate algorithm of the "CamShift" color object tracker.

+ * + * @param images Source arrays. They all should have the same depth, + * CV_8U or CV_32F, and the same size. Each of them + * can have an arbitrary number of channels. + * @param channels The list of channels used to compute the back projection. The + * number of channels must match the histogram dimensionality. The first array + * channels are numerated from 0 to images[0].channels()-1, the + * second array channels are counted from images[0].channels() to + * images[0].channels() + images[1].channels()-1, and so on. + * @param hist Input histogram that can be dense or sparse. + * @param dst a dst + * @param ranges Array of arrays of the histogram bin boundaries in each + * dimension. See "calcHist". + * @param scale Optional scale factor for the output back projection. + * + * @see org.opencv.imgproc.Imgproc.calcBackProject + * @see org.opencv.imgproc.Imgproc#calcHist + */ + public static void calcBackProject(List images, MatOfInt channels, Mat hist, Mat dst, MatOfFloat ranges, double scale) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat channels_mat = channels; + Mat ranges_mat = ranges; + calcBackProject_0(images_mat.nativeObj, channels_mat.nativeObj, hist.nativeObj, dst.nativeObj, ranges_mat.nativeObj, scale); + + return; + } + + + // + // C++: void calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false) + // + +/** + *

Calculates a histogram of a set of arrays.

+ * + *

The functions calcHist calculate the histogram of one or more + * arrays. The elements of a tuple used to increment a histogram bin are taken + * from the correspondinginput arrays at the same location. The sample below + * shows how to compute a 2D Hue-Saturation histogram for a color image. + *

+ * + *

// C++ code:

+ * + *

#include <cv.h>

+ * + *

#include <highgui.h>

+ * + *

using namespace cv;

+ * + *

int main(int argc, char** argv)

+ * + * + *

Mat src, hsv;

+ * + *

if(argc != 2 || !(src=imread(argv[1], 1)).data)

+ * + *

return -1;

+ * + *

cvtColor(src, hsv, CV_BGR2HSV);

+ * + *

// Quantize the hue to 30 levels

+ * + *

// and the saturation to 32 levels

+ * + *

int hbins = 30, sbins = 32;

+ * + *

int histSize[] = {hbins, sbins};

+ * + *

// hue varies from 0 to 179, see cvtColor

+ * + *

float hranges[] = { 0, 180 };

+ * + *

// saturation varies from 0 (black-gray-white) to

+ * + *

// 255 (pure spectrum color)

+ * + *

float sranges[] = { 0, 256 };

+ * + *

const float* ranges[] = { hranges, sranges };

+ * + *

MatND hist;

+ * + *

// we compute the histogram from the 0-th and 1-st channels

+ * + *

int channels[] = {0, 1};

+ * + *

calcHist(&hsv, 1, channels, Mat(), // do not use mask

+ * + *

hist, 2, histSize, ranges,

+ * + *

true, // the histogram is uniform

+ * + *

false);

+ * + *

double maxVal=0;

+ * + *

minMaxLoc(hist, 0, &maxVal, 0, 0);

+ * + *

int scale = 10;

+ * + *

Mat histImg = Mat.zeros(sbins*scale, hbins*10, CV_8UC3);

+ * + *

for(int h = 0; h < hbins; h++)

+ * + *

for(int s = 0; s < sbins; s++)

+ * + * + *

float binVal = hist.at<float>(h, s);

+ * + *

int intensity = cvRound(binVal*255/maxVal);

+ * + *

rectangle(histImg, Point(h*scale, s*scale),

+ * + *

Point((h+1)*scale - 1, (s+1)*scale - 1),

+ * + *

Scalar.all(intensity),

+ * + *

CV_FILLED);

+ * + * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

namedWindow("H-S Histogram", 1);

+ * + *

imshow("H-S Histogram", histImg);

+ * + *

waitKey();

+ * + * + * @param images Source arrays. They all should have the same depth, + * CV_8U or CV_32F, and the same size. Each of them + * can have an arbitrary number of channels. + * @param channels List of the dims channels used to compute the + * histogram. The first array channels are numerated from 0 to images[0].channels()-1, + * the second array channels are counted from images[0].channels() + * to images[0].channels() + images[1].channels()-1, and so on. + * @param mask Optional mask. If the matrix is not empty, it must be an 8-bit + * array of the same size as images[i]. The non-zero mask elements + * mark the array elements counted in the histogram. + * @param hist Output histogram, which is a dense or sparse dims + * -dimensional array. + * @param histSize Array of histogram sizes in each dimension. + * @param ranges Array of the dims arrays of the histogram bin + * boundaries in each dimension. When the histogram is uniform (uniform + * =true), then for each dimension i it is enough to specify the + * lower (inclusive) boundary L_0 of the 0-th histogram bin and the + * upper (exclusive) boundary U_(histSize[i]-1) for the last histogram + * bin histSize[i]-1. That is, in case of a uniform histogram each + * of ranges[i] is an array of 2 elements. When the histogram is + * not uniform (uniform=false), then each of ranges[i] + * contains histSize[i]+1 elements: L_0, U_0=L_1, U_1=L_2,..., + * U_(histSize[i]-2)=L_(histSize[i]-1), U_(histSize[i]-1). The array + * elements, that are not between L_0 and U_(histSize[i]-1), + * are not counted in the histogram. + * @param accumulate Accumulation flag. If it is set, the histogram is not + * cleared in the beginning when it is allocated. This feature enables you to + * compute a single histogram from several sets of arrays, or to update the + * histogram in time. 
+ * + * @see org.opencv.imgproc.Imgproc.calcHist + */ + public static void calcHist(List images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges, boolean accumulate) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat channels_mat = channels; + Mat histSize_mat = histSize; + Mat ranges_mat = ranges; + calcHist_0(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj, accumulate); + + return; + } + +/** + *

Calculates a histogram of a set of arrays.

+ * + *

The functions calcHist calculate the histogram of one or more + * arrays. The elements of a tuple used to increment a histogram bin are taken + * from the correspondinginput arrays at the same location. The sample below + * shows how to compute a 2D Hue-Saturation histogram for a color image. + *

+ * + *

// C++ code:

+ * + *

#include <cv.h>

+ * + *

#include <highgui.h>

+ * + *

using namespace cv;

+ * + *

int main(int argc, char** argv)

+ * + * + *

Mat src, hsv;

+ * + *

if(argc != 2 || !(src=imread(argv[1], 1)).data)

+ * + *

return -1;

+ * + *

cvtColor(src, hsv, CV_BGR2HSV);

+ * + *

// Quantize the hue to 30 levels

+ * + *

// and the saturation to 32 levels

+ * + *

int hbins = 30, sbins = 32;

+ * + *

int histSize[] = {hbins, sbins};

+ * + *

// hue varies from 0 to 179, see cvtColor

+ * + *

float hranges[] = { 0, 180 };

+ * + *

// saturation varies from 0 (black-gray-white) to

+ * + *

// 255 (pure spectrum color)

+ * + *

float sranges[] = { 0, 256 };

+ * + *

const float* ranges[] = { hranges, sranges };

+ * + *

MatND hist;

+ * + *

// we compute the histogram from the 0-th and 1-st channels

+ * + *

int channels[] = {0, 1};

+ * + *

calcHist(&hsv, 1, channels, Mat(), // do not use mask

+ * + *

hist, 2, histSize, ranges,

+ * + *

true, // the histogram is uniform

+ * + *

false);

+ * + *

double maxVal=0;

+ * + *

minMaxLoc(hist, 0, &maxVal, 0, 0);

+ * + *

int scale = 10;

+ * + *

Mat histImg = Mat.zeros(sbins*scale, hbins*10, CV_8UC3);

+ * + *

for(int h = 0; h < hbins; h++)

+ * + *

for(int s = 0; s < sbins; s++)

+ * + * + *

float binVal = hist.at<float>(h, s);

+ * + *

int intensity = cvRound(binVal*255/maxVal);

+ * + *

rectangle(histImg, Point(h*scale, s*scale),

+ * + *

Point((h+1)*scale - 1, (s+1)*scale - 1),

+ * + *

Scalar.all(intensity),

+ * + *

CV_FILLED);

+ * + * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

namedWindow("H-S Histogram", 1);

+ * + *

imshow("H-S Histogram", histImg);

+ * + *

waitKey();

+ * + * + * @param images Source arrays. They all should have the same depth, + * CV_8U or CV_32F, and the same size. Each of them + * can have an arbitrary number of channels. + * @param channels List of the dims channels used to compute the + * histogram. The first array channels are numerated from 0 to images[0].channels()-1, + * the second array channels are counted from images[0].channels() + * to images[0].channels() + images[1].channels()-1, and so on. + * @param mask Optional mask. If the matrix is not empty, it must be an 8-bit + * array of the same size as images[i]. The non-zero mask elements + * mark the array elements counted in the histogram. + * @param hist Output histogram, which is a dense or sparse dims + * -dimensional array. + * @param histSize Array of histogram sizes in each dimension. + * @param ranges Array of the dims arrays of the histogram bin + * boundaries in each dimension. When the histogram is uniform (uniform + * =true), then for each dimension i it is enough to specify the + * lower (inclusive) boundary L_0 of the 0-th histogram bin and the + * upper (exclusive) boundary U_(histSize[i]-1) for the last histogram + * bin histSize[i]-1. That is, in case of a uniform histogram each + * of ranges[i] is an array of 2 elements. When the histogram is + * not uniform (uniform=false), then each of ranges[i] + * contains histSize[i]+1 elements: L_0, U_0=L_1, U_1=L_2,..., + * U_(histSize[i]-2)=L_(histSize[i]-1), U_(histSize[i]-1). The array + * elements, that are not between L_0 and U_(histSize[i]-1), + * are not counted in the histogram. 
+ * + * @see org.opencv.imgproc.Imgproc.calcHist + */ + public static void calcHist(List images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat channels_mat = channels; + Mat histSize_mat = histSize; + Mat ranges_mat = ranges; + calcHist_1(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj); + + return; + } + + + // + // C++: double compareHist(Mat H1, Mat H2, int method) + // + +/** + *

Compares two histograms.

+ * + *

The functions compareHist compare two dense or two sparse + * histograms using the specified method:

+ *
    + *
  • Correlation (method=CV_COMP_CORREL) + *
+ * + *

d(H_1,H_2) = (sum_I(H_1(I) - H_1")(H_2(I) - H_2"))/(sqrt(sum_I(H_1(I) - + * H_1")^2 sum_I(H_2(I) - H_2")^2))

+ * + *

where

+ * + *

H_k" = 1/(N) sum _J H_k(J)

+ * + *

and N is a total number of histogram bins.

+ *
    + *
  • Chi-Square (method=CV_COMP_CHISQR) + *
+ * + *

d(H_1,H_2) = sum _I((H_1(I)-H_2(I))^2)/(H_1(I))

+ * + *
    + *
  • Intersection (method=CV_COMP_INTERSECT) + *
+ * + *

d(H_1,H_2) = sum _I min(H_1(I), H_2(I))

+ * + *
    + *
  • Bhattacharyya distance (method=CV_COMP_BHATTACHARYYA or + * method=CV_COMP_HELLINGER). In fact, OpenCV computes Hellinger + * distance, which is related to Bhattacharyya coefficient. + *
+ * + *

d(H_1,H_2) = sqrt(1 - frac(1)(sqrt(H_1" H_2" N^2)) sum_I sqrt(H_1(I) * + * H_2(I)))

+ * + *

The function returns d(H_1, H_2).

+ * + *

While the function works well with 1-, 2-, 3-dimensional dense histograms, it + * may not be suitable for high-dimensional sparse histograms. In such + * histograms, because of aliasing and sampling problems, the coordinates of + * non-zero histogram bins can slightly shift. To compare such histograms or + * more general sparse configurations of weighted points, consider using the + * "EMD" function.

+ * + * @param H1 First compared histogram. + * @param H2 Second compared histogram of the same size as H1. + * @param method Comparison method that could be one of the following: + *
    + *
  • CV_COMP_CORREL Correlation + *
  • CV_COMP_CHISQR Chi-Square + *
  • CV_COMP_INTERSECT Intersection + *
  • CV_COMP_BHATTACHARYYA Bhattacharyya distance + *
  • CV_COMP_HELLINGER Synonym for CV_COMP_BHATTACHARYYA + *
+ * + * @see org.opencv.imgproc.Imgproc.compareHist + */ + public static double compareHist(Mat H1, Mat H2, int method) + { + + double retVal = compareHist_0(H1.nativeObj, H2.nativeObj, method); + + return retVal; + } + + + // + // C++: double contourArea(Mat contour, bool oriented = false) + // + +/** + *

Calculates a contour area.

+ * + *

The function computes a contour area. Similarly to "moments", the area is + * computed using the Green formula. Thus, the returned area and the number of + * non-zero pixels, if you draw the contour using "drawContours" or "fillPoly", + * can be different. + * Also, the function will most certainly give a wrong results for contours with + * self-intersections. + * Example:

+ * + *

// C++ code:

+ * + *

vector contour;

+ * + *

contour.push_back(Point2f(0, 0));

+ * + *

contour.push_back(Point2f(10, 0));

+ * + *

contour.push_back(Point2f(10, 10));

+ * + *

contour.push_back(Point2f(5, 4));

+ * + *

double area0 = contourArea(contour);

+ * + *

vector approx;

+ * + *

approxPolyDP(contour, approx, 5, true);

+ * + *

double area1 = contourArea(approx);

+ * + *

cout << "area0 =" << area0 << endl <<

+ * + *

"area1 =" << area1 << endl <<

+ * + *

"approx poly vertices" << approx.size() << endl;

+ * + * @param contour Input vector of 2D points (contour vertices), stored in + * std.vector or Mat. + * @param oriented Oriented area flag. If it is true, the function returns a + * signed area value, depending on the contour orientation (clockwise or + * counter-clockwise). Using this feature you can determine orientation of a + * contour by taking the sign of an area. By default, the parameter is + * false, which means that the absolute value is returned. + * + * @see org.opencv.imgproc.Imgproc.contourArea + */ + public static double contourArea(Mat contour, boolean oriented) + { + + double retVal = contourArea_0(contour.nativeObj, oriented); + + return retVal; + } + +/** + *

Calculates a contour area.

+ * + *

The function computes a contour area. Similarly to "moments", the area is + * computed using the Green formula. Thus, the returned area and the number of + * non-zero pixels, if you draw the contour using "drawContours" or "fillPoly", + * can be different. + * Also, the function will most certainly give a wrong results for contours with + * self-intersections. + * Example:

+ * + *

// C++ code:

+ * + *

vector contour;

+ * + *

contour.push_back(Point2f(0, 0));

+ * + *

contour.push_back(Point2f(10, 0));

+ * + *

contour.push_back(Point2f(10, 10));

+ * + *

contour.push_back(Point2f(5, 4));

+ * + *

double area0 = contourArea(contour);

+ * + *

vector approx;

+ * + *

approxPolyDP(contour, approx, 5, true);

+ * + *

double area1 = contourArea(approx);

+ * + *

cout << "area0 =" << area0 << endl <<

+ * + *

"area1 =" << area1 << endl <<

+ * + *

"approx poly vertices" << approx.size() << endl;

+ * + * @param contour Input vector of 2D points (contour vertices), stored in + * std.vector or Mat. + * + * @see org.opencv.imgproc.Imgproc.contourArea + */ + public static double contourArea(Mat contour) + { + + double retVal = contourArea_1(contour.nativeObj); + + return retVal; + } + + + // + // C++: void convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false) + // + +/** + *

Converts image transformation maps from one representation to another.

+ * + *

The function converts a pair of maps for "remap" from one representation to + * another. The following options ((map1.type(), map2.type()) + * -> (dstmap1.type(), dstmap2.type())) are supported:

+ *
    + *
  • (CV_32FC1, CV_32FC1) -> (CV_16SC2, CV_16UC1). This is the + * most frequently used conversion operation, in which the original + * floating-point maps (see "remap") are converted to a more compact and much + * faster fixed-point representation. The first output array contains the + * rounded coordinates and the second array (created only when nninterpolation=false) + * contains indices in the interpolation tables. + *
  • (CV_32FC2) -> (CV_16SC2, CV_16UC1). The same as above but the + * original maps are stored in one 2-channel matrix. + *
  • Reverse conversion. Obviously, the reconstructed floating-point maps + * will not be exactly the same as the originals. + *
+ * + * @param map1 The first input map of type CV_16SC2, + * CV_32FC1, or CV_32FC2. + * @param map2 The second input map of type CV_16UC1, + * CV_32FC1, or none (empty matrix), respectively. + * @param dstmap1 The first output map that has the type dstmap1type + * and the same size as src. + * @param dstmap2 The second output map. + * @param dstmap1type Type of the first output map that should be + * CV_16SC2, CV_32FC1, or CV_32FC2. + * @param nninterpolation Flag indicating whether the fixed-point maps are used + * for the nearest-neighbor or for a more complex interpolation. + * + * @see org.opencv.imgproc.Imgproc.convertMaps + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#initUndistortRectifyMap + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type, boolean nninterpolation) + { + + convertMaps_0(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type, nninterpolation); + + return; + } + +/** + *

Converts image transformation maps from one representation to another.

+ * + *

The function converts a pair of maps for "remap" from one representation to + * another. The following options ((map1.type(), map2.type()) + * -> (dstmap1.type(), dstmap2.type())) are supported:

+ *
    + *
  • (CV_32FC1, CV_32FC1) -> (CV_16SC2, CV_16UC1). This is the + * most frequently used conversion operation, in which the original + * floating-point maps (see "remap") are converted to a more compact and much + * faster fixed-point representation. The first output array contains the + * rounded coordinates and the second array (created only when nninterpolation=false) + * contains indices in the interpolation tables. + *
  • (CV_32FC2) -> (CV_16SC2, CV_16UC1). The same as above but the + * original maps are stored in one 2-channel matrix. + *
  • Reverse conversion. Obviously, the reconstructed floating-point maps + * will not be exactly the same as the originals. + *
+ * + * @param map1 The first input map of type CV_16SC2, + * CV_32FC1, or CV_32FC2. + * @param map2 The second input map of type CV_16UC1, + * CV_32FC1, or none (empty matrix), respectively. + * @param dstmap1 The first output map that has the type dstmap1type + * and the same size as src. + * @param dstmap2 The second output map. + * @param dstmap1type Type of the first output map that should be + * CV_16SC2, CV_32FC1, or CV_32FC2. + * + * @see org.opencv.imgproc.Imgproc.convertMaps + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#initUndistortRectifyMap + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type) + { + + convertMaps_1(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type); + + return; + } + + + // + // C++: void convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true) + // + +/** + *

Finds the convex hull of a point set.

+ * + *

The functions find the convex hull of a 2D point set using the Sklansky's + * algorithm [Sklansky82] that has *O(N logN)* complexity in the current + * implementation. See the OpenCV sample convexhull.cpp that + * demonstrates the usage of different function variants.

+ * + * @param points Input 2D point set, stored in std.vector or + * Mat. + * @param hull Output convex hull. It is either an integer vector of indices or + * vector of points. In the first case, the hull elements are + * 0-based indices of the convex hull points in the original array (since the + * set of convex hull points is a subset of the original point set). In the + * second case, hull elements are the convex hull points + * themselves. + * @param clockwise Orientation flag. If it is true, the output convex hull is + * oriented clockwise. Otherwise, it is oriented counter-clockwise. The usual + * screen coordinate system is assumed so that the origin is at the top-left + * corner, x axis is oriented to the right, and y axis is oriented downwards. + * + * @see org.opencv.imgproc.Imgproc.convexHull + */ + public static void convexHull(MatOfPoint points, MatOfInt hull, boolean clockwise) + { + Mat points_mat = points; + Mat hull_mat = hull; + convexHull_0(points_mat.nativeObj, hull_mat.nativeObj, clockwise); + + return; + } + +/** + *

Finds the convex hull of a point set.

+ * + *

The functions find the convex hull of a 2D point set using the Sklansky's + * algorithm [Sklansky82] that has *O(N logN)* complexity in the current + * implementation. See the OpenCV sample convexhull.cpp that + * demonstrates the usage of different function variants.

+ * + * @param points Input 2D point set, stored in std.vector or + * Mat. + * @param hull Output convex hull. It is either an integer vector of indices or + * vector of points. In the first case, the hull elements are + * 0-based indices of the convex hull points in the original array (since the + * set of convex hull points is a subset of the original point set). In the + * second case, hull elements are the convex hull points + * themselves. + * + * @see org.opencv.imgproc.Imgproc.convexHull + */ + public static void convexHull(MatOfPoint points, MatOfInt hull) + { + Mat points_mat = points; + Mat hull_mat = hull; + convexHull_1(points_mat.nativeObj, hull_mat.nativeObj); + + return; + } + + + // + // C++: void convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects) + // + +/** + *

Finds the convexity defects of a contour.

+ * + *

The function finds all convexity defects of the input contour and returns a + * sequence of the CvConvexityDefect structures, where + * CvConvexityDetect is defined as:

+ * + *

// C++ code:

+ * + *

struct CvConvexityDefect

+ * + * + *

CvPoint* start; // point of the contour where the defect begins

+ * + *

CvPoint* end; // point of the contour where the defect ends

+ * + *

CvPoint* depth_point; // the farthest from the convex hull point within the + * defect

+ * + *

float depth; // distance between the farthest point and the convex hull

+ * + *

};

+ * + *

The figure below displays convexity defects of a hand contour:

+ * + * @param contour Input contour. + * @param convexhull Convex hull obtained using "convexHull" that should contain + * indices of the contour points that make the hull. + * @param convexityDefects The output vector of convexity defects. In C++ and + * the new Python/Java interface each convexity defect is represented as + * 4-element integer vector (a.k.a. cv.Vec4i): (start_index, + * end_index, farthest_pt_index, fixpt_depth), where indices are 0-based + * indices in the original contour of the convexity defect beginning, end and + * the farthest point, and fixpt_depth is fixed-point approximation + * (with 8 fractional bits) of the distance between the farthest contour point + * and the hull. That is, to get the floating-point value of the depth will be + * fixpt_depth/256.0. In C interface convexity defect is + * represented by CvConvexityDefect structure - see below. + * + * @see org.opencv.imgproc.Imgproc.convexityDefects + */ + public static void convexityDefects(MatOfPoint contour, MatOfInt convexhull, MatOfInt4 convexityDefects) + { + Mat contour_mat = contour; + Mat convexhull_mat = convexhull; + Mat convexityDefects_mat = convexityDefects; + convexityDefects_0(contour_mat.nativeObj, convexhull_mat.nativeObj, convexityDefects_mat.nativeObj); + + return; + } + + + // + // C++: void copyMakeBorder(Mat src, Mat& dst, int top, int bottom, int left, int right, int borderType, Scalar value = Scalar()) + // + +/** + *

Forms a border around an image.

+ * + *

The function copies the source image into the middle of the destination + * image. The areas to the left, to the right, above and below the copied source + * image will be filled with extrapolated pixels. This is not what + * "FilterEngine" or filtering functions based on it do (they extrapolate pixels + * on-fly), but what other more complex functions, including your own, may do to + * simplify image boundary handling. + * The function supports the mode when src is already in the middle + * of dst. In this case, the function does not copy + * src itself but simply constructs the border, for example:

+ * + *

// C++ code:

+ * + *

// let border be the same in all directions

+ * + *

int border=2;

+ * + *

// constructs a larger image to fit both the image and the border

+ * + *

Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());

+ * + *

// select the middle part of it w/o copying data

+ * + *

Mat gray(gray_canvas, Rect(border, border, rgb.cols, rgb.rows));

+ * + *

// convert image from RGB to grayscale

+ * + *

cvtColor(rgb, gray, CV_RGB2GRAY);

+ * + *

// form a border in-place

+ * + *

copyMakeBorder(gray, gray_buf, border, border,

+ * + *

border, border, BORDER_REPLICATE);

+ * + *

// now do some custom filtering......

+ * + *

Note:

+ * + *

When the source image is a part (ROI) of a bigger image, the function will + * try to use the pixels outside of the ROI to form a border. To disable this + * feature and always do extrapolation, as if src was not a ROI, + * use borderType | BORDER_ISOLATED.

+ * + * @param src Source image. + * @param dst Destination image of the same type as src and the + * size Size(src.cols+left+right, src.rows+top+bottom). + * @param top a top + * @param bottom a bottom + * @param left a left + * @param right Parameter specifying how many pixels in each direction from the + * source image rectangle to extrapolate. For example, top=1, bottom=1, + * left=1, right=1 mean that 1 pixel-wide border needs to be built. + * @param borderType Border type. See "borderInterpolate" for details. + * @param value Border value if borderType==BORDER_CONSTANT. + * + * @see org.opencv.imgproc.Imgproc.copyMakeBorder + * @see org.opencv.imgproc.Imgproc#borderInterpolate + */ + public static void copyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType, Scalar value) + { + + copyMakeBorder_0(src.nativeObj, dst.nativeObj, top, bottom, left, right, borderType, value.val[0], value.val[1], value.val[2], value.val[3]); + + return; + } + +/** + *

Forms a border around an image.

+ * + *

The function copies the source image into the middle of the destination + * image. The areas to the left, to the right, above and below the copied source + * image will be filled with extrapolated pixels. This is not what + * "FilterEngine" or filtering functions based on it do (they extrapolate pixels + * on-fly), but what other more complex functions, including your own, may do to + * simplify image boundary handling. + * The function supports the mode when src is already in the middle + * of dst. In this case, the function does not copy + * src itself but simply constructs the border, for example:

+ * + *

// C++ code:

+ * + *

// let border be the same in all directions

+ * + *

int border=2;

+ * + *

// constructs a larger image to fit both the image and the border

+ * + *

Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());

+ * + *

// select the middle part of it w/o copying data

+ * + *

Mat gray(gray_canvas, Rect(border, border, rgb.cols, rgb.rows));

+ * + *

// convert image from RGB to grayscale

+ * + *

cvtColor(rgb, gray, CV_RGB2GRAY);

+ * + *

// form a border in-place

+ * + *

copyMakeBorder(gray, gray_buf, border, border,

+ * + *

border, border, BORDER_REPLICATE);

+ * + *

// now do some custom filtering......

+ * + *

Note:

+ * + *

When the source image is a part (ROI) of a bigger image, the function will + * try to use the pixels outside of the ROI to form a border. To disable this + * feature and always do extrapolation, as if src was not a ROI, + * use borderType | BORDER_ISOLATED.

+ * + * @param src Source image. + * @param dst Destination image of the same type as src and the + * size Size(src.cols+left+right, src.rows+top+bottom). + * @param top a top + * @param bottom a bottom + * @param left a left + * @param right Parameter specifying how many pixels in each direction from the + * source image rectangle to extrapolate. For example, top=1, bottom=1, + * left=1, right=1 mean that 1 pixel-wide border needs to be built. + * @param borderType Border type. See "borderInterpolate" for details. + * + * @see org.opencv.imgproc.Imgproc.copyMakeBorder + * @see org.opencv.imgproc.Imgproc#borderInterpolate + */ + public static void copyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType) + { + + copyMakeBorder_1(src.nativeObj, dst.nativeObj, top, bottom, left, right, borderType); + + return; + } + + + // + // C++: void cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates eigenvalues and eigenvectors of image blocks for corner detection.

+ * + *

For every pixel p, the function cornerEigenValsAndVecs + * considers a blockSize x blockSize + * neighborhood S(p). It calculates the covariation matrix of + * derivatives over the neighborhood as:

+ * + *

M = sum(by: S(p))(dI/dx)^2 sum(by: S(p))(dI/dx dI/dy)^2 + * sum(by: S(p))(dI/dx dI/dy)^2 sum(by: S(p))(dI/dy)^2

+ * + *

where the derivatives are computed using the "Sobel" operator.

+ * + *

After that, it finds eigenvectors and eigenvalues of M and stores + * them in the destination image as (lambda_1, lambda_2, x_1, y_1, x_2, + * y_2) where

+ *
    + *
  • lambda_1, lambda_2 are the non-sorted eigenvalues of + * M + *
  • x_1, y_1 are the eigenvectors corresponding to + * lambda_1 + *
  • x_2, y_2 are the eigenvectors corresponding to + * lambda_2 + *
+ * + *

The output of the function can be used for robust edge or corner detection.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the results. It has the same size as + * src and the type CV_32FC(6). + * @param blockSize Neighborhood size (see details below). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.cornerEigenValsAndVecs + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.imgproc.Imgproc#preCornerDetect + */ + public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize, int borderType) + { + + cornerEigenValsAndVecs_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType); + + return; + } + +/** + *

Calculates eigenvalues and eigenvectors of image blocks for corner detection.

+ * + *

For every pixel p, the function cornerEigenValsAndVecs + * considers a blockSize x blockSize + * neighborhood S(p). It calculates the covariation matrix of + * derivatives over the neighborhood as:

+ * + *

M = sum(by: S(p))(dI/dx)^2 sum(by: S(p))(dI/dx dI/dy)^2 + * sum(by: S(p))(dI/dx dI/dy)^2 sum(by: S(p))(dI/dy)^2

+ * + *

where the derivatives are computed using the "Sobel" operator.

+ * + *

After that, it finds eigenvectors and eigenvalues of M and stores + * them in the destination image as (lambda_1, lambda_2, x_1, y_1, x_2, + * y_2) where

+ *
    + *
  • lambda_1, lambda_2 are the non-sorted eigenvalues of + * M + *
  • x_1, y_1 are the eigenvectors corresponding to + * lambda_1 + *
  • x_2, y_2 are the eigenvectors corresponding to + * lambda_2 + *
+ * + *

The output of the function can be used for robust edge or corner detection.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the results. It has the same size as + * src and the type CV_32FC(6). + * @param blockSize Neighborhood size (see details below). + * @param ksize Aperture parameter for the "Sobel" operator. + * + * @see org.opencv.imgproc.Imgproc.cornerEigenValsAndVecs + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.imgproc.Imgproc#preCornerDetect + */ + public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize) + { + + cornerEigenValsAndVecs_1(src.nativeObj, dst.nativeObj, blockSize, ksize); + + return; + } + + + // + // C++: void cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT) + // + +/** + *

Harris edge detector.

+ * + *

The function runs the Harris edge detector on the image. Similarly to + * "cornerMinEigenVal" and "cornerEigenValsAndVecs", for each pixel (x, + * y) it calculates a 2x2 gradient covariance matrix + * M^((x,y)) over a blockSize x blockSize neighborhood. Then, + * it computes the following characteristic:

+ * + *

dst(x,y) = det M^((x,y)) - k * (tr M^((x,y)))^2

+ * + *

Corners in the image can be found as the local maxima of this response map.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the Harris detector responses. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param k Harris detector free parameter. See the formula below. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.cornerHarris + */ + public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k, int borderType) + { + + cornerHarris_0(src.nativeObj, dst.nativeObj, blockSize, ksize, k, borderType); + + return; + } + +/** + *

Harris edge detector.

+ * + *

The function runs the Harris edge detector on the image. Similarly to + * "cornerMinEigenVal" and "cornerEigenValsAndVecs", for each pixel (x, + * y) it calculates a 2x2 gradient covariance matrix + * M^((x,y)) over a blockSize x blockSize neighborhood. Then, + * it computes the following characteristic:

+ * + *

dst(x,y) = det M^((x,y)) - k * (tr M^((x,y)))^2

+ * + *

Corners in the image can be found as the local maxima of this response map.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the Harris detector responses. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param k Harris detector free parameter. See the formula below. + * + * @see org.opencv.imgproc.Imgproc.cornerHarris + */ + public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k) + { + + cornerHarris_1(src.nativeObj, dst.nativeObj, blockSize, ksize, k); + + return; + } + + + // + // C++: void cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the minimal eigenvalue of gradient matrices for corner detection.

+ * + *

The function is similar to "cornerEigenValsAndVecs" but it calculates and + * stores only the minimal eigenvalue of the covariance matrix of derivatives, + * that is, min(lambda_1, lambda_2) in terms of the formulae in the + * "cornerEigenValsAndVecs" description.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the minimal eigenvalues. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.cornerMinEigenVal + */ + public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize, int borderType) + { + + cornerMinEigenVal_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType); + + return; + } + +/** + *

Calculates the minimal eigenvalue of gradient matrices for corner detection.

+ * + *

The function is similar to "cornerEigenValsAndVecs" but it calculates and + * stores only the minimal eigenvalue of the covariance matrix of derivatives, + * that is, min(lambda_1, lambda_2) in terms of the formulae in the + * "cornerEigenValsAndVecs" description.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the minimal eigenvalues. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * + * @see org.opencv.imgproc.Imgproc.cornerMinEigenVal + */ + public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize) + { + + cornerMinEigenVal_1(src.nativeObj, dst.nativeObj, blockSize, ksize); + + return; + } + +/** + *

Calculates the minimal eigenvalue of gradient matrices for corner detection.

+ * + *

The function is similar to "cornerEigenValsAndVecs" but it calculates and + * stores only the minimal eigenvalue of the covariance matrix of derivatives, + * that is, min(lambda_1, lambda_2) in terms of the formulae in the + * "cornerEigenValsAndVecs" description.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the minimal eigenvalues. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * + * @see org.opencv.imgproc.Imgproc.cornerMinEigenVal + */ + public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize) + { + + cornerMinEigenVal_2(src.nativeObj, dst.nativeObj, blockSize); + + return; + } + + + // + // C++: void cornerSubPix(Mat image, vector_Point2f& corners, Size winSize, Size zeroZone, TermCriteria criteria) + // + +/** + *

Refines the corner locations.

+ * + *

The function iterates to find the sub-pixel accurate location of corners or + * radial saddle points, as shown on the figure below.

+ * + *

Sub-pixel accurate corner locator is based on the observation that every + * vector from the center q to a point p located within a + * neighborhood of q is orthogonal to the image gradient at p + * subject to image and measurement noise. Consider the expression:

+ * + *

epsilon _i = (DI_(p_i))^T * (q - p_i)

+ * + *

where (DI_(p_i)) is an image gradient at one of the points + * p_i in a neighborhood of q. The value of q is to + * be found so that epsilon_i is minimized. A system of equations may + * be set up with epsilon_i set to zero:

+ * + *

sum _i(DI_(p_i) * (DI_(p_i))^T) - sum _i(DI_(p_i) * (DI_(p_i))^T * + * p_i)

+ * + *

where the gradients are summed within a neighborhood ("search window") of + * q. Calling the first gradient term G and the second + * gradient term b gives:

+ * + *

q = G^(-1) * b

+ * + *

The algorithm sets the center of the neighborhood window at this new center + * q and then iterates until the center stays within a set threshold.

+ * + * @param image Input image. + * @param corners Initial coordinates of the input corners and refined + * coordinates provided for output. + * @param winSize Half of the side length of the search window. For example, if + * winSize=Size(5,5), then a 5*2+1 x 5*2+1 = 11 x 11 + * search window is used. + * @param zeroZone Half of the size of the dead region in the middle of the + * search zone over which the summation in the formula below is not done. It is + * used sometimes to avoid possible singularities of the autocorrelation matrix. + * The value of (-1,-1) indicates that there is no such a size. + * @param criteria Criteria for termination of the iterative process of corner + * refinement. That is, the process of corner position refinement stops either + * after criteria.maxCount iterations or when the corner position + * moves by less than criteria.epsilon on some iteration. + * + * @see org.opencv.imgproc.Imgproc.cornerSubPix + */ + public static void cornerSubPix(Mat image, MatOfPoint2f corners, Size winSize, Size zeroZone, TermCriteria criteria) + { + Mat corners_mat = corners; + cornerSubPix_0(image.nativeObj, corners_mat.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon); + + return; + } + + + // + // C++: void createHanningWindow(Mat& dst, Size winSize, int type) + // + +/** + *

This function computes a Hanning window coefficients in two dimensions. See + * http://en.wikipedia.org/wiki/Hann_function and http://en.wikipedia.org/wiki/Window_function + * for more information.

+ * + *

An example is shown below:

+ * + *

// C++ code:

+ * + *

// create hanning window of size 100x100 and type CV_32F

+ * + *

Mat hann;

+ * + *

createHanningWindow(hann, Size(100, 100), CV_32F);

+ * + * @param dst Destination array to place Hann coefficients in + * @param winSize The window size specifications + * @param type Created array type + * + * @see org.opencv.imgproc.Imgproc.createHanningWindow + * @see org.opencv.imgproc.Imgproc#phaseCorrelate + */ + public static void createHanningWindow(Mat dst, Size winSize, int type) + { + + createHanningWindow_0(dst.nativeObj, winSize.width, winSize.height, type); + + return; + } + + + // + // C++: void cvtColor(Mat src, Mat& dst, int code, int dstCn = 0) + // + +/** + *

Converts an image from one color space to another.

+ * + *

The function converts an input image from one color space to another. In case + * of a transformation to-from RGB color space, the order of the channels should + * be specified explicitly (RGB or BGR). + * Note that the default color format in OpenCV is often referred to as RGB but + * it is actually BGR (the bytes are reversed). So the first byte in a standard + * (24-bit) color image will be an 8-bit Blue component, the second byte will be + * Green, and the third byte will be Red. The fourth, fifth, and sixth bytes + * would then be the second pixel (Blue, then Green, then Red), and so on.

+ * + *

The conventional ranges for R, G, and B channel values are:

+ *
    + *
  • 0 to 255 for CV_8U images + *
  • 0 to 65535 for CV_16U images + *
  • 0 to 1 for CV_32F images + *
+ * + *

In case of linear transformations, the range does not matter. + * But in case of a non-linear transformation, an input RGB image should be + * normalized to the proper value range to get the correct results, for example, + * for RGB-> L*u*v* transformation. For example, if you have a 32-bit + * floating-point image directly converted from an 8-bit image without any + * scaling, then it will have the 0..255 value range instead of 0..1 assumed by + * the function. So, before calling cvtColor, you need first to + * scale the image down:

+ * + *

// C++ code:

+ * + *

img *= 1./255;

+ * + *

cvtColor(img, img, CV_BGR2Luv);

+ * + *

If you use cvtColor with 8-bit images, the conversion will have + * some information lost. For many applications, this will not be noticeable but + * it is recommended to use 32-bit images in applications that need the full + * range of colors or that convert an image before an operation and then convert + * back. + *

+ * + *

The function can do the following transformations:

+ *
    + *
  • Transformations within RGB space like adding/removing the alpha + * channel, reversing the channel order, conversion to/from 16-bit RGB color + * (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using: + *
+ * + *

RGB[A] to Gray: Y <- 0.299 * R + 0.587 * G + 0.114 * B

+ * + *

and

+ * + *

Gray to RGB[A]: R <- Y, G <- Y, B <- Y, A <- 0

+ * + *

The conversion from a RGB image to gray is done with:

+ * + *

+ * + *

// C++ code:

+ * + *

cvtColor(src, bwsrc, CV_RGB2GRAY);

+ * + *

+ * + *

More advanced channel reordering can also be done with "mixChannels".

+ *
    + *
  • RGB <-> CIE XYZ.Rec 709 with D65 white point + * (CV_BGR2XYZ, CV_RGB2XYZ, CV_XYZ2BGR, CV_XYZ2RGB): + *
+ * + *

X + * Z ltBR gt <- 0.412453 0.357580 0.180423 + * 0.212671 0.715160 0.072169 + * 0.019334 0.119193 0.950227 ltBR gt * R + * B ltBR gt

+ * + * + * + *

R + * B ltBR gt <- 3.240479 -1.53715 -0.498535 + * -0.969256 1.875991 0.041556 + * 0.055648 -0.204043 1.057311 ltBR gt * X + * Z ltBR gt

+ * + *

X, Y and Z cover the whole value range (in case of + * floating-point images, Z may exceed 1).

+ *
    + *
  • RGB <-> YCrCb JPEG (or YCC) (CV_BGR2YCrCb, + * CV_RGB2YCrCb, CV_YCrCb2BGR, CV_YCrCb2RGB) + *
+ * + *

Y <- 0.299 * R + 0.587 * G + 0.114 * B

+ * + * + * + *

Cr <- (R-Y) * 0.713 + delta

+ * + * + * + *

Cb <- (B-Y) * 0.564 + delta

+ * + * + * + *

R <- Y + 1.403 * (Cr - delta)

+ * + * + * + *

G <- Y - 0.714 * (Cr - delta) - 0.344 * (Cb - delta)

+ * + * + * + *

B <- Y + 1.773 * (Cb - delta)

+ * + *

where

+ * + *

delta = <= ft (128 for 8-bit images + * 32768 for 16-bit images + * 0.5 for floating-point images right.

+ * + *

Y, Cr, and Cb cover the whole value range.

+ *
    + *
  • RGB <-> HSV (CV_BGR2HSV, CV_RGB2HSV, CV_HSV2BGR, + * CV_HSV2RGB) In case of 8-bit and 16-bit images, R, G, and B are + * converted to the floating-point format and scaled to fit the 0 to 1 range. + *
+ * + *

V <- max(R,G,B)

+ * + * + * + *

S <- (V-min(R,G,B))/(V) if V != 0; 0 otherwise

+ * + * + * + *

H <- (60(G - B))/((V-min(R,G,B))) if V=R; (120+60(B - R))/((V-min(R,G,B))) + * if V=G; (240+60(R - G))/((V-min(R,G,B))) if V=B

+ * + *

If H<0 then H <- H+360. On output 0 <= V <= 1, + * 0 <= S <= 1, 0 <= H <= 360.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

V <- 255 V, S <- 255 S, H <- H/2(to fit to 0 to 255)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
+ * + *

V <- 65535 V, S <- 65535 S, H <- H

+ * + *
    + *
  • 32-bit images H, S, and V are left as is + *
  • RGB <-> HLS (CV_BGR2HLS, CV_RGB2HLS, CV_HLS2BGR, + * CV_HLS2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit the 0 to 1 range.

+ * + *

V_(max) <- (max)(R,G,B)

+ * + * + * + *

V_(min) <- (min)(R,G,B)

+ * + * + * + *

L <- (V_(max) + V_(min))/2

+ * + * + * + *

S <- fork ((V_(max) - V_(min))/(V_(max) + V_(min)))(if L < + * 0.5)<BR>((V_(max) - V_(min))/(2 - (V_(max) + V_(min))))(if L >= 0.5)

+ * + * + * + *

H <- forkthree ((60(G - B))/(S))(if V_(max)=R)<BR>((120+60(B - + * R))/(S))(if V_(max)=G)<BR>((240+60(R - G))/(S))(if V_(max)=B)

+ * + *

If H<0 then H <- H+360. On output 0 <= L <= 1, + * 0 <= S <= 1, 0 <= H <= 360.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

V <- 255 * V, S <- 255 * S, H <- H/2(to fit to 0 to 255)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
+ * + *

V <- 65535 * V, S <- 65535 * S, H <- H

+ * + *
    + *
  • 32-bit images H, S, V are left as is + *
  • RGB <-> CIE L*a*b* (CV_BGR2Lab, CV_RGB2Lab, CV_Lab2BGR, + * CV_Lab2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit the 0 to 1 range.

+ * + *

[X Y Z] <- + * |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227|

+ *
    + *
  • [R G B] + * + * + *
+ * + *

X <- X/X_n, where X_n = 0.950456

+ * + * + * + *

Z <- Z/Z_n, where Z_n = 1.088754

+ * + * + * + *

L <- 116*Y^(1/3)-16 for Y>0.008856; 903.3*Y for Y <= 0.008856

+ * + * + * + *

a <- 500(f(X)-f(Y)) + delta

+ * + * + * + *

b <- 200(f(Y)-f(Z)) + delta

+ * + *

where

+ * + *

f(t)= t^(1/3) for t>0.008856; 7.787 t+16/116 for t <= 0.008856

+ * + *

and

+ * + *

delta = 128 for 8-bit images; 0 for floating-point images

+ * + *

This outputs 0 <= L <= 100, -127 <= a <= 127, -127 <= b + * <= 127. The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

L <- L*255/100, a <- a + 128, b <- b + 128

+ * + *
    + *
  • 16-bit images (currently not supported) + *
  • 32-bit images L, a, and b are left as is + *
  • RGB <-> CIE L*u*v* (CV_BGR2Luv, CV_RGB2Luv, CV_Luv2BGR, + * CV_Luv2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit 0 to 1 range.

+ * + *

[X Y Z] <- + * |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227|

+ *
    + *
  • [R G B] + * + * + *
+ * + *

L <- 116 Y^(1/3) for Y>0.008856; 903.3 Y for Y <= 0.008856

+ * + * + * + *

u' <- 4*X/(X + 15*Y + 3 Z)

+ * + * + * + *

v' <- 9*Y/(X + 15*Y + 3 Z)

+ * + * + * + *

u <- 13*L*(u' - u_n) where u_n=0.19793943

+ * + * + * + *

v <- 13*L*(v' - v_n) where v_n=0.46831096

+ * + *

This outputs 0 <= L <= 100, -134 <= u <= 220, -140 <= v + * <= 122.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

L <- 255/100 L, u <- 255/354(u + 134), v <- 255/256(v + 140)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
  • 32-bit images L, u, and v are left as is + *
+ * + *

The above formulae for converting RGB to/from various color spaces have been + * taken from multiple sources on the web, primarily from the Charles Poynton + * site http://www.poynton.com/ColorFAQ.html

+ *
    + *
  • Bayer -> RGB (CV_BayerBG2BGR, CV_BayerGB2BGR, + * CV_BayerRG2BGR, CV_BayerGR2BGR, CV_BayerBG2RGB, CV_BayerGB2RGB, + * CV_BayerRG2RGB, CV_BayerGR2RGB). The Bayer pattern is widely used in + * CCD and CMOS cameras. It enables you to get color pictures from a single + * plane where R,G, and B pixels (sensors of a particular component) are + * interleaved as follows: The output RGB components of a pixel are interpolated + * from 1, 2, or + *
+ * + *

// C++ code:

+ * + *

4 neighbors of the pixel having the same color. There are several

+ * + *

modifications of the above pattern that can be achieved by shifting

+ * + *

the pattern one pixel left and/or one pixel up. The two letters

+ * + *

C_1 and

+ * + *

C_2 in the conversion constants CV_Bayer C_1 + * C_2 2BGR and CV_Bayer C_1 C_2 + * 2RGB indicate the particular pattern

+ * + *

type. These are components from the second row, second and third

+ * + *

columns, respectively. For example, the above pattern has a very

+ * + *

popular "BG" type.

+ * + * @param src input image: 8-bit unsigned, 16-bit unsigned (CV_16UC...), + * or single-precision floating-point. + * @param dst output image of the same size and depth as src. + * @param code color space conversion code (see the description below). + * @param dstCn number of channels in the destination image; if the parameter is + * 0, the number of the channels is derived automatically from src + * and code. + * + * @see org.opencv.imgproc.Imgproc.cvtColor + */ + public static void cvtColor(Mat src, Mat dst, int code, int dstCn) + { + + cvtColor_0(src.nativeObj, dst.nativeObj, code, dstCn); + + return; + } + +/** + *

Converts an image from one color space to another.

+ * + *

The function converts an input image from one color space to another. In case + * of a transformation to-from RGB color space, the order of the channels should + * be specified explicitly (RGB or BGR). + * Note that the default color format in OpenCV is often referred to as RGB but + * it is actually BGR (the bytes are reversed). So the first byte in a standard + * (24-bit) color image will be an 8-bit Blue component, the second byte will be + * Green, and the third byte will be Red. The fourth, fifth, and sixth bytes + * would then be the second pixel (Blue, then Green, then Red), and so on.

+ * + *

The conventional ranges for R, G, and B channel values are:

+ *
    + *
  • 0 to 255 for CV_8U images + *
  • 0 to 65535 for CV_16U images + *
  • 0 to 1 for CV_32F images + *
+ * + *

In case of linear transformations, the range does not matter. + * But in case of a non-linear transformation, an input RGB image should be + * normalized to the proper value range to get the correct results, for example, + * for RGB-> L*u*v* transformation. For example, if you have a 32-bit + * floating-point image directly converted from an 8-bit image without any + * scaling, then it will have the 0..255 value range instead of 0..1 assumed by + * the function. So, before calling cvtColor, you need first to + * scale the image down:

+ * + *

// C++ code:

+ * + *

img *= 1./255;

+ * + *

cvtColor(img, img, CV_BGR2Luv);

+ * + *

If you use cvtColor with 8-bit images, the conversion will have + * some information lost. For many applications, this will not be noticeable but + * it is recommended to use 32-bit images in applications that need the full + * range of colors or that convert an image before an operation and then convert + * back. + *

+ * + *

The function can do the following transformations:

+ *
    + *
  • Transformations within RGB space like adding/removing the alpha + * channel, reversing the channel order, conversion to/from 16-bit RGB color + * (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using: + *
+ * + *

RGB[A] to Gray: Y <- 0.299 * R + 0.587 * G + 0.114 * B

+ * + *

and

+ * + *

Gray to RGB[A]: R <- Y, G <- Y, B <- Y, A <- 0

+ * + *

The conversion from a RGB image to gray is done with:

+ * + *

+ * + *

// C++ code:

+ * + *

cvtColor(src, bwsrc, CV_RGB2GRAY);

+ * + *

+ * + *

More advanced channel reordering can also be done with "mixChannels".

+ *
    + *
  • RGB <-> CIE XYZ.Rec 709 with D65 white point + * (CV_BGR2XYZ, CV_RGB2XYZ, CV_XYZ2BGR, CV_XYZ2RGB): + *
+ * + *

X + * Z ltBR gt <- 0.412453 0.357580 0.180423 + * 0.212671 0.715160 0.072169 + * 0.019334 0.119193 0.950227 ltBR gt * R + * B ltBR gt

+ * + * + * + *

R + * B ltBR gt <- 3.240479 -1.53715 -0.498535 + * -0.969256 1.875991 0.041556 + * 0.055648 -0.204043 1.057311 ltBR gt * X + * Z ltBR gt

+ * + *

X, Y and Z cover the whole value range (in case of + * floating-point images, Z may exceed 1).

+ *
    + *
  • RGB <-> YCrCb JPEG (or YCC) (CV_BGR2YCrCb, + * CV_RGB2YCrCb, CV_YCrCb2BGR, CV_YCrCb2RGB) + *
+ * + *

Y <- 0.299 * R + 0.587 * G + 0.114 * B

+ * + * + * + *

Cr <- (R-Y) * 0.713 + delta

+ * + * + * + *

Cb <- (B-Y) * 0.564 + delta

+ * + * + * + *

R <- Y + 1.403 * (Cr - delta)

+ * + * + * + *

G <- Y - 0.714 * (Cr - delta) - 0.344 * (Cb - delta)

+ * + * + * + *

B <- Y + 1.773 * (Cb - delta)

+ * + *

where

+ * + *

delta = <= ft (128 for 8-bit images + * 32768 for 16-bit images + * 0.5 for floating-point images right.

+ * + *

Y, Cr, and Cb cover the whole value range.

+ *
    + *
  • RGB <-> HSV (CV_BGR2HSV, CV_RGB2HSV, CV_HSV2BGR, + * CV_HSV2RGB) In case of 8-bit and 16-bit images, R, G, and B are + * converted to the floating-point format and scaled to fit the 0 to 1 range. + *
+ * + *

V <- max(R,G,B)

+ * + * + * + *

S <- (V-min(R,G,B))/(V) if V != 0; 0 otherwise

+ * + * + * + *

H <- (60(G - B))/((V-min(R,G,B))) if V=R; (120+60(B - R))/((V-min(R,G,B))) + * if V=G; (240+60(R - G))/((V-min(R,G,B))) if V=B

+ * + *

If H<0 then H <- H+360. On output 0 <= V <= 1, + * 0 <= S <= 1, 0 <= H <= 360.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

V <- 255 V, S <- 255 S, H <- H/2(to fit to 0 to 255)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
+ * + *

V <- 65535 V, S <- 65535 S, H <- H

+ * + *
    + *
  • 32-bit images H, S, and V are left as is + *
  • RGB <-> HLS (CV_BGR2HLS, CV_RGB2HLS, CV_HLS2BGR, + * CV_HLS2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit the 0 to 1 range.

+ * + *

V_(max) <- (max)(R,G,B)

+ * + * + * + *

V_(min) <- (min)(R,G,B)

+ * + * + * + *

L <- (V_(max) + V_(min))/2

+ * + * + * + *

S <- fork ((V_(max) - V_(min))/(V_(max) + V_(min)))(if L < + * 0.5)<BR>((V_(max) - V_(min))/(2 - (V_(max) + V_(min))))(if L >= 0.5)

+ * + * + * + *

H <- forkthree ((60(G - B))/(S))(if V_(max)=R)<BR>((120+60(B - + * R))/(S))(if V_(max)=G)<BR>((240+60(R - G))/(S))(if V_(max)=B)

+ * + *

If H<0 then H <- H+360. On output 0 <= L <= 1, + * 0 <= S <= 1, 0 <= H <= 360.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

V <- 255 * V, S <- 255 * S, H <- H/2(to fit to 0 to 255)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
+ * + *

V <- 65535 * V, S <- 65535 * S, H <- H

+ * + *
    + *
  • 32-bit images H, S, V are left as is + *
  • RGB <-> CIE L*a*b* (CV_BGR2Lab, CV_RGB2Lab, CV_Lab2BGR, + * CV_Lab2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit the 0 to 1 range.

+ * + *

[X Y Z] <- + * |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227|

+ *
    + *
  • [R G B] + * + * + *
+ * + *

X <- X/X_n, where X_n = 0.950456

+ * + * + * + *

Z <- Z/Z_n, where Z_n = 1.088754

+ * + * + * + *

L <- 116*Y^(1/3)-16 for Y>0.008856; 903.3*Y for Y <= 0.008856

+ * + * + * + *

a <- 500(f(X)-f(Y)) + delta

+ * + * + * + *

b <- 200(f(Y)-f(Z)) + delta

+ * + *

where

+ * + *

f(t)= t^(1/3) for t>0.008856; 7.787 t+16/116 for t <= 0.008856

+ * + *

and

+ * + *

delta = 128 for 8-bit images; 0 for floating-point images

+ * + *

This outputs 0 <= L <= 100, -127 <= a <= 127, -127 <= b + * <= 127. The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

L <- L*255/100, a <- a + 128, b <- b + 128

+ * + *
    + *
  • 16-bit images (currently not supported) + *
  • 32-bit images L, a, and b are left as is + *
  • RGB <-> CIE L*u*v* (CV_BGR2Luv, CV_RGB2Luv, CV_Luv2BGR, + * CV_Luv2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit 0 to 1 range.

+ * + *

[X Y Z] <- + * |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227|

+ *
    + *
  • [R G B] + * + * + *
+ * + *

L <- 116 Y^(1/3) for Y>0.008856; 903.3 Y for Y <= 0.008856

+ * + * + * + *

u' <- 4*X/(X + 15*Y + 3 Z)

+ * + * + * + *

v' <- 9*Y/(X + 15*Y + 3 Z)

+ * + * + * + *

u <- 13*L*(u' - u_n) where u_n=0.19793943

+ * + * + * + *

v <- 13*L*(v' - v_n) where v_n=0.46831096

+ * + *

This outputs 0 <= L <= 100, -134 <= u <= 220, -140 <= v + * <= 122.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

L <- 255/100 L, u <- 255/354(u + 134), v <- 255/256(v + 140)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
  • 32-bit images L, u, and v are left as is + *
+ * + *

The above formulae for converting RGB to/from various color spaces have been + * taken from multiple sources on the web, primarily from the Charles Poynton + * site http://www.poynton.com/ColorFAQ.html

+ *
    + *
  • Bayer -> RGB (CV_BayerBG2BGR, CV_BayerGB2BGR, + * CV_BayerRG2BGR, CV_BayerGR2BGR, CV_BayerBG2RGB, CV_BayerGB2RGB, + * CV_BayerRG2RGB, CV_BayerGR2RGB). The Bayer pattern is widely used in + * CCD and CMOS cameras. It enables you to get color pictures from a single + * plane where R,G, and B pixels (sensors of a particular component) are + * interleaved as follows: The output RGB components of a pixel are interpolated + * from 1, 2, or + *
+ * + *

// C++ code:

+ * + *

4 neighbors of the pixel having the same color. There are several

+ * + *

modifications of the above pattern that can be achieved by shifting

+ * + *

the pattern one pixel left and/or one pixel up. The two letters

+ * + *

C_1 and

+ * + *

C_2 in the conversion constants CV_Bayer C_1 + * C_2 2BGR and CV_Bayer C_1 C_2 + * 2RGB indicate the particular pattern

+ * + *

type. These are components from the second row, second and third

+ * + *

columns, respectively. For example, the above pattern has a very

+ * + *

popular "BG" type.

+ * + * @param src input image: 8-bit unsigned, 16-bit unsigned (CV_16UC...), + * or single-precision floating-point. + * @param dst output image of the same size and depth as src. + * @param code color space conversion code (see the description below). + * + * @see org.opencv.imgproc.Imgproc.cvtColor + */ + public static void cvtColor(Mat src, Mat dst, int code) + { + + cvtColor_1(src.nativeObj, dst.nativeObj, code); + + return; + } + + + // + // C++: void dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + // + +/** + *

Dilates an image by using a specific structuring element.

+ * + *

The function dilates the source image using the specified structuring element + * that determines the shape of a pixel neighborhood over which the maximum is + * taken:

+ * + *

dst(x,y) = max _((x',y'): element(x',y') != 0) src(x+x',y+y')

+ * + *

The function supports the in-place mode. Dilation can be applied several + * (iterations) times. In case of multi-channel images, each + * channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times dilation is applied. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * @param borderValue border value in case of a constant border (see + * "createMorphologyFilter" for details). + * + * @see org.opencv.imgproc.Imgproc.dilate + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + */ + public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) + { + + dilate_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Dilates an image by using a specific structuring element.

+ * + *

The function dilates the source image using the specified structuring element + * that determines the shape of a pixel neighborhood over which the maximum is + * taken:

+ * + *

dst(x,y) = max _((x',y'): element(x',y') != 0) src(x+x',y+y')

+ * + *

The function supports the in-place mode. Dilation can be applied several + * (iterations) times. In case of multi-channel images, each + * channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times dilation is applied. + * + * @see org.opencv.imgproc.Imgproc.dilate + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + */ + public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) + { + + dilate_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations); + + return; + } + +/** + *

Dilates an image by using a specific structuring element.

+ * + *

The function dilates the source image using the specified structuring element + * that determines the shape of a pixel neighborhood over which the maximum is + * taken:

+ * + *

dst(x,y) = max _((x',y'): element(x',y') != 0) src(x+x',y+y')

+ * + *

The function supports the in-place mode. Dilation can be applied several + * (iterations) times. In case of multi-channel images, each + * channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * + * @see org.opencv.imgproc.Imgproc.dilate + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + */ + public static void dilate(Mat src, Mat dst, Mat kernel) + { + + dilate_2(src.nativeObj, dst.nativeObj, kernel.nativeObj); + + return; + } + + + // + // C++: void distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize) + // + +/** + *

Calculates the distance to the closest zero pixel for each pixel of the + * source image.

+ * + *

The functions distanceTransform calculate the approximate or + * precise distance from every binary image pixel to the nearest zero pixel. + * For zero image pixels, the distance will obviously be zero.

+ * + *

When maskSize == CV_DIST_MASK_PRECISE and distanceType == + * CV_DIST_L2, the function runs the algorithm described in + * [Felzenszwalb04]. This algorithm is parallelized with the TBB library.

+ * + *

In other cases, the algorithm [Borgefors86] is used. This means that for a + * pixel the function finds the shortest path to the nearest zero pixel + * consisting of basic shifts: horizontal, vertical, diagonal, or knight's move + * (the latest is available for a 5x 5 mask). The overall distance is + * calculated as a sum of these basic distances. Since the distance function + * should be symmetric, all of the horizontal and vertical shifts must have the + * same cost (denoted as a), all the diagonal shifts must have the + * same cost (denoted as b), and all knight's moves must have the + * same cost (denoted as c). For the CV_DIST_C and + * CV_DIST_L1 types, the distance is calculated precisely, whereas + * for CV_DIST_L2 (Euclidean distance) the distance can be + * calculated only with a relative error (a 5x 5 mask gives more + * accurate results). For a,b, and c, + * OpenCV uses the values suggested in the original paper:

+ * + *

============== =================== ====================== + * CV_DIST_C (3x 3) a = 1, b = 1 \ + * ============== =================== ====================== + * CV_DIST_L1 (3x 3) a = 1, b = 2 \ + * CV_DIST_L2 (3x 3) a=0.955, b=1.3693 \ + * CV_DIST_L2 (5x 5) a=1, b=1.4, c=2.1969 \ + * ============== =================== ======================

+ * + *

Typically, for a fast, coarse distance estimation CV_DIST_L2, a + * 3x 3 mask is used. For a more accurate distance estimation + * CV_DIST_L2, a 5x 5 mask or the precise algorithm is + * used. + * Note that both the precise and the approximate algorithms are linear on the + * number of pixels.

+ * + *

The second variant of the function does not only compute the minimum distance + * for each pixel (x, y) but also identifies the nearest connected + * component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) + * or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). Index + * of the component/pixel is stored in labels(x, y). + * When labelType==DIST_LABEL_CCOMP, the function automatically + * finds connected components of zero pixels in the input image and marks them + * with distinct labels. When labelType==DIST_LABEL_CCOMP, the + * function scans through the input image and marks all the zero pixels with + * distinct labels.

+ * + *

In this mode, the complexity is still linear. + * That is, the function provides a very fast way to compute the Voronoi diagram + * for a binary image. + * Currently, the second variant can use only the approximate distance transform + * algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE is not supported + * yet.

+ * + * @param src 8-bit, single-channel (binary) source image. + * @param dst Output image with calculated distances. It is a 32-bit + * floating-point, single-channel image of the same size as src. + * @param distanceType Type of distance. It can be CV_DIST_L1, + * CV_DIST_L2, or CV_DIST_C. + * @param maskSize Size of the distance transform mask. It can be 3, 5, or + * CV_DIST_MASK_PRECISE (the latter option is only supported by the + * first function). In case of the CV_DIST_L1 or CV_DIST_C + * distance type, the parameter is forced to 3 because a 3x 3 mask + * gives the same result as 5x 5 or any larger aperture. + * + * @see org.opencv.imgproc.Imgproc.distanceTransform + */ + public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize) + { + + distanceTransform_0(src.nativeObj, dst.nativeObj, distanceType, maskSize); + + return; + } + + + // + // C++: void distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP) + // + +/** + *

Calculates the distance to the closest zero pixel for each pixel of the + * source image.

+ * + *

The functions distanceTransform calculate the approximate or + * precise distance from every binary image pixel to the nearest zero pixel. + * For zero image pixels, the distance will obviously be zero.

+ * + *

When maskSize == CV_DIST_MASK_PRECISE and distanceType == + * CV_DIST_L2, the function runs the algorithm described in + * [Felzenszwalb04]. This algorithm is parallelized with the TBB library.

+ * + *

In other cases, the algorithm [Borgefors86] is used. This means that for a + * pixel the function finds the shortest path to the nearest zero pixel + * consisting of basic shifts: horizontal, vertical, diagonal, or knight's move + * (the latest is available for a 5x 5 mask). The overall distance is + * calculated as a sum of these basic distances. Since the distance function + * should be symmetric, all of the horizontal and vertical shifts must have the + * same cost (denoted as a), all the diagonal shifts must have the + * same cost (denoted as b), and all knight's moves must have the + * same cost (denoted as c). For the CV_DIST_C and + * CV_DIST_L1 types, the distance is calculated precisely, whereas + * for CV_DIST_L2 (Euclidean distance) the distance can be + * calculated only with a relative error (a 5x 5 mask gives more + * accurate results). For a,b, and c, + * OpenCV uses the values suggested in the original paper:

+ * + *

============== =================== ====================== + * CV_DIST_C (3x 3) a = 1, b = 1 \ + * ============== =================== ====================== + * CV_DIST_L1 (3x 3) a = 1, b = 2 \ + * CV_DIST_L2 (3x 3) a=0.955, b=1.3693 \ + * CV_DIST_L2 (5x 5) a=1, b=1.4, c=2.1969 \ + * ============== =================== ======================

+ * + *

Typically, for a fast, coarse distance estimation CV_DIST_L2, a + * 3x 3 mask is used. For a more accurate distance estimation + * CV_DIST_L2, a 5x 5 mask or the precise algorithm is + * used. + * Note that both the precise and the approximate algorithms are linear on the + * number of pixels.

+ * + *

The second variant of the function does not only compute the minimum distance + * for each pixel (x, y) but also identifies the nearest connected + * component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) + * or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). Index + * of the component/pixel is stored in labels(x, y). + * When labelType==DIST_LABEL_CCOMP, the function automatically + * finds connected components of zero pixels in the input image and marks them + * with distinct labels. When labelType==DIST_LABEL_CCOMP, the + * function scans through the input image and marks all the zero pixels with + * distinct labels.

+ * + *

In this mode, the complexity is still linear. + * That is, the function provides a very fast way to compute the Voronoi diagram + * for a binary image. + * Currently, the second variant can use only the approximate distance transform + * algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE is not supported + * yet.

+ * + * @param src 8-bit, single-channel (binary) source image. + * @param dst Output image with calculated distances. It is a 32-bit + * floating-point, single-channel image of the same size as src. + * @param labels Optional output 2D array of labels (the discrete Voronoi + * diagram). It has the type CV_32SC1 and the same size as + * src. See the details below. + * @param distanceType Type of distance. It can be CV_DIST_L1, + * CV_DIST_L2, or CV_DIST_C. + * @param maskSize Size of the distance transform mask. It can be 3, 5, or + * CV_DIST_MASK_PRECISE (the latter option is only supported by the + * first function). In case of the CV_DIST_L1 or CV_DIST_C + * distance type, the parameter is forced to 3 because a 3x 3 mask + * gives the same result as 5x 5 or any larger aperture. + * @param labelType Type of the label array to build. If labelType==DIST_LABEL_CCOMP + * then each connected component of zeros in src (as well as all + * the non-zero pixels closest to the connected component) will be assigned the + * same label. If labelType==DIST_LABEL_PIXEL then each zero pixel + * (and all the non-zero pixels closest to it) gets its own label. + * + * @see org.opencv.imgproc.Imgproc.distanceTransform + */ + public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) + { + + distanceTransformWithLabels_0(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize, labelType); + + return; + } + +/** + *

Calculates the distance to the closest zero pixel for each pixel of the + * source image.

+ * + *

The functions distanceTransform calculate the approximate or + * precise distance from every binary image pixel to the nearest zero pixel. + * For zero image pixels, the distance will obviously be zero.

+ * + *

When maskSize == CV_DIST_MASK_PRECISE and distanceType == + * CV_DIST_L2, the function runs the algorithm described in + * [Felzenszwalb04]. This algorithm is parallelized with the TBB library.

+ * + *

In other cases, the algorithm [Borgefors86] is used. This means that for a + * pixel the function finds the shortest path to the nearest zero pixel + * consisting of basic shifts: horizontal, vertical, diagonal, or knight's move + * (the latest is available for a 5x 5 mask). The overall distance is + * calculated as a sum of these basic distances. Since the distance function + * should be symmetric, all of the horizontal and vertical shifts must have the + * same cost (denoted as a), all the diagonal shifts must have the + * same cost (denoted as b), and all knight's moves must have the + * same cost (denoted as c). For the CV_DIST_C and + * CV_DIST_L1 types, the distance is calculated precisely, whereas + * for CV_DIST_L2 (Euclidean distance) the distance can be + * calculated only with a relative error (a 5x 5 mask gives more + * accurate results). For a,b, and c, + * OpenCV uses the values suggested in the original paper:

+ * + *

============== =================== ====================== + * CV_DIST_C (3x 3) a = 1, b = 1 \ + * ============== =================== ====================== + * CV_DIST_L1 (3x 3) a = 1, b = 2 \ + * CV_DIST_L2 (3x 3) a=0.955, b=1.3693 \ + * CV_DIST_L2 (5x 5) a=1, b=1.4, c=2.1969 \ + * ============== =================== ======================

+ * + *

Typically, for a fast, coarse distance estimation CV_DIST_L2, a + * 3x 3 mask is used. For a more accurate distance estimation + * CV_DIST_L2, a 5x 5 mask or the precise algorithm is + * used. + * Note that both the precise and the approximate algorithms are linear on the + * number of pixels.

+ * + *

The second variant of the function does not only compute the minimum distance + * for each pixel (x, y) but also identifies the nearest connected + * component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) + * or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). Index + * of the component/pixel is stored in labels(x, y). + * When labelType==DIST_LABEL_CCOMP, the function automatically + * finds connected components of zero pixels in the input image and marks them + * with distinct labels. When labelType==DIST_LABEL_CCOMP, the + * function scans through the input image and marks all the zero pixels with + * distinct labels.

+ * + *

In this mode, the complexity is still linear. + * That is, the function provides a very fast way to compute the Voronoi diagram + * for a binary image. + * Currently, the second variant can use only the approximate distance transform + * algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE is not supported + * yet.

+ * + * @param src 8-bit, single-channel (binary) source image. + * @param dst Output image with calculated distances. It is a 32-bit + * floating-point, single-channel image of the same size as src. + * @param labels Optional output 2D array of labels (the discrete Voronoi + * diagram). It has the type CV_32SC1 and the same size as + * src. See the details below. + * @param distanceType Type of distance. It can be CV_DIST_L1, + * CV_DIST_L2, or CV_DIST_C. + * @param maskSize Size of the distance transform mask. It can be 3, 5, or + * CV_DIST_MASK_PRECISE (the latter option is only supported by the + * first function). In case of the CV_DIST_L1 or CV_DIST_C + * distance type, the parameter is forced to 3 because a 3x 3 mask + * gives the same result as 5x 5 or any larger aperture. + * + * @see org.opencv.imgproc.Imgproc.distanceTransform + */ + public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize) + { + + distanceTransformWithLabels_1(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize); + + return; + } + + + // + // C++: void drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = 8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point()) + // + +/** + *

Draws contours outlines or filled contours.

+ * + *

The function draws contour outlines in the image if thickness >= 0 + * or fills the area bounded by the contours ifthickness<0. The + * example below shows how to retrieve connected components from the binary + * image and label them:

+ * + *

// C++ code:

+ * + *

#include "cv.h"

+ * + *

#include "highgui.h"

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat src;

+ * + *

// the first command-line parameter must be a filename of the binary

+ * + *

// (black-n-white) image

+ * + *

if(argc != 2 || !(src=imread(argv[1], 0)).data)

+ * + *

return -1;

+ * + *

Mat dst = Mat.zeros(src.rows, src.cols, CV_8UC3);

+ * + *

src = src > 1;

+ * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

vector > contours;

+ * + *

vector hierarchy;

+ * + *

findContours(src, contours, hierarchy,

+ * + *

CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

+ * + *

// iterate through all the top-level contours,

+ * + *

// draw each connected component with its own random color

+ * + *

int idx = 0;

+ * + *

for(; idx >= 0; idx = hierarchy[idx][0])

+ * + * + *

Scalar color(rand()&255, rand()&255, rand()&255);

+ * + *

drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);

+ * + * + *

namedWindow("Components", 1);

+ * + *

imshow("Components", dst);

+ * + *

waitKey(0);

+ * + * + * @param image Destination image. + * @param contours All the input contours. Each contour is stored as a point + * vector. + * @param contourIdx Parameter indicating a contour to draw. If it is negative, + * all the contours are drawn. + * @param color Color of the contours. + * @param thickness Thickness of lines the contours are drawn with. If it is + * negative (for example, thickness=CV_FILLED), the contour + * interiors are drawn. + * @param lineType Line connectivity. See "line" for details. + * @param hierarchy Optional information about hierarchy. It is only needed if + * you want to draw only some of the contours (see maxLevel). + * @param maxLevel Maximal level for drawn contours. If it is 0, only the + * specified contour is drawn. If it is 1, the function draws the contour(s) and + * all the nested contours. If it is 2, the function draws the contours, all the + * nested contours, all the nested-to-nested contours, and so on. This parameter + * is only taken into account when there is hierarchy available. + * @param offset Optional contour shift parameter. Shift all the drawn contours + * by the specified offset=(dx,dy). + * + * @see org.opencv.imgproc.Imgproc.drawContours + */ + public static void drawContours(Mat image, List contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset) + { + List contours_tmplm = new ArrayList((contours != null) ? contours.size() : 0); + Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); + drawContours_0(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel, offset.x, offset.y); + + return; + } + +/** + *

Draws contours outlines or filled contours.

+ * + *

The function draws contour outlines in the image if thickness >= 0 + * or fills the area bounded by the contours ifthickness<0. The + * example below shows how to retrieve connected components from the binary + * image and label them:

+ * + *

// C++ code:

+ * + *

#include "cv.h"

+ * + *

#include "highgui.h"

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat src;

+ * + *

// the first command-line parameter must be a filename of the binary

+ * + *

// (black-n-white) image

+ * + *

if(argc != 2 || !(src=imread(argv[1], 0)).data)

+ * + *

return -1;

+ * + *

Mat dst = Mat.zeros(src.rows, src.cols, CV_8UC3);

+ * + *

src = src > 1;

+ * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

vector > contours;

+ * + *

vector hierarchy;

+ * + *

findContours(src, contours, hierarchy,

+ * + *

CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

+ * + *

// iterate through all the top-level contours,

+ * + *

// draw each connected component with its own random color

+ * + *

int idx = 0;

+ * + *

for(; idx >= 0; idx = hierarchy[idx][0])

+ * + * + *

Scalar color(rand()&255, rand()&255, rand()&255);

+ * + *

drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);

+ * + * + *

namedWindow("Components", 1);

+ * + *

imshow("Components", dst);

+ * + *

waitKey(0);

+ * + * + * @param image Destination image. + * @param contours All the input contours. Each contour is stored as a point + * vector. + * @param contourIdx Parameter indicating a contour to draw. If it is negative, + * all the contours are drawn. + * @param color Color of the contours. + * @param thickness Thickness of lines the contours are drawn with. If it is + * negative (for example, thickness=CV_FILLED), the contour + * interiors are drawn. + * + * @see org.opencv.imgproc.Imgproc.drawContours + */ + public static void drawContours(Mat image, List contours, int contourIdx, Scalar color, int thickness) + { + List contours_tmplm = new ArrayList((contours != null) ? contours.size() : 0); + Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); + drawContours_1(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws contours outlines or filled contours.

+ * + *

The function draws contour outlines in the image if thickness >= 0 + * or fills the area bounded by the contours ifthickness<0. The + * example below shows how to retrieve connected components from the binary + * image and label them:

+ * + *

// C++ code:

+ * + *

#include "cv.h"

+ * + *

#include "highgui.h"

+ * + *

using namespace cv;

+ * + *

int main(int argc, char argv)

+ * + * + *

Mat src;

+ * + *

// the first command-line parameter must be a filename of the binary

+ * + *

// (black-n-white) image

+ * + *

if(argc != 2 || !(src=imread(argv[1], 0)).data)

+ * + *

return -1;

+ * + *

Mat dst = Mat.zeros(src.rows, src.cols, CV_8UC3);

+ * + *

src = src > 1;

+ * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

vector > contours;

+ * + *

vector hierarchy;

+ * + *

findContours(src, contours, hierarchy,

+ * + *

CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

+ * + *

// iterate through all the top-level contours,

+ * + *

// draw each connected component with its own random color

+ * + *

int idx = 0;

+ * + *

for(; idx >= 0; idx = hierarchy[idx][0])

+ * + * + *

Scalar color(rand()&255, rand()&255, rand()&255);

+ * + *

drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);

+ * + * + *

namedWindow("Components", 1);

+ * + *

imshow("Components", dst);

+ * + *

waitKey(0);

+ * + * + * @param image Destination image. + * @param contours All the input contours. Each contour is stored as a point + * vector. + * @param contourIdx Parameter indicating a contour to draw. If it is negative, + * all the contours are drawn. + * @param color Color of the contours. + * + * @see org.opencv.imgproc.Imgproc.drawContours + */ + public static void drawContours(Mat image, List contours, int contourIdx, Scalar color) + { + List contours_tmplm = new ArrayList((contours != null) ? contours.size() : 0); + Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); + drawContours_2(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void equalizeHist(Mat src, Mat& dst) + // + +/** + *

Equalizes the histogram of a grayscale image.

+ * + *

The function equalizes the histogram of the input image using the following + * algorithm:

+ *
    + *
  • Calculate the histogram H for src. + *
  • Normalize the histogram so that the sum of histogram bins is 255. + *
  • Compute the integral of the histogram: + *
+ * + *

H'_i = sum(by: 0 <= j < i) H(j)

+ * + *
    + *
  • + *
+ *

Transform the image using H' as a look-up table: dst(x,y) = + * H'(src(x,y))

+ * + *

The algorithm normalizes the brightness and increases the contrast of the + * image.

+ * + * @param src Source 8-bit single channel image. + * @param dst Destination image of the same size and type as src. + * + * @see org.opencv.imgproc.Imgproc.equalizeHist + */ + public static void equalizeHist(Mat src, Mat dst) + { + + equalizeHist_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + // + +/** + *

Erodes an image by using a specific structuring element.

+ * + *

The function erodes the source image using the specified structuring element + * that determines the shape of a pixel neighborhood over which the minimum is + * taken:

+ * + *

dst(x,y) = min _((x',y'): element(x',y') != 0) src(x+x',y+y')

+ * + *

The function supports the in-place mode. Erosion can be applied several + * (iterations) times. In case of multi-channel images, each + * channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times erosion is applied. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * @param borderValue border value in case of a constant border (see + * "createMorphologyFilter" for details). + * + * @see org.opencv.imgproc.Imgproc.erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) + { + + erode_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Erodes an image by using a specific structuring element.

+ * + *

The function erodes the source image using the specified structuring element + * that determines the shape of a pixel neighborhood over which the minimum is + * taken:

+ * + *

dst(x,y) = min _((x',y'): element(x',y') != 0) src(x+x',y+y')

+ * + *

The function supports the in-place mode. Erosion can be applied several + * (iterations) times. In case of multi-channel images, each + * channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times erosion is applied. + * + * @see org.opencv.imgproc.Imgproc.erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) + { + + erode_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations); + + return; + } + +/** + *

Erodes an image by using a specific structuring element.

+ * + *

The function erodes the source image using the specified structuring element + * that determines the shape of a pixel neighborhood over which the minimum is + * taken:

+ * + *

dst(x,y) = min _((x',y'): element(x',y') != 0) src(x+x',y+y')

+ * + *

The function supports the in-place mode. Erosion can be applied several + * (iterations) times. In case of multi-channel images, each + * channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * + * @see org.opencv.imgproc.Imgproc.erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void erode(Mat src, Mat dst, Mat kernel) + { + + erode_2(src.nativeObj, dst.nativeObj, kernel.nativeObj); + + return; + } + + + // + // C++: void filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Convolves an image with the kernel.

+ * + *

The function applies an arbitrary linear filter to an image. In-place + * operation is supported. When the aperture is partially outside the image, the + * function interpolates outlier pixel values according to the specified border + * mode.

+ * + *

The function does actually compute correlation, not the convolution:

+ * + *

dst(x,y) = sum(by: 0 <= x' < kernel.cols, 0 <= y' < kernel.rows) + * kernel(x',y')* src(x+x'- anchor.x,y+y'- anchor.y)

+ * + *

That is, the kernel is not mirrored around the anchor point. If you need a + * real convolution, flip the kernel using "flip" and set the new anchor to + * (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1).

+ * + *

The function uses the DFT-based algorithm in case of sufficiently large + * kernels (~11 x 11 or larger) and the direct algorithm (that uses + * the engine retrieved by "createLinearFilter") for small kernels.

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth desired depth of the destination image; if it is negative, it + * will be the same as src.depth(); the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the output image will have the same depth as the + * source.

+ * @param kernel convolution kernel (or rather a correlation kernel), a + * single-channel floating point matrix; if you want to apply different kernels + * to different channels, split the image into separate color planes using + * "split" and process them individually. + * @param anchor anchor of the kernel that indicates the relative position of a + * filtered point within the kernel; the anchor should lie within the kernel; + * default value (-1,-1) means that the anchor is at the kernel center. + * @param delta optional value added to the filtered pixels before storing them + * in dst. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.filter2D + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#dft + * @see org.opencv.imgproc.Imgproc#sepFilter2D + */ + public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) + { + + filter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta, borderType); + + return; + } + +/** + *

Convolves an image with the kernel.

+ * + *

The function applies an arbitrary linear filter to an image. In-place + * operation is supported. When the aperture is partially outside the image, the + * function interpolates outlier pixel values according to the specified border + * mode.

+ * + *

The function does actually compute correlation, not the convolution:

+ * + *

dst(x,y) = sum(by: 0 <= x' < kernel.cols, 0 <= y' < kernel.rows) + * kernel(x',y')* src(x+x'- anchor.x,y+y'- anchor.y)

+ * + *

That is, the kernel is not mirrored around the anchor point. If you need a + * real convolution, flip the kernel using "flip" and set the new anchor to + * (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1).

+ * + *

The function uses the DFT-based algorithm in case of sufficiently large + * kernels (~11 x 11 or larger) and the direct algorithm (that uses + * the engine retrieved by "createLinearFilter") for small kernels.

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth desired depth of the destination image; if it is negative, it + * will be the same as src.depth(); the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the output image will have the same depth as the + * source.

+ * @param kernel convolution kernel (or rather a correlation kernel), a + * single-channel floating point matrix; if you want to apply different kernels + * to different channels, split the image into separate color planes using + * "split" and process them individually. + * @param anchor anchor of the kernel that indicates the relative position of a + * filtered point within the kernel; the anchor should lie within the kernel; + * default value (-1,-1) means that the anchor is at the kernel center. + * @param delta optional value added to the filtered pixels before storing them + * in dst. + * + * @see org.opencv.imgproc.Imgproc.filter2D + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#dft + * @see org.opencv.imgproc.Imgproc#sepFilter2D + */ + public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta) + { + + filter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta); + + return; + } + +/** + *

Convolves an image with the kernel.

+ * + *

The function applies an arbitrary linear filter to an image. In-place + * operation is supported. When the aperture is partially outside the image, the + * function interpolates outlier pixel values according to the specified border + * mode.

+ * + *

The function does actually compute correlation, not the convolution:

+ * + *

dst(x,y) = sum(by: 0 <= x' < kernel.cols, 0 <= y' < kernel.rows) + * kernel(x',y')* src(x+x'- anchor.x,y+y'- anchor.y)

+ * + *

That is, the kernel is not mirrored around the anchor point. If you need a + * real convolution, flip the kernel using "flip" and set the new anchor to + * (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1).

+ * + *

The function uses the DFT-based algorithm in case of sufficiently large + * kernels (~11 x 11 or larger) and the direct algorithm (that uses + * the engine retrieved by "createLinearFilter") for small kernels.

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth desired depth of the destination image; if it is negative, it + * will be the same as src.depth(); the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the output image will have the same depth as the + * source.

+ * @param kernel convolution kernel (or rather a correlation kernel), a + * single-channel floating point matrix; if you want to apply different kernels + * to different channels, split the image into separate color planes using + * "split" and process them individually. + * + * @see org.opencv.imgproc.Imgproc.filter2D + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#dft + * @see org.opencv.imgproc.Imgproc#sepFilter2D + */ + public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel) + { + + filter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj); + + return; + } + + + // + // C++: void findContours(Mat& image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point()) + // + +/** + *

Finds contours in a binary image.

+ * + *

The function retrieves contours from the binary image using the algorithm + * [Suzuki85]. The contours are a useful tool for shape analysis and object + * detection and recognition. See squares.c in the OpenCV sample + * directory.

+ * + *

Note: Source image is modified by this function. Also, the + * function does not take into account 1-pixel border of the image (it's filled + * with 0's and used for neighbor analysis in the algorithm), therefore the + * contours touching the image border will be clipped.

+ * + *

Note: If you use the new Python interface then the CV_ prefix + * has to be omitted in contour retrieval mode and contour approximation method + * parameters (for example, use cv2.RETR_LIST and cv2.CHAIN_APPROX_NONE + * parameters). If you use the old Python interface then these parameters have + * the CV_ prefix (for example, use cv.CV_RETR_LIST + * and cv.CV_CHAIN_APPROX_NONE).

+ * + * @param image Source, an 8-bit single-channel image. Non-zero pixels are + * treated as 1's. Zero pixels remain 0's, so the image is treated as + * binary. You can use "compare", "inRange", "threshold", + * "adaptiveThreshold", "Canny", and others to create a binary image out of a + * grayscale or color one. The function modifies the image while + * extracting the contours. + * @param contours Detected contours. Each contour is stored as a vector of + * points. + * @param hierarchy Optional output vector, containing information about the + * image topology. It has as many elements as the number of contours. For each + * i-th contour contours[i], the elements hierarchy[i][0], + * hiearchy[i][1], hiearchy[i][2], and + * hiearchy[i][3] are set to 0-based indices in contours + * of the next and previous contours at the same hierarchical level, the first + * child contour and the parent contour, respectively. If for the contour + * i there are no next, previous, parent, or nested contours, the + * corresponding elements of hierarchy[i] will be negative. + * @param mode Contour retrieval mode (if you use Python see also a note below). + *
    + *
  • CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets + * hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours. + *
  • CV_RETR_LIST retrieves all of the contours without establishing any + * hierarchical relationships. + *
  • CV_RETR_CCOMP retrieves all of the contours and organizes them into a + * two-level hierarchy. At the top level, there are external boundaries of the + * components. At the second level, there are boundaries of the holes. If there + * is another contour inside a hole of a connected component, it is still put at + * the top level. + *
  • CV_RETR_TREE retrieves all of the contours and reconstructs a full + * hierarchy of nested contours. This full hierarchy is built and shown in the + * OpenCV contours.c demo. + *
+ * @param method Contour approximation method (if you use Python see also a note + * below). + *
    + *
  • CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That + * is, any 2 subsequent points (x1,y1) and (x2,y2) of + * the contour will be either horizontal, vertical or diagonal neighbors, that + * is, max(abs(x1-x2),abs(y2-y1))==1. + *
  • CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal + * segments and leaves only their end points. For example, an up-right + * rectangular contour is encoded with 4 points. + *
  • CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the + * flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for + * details. + *
+ * @param offset Optional offset by which every contour point is shifted. This + * is useful if the contours are extracted from the image ROI and then they + * should be analyzed in the whole image context. + * + * @see org.opencv.imgproc.Imgproc.findContours + */ + public static void findContours(Mat image, List contours, Mat hierarchy, int mode, int method, Point offset) + { + Mat contours_mat = new Mat(); + findContours_0(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method, offset.x, offset.y); + Converters.Mat_to_vector_vector_Point(contours_mat, contours); + return; + } + +/** + *

Finds contours in a binary image.

+ * + *

The function retrieves contours from the binary image using the algorithm + * [Suzuki85]. The contours are a useful tool for shape analysis and object + * detection and recognition. See squares.c in the OpenCV sample + * directory.

+ * + *

Note: Source image is modified by this function. Also, the + * function does not take into account 1-pixel border of the image (it's filled + * with 0's and used for neighbor analysis in the algorithm), therefore the + * contours touching the image border will be clipped.

+ * + *

Note: If you use the new Python interface then the CV_ prefix + * has to be omitted in contour retrieval mode and contour approximation method + * parameters (for example, use cv2.RETR_LIST and cv2.CHAIN_APPROX_NONE + * parameters). If you use the old Python interface then these parameters have + * the CV_ prefix (for example, use cv.CV_RETR_LIST + * and cv.CV_CHAIN_APPROX_NONE).

+ * + * @param image Source, an 8-bit single-channel image. Non-zero pixels are + * treated as 1's. Zero pixels remain 0's, so the image is treated as + * binary. You can use "compare", "inRange", "threshold", + * "adaptiveThreshold", "Canny", and others to create a binary image out of a + * grayscale or color one. The function modifies the image while + * extracting the contours. + * @param contours Detected contours. Each contour is stored as a vector of + * points. + * @param hierarchy Optional output vector, containing information about the + * image topology. It has as many elements as the number of contours. For each + * i-th contour contours[i], the elements hierarchy[i][0], + * hiearchy[i][1], hiearchy[i][2], and + * hiearchy[i][3] are set to 0-based indices in contours + * of the next and previous contours at the same hierarchical level, the first + * child contour and the parent contour, respectively. If for the contour + * i there are no next, previous, parent, or nested contours, the + * corresponding elements of hierarchy[i] will be negative. + * @param mode Contour retrieval mode (if you use Python see also a note below). + *
    + *
  • CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets + * hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours. + *
  • CV_RETR_LIST retrieves all of the contours without establishing any + * hierarchical relationships. + *
  • CV_RETR_CCOMP retrieves all of the contours and organizes them into a + * two-level hierarchy. At the top level, there are external boundaries of the + * components. At the second level, there are boundaries of the holes. If there + * is another contour inside a hole of a connected component, it is still put at + * the top level. + *
  • CV_RETR_TREE retrieves all of the contours and reconstructs a full + * hierarchy of nested contours. This full hierarchy is built and shown in the + * OpenCV contours.c demo. + *
+ * @param method Contour approximation method (if you use Python see also a note + * below). + *
    + *
  • CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That + * is, any 2 subsequent points (x1,y1) and (x2,y2) of + * the contour will be either horizontal, vertical or diagonal neighbors, that + * is, max(abs(x1-x2),abs(y2-y1))==1. + *
  • CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal + * segments and leaves only their end points. For example, an up-right + * rectangular contour is encoded with 4 points. + *
  • CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the + * flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for + * details. + *
+ * + * @see org.opencv.imgproc.Imgproc.findContours + */ + public static void findContours(Mat image, List contours, Mat hierarchy, int mode, int method) + { + Mat contours_mat = new Mat(); + findContours_1(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method); + Converters.Mat_to_vector_vector_Point(contours_mat, contours); + return; + } + + + // + // C++: RotatedRect fitEllipse(vector_Point2f points) + // + +/** + *

Fits an ellipse around a set of 2D points.

+ * + *

The function calculates the ellipse that fits (in a least-squares sense) a + * set of 2D points best of all. It returns the rotated rectangle in which the + * ellipse is inscribed. The algorithm [Fitzgibbon95] is used.

+ * + * @param points Input 2D point set, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * + * @see org.opencv.imgproc.Imgproc.fitEllipse + */ + public static RotatedRect fitEllipse(MatOfPoint2f points) + { + Mat points_mat = points; + RotatedRect retVal = new RotatedRect(fitEllipse_0(points_mat.nativeObj)); + + return retVal; + } + + + // + // C++: void fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps) + // + +/** + *

Fits a line to a 2D or 3D point set.

+ * + *

The function fitLine fits a line to a 2D or 3D point set by + * minimizing sum_i rho(r_i) where r_i is a distance between + * the i^(th) point, the line and rho(r) is a distance + * function, one of the following:

+ *
    + *
  • distType=CV_DIST_L2 + *
+ * + *

rho(r) = r^2/2(the simplest and the fastest least-squares method)

+ * + *
    + *
  • distType=CV_DIST_L1 + *
+ * + *

rho(r) = r

+ * + *
    + *
  • distType=CV_DIST_L12 + *
+ * + *

rho(r) = 2 * (sqrt(1 + frac(r^2)2) - 1)

+ * + *
    + *
  • distType=CV_DIST_FAIR + *
+ * + *

rho(r) = C^2 * ((r)/(C) - log((1 + (r)/(C)))) where C=1.3998

+ * + *
    + *
  • distType=CV_DIST_WELSCH + *
+ * + *

rho(r) = (C^2)/2 * (1 - exp((-((r)/(C))^2))) where C=2.9846

+ * + *
    + *
  • distType=CV_DIST_HUBER + *
+ * + *

rho(r) = r^2/2 if r < C; C * (r-C/2) otherwise where C=1.345

+ * + *

The algorithm is based on the M-estimator (http://en.wikipedia.org/wiki/M-estimator) + * technique that iteratively fits the line using the weighted least-squares + * algorithm. After each iteration the weights w_i are adjusted to be + * inversely proportional to rho(r_i).

+ * + * @param points Input vector of 2D or 3D points, stored in std.vector<> + * or Mat. + * @param line Output line parameters. In case of 2D fitting, it should be a + * vector of 4 elements (like Vec4f) - (vx, vy, x0, + * y0), where (vx, vy) is a normalized vector collinear to + * the line and (x0, y0) is a point on the line. In case of 3D + * fitting, it should be a vector of 6 elements (like Vec6f) - + * (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a + * normalized vector collinear to the line and (x0, y0, z0) is a + * point on the line. + * @param distType Distance used by the M-estimator (see the discussion below). + * @param param Numerical parameter (C) for some types of + * distances. If it is 0, an optimal value is chosen. + * @param reps Sufficient accuracy for the radius (distance between the + * coordinate origin and the line). + * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default + * value for reps and aeps. + * + * @see org.opencv.imgproc.Imgproc.fitLine + */ + public static void fitLine(Mat points, Mat line, int distType, double param, double reps, double aeps) + { + + fitLine_0(points.nativeObj, line.nativeObj, distType, param, reps, aeps); + + return; + } + + + // + // C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4) + // + +/** + *

Fills a connected component with the given color.

+ * + *

The functions floodFill fill a connected component starting from + * the seed point with the specified color. The connectivity is determined by + * the color/brightness closeness of the neighbor pixels. The pixel at + * (x,y) is considered to belong to the repainted domain if:

+ *
    + *
  • src(x',y')- loDiff <= src(x,y) <= src(x',y')+ upDiff + *
+ * + *

in case of a grayscale image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)- loDiff <= src(x,y) <= + * src(seedPoint.x, seedPoint.y)+ upDiff + *
+ * + *

in case of a grayscale image and fixed range

+ *
    + *
  • src(x',y')_r- loDiff _r <= src(x,y)_r <= src(x',y')_r+ upDiff + * _r, + * + * + *
+ * + *

src(x',y')_g- loDiff _g <= src(x,y)_g <= src(x',y')_g+ upDiff _g

+ * + *

and

+ * + *

src(x',y')_b- loDiff _b <= src(x,y)_b <= src(x',y')_b+ upDiff _b

+ * + *

in case of a color image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)_r- loDiff _r <= src(x,y)_r <= + * src(seedPoint.x, seedPoint.y)_r+ upDiff _r, + * + * + *
+ * + *

src(seedPoint.x, seedPoint.y)_g- loDiff _g <= src(x,y)_g <= + * src(seedPoint.x, seedPoint.y)_g+ upDiff _g

+ * + *

and

+ * + *

src(seedPoint.x, seedPoint.y)_b- loDiff _b <= src(x,y)_b <= + * src(seedPoint.x, seedPoint.y)_b+ upDiff _b

+ * + *

in case of a color image and fixed range

+ * + *

where src(x',y') is the value of one of pixel neighbors that is + * already known to belong to the component. That is, to be added to the + * connected component, a color/brightness of the pixel should be close enough + * to:

+ *
    + *
  • Color/brightness of one of its neighbors that already belong to the + * connected component in case of a floating range. + *
  • Color/brightness of the seed point in case of a fixed range. + *
+ * + *

Use these functions to either mark a connected component with the specified + * color in-place, or build a mask and then extract the contour, or copy the + * region to another image, and so on. Various modes of the function are + * demonstrated in the floodfill.cpp sample.

+ * + * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It + * is modified by the function unless the FLOODFILL_MASK_ONLY flag + * is set in the second variant of the function. See the details below. + * @param mask (For the second function only) Operation mask that should be a + * single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function + * uses and updates the mask, so you take responsibility of initializing the + * mask content. Flood-filling cannot go across non-zero pixels in + * the mask. For example, an edge detector output can be used as a mask to stop + * filling at edges. It is possible to use the same mask in multiple calls to + * the function to make sure the filled area does not overlap. + * + *

Note: Since the mask is larger than the filled image, a pixel (x, y) + * in image corresponds to the pixel (x+1, y+1) in the + * mask.

+ * @param seedPoint Starting point. + * @param newVal New value of the repainted domain pixels. + * @param rect Optional output parameter set by the function to the minimum + * bounding rectangle of the repainted domain. + * @param loDiff Maximal lower brightness/color difference between the currently + * observed pixel and one of its neighbors belonging to the component, or a seed + * pixel being added to the component. + * @param upDiff Maximal upper brightness/color difference between the currently + * observed pixel and one of its neighbors belonging to the component, or a seed + * pixel being added to the component. + * @param flags Operation flags. Lower bits contain a connectivity value, 4 + * (default) or 8, used within the function. Connectivity determines which + * neighbors of a pixel are considered. Upper bits can be 0 or a combination of + * the following flags: + *
    + *
  • FLOODFILL_FIXED_RANGE If set, the difference between the current pixel + * and seed pixel is considered. Otherwise, the difference between neighbor + * pixels is considered (that is, the range is floating). + *
  • FLOODFILL_MASK_ONLY If set, the function does not change the image + * (newVal is ignored), but fills the mask. The flag can be used + * for the second variant only. + *
+ * + * @see org.opencv.imgproc.Imgproc.floodFill + * @see org.opencv.imgproc.Imgproc#findContours + */ + public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff, int flags) + { + double[] rect_out = new double[4]; + int retVal = floodFill_0(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3], flags); + if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } + return retVal; + } + +/** + *

Fills a connected component with the given color.

+ * + *

The functions floodFill fill a connected component starting from + * the seed point with the specified color. The connectivity is determined by + * the color/brightness closeness of the neighbor pixels. The pixel at + * (x,y) is considered to belong to the repainted domain if:

+ *
    + *
  • src(x',y')- loDiff <= src(x,y) <= src(x',y')+ upDiff + *
+ * + *

in case of a grayscale image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)- loDiff <= src(x,y) <= + * src(seedPoint.x, seedPoint.y)+ upDiff + *
+ * + *

in case of a grayscale image and fixed range

+ *
    + *
  • src(x',y')_r- loDiff _r <= src(x,y)_r <= src(x',y')_r+ upDiff + * _r, + * + * + *
+ * + *

src(x',y')_g- loDiff _g <= src(x,y)_g <= src(x',y')_g+ upDiff _g

+ * + *

and

+ * + *

src(x',y')_b- loDiff _b <= src(x,y)_b <= src(x',y')_b+ upDiff _b

+ * + *

in case of a color image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)_r- loDiff _r <= src(x,y)_r <= + * src(seedPoint.x, seedPoint.y)_r+ upDiff _r, + * + * + *
+ * + *

src(seedPoint.x, seedPoint.y)_g- loDiff _g <= src(x,y)_g <= + * src(seedPoint.x, seedPoint.y)_g+ upDiff _g

+ * + *

and

+ * + *

src(seedPoint.x, seedPoint.y)_b- loDiff _b <= src(x,y)_b <= + * src(seedPoint.x, seedPoint.y)_b+ upDiff _b

+ * + *

in case of a color image and fixed range

+ * + *

where src(x',y') is the value of one of pixel neighbors that is + * already known to belong to the component. That is, to be added to the + * connected component, a color/brightness of the pixel should be close enough + * to:

+ *
    + *
  • Color/brightness of one of its neighbors that already belong to the + * connected component in case of a floating range. + *
  • Color/brightness of the seed point in case of a fixed range. + *
+ * + *

Use these functions to either mark a connected component with the specified + * color in-place, or build a mask and then extract the contour, or copy the + * region to another image, and so on. Various modes of the function are + * demonstrated in the floodfill.cpp sample.

+ * + * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It + * is modified by the function unless the FLOODFILL_MASK_ONLY flag + * is set in the second variant of the function. See the details below. + * @param mask (For the second function only) Operation mask that should be a + * single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function + * uses and updates the mask, so you take responsibility of initializing the + * mask content. Flood-filling cannot go across non-zero pixels in + * the mask. For example, an edge detector output can be used as a mask to stop + * filling at edges. It is possible to use the same mask in multiple calls to + * the function to make sure the filled area does not overlap. + * + *

Note: Since the mask is larger than the filled image, a pixel (x, y) + * in image corresponds to the pixel (x+1, y+1) in the + * mask.

+ * @param seedPoint Starting point. + * @param newVal New value of the repainted domain pixels. + * + * @see org.opencv.imgproc.Imgproc.floodFill + * @see org.opencv.imgproc.Imgproc#findContours + */ + public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal) + { + + int retVal = floodFill_1(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3]); + + return retVal; + } + + + // + // C++: Mat getAffineTransform(vector_Point2f src, vector_Point2f dst) + // + +/** + *

Calculates an affine transform from three pairs of the corresponding points.

+ * + *

The function calculates the 2 x 3 matrix of an affine transform so + * that:

+ * + *

x'_i + * y'_i = map_matrix * x_i + * y_i + * 1

+ * + *

where

+ * + *

dst(i)=(x'_i,y'_i),<BR>src(i)=(x_i, y_i),<BR>i=0,1,2

+ * + * @param src Coordinates of triangle vertices in the source image. + * @param dst Coordinates of the corresponding triangle vertices in the + * destination image. + * + * @see org.opencv.imgproc.Imgproc.getAffineTransform + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.core.Core#transform + */ + public static Mat getAffineTransform(MatOfPoint2f src, MatOfPoint2f dst) + { + Mat src_mat = src; + Mat dst_mat = dst; + Mat retVal = new Mat(getAffineTransform_0(src_mat.nativeObj, dst_mat.nativeObj)); + + return retVal; + } + + + // + // C++: Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false) + // + +/** + *

Returns the default new camera matrix.

+ * + *

The function returns the camera matrix that is either an exact copy of the + * input cameraMatrix (when centerPrinicipalPoint=false), + * or the modified one (when centerPrincipalPoint=true).

+ * + *

In the latter case, the new camera matrix will be:

+ * + *

f_x 0(imgSize.width -1)*0.5 + * 0 f_y(imgSize.height -1)*0.5 + * 0 0 1,

+ * + *

where f_x and f_y are (0,0) and (1,1) + * elements of cameraMatrix, respectively.

+ * + *

By default, the undistortion functions in OpenCV (see "initUndistortRectifyMap", + * "undistort") do not move the principal point. However, when you work with + * stereo, it is important to move the principal points in both views to the + * same y-coordinate (which is required by most of stereo correspondence + * algorithms), and may be to the same x-coordinate too. So, you can form the + * new camera matrix for each view where the principal points are located at the + * center.

+ * + * @param cameraMatrix Input camera matrix. + * @param imgsize Camera view image size in pixels. + * @param centerPrincipalPoint Location of the principal point in the new camera + * matrix. The parameter indicates whether this location should be at the image + * center or not. + * + * @see org.opencv.imgproc.Imgproc.getDefaultNewCameraMatrix + */ + public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint) + { + + Mat retVal = new Mat(getDefaultNewCameraMatrix_0(cameraMatrix.nativeObj, imgsize.width, imgsize.height, centerPrincipalPoint)); + + return retVal; + } + +/** + *

Returns the default new camera matrix.

+ * + *

The function returns the camera matrix that is either an exact copy of the + * input cameraMatrix (when centerPrinicipalPoint=false), + * or the modified one (when centerPrincipalPoint=true).

+ * + *

In the latter case, the new camera matrix will be:

+ * + *

f_x 0(imgSize.width -1)*0.5 + * 0 f_y(imgSize.height -1)*0.5 + * 0 0 1,

+ * + *

where f_x and f_y are (0,0) and (1,1) + * elements of cameraMatrix, respectively.

+ * + *

By default, the undistortion functions in OpenCV (see "initUndistortRectifyMap", + * "undistort") do not move the principal point. However, when you work with + * stereo, it is important to move the principal points in both views to the + * same y-coordinate (which is required by most of stereo correspondence + * algorithms), and may be to the same x-coordinate too. So, you can form the + * new camera matrix for each view where the principal points are located at the + * center.

+ * + * @param cameraMatrix Input camera matrix. + * + * @see org.opencv.imgproc.Imgproc.getDefaultNewCameraMatrix + */ + public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix) + { + + Mat retVal = new Mat(getDefaultNewCameraMatrix_1(cameraMatrix.nativeObj)); + + return retVal; + } + + + // + // C++: void getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F) + // + +/** + *

Returns filter coefficients for computing spatial image derivatives.

+ * + *

The function computes and returns the filter coefficients for spatial image + * derivatives. When ksize=CV_SCHARR, the Scharr 3 x 3 + * kernels are generated (see "Scharr"). Otherwise, Sobel kernels are generated + * (see "Sobel"). The filters are normally passed to "sepFilter2D" or to + * "createSeparableLinearFilter".

+ * + * @param kx Output matrix of row filter coefficients. It has the type + * ktype. + * @param ky Output matrix of column filter coefficients. It has the type + * ktype. + * @param dx Derivative order in respect of x. + * @param dy Derivative order in respect of y. + * @param ksize Aperture size. It can be CV_SCHARR, 1, 3, 5, or 7. + * @param normalize Flag indicating whether to normalize (scale down) the filter + * coefficients or not. Theoretically, the coefficients should have the + * denominator =2^(ksize*2-dx-dy-2). If you are going to filter + * floating-point images, you are likely to use the normalized kernels. But if + * you compute derivatives of an 8-bit image, store the results in a 16-bit + * image, and wish to preserve all the fractional bits, you may want to set + * normalize=false. + * @param ktype Type of filter coefficients. It can be CV_32f or + * CV_64F. + * + * @see org.opencv.imgproc.Imgproc.getDerivKernels + */ + public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize, int ktype) + { + + getDerivKernels_0(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize, ktype); + + return; + } + +/** + *

Returns filter coefficients for computing spatial image derivatives.

+ * + *

The function computes and returns the filter coefficients for spatial image + * derivatives. When ksize=CV_SCHARR, the Scharr 3 x 3 + * kernels are generated (see "Scharr"). Otherwise, Sobel kernels are generated + * (see "Sobel"). The filters are normally passed to "sepFilter2D" or to + * "createSeparableLinearFilter".

+ * + * @param kx Output matrix of row filter coefficients. It has the type + * ktype. + * @param ky Output matrix of column filter coefficients. It has the type + * ktype. + * @param dx Derivative order in respect of x. + * @param dy Derivative order in respect of y. + * @param ksize Aperture size. It can be CV_SCHARR, 1, 3, 5, or 7. + * + * @see org.opencv.imgproc.Imgproc.getDerivKernels + */ + public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize) + { + + getDerivKernels_1(kx.nativeObj, ky.nativeObj, dx, dy, ksize); + + return; + } + + + // + // C++: Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F) + // + + public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi, int ktype) + { + + Mat retVal = new Mat(getGaborKernel_0(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi, ktype)); + + return retVal; + } + + public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma) + { + + Mat retVal = new Mat(getGaborKernel_1(ksize.width, ksize.height, sigma, theta, lambd, gamma)); + + return retVal; + } + + + // + // C++: Mat getGaussianKernel(int ksize, double sigma, int ktype = CV_64F) + // + +/** + *

Returns Gaussian filter coefficients.

+ * + *

The function computes and returns the ksize x 1 matrix of Gaussian + * filter coefficients:

+ * + *

G_i= alpha *e^(-(i-(ksize -1)/2)^2/(2* sigma)^2),

+ * + *

where i=0..ksize-1 and alpha is the scale factor chosen so + * that sum_i G_i=1.

+ * + *

Two of such generated kernels can be passed to "sepFilter2D" or to + * "createSeparableLinearFilter". Those functions automatically recognize + * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and + * handle them accordingly. You may also use the higher-level "GaussianBlur".

+ * + * @param ksize Aperture size. It should be odd (ksize mod 2 = 1) and + * positive. + * @param sigma Gaussian standard deviation. If it is non-positive, it is + * computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + + * 0.8. + * @param ktype Type of filter coefficients. It can be CV_32f or + * CV_64F. + * + * @see org.opencv.imgproc.Imgproc.getGaussianKernel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#getStructuringElement + * @see org.opencv.imgproc.Imgproc#getDerivKernels + */ + public static Mat getGaussianKernel(int ksize, double sigma, int ktype) + { + + Mat retVal = new Mat(getGaussianKernel_0(ksize, sigma, ktype)); + + return retVal; + } + +/** + *

Returns Gaussian filter coefficients.

+ * + *

The function computes and returns the ksize x 1 matrix of Gaussian + * filter coefficients:

+ * + *

G_i= alpha *e^(-(i-(ksize -1)/2)^2/(2* sigma)^2),

+ * + *

where i=0..ksize-1 and alpha is the scale factor chosen so + * that sum_i G_i=1.

+ * + *

Two of such generated kernels can be passed to "sepFilter2D" or to + * "createSeparableLinearFilter". Those functions automatically recognize + * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and + * handle them accordingly. You may also use the higher-level "GaussianBlur".

+ * + * @param ksize Aperture size. It should be odd (ksize mod 2 = 1) and + * positive. + * @param sigma Gaussian standard deviation. If it is non-positive, it is + * computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + + * 0.8. + * + * @see org.opencv.imgproc.Imgproc.getGaussianKernel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#getStructuringElement + * @see org.opencv.imgproc.Imgproc#getDerivKernels + */ + public static Mat getGaussianKernel(int ksize, double sigma) + { + + Mat retVal = new Mat(getGaussianKernel_1(ksize, sigma)); + + return retVal; + } + + + // + // C++: Mat getPerspectiveTransform(Mat src, Mat dst) + // + +/** + *

Calculates a perspective transform from four pairs of the corresponding + * points.

+ * + *

The function calculates the 3 x 3 matrix of a perspective transform + * so that:

+ * + *

t_i x'_i + * t_i y'_i + * t_i = map_matrix * x_i + * y_i + * 1

+ * + *

where

+ * + *

dst(i)=(x'_i,y'_i),<BR>src(i)=(x_i, y_i),<BR>i=0,1,2,3

+ * + * @param src Coordinates of quadrangle vertices in the source image. + * @param dst Coordinates of the corresponding quadrangle vertices in the + * destination image. + * + * @see org.opencv.imgproc.Imgproc.getPerspectiveTransform + * @see org.opencv.calib3d.Calib3d#findHomography + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static Mat getPerspectiveTransform(Mat src, Mat dst) + { + + Mat retVal = new Mat(getPerspectiveTransform_0(src.nativeObj, dst.nativeObj)); + + return retVal; + } + + + // + // C++: void getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1) + // + +/** + *

Retrieves a pixel rectangle from an image with sub-pixel accuracy.

+ * + *

The function getRectSubPix extracts pixels from src

+ * + *

dst(x, y) = src(x + center.x - (dst.cols -1)*0.5, y + center.y - + * (dst.rows -1)*0.5)

+ * + *

where the values of the pixels at non-integer coordinates are retrieved using + * bilinear interpolation. Every channel of multi-channel images is processed + * independently. While the center of the rectangle must be inside the image, + * parts of the rectangle may be outside. In this case, the replication border + * mode (see "borderInterpolate") is used to extrapolate the pixel values + * outside of the image.

+ * + * @param image a image + * @param patchSize Size of the extracted patch. + * @param center Floating point coordinates of the center of the extracted + * rectangle within the source image. The center must be inside the image. + * @param patch a patch + * @param patchType Depth of the extracted pixels. By default, they have the + * same depth as src. + * + * @see org.opencv.imgproc.Imgproc.getRectSubPix + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch, int patchType) + { + + getRectSubPix_0(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj, patchType); + + return; + } + +/** + *

Retrieves a pixel rectangle from an image with sub-pixel accuracy.

+ * + *

The function getRectSubPix extracts pixels from src

+ * + *

dst(x, y) = src(x + center.x - (dst.cols -1)*0.5, y + center.y - + * (dst.rows -1)*0.5)

+ * + *

where the values of the pixels at non-integer coordinates are retrieved using + * bilinear interpolation. Every channel of multi-channel images is processed + * independently. While the center of the rectangle must be inside the image, + * parts of the rectangle may be outside. In this case, the replication border + * mode (see "borderInterpolate") is used to extrapolate the pixel values + * outside of the image.

+ * + * @param image a image + * @param patchSize Size of the extracted patch. + * @param center Floating point coordinates of the center of the extracted + * rectangle within the source image. The center must be inside the image. + * @param patch a patch + * + * @see org.opencv.imgproc.Imgproc.getRectSubPix + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch) + { + + getRectSubPix_1(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj); + + return; + } + + + // + // C++: Mat getRotationMatrix2D(Point2f center, double angle, double scale) + // + +/** + *

Calculates an affine matrix of 2D rotation.

+ * + *

The function calculates the following matrix:

+ * + *

alpha beta(1- alpha) * center.x - beta * center.y + * - beta alpha beta * center.x + (1- alpha) * center.y

+ * + *

where

+ * + *

alpha = scale * cos angle, + * beta = scale * sin angle

+ * + *

The transformation maps the rotation center to itself. If this is not the + * target, adjust the shift.

+ * + * @param center Center of the rotation in the source image. + * @param angle Rotation angle in degrees. Positive values mean + * counter-clockwise rotation (the coordinate origin is assumed to be the + * top-left corner). + * @param scale Isotropic scale factor. + * + * @see org.opencv.imgproc.Imgproc.getRotationMatrix2D + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.core.Core#transform + */ + public static Mat getRotationMatrix2D(Point center, double angle, double scale) + { + + Mat retVal = new Mat(getRotationMatrix2D_0(center.x, center.y, angle, scale)); + + return retVal; + } + + + // + // C++: Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)) + // + +/** + *

Returns a structuring element of the specified size and shape for + * morphological operations.

+ * + *

The function constructs and returns the structuring element that can be + * further passed to "createMorphologyFilter", "erode", "dilate" or + * "morphologyEx". But you can also construct an arbitrary binary mask yourself + * and use it as the structuring element.

+ * + *

Note: When using OpenCV 1.x C API, the created structuring element + * IplConvKernel* element must be released in the end using + * cvReleaseStructuringElement(&element).

+ * + * @param shape Element shape that could be one of the following: + *
    + *
  • MORPH_RECT - a rectangular structuring element: + *
+ * + *

E_(ij)=1

+ * + *
    + *
  • MORPH_ELLIPSE - an elliptic structuring element, that is, a filled + * ellipse inscribed into the rectangle Rect(0, 0, esize.width, + * 0.esize.height) + *
  • MORPH_CROSS - a cross-shaped structuring element: + *
+ * + *

E_(ij) = 1 if i=anchor.y or j=anchor.x; 0 otherwise

+ * + *
    + *
  • CV_SHAPE_CUSTOM - custom structuring element (OpenCV 1.x API) + *
+ * @param ksize Size of the structuring element. + * @param anchor Anchor position within the element. The default value (-1, + * -1) means that the anchor is at the center. Note that only the shape of + * a cross-shaped element depends on the anchor position. In other cases the + * anchor just regulates how much the result of the morphological operation is + * shifted. + * + * @see org.opencv.imgproc.Imgproc.getStructuringElement + */ + public static Mat getStructuringElement(int shape, Size ksize, Point anchor) + { + + Mat retVal = new Mat(getStructuringElement_0(shape, ksize.width, ksize.height, anchor.x, anchor.y)); + + return retVal; + } + +/** + *

Returns a structuring element of the specified size and shape for + * morphological operations.

+ * + *

The function constructs and returns the structuring element that can be + * further passed to "createMorphologyFilter", "erode", "dilate" or + * "morphologyEx". But you can also construct an arbitrary binary mask yourself + * and use it as the structuring element.

+ * + *

Note: When using OpenCV 1.x C API, the created structuring element + * IplConvKernel* element must be released in the end using + * cvReleaseStructuringElement(&element).

+ * + * @param shape Element shape that could be one of the following: + *
    + *
  • MORPH_RECT - a rectangular structuring element: + *
+ * + *

E_(ij)=1

+ * + *
    + *
  • MORPH_ELLIPSE - an elliptic structuring element, that is, a filled + * ellipse inscribed into the rectangle Rect(0, 0, esize.width, + * 0.esize.height) + *
  • MORPH_CROSS - a cross-shaped structuring element: + *
+ * + *

E_(ij) = 1 if i=anchor.y or j=anchor.x; 0 otherwise

+ * + *
    + *
  • CV_SHAPE_CUSTOM - custom structuring element (OpenCV 1.x API) + *
+ * @param ksize Size of the structuring element. + * + * @see org.opencv.imgproc.Imgproc.getStructuringElement + */ + public static Mat getStructuringElement(int shape, Size ksize) + { + + Mat retVal = new Mat(getStructuringElement_1(shape, ksize.width, ksize.height)); + + return retVal; + } + + + // + // C++: void goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04) + // + +/** + *

Determines strong corners on an image.

+ * + *

The function finds the most prominent corners in the image or in the + * specified image region, as described in [Shi94]:

+ *
    + *
  • Function calculates the corner quality measure at every source image + * pixel using the "cornerMinEigenVal" or "cornerHarris". + *
  • Function performs a non-maximum suppression (the local maximums in *3 + * x 3* neighborhood are retained). + *
  • The corners with the minimal eigenvalue less than qualityLevel * + * max_(x,y) qualityMeasureMap(x,y) are rejected. + *
  • The remaining corners are sorted by the quality measure in the + * descending order. + *
  • Function throws away each corner for which there is a stronger corner + * at a distance less than maxDistance. + *
+ * + *

The function can be used to initialize a point-based tracker of an object.

+ * + *

Note: If the function is called with different values A and + * B of the parameter qualityLevel, and A + * > {B}, the vector of returned corners with qualityLevel=A will + * be the prefix of the output vector with qualityLevel=B.

+ * + * @param image Input 8-bit or floating-point 32-bit, single-channel image. + * @param corners Output vector of detected corners. + * @param maxCorners Maximum number of corners to return. If there are more + * corners than are found, the strongest of them is returned. + * @param qualityLevel Parameter characterizing the minimal accepted quality of + * image corners. The parameter value is multiplied by the best corner quality + * measure, which is the minimal eigenvalue (see "cornerMinEigenVal") or the + * Harris function response (see "cornerHarris"). The corners with the quality + * measure less than the product are rejected. For example, if the best corner + * has the quality measure = 1500, and the qualityLevel=0.01, then + * all the corners with the quality measure less than 15 are rejected. + * @param minDistance Minimum possible Euclidean distance between the returned + * corners. + * @param mask Optional region of interest. If the image is not empty (it needs + * to have the type CV_8UC1 and the same size as image), + * it specifies the region in which the corners are detected. + * @param blockSize Size of an average block for computing a derivative + * covariation matrix over each pixel neighborhood. See "cornerEigenValsAndVecs". + * @param useHarrisDetector Parameter indicating whether to use a Harris + * detector (see "cornerHarris") or "cornerMinEigenVal". + * @param k Free parameter of the Harris detector. 
+ * + * @see org.opencv.imgproc.Imgproc.goodFeaturesToTrack + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.video.Video#calcOpticalFlowPyrLK + */ + public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector, double k) + { + Mat corners_mat = corners; + goodFeaturesToTrack_0(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector, k); + + return; + } + +/** + *

Determines strong corners on an image.

+ * + *

The function finds the most prominent corners in the image or in the + * specified image region, as described in [Shi94]:

+ *
    + *
  • Function calculates the corner quality measure at every source image + * pixel using the "cornerMinEigenVal" or "cornerHarris". + *
  • Function performs a non-maximum suppression (the local maximums in *3 + * x 3* neighborhood are retained). + *
  • The corners with the minimal eigenvalue less than qualityLevel * + * max_(x,y) qualityMeasureMap(x,y) are rejected. + *
  • The remaining corners are sorted by the quality measure in the + * descending order. + *
  • Function throws away each corner for which there is a stronger corner + * at a distance less than maxDistance. + *
+ * + *

The function can be used to initialize a point-based tracker of an object.

+ * + *

Note: If the function is called with different values A and + * B of the parameter qualityLevel, and A + * > {B}, the vector of returned corners with qualityLevel=A will + * be the prefix of the output vector with qualityLevel=B.

+ * + * @param image Input 8-bit or floating-point 32-bit, single-channel image. + * @param corners Output vector of detected corners. + * @param maxCorners Maximum number of corners to return. If there are more + * corners than are found, the strongest of them is returned. + * @param qualityLevel Parameter characterizing the minimal accepted quality of + * image corners. The parameter value is multiplied by the best corner quality + * measure, which is the minimal eigenvalue (see "cornerMinEigenVal") or the + * Harris function response (see "cornerHarris"). The corners with the quality + * measure less than the product are rejected. For example, if the best corner + * has the quality measure = 1500, and the qualityLevel=0.01, then + * all the corners with the quality measure less than 15 are rejected. + * @param minDistance Minimum possible Euclidean distance between the returned + * corners. + * + * @see org.opencv.imgproc.Imgproc.goodFeaturesToTrack + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.video.Video#calcOpticalFlowPyrLK + */ + public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance) + { + Mat corners_mat = corners; + goodFeaturesToTrack_1(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance); + + return; + } + + + // + // C++: void grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL) + // + +/** + *

Runs the GrabCut algorithm.

+ * + *

The function implements the GrabCut image segmentation algorithm + * (http://en.wikipedia.org/wiki/GrabCut). + * See the sample grabcut.cpp to learn how to use the function.

+ * + * @param img Input 8-bit 3-channel image. + * @param mask Input/output 8-bit single-channel mask. The mask is initialized + * by the function when mode is set to GC_INIT_WITH_RECT. + * Its elements may have one of following values: + *
    + *
  • GC_BGD defines an obvious background pixel. + *
  • GC_FGD defines an obvious foreground (object) pixel. + *
  • GC_PR_BGD defines a possible background pixel. + *
  • GC_PR_FGD defines a possible foreground pixel. + *
+ * @param rect ROI containing a segmented object. The pixels outside of the ROI + * are marked as "obvious background". The parameter is only used when + * mode==GC_INIT_WITH_RECT. + * @param bgdModel Temporary array for the background model. Do not modify it + * while you are processing the same image. + * @param fgdModel Temporary arrays for the foreground model. Do not modify it + * while you are processing the same image. + * @param iterCount Number of iterations the algorithm should make before + * returning the result. Note that the result can be refined with further calls + * with mode==GC_INIT_WITH_MASK or mode==GC_EVAL. + * @param mode Operation mode that could be one of the following: + *
    + *
  • GC_INIT_WITH_RECT The function initializes the state and the mask + * using the provided rectangle. After that it runs iterCount + * iterations of the algorithm. + *
  • GC_INIT_WITH_MASK The function initializes the state using the + * provided mask. Note that GC_INIT_WITH_RECT and GC_INIT_WITH_MASK + * can be combined. Then, all the pixels outside of the ROI are automatically + * initialized with GC_BGD. + *
  • GC_EVAL The value means that the algorithm should just resume. + *
+ * + * @see org.opencv.imgproc.Imgproc.grabCut + */ + public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode) + { + + grabCut_0(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount, mode); + + return; + } + +/** + *

Runs the GrabCut algorithm.

+ * + *

The function implements the GrabCut image segmentation algorithm + * (http://en.wikipedia.org/wiki/GrabCut). + * See the sample grabcut.cpp to learn how to use the function.

+ * + * @param img Input 8-bit 3-channel image. + * @param mask Input/output 8-bit single-channel mask. The mask is initialized + * by the function when mode is set to GC_INIT_WITH_RECT. + * Its elements may have one of following values: + *
    + *
  • GC_BGD defines an obvious background pixel. + *
  • GC_FGD defines an obvious foreground (object) pixel. + *
  • GC_PR_BGD defines a possible background pixel. + *
  • GC_PR_FGD defines a possible foreground pixel. + *
+ * @param rect ROI containing a segmented object. The pixels outside of the ROI + * are marked as "obvious background". The parameter is only used when + * mode==GC_INIT_WITH_RECT. + * @param bgdModel Temporary array for the background model. Do not modify it + * while you are processing the same image. + * @param fgdModel Temporary arrays for the foreground model. Do not modify it + * while you are processing the same image. + * @param iterCount Number of iterations the algorithm should make before + * returning the result. Note that the result can be refined with further calls + * with mode==GC_INIT_WITH_MASK or mode==GC_EVAL. + * + * @see org.opencv.imgproc.Imgproc.grabCut + */ + public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount) + { + + grabCut_1(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount); + + return; + } + + + // + // C++: void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2) + // + +/** + *

Computes the undistortion and rectification transformation map.

+ * + *

The function computes the joint undistortion and rectification transformation + * and represents the result in the form of maps for "remap". The undistorted + * image looks like original, as if it is captured with a camera using the + * camera matrix =newCameraMatrix and zero distortion. In case of a + * monocular camera, newCameraMatrix is usually equal to + * cameraMatrix, or it can be computed by "getOptimalNewCameraMatrix" + * for a better control over scaling. In case of a stereo camera, + * newCameraMatrix is normally set to P1 or + * P2 computed by "stereoRectify".

+ * + *

Also, this new camera is oriented differently in the coordinate space, + * according to R. That, for example, helps to align two heads of a + * stereo camera so that the epipolar lines on both images become horizontal and + * have the same y- coordinate (in case of a horizontally aligned stereo + * camera).

+ * + *

The function actually builds the maps for the inverse mapping algorithm that + * is used by "remap". That is, for each pixel (u, v) in the + * destination (corrected and rectified) image, the function computes the + * corresponding coordinates in the source image (that is, in the original image + * from camera). The following process is applied:

+ * + *

x <- (u - (c')_x)/(f')_x + * y <- (v - (c')_y)/(f')_y + * ([X Y W]) ^T <- R^(-1)*[x y 1]^T + * x' <- X/W + * y' <- Y/W + * x" <- x' (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + * y" <- y' (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + p_1(r^2 + 2 y'^2) + 2 p_2 x' y' + * map_x(u,v) <- x" f_x + c_x + * map_y(u,v) <- y" f_y + c_y

+ * + *

where (k_1, k_2, p_1, p_2[, k_3]) are the distortion coefficients.

+ * + *

In case of a stereo camera, this function is called twice: once for each + * camera head, after "stereoRectify", which in its turn is called after + * "stereoCalibrate". But if the stereo camera was not calibrated, it is still + * possible to compute the rectification transformations directly from the + * fundamental matrix using "stereoRectifyUncalibrated". For each camera, the + * function computes homography H as the rectification + * transformation in a pixel domain, not a rotation matrix R in 3D + * space. R can be computed from H as

+ * + *

R = cameraMatrix ^(-1) * H * cameraMatrix

+ * + *

where cameraMatrix can be chosen arbitrarily.

+ * + * @param cameraMatrix Input camera matrix A= + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param R Optional rectification transformation in the object space (3x3 + * matrix). R1 or R2, computed by "stereoRectify" can + * be passed here. If the matrix is empty, the identity transformation is + * assumed. In cvInitUndistortMap R assumed to be an identity + * matrix. + * @param newCameraMatrix New camera matrix A'= + *

|f_x' 0 c_x'| + * |0 f_y' c_y'| + * |0 0 1| + * .

+ * @param size Undistorted image size. + * @param m1type Type of the first output map that can be CV_32FC1 + * or CV_16SC2. See "convertMaps" for details. + * @param map1 The first output map. + * @param map2 The second output map. + * + * @see org.opencv.imgproc.Imgproc.initUndistortRectifyMap + */ + public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) + { + + initUndistortRectifyMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj); + + return; + } + + + // + // C++: float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat& map1, Mat& map2, int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0) + // + + public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2, int projType, double alpha) + { + + float retVal = initWideAngleProjMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj, projType, alpha); + + return retVal; + } + + public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2) + { + + float retVal = initWideAngleProjMap_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj); + + return retVal; + } + + + // + // C++: void integral(Mat src, Mat& sum, int sdepth = -1) + // + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate a sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in a constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

It makes possible to do a fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sdepth desired depth of the integral and the tilted integral images, + * CV_32S, CV_32F, or CV_64F. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral(Mat src, Mat sum, int sdepth) + { + + integral_0(src.nativeObj, sum.nativeObj, sdepth); + + return; + } + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate a sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in a constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

It makes possible to do a fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral(Mat src, Mat sum) + { + + integral_1(src.nativeObj, sum.nativeObj); + + return; + } + + + // + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1) + // + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate a sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in a constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

It makes possible to do a fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * @param sdepth desired depth of the integral and the tilted integral images, + * CV_32S, CV_32F, or CV_64F. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth) + { + + integral2_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth); + + return; + } + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate a sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in a constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

It makes possible to do a fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral2(Mat src, Mat sum, Mat sqsum) + { + + integral2_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj); + + return; + } + + + // + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1) + // + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate a sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in a constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

It makes possible to do a fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * @param tilted integral for the image rotated by 45 degrees; it is + * (W+1)x(H+1) array with the same data type as sum. + * @param sdepth desired depth of the integral and the tilted integral images, + * CV_32S, CV_32F, or CV_64F. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth) + { + + integral3_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth); + + return; + } + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate a sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in a constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

It makes possible to do a fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * @param tilted integral for the image rotated by 45 degrees; it is + * (W+1)x(H+1) array with the same data type as sum. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted) + { + + integral3_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj); + + return; + } + + + // + // C++: float intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true) + // + + public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12, boolean handleNested) + { + + float retVal = intersectConvexConvex_0(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj, handleNested); + + return retVal; + } + + public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12) + { + + float retVal = intersectConvexConvex_1(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj); + + return retVal; + } + + + // + // C++: void invertAffineTransform(Mat M, Mat& iM) + // + +/** + *

Inverts an affine transformation.

+ * + *

The function computes an inverse affine transformation represented by 2 x + * 3 matrix M :

+ * + *

a_11 a_12 b_1 + * a_21 a_22 b_2

+ * + *

The result is also a 2 x 3 matrix of the same type as + * M.

+ * + * @param M Original affine transformation. + * @param iM Output reverse affine transformation. + * + * @see org.opencv.imgproc.Imgproc.invertAffineTransform + */ + public static void invertAffineTransform(Mat M, Mat iM) + { + + invertAffineTransform_0(M.nativeObj, iM.nativeObj); + + return; + } + + + // + // C++: bool isContourConvex(vector_Point contour) + // + +/** + *

Tests a contour convexity.

+ * + *

The function tests whether the input contour is convex or not. The contour + * must be simple, that is, without self-intersections. Otherwise, the function + * output is undefined.

+ * + * @param contour Input vector of 2D points, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * + * @see org.opencv.imgproc.Imgproc.isContourConvex + */ + public static boolean isContourConvex(MatOfPoint contour) + { + Mat contour_mat = contour; + boolean retVal = isContourConvex_0(contour_mat.nativeObj); + + return retVal; + } + + + // + // C++: double matchShapes(Mat contour1, Mat contour2, int method, double parameter) + // + +/** + *

Compares two shapes.

+ * + *

The function compares two shapes. All three implemented methods use the Hu + * invariants (see "HuMoments") as follows (A denotes object1,B + * denotes object2):

+ *
    + *
  • method=CV_CONTOURS_MATCH_I1 + *
+ * + *

I_1(A,B) = sum(by: i=1...7) <= ft|1/(m^A_i) - 1/(m^B_i) right|

+ * + *
    + *
  • method=CV_CONTOURS_MATCH_I2 + *
+ * + *

I_2(A,B) = sum(by: i=1...7) <= ft|m^A_i - m^B_i right|

+ * + *
    + *
  • method=CV_CONTOURS_MATCH_I3 + *
+ * + *

I_3(A,B) = max _(i=1...7)(<= ft| m^A_i - m^B_i right|)/(<= ft| m^A_i + * right|)

+ * + *

where

+ * + *

m^A_i = sign(h^A_i) * log(h^A_i) + * m^B_i = sign(h^B_i) * log(h^B_i)

+ * + *

and h^A_i, h^B_i are the Hu moments of A and B, + * respectively.

+ * + * @param contour1 a contour1 + * @param contour2 a contour2 + * @param method Comparison method: CV_CONTOURS_MATCH_I1, + * CV_CONTOURS_MATCH_I2 \ + *

or CV_CONTOURS_MATCH_I3 (see the details below).

+ * @param parameter Method-specific parameter (not supported now). + * + * @see org.opencv.imgproc.Imgproc.matchShapes + */ + public static double matchShapes(Mat contour1, Mat contour2, int method, double parameter) + { + + double retVal = matchShapes_0(contour1.nativeObj, contour2.nativeObj, method, parameter); + + return retVal; + } + + + // + // C++: void matchTemplate(Mat image, Mat templ, Mat& result, int method) + // + +/** + *

Compares a template against overlapped image regions.

+ * + *

The function slides through image, compares the overlapped + * patches of size w x h against templ using the specified + * method and stores the comparison results in result. Here are the + * formulae for the available comparison methods (I denotes + * image, T template, R + * result). The summation is done over template and/or the image + * patch: x' = 0...w-1, y' = 0...h-1

+ *
    + *
  • method=CV_TM_SQDIFF + *
+ * + *

R(x,y)= sum(by: x',y')(T(x',y')-I(x+x',y+y'))^2

+ * + *
    + *
  • method=CV_TM_SQDIFF_NORMED + *
+ * + *

R(x,y)= (sum_(x',y')(T(x',y')-I(x+x',y+y'))^2)/(sqrt(sum_(x',y')T(x',y')^2 + * * sum_(x',y') I(x+x',y+y')^2))

+ * + *
    + *
  • method=CV_TM_CCORR + *
+ * + *

R(x,y)= sum(by: x',y')(T(x',y') * I(x+x',y+y'))

+ * + *
    + *
  • method=CV_TM_CCORR_NORMED + *
+ * + *

R(x,y)= (sum_(x',y')(T(x',y') * I(x+x',y+y')))/(sqrt(sum_(x',y')T(x',y')^2 + * * sum_(x',y') I(x+x',y+y')^2))

+ * + *
    + *
  • method=CV_TM_CCOEFF + *
+ * + *

R(x,y)= sum(by: x',y')(T'(x',y') * I'(x+x',y+y'))

+ * + *

where

+ * + *

T'(x',y')=T(x',y') - 1/(w * h) * sum(by: x'',y'') T(x'',y'') + * I'(x+x',y+y')=I(x+x',y+y') - 1/(w * h) * sum(by: x'',y'') I(x+x'',y+y'') + *

+ * + *
    + *
  • method=CV_TM_CCOEFF_NORMED + *
+ * + *

R(x,y)= (sum_(x',y')(T'(x',y') * I'(x+x',y+y')))/(sqrt(sum_(x',y')T'(x',y')^2 + * * sum_(x',y') I'(x+x',y+y')^2))

+ * + *

After the function finishes the comparison, the best matches can be found as + * global minimums (when CV_TM_SQDIFF was used) or maximums (when + * CV_TM_CCORR or CV_TM_CCOEFF was used) using the + * "minMaxLoc" function. In case of a color image, template summation in the + * numerator and each sum in the denominator is done over all of the channels + * and separate mean values are used for each channel. That is, the function can + * take a color template and a color image. The result will still be a + * single-channel image, which is easier to analyze.

+ * + * @param image Image where the search is running. It must be 8-bit or 32-bit + * floating-point. + * @param templ Searched template. It must be not greater than the source image + * and have the same data type. + * @param result Map of comparison results. It must be single-channel 32-bit + * floating-point. If image is W x H and templ + * is w x h, then result is (W-w+1) x(H-h+1). + * @param method Parameter specifying the comparison method (see below). + * + * @see org.opencv.imgproc.Imgproc.matchTemplate + */ + public static void matchTemplate(Mat image, Mat templ, Mat result, int method) + { + + matchTemplate_0(image.nativeObj, templ.nativeObj, result.nativeObj, method); + + return; + } + + + // + // C++: void medianBlur(Mat src, Mat& dst, int ksize) + // + +/** + *

Blurs an image using the median filter.

+ * + *

The function smoothes an image using the median filter with the ksize x + * ksize aperture. Each channel of a multi-channel image is processed + * independently. In-place operation is supported.

+ * + * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or + * 5, the image depth should be CV_8U, CV_16U, or + * CV_32F, for larger aperture sizes, it can only be + * CV_8U. + * @param dst destination array of the same size and type as src. + * @param ksize aperture linear size; it must be odd and greater than 1, for + * example: 3, 5, 7... + * + * @see org.opencv.imgproc.Imgproc.medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void medianBlur(Mat src, Mat dst, int ksize) + { + + medianBlur_0(src.nativeObj, dst.nativeObj, ksize); + + return; + } + + + // + // C++: RotatedRect minAreaRect(vector_Point2f points) + // + +/** + *

Finds a rotated rectangle of the minimum area enclosing the input 2D point + * set.

+ * + *

The function calculates and returns the minimum-area bounding rectangle + * (possibly rotated) for a specified point set. See the OpenCV sample + * minarea.cpp.

+ * + * @param points Input vector of 2D points, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * + * @see org.opencv.imgproc.Imgproc.minAreaRect + */ + public static RotatedRect minAreaRect(MatOfPoint2f points) + { + Mat points_mat = points; + RotatedRect retVal = new RotatedRect(minAreaRect_0(points_mat.nativeObj)); + + return retVal; + } + + + // + // C++: void minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius) + // + +/** + *

Finds a circle of the minimum area enclosing a 2D point set.

+ * + *

The function finds the minimal enclosing circle of a 2D point set using an + * iterative algorithm. See the OpenCV sample minarea.cpp.

+ * + * @param points Input vector of 2D points, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * @param center Output center of the circle. + * @param radius Output radius of the circle. + * + * @see org.opencv.imgproc.Imgproc.minEnclosingCircle + */ + public static void minEnclosingCircle(MatOfPoint2f points, Point center, float[] radius) + { + Mat points_mat = points; + double[] center_out = new double[2]; + double[] radius_out = new double[1]; + minEnclosingCircle_0(points_mat.nativeObj, center_out, radius_out); + if(center!=null){ center.x = center_out[0]; center.y = center_out[1]; } + if(radius!=null) radius[0] = (float)radius_out[0]; + return; + } + + + // + // C++: Moments moments(Mat array, bool binaryImage = false) + // + +/** + *

Calculates all of the moments up to the third order of a polygon or + * rasterized shape.

+ * + *

The function computes moments, up to the 3rd order, of a vector shape or a + * rasterized shape. The results are returned in the structure Moments + * defined as:

+ * + *

// C++ code:

+ * + *

class Moments

+ * + * + *

public:

+ * + *

Moments();

+ * + *

Moments(double m00, double m10, double m01, double m20, double m11,

+ * + *

double m02, double m30, double m21, double m12, double m03);

+ * + *

Moments(const CvMoments& moments);

+ * + *

operator CvMoments() const;

+ * + *

// spatial moments

+ * + *

double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;

+ * + *

// central moments

+ * + *

double mu20, mu11, mu02, mu30, mu21, mu12, mu03;

+ * + *

// central normalized moments

+ * + *

double nu20, nu11, nu02, nu30, nu21, nu12, nu03;

+ * + * + *

In case of a raster image, the spatial moments Moments.m_(ji) are + * computed as:

+ * + *

m _(ji)= sum(by: x,y)(array(x,y) * x^j * y^i)

+ * + *

The central moments Moments.mu_(ji) are computed as:

+ * + *

mu _(ji)= sum(by: x,y)(array(x,y) * (x - x")^j * (y - y")^i)

+ * + *

where (x", y") is the mass center:

+ * + *

x" = (m_10)/(m_(00)), y" = (m_01)/(m_(00))

+ * + *

The normalized central moments Moments.nu_(ij) are computed as:

+ * + *

nu _(ji)= (mu_(ji))/(m_(00)^((i+j)/2+1)).

+ * + *

Note:

+ * + *

mu_00=m_00, nu_00=1 nu_10=mu_10=mu_01=mu_10=0, + * hence the values are not stored.

+ * + *

The moments of a contour are defined in the same way but computed using the + * Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to + * a limited raster resolution, the moments computed for a contour are slightly + * different from the moments computed for the same rasterized contour.

+ * + *

Note:

+ * + *

Since the contour moments are computed using Green formula, you may get + * seemingly odd results for contours with self-intersections, e.g. a zero area + * (m00) for butterfly-shaped contours.

+ * + * @param array Raster image (single-channel, 8-bit or floating-point 2D array) + * or an array (1 x N or N x 1) of 2D points (Point + * or Point2f). + * @param binaryImage If it is true, all non-zero image pixels are treated as + * 1's. The parameter is used for images only. + * + * @see org.opencv.imgproc.Imgproc.moments + * @see org.opencv.imgproc.Imgproc#contourArea + * @see org.opencv.imgproc.Imgproc#arcLength + */ + public static Moments moments(Mat array, boolean binaryImage) + { + + Moments retVal = new Moments(moments_0(array.nativeObj, binaryImage)); + + return retVal; + } + +/** + *

Calculates all of the moments up to the third order of a polygon or + * rasterized shape.

+ * + *

The function computes moments, up to the 3rd order, of a vector shape or a + * rasterized shape. The results are returned in the structure Moments + * defined as:

+ * + *

// C++ code:

+ * + *

class Moments

+ * + * + *

public:

+ * + *

Moments();

+ * + *

Moments(double m00, double m10, double m01, double m20, double m11,

+ * + *

double m02, double m30, double m21, double m12, double m03);

+ * + *

Moments(const CvMoments& moments);

+ * + *

operator CvMoments() const;

+ * + *

// spatial moments

+ * + *

double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;

+ * + *

// central moments

+ * + *

double mu20, mu11, mu02, mu30, mu21, mu12, mu03;

+ * + *

// central normalized moments

+ * + *

double nu20, nu11, nu02, nu30, nu21, nu12, nu03;

+ * + * + *

In case of a raster image, the spatial moments Moments.m_(ji) are + * computed as:

+ * + *

m _(ji)= sum(by: x,y)(array(x,y) * x^j * y^i)

+ * + *

The central moments Moments.mu_(ji) are computed as:

+ * + *

mu _(ji)= sum(by: x,y)(array(x,y) * (x - x")^j * (y - y")^i)

+ * + *

where (x", y") is the mass center:

+ * + *

x" = (m_10)/(m_(00)), y" = (m_01)/(m_(00))

+ * + *

The normalized central moments Moments.nu_(ij) are computed as:

+ * + *

nu _(ji)= (mu_(ji))/(m_(00)^((i+j)/2+1)).

+ * + *

Note:

+ * + *

mu_00=m_00, nu_00=1 nu_10=mu_10=mu_01=mu_10=0, + * hence the values are not stored.

+ * + *

The moments of a contour are defined in the same way but computed using the + * Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to + * a limited raster resolution, the moments computed for a contour are slightly + * different from the moments computed for the same rasterized contour.

+ * + *

Note:

+ * + *

Since the contour moments are computed using Green formula, you may get + * seemingly odd results for contours with self-intersections, e.g. a zero area + * (m00) for butterfly-shaped contours.

+ * + * @param array Raster image (single-channel, 8-bit or floating-point 2D array) + * or an array (1 x N or N x 1) of 2D points (Point + * or Point2f). + * + * @see org.opencv.imgproc.Imgproc.moments + * @see org.opencv.imgproc.Imgproc#contourArea + * @see org.opencv.imgproc.Imgproc#arcLength + */ + public static Moments moments(Mat array) + { + + Moments retVal = new Moments(moments_1(array.nativeObj)); + + return retVal; + } + + + // + // C++: void morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + // + +/** + *

Performs advanced morphological transformations.

+ * + *

The function can perform advanced morphological transformations using an + * erosion and dilation as basic operations.

+ * + *

Opening operation:

+ * + *

dst = open(src, element)= dilate(erode(src, element))

+ * + *

Closing operation:

+ * + *

dst = close(src, element)= erode(dilate(src, element))

+ * + *

Morphological gradient:

+ * + *

dst = morph_grad(src, element)= dilate(src, element)- erode(src, + * element)

+ * + *

"Top hat":

+ * + *

dst = tophat(src, element)= src - open(src, element)

+ * + *

"Black hat":

+ * + *

dst = blackhat(src, element)= close(src, element)- src

+ * + *

Any of the operations can be done in-place. In case of multi-channel images, + * each channel is processed independently.

+ * + * @param src Source image. The number of channels can be arbitrary. The depth + * should be one of CV_8U, CV_16U, CV_16S, + * CV_32F" or CV_64F". + * @param dst Destination image of the same size and type as src. + * @param op Type of a morphological operation that can be one of the following: + *
    + *
  • MORPH_OPEN - an opening operation + *
  • MORPH_CLOSE - a closing operation + *
  • MORPH_GRADIENT - a morphological gradient + *
  • MORPH_TOPHAT - "top hat" + *
  • MORPH_BLACKHAT - "black hat" + *
+ * @param kernel a kernel + * @param anchor a anchor + * @param iterations Number of times erosion and dilation are applied. + * @param borderType Pixel extrapolation method. See "borderInterpolate" for + * details. + * @param borderValue Border value in case of a constant border. The default + * value has a special meaning. See "createMorphologyFilter" for details. + * + * @see org.opencv.imgproc.Imgproc.morphologyEx + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) + { + + morphologyEx_0(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Performs advanced morphological transformations.

+ * + *

The function can perform advanced morphological transformations using an + * erosion and dilation as basic operations.

+ * + *

Opening operation:

+ * + *

dst = open(src, element)= dilate(erode(src, element))

+ * + *

Closing operation:

+ * + *

dst = close(src, element)= erode(dilate(src, element))

+ * + *

Morphological gradient:

+ * + *

dst = morph_grad(src, element)= dilate(src, element)- erode(src, + * element)

+ * + *

"Top hat":

+ * + *

dst = tophat(src, element)= src - open(src, element)

+ * + *

"Black hat":

+ * + *

dst = blackhat(src, element)= close(src, element)- src

+ * + *

Any of the operations can be done in-place. In case of multi-channel images, + * each channel is processed independently.

+ * + * @param src Source image. The number of channels can be arbitrary. The depth + * should be one of CV_8U, CV_16U, CV_16S, + * CV_32F" or CV_64F". + * @param dst Destination image of the same size and type as src. + * @param op Type of a morphological operation that can be one of the following: + *
    + *
  • MORPH_OPEN - an opening operation + *
  • MORPH_CLOSE - a closing operation + *
  • MORPH_GRADIENT - a morphological gradient + *
  • MORPH_TOPHAT - "top hat" + *
  • MORPH_BLACKHAT - "black hat" + *
+ * @param kernel a kernel + * @param anchor a anchor + * @param iterations Number of times erosion and dilation are applied. + * + * @see org.opencv.imgproc.Imgproc.morphologyEx + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations) + { + + morphologyEx_1(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations); + + return; + } + +/** + *

Performs advanced morphological transformations.

+ * + *

The function can perform advanced morphological transformations using an + * erosion and dilation as basic operations.

+ * + *

Opening operation:

+ * + *

dst = open(src, element)= dilate(erode(src, element))

+ * + *

Closing operation:

+ * + *

dst = close(src, element)= erode(dilate(src, element))

+ * + *

Morphological gradient:

+ * + *

dst = morph_grad(src, element)= dilate(src, element)- erode(src, + * element)

+ * + *

"Top hat":

+ * + *

dst = tophat(src, element)= src - open(src, element)

+ * + *

"Black hat":

+ * + *

dst = blackhat(src, element)= close(src, element)- src

+ * + *

Any of the operations can be done in-place. In case of multi-channel images, + * each channel is processed independently.

+ * + * @param src Source image. The number of channels can be arbitrary. The depth + * should be one of CV_8U, CV_16U, CV_16S, + * CV_32F" or CV_64F". + * @param dst Destination image of the same size and type as src. + * @param op Type of a morphological operation that can be one of the following: + *
    + *
  • MORPH_OPEN - an opening operation + *
  • MORPH_CLOSE - a closing operation + *
  • MORPH_GRADIENT - a morphological gradient + *
  • MORPH_TOPHAT - "top hat" + *
  • MORPH_BLACKHAT - "black hat" + *
+ * @param kernel a kernel + * + * @see org.opencv.imgproc.Imgproc.morphologyEx + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel) + { + + morphologyEx_2(src.nativeObj, dst.nativeObj, op, kernel.nativeObj); + + return; + } + + + // + // C++: Point2d phaseCorrelate(Mat src1, Mat src2, Mat window = Mat()) + // + +/** + *

The function is used to detect translational shifts that occur between two + * images. The operation takes advantage of the Fourier shift theorem for + * detecting the translational shift in the frequency domain. It can be used for + * fast image registration as well as motion estimation. For more information + * please see http://en.wikipedia.org/wiki/Phase_correlation.

+ * + *

Calculates the cross-power spectrum of two supplied source arrays. The arrays + * are padded if needed with "getOptimalDFTSize".

+ * + *

Return value: detected phase shift (sub-pixel) between the two arrays.

+ * + *

The function performs the following equations

+ *
    + *
  • First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann_function) + * to each image to remove possible edge effects. This window is cached until + * the array size changes to speed up processing time. + *
  • Next it computes the forward DFTs of each source array: + *
+ * + *

mathbf(G)_a = mathcal(F)(src_1), mathbf(G)_b = mathcal(F)(src_2)

+ * + *

where mathcal(F) is the forward DFT.

+ *
    + *
  • It then computes the cross-power spectrum of each frequency domain + * array: + *
+ * + *

R = (mathbf(G)_a mathbf(G)_b^*)/(|mathbf(G)_a mathbf(G)_b^*|)

+ * + *
    + *
  • Next the cross-correlation is converted back into the time domain via + * the inverse DFT: + *
+ * + *

r = mathcal(F)^(-1)(R)

+ * + *
    + *
  • Finally, it computes the peak location and computes a 5x5 weighted + * centroid around the peak to achieve sub-pixel accuracy. + *
+ * + *

(Delta x, Delta y) = weightedCentroid (arg max_((x, y))(r))

+ * + *
    + *
  • If non-zero, the response parameter is computed as the sum of the + * elements of r within the 5x5 centroid around the peak location. It is + * normalized to a maximum of 1 (meaning there is a single peak) and will be + * smaller when there are multiple peaks. + *
+ * + * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) + * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) + * @param window Floating point array with windowing coefficients to reduce edge + * effects (optional). + * + * @see org.opencv.imgproc.Imgproc.phaseCorrelate + * @see org.opencv.imgproc.Imgproc#createHanningWindow + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static Point phaseCorrelate(Mat src1, Mat src2, Mat window) + { + + Point retVal = new Point(phaseCorrelate_0(src1.nativeObj, src2.nativeObj, window.nativeObj)); + + return retVal; + } + +/** + *

The function is used to detect translational shifts that occur between two + * images. The operation takes advantage of the Fourier shift theorem for + * detecting the translational shift in the frequency domain. It can be used for + * fast image registration as well as motion estimation. For more information + * please see http://en.wikipedia.org/wiki/Phase_correlation.

+ * + *

Calculates the cross-power spectrum of two supplied source arrays. The arrays + * are padded if needed with "getOptimalDFTSize".

+ * + *

Return value: detected phase shift (sub-pixel) between the two arrays.

+ * + *

The function performs the following equations

+ *
    + *
  • First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann_function) + * to each image to remove possible edge effects. This window is cached until + * the array size changes to speed up processing time. + *
  • Next it computes the forward DFTs of each source array: + *
+ * + *

mathbf(G)_a = mathcal(F)(src_1), mathbf(G)_b = mathcal(F)(src_2)

+ * + *

where mathcal(F) is the forward DFT.

+ *
    + *
  • It then computes the cross-power spectrum of each frequency domain + * array: + *
+ * + *

R = (mathbf(G)_a mathbf(G)_b^*)/(|mathbf(G)_a mathbf(G)_b^*|)

+ * + *
    + *
  • Next the cross-correlation is converted back into the time domain via + * the inverse DFT: + *
+ * + *

r = mathcal(F)^(-1)(R)

+ * + *
    + *
  • Finally, it computes the peak location and computes a 5x5 weighted + * centroid around the peak to achieve sub-pixel accuracy. + *
+ * + *

(Delta x, Delta y) = weightedCentroid (arg max_((x, y))(r))

+ * + *
    + *
  • If non-zero, the response parameter is computed as the sum of the + * elements of r within the 5x5 centroid around the peak location. It is + * normalized to a maximum of 1 (meaning there is a single peak) and will be + * smaller when there are multiple peaks. + *
+ * + * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) + * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) + * + * @see org.opencv.imgproc.Imgproc.phaseCorrelate + * @see org.opencv.imgproc.Imgproc#createHanningWindow + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static Point phaseCorrelate(Mat src1, Mat src2) + { + + Point retVal = new Point(phaseCorrelate_1(src1.nativeObj, src2.nativeObj)); + + return retVal; + } + + + // + // C++: Point2d phaseCorrelateRes(Mat src1, Mat src2, Mat window, double* response = 0) + // + + public static Point phaseCorrelateRes(Mat src1, Mat src2, Mat window, double[] response) + { + double[] response_out = new double[1]; + Point retVal = new Point(phaseCorrelateRes_0(src1.nativeObj, src2.nativeObj, window.nativeObj, response_out)); + if(response!=null) response[0] = (double)response_out[0]; + return retVal; + } + + public static Point phaseCorrelateRes(Mat src1, Mat src2, Mat window) + { + + Point retVal = new Point(phaseCorrelateRes_1(src1.nativeObj, src2.nativeObj, window.nativeObj)); + + return retVal; + } + + + // + // C++: double pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist) + // + +/** + *

Performs a point-in-contour test.

+ * + *

The function determines whether the point is inside a contour, outside, or + * lies on an edge (or coincides with a vertex). It returns positive (inside), + * negative (outside), or zero (on an edge) value, correspondingly. When + * measureDist=false, the return value is +1, -1, and 0, + * respectively. Otherwise, the return value is a signed distance between the + * point and the nearest contour edge.

+ * + *

See below a sample output of the function where each image pixel is tested + * against the contour.

+ * + * @param contour Input contour. + * @param pt Point tested against the contour. + * @param measureDist If true, the function estimates the signed distance from + * the point to the nearest contour edge. Otherwise, the function only checks if + * the point is inside a contour or not. + * + * @see org.opencv.imgproc.Imgproc.pointPolygonTest + */ + public static double pointPolygonTest(MatOfPoint2f contour, Point pt, boolean measureDist) + { + Mat contour_mat = contour; + double retVal = pointPolygonTest_0(contour_mat.nativeObj, pt.x, pt.y, measureDist); + + return retVal; + } + + + // + // C++: void preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates a feature map for corner detection.

+ * + *

The function calculates the complex spatial derivative-based function of the + * source image

+ * + *

dst = (D_x src)^2 * D_(yy) src + (D_y src)^2 * D_(xx) src - 2 D_x src * + * D_y src * D_(xy) src

+ * + *

where D_x,D_y are the first image derivatives, + * D_(xx),D_(yy) are the second image derivatives, and + * D_(xy) is the mixed derivative. + * The corners can be found as local maximums of the functions, as shown below: + *

+ * + *

// C++ code:

+ * + *

Mat corners, dilated_corners;

+ * + *

preCornerDetect(image, corners, 3);

+ * + *

// dilation with 3x3 rectangular structuring element

+ * + *

dilate(corners, dilated_corners, Mat(), 1);

+ * + *

Mat corner_mask = corners == dilated_corners;

+ * + *

+ * + * @param src Source single-channel 8-bit of floating-point image. + * @param dst Output image that has the type CV_32F and the same + * size as src. + * @param ksize Aperture size of the "Sobel". + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.preCornerDetect + */ + public static void preCornerDetect(Mat src, Mat dst, int ksize, int borderType) + { + + preCornerDetect_0(src.nativeObj, dst.nativeObj, ksize, borderType); + + return; + } + +/** + *

Calculates a feature map for corner detection.

+ * + *

The function calculates the complex spatial derivative-based function of the + * source image

+ * + *

dst = (D_x src)^2 * D_(yy) src + (D_y src)^2 * D_(xx) src - 2 D_x src * + * D_y src * D_(xy) src

+ * + *

where D_x,D_y are the first image derivatives, + * D_(xx),D_(yy) are the second image derivatives, and + * D_(xy) is the mixed derivative. + * The corners can be found as local maximums of the functions, as shown below: + *

+ * + *

// C++ code:

+ * + *

Mat corners, dilated_corners;

+ * + *

preCornerDetect(image, corners, 3);

+ * + *

// dilation with 3x3 rectangular structuring element

+ * + *

dilate(corners, dilated_corners, Mat(), 1);

+ * + *

Mat corner_mask = corners == dilated_corners;

+ * + *

+ * + * @param src Source single-channel 8-bit of floating-point image. + * @param dst Output image that has the type CV_32F and the same + * size as src. + * @param ksize Aperture size of the "Sobel". + * + * @see org.opencv.imgproc.Imgproc.preCornerDetect + */ + public static void preCornerDetect(Mat src, Mat dst, int ksize) + { + + preCornerDetect_1(src.nativeObj, dst.nativeObj, ksize); + + return; + } + + + // + // C++: void pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image and downsamples it.

+ * + *

The function performs the downsampling step of the Gaussian pyramid + * construction. First, it convolves the source image with the kernel:

+ * + *

1/256 1 4 6 4 1 + * 4 16 24 16 4 + * 6 24 36 24 6 + * 4 16 24 16 4 + * 1 4 6 4 1

+ * + *

Then, it downsamples the image by rejecting even rows and columns.

+ * + * @param src input image. + * @param dst output image; it has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size((src.cols+1)/2, (src.rows+1)/2), but in any case, the + * following conditions should be satisfied: + * + *

ltBR gt| dstsize.width *2-src.cols| <= 2 + * |dstsize.height *2-src.rows| <= 2

+ * @param borderType a borderType + * + * @see org.opencv.imgproc.Imgproc.pyrDown + */ + public static void pyrDown(Mat src, Mat dst, Size dstsize, int borderType) + { + + pyrDown_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType); + + return; + } + +/** + *

Blurs an image and downsamples it.

+ * + *

The function performs the downsampling step of the Gaussian pyramid + * construction. First, it convolves the source image with the kernel:

+ * + *

1/256 1 4 6 4 1 + * 4 16 24 16 4 + * 6 24 36 24 6 + * 4 16 24 16 4 + * 1 4 6 4 1

+ * + *

Then, it downsamples the image by rejecting even rows and columns.

+ * + * @param src input image. + * @param dst output image; it has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size((src.cols+1)/2, (src.rows+1)/2), but in any case, the + * following conditions should be satisfied: + * + *

ltBR gt| dstsize.width *2-src.cols| <= 2 + * |dstsize.height *2-src.rows| <= 2

+ * + * @see org.opencv.imgproc.Imgproc.pyrDown + */ + public static void pyrDown(Mat src, Mat dst, Size dstsize) + { + + pyrDown_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height); + + return; + } + +/** + *

Blurs an image and downsamples it.

+ * + *

The function performs the downsampling step of the Gaussian pyramid + * construction. First, it convolves the source image with the kernel:

+ * + *

1/256 1 4 6 4 1 + * 4 16 24 16 4 + * 6 24 36 24 6 + * 4 16 24 16 4 + * 1 4 6 4 1

+ * + *

Then, it downsamples the image by rejecting even rows and columns.

+ * + * @param src input image. + * @param dst output image; it has the specified size and the same type as + * src. + * + * @see org.opencv.imgproc.Imgproc.pyrDown + */ + public static void pyrDown(Mat src, Mat dst) + { + + pyrDown_2(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1)) + // + +/** + *

Performs initial step of meanshift segmentation of an image.

+ * + *

The function implements the filtering stage of meanshift segmentation, that + * is, the output of the function is the filtered "posterized" image with color + * gradients and fine-grain texture flattened. At every pixel (X,Y) + * of the input image (or down-sized input image, see below) the function + * executes meanshift iterations, that is, the pixel (X,Y) + * neighborhood in the joint space-color hyperspace is considered:

+ * + *

(x,y): X- sp <= x <= X+ sp, Y- sp <= y <= Y+ sp, ||(R,G,B)-(r,g,b)|| <= + * sr

+ * + *

where (R,G,B) and (r,g,b) are the vectors of color + * components at (X,Y) and (x,y), respectively + * (though, the algorithm does not depend on the color space used, so any + * 3-component color space can be used instead). Over the neighborhood the + * average spatial value (X',Y') and average color vector + * (R',G',B') are found and they act as the neighborhood center on + * the next iteration:

+ * + *

(X,Y)~(X',Y'), (R,G,B)~(R',G',B').

+ * + *

After the iterations over, the color components of the initial pixel (that + * is, the pixel from where the iterations started) are set to the final value + * (average color at the last iteration):

+ * + *

I(X,Y) <- (R*,G*,B*)

+ * + *

When maxLevel > 0, the gaussian pyramid of maxLevel+1 + * levels is built, and the above procedure is run on the smallest layer first. + * After that, the results are propagated to the larger layer and the iterations + * are run again only on those pixels where the layer colors differ by more than + * sr from the lower-resolution layer of the pyramid. That makes + * boundaries of color regions sharper. Note that the results will be actually + * different from the ones obtained by running the meanshift procedure on the + * whole original image (i.e. when maxLevel==0).

+ * + * @param src The source 8-bit, 3-channel image. + * @param dst The destination image of the same format and the same size as the + * source. + * @param sp The spatial window radius. + * @param sr The color window radius. + * @param maxLevel Maximum level of the pyramid for the segmentation. + * @param termcrit Termination criteria: when to stop meanshift iterations. + * + * @see org.opencv.imgproc.Imgproc.pyrMeanShiftFiltering + */ + public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel, TermCriteria termcrit) + { + + pyrMeanShiftFiltering_0(src.nativeObj, dst.nativeObj, sp, sr, maxLevel, termcrit.type, termcrit.maxCount, termcrit.epsilon); + + return; + } + +/** + *

Performs initial step of meanshift segmentation of an image.

+ * + *

The function implements the filtering stage of meanshift segmentation, that + * is, the output of the function is the filtered "posterized" image with color + * gradients and fine-grain texture flattened. At every pixel (X,Y) + * of the input image (or down-sized input image, see below) the function + * executes meanshift iterations, that is, the pixel (X,Y) + * neighborhood in the joint space-color hyperspace is considered:

+ * + *

(x,y): X- sp <= x <= X+ sp, Y- sp <= y <= Y+ sp, ||(R,G,B)-(r,g,b)|| <= + * sr

+ * + *

where (R,G,B) and (r,g,b) are the vectors of color + * components at (X,Y) and (x,y), respectively + * (though, the algorithm does not depend on the color space used, so any + * 3-component color space can be used instead). Over the neighborhood the + * average spatial value (X',Y') and average color vector + * (R',G',B') are found and they act as the neighborhood center on + * the next iteration:

+ * + *

(X,Y)~(X',Y'), (R,G,B)~(R',G',B').

+ * + *

After the iterations over, the color components of the initial pixel (that + * is, the pixel from where the iterations started) are set to the final value + * (average color at the last iteration):

+ * + *

I(X,Y) <- (R*,G*,B*)

+ * + *

When maxLevel > 0, the gaussian pyramid of maxLevel+1 + * levels is built, and the above procedure is run on the smallest layer first. + * After that, the results are propagated to the larger layer and the iterations + * are run again only on those pixels where the layer colors differ by more than + * sr from the lower-resolution layer of the pyramid. That makes + * boundaries of color regions sharper. Note that the results will be actually + * different from the ones obtained by running the meanshift procedure on the + * whole original image (i.e. when maxLevel==0).

+ * + * @param src The source 8-bit, 3-channel image. + * @param dst The destination image of the same format and the same size as the + * source. + * @param sp The spatial window radius. + * @param sr The color window radius. + * + * @see org.opencv.imgproc.Imgproc.pyrMeanShiftFiltering + */ + public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr) + { + + pyrMeanShiftFiltering_1(src.nativeObj, dst.nativeObj, sp, sr); + + return; + } + + + // + // C++: void pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + // + +/** + *

Upsamples an image and then blurs it.

+ * + *

The function performs the upsampling step of the Gaussian pyramid + * construction, though it can actually be used to construct the Laplacian + * pyramid. First, it upsamples the source image by injecting even zero rows and + * columns and then convolves the result with the same kernel as in "pyrDown" + * multiplied by 4.

+ * + * @param src input image. + * @param dst output image. It has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size(src.cols*2, (src.rows*2), but in any case, the following + * conditions should be satisfied: + * + *

ltBR gt| dstsize.width -src.cols*2| <= (dstsize.width mod 2) + * |dstsize.height -src.rows*2| <= (dstsize.height mod 2)

+ * @param borderType a borderType + * + * @see org.opencv.imgproc.Imgproc.pyrUp + */ + public static void pyrUp(Mat src, Mat dst, Size dstsize, int borderType) + { + + pyrUp_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType); + + return; + } + +/** + *

Upsamples an image and then blurs it.

+ * + *

The function performs the upsampling step of the Gaussian pyramid + * construction, though it can actually be used to construct the Laplacian + * pyramid. First, it upsamples the source image by injecting even zero rows and + * columns and then convolves the result with the same kernel as in "pyrDown" + * multiplied by 4.

+ * + * @param src input image. + * @param dst output image. It has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size(src.cols*2, (src.rows*2), but in any case, the following + * conditions should be satisfied: + * + *

ltBR gt| dstsize.width -src.cols*2| <= (dstsize.width mod 2) + * |dstsize.height -src.rows*2| <= (dstsize.height mod 2)

+ * + * @see org.opencv.imgproc.Imgproc.pyrUp + */ + public static void pyrUp(Mat src, Mat dst, Size dstsize) + { + + pyrUp_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height); + + return; + } + +/** + *

Upsamples an image and then blurs it.

+ * + *

The function performs the upsampling step of the Gaussian pyramid + * construction, though it can actually be used to construct the Laplacian + * pyramid. First, it upsamples the source image by injecting even zero rows and + * columns and then convolves the result with the same kernel as in "pyrDown" + * multiplied by 4.

+ * + * @param src input image. + * @param dst output image. It has the specified size and the same type as + * src. + * + * @see org.opencv.imgproc.Imgproc.pyrUp + */ + public static void pyrUp(Mat src, Mat dst) + { + + pyrUp_2(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + // + +/** + *

Applies a generic geometrical transformation to an image.

+ * + *

The function remap transforms the source image using the + * specified map:

+ * + *

dst(x,y) = src(map_x(x,y),map_y(x,y))

+ * + *

where values of pixels with non-integer coordinates are computed using one of + * available interpolation methods. + * map_x and map_y can be encoded as separate floating-point + * maps in map_1 and map_2 respectively, or interleaved + * floating-point maps of (x,y) in map_1, or fixed-point maps + * created by using "convertMaps". The reason you might want to convert from + * floating to fixed-point representations of a map is that they can yield much + * faster (~2x) remapping operations. In the converted case, map_1 + * contains pairs (cvFloor(x), cvFloor(y)) and map_2 + * contains indices in a table of interpolation coefficients.

+ * + *

This function cannot operate in-place.

+ * + * @param src Source image. + * @param dst Destination image. It has the same size as map1 and + * the same type as src. + * @param map1 The first map of either (x,y) points or just + * x values having the type CV_16SC2, + * CV_32FC1, or CV_32FC2. See "convertMaps" for + * details on converting a floating point representation to fixed-point for + * speed. + * @param map2 The second map of y values having the type + * CV_16UC1, CV_32FC1, or none (empty map if + * map1 is (x,y) points), respectively. + * @param interpolation Interpolation method (see "resize"). The method + * INTER_AREA is not supported by this function. + * @param borderMode Pixel extrapolation method (see "borderInterpolate"). When + * borderMode=BORDER_TRANSPARENT, it means that the pixels in the + * destination image that corresponds to the "outliers" in the source image are + * not modified by the function. + * @param borderValue Value used in case of a constant border. By default, it is + * 0. + * + * @see org.opencv.imgproc.Imgproc.remap + */ + public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) + { + + remap_0(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Applies a generic geometrical transformation to an image.

+ * + *

The function remap transforms the source image using the + * specified map:

+ * + *

dst(x,y) = src(map_x(x,y),map_y(x,y))

+ * + *

where values of pixels with non-integer coordinates are computed using one of + * available interpolation methods. + * map_x and map_y can be encoded as separate floating-point + * maps in map_1 and map_2 respectively, or interleaved + * floating-point maps of (x,y) in map_1, or fixed-point maps + * created by using "convertMaps". The reason you might want to convert from + * floating to fixed-point representations of a map is that they can yield much + * faster (~2x) remapping operations. In the converted case, map_1 + * contains pairs (cvFloor(x), cvFloor(y)) and map_2 + * contains indices in a table of interpolation coefficients.

+ * + *

This function cannot operate in-place.

+ * + * @param src Source image. + * @param dst Destination image. It has the same size as map1 and + * the same type as src. + * @param map1 The first map of either (x,y) points or just + * x values having the type CV_16SC2, + * CV_32FC1, or CV_32FC2. See "convertMaps" for + * details on converting a floating point representation to fixed-point for + * speed. + * @param map2 The second map of y values having the type + * CV_16UC1, CV_32FC1, or none (empty map if + * map1 is (x,y) points), respectively. + * @param interpolation Interpolation method (see "resize"). The method + * INTER_AREA is not supported by this function. + * + * @see org.opencv.imgproc.Imgproc.remap + */ + public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation) + { + + remap_1(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation); + + return; + } + + + // + // C++: void resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) + // + +/** + *

Resizes an image.

+ * + *

The function resize resizes the image src down to + * or up to the specified size.Note that the initial dst type or + * size are not taken into account. Instead, the size and type are derived from + * the src,dsize,fx, and fy. + * If you want to resize src so that it fits the pre-created + * dst, you may call the function as follows:

+ * + *

// C++ code:

+ * + *

// explicitly specify dsize=dst.size(); fx and fy will be computed from that.

+ * + *

resize(src, dst, dst.size(), 0, 0, interpolation);

+ * + *

If you want to decimate the image by factor of 2 in each direction, you can + * call the function this way:

+ * + *

// specify fx and fy and let the function compute the destination image size.

+ * + *

resize(src, dst, Size(), 0.5, 0.5, interpolation);

+ * + *

To shrink an image, it will generally look best with CV_INTER_AREA + * interpolation, whereas to enlarge an image, it will generally look best with + * CV_INTER_CUBIC (slow) or CV_INTER_LINEAR (faster but still looks OK). + *

+ * + * @param src input image. + * @param dst output image; it has the size dsize (when it is + * non-zero) or the size computed from src.size(), fx, + * and fy; the type of dst is the same as of + * src. + * @param dsize output image size; if it equals zero, it is computed as: + * + *

dsize = Size(round(fx*src.cols), round(fy*src.rows))

+ * + *

Either dsize or both fx and fy must be + * non-zero.

+ * @param fx scale factor along the horizontal axis; when it equals 0, it is + * computed as + * + *

(double)dsize.width/src.cols

+ * @param fy scale factor along the vertical axis; when it equals 0, it is + * computed as + * + *

(double)dsize.height/src.rows

+ * @param interpolation interpolation method: + *
    + *
  • INTER_NEAREST - a nearest-neighbor interpolation + *
  • INTER_LINEAR - a bilinear interpolation (used by default) + *
  • INTER_AREA - resampling using pixel area relation. It may be a + * preferred method for image decimation, as it gives moire'-free results. But + * when the image is zoomed, it is similar to the INTER_NEAREST + * method. + *
  • INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood + *
  • INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood + *
+ * + * @see org.opencv.imgproc.Imgproc.resize + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interpolation) + { + + resize_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy, interpolation); + + return; + } + +/** + *

Resizes an image.

+ * + *

The function resize resizes the image src down to + * or up to the specified size.Note that the initial dst type or + * size are not taken into account. Instead, the size and type are derived from + * the src,dsize,fx, and fy. + * If you want to resize src so that it fits the pre-created + * dst, you may call the function as follows:

+ * + *

// C++ code:

+ * + *

// explicitly specify dsize=dst.size(); fx and fy will be computed from that.

+ * + *

resize(src, dst, dst.size(), 0, 0, interpolation);

+ * + *

If you want to decimate the image by factor of 2 in each direction, you can + * call the function this way:

+ * + *

// specify fx and fy and let the function compute the destination image size.

+ * + *

resize(src, dst, Size(), 0.5, 0.5, interpolation);

+ * + *

To shrink an image, it will generally look best with CV_INTER_AREA + * interpolation, whereas to enlarge an image, it will generally look best with + * CV_INTER_CUBIC (slow) or CV_INTER_LINEAR (faster but still looks OK). + *

+ * + * @param src input image. + * @param dst output image; it has the size dsize (when it is + * non-zero) or the size computed from src.size(), fx, + * and fy; the type of dst is the same as of + * src. + * @param dsize output image size; if it equals zero, it is computed as: + * + *

dsize = Size(round(fx*src.cols), round(fy*src.rows))

+ * + *

Either dsize or both fx and fy must be + * non-zero.

+ * + * @see org.opencv.imgproc.Imgproc.resize + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void resize(Mat src, Mat dst, Size dsize) + { + + resize_1(src.nativeObj, dst.nativeObj, dsize.width, dsize.height); + + return; + } + + + // + // C++: void sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Applies a separable linear filter to an image.

+ * + *

The function applies a separable linear filter to the image. That is, first, + * every row of src is filtered with the 1D kernel + * kernelX. Then, every column of the result is filtered with the + * 1D kernel kernelY. The final result shifted by delta + * is stored in dst.

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Destination image depth. The following combination of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source.

+ * @param kernelX Coefficients for filtering each row. + * @param kernelY Coefficients for filtering each column. + * @param anchor Anchor position within the kernel. The default value (-1, + * 1) means that the anchor is at the kernel center. + * @param delta Value added to the filtered results before storing them. + * @param borderType Pixel extrapolation method. See "borderInterpolate" for + * details. + * + * @see org.opencv.imgproc.Imgproc.sepFilter2D + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#Sobel + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) + { + + sepFilter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta, borderType); + + return; + } + +/** + *

Applies a separable linear filter to an image.

+ * + *

The function applies a separable linear filter to the image. That is, first, + * every row of src is filtered with the 1D kernel + * kernelX. Then, every column of the result is filtered with the + * 1D kernel kernelY. The final result shifted by delta + * is stored in dst.

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Destination image depth. The following combination of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source.

+ * @param kernelX Coefficients for filtering each row. + * @param kernelY Coefficients for filtering each column. + * @param anchor Anchor position within the kernel. The default value (-1, + * 1) means that the anchor is at the kernel center. + * @param delta Value added to the filtered results before storing them. + * + * @see org.opencv.imgproc.Imgproc.sepFilter2D + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#Sobel + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta) + { + + sepFilter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta); + + return; + } + +/** + *

Applies a separable linear filter to an image.

+ * + *

The function applies a separable linear filter to the image. That is, first, + * every row of src is filtered with the 1D kernel + * kernelX. Then, every column of the result is filtered with the + * 1D kernel kernelY. The final result shifted by delta + * is stored in dst.

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Destination image depth. The following combination of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source.

+ * @param kernelX Coefficients for filtering each row. + * @param kernelY Coefficients for filtering each column. + * + * @see org.opencv.imgproc.Imgproc.sepFilter2D + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#Sobel + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY) + { + + sepFilter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj); + + return; + } + + + // + // C++: double threshold(Mat src, Mat& dst, double thresh, double maxval, int type) + // + +/** + *

Applies a fixed-level threshold to each array element.

+ * + *

The function applies fixed-level thresholding to a single-channel array. The + * function is typically used to get a bi-level (binary) image out of a + * grayscale image ("compare" could be also used for this purpose) or for + * removing a noise, that is, filtering out pixels with too small or too large + * values. There are several types of thresholding supported by the function. + * They are determined by type :

+ *
    + *
  • THRESH_BINARY + *
+ * + *

dst(x,y) = maxval if src(x,y) > thresh; 0 otherwise

+ * + *
    + *
  • THRESH_BINARY_INV + *
+ * + *

dst(x,y) = 0 if src(x,y) > thresh; maxval otherwise

+ * + *
    + *
  • THRESH_TRUNC + *
+ * + *

dst(x,y) = threshold if src(x,y) > thresh; src(x,y) otherwise

+ * + *
    + *
  • THRESH_TOZERO + *
+ * + *

dst(x,y) = src(x,y) if src(x,y) > thresh; 0 otherwise

+ * + *
    + *
  • THRESH_TOZERO_INV + *
+ * + *

dst(x,y) = 0 if src(x,y) > thresh; src(x,y) otherwise

+ * + *

Also, the special value THRESH_OTSU may be combined with one of + * the above values. In this case, the function determines the optimal threshold + * value using the Otsu's algorithm and uses it instead of the specified + * thresh. + * The function returns the computed threshold value. + * Currently, the Otsu's method is implemented only for 8-bit images.

+ * + * @param src input array (single-channel, 8-bit or 32-bit floating point). + * @param dst output array of the same size and type as src. + * @param thresh threshold value. + * @param maxval maximum value to use with the THRESH_BINARY and + * THRESH_BINARY_INV thresholding types. + * @param type thresholding type (see the details below). + * + * @see org.opencv.imgproc.Imgproc.threshold + * @see org.opencv.imgproc.Imgproc#findContours + * @see org.opencv.core.Core#max + * @see org.opencv.imgproc.Imgproc#adaptiveThreshold + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#min + */ + public static double threshold(Mat src, Mat dst, double thresh, double maxval, int type) + { + + double retVal = threshold_0(src.nativeObj, dst.nativeObj, thresh, maxval, type); + + return retVal; + } + + + // + // C++: void undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat()) + // + +/** + *

Transforms an image to compensate for lens distortion.

+ * + *

The function transforms an image to compensate radial and tangential lens + * distortion.

+ * + *

The function is simply a combination of "initUndistortRectifyMap" (with unity + * R) and "remap" (with bilinear interpolation). See the former + * function for details of the transformation being performed.

+ * + *

Those pixels in the destination image, for which there is no correspondent + * pixels in the source image, are filled with zeros (black color).

+ * + *

A particular subset of the source image that will be visible in the corrected + * image can be regulated by newCameraMatrix. You can use + * "getOptimalNewCameraMatrix" to compute the appropriate newCameraMatrix + * depending on your requirements.

+ * + *

The camera matrix and the distortion parameters can be determined using + * "calibrateCamera". If the resolution of images is different from the + * resolution used at the calibration stage, f_x, f_y, c_x and + * c_y need to be scaled accordingly, while the distortion coefficients + * remain the same.

+ * + * @param src Input (distorted) image. + * @param dst Output (corrected) image that has the same size and type as + * src. + * @param cameraMatrix Input camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param newCameraMatrix Camera matrix of the distorted image. By default, it + * is the same as cameraMatrix but you may additionally scale and + * shift the result by using a different matrix. + * + * @see org.opencv.imgproc.Imgproc.undistort + */ + public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix) + { + + undistort_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, newCameraMatrix.nativeObj); + + return; + } + +/** + *

Transforms an image to compensate for lens distortion.

+ * + *

The function transforms an image to compensate radial and tangential lens + * distortion.

+ * + *

The function is simply a combination of "initUndistortRectifyMap" (with unity + * R) and "remap" (with bilinear interpolation). See the former + * function for details of the transformation being performed.

+ * + *

Those pixels in the destination image, for which there is no correspondent + * pixels in the source image, are filled with zeros (black color).

+ * + *

A particular subset of the source image that will be visible in the corrected + * image can be regulated by newCameraMatrix. You can use + * "getOptimalNewCameraMatrix" to compute the appropriate newCameraMatrix + * depending on your requirements.

+ * + *

The camera matrix and the distortion parameters can be determined using + * "calibrateCamera". If the resolution of images is different from the + * resolution used at the calibration stage, f_x, f_y, c_x and + * c_y need to be scaled accordingly, while the distortion coefficients + * remain the same.

+ * + * @param src Input (distorted) image. + * @param dst Output (corrected) image that has the same size and type as + * src. + * @param cameraMatrix Input camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * + * @see org.opencv.imgproc.Imgproc.undistort + */ + public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) + { + + undistort_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj); + + return; + } + + + // + // C++: void undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat()) + // + +/** + *

Computes the ideal point coordinates from the observed point coordinates.

+ * + *

The function is similar to "undistort" and "initUndistortRectifyMap" but it + * operates on a sparse set of points instead of a raster image. Also the + * function performs a reverse transformation to"projectPoints". In case of a 3D + * object, it does not reconstruct its 3D coordinates, but for a planar object, + * it does, up to a translation vector, if the proper R is + * specified. + *

+ * + *

// C++ code:

+ * + *

// (u,v) is the input point, (u', v') is the output point

+ * + *

// camera_matrix=[fx 0 cx; 0 fy cy; 0 0 1]

+ * + *

// P=[fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]

+ * + *

x" = (u - cx)/fx

+ * + *

y" = (v - cy)/fy

+ * + *

(x',y') = undistort(x",y",dist_coeffs)

+ * + *

[X,Y,W]T = R*[x' y' 1]T

+ * + *

x = X/W, y = Y/W

+ * + *

// only performed if P=[fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is + * specified

+ * + *

u' = x*fx' + cx'

+ * + *

v' = y*fy' + cy',

+ * + *

where undistort() is an approximate iterative algorithm that + * estimates the normalized original point coordinates out of the normalized + * distorted point coordinates ("normalized" means that the coordinates do not + * depend on the camera matrix). + *

+ * + *

The function can be used for both a stereo camera head or a monocular camera + * (when R is empty).

+ * + * @param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or + * CV_64FC2). + * @param dst Output ideal point coordinates after undistortion and reverse + * perspective transformation. If matrix P is identity or omitted, + * dst will contain normalized point coordinates. + * @param cameraMatrix Camera matrix + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param R Rectification transformation in the object space (3x3 matrix). + * R1 or R2 computed by "stereoRectify" can be passed + * here. If the matrix is empty, the identity transformation is used. + * @param P New camera matrix (3x3) or new projection matrix (3x4). + * P1 or P2 computed by "stereoRectify" can be passed + * here. If the matrix is empty, the identity new camera matrix is used. + * + * @see org.opencv.imgproc.Imgproc.undistortPoints + */ + public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P) + { + Mat src_mat = src; + Mat dst_mat = dst; + undistortPoints_0(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj); + + return; + } + +/** + *

Computes the ideal point coordinates from the observed point coordinates.

+ * + *

The function is similar to "undistort" and "initUndistortRectifyMap" but it + * operates on a sparse set of points instead of a raster image. Also the + * function performs a reverse transformation to"projectPoints". In case of a 3D + * object, it does not reconstruct its 3D coordinates, but for a planar object, + * it does, up to a translation vector, if the proper R is + * specified. + *

+ * + *

// C++ code:

+ * + *

// (u,v) is the input point, (u', v') is the output point

+ * + *

// camera_matrix=[fx 0 cx; 0 fy cy; 0 0 1]

+ * + *

// P=[fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]

+ * + *

x" = (u - cx)/fx

+ * + *

y" = (v - cy)/fy

+ * + *

(x',y') = undistort(x",y",dist_coeffs)

+ * + *

[X,Y,W]T = R*[x' y' 1]T

+ * + *

x = X/W, y = Y/W

+ * + *

// only performed if P=[fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is + * specified

+ * + *

u' = x*fx' + cx'

+ * + *

v' = y*fy' + cy',

+ * + *

where undistort() is an approximate iterative algorithm that + * estimates the normalized original point coordinates out of the normalized + * distorted point coordinates ("normalized" means that the coordinates do not + * depend on the camera matrix). + *

+ * + *

The function can be used for both a stereo camera head or a monocular camera + * (when R is empty).

+ * + * @param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or + * CV_64FC2). + * @param dst Output ideal point coordinates after undistortion and reverse + * perspective transformation. If matrix P is identity or omitted, + * dst will contain normalized point coordinates. + * @param cameraMatrix Camera matrix + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * + * @see org.opencv.imgproc.Imgproc.undistortPoints + */ + public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs) + { + Mat src_mat = src; + Mat dst_mat = dst; + undistortPoints_1(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj); + + return; + } + + + // + // C++: void warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + // + +/** + *

Applies an affine transformation to an image.

+ * + *

The function warpAffine transforms the source image using the + * specified matrix:

+ * + *

dst(x,y) = src(M _11 x + M _12 y + M _13, M _21 x + M _22 y + M _23)

+ * + *

when the flag WARP_INVERSE_MAP is set. Otherwise, the + * transformation is first inverted with "invertAffineTransform" and then put in + * the formula above instead of M. + * The function cannot operate in-place.

+ * + *

Note: cvGetQuadrangleSubPix is similar to cvWarpAffine, + * but the outliers are extrapolated using replication border mode.

+ * + * @param src input image. + * @param dst output image that has the size dsize and the same + * type as src. + * @param M 2x 3 transformation matrix. + * @param dsize size of the output image. + * @param flags combination of interpolation methods (see "resize") and the + * optional flag WARP_INVERSE_MAP that means that M is + * the inverse transformation (dst->src). + * @param borderMode pixel extrapolation method (see "borderInterpolate"); when + * borderMode=BORDER_TRANSPARENT, it means that the pixels in the + * destination image corresponding to the "outliers" in the source image are not + * modified by the function. + * @param borderValue value used in case of a constant border; by default, it is + * 0. + * + * @see org.opencv.imgproc.Imgproc.warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.imgproc.Imgproc#getRectSubPix + * @see org.opencv.imgproc.Imgproc#resize + * @see org.opencv.core.Core#transform + */ + public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) + { + + warpAffine_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Applies an affine transformation to an image.

+ * + *

The function warpAffine transforms the source image using the + * specified matrix:

+ * + *

dst(x,y) = src(M _11 x + M _12 y + M _13, M _21 x + M _22 y + M _23)

+ * + *

when the flag WARP_INVERSE_MAP is set. Otherwise, the + * transformation is first inverted with "invertAffineTransform" and then put in + * the formula above instead of M. + * The function cannot operate in-place.

+ * + *

Note: cvGetQuadrangleSubPix is similar to cvWarpAffine, + * but the outliers are extrapolated using replication border mode.

+ * + * @param src input image. + * @param dst output image that has the size dsize and the same + * type as src. + * @param M 2x 3 transformation matrix. + * @param dsize size of the output image. + * @param flags combination of interpolation methods (see "resize") and the + * optional flag WARP_INVERSE_MAP that means that M is + * the inverse transformation (dst->src). + * + * @see org.opencv.imgproc.Imgproc.warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.imgproc.Imgproc#getRectSubPix + * @see org.opencv.imgproc.Imgproc#resize + * @see org.opencv.core.Core#transform + */ + public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags) + { + + warpAffine_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags); + + return; + } + +/** + *

Applies an affine transformation to an image.

+ * + *

The function warpAffine transforms the source image using the + * specified matrix:

+ * + *

dst(x,y) = src(M _11 x + M _12 y + M _13, M _21 x + M _22 y + M _23)

+ * + *

when the flag WARP_INVERSE_MAP is set. Otherwise, the + * transformation is first inverted with "invertAffineTransform" and then put in + * the formula above instead of M. + * The function cannot operate in-place.

+ * + *

Note: cvGetQuadrangleSubPix is similar to cvWarpAffine, + * but the outliers are extrapolated using replication border mode.

+ * + * @param src input image. + * @param dst output image that has the size dsize and the same + * type as src. + * @param M 2x 3 transformation matrix. + * @param dsize size of the output image. + * + * @see org.opencv.imgproc.Imgproc.warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.imgproc.Imgproc#getRectSubPix + * @see org.opencv.imgproc.Imgproc#resize + * @see org.opencv.core.Core#transform + */ + public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize) + { + + warpAffine_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height); + + return; + } + + + // + // C++: void warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + // + +/** + *

Applies a perspective transformation to an image.

+ * + *

The function warpPerspective transforms the source image using + * the specified matrix:

+ * + *

dst(x,y) = src((M_11 x + M_12 y + M_13)/(M_(31) x + M_32 y + + * M_33),<BR>(M_21 x + M_22 y + M_23)/(M_(31) x + M_32 y + M_33))

+ * + *

when the flag WARP_INVERSE_MAP is set. Otherwise, the + * transformation is first inverted with "invert" and then put in the formula + * above instead of M. + * The function cannot operate in-place.

+ * + * @param src input image. + * @param dst output image that has the size dsize and the same + * type as src. + * @param M 3x 3 transformation matrix. + * @param dsize size of the output image. + * @param flags combination of interpolation methods (INTER_LINEAR + * or INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, + * that sets M as the inverse transformation (dst->src). + * @param borderMode pixel extrapolation method (BORDER_CONSTANT or + * BORDER_REPLICATE). + * @param borderValue value used in case of a constant border; by default, it + * equals 0. + * + * @see org.opencv.imgproc.Imgproc.warpPerspective + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#getRectSubPix + * @see org.opencv.imgproc.Imgproc#resize + */ + public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) + { + + warpPerspective_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Applies a perspective transformation to an image.

+ * + *

The function warpPerspective transforms the source image using + * the specified matrix:

+ * + *

dst(x,y) = src((M_11 x + M_12 y + M_13)/(M_(31) x + M_32 y + + * M_33),<BR>(M_21 x + M_22 y + M_23)/(M_(31) x + M_32 y + M_33))

+ * + *

when the flag WARP_INVERSE_MAP is set. Otherwise, the + * transformation is first inverted with "invert" and then put in the formula + * above instead of M. + * The function cannot operate in-place.

+ * + * @param src input image. + * @param dst output image that has the size dsize and the same + * type as src. + * @param M 3x 3 transformation matrix. + * @param dsize size of the output image. + * @param flags combination of interpolation methods (INTER_LINEAR + * or INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, + * that sets M as the inverse transformation (dst->src). + * + * @see org.opencv.imgproc.Imgproc.warpPerspective + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#getRectSubPix + * @see org.opencv.imgproc.Imgproc#resize + */ + public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags) + { + + warpPerspective_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags); + + return; + } + +/** + *

Applies a perspective transformation to an image.

+ * + *

The function warpPerspective transforms the source image using + * the specified matrix:

+ * + *

dst(x,y) = src((M_11 x + M_12 y + M_13)/(M_(31) x + M_32 y + + * M_33),<BR>(M_21 x + M_22 y + M_23)/(M_(31) x + M_32 y + M_33))

+ * + *

when the flag WARP_INVERSE_MAP is set. Otherwise, the + * transformation is first inverted with "invert" and then put in the formula + * above instead of M. + * The function cannot operate in-place.

+ * + * @param src input image. + * @param dst output image that has the size dsize and the same + * type as src. + * @param M 3x 3 transformation matrix. + * @param dsize size of the output image. + * + * @see org.opencv.imgproc.Imgproc.warpPerspective + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#getRectSubPix + * @see org.opencv.imgproc.Imgproc#resize + */ + public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize) + { + + warpPerspective_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height); + + return; + } + + + // + // C++: void watershed(Mat image, Mat& markers) + // + +/** + *

Performs a marker-based image segmentation using the watershed algorithm.

+ * + *

The function implements one of the variants of watershed, non-parametric + * marker-based segmentation algorithm, described in [Meyer92].

+ * + *

Before passing the image to the function, you have to roughly outline the + * desired regions in the image markers with positive + * (>0) indices. So, every region is represented as one or more + * connected components with the pixel values 1, 2, 3, and so on. Such markers + * can be retrieved from a binary mask using "findContours" and "drawContours" + * (see the watershed.cpp demo). The markers are "seeds" of the + * future image regions. All the other pixels in markers, whose + * relation to the outlined regions is not known and should be defined by the + * algorithm, should be set to 0's. In the function output, each pixel in + * markers is set to a value of the "seed" components or to -1 at boundaries + * between the regions.

+ * + *

Visual demonstration and usage example of the function can be found in the + * OpenCV samples directory (see the watershed.cpp demo).

+ * + *

Note: Any two neighbor connected components are not necessarily separated by + * a watershed boundary (-1's pixels); for example, they can touch each other in + * the initial marker image passed to the function.

+ * + * @param image Input 8-bit 3-channel image. + * @param markers Input/output 32-bit single-channel image (map) of markers. It + * should have the same size as image. + * + * @see org.opencv.imgproc.Imgproc.watershed + * @see org.opencv.imgproc.Imgproc#findContours + */ + public static void watershed(Mat image, Mat markers) + { + + watershed_0(image.nativeObj, markers.nativeObj); + + return; + } + + + + + // C++: void Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false) + private static native void Canny_0(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize, boolean L2gradient); + private static native void Canny_1(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2); + + // C++: void GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT) + private static native void GaussianBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY, int borderType); + private static native void GaussianBlur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY); + private static native void GaussianBlur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX); + + // C++: void HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0) + private static native void HoughCircles_0(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius); + private static native void HoughCircles_1(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist); + + // C++: void HoughLines(Mat image, Mat& lines, double rho, double theta, int 
threshold, double srn = 0, double stn = 0) + private static native void HoughLines_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn); + private static native void HoughLines_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); + + // C++: void HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0) + private static native void HoughLinesP_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength, double maxLineGap); + private static native void HoughLinesP_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); + + // C++: void HuMoments(Moments m, Mat& hu) + private static native void HuMoments_0(long m_nativeObj, long hu_nativeObj); + + // C++: void Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + private static native void Laplacian_0(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta, int borderType); + private static native void Laplacian_1(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta); + private static native void Laplacian_2(long src_nativeObj, long dst_nativeObj, int ddepth); + + // C++: double PSNR(Mat src1, Mat src2) + private static native double PSNR_0(long src1_nativeObj, long src2_nativeObj); + + // C++: void Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + private static native void Scharr_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta, int borderType); + private static native void Scharr_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta); + private static native void 
Scharr_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy); + + // C++: void Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + private static native void Sobel_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType); + private static native void Sobel_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta); + private static native void Sobel_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy); + + // C++: void accumulate(Mat src, Mat& dst, Mat mask = Mat()) + private static native void accumulate_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void accumulate_1(long src_nativeObj, long dst_nativeObj); + + // C++: void accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void accumulateProduct_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void accumulateProduct_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void accumulateSquare(Mat src, Mat& dst, Mat mask = Mat()) + private static native void accumulateSquare_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void accumulateSquare_1(long src_nativeObj, long dst_nativeObj); + + // C++: void accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) + private static native void accumulateWeighted_0(long src_nativeObj, long dst_nativeObj, double alpha, long mask_nativeObj); + private static native void accumulateWeighted_1(long src_nativeObj, long dst_nativeObj, double alpha); + + // C++: void adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) + private static native void 
adaptiveThreshold_0(long src_nativeObj, long dst_nativeObj, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C); + + // C++: void approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed) + private static native void approxPolyDP_0(long curve_mat_nativeObj, long approxCurve_mat_nativeObj, double epsilon, boolean closed); + + // C++: double arcLength(vector_Point2f curve, bool closed) + private static native double arcLength_0(long curve_mat_nativeObj, boolean closed); + + // C++: void bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT) + private static native void bilateralFilter_0(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace, int borderType); + private static native void bilateralFilter_1(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace); + + // C++: void blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT) + private static native void blur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y, int borderType); + private static native void blur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y); + private static native void blur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height); + + // C++: int borderInterpolate(int p, int len, int borderType) + private static native int borderInterpolate_0(int p, int len, int borderType); + + // C++: Rect boundingRect(vector_Point points) + private static native double[] boundingRect_0(long points_mat_nativeObj); + + // C++: void boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT) + private static native void boxFilter_0(long src_nativeObj, long 
dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType); + private static native void boxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize); + private static native void boxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height); + + // C++: void calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale) + private static native void calcBackProject_0(long images_mat_nativeObj, long channels_mat_nativeObj, long hist_nativeObj, long dst_nativeObj, long ranges_mat_nativeObj, double scale); + + // C++: void calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false) + private static native void calcHist_0(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj, boolean accumulate); + private static native void calcHist_1(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj); + + // C++: double compareHist(Mat H1, Mat H2, int method) + private static native double compareHist_0(long H1_nativeObj, long H2_nativeObj, int method); + + // C++: double contourArea(Mat contour, bool oriented = false) + private static native double contourArea_0(long contour_nativeObj, boolean oriented); + private static native double contourArea_1(long contour_nativeObj); + + // C++: void convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false) + private static native void convertMaps_0(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type, 
boolean nninterpolation); + private static native void convertMaps_1(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type); + + // C++: void convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true) + private static native void convexHull_0(long points_mat_nativeObj, long hull_mat_nativeObj, boolean clockwise); + private static native void convexHull_1(long points_mat_nativeObj, long hull_mat_nativeObj); + + // C++: void convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects) + private static native void convexityDefects_0(long contour_mat_nativeObj, long convexhull_mat_nativeObj, long convexityDefects_mat_nativeObj); + + // C++: void copyMakeBorder(Mat src, Mat& dst, int top, int bottom, int left, int right, int borderType, Scalar value = Scalar()) + private static native void copyMakeBorder_0(long src_nativeObj, long dst_nativeObj, int top, int bottom, int left, int right, int borderType, double value_val0, double value_val1, double value_val2, double value_val3); + private static native void copyMakeBorder_1(long src_nativeObj, long dst_nativeObj, int top, int bottom, int left, int right, int borderType); + + // C++: void cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT) + private static native void cornerEigenValsAndVecs_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType); + private static native void cornerEigenValsAndVecs_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize); + + // C++: void cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT) + private static native void cornerHarris_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k, int borderType); + private static native void cornerHarris_1(long src_nativeObj, long dst_nativeObj, int blockSize, int 
ksize, double k); + + // C++: void cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT) + private static native void cornerMinEigenVal_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType); + private static native void cornerMinEigenVal_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize); + private static native void cornerMinEigenVal_2(long src_nativeObj, long dst_nativeObj, int blockSize); + + // C++: void cornerSubPix(Mat image, vector_Point2f& corners, Size winSize, Size zeroZone, TermCriteria criteria) + private static native void cornerSubPix_0(long image_nativeObj, long corners_mat_nativeObj, double winSize_width, double winSize_height, double zeroZone_width, double zeroZone_height, int criteria_type, int criteria_maxCount, double criteria_epsilon); + + // C++: void createHanningWindow(Mat& dst, Size winSize, int type) + private static native void createHanningWindow_0(long dst_nativeObj, double winSize_width, double winSize_height, int type); + + // C++: void cvtColor(Mat src, Mat& dst, int code, int dstCn = 0) + private static native void cvtColor_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn); + private static native void cvtColor_1(long src_nativeObj, long dst_nativeObj, int code); + + // C++: void dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + private static native void dilate_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void dilate_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); + private static native void dilate_2(long src_nativeObj, long 
dst_nativeObj, long kernel_nativeObj); + + // C++: void distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize) + private static native void distanceTransform_0(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize); + + // C++: void distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP) + private static native void distanceTransformWithLabels_0(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize, int labelType); + private static native void distanceTransformWithLabels_1(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize); + + // C++: void drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = 8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point()) + private static native void drawContours_0(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel, double offset_x, double offset_y); + private static native void drawContours_1(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void drawContours_2(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void equalizeHist(Mat src, Mat& dst) + private static native void equalizeHist_0(long src_nativeObj, long dst_nativeObj); + + // C++: void erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + private static native void erode_0(long src_nativeObj, long dst_nativeObj, 
long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void erode_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); + private static native void erode_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj); + + // C++: void filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + private static native void filter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta, int borderType); + private static native void filter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta); + private static native void filter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj); + + // C++: void findContours(Mat& image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point()) + private static native void findContours_0(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method, double offset_x, double offset_y); + private static native void findContours_1(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method); + + // C++: RotatedRect fitEllipse(vector_Point2f points) + private static native double[] fitEllipse_0(long points_mat_nativeObj); + + // C++: void fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps) + private static native void fitLine_0(long points_nativeObj, long line_nativeObj, int distType, double param, double reps, double aeps); + + // C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = 
Scalar(), Scalar upDiff = Scalar(), int flags = 4) + private static native int floodFill_0(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3, int flags); + private static native int floodFill_1(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3); + + // C++: Mat getAffineTransform(vector_Point2f src, vector_Point2f dst) + private static native long getAffineTransform_0(long src_mat_nativeObj, long dst_mat_nativeObj); + + // C++: Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false) + private static native long getDefaultNewCameraMatrix_0(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height, boolean centerPrincipalPoint); + private static native long getDefaultNewCameraMatrix_1(long cameraMatrix_nativeObj); + + // C++: void getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F) + private static native void getDerivKernels_0(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize, int ktype); + private static native void getDerivKernels_1(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize); + + // C++: Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F) + private static native long getGaborKernel_0(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi, int ktype); + private static native long getGaborKernel_1(double ksize_width, double ksize_height, double sigma, double theta, double 
lambd, double gamma); + + // C++: Mat getGaussianKernel(int ksize, double sigma, int ktype = CV_64F) + private static native long getGaussianKernel_0(int ksize, double sigma, int ktype); + private static native long getGaussianKernel_1(int ksize, double sigma); + + // C++: Mat getPerspectiveTransform(Mat src, Mat dst) + private static native long getPerspectiveTransform_0(long src_nativeObj, long dst_nativeObj); + + // C++: void getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1) + private static native void getRectSubPix_0(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj, int patchType); + private static native void getRectSubPix_1(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj); + + // C++: Mat getRotationMatrix2D(Point2f center, double angle, double scale) + private static native long getRotationMatrix2D_0(double center_x, double center_y, double angle, double scale); + + // C++: Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)) + private static native long getStructuringElement_0(int shape, double ksize_width, double ksize_height, double anchor_x, double anchor_y); + private static native long getStructuringElement_1(int shape, double ksize_width, double ksize_height); + + // C++: void goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04) + private static native void goodFeaturesToTrack_0(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector, double k); + private static native void goodFeaturesToTrack_1(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, 
double minDistance); + + // C++: void grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL) + private static native void grabCut_0(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount, int mode); + private static native void grabCut_1(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount); + + // C++: void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2) + private static native void initUndistortRectifyMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj); + + // C++: float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat& map1, Mat& map2, int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0) + private static native float initWideAngleProjMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, int destImageWidth, int m1type, long map1_nativeObj, long map2_nativeObj, int projType, double alpha); + private static native float initWideAngleProjMap_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, int destImageWidth, int m1type, long map1_nativeObj, long map2_nativeObj); + + // C++: void integral(Mat src, Mat& sum, int sdepth = -1) + private static native void integral_0(long src_nativeObj, long sum_nativeObj, int sdepth); + private static native void integral_1(long src_nativeObj, long sum_nativeObj); + + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1) + private static native void 
integral2_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth); + private static native void integral2_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj); + + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1) + private static native void integral3_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth); + private static native void integral3_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj); + + // C++: float intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true) + private static native float intersectConvexConvex_0(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj, boolean handleNested); + private static native float intersectConvexConvex_1(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj); + + // C++: void invertAffineTransform(Mat M, Mat& iM) + private static native void invertAffineTransform_0(long M_nativeObj, long iM_nativeObj); + + // C++: bool isContourConvex(vector_Point contour) + private static native boolean isContourConvex_0(long contour_mat_nativeObj); + + // C++: double matchShapes(Mat contour1, Mat contour2, int method, double parameter) + private static native double matchShapes_0(long contour1_nativeObj, long contour2_nativeObj, int method, double parameter); + + // C++: void matchTemplate(Mat image, Mat templ, Mat& result, int method) + private static native void matchTemplate_0(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method); + + // C++: void medianBlur(Mat src, Mat& dst, int ksize) + private static native void medianBlur_0(long src_nativeObj, long dst_nativeObj, int ksize); + + // C++: RotatedRect minAreaRect(vector_Point2f points) + private static native double[] minAreaRect_0(long points_mat_nativeObj); + + // C++: void minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius) + private static native void 
minEnclosingCircle_0(long points_mat_nativeObj, double[] center_out, double[] radius_out); + + // C++: Moments moments(Mat array, bool binaryImage = false) + private static native long moments_0(long array_nativeObj, boolean binaryImage); + private static native long moments_1(long array_nativeObj); + + // C++: void morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + private static native void morphologyEx_0(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void morphologyEx_1(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); + private static native void morphologyEx_2(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj); + + // C++: Point2d phaseCorrelate(Mat src1, Mat src2, Mat window = Mat()) + private static native double[] phaseCorrelate_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj); + private static native double[] phaseCorrelate_1(long src1_nativeObj, long src2_nativeObj); + + // C++: Point2d phaseCorrelateRes(Mat src1, Mat src2, Mat window, double* response = 0) + private static native double[] phaseCorrelateRes_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj, double[] response_out); + private static native double[] phaseCorrelateRes_1(long src1_nativeObj, long src2_nativeObj, long window_nativeObj); + + // C++: double pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist) + private static native double pointPolygonTest_0(long contour_mat_nativeObj, double pt_x, double pt_y, boolean measureDist); + + // C++: void preCornerDetect(Mat src, Mat& dst, int ksize, int 
borderType = BORDER_DEFAULT) + private static native void preCornerDetect_0(long src_nativeObj, long dst_nativeObj, int ksize, int borderType); + private static native void preCornerDetect_1(long src_nativeObj, long dst_nativeObj, int ksize); + + // C++: void pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + private static native void pyrDown_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType); + private static native void pyrDown_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height); + private static native void pyrDown_2(long src_nativeObj, long dst_nativeObj); + + // C++: void pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1)) + private static native void pyrMeanShiftFiltering_0(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel, int termcrit_type, int termcrit_maxCount, double termcrit_epsilon); + private static native void pyrMeanShiftFiltering_1(long src_nativeObj, long dst_nativeObj, double sp, double sr); + + // C++: void pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + private static native void pyrUp_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType); + private static native void pyrUp_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height); + private static native void pyrUp_2(long src_nativeObj, long dst_nativeObj); + + // C++: void remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + private static native void remap_0(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, 
double borderValue_val3); + private static native void remap_1(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation); + + // C++: void resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) + private static native void resize_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy, int interpolation); + private static native void resize_1(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height); + + // C++: void sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + private static native void sepFilter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta, int borderType); + private static native void sepFilter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta); + private static native void sepFilter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj); + + // C++: double threshold(Mat src, Mat& dst, double thresh, double maxval, int type) + private static native double threshold_0(long src_nativeObj, long dst_nativeObj, double thresh, double maxval, int type); + + // C++: void undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat()) + private static native void undistort_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long newCameraMatrix_nativeObj); + private static native void undistort_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj); + + // C++: void undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat 
distCoeffs, Mat R = Mat(), Mat P = Mat()) + private static native void undistortPoints_0(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj); + private static native void undistortPoints_1(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj); + + // C++: void warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + private static native void warpAffine_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void warpAffine_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags); + private static native void warpAffine_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height); + + // C++: void warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + private static native void warpPerspective_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void warpPerspective_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags); + private static native void warpPerspective_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height); + + // C++: void watershed(Mat image, Mat& markers) + private static native void watershed_0(long image_nativeObj, long markers_nativeObj); + +} diff --git 
a/src/org/opencv/imgproc/Moments.java b/src/org/opencv/imgproc/Moments.java new file mode 100644 index 0000000..41b4326 --- /dev/null +++ b/src/org/opencv/imgproc/Moments.java @@ -0,0 +1,810 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.imgproc; + + + +// C++: class Moments +public class Moments { + + protected final long nativeObj; + protected Moments(long addr) { nativeObj = addr; } + + + // + // C++: Moments::Moments() + // + + public Moments() + { + + nativeObj = Moments_0(); + + return; + } + + + // + // C++: double Moments::m00 + // + + public double get_m00() + { + + double retVal = get_m00_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m00 + // + + public void set_m00(double m00) + { + + set_m00_0(nativeObj, m00); + + return; + } + + + // + // C++: double Moments::m10 + // + + public double get_m10() + { + + double retVal = get_m10_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m10 + // + + public void set_m10(double m10) + { + + set_m10_0(nativeObj, m10); + + return; + } + + + // + // C++: double Moments::m01 + // + + public double get_m01() + { + + double retVal = get_m01_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m01 + // + + public void set_m01(double m01) + { + + set_m01_0(nativeObj, m01); + + return; + } + + + // + // C++: double Moments::m20 + // + + public double get_m20() + { + + double retVal = get_m20_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m20 + // + + public void set_m20(double m20) + { + + set_m20_0(nativeObj, m20); + + return; + } + + + // + // C++: double Moments::m11 + // + + public double get_m11() + { + + double retVal = get_m11_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m11 + // + + public void set_m11(double m11) + { + + set_m11_0(nativeObj, m11); + + return; + } + + + // + // C++: double Moments::m02 + // + + public double get_m02() + { + + double retVal = 
get_m02_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m02 + // + + public void set_m02(double m02) + { + + set_m02_0(nativeObj, m02); + + return; + } + + + // + // C++: double Moments::m30 + // + + public double get_m30() + { + + double retVal = get_m30_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m30 + // + + public void set_m30(double m30) + { + + set_m30_0(nativeObj, m30); + + return; + } + + + // + // C++: double Moments::m21 + // + + public double get_m21() + { + + double retVal = get_m21_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m21 + // + + public void set_m21(double m21) + { + + set_m21_0(nativeObj, m21); + + return; + } + + + // + // C++: double Moments::m12 + // + + public double get_m12() + { + + double retVal = get_m12_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m12 + // + + public void set_m12(double m12) + { + + set_m12_0(nativeObj, m12); + + return; + } + + + // + // C++: double Moments::m03 + // + + public double get_m03() + { + + double retVal = get_m03_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m03 + // + + public void set_m03(double m03) + { + + set_m03_0(nativeObj, m03); + + return; + } + + + // + // C++: double Moments::mu20 + // + + public double get_mu20() + { + + double retVal = get_mu20_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu20 + // + + public void set_mu20(double mu20) + { + + set_mu20_0(nativeObj, mu20); + + return; + } + + + // + // C++: double Moments::mu11 + // + + public double get_mu11() + { + + double retVal = get_mu11_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu11 + // + + public void set_mu11(double mu11) + { + + set_mu11_0(nativeObj, mu11); + + return; + } + + + // + // C++: double Moments::mu02 + // + + public double get_mu02() + { + + double retVal = get_mu02_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu02 + // + 
+ public void set_mu02(double mu02) + { + + set_mu02_0(nativeObj, mu02); + + return; + } + + + // + // C++: double Moments::mu30 + // + + public double get_mu30() + { + + double retVal = get_mu30_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu30 + // + + public void set_mu30(double mu30) + { + + set_mu30_0(nativeObj, mu30); + + return; + } + + + // + // C++: double Moments::mu21 + // + + public double get_mu21() + { + + double retVal = get_mu21_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu21 + // + + public void set_mu21(double mu21) + { + + set_mu21_0(nativeObj, mu21); + + return; + } + + + // + // C++: double Moments::mu12 + // + + public double get_mu12() + { + + double retVal = get_mu12_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu12 + // + + public void set_mu12(double mu12) + { + + set_mu12_0(nativeObj, mu12); + + return; + } + + + // + // C++: double Moments::mu03 + // + + public double get_mu03() + { + + double retVal = get_mu03_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu03 + // + + public void set_mu03(double mu03) + { + + set_mu03_0(nativeObj, mu03); + + return; + } + + + // + // C++: double Moments::nu20 + // + + public double get_nu20() + { + + double retVal = get_nu20_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu20 + // + + public void set_nu20(double nu20) + { + + set_nu20_0(nativeObj, nu20); + + return; + } + + + // + // C++: double Moments::nu11 + // + + public double get_nu11() + { + + double retVal = get_nu11_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu11 + // + + public void set_nu11(double nu11) + { + + set_nu11_0(nativeObj, nu11); + + return; + } + + + // + // C++: double Moments::nu02 + // + + public double get_nu02() + { + + double retVal = get_nu02_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu02 + // + + public void set_nu02(double nu02) + { + + 
set_nu02_0(nativeObj, nu02); + + return; + } + + + // + // C++: double Moments::nu30 + // + + public double get_nu30() + { + + double retVal = get_nu30_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu30 + // + + public void set_nu30(double nu30) + { + + set_nu30_0(nativeObj, nu30); + + return; + } + + + // + // C++: double Moments::nu21 + // + + public double get_nu21() + { + + double retVal = get_nu21_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu21 + // + + public void set_nu21(double nu21) + { + + set_nu21_0(nativeObj, nu21); + + return; + } + + + // + // C++: double Moments::nu12 + // + + public double get_nu12() + { + + double retVal = get_nu12_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu12 + // + + public void set_nu12(double nu12) + { + + set_nu12_0(nativeObj, nu12); + + return; + } + + + // + // C++: double Moments::nu03 + // + + public double get_nu03() + { + + double retVal = get_nu03_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu03 + // + + public void set_nu03(double nu03) + { + + set_nu03_0(nativeObj, nu03); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: Moments::Moments() + private static native long Moments_0(); + + // C++: double Moments::m00 + private static native double get_m00_0(long nativeObj); + + // C++: void Moments::m00 + private static native void set_m00_0(long nativeObj, double m00); + + // C++: double Moments::m10 + private static native double get_m10_0(long nativeObj); + + // C++: void Moments::m10 + private static native void set_m10_0(long nativeObj, double m10); + + // C++: double Moments::m01 + private static native double get_m01_0(long nativeObj); + + // C++: void Moments::m01 + private static native void set_m01_0(long nativeObj, double m01); + + // C++: double Moments::m20 + private static native double get_m20_0(long nativeObj); + + // C++: void 
Moments::m20 + private static native void set_m20_0(long nativeObj, double m20); + + // C++: double Moments::m11 + private static native double get_m11_0(long nativeObj); + + // C++: void Moments::m11 + private static native void set_m11_0(long nativeObj, double m11); + + // C++: double Moments::m02 + private static native double get_m02_0(long nativeObj); + + // C++: void Moments::m02 + private static native void set_m02_0(long nativeObj, double m02); + + // C++: double Moments::m30 + private static native double get_m30_0(long nativeObj); + + // C++: void Moments::m30 + private static native void set_m30_0(long nativeObj, double m30); + + // C++: double Moments::m21 + private static native double get_m21_0(long nativeObj); + + // C++: void Moments::m21 + private static native void set_m21_0(long nativeObj, double m21); + + // C++: double Moments::m12 + private static native double get_m12_0(long nativeObj); + + // C++: void Moments::m12 + private static native void set_m12_0(long nativeObj, double m12); + + // C++: double Moments::m03 + private static native double get_m03_0(long nativeObj); + + // C++: void Moments::m03 + private static native void set_m03_0(long nativeObj, double m03); + + // C++: double Moments::mu20 + private static native double get_mu20_0(long nativeObj); + + // C++: void Moments::mu20 + private static native void set_mu20_0(long nativeObj, double mu20); + + // C++: double Moments::mu11 + private static native double get_mu11_0(long nativeObj); + + // C++: void Moments::mu11 + private static native void set_mu11_0(long nativeObj, double mu11); + + // C++: double Moments::mu02 + private static native double get_mu02_0(long nativeObj); + + // C++: void Moments::mu02 + private static native void set_mu02_0(long nativeObj, double mu02); + + // C++: double Moments::mu30 + private static native double get_mu30_0(long nativeObj); + + // C++: void Moments::mu30 + private static native void set_mu30_0(long nativeObj, double mu30); + + // C++: double 
Moments::mu21 + private static native double get_mu21_0(long nativeObj); + + // C++: void Moments::mu21 + private static native void set_mu21_0(long nativeObj, double mu21); + + // C++: double Moments::mu12 + private static native double get_mu12_0(long nativeObj); + + // C++: void Moments::mu12 + private static native void set_mu12_0(long nativeObj, double mu12); + + // C++: double Moments::mu03 + private static native double get_mu03_0(long nativeObj); + + // C++: void Moments::mu03 + private static native void set_mu03_0(long nativeObj, double mu03); + + // C++: double Moments::nu20 + private static native double get_nu20_0(long nativeObj); + + // C++: void Moments::nu20 + private static native void set_nu20_0(long nativeObj, double nu20); + + // C++: double Moments::nu11 + private static native double get_nu11_0(long nativeObj); + + // C++: void Moments::nu11 + private static native void set_nu11_0(long nativeObj, double nu11); + + // C++: double Moments::nu02 + private static native double get_nu02_0(long nativeObj); + + // C++: void Moments::nu02 + private static native void set_nu02_0(long nativeObj, double nu02); + + // C++: double Moments::nu30 + private static native double get_nu30_0(long nativeObj); + + // C++: void Moments::nu30 + private static native void set_nu30_0(long nativeObj, double nu30); + + // C++: double Moments::nu21 + private static native double get_nu21_0(long nativeObj); + + // C++: void Moments::nu21 + private static native void set_nu21_0(long nativeObj, double nu21); + + // C++: double Moments::nu12 + private static native double get_nu12_0(long nativeObj); + + // C++: void Moments::nu12 + private static native void set_nu12_0(long nativeObj, double nu12); + + // C++: double Moments::nu03 + private static native double get_nu03_0(long nativeObj); + + // C++: void Moments::nu03 + private static native void set_nu03_0(long nativeObj, double nu03); + + // native support for java finalize() + private static native void delete(long 
nativeObj); + +} diff --git a/src/org/opencv/imgproc/Subdiv2D.java b/src/org/opencv/imgproc/Subdiv2D.java new file mode 100644 index 0000000..fbc9230 --- /dev/null +++ b/src/org/opencv/imgproc/Subdiv2D.java @@ -0,0 +1,362 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.imgproc; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat4; +import org.opencv.core.MatOfFloat6; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.utils.Converters; + +// C++: class Subdiv2D +public class Subdiv2D { + + protected final long nativeObj; + protected Subdiv2D(long addr) { nativeObj = addr; } + + + public static final int + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2, + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02; + + + // + // C++: Subdiv2D::Subdiv2D() + // + + public Subdiv2D() + { + + nativeObj = Subdiv2D_0(); + + return; + } + + + // + // C++: Subdiv2D::Subdiv2D(Rect rect) + // + + public Subdiv2D(Rect rect) + { + + nativeObj = Subdiv2D_1(rect.x, rect.y, rect.width, rect.height); + + return; + } + + + // + // C++: int Subdiv2D::edgeDst(int edge, Point2f* dstpt = 0) + // + + public int edgeDst(int edge, Point dstpt) + { + double[] dstpt_out = new double[2]; + int retVal = edgeDst_0(nativeObj, edge, dstpt_out); + if(dstpt!=null){ dstpt.x = dstpt_out[0]; dstpt.y = dstpt_out[1]; } + return retVal; + } + + public int edgeDst(int edge) + { + + int retVal = edgeDst_1(nativeObj, edge); + + return retVal; + } + + + // + // C++: int Subdiv2D::edgeOrg(int edge, Point2f* orgpt = 0) + // + + public int edgeOrg(int edge, Point orgpt) + { + double[] orgpt_out = new double[2]; + int retVal = 
edgeOrg_0(nativeObj, edge, orgpt_out); + if(orgpt!=null){ orgpt.x = orgpt_out[0]; orgpt.y = orgpt_out[1]; } + return retVal; + } + + public int edgeOrg(int edge) + { + + int retVal = edgeOrg_1(nativeObj, edge); + + return retVal; + } + + + // + // C++: int Subdiv2D::findNearest(Point2f pt, Point2f* nearestPt = 0) + // + + public int findNearest(Point pt, Point nearestPt) + { + double[] nearestPt_out = new double[2]; + int retVal = findNearest_0(nativeObj, pt.x, pt.y, nearestPt_out); + if(nearestPt!=null){ nearestPt.x = nearestPt_out[0]; nearestPt.y = nearestPt_out[1]; } + return retVal; + } + + public int findNearest(Point pt) + { + + int retVal = findNearest_1(nativeObj, pt.x, pt.y); + + return retVal; + } + + + // + // C++: int Subdiv2D::getEdge(int edge, int nextEdgeType) + // + + public int getEdge(int edge, int nextEdgeType) + { + + int retVal = getEdge_0(nativeObj, edge, nextEdgeType); + + return retVal; + } + + + // + // C++: void Subdiv2D::getEdgeList(vector_Vec4f& edgeList) + // + + public void getEdgeList(MatOfFloat4 edgeList) + { + Mat edgeList_mat = edgeList; + getEdgeList_0(nativeObj, edgeList_mat.nativeObj); + + return; + } + + + // + // C++: void Subdiv2D::getTriangleList(vector_Vec6f& triangleList) + // + + public void getTriangleList(MatOfFloat6 triangleList) + { + Mat triangleList_mat = triangleList; + getTriangleList_0(nativeObj, triangleList_mat.nativeObj); + + return; + } + + + // + // C++: Point2f Subdiv2D::getVertex(int vertex, int* firstEdge = 0) + // + + public Point getVertex(int vertex, int[] firstEdge) + { + double[] firstEdge_out = new double[1]; + Point retVal = new Point(getVertex_0(nativeObj, vertex, firstEdge_out)); + if(firstEdge!=null) firstEdge[0] = (int)firstEdge_out[0]; + return retVal; + } + + public Point getVertex(int vertex) + { + + Point retVal = new Point(getVertex_1(nativeObj, vertex)); + + return retVal; + } + + + // + // C++: void Subdiv2D::getVoronoiFacetList(vector_int idx, vector_vector_Point2f& facetList, 
vector_Point2f& facetCenters) + // + + public void getVoronoiFacetList(MatOfInt idx, List facetList, MatOfPoint2f facetCenters) + { + Mat idx_mat = idx; + Mat facetList_mat = new Mat(); + Mat facetCenters_mat = facetCenters; + getVoronoiFacetList_0(nativeObj, idx_mat.nativeObj, facetList_mat.nativeObj, facetCenters_mat.nativeObj); + Converters.Mat_to_vector_vector_Point2f(facetList_mat, facetList); + return; + } + + + // + // C++: void Subdiv2D::initDelaunay(Rect rect) + // + + public void initDelaunay(Rect rect) + { + + initDelaunay_0(nativeObj, rect.x, rect.y, rect.width, rect.height); + + return; + } + + + // + // C++: int Subdiv2D::insert(Point2f pt) + // + + public int insert(Point pt) + { + + int retVal = insert_0(nativeObj, pt.x, pt.y); + + return retVal; + } + + + // + // C++: void Subdiv2D::insert(vector_Point2f ptvec) + // + + public void insert(MatOfPoint2f ptvec) + { + Mat ptvec_mat = ptvec; + insert_1(nativeObj, ptvec_mat.nativeObj); + + return; + } + + + // + // C++: int Subdiv2D::locate(Point2f pt, int& edge, int& vertex) + // + + public int locate(Point pt, int[] edge, int[] vertex) + { + double[] edge_out = new double[1]; + double[] vertex_out = new double[1]; + int retVal = locate_0(nativeObj, pt.x, pt.y, edge_out, vertex_out); + if(edge!=null) edge[0] = (int)edge_out[0]; + if(vertex!=null) vertex[0] = (int)vertex_out[0]; + return retVal; + } + + + // + // C++: int Subdiv2D::nextEdge(int edge) + // + + public int nextEdge(int edge) + { + + int retVal = nextEdge_0(nativeObj, edge); + + return retVal; + } + + + // + // C++: int Subdiv2D::rotateEdge(int edge, int rotate) + // + + public int rotateEdge(int edge, int rotate) + { + + int retVal = rotateEdge_0(nativeObj, edge, rotate); + + return retVal; + } + + + // + // C++: int Subdiv2D::symEdge(int edge) + // + + public int symEdge(int edge) + { + + int retVal = symEdge_0(nativeObj, edge); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + 
+ + + // C++: Subdiv2D::Subdiv2D() + private static native long Subdiv2D_0(); + + // C++: Subdiv2D::Subdiv2D(Rect rect) + private static native long Subdiv2D_1(int rect_x, int rect_y, int rect_width, int rect_height); + + // C++: int Subdiv2D::edgeDst(int edge, Point2f* dstpt = 0) + private static native int edgeDst_0(long nativeObj, int edge, double[] dstpt_out); + private static native int edgeDst_1(long nativeObj, int edge); + + // C++: int Subdiv2D::edgeOrg(int edge, Point2f* orgpt = 0) + private static native int edgeOrg_0(long nativeObj, int edge, double[] orgpt_out); + private static native int edgeOrg_1(long nativeObj, int edge); + + // C++: int Subdiv2D::findNearest(Point2f pt, Point2f* nearestPt = 0) + private static native int findNearest_0(long nativeObj, double pt_x, double pt_y, double[] nearestPt_out); + private static native int findNearest_1(long nativeObj, double pt_x, double pt_y); + + // C++: int Subdiv2D::getEdge(int edge, int nextEdgeType) + private static native int getEdge_0(long nativeObj, int edge, int nextEdgeType); + + // C++: void Subdiv2D::getEdgeList(vector_Vec4f& edgeList) + private static native void getEdgeList_0(long nativeObj, long edgeList_mat_nativeObj); + + // C++: void Subdiv2D::getTriangleList(vector_Vec6f& triangleList) + private static native void getTriangleList_0(long nativeObj, long triangleList_mat_nativeObj); + + // C++: Point2f Subdiv2D::getVertex(int vertex, int* firstEdge = 0) + private static native double[] getVertex_0(long nativeObj, int vertex, double[] firstEdge_out); + private static native double[] getVertex_1(long nativeObj, int vertex); + + // C++: void Subdiv2D::getVoronoiFacetList(vector_int idx, vector_vector_Point2f& facetList, vector_Point2f& facetCenters) + private static native void getVoronoiFacetList_0(long nativeObj, long idx_mat_nativeObj, long facetList_mat_nativeObj, long facetCenters_mat_nativeObj); + + // C++: void Subdiv2D::initDelaunay(Rect rect) + private static native void 
initDelaunay_0(long nativeObj, int rect_x, int rect_y, int rect_width, int rect_height); + + // C++: int Subdiv2D::insert(Point2f pt) + private static native int insert_0(long nativeObj, double pt_x, double pt_y); + + // C++: void Subdiv2D::insert(vector_Point2f ptvec) + private static native void insert_1(long nativeObj, long ptvec_mat_nativeObj); + + // C++: int Subdiv2D::locate(Point2f pt, int& edge, int& vertex) + private static native int locate_0(long nativeObj, double pt_x, double pt_y, double[] edge_out, double[] vertex_out); + + // C++: int Subdiv2D::nextEdge(int edge) + private static native int nextEdge_0(long nativeObj, int edge); + + // C++: int Subdiv2D::rotateEdge(int edge, int rotate) + private static native int rotateEdge_0(long nativeObj, int edge, int rotate); + + // C++: int Subdiv2D::symEdge(int edge) + private static native int symEdge_0(long nativeObj, int edge); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/imgproc/package.bluej b/src/org/opencv/imgproc/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/ml/CvANN_MLP.java b/src/org/opencv/ml/CvANN_MLP.java new file mode 100644 index 0000000..de411ed --- /dev/null +++ b/src/org/opencv/ml/CvANN_MLP.java @@ -0,0 +1,297 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvANN_MLP +/** + *

MLP model.

+ * + *

Unlike many other models in ML that are constructed and trained at once, in + * the MLP model these steps are separated. First, a network with the specified + * topology is created using the non-default constructor or the method + * "CvANN_MLP.create". All the weights are set to zeros. Then, the network is + * trained using a set of input and output vectors. The training procedure can + * be repeated more than once, that is, the weights can be adjusted based on the + * new training data.

+ * + * @see org.opencv.ml.CvANN_MLP : public CvStatModel + */ +public class CvANN_MLP extends CvStatModel { + + protected CvANN_MLP(long addr) { super(addr); } + + + public static final int + IDENTITY = 0, + SIGMOID_SYM = 1, + GAUSSIAN = 2, + UPDATE_WEIGHTS = 1, + NO_INPUT_SCALE = 2, + NO_OUTPUT_SCALE = 4; + + + // + // C++: CvANN_MLP::CvANN_MLP() + // + +/** + *

The constructors.

+ * + *

The advanced constructor allows to create MLP with the specified topology. + * See "CvANN_MLP.create" for details.

+ * + * @see org.opencv.ml.CvANN_MLP.CvANN_MLP + */ + public CvANN_MLP() + { + + super( CvANN_MLP_0() ); + + return; + } + + + // + // C++: CvANN_MLP::CvANN_MLP(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + // + +/** + *

The constructors.

+ * + *

The advanced constructor allows to create MLP with the specified topology. + * See "CvANN_MLP.create" for details.

+ * + * @param layerSizes a layerSizes + * @param activateFunc a activateFunc + * @param fparam1 a fparam1 + * @param fparam2 a fparam2 + * + * @see org.opencv.ml.CvANN_MLP.CvANN_MLP + */ + public CvANN_MLP(Mat layerSizes, int activateFunc, double fparam1, double fparam2) + { + + super( CvANN_MLP_1(layerSizes.nativeObj, activateFunc, fparam1, fparam2) ); + + return; + } + +/** + *

The constructors.

+ * + *

The advanced constructor allows to create MLP with the specified topology. + * See "CvANN_MLP.create" for details.

+ * + * @param layerSizes a layerSizes + * + * @see org.opencv.ml.CvANN_MLP.CvANN_MLP + */ + public CvANN_MLP(Mat layerSizes) + { + + super( CvANN_MLP_2(layerSizes.nativeObj) ); + + return; + } + + + // + // C++: void CvANN_MLP::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: void CvANN_MLP::create(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + // + +/** + *

Constructs MLP with the specified topology.

+ * + *

The method creates an MLP network with the specified topology and assigns the + * same activation function to all the neurons.

+ * + * @param layerSizes Integer vector specifying the number of neurons in each + * layer including the input and output layers. + * @param activateFunc Parameter specifying the activation function for each + * neuron: one of CvANN_MLP.IDENTITY, CvANN_MLP.SIGMOID_SYM, + * and CvANN_MLP.GAUSSIAN. + * @param fparam1 Free parameter of the activation function, alpha. See + * the formulas in the introduction section. + * @param fparam2 Free parameter of the activation function, beta. See + * the formulas in the introduction section. + * + * @see org.opencv.ml.CvANN_MLP.create + */ + public void create(Mat layerSizes, int activateFunc, double fparam1, double fparam2) + { + + create_0(nativeObj, layerSizes.nativeObj, activateFunc, fparam1, fparam2); + + return; + } + +/** + *

Constructs MLP with the specified topology.

+ * + *

The method creates an MLP network with the specified topology and assigns the + * same activation function to all the neurons.

+ * + * @param layerSizes Integer vector specifying the number of neurons in each + * layer including the input and output layers. + * + * @see org.opencv.ml.CvANN_MLP.create + */ + public void create(Mat layerSizes) + { + + create_1(nativeObj, layerSizes.nativeObj); + + return; + } + + + // + // C++: float CvANN_MLP::predict(Mat inputs, Mat& outputs) + // + +/** + *

Predicts responses for input samples.

+ * + *

The method returns a dummy value which should be ignored.

+ * + * @param inputs Input samples. + * @param outputs Predicted responses for corresponding samples. + * + * @see org.opencv.ml.CvANN_MLP.predict + */ + public float predict(Mat inputs, Mat outputs) + { + + float retVal = predict_0(nativeObj, inputs.nativeObj, outputs.nativeObj); + + return retVal; + } + + + // + // C++: int CvANN_MLP::train(Mat inputs, Mat outputs, Mat sampleWeights, Mat sampleIdx = cv::Mat(), CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags = 0) + // + +/** + *

Trains/updates MLP.

+ * + *

This method applies the specified training algorithm to computing/adjusting + * the network weights. It returns the number of done iterations.

+ * + *

The RPROP training algorithm is parallelized with the TBB library.

+ * + * @param inputs Floating-point matrix of input vectors, one vector per row. + * @param outputs Floating-point matrix of the corresponding output vectors, one + * vector per row. + * @param sampleWeights (RPROP only) Optional floating-point vector of weights + * for each sample. Some samples may be more important than others for training. + * You may want to raise the weight of certain classes to find the right balance + * between hit-rate and false-alarm rate, and so on. + * @param sampleIdx Optional integer vector indicating the samples (rows of + * inputs and outputs) that are taken into account. + * @param params Training parameters. See the "CvANN_MLP_TrainParams" + * description. + * @param flags Various parameters to control the training algorithm. A + * combination of the following parameters is possible: + *
    + *
  • UPDATE_WEIGHTS Algorithm updates the network weights, rather than + * computes them from scratch. In the latter case the weights are initialized + * using the Nguyen-Widrow algorithm. + *
  • NO_INPUT_SCALE Algorithm does not normalize the input vectors. If this + * flag is not set, the training algorithm normalizes each input feature + * independently, shifting its mean value to 0 and making the standard deviation + * equal to 1. If the network is assumed to be updated frequently, the new + * training data could be much different from original one. In this case, you + * should take care of proper normalization. + *
  • NO_OUTPUT_SCALE Algorithm does not normalize the output vectors. If + * the flag is not set, the training algorithm normalizes each output feature + * independently, by transforming it to the certain range depending on the used + * activation function. + *
+ * + * @see org.opencv.ml.CvANN_MLP.train + */ + public int train(Mat inputs, Mat outputs, Mat sampleWeights, Mat sampleIdx, CvANN_MLP_TrainParams params, int flags) + { + + int retVal = train_0(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj, sampleIdx.nativeObj, params.nativeObj, flags); + + return retVal; + } + +/** + *

Trains/updates MLP.

+ * + *

This method applies the specified training algorithm to computing/adjusting + * the network weights. It returns the number of done iterations.

+ * + *

The RPROP training algorithm is parallelized with the TBB library.

+ * + * @param inputs Floating-point matrix of input vectors, one vector per row. + * @param outputs Floating-point matrix of the corresponding output vectors, one + * vector per row. + * @param sampleWeights (RPROP only) Optional floating-point vector of weights + * for each sample. Some samples may be more important than others for training. + * You may want to raise the weight of certain classes to find the right balance + * between hit-rate and false-alarm rate, and so on. + * + * @see org.opencv.ml.CvANN_MLP.train + */ + public int train(Mat inputs, Mat outputs, Mat sampleWeights) + { + + int retVal = train_1(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvANN_MLP::CvANN_MLP() + private static native long CvANN_MLP_0(); + + // C++: CvANN_MLP::CvANN_MLP(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + private static native long CvANN_MLP_1(long layerSizes_nativeObj, int activateFunc, double fparam1, double fparam2); + private static native long CvANN_MLP_2(long layerSizes_nativeObj); + + // C++: void CvANN_MLP::clear() + private static native void clear_0(long nativeObj); + + // C++: void CvANN_MLP::create(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + private static native void create_0(long nativeObj, long layerSizes_nativeObj, int activateFunc, double fparam1, double fparam2); + private static native void create_1(long nativeObj, long layerSizes_nativeObj); + + // C++: float CvANN_MLP::predict(Mat inputs, Mat& outputs) + private static native float predict_0(long nativeObj, long inputs_nativeObj, long outputs_nativeObj); + + // C++: int CvANN_MLP::train(Mat inputs, Mat outputs, Mat sampleWeights, Mat sampleIdx = cv::Mat(), CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags = 0) + private 
static native int train_0(long nativeObj, long inputs_nativeObj, long outputs_nativeObj, long sampleWeights_nativeObj, long sampleIdx_nativeObj, long params_nativeObj, int flags); + private static native int train_1(long nativeObj, long inputs_nativeObj, long outputs_nativeObj, long sampleWeights_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvANN_MLP_TrainParams.java b/src/org/opencv/ml/CvANN_MLP_TrainParams.java new file mode 100644 index 0000000..92c3cc1 --- /dev/null +++ b/src/org/opencv/ml/CvANN_MLP_TrainParams.java @@ -0,0 +1,390 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.TermCriteria; + +// C++: class CvANN_MLP_TrainParams +/** + *

Parameters of the MLP training algorithm. You can initialize the structure by + * a constructor or the individual parameters can be adjusted after the + * structure is created.

+ * + *

The back-propagation algorithm parameters:

+ * + *

Strength of the weight gradient term. The recommended value is about 0.1.

+ * + *

Strength of the momentum term (the difference between weights on the 2 + * previous iterations). This parameter provides some inertia to smooth the + * random fluctuations of the weights. It can vary from 0 (the feature is + * disabled) to 1 and beyond. The value 0.1 or so is good enough

+ * + *

// C++ code:

+ * + *

The RPROP algorithm parameters (see [RPROP93] for details):

+ * + *

Initial value Delta_0 of update-values Delta_(ij).

+ * + *

Increase factor eta^+. It must be >1.

+ * + *

Decrease factor eta^-. It must be <1.

+ * + *

Update-values lower limit Delta_(min). It must be positive.

+ * + *

Update-values upper limit Delta_(max). It must be >1.

+ * + * @see org.opencv.ml.CvANN_MLP_TrainParams + */ +public class CvANN_MLP_TrainParams { + + protected final long nativeObj; + protected CvANN_MLP_TrainParams(long addr) { nativeObj = addr; } + + + public static final int + BACKPROP = 0, + RPROP = 1; + + + // + // C++: CvANN_MLP_TrainParams::CvANN_MLP_TrainParams() + // + +/** + *

The constructors.

+ * + *

By default the RPROP algorithm is used:

+ * + *

+ * + *

// C++ code:

+ * + *

CvANN_MLP_TrainParams.CvANN_MLP_TrainParams()

+ * + * + *

term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.01);

+ * + *

train_method = RPROP;

+ * + *

bp_dw_scale = bp_moment_scale = 0.1;

+ * + *

rp_dw0 = 0.1; rp_dw_plus = 1.2; rp_dw_minus = 0.5;

+ * + *

rp_dw_min = FLT_EPSILON; rp_dw_max = 50.;

+ * + * + * @see org.opencv.ml.CvANN_MLP_TrainParams.CvANN_MLP_TrainParams + */ + public CvANN_MLP_TrainParams() + { + + nativeObj = CvANN_MLP_TrainParams_0(); + + return; + } + + + // + // C++: TermCriteria CvANN_MLP_TrainParams::term_crit + // + + public TermCriteria get_term_crit() + { + + TermCriteria retVal = new TermCriteria(get_term_crit_0(nativeObj)); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::term_crit + // + + public void set_term_crit(TermCriteria term_crit) + { + + set_term_crit_0(nativeObj, term_crit.type, term_crit.maxCount, term_crit.epsilon); + + return; + } + + + // + // C++: int CvANN_MLP_TrainParams::train_method + // + + public int get_train_method() + { + + int retVal = get_train_method_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::train_method + // + + public void set_train_method(int train_method) + { + + set_train_method_0(nativeObj, train_method); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::bp_dw_scale + // + + public double get_bp_dw_scale() + { + + double retVal = get_bp_dw_scale_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::bp_dw_scale + // + + public void set_bp_dw_scale(double bp_dw_scale) + { + + set_bp_dw_scale_0(nativeObj, bp_dw_scale); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::bp_moment_scale + // + + public double get_bp_moment_scale() + { + + double retVal = get_bp_moment_scale_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::bp_moment_scale + // + + public void set_bp_moment_scale(double bp_moment_scale) + { + + set_bp_moment_scale_0(nativeObj, bp_moment_scale); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw0 + // + + public double get_rp_dw0() + { + + double retVal = get_rp_dw0_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw0 + // + + public void set_rp_dw0(double rp_dw0) + { + + 
set_rp_dw0_0(nativeObj, rp_dw0); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_plus + // + + public double get_rp_dw_plus() + { + + double retVal = get_rp_dw_plus_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_plus + // + + public void set_rp_dw_plus(double rp_dw_plus) + { + + set_rp_dw_plus_0(nativeObj, rp_dw_plus); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_minus + // + + public double get_rp_dw_minus() + { + + double retVal = get_rp_dw_minus_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_minus + // + + public void set_rp_dw_minus(double rp_dw_minus) + { + + set_rp_dw_minus_0(nativeObj, rp_dw_minus); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_min + // + + public double get_rp_dw_min() + { + + double retVal = get_rp_dw_min_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_min + // + + public void set_rp_dw_min(double rp_dw_min) + { + + set_rp_dw_min_0(nativeObj, rp_dw_min); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_max + // + + public double get_rp_dw_max() + { + + double retVal = get_rp_dw_max_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_max + // + + public void set_rp_dw_max(double rp_dw_max) + { + + set_rp_dw_max_0(nativeObj, rp_dw_max); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvANN_MLP_TrainParams::CvANN_MLP_TrainParams() + private static native long CvANN_MLP_TrainParams_0(); + + // C++: TermCriteria CvANN_MLP_TrainParams::term_crit + private static native double[] get_term_crit_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::term_crit + private static native void set_term_crit_0(long nativeObj, int term_crit_type, int term_crit_maxCount, double term_crit_epsilon); + + // C++: int 
CvANN_MLP_TrainParams::train_method + private static native int get_train_method_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::train_method + private static native void set_train_method_0(long nativeObj, int train_method); + + // C++: double CvANN_MLP_TrainParams::bp_dw_scale + private static native double get_bp_dw_scale_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::bp_dw_scale + private static native void set_bp_dw_scale_0(long nativeObj, double bp_dw_scale); + + // C++: double CvANN_MLP_TrainParams::bp_moment_scale + private static native double get_bp_moment_scale_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::bp_moment_scale + private static native void set_bp_moment_scale_0(long nativeObj, double bp_moment_scale); + + // C++: double CvANN_MLP_TrainParams::rp_dw0 + private static native double get_rp_dw0_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw0 + private static native void set_rp_dw0_0(long nativeObj, double rp_dw0); + + // C++: double CvANN_MLP_TrainParams::rp_dw_plus + private static native double get_rp_dw_plus_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_plus + private static native void set_rp_dw_plus_0(long nativeObj, double rp_dw_plus); + + // C++: double CvANN_MLP_TrainParams::rp_dw_minus + private static native double get_rp_dw_minus_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_minus + private static native void set_rp_dw_minus_0(long nativeObj, double rp_dw_minus); + + // C++: double CvANN_MLP_TrainParams::rp_dw_min + private static native double get_rp_dw_min_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_min + private static native void set_rp_dw_min_0(long nativeObj, double rp_dw_min); + + // C++: double CvANN_MLP_TrainParams::rp_dw_max + private static native double get_rp_dw_max_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_max + private static native void set_rp_dw_max_0(long nativeObj, double rp_dw_max); + 
+ // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvBoost.java b/src/org/opencv/ml/CvBoost.java new file mode 100644 index 0000000..96942fa --- /dev/null +++ b/src/org/opencv/ml/CvBoost.java @@ -0,0 +1,278 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; +import org.opencv.core.Range; + +// C++: class CvBoost +/** + *

Boosted tree classifier derived from "CvStatModel".

+ * + * @see org.opencv.ml.CvBoost : public CvStatModel + */ +public class CvBoost extends CvStatModel { + + protected CvBoost(long addr) { super(addr); } + + + public static final int + DISCRETE = 0, + REAL = 1, + LOGIT = 2, + GENTLE = 3, + DEFAULT = 0, + GINI = 1, + MISCLASS = 3, + SQERR = 4; + + + // + // C++: CvBoost::CvBoost() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @see org.opencv.ml.CvBoost.CvBoost + */ + public CvBoost() + { + + super( CvBoost_0() ); + + return; + } + + + // + // C++: CvBoost::CvBoost(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvBoost.CvBoost + */ + public CvBoost(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvBoostParams params) + { + + super( CvBoost_1(trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvBoost.CvBoost + */ + public CvBoost(Mat trainData, int tflag, Mat responses) + { + + super( CvBoost_2(trainData.nativeObj, tflag, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvBoost::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: float CvBoost::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), bool rawMode = false, bool returnSum = false) + // + +/** + *

Predicts a response for an input sample.

+ * + *

The method runs the sample through the trees in the ensemble and returns the + * output class label based on the weighted voting.

+ * + * @param sample Input sample. + * @param missing Optional mask of missing measurements. To handle missing + * measurements, the weak classifiers must include surrogate splits (see + * CvDTreeParams.use_surrogates). + * @param slice Continuous subset of the sequence of weak classifiers to be used + * for prediction. By default, all the weak classifiers are used. + * @param rawMode Normally, it should be set to false. + * @param returnSum If true then return sum of votes instead of the + * class label. + * + * @see org.opencv.ml.CvBoost.predict + */ + public float predict(Mat sample, Mat missing, Range slice, boolean rawMode, boolean returnSum) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, missing.nativeObj, slice.start, slice.end, rawMode, returnSum); + + return retVal; + } + +/** + *

Predicts a response for an input sample.

+ * + *

The method runs the sample through the trees in the ensemble and returns the + * output class label based on the weighted voting.

+ * + * @param sample Input sample. + * + * @see org.opencv.ml.CvBoost.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: void CvBoost::prune(CvSlice slice) + // + +/** + *

Removes the specified weak classifiers.

+ * + *

The method removes the specified weak classifiers from the sequence.

+ * + *

Note: Do not confuse this method with the pruning of individual decision + * trees, which is currently not supported.

+ * + * @param slice Continuous subset of the sequence of weak classifiers to be + * removed. + * + * @see org.opencv.ml.CvBoost.prune + */ + public void prune(Range slice) + { + + prune_0(nativeObj, slice.start, slice.end); + + return; + } + + + // + // C++: bool CvBoost::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams(), bool update = false) + // + +/** + *

Trains a boosted tree classifier.

+ * + *

The train method follows the common template of "CvStatModel.train". The + * responses must be categorical, which means that boosted trees cannot be built + * for regression, and there should be two classes.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * @param update Specifies whether the classifier needs to be updated + * (true, the new weak tree classifiers added to the existing + * ensemble) or the classifier needs to be rebuilt from scratch + * (false). + * + * @see org.opencv.ml.CvBoost.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvBoostParams params, boolean update) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj, update); + + return retVal; + } + +/** + *

Trains a boosted tree classifier.

+ * + *

The train method follows the common template of "CvStatModel.train". The + * responses must be categorical, which means that boosted trees cannot be built + * for regression, and there should be two classes.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvBoost.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvBoost::CvBoost() + private static native long CvBoost_0(); + + // C++: CvBoost::CvBoost(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams()) + private static native long CvBoost_1(long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native long CvBoost_2(long trainData_nativeObj, int tflag, long responses_nativeObj); + + // C++: void CvBoost::clear() + private static native void clear_0(long nativeObj); + + // C++: float CvBoost::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), bool rawMode = false, bool returnSum = false) + private static native float predict_0(long nativeObj, long sample_nativeObj, long missing_nativeObj, int slice_start, int slice_end, boolean rawMode, boolean returnSum); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: void CvBoost::prune(CvSlice slice) + private static native void prune_0(long nativeObj, int slice_start, int slice_end); + + // C++: bool CvBoost::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams(), bool update = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, 
long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj, boolean update); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvBoostParams.java b/src/org/opencv/ml/CvBoostParams.java new file mode 100644 index 0000000..bbbb6ca --- /dev/null +++ b/src/org/opencv/ml/CvBoostParams.java @@ -0,0 +1,230 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvBoostParams +/** + *

Boosting training parameters.

+ * + *

There is one structure member that you can set directly:

+ * + *

Splitting criteria used to choose optimal splits during a weak tree + * construction. Possible values are:

+ * + *
    + *
  • CvBoost.DEFAULT Use the default for the particular boosting method, + * see below. + *
+ *

+ * + *

// C++ code:

+ *
    + *
  • CvBoost.GINI Use Gini index. This is default option for Real + * AdaBoost; may be also used for Discrete AdaBoost. + *
  • CvBoost.MISCLASS Use misclassification rate. This is default option + * for Discrete AdaBoost; may be also used for Real AdaBoost. + *
  • CvBoost.SQERR Use least squares criteria. This is default and the + * only option for LogitBoost and Gentle AdaBoost. + *
+ * + *

The structure is derived from "CvDTreeParams" but not all of the decision + * tree parameters are supported. In particular, cross-validation is not + * supported. + *

+ * + *

All parameters are public. You can initialize them by a constructor and then + * override some of them directly if you want.

+ * + * @see org.opencv.ml.CvBoostParams : public CvDTreeParams + */ +public class CvBoostParams extends CvDTreeParams { + + protected CvBoostParams(long addr) { super(addr); } + + + // + // C++: CvBoostParams::CvBoostParams() + // + +/** + *

The constructors.

+ * + *

See "CvDTreeParams.CvDTreeParams" for description of other parameters.

+ * + *

Default parameters are:

+ * + *

+ * + *

// C++ code:

+ * + *

CvBoostParams.CvBoostParams()

+ * + * + *

boost_type = CvBoost.REAL;

+ * + *

weak_count = 100;

+ * + *

weight_trim_rate = 0.95;

+ * + *

cv_folds = 0;

+ * + *

max_depth = 1;

+ * + * + * @see org.opencv.ml.CvBoostParams.CvBoostParams + */ + public CvBoostParams() + { + + super( CvBoostParams_0() ); + + return; + } + + + // + // C++: int CvBoostParams::boost_type + // + + public int get_boost_type() + { + + int retVal = get_boost_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::boost_type + // + + public void set_boost_type(int boost_type) + { + + set_boost_type_0(nativeObj, boost_type); + + return; + } + + + // + // C++: int CvBoostParams::weak_count + // + + public int get_weak_count() + { + + int retVal = get_weak_count_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::weak_count + // + + public void set_weak_count(int weak_count) + { + + set_weak_count_0(nativeObj, weak_count); + + return; + } + + + // + // C++: int CvBoostParams::split_criteria + // + + public int get_split_criteria() + { + + int retVal = get_split_criteria_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::split_criteria + // + + public void set_split_criteria(int split_criteria) + { + + set_split_criteria_0(nativeObj, split_criteria); + + return; + } + + + // + // C++: double CvBoostParams::weight_trim_rate + // + + public double get_weight_trim_rate() + { + + double retVal = get_weight_trim_rate_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::weight_trim_rate + // + + public void set_weight_trim_rate(double weight_trim_rate) + { + + set_weight_trim_rate_0(nativeObj, weight_trim_rate); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvBoostParams::CvBoostParams() + private static native long CvBoostParams_0(); + + // C++: int CvBoostParams::boost_type + private static native int get_boost_type_0(long nativeObj); + + // C++: void CvBoostParams::boost_type + private static native void set_boost_type_0(long nativeObj, int boost_type); + + // C++: int CvBoostParams::weak_count + private 
static native int get_weak_count_0(long nativeObj); + + // C++: void CvBoostParams::weak_count + private static native void set_weak_count_0(long nativeObj, int weak_count); + + // C++: int CvBoostParams::split_criteria + private static native int get_split_criteria_0(long nativeObj); + + // C++: void CvBoostParams::split_criteria + private static native void set_split_criteria_0(long nativeObj, int split_criteria); + + // C++: double CvBoostParams::weight_trim_rate + private static native double get_weight_trim_rate_0(long nativeObj); + + // C++: void CvBoostParams::weight_trim_rate + private static native void set_weight_trim_rate_0(long nativeObj, double weight_trim_rate); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvDTree.java b/src/org/opencv/ml/CvDTree.java new file mode 100644 index 0000000..0b503f5 --- /dev/null +++ b/src/org/opencv/ml/CvDTree.java @@ -0,0 +1,183 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvDTree +/** + *

The class implements a decision tree as described in the beginning of this + * section.

+ * + * @see org.opencv.ml.CvDTree : public CvStatModel + */ +public class CvDTree extends CvStatModel { + + protected CvDTree(long addr) { super(addr); } + + + // + // C++: CvDTree::CvDTree() + // + + public CvDTree() + { + + super( CvDTree_0() ); + + return; + } + + + // + // C++: void CvDTree::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: Mat CvDTree::getVarImportance() + // + +/** + *

Returns the variable importance array.

+ * + * @see org.opencv.ml.CvDTree.getVarImportance + */ + public Mat getVarImportance() + { + + Mat retVal = new Mat(getVarImportance_0(nativeObj)); + + return retVal; + } + + + // + // C++: CvDTreeNode* CvDTree::predict(Mat sample, Mat missingDataMask = cv::Mat(), bool preprocessedInput = false) + // + + // Return type 'CvDTreeNode*' is not supported, skipping the function + + + // + // C++: bool CvDTree::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvDTreeParams params = CvDTreeParams()) + // + +/** + *

Trains a decision tree.

+ * + *

There are four train methods in "CvDTree":

+ *
    + *
  • The first two methods follow the generic "CvStatModel.train" + * conventions. It is the most complete form. Both data layouts + * (tflag=CV_ROW_SAMPLE and tflag=CV_COL_SAMPLE) are + * supported, as well as sample and variable subsets, missing measurements, + * arbitrary combinations of input and output variable types, and so on. The + * last parameter contains all of the necessary training parameters (see the + * "CvDTreeParams" description). + *
  • The third method uses "CvMLData" to pass training data to a decision + * tree. + *
  • The last method train is mostly used for building tree + * ensembles. It takes the pre-constructed "CvDTreeTrainData" instance and an + * optional subset of the training set. The indices in subsampleIdx + * are counted relatively to the _sample_idx, passed to the + * CvDTreeTrainData constructor. For example, if _sample_idx=[1, + * 5, 7, 100], then subsampleIdx=[0,3] means that the + * samples [1, 100] of the original training set are used. + *
+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvDTree.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvDTreeParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj); + + return retVal; + } + +/** + *

Trains a decision tree.

+ * + *

There are four train methods in "CvDTree":

+ *
    + *
  • The first two methods follow the generic "CvStatModel.train" + * conventions. It is the most complete form. Both data layouts + * (tflag=CV_ROW_SAMPLE and tflag=CV_COL_SAMPLE) are + * supported, as well as sample and variable subsets, missing measurements, + * arbitrary combinations of input and output variable types, and so on. The + * last parameter contains all of the necessary training parameters (see the + * "CvDTreeParams" description). + *
  • The third method uses "CvMLData" to pass training data to a decision + * tree. + *
  • The last method train is mostly used for building tree + * ensembles. It takes the pre-constructed "CvDTreeTrainData" instance and an + * optional subset of the training set. The indices in subsampleIdx + * are counted relatively to the _sample_idx, passed to the + * CvDTreeTrainData constructor. For example, if _sample_idx=[1, + * 5, 7, 100], then subsampleIdx=[0,3] means that the + * samples [1, 100] of the original training set are used. + *
+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvDTree.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvDTree::CvDTree() + private static native long CvDTree_0(); + + // C++: void CvDTree::clear() + private static native void clear_0(long nativeObj); + + // C++: Mat CvDTree::getVarImportance() + private static native long getVarImportance_0(long nativeObj); + + // C++: bool CvDTree::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvDTreeParams params = CvDTreeParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvDTreeParams.java b/src/org/opencv/ml/CvDTreeParams.java new file mode 100644 index 0000000..4711572 --- /dev/null +++ b/src/org/opencv/ml/CvDTreeParams.java @@ -0,0 +1,326 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvDTreeParams +/** + *

The structure contains all the decision tree training parameters. You can + * initialize it by default constructor and then override any parameters + * directly before training, or the structure may be fully initialized using the + * advanced variant of the constructor.

+ * + * @see org.opencv.ml.CvDTreeParams + */ +public class CvDTreeParams { + + protected final long nativeObj; + protected CvDTreeParams(long addr) { nativeObj = addr; } + + + // + // C++: CvDTreeParams::CvDTreeParams() + // + +/** + *

The constructors.

+ * + *

The default constructor initializes all the parameters with the default + * values tuned for the standalone classification tree:

+ * + *

+ * + *

// C++ code:

+ * + *

CvDTreeParams() : max_categories(10), max_depth(INT_MAX), min_sample_count(10),

+ * + *

cv_folds(10), use_surrogates(true), use_1se_rule(true),

+ * + *

truncate_pruned_tree(true), regression_accuracy(0.01f), priors(0)

+ * + *

{}

+ * + * @see org.opencv.ml.CvDTreeParams.CvDTreeParams + */ + public CvDTreeParams() + { + + nativeObj = CvDTreeParams_0(); + + return; + } + + + // + // C++: int CvDTreeParams::max_categories + // + + public int get_max_categories() + { + + int retVal = get_max_categories_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::max_categories + // + + public void set_max_categories(int max_categories) + { + + set_max_categories_0(nativeObj, max_categories); + + return; + } + + + // + // C++: int CvDTreeParams::max_depth + // + + public int get_max_depth() + { + + int retVal = get_max_depth_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::max_depth + // + + public void set_max_depth(int max_depth) + { + + set_max_depth_0(nativeObj, max_depth); + + return; + } + + + // + // C++: int CvDTreeParams::min_sample_count + // + + public int get_min_sample_count() + { + + int retVal = get_min_sample_count_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::min_sample_count + // + + public void set_min_sample_count(int min_sample_count) + { + + set_min_sample_count_0(nativeObj, min_sample_count); + + return; + } + + + // + // C++: int CvDTreeParams::cv_folds + // + + public int get_cv_folds() + { + + int retVal = get_cv_folds_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::cv_folds + // + + public void set_cv_folds(int cv_folds) + { + + set_cv_folds_0(nativeObj, cv_folds); + + return; + } + + + // + // C++: bool CvDTreeParams::use_surrogates + // + + public boolean get_use_surrogates() + { + + boolean retVal = get_use_surrogates_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::use_surrogates + // + + public void set_use_surrogates(boolean use_surrogates) + { + + set_use_surrogates_0(nativeObj, use_surrogates); + + return; + } + + + // + // C++: bool CvDTreeParams::use_1se_rule + // + + public boolean get_use_1se_rule() + { + + boolean retVal = 
get_use_1se_rule_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::use_1se_rule + // + + public void set_use_1se_rule(boolean use_1se_rule) + { + + set_use_1se_rule_0(nativeObj, use_1se_rule); + + return; + } + + + // + // C++: bool CvDTreeParams::truncate_pruned_tree + // + + public boolean get_truncate_pruned_tree() + { + + boolean retVal = get_truncate_pruned_tree_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::truncate_pruned_tree + // + + public void set_truncate_pruned_tree(boolean truncate_pruned_tree) + { + + set_truncate_pruned_tree_0(nativeObj, truncate_pruned_tree); + + return; + } + + + // + // C++: float CvDTreeParams::regression_accuracy + // + + public float get_regression_accuracy() + { + + float retVal = get_regression_accuracy_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::regression_accuracy + // + + public void set_regression_accuracy(float regression_accuracy) + { + + set_regression_accuracy_0(nativeObj, regression_accuracy); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvDTreeParams::CvDTreeParams() + private static native long CvDTreeParams_0(); + + // C++: int CvDTreeParams::max_categories + private static native int get_max_categories_0(long nativeObj); + + // C++: void CvDTreeParams::max_categories + private static native void set_max_categories_0(long nativeObj, int max_categories); + + // C++: int CvDTreeParams::max_depth + private static native int get_max_depth_0(long nativeObj); + + // C++: void CvDTreeParams::max_depth + private static native void set_max_depth_0(long nativeObj, int max_depth); + + // C++: int CvDTreeParams::min_sample_count + private static native int get_min_sample_count_0(long nativeObj); + + // C++: void CvDTreeParams::min_sample_count + private static native void set_min_sample_count_0(long nativeObj, int min_sample_count); + + // C++: int 
CvDTreeParams::cv_folds + private static native int get_cv_folds_0(long nativeObj); + + // C++: void CvDTreeParams::cv_folds + private static native void set_cv_folds_0(long nativeObj, int cv_folds); + + // C++: bool CvDTreeParams::use_surrogates + private static native boolean get_use_surrogates_0(long nativeObj); + + // C++: void CvDTreeParams::use_surrogates + private static native void set_use_surrogates_0(long nativeObj, boolean use_surrogates); + + // C++: bool CvDTreeParams::use_1se_rule + private static native boolean get_use_1se_rule_0(long nativeObj); + + // C++: void CvDTreeParams::use_1se_rule + private static native void set_use_1se_rule_0(long nativeObj, boolean use_1se_rule); + + // C++: bool CvDTreeParams::truncate_pruned_tree + private static native boolean get_truncate_pruned_tree_0(long nativeObj); + + // C++: void CvDTreeParams::truncate_pruned_tree + private static native void set_truncate_pruned_tree_0(long nativeObj, boolean truncate_pruned_tree); + + // C++: float CvDTreeParams::regression_accuracy + private static native float get_regression_accuracy_0(long nativeObj); + + // C++: void CvDTreeParams::regression_accuracy + private static native void set_regression_accuracy_0(long nativeObj, float regression_accuracy); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvERTrees.java b/src/org/opencv/ml/CvERTrees.java new file mode 100644 index 0000000..441ed9a --- /dev/null +++ b/src/org/opencv/ml/CvERTrees.java @@ -0,0 +1,75 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvERTrees +/** + *

The class implements the Extremely randomized trees algorithm. + * CvERTrees is inherited from "CvRTrees" and has the same + * interface, so see description of "CvRTrees" class to get details. To set the + * training parameters of Extremely randomized trees the same class "CvRTParams" + * is used.

+ * + * @see org.opencv.ml.CvERTrees : public CvRTrees + */ +public class CvERTrees extends CvRTrees { + + protected CvERTrees(long addr) { super(addr); } + + + // + // C++: CvERTrees::CvERTrees() + // + + public CvERTrees() + { + + super( CvERTrees_0() ); + + return; + } + + + // + // C++: bool CvERTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + // + + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvRTParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj); + + return retVal; + } + + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvERTrees::CvERTrees() + private static native long CvERTrees_0(); + + // C++: bool CvERTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvGBTrees.java b/src/org/opencv/ml/CvGBTrees.java new file 
mode 100644 index 0000000..566d887 --- /dev/null +++ b/src/org/opencv/ml/CvGBTrees.java @@ -0,0 +1,296 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; +import org.opencv.core.Range; + +// C++: class CvGBTrees +/** + *

The class implements the Gradient boosted tree model as described in the + * beginning of this section.

+ * + * @see org.opencv.ml.CvGBTrees : public CvStatModel + */ +public class CvGBTrees extends CvStatModel { + + protected CvGBTrees(long addr) { super(addr); } + + + public static final int + SQUARED_LOSS = 0, + ABSOLUTE_LOSS = 0+1, + HUBER_LOSS = 3, + DEVIANCE_LOSS = 3+1; + + + // + // C++: CvGBTrees::CvGBTrees() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @see org.opencv.ml.CvGBTrees.CvGBTrees + */ + public CvGBTrees() + { + + super( CvGBTrees_0() ); + + return; + } + + + // + // C++: CvGBTrees::CvGBTrees(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvGBTrees.CvGBTrees + */ + public CvGBTrees(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvGBTreesParams params) + { + + super( CvGBTrees_1(trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvGBTrees.CvGBTrees + */ + public CvGBTrees(Mat trainData, int tflag, Mat responses) + { + + super( CvGBTrees_2(trainData.nativeObj, tflag, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvGBTrees::clear() + // + +/** + *

Clears the model.

+ * + *

The function deletes the data set information and all the weak models and + * sets all internal variables to the initial state. The function is called in + * "CvGBTrees.train" and in the destructor.

+ * + * @see org.opencv.ml.CvGBTrees.clear + */ + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: float CvGBTrees::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), int k = -1) + // + +/** + *

Predicts a response for an input sample.

+ * + *

The method predicts the response corresponding to the given sample (see + * "Predicting with GBT"). + * The result is either the class label or the estimated function value. The + * "CvGBTrees.predict" method enables using the parallel version of the GBT + * model prediction if the OpenCV is built with the TBB library. In this case, + * predictions of single trees are computed in a parallel fashion.

+ * + * @param sample Input feature vector that has the same format as every training + * set element. If not all the variables were actually used during training, + * sample contains forged values at the appropriate places. + * @param missing Missing values mask, which is a dimensional matrix of the same + * size as sample having the CV_8U type. + * 1 corresponds to the missing value in the same position in the + * sample vector. If there are no missing values in the feature + * vector, an empty matrix can be passed instead of the missing mask. + * @param slice Parameter defining the part of the ensemble used for prediction. + *

If slice = Range.all(), all trees are used. Use this parameter + * to get predictions of the GBT models with different ensemble sizes learning + * only one model.

+ * @param k Number of tree ensembles built in case of the classification problem + * (see "Training GBT"). Use this parameter to change the output to sum of the + * trees' predictions in the k-th ensemble only. To get the total + * GBT model prediction, k value must be -1. For regression + * problems, k is also equal to -1. + * + * @see org.opencv.ml.CvGBTrees.predict + */ + public float predict(Mat sample, Mat missing, Range slice, int k) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, missing.nativeObj, slice.start, slice.end, k); + + return retVal; + } + +/** + *

Predicts a response for an input sample.

+ * + *

The method predicts the response corresponding to the given sample (see + * "Predicting with GBT"). + * The result is either the class label or the estimated function value. The + * "CvGBTrees.predict" method enables using the parallel version of the GBT + * model prediction if the OpenCV is built with the TBB library. In this case, + * predictions of single trees are computed in a parallel fashion.

+ * + * @param sample Input feature vector that has the same format as every training + * set element. If not all the variables were actually used during training, + * sample contains forged values at the appropriate places. + * + * @see org.opencv.ml.CvGBTrees.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: bool CvGBTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams(), bool update = false) + // + +/** + *

Trains a Gradient boosted tree model.

+ * + *

The first train method follows the common template (see "CvStatModel.train"). + * Both tflag values (CV_ROW_SAMPLE, CV_COL_SAMPLE) + * are supported. + * trainData must be of the CV_32F type. + * responses must be a matrix of type CV_32S or + * CV_32F. In both cases it is converted into the CV_32F + * matrix inside the training procedure. varIdx and + * sampleIdx must be a list of indices (CV_32S) or a + * mask (CV_8U or CV_8S). update is a + * dummy parameter.

+ * + *

The second form of "CvGBTrees.train" function uses "CvMLData" as a data set + * container. update is still a dummy parameter.

+ * + *

All parameters specific to the GBT model are passed into the training + * function as a "CvGBTreesParams" structure.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * @param update a update + * + * @see org.opencv.ml.CvGBTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvGBTreesParams params, boolean update) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj, update); + + return retVal; + } + +/** + *

Trains a Gradient boosted tree model.

+ * + *

The first train method follows the common template (see "CvStatModel.train"). + * Both tflag values (CV_ROW_SAMPLE, CV_COL_SAMPLE) + * are supported. + * trainData must be of the CV_32F type. + * responses must be a matrix of type CV_32S or + * CV_32F. In both cases it is converted into the CV_32F + * matrix inside the training procedure. varIdx and + * sampleIdx must be a list of indices (CV_32S) or a + * mask (CV_8U or CV_8S). update is a + * dummy parameter.

+ * + *

The second form of "CvGBTrees.train" function uses "CvMLData" as a data set + * container. update is still a dummy parameter.

+ * + *

All parameters specific to the GBT model are passed into the training + * function as a "CvGBTreesParams" structure.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvGBTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvGBTrees::CvGBTrees() + private static native long CvGBTrees_0(); + + // C++: CvGBTrees::CvGBTrees(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams()) + private static native long CvGBTrees_1(long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native long CvGBTrees_2(long trainData_nativeObj, int tflag, long responses_nativeObj); + + // C++: void CvGBTrees::clear() + private static native void clear_0(long nativeObj); + + // C++: float CvGBTrees::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), int k = -1) + private static native float predict_0(long nativeObj, long sample_nativeObj, long missing_nativeObj, int slice_start, int slice_end, int k); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: bool CvGBTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams(), bool update = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj, boolean 
update); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvGBTreesParams.java b/src/org/opencv/ml/CvGBTreesParams.java new file mode 100644 index 0000000..b73d745 --- /dev/null +++ b/src/org/opencv/ml/CvGBTreesParams.java @@ -0,0 +1,189 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvGBTreesParams +/** + *

GBT training parameters.

+ * + *

The structure contains parameters for each single decision tree in the + * ensemble, as well as the whole model characteristics. The structure is + * derived from "CvDTreeParams" but not all of the decision tree parameters are + * supported: cross-validation, pruning, and class priorities are not used.

+ * + * @see org.opencv.ml.CvGBTreesParams : public CvDTreeParams + */ +public class CvGBTreesParams extends CvDTreeParams { + + protected CvGBTreesParams(long addr) { super(addr); } + + + // + // C++: CvGBTreesParams::CvGBTreesParams() + // + +/** + *

By default the following constructor is used: CvGBTreesParams(CvGBTrees.SQUARED_LOSS, + * 200, 0.8f, 0.01f, 3, false)

+ * + *

// C++ code:

+ * + *

: CvDTreeParams(3, 10, 0, false, 10, 0, false, false, 0)

+ * + * @see org.opencv.ml.CvGBTreesParams.CvGBTreesParams + */ + public CvGBTreesParams() + { + + super( CvGBTreesParams_0() ); + + return; + } + + + // + // C++: int CvGBTreesParams::weak_count + // + + public int get_weak_count() + { + + int retVal = get_weak_count_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::weak_count + // + + public void set_weak_count(int weak_count) + { + + set_weak_count_0(nativeObj, weak_count); + + return; + } + + + // + // C++: int CvGBTreesParams::loss_function_type + // + + public int get_loss_function_type() + { + + int retVal = get_loss_function_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::loss_function_type + // + + public void set_loss_function_type(int loss_function_type) + { + + set_loss_function_type_0(nativeObj, loss_function_type); + + return; + } + + + // + // C++: float CvGBTreesParams::subsample_portion + // + + public float get_subsample_portion() + { + + float retVal = get_subsample_portion_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::subsample_portion + // + + public void set_subsample_portion(float subsample_portion) + { + + set_subsample_portion_0(nativeObj, subsample_portion); + + return; + } + + + // + // C++: float CvGBTreesParams::shrinkage + // + + public float get_shrinkage() + { + + float retVal = get_shrinkage_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::shrinkage + // + + public void set_shrinkage(float shrinkage) + { + + set_shrinkage_0(nativeObj, shrinkage); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvGBTreesParams::CvGBTreesParams() + private static native long CvGBTreesParams_0(); + + // C++: int CvGBTreesParams::weak_count + private static native int get_weak_count_0(long nativeObj); + + // C++: void CvGBTreesParams::weak_count + private static native void set_weak_count_0(long nativeObj, int 
weak_count); + + // C++: int CvGBTreesParams::loss_function_type + private static native int get_loss_function_type_0(long nativeObj); + + // C++: void CvGBTreesParams::loss_function_type + private static native void set_loss_function_type_0(long nativeObj, int loss_function_type); + + // C++: float CvGBTreesParams::subsample_portion + private static native float get_subsample_portion_0(long nativeObj); + + // C++: void CvGBTreesParams::subsample_portion + private static native void set_subsample_portion_0(long nativeObj, float subsample_portion); + + // C++: float CvGBTreesParams::shrinkage + private static native float get_shrinkage_0(long nativeObj); + + // C++: void CvGBTreesParams::shrinkage + private static native void set_shrinkage_0(long nativeObj, float shrinkage); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvKNearest.java b/src/org/opencv/ml/CvKNearest.java new file mode 100644 index 0000000..4af7cb6 --- /dev/null +++ b/src/org/opencv/ml/CvKNearest.java @@ -0,0 +1,224 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvKNearest +/** + *

The class implements K-Nearest Neighbors model as described in the beginning + * of this section.

+ * + * @see org.opencv.ml.CvKNearest : public CvStatModel + */ +public class CvKNearest extends CvStatModel { + + protected CvKNearest(long addr) { super(addr); } + + + // + // C++: CvKNearest::CvKNearest() + // + +/** + *

Default and training constructors.

+ * + *

See "CvKNearest.train" for additional parameters descriptions.

+ * + * @see org.opencv.ml.CvKNearest.CvKNearest + */ + public CvKNearest() + { + + super( CvKNearest_0() ); + + return; + } + + + // + // C++: CvKNearest::CvKNearest(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int max_k = 32) + // + +/** + *

Default and training constructors.

+ * + *

See "CvKNearest.train" for additional parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * @param sampleIdx a sampleIdx + * @param isRegression a isRegression + * @param max_k a max_k + * + * @see org.opencv.ml.CvKNearest.CvKNearest + */ + public CvKNearest(Mat trainData, Mat responses, Mat sampleIdx, boolean isRegression, int max_k) + { + + super( CvKNearest_1(trainData.nativeObj, responses.nativeObj, sampleIdx.nativeObj, isRegression, max_k) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

See "CvKNearest.train" for additional parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvKNearest.CvKNearest + */ + public CvKNearest(Mat trainData, Mat responses) + { + + super( CvKNearest_2(trainData.nativeObj, responses.nativeObj) ); + + return; + } + + + // + // C++: float CvKNearest::find_nearest(Mat samples, int k, Mat& results, Mat& neighborResponses, Mat& dists) + // + +/** + *

Finds the neighbors and predicts responses for input vectors.

+ * + *

For each input vector (a row of the matrix samples), the method + * finds the k nearest neighbors. In case of regression, the + * predicted result is a mean value of the particular vector's neighbor + * responses. In case of classification, the class is determined by voting.

+ * + *

For each input vector, the neighbors are sorted by their distances to the + * vector.

+ * + *

In case of C++ interface you can use output pointers to empty matrices and + * the function will allocate memory itself.

+ * + *

If only a single input vector is passed, all output matrices are optional and + * the predicted value is returned by the method.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param samples Input samples stored by rows. It is a single-precision + * floating-point matrix of number_of_samples x number_of_features + * size. + * @param k Number of used nearest neighbors. It must satisfy constraint: k + * <= "CvKNearest.get_max_k". + * @param results Vector with results of prediction (regression or + * classification) for each input sample. It is a single-precision + * floating-point vector with number_of_samples elements. + * @param neighborResponses Optional output values for corresponding + * neighbors. It is a single-precision floating-point matrix of + * number_of_samples x k size. + * @param dists a dists + * + * @see org.opencv.ml.CvKNearest.find_nearest + */ + public float find_nearest(Mat samples, int k, Mat results, Mat neighborResponses, Mat dists) + { + + float retVal = find_nearest_0(nativeObj, samples.nativeObj, k, results.nativeObj, neighborResponses.nativeObj, dists.nativeObj); + + return retVal; + } + + + // + // C++: bool CvKNearest::train(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int maxK = 32, bool updateBase = false) + // + +/** + *

Trains the model.

+ * + *

The method trains the K-Nearest model. It follows the conventions of the + * generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (is_regression=false) + * or ordered (is_regression=true). + *
  • Variable subsets (var_idx) and missing measurements are + * not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * @param sampleIdx a sampleIdx + * @param isRegression Type of the problem: true for regression and + * false for classification. + * @param maxK Number of maximum neighbors that may be passed to the method + * "CvKNearest.find_nearest". + * @param updateBase Specifies whether the model is trained from scratch + * (update_base=false), or it is updated using the new training + * data (update_base=true). In the latter case, the parameter + * maxK must not be larger than the original value. + * + * @see org.opencv.ml.CvKNearest.train + */ + public boolean train(Mat trainData, Mat responses, Mat sampleIdx, boolean isRegression, int maxK, boolean updateBase) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, responses.nativeObj, sampleIdx.nativeObj, isRegression, maxK, updateBase); + + return retVal; + } + +/** + *

Trains the model.

+ * + *

The method trains the K-Nearest model. It follows the conventions of the + * generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (is_regression=false) + * or ordered (is_regression=true). + *
  • Variable subsets (var_idx) and missing measurements are + * not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvKNearest.train + */ + public boolean train(Mat trainData, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvKNearest::CvKNearest() + private static native long CvKNearest_0(); + + // C++: CvKNearest::CvKNearest(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int max_k = 32) + private static native long CvKNearest_1(long trainData_nativeObj, long responses_nativeObj, long sampleIdx_nativeObj, boolean isRegression, int max_k); + private static native long CvKNearest_2(long trainData_nativeObj, long responses_nativeObj); + + // C++: float CvKNearest::find_nearest(Mat samples, int k, Mat& results, Mat& neighborResponses, Mat& dists) + private static native float find_nearest_0(long nativeObj, long samples_nativeObj, int k, long results_nativeObj, long neighborResponses_nativeObj, long dists_nativeObj); + + // C++: bool CvKNearest::train(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int maxK = 32, bool updateBase = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long sampleIdx_nativeObj, boolean isRegression, int maxK, boolean updateBase); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvNormalBayesClassifier.java b/src/org/opencv/ml/CvNormalBayesClassifier.java new file mode 100644 index 0000000..0bbde7a --- /dev/null +++ b/src/org/opencv/ml/CvNormalBayesClassifier.java @@ -0,0 +1,243 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvNormalBayesClassifier +/** + *

Bayes classifier for normally distributed data.

+ * + * @see org.opencv.ml.CvNormalBayesClassifier : public CvStatModel + */ +public class CvNormalBayesClassifier extends CvStatModel { + + protected CvNormalBayesClassifier(long addr) { super(addr); } + + + // + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @see org.opencv.ml.CvNormalBayesClassifier.CvNormalBayesClassifier + */ + public CvNormalBayesClassifier() + { + + super( CvNormalBayesClassifier_0() ); + + return; + } + + + // + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * + * @see org.opencv.ml.CvNormalBayesClassifier.CvNormalBayesClassifier + */ + public CvNormalBayesClassifier(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx) + { + + super( CvNormalBayesClassifier_1(trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvNormalBayesClassifier.CvNormalBayesClassifier + */ + public CvNormalBayesClassifier(Mat trainData, Mat responses) + { + + super( CvNormalBayesClassifier_2(trainData.nativeObj, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvNormalBayesClassifier::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: float CvNormalBayesClassifier::predict(Mat samples, Mat* results = 0) + // + +/** + *

Predicts the response for sample(s).

+ * + *

The method estimates the most probable classes for input vectors. Input + * vectors (one or more) are stored as rows of the matrix samples. + * In case of multiple input vectors, there should be one output vector + * results. The predicted class for a single input vector is + * returned by the method.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param samples a samples + * @param results a results + * + * @see org.opencv.ml.CvNormalBayesClassifier.predict + */ + public float predict(Mat samples, Mat results) + { + + float retVal = predict_0(nativeObj, samples.nativeObj, results.nativeObj); + + return retVal; + } + +/** + *

Predicts the response for sample(s).

+ * + *

The method estimates the most probable classes for input vectors. Input + * vectors (one or more) are stored as rows of the matrix samples. + * In case of multiple input vectors, there should be one output vector + * results. The predicted class for a single input vector is + * returned by the method.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param samples a samples + * + * @see org.opencv.ml.CvNormalBayesClassifier.predict + */ + public float predict(Mat samples) + { + + float retVal = predict_1(nativeObj, samples.nativeObj); + + return retVal; + } + + + // + // C++: bool CvNormalBayesClassifier::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), bool update = false) + // + +/** + *

Trains the model.

+ * + *

The method trains the Normal Bayes classifier. It follows the conventions of + * the generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variable is categorical, which means that elements of + * responses must be integer numbers, though the vector may have + * the CV_32FC1 type. + *
  • Missing measurements are not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param update Identifies whether the model should be trained from scratch + * (update=false) or should be updated using the new training data + * (update=true). + * + * @see org.opencv.ml.CvNormalBayesClassifier.train + */ + public boolean train(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, boolean update) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, update); + + return retVal; + } + +/** + *

Trains the model.

+ * + *

The method trains the Normal Bayes classifier. It follows the conventions of + * the generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variable is categorical, which means that elements of + * responses must be integer numbers, though the vector may have + * the CV_32FC1 type. + *
  • Missing measurements are not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvNormalBayesClassifier.train + */ + public boolean train(Mat trainData, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier() + private static native long CvNormalBayesClassifier_0(); + + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat()) + private static native long CvNormalBayesClassifier_1(long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj); + private static native long CvNormalBayesClassifier_2(long trainData_nativeObj, long responses_nativeObj); + + // C++: void CvNormalBayesClassifier::clear() + private static native void clear_0(long nativeObj); + + // C++: float CvNormalBayesClassifier::predict(Mat samples, Mat* results = 0) + private static native float predict_0(long nativeObj, long samples_nativeObj, long results_nativeObj); + private static native float predict_1(long nativeObj, long samples_nativeObj); + + // C++: bool CvNormalBayesClassifier::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), bool update = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, boolean update); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvParamGrid.java b/src/org/opencv/ml/CvParamGrid.java new file mode 100644 index 0000000..145e499 --- /dev/null +++ 
b/src/org/opencv/ml/CvParamGrid.java @@ -0,0 +1,192 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvParamGrid +/** + *

The structure represents the logarithmic grid range of statmodel parameters. + * It is used for optimizing statmodel accuracy by varying model parameters, the + * accuracy estimate being computed by cross-validation.

+ * + *

Minimum value of the statmodel parameter.

+ * + *

Maximum value of the statmodel parameter. + *

+ * + *

// C++ code:

+ * + *

Logarithmic step for iterating the statmodel parameter.

+ * + *

The grid determines the following iteration sequence of the statmodel + * parameter values:

+ * + *

(min_val, min_val*step, min_val*(step)^2, dots, min_val*(step)^n),

+ * + *

where n is the maximal index satisfying

+ * + *

min_val * step ^n < max_val

+ * + *

The grid is logarithmic, so step must always be greater than 1.

+ * + * @see org.opencv.ml.CvParamGrid + */ +public class CvParamGrid { + + protected final long nativeObj; + protected CvParamGrid(long addr) { nativeObj = addr; } + + + public static final int + SVM_C = 0, + SVM_GAMMA = 1, + SVM_P = 2, + SVM_NU = 3, + SVM_COEF = 4, + SVM_DEGREE = 5; + + + // + // C++: CvParamGrid::CvParamGrid() + // + +/** + *

The constructors.

+ * + *

The full constructor initializes corresponding members. The default + * constructor creates a dummy grid:

+ * + *

+ * + *

// C++ code:

+ * + *

CvParamGrid.CvParamGrid()

+ * + * + *

min_val = max_val = step = 0;

+ * + * + * @see org.opencv.ml.CvParamGrid.CvParamGrid + */ + public CvParamGrid() + { + + nativeObj = CvParamGrid_0(); + + return; + } + + + // + // C++: double CvParamGrid::min_val + // + + public double get_min_val() + { + + double retVal = get_min_val_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvParamGrid::min_val + // + + public void set_min_val(double min_val) + { + + set_min_val_0(nativeObj, min_val); + + return; + } + + + // + // C++: double CvParamGrid::max_val + // + + public double get_max_val() + { + + double retVal = get_max_val_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvParamGrid::max_val + // + + public void set_max_val(double max_val) + { + + set_max_val_0(nativeObj, max_val); + + return; + } + + + // + // C++: double CvParamGrid::step + // + + public double get_step() + { + + double retVal = get_step_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvParamGrid::step + // + + public void set_step(double step) + { + + set_step_0(nativeObj, step); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvParamGrid::CvParamGrid() + private static native long CvParamGrid_0(); + + // C++: double CvParamGrid::min_val + private static native double get_min_val_0(long nativeObj); + + // C++: void CvParamGrid::min_val + private static native void set_min_val_0(long nativeObj, double min_val); + + // C++: double CvParamGrid::max_val + private static native double get_max_val_0(long nativeObj); + + // C++: void CvParamGrid::max_val + private static native void set_max_val_0(long nativeObj, double max_val); + + // C++: double CvParamGrid::step + private static native double get_step_0(long nativeObj); + + // C++: void CvParamGrid::step + private static native void set_step_0(long nativeObj, double step); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvRTParams.java 
b/src/org/opencv/ml/CvRTParams.java new file mode 100644 index 0000000..a2c06eb --- /dev/null +++ b/src/org/opencv/ml/CvRTParams.java @@ -0,0 +1,147 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.TermCriteria; + +// C++: class CvRTParams +/** + *

Training parameters of random trees.

+ * + *

The set of training parameters for the forest is a superset of the training + * parameters for a single tree. However, random trees do not need all the + * functionality/features of decision trees. Most noticeably, the trees are not + * pruned, so the cross-validation parameters are not used.

+ * + * @see org.opencv.ml.CvRTParams : public CvDTreeParams + */ +public class CvRTParams extends CvDTreeParams { + + protected CvRTParams(long addr) { super(addr); } + + + // + // C++: CvRTParams::CvRTParams() + // + + public CvRTParams() + { + + super( CvRTParams_0() ); + + return; + } + + + // + // C++: bool CvRTParams::calc_var_importance + // + + public boolean get_calc_var_importance() + { + + boolean retVal = get_calc_var_importance_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvRTParams::calc_var_importance + // + + public void set_calc_var_importance(boolean calc_var_importance) + { + + set_calc_var_importance_0(nativeObj, calc_var_importance); + + return; + } + + + // + // C++: int CvRTParams::nactive_vars + // + + public int get_nactive_vars() + { + + int retVal = get_nactive_vars_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvRTParams::nactive_vars + // + + public void set_nactive_vars(int nactive_vars) + { + + set_nactive_vars_0(nativeObj, nactive_vars); + + return; + } + + + // + // C++: TermCriteria CvRTParams::term_crit + // + + public TermCriteria get_term_crit() + { + + TermCriteria retVal = new TermCriteria(get_term_crit_0(nativeObj)); + + return retVal; + } + + + // + // C++: void CvRTParams::term_crit + // + + public void set_term_crit(TermCriteria term_crit) + { + + set_term_crit_0(nativeObj, term_crit.type, term_crit.maxCount, term_crit.epsilon); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvRTParams::CvRTParams() + private static native long CvRTParams_0(); + + // C++: bool CvRTParams::calc_var_importance + private static native boolean get_calc_var_importance_0(long nativeObj); + + // C++: void CvRTParams::calc_var_importance + private static native void set_calc_var_importance_0(long nativeObj, boolean calc_var_importance); + + // C++: int CvRTParams::nactive_vars + private static native int get_nactive_vars_0(long nativeObj); + + 
// C++: void CvRTParams::nactive_vars + private static native void set_nactive_vars_0(long nativeObj, int nactive_vars); + + // C++: TermCriteria CvRTParams::term_crit + private static native double[] get_term_crit_0(long nativeObj); + + // C++: void CvRTParams::term_crit + private static native void set_term_crit_0(long nativeObj, int term_crit_type, int term_crit_maxCount, double term_crit_epsilon); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvRTrees.java b/src/org/opencv/ml/CvRTrees.java new file mode 100644 index 0000000..57b903b --- /dev/null +++ b/src/org/opencv/ml/CvRTrees.java @@ -0,0 +1,256 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvRTrees +/** + *

The class implements the random forest predictor as described in the + * beginning of this section.

+ * + * @see org.opencv.ml.CvRTrees : public CvStatModel + */ +public class CvRTrees extends CvStatModel { + + protected CvRTrees(long addr) { super(addr); } + + + // + // C++: CvRTrees::CvRTrees() + // + + public CvRTrees() + { + + super( CvRTrees_0() ); + + return; + } + + + // + // C++: void CvRTrees::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: Mat CvRTrees::getVarImportance() + // + +/** + *

Returns the variable importance array.

+ * + *

The method returns the variable importance vector, computed at the training + * stage when CvRTParams.calc_var_importance is set to true. If + * this flag was set to false, the NULL pointer is returned. This + * differs from the decision trees where variable importance can be computed + * anytime after the training.

+ * + * @see org.opencv.ml.CvRTrees.getVarImportance + */ + public Mat getVarImportance() + { + + Mat retVal = new Mat(getVarImportance_0(nativeObj)); + + return retVal; + } + + + // + // C++: float CvRTrees::predict(Mat sample, Mat missing = cv::Mat()) + // + +/** + *

Predicts the output for an input sample.

+ * + *

The input parameters of the prediction method are the same as in + * "CvDTree.predict" but the return value type is different. This method + * returns the cumulative result from all the trees in the forest (the class + * that receives the majority of votes, or the mean of the regression function + * estimates).

+ * + * @param sample Sample for classification. + * @param missing Optional missing measurement mask of the sample. + * + * @see org.opencv.ml.CvRTrees.predict + */ + public float predict(Mat sample, Mat missing) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, missing.nativeObj); + + return retVal; + } + +/** + *

Predicts the output for an input sample.

+ * + *

The input parameters of the prediction method are the same as in + * "CvDTree.predict" but the return value type is different. This method + * returns the cumulative result from all the trees in the forest (the class + * that receives the majority of votes, or the mean of the regression function + * estimates).

+ * + * @param sample Sample for classification. + * + * @see org.opencv.ml.CvRTrees.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: float CvRTrees::predict_prob(Mat sample, Mat missing = cv::Mat()) + // + +/** + *

Returns a fuzzy-predicted class label.

+ * + *

The function works for binary classification problems only. It returns the + * number between 0 and 1. This number represents probability or confidence of + * the sample belonging to the second class. It is calculated as the proportion + * of decision trees that classified the sample to the second class.

+ * + * @param sample Sample for classification. + * @param missing Optional missing measurement mask of the sample. + * + * @see org.opencv.ml.CvRTrees.predict_prob + */ + public float predict_prob(Mat sample, Mat missing) + { + + float retVal = predict_prob_0(nativeObj, sample.nativeObj, missing.nativeObj); + + return retVal; + } + +/** + *

Returns a fuzzy-predicted class label.

+ * + *

The function works for binary classification problems only. It returns the + * number between 0 and 1. This number represents probability or confidence of + * the sample belonging to the second class. It is calculated as the proportion + * of decision trees that classified the sample to the second class.

+ * + * @param sample Sample for classification. + * + * @see org.opencv.ml.CvRTrees.predict_prob + */ + public float predict_prob(Mat sample) + { + + float retVal = predict_prob_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: bool CvRTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + // + +/** + *

Trains the Random Trees model.

+ * + *

The method "CvRTrees.train" is very similar to the method "CvDTree.train" + * and follows the generic method "CvStatModel.train" conventions. All the + * parameters specific to the algorithm training are passed as a "CvRTParams" + * instance. The estimate of the training error (oob-error) is + * stored in the protected class member oob_error.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvRTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvRTParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj); + + return retVal; + } + +/** + *

Trains the Random Trees model.

+ * + *

The method "CvRTrees.train" is very similar to the method "CvDTree.train" + * and follows the generic method "CvStatModel.train" conventions. All the + * parameters specific to the algorithm training are passed as a "CvRTParams" + * instance. The estimate of the training error (oob-error) is + * stored in the protected class member oob_error.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvRTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvRTrees::CvRTrees() + private static native long CvRTrees_0(); + + // C++: void CvRTrees::clear() + private static native void clear_0(long nativeObj); + + // C++: Mat CvRTrees::getVarImportance() + private static native long getVarImportance_0(long nativeObj); + + // C++: float CvRTrees::predict(Mat sample, Mat missing = cv::Mat()) + private static native float predict_0(long nativeObj, long sample_nativeObj, long missing_nativeObj); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: float CvRTrees::predict_prob(Mat sample, Mat missing = cv::Mat()) + private static native float predict_prob_0(long nativeObj, long sample_nativeObj, long missing_nativeObj); + private static native float predict_prob_1(long nativeObj, long sample_nativeObj); + + // C++: bool CvRTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvSVM.java b/src/org/opencv/ml/CvSVM.java new file mode 100644 
index 0000000..cc9771d --- /dev/null +++ b/src/org/opencv/ml/CvSVM.java @@ -0,0 +1,442 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvSVM +/** + *

Support Vector Machines.

+ * + * @see org.opencv.ml.CvSVM : public CvStatModel + */ +public class CvSVM extends CvStatModel { + + protected CvSVM(long addr) { super(addr); } + + + public static final int + C_SVC = 100, + NU_SVC = 101, + ONE_CLASS = 102, + EPS_SVR = 103, + NU_SVR = 104, + LINEAR = 0, + POLY = 1, + RBF = 2, + SIGMOID = 3, + C = 0, + GAMMA = 1, + P = 2, + NU = 3, + COEF = 4, + DEGREE = 5; + + + // + // C++: CvSVM::CvSVM() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @see org.opencv.ml.CvSVM.CvSVM + */ + public CvSVM() + { + + super( CvSVM_0() ); + + return; + } + + + // + // C++: CvSVM::CvSVM(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * + * @see org.opencv.ml.CvSVM.CvSVM + */ + public CvSVM(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params) + { + + super( CvSVM_1(trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvSVM.CvSVM + */ + public CvSVM(Mat trainData, Mat responses) + { + + super( CvSVM_2(trainData.nativeObj, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvSVM::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: int CvSVM::get_support_vector_count() + // + + public int get_support_vector_count() + { + + int retVal = get_support_vector_count_0(nativeObj); + + return retVal; + } + + + // + // C++: int CvSVM::get_var_count() + // + +/** + *

Returns the number of used features (variables count).

+ * + * @see org.opencv.ml.CvSVM.get_var_count + */ + public int get_var_count() + { + + int retVal = get_var_count_0(nativeObj); + + return retVal; + } + + + // + // C++: float CvSVM::predict(Mat sample, bool returnDFVal = false) + // + +/** + *

Predicts the response for input sample(s).

+ * + *

If you pass one sample then prediction result is returned. If you want to get + * responses for several samples then you should pass the results + * matrix where prediction results will be stored.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param sample Input sample for prediction. + * @param returnDFVal Specifies a type of the return value. If true + * and the problem is 2-class classification then the method returns the + * decision function value that is signed distance to the margin, else the + * function returns a class label (classification) or estimated function value + * (regression). + * + * @see org.opencv.ml.CvSVM.predict + */ + public float predict(Mat sample, boolean returnDFVal) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, returnDFVal); + + return retVal; + } + +/** + *

Predicts the response for input sample(s).

+ * + *

If you pass one sample then prediction result is returned. If you want to get + * responses for several samples then you should pass the results + * matrix where prediction results will be stored.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param sample Input sample for prediction. + * + * @see org.opencv.ml.CvSVM.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: void CvSVM::predict(Mat samples, Mat& results) + // + +/** + *

Predicts the response for input sample(s).

+ * + *

If you pass one sample then prediction result is returned. If you want to get + * responses for several samples then you should pass the results + * matrix where prediction results will be stored.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param samples Input samples for prediction. + * @param results Output prediction responses for corresponding samples. + * + * @see org.opencv.ml.CvSVM.predict + */ + public void predict_all(Mat samples, Mat results) + { + + predict_all_0(nativeObj, samples.nativeObj, results.nativeObj); + + return; + } + + + // + // C++: bool CvSVM::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + // + +/** + *

Trains an SVM.

+ * + *

The method trains the SVM model. It follows the conventions of the generic + * "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only the CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC), or ordered (params.svm_type=CvSVM.EPS_SVR + * or params.svm_type=CvSVM.NU_SVR), or not required at all + * (params.svm_type=CvSVM.ONE_CLASS). + *
  • Missing measurements are not supported. + *
+ * + *

All the other parameters are gathered in the "CvSVMParams" structure.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * + * @see org.opencv.ml.CvSVM.train + */ + public boolean train(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj); + + return retVal; + } + +/** + *

Trains an SVM.

+ * + *

The method trains the SVM model. It follows the conventions of the generic + * "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only the CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC), or ordered (params.svm_type=CvSVM.EPS_SVR + * or params.svm_type=CvSVM.NU_SVR), or not required at all + * (params.svm_type=CvSVM.ONE_CLASS). + *
  • Missing measurements are not supported. + *
+ * + *

All the other parameters are gathered in the "CvSVMParams" structure.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvSVM.train + */ + public boolean train(Mat trainData, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, responses.nativeObj); + + return retVal; + } + + + // + // C++: bool CvSVM::train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params, int k_fold = 10, CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), bool balanced = false) + // + +/** + *

Trains an SVM with optimal parameters.

+ * + *

The method trains the SVM model automatically by choosing the optimal + * parameters C, gamma, p, + * nu, coef0, degree from "CvSVMParams". + * Parameters are considered optimal when the cross-validation estimate of the + * test set error is minimal.

+ * + *

If there is no need to optimize a parameter, the corresponding grid step + * should be set to any value less than or equal to 1. For example, to avoid + * optimization in gamma, set gamma_grid.step = 0, + * gamma_grid.min_val, gamma_grid.max_val as arbitrary + * numbers. In this case, the value params.gamma is taken for + * gamma.

+ * + *

And, finally, if the optimization in a parameter is required but the + * corresponding grid is unknown, you may call the function "CvSVM.get_default_grid". + * To generate a grid, for example, for gamma, call + * CvSVM.get_default_grid(CvSVM.GAMMA).

+ * + *

This function works for the classification (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC) as well as for the regression + * (params.svm_type=CvSVM.EPS_SVR or params.svm_type=CvSVM.NU_SVR). + * If params.svm_type=CvSVM.ONE_CLASS, no optimization is made and + * the usual SVM with parameters specified in params is executed.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * @param k_fold Cross-validation parameter. The training set is divided into + * k_fold subsets. One subset is used to test the model, the others + * form the train set. So, the SVM algorithm is executed k_fold + * times. + * @param Cgrid a Cgrid + * @param gammaGrid Iteration grid for the corresponding SVM parameter. + * @param pGrid Iteration grid for the corresponding SVM parameter. + * @param nuGrid Iteration grid for the corresponding SVM parameter. + * @param coeffGrid Iteration grid for the corresponding SVM parameter. + * @param degreeGrid Iteration grid for the corresponding SVM parameter. + * @param balanced If true and the problem is 2-class + * classification then the method creates more balanced cross-validation subsets + * that is proportions between classes in subsets are close to such proportion + * in the whole train dataset. + * + * @see org.opencv.ml.CvSVM.train_auto + */ + public boolean train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params, int k_fold, CvParamGrid Cgrid, CvParamGrid gammaGrid, CvParamGrid pGrid, CvParamGrid nuGrid, CvParamGrid coeffGrid, CvParamGrid degreeGrid, boolean balanced) + { + + boolean retVal = train_auto_0(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj, k_fold, Cgrid.nativeObj, gammaGrid.nativeObj, pGrid.nativeObj, nuGrid.nativeObj, coeffGrid.nativeObj, degreeGrid.nativeObj, balanced); + + return retVal; + } + +/** + *

Trains an SVM with optimal parameters.

+ * + *

The method trains the SVM model automatically by choosing the optimal + * parameters C, gamma, p, + * nu, coef0, degree from "CvSVMParams". + * Parameters are considered optimal when the cross-validation estimate of the + * test set error is minimal.

+ * + *

If there is no need to optimize a parameter, the corresponding grid step + * should be set to any value less than or equal to 1. For example, to avoid + * optimization in gamma, set gamma_grid.step = 0, + * gamma_grid.min_val, gamma_grid.max_val as arbitrary + * numbers. In this case, the value params.gamma is taken for + * gamma.

+ * + *

And, finally, if the optimization in a parameter is required but the + * corresponding grid is unknown, you may call the function "CvSVM.get_default_grid". + * To generate a grid, for example, for gamma, call + * CvSVM.get_default_grid(CvSVM.GAMMA).

+ * + *

This function works for the classification (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC) as well as for the regression + * (params.svm_type=CvSVM.EPS_SVR or params.svm_type=CvSVM.NU_SVR). + * If params.svm_type=CvSVM.ONE_CLASS, no optimization is made and + * the usual SVM with parameters specified in params is executed.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * + * @see org.opencv.ml.CvSVM.train_auto + */ + public boolean train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params) + { + + boolean retVal = train_auto_1(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvSVM::CvSVM() + private static native long CvSVM_0(); + + // C++: CvSVM::CvSVM(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + private static native long CvSVM_1(long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj); + private static native long CvSVM_2(long trainData_nativeObj, long responses_nativeObj); + + // C++: void CvSVM::clear() + private static native void clear_0(long nativeObj); + + // C++: int CvSVM::get_support_vector_count() + private static native int get_support_vector_count_0(long nativeObj); + + // C++: int CvSVM::get_var_count() + private static native int get_var_count_0(long nativeObj); + + // C++: float CvSVM::predict(Mat sample, bool returnDFVal = false) + private static native float predict_0(long nativeObj, long sample_nativeObj, boolean returnDFVal); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: void CvSVM::predict(Mat samples, Mat& results) + private static native void predict_all_0(long nativeObj, long samples_nativeObj, long results_nativeObj); + + // C++: bool CvSVM::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, long 
responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj); + + // C++: bool CvSVM::train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params, int k_fold = 10, CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), bool balanced = false) + private static native boolean train_auto_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj, int k_fold, long Cgrid_nativeObj, long gammaGrid_nativeObj, long pGrid_nativeObj, long nuGrid_nativeObj, long coeffGrid_nativeObj, long degreeGrid_nativeObj, boolean balanced); + private static native boolean train_auto_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvSVMParams.java b/src/org/opencv/ml/CvSVMParams.java new file mode 100644 index 0000000..378bdf6 --- /dev/null +++ b/src/org/opencv/ml/CvSVMParams.java @@ -0,0 +1,360 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.TermCriteria; + +// C++: class CvSVMParams +/** + *

SVM training parameters.

+ * + *

The structure must be initialized and passed to the training method of + * "CvSVM".

+ * + * @see org.opencv.ml.CvSVMParams + */ +public class CvSVMParams { + + protected final long nativeObj; + protected CvSVMParams(long addr) { nativeObj = addr; } + + + // + // C++: CvSVMParams::CvSVMParams() + // + +/** + *

The constructors.

+ * + *

The default constructor initialize the structure with following values:

+ * + *

+ * + *

// C++ code:

+ * + *

CvSVMParams.CvSVMParams() :

+ * + *

svm_type(CvSVM.C_SVC), kernel_type(CvSVM.RBF), degree(0),

+ * + *

gamma(1), coef0(0), C(1), nu(0), p(0), class_weights(0)

+ * + * + *

term_crit = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, + * FLT_EPSILON);

+ * + * + * @see org.opencv.ml.CvSVMParams.CvSVMParams + */ + public CvSVMParams() + { + + nativeObj = CvSVMParams_0(); + + return; + } + + + // + // C++: int CvSVMParams::svm_type + // + + public int get_svm_type() + { + + int retVal = get_svm_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::svm_type + // + + public void set_svm_type(int svm_type) + { + + set_svm_type_0(nativeObj, svm_type); + + return; + } + + + // + // C++: int CvSVMParams::kernel_type + // + + public int get_kernel_type() + { + + int retVal = get_kernel_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::kernel_type + // + + public void set_kernel_type(int kernel_type) + { + + set_kernel_type_0(nativeObj, kernel_type); + + return; + } + + + // + // C++: double CvSVMParams::degree + // + + public double get_degree() + { + + double retVal = get_degree_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::degree + // + + public void set_degree(double degree) + { + + set_degree_0(nativeObj, degree); + + return; + } + + + // + // C++: double CvSVMParams::gamma + // + + public double get_gamma() + { + + double retVal = get_gamma_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::gamma + // + + public void set_gamma(double gamma) + { + + set_gamma_0(nativeObj, gamma); + + return; + } + + + // + // C++: double CvSVMParams::coef0 + // + + public double get_coef0() + { + + double retVal = get_coef0_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::coef0 + // + + public void set_coef0(double coef0) + { + + set_coef0_0(nativeObj, coef0); + + return; + } + + + // + // C++: double CvSVMParams::C + // + + public double get_C() + { + + double retVal = get_C_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::C + // + + public void set_C(double C) + { + + set_C_0(nativeObj, C); + + return; + } + + + // + // C++: double CvSVMParams::nu + // + + public double get_nu() + 
{ + + double retVal = get_nu_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::nu + // + + public void set_nu(double nu) + { + + set_nu_0(nativeObj, nu); + + return; + } + + + // + // C++: double CvSVMParams::p + // + + public double get_p() + { + + double retVal = get_p_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::p + // + + public void set_p(double p) + { + + set_p_0(nativeObj, p); + + return; + } + + + // + // C++: TermCriteria CvSVMParams::term_crit + // + + public TermCriteria get_term_crit() + { + + TermCriteria retVal = new TermCriteria(get_term_crit_0(nativeObj)); + + return retVal; + } + + + // + // C++: void CvSVMParams::term_crit + // + + public void set_term_crit(TermCriteria term_crit) + { + + set_term_crit_0(nativeObj, term_crit.type, term_crit.maxCount, term_crit.epsilon); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvSVMParams::CvSVMParams() + private static native long CvSVMParams_0(); + + // C++: int CvSVMParams::svm_type + private static native int get_svm_type_0(long nativeObj); + + // C++: void CvSVMParams::svm_type + private static native void set_svm_type_0(long nativeObj, int svm_type); + + // C++: int CvSVMParams::kernel_type + private static native int get_kernel_type_0(long nativeObj); + + // C++: void CvSVMParams::kernel_type + private static native void set_kernel_type_0(long nativeObj, int kernel_type); + + // C++: double CvSVMParams::degree + private static native double get_degree_0(long nativeObj); + + // C++: void CvSVMParams::degree + private static native void set_degree_0(long nativeObj, double degree); + + // C++: double CvSVMParams::gamma + private static native double get_gamma_0(long nativeObj); + + // C++: void CvSVMParams::gamma + private static native void set_gamma_0(long nativeObj, double gamma); + + // C++: double CvSVMParams::coef0 + private static native double get_coef0_0(long nativeObj); 
+ + // C++: void CvSVMParams::coef0 + private static native void set_coef0_0(long nativeObj, double coef0); + + // C++: double CvSVMParams::C + private static native double get_C_0(long nativeObj); + + // C++: void CvSVMParams::C + private static native void set_C_0(long nativeObj, double C); + + // C++: double CvSVMParams::nu + private static native double get_nu_0(long nativeObj); + + // C++: void CvSVMParams::nu + private static native void set_nu_0(long nativeObj, double nu); + + // C++: double CvSVMParams::p + private static native double get_p_0(long nativeObj); + + // C++: void CvSVMParams::p + private static native void set_p_0(long nativeObj, double p); + + // C++: TermCriteria CvSVMParams::term_crit + private static native double[] get_term_crit_0(long nativeObj); + + // C++: void CvSVMParams::term_crit + private static native void set_term_crit_0(long nativeObj, int term_crit_type, int term_crit_maxCount, double term_crit_epsilon); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvStatModel.java b/src/org/opencv/ml/CvStatModel.java new file mode 100644 index 0000000..00303b6 --- /dev/null +++ b/src/org/opencv/ml/CvStatModel.java @@ -0,0 +1,176 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import java.lang.String; + +// C++: class CvStatModel +/** + *

Base class for statistical models in ML.

+ * + *

class CvStatModel

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

/ * CvStatModel(); * /

+ * + *

/ * CvStatModel(const Mat& train_data...); * /

+ * + *

virtual ~CvStatModel();

+ * + *

virtual void clear()=0;

+ * + *

/ * virtual bool train(const Mat& train_data, [int tflag,]..., const

+ * + *

Mat& responses,...,

+ * + *

[const Mat& var_idx,]..., [const Mat& sample_idx,]...

+ * + *

[const Mat& var_type,]..., [const Mat& missing_mask,]

+ * + *

...)=0;

+ *
    + *
  • / + *
+ * + *

/ * virtual float predict(const Mat& sample...) const=0; * /

+ * + *

virtual void save(const char* filename, const char* name=0)=0;

+ * + *

virtual void load(const char* filename, const char* name=0)=0;

+ * + *

virtual void write(CvFileStorage* storage, const char* name)=0;

+ * + *

virtual void read(CvFileStorage* storage, CvFileNode* node)=0;

+ * + *

};

+ * + *

In this declaration, some methods are commented off. These are methods for + * which there is no unified API (with the exception of the default + * constructor). However, there are many similarities in the syntax and + * semantics that are briefly described below in this section, as if they are + * part of the base class. + *

+ * + * @see org.opencv.ml.CvStatModel + */ +public class CvStatModel { + + protected final long nativeObj; + protected CvStatModel(long addr) { nativeObj = addr; } + + + // + // C++: void CvStatModel::load(c_string filename, c_string name = 0) + // + +/** + *

Loads the model from a file.

+ * + *

The method load loads the complete model state with the + * specified name (or default model-dependent name) from the specified XML or + * YAML file. The previous model state is cleared by "CvStatModel.clear".

+ * + * @param filename a filename + * @param name a name + * + * @see org.opencv.ml.CvStatModel.load + */ + public void load(String filename, String name) + { + + load_0(nativeObj, filename, name); + + return; + } + +/** + *

Loads the model from a file.

+ * + *

The method load loads the complete model state with the + * specified name (or default model-dependent name) from the specified XML or + * YAML file. The previous model state is cleared by "CvStatModel.clear".

+ * + * @param filename a filename + * + * @see org.opencv.ml.CvStatModel.load + */ + public void load(String filename) + { + + load_1(nativeObj, filename); + + return; + } + + + // + // C++: void CvStatModel::save(c_string filename, c_string name = 0) + // + +/** + *

Saves the model to a file.

+ * + *

The method save saves the complete model state to the specified + * XML or YAML file with the specified name or default name (which depends on a + * particular class). *Data persistence* functionality from CxCore + * is used.

+ * + * @param filename a filename + * @param name a name + * + * @see org.opencv.ml.CvStatModel.save + */ + public void save(String filename, String name) + { + + save_0(nativeObj, filename, name); + + return; + } + +/** + *

Saves the model to a file.

+ * + *

The method save saves the complete model state to the specified + * XML or YAML file with the specified name or default name (which depends on a + * particular class). *Data persistence* functionality from CxCore + * is used.

+ * + * @param filename a filename + * + * @see org.opencv.ml.CvStatModel.save + */ + public void save(String filename) + { + + save_1(nativeObj, filename); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void CvStatModel::load(c_string filename, c_string name = 0) + private static native void load_0(long nativeObj, String filename, String name); + private static native void load_1(long nativeObj, String filename); + + // C++: void CvStatModel::save(c_string filename, c_string name = 0) + private static native void save_0(long nativeObj, String filename, String name); + private static native void save_1(long nativeObj, String filename); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/EM.java b/src/org/opencv/ml/EM.java new file mode 100644 index 0000000..2438bf9 --- /dev/null +++ b/src/org/opencv/ml/EM.java @@ -0,0 +1,356 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Algorithm; +import org.opencv.core.Mat; +import org.opencv.core.TermCriteria; + +// C++: class EM +/** + *

The class implements the EM algorithm as described in the beginning of this + * section. It is inherited from "Algorithm".

+ * + * @see org.opencv.ml.EM : public Algorithm + */ +public class EM extends Algorithm { + + protected EM(long addr) { super(addr); } + + + public static final int + COV_MAT_SPHERICAL = 0, + COV_MAT_DIAGONAL = 1, + COV_MAT_GENERIC = 2, + COV_MAT_DEFAULT = COV_MAT_DIAGONAL, + DEFAULT_NCLUSTERS = 5, + DEFAULT_MAX_ITERS = 100, + START_E_STEP = 1, + START_M_STEP = 2, + START_AUTO_STEP = 0; + + + // + // C++: EM::EM(int nclusters = EM::DEFAULT_NCLUSTERS, int covMatType = EM::COV_MAT_DIAGONAL, TermCriteria termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON)) + // + +/** + *

The constructor of the class

+ * + * @param nclusters The number of mixture components in the Gaussian mixture + * model. Default value of the parameter is EM.DEFAULT_NCLUSTERS=5. + * Some of EM implementation could determine the optimal number of mixtures + * within a specified value range, but that is not the case in ML yet. + * @param covMatType Constraint on covariance matrices which defines type of + * matrices. Possible values are: + *
    + *
  • EM.COV_MAT_SPHERICAL A scaled identity matrix mu_k * I. + * There is the only parameter mu_k to be estimated for each matrix. + * The option may be used in special cases, when the constraint is relevant, or + * as a first step in the optimization (for example in case when the data is + * preprocessed with PCA). The results of such preliminary estimation may be + * passed again to the optimization procedure, this time with covMatType=EM.COV_MAT_DIAGONAL. + *
  • EM.COV_MAT_DIAGONAL A diagonal matrix with positive diagonal + * elements. The number of free parameters is d for each matrix. + * This is most commonly used option yielding good estimation results. + *
  • EM.COV_MAT_GENERIC A symmetric positively defined matrix. The number + * of free parameters in each matrix is about d^2/2. It is not + * recommended to use this option, unless there is pretty accurate initial + * estimation of the parameters and/or a huge number of training samples. + *
+ * @param termCrit The termination criteria of the EM algorithm. The EM + * algorithm can be terminated by the number of iterations termCrit.maxCount + * (number of M-steps) or when relative change of likelihood logarithm is less + * than termCrit.epsilon. Default maximum number of iterations is + * EM.DEFAULT_MAX_ITERS=100. + * + * @see org.opencv.ml.EM.EM + */ + public EM(int nclusters, int covMatType, TermCriteria termCrit) + { + + super( EM_0(nclusters, covMatType, termCrit.type, termCrit.maxCount, termCrit.epsilon) ); + + return; + } + +/** + *

The constructor of the class

+ * + * @see org.opencv.ml.EM.EM + */ + public EM() + { + + super( EM_1() ); + + return; + } + + + // + // C++: void EM::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: bool EM::isTrained() + // + + public boolean isTrained() + { + + boolean retVal = isTrained_0(nativeObj); + + return retVal; + } + + + // + // C++: Vec2d EM::predict(Mat sample, Mat& probs = Mat()) + // + +/** + *

Returns a likelihood logarithm value and an index of the most probable + * mixture component for the given sample.

+ * + *

The method returns a two-element double vector. Zero element is + * a likelihood logarithm value for the sample. First element is an index of the + * most probable mixture component for the given sample.

+ * + * @param sample A sample for classification. It should be a one-channel matrix + * of 1 x dims or dims x 1 size. + * @param probs Optional output matrix that contains posterior probabilities of + * each component given the sample. It has 1 x nclusters size and + * CV_64FC1 type. + * + * @see org.opencv.ml.EM.predict + */ + public double[] predict(Mat sample, Mat probs) + { + + double[] retVal = predict_0(nativeObj, sample.nativeObj, probs.nativeObj); + + return retVal; + } + +/** + *

Returns a likelihood logarithm value and an index of the most probable + * mixture component for the given sample.

+ * + *

The method returns a two-element double vector. Zero element is + * a likelihood logarithm value for the sample. First element is an index of the + * most probable mixture component for the given sample.

+ * + * @param sample A sample for classification. It should be a one-channel matrix + * of 1 x dims or dims x 1 size. + * + * @see org.opencv.ml.EM.predict + */ + public double[] predict(Mat sample) + { + + double[] retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: bool EM::train(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + // + +/** + *

Estimates the Gaussian mixture parameters from a samples set.

+ * + *

Three versions of training method differ in the initialization of Gaussian + * mixture model parameters and start step:

+ *
    + *
  • train - Starts with Expectation step. Initial values of the model + * parameters will be estimated by the k-means algorithm. + *
  • trainE - Starts with Expectation step. You need to provide initial + * means a_k of mixture components. Optionally you can pass initial + * weights pi_k and covariance matrices S_k of mixture + * components. + *
  • trainM - Starts with Maximization step. You need to provide initial + * probabilities p_(i,k) to use this option. + *
+ * + *

The methods return true if the Gaussian mixture model was + * trained successfully, otherwise it returns false.

+ * + *

Unlike many of the ML models, EM is an unsupervised learning algorithm and it + * does not take responses (class labels or function values) as input. Instead, + * it computes the *Maximum Likelihood Estimate* of the Gaussian mixture + * parameters from an input sample set, stores all the parameters inside the + * structure: p_(i,k) in probs, a_k in + * means, S_k in covs[k], pi_k in + * weights, and optionally computes the output "class label" for + * each sample: labels_i=arg max_k(p_(i,k)), i=1..N (indices of the + * most probable mixture component for each sample).

+ * + *

The trained model can be used further for prediction, just like any other + * classifier. The trained model is similar to the "CvNormalBayesClassifier".

+ * + * @param samples Samples from which the Gaussian mixture model will be + * estimated. It should be a one-channel matrix, each row of which is a sample. + * If the matrix does not have CV_64F type it will be converted to + * the inner matrix of such type for the further computing. + * @param logLikelihoods The optional output matrix that contains a likelihood + * logarithm value for each sample. It has nsamples x 1 size and + * CV_64FC1 type. + * @param labels The optional output "class label" for each sample: + * labels_i=arg max_k(p_(i,k)), i=1..N (indices of the most probable + * mixture component for each sample). It has nsamples x 1 size and + * CV_32SC1 type. + * @param probs The optional output matrix that contains posterior probabilities + * of each Gaussian mixture component given the each sample. It has nsamples + * x nclusters size and CV_64FC1 type. + * + * @see org.opencv.ml.EM.train + */ + public boolean train(Mat samples, Mat logLikelihoods, Mat labels, Mat probs) + { + + boolean retVal = train_0(nativeObj, samples.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj); + + return retVal; + } + +/** + *

Estimates the Gaussian mixture parameters from a samples set.

+ * + *

Three versions of training method differ in the initialization of Gaussian + * mixture model parameters and start step:

+ *
    + *
  • train - Starts with Expectation step. Initial values of the model + * parameters will be estimated by the k-means algorithm. + *
  • trainE - Starts with Expectation step. You need to provide initial + * means a_k of mixture components. Optionally you can pass initial + * weights pi_k and covariance matrices S_k of mixture + * components. + *
  • trainM - Starts with Maximization step. You need to provide initial + * probabilities p_(i,k) to use this option. + *
+ * + *

The methods return true if the Gaussian mixture model was + * trained successfully, otherwise it returns false.

+ * + *

Unlike many of the ML models, EM is an unsupervised learning algorithm and it + * does not take responses (class labels or function values) as input. Instead, + * it computes the *Maximum Likelihood Estimate* of the Gaussian mixture + * parameters from an input sample set, stores all the parameters inside the + * structure: p_(i,k) in probs, a_k in + * means, S_k in covs[k], pi_k in + * weights, and optionally computes the output "class label" for + * each sample: labels_i=arg max_k(p_(i,k)), i=1..N (indices of the + * most probable mixture component for each sample).

+ * + *

The trained model can be used further for prediction, just like any other + * classifier. The trained model is similar to the "CvNormalBayesClassifier".

+ * + * @param samples Samples from which the Gaussian mixture model will be + * estimated. It should be a one-channel matrix, each row of which is a sample. + * If the matrix does not have CV_64F type it will be converted to + * the inner matrix of such type for the further computing. + * + * @see org.opencv.ml.EM.train + */ + public boolean train(Mat samples) + { + + boolean retVal = train_1(nativeObj, samples.nativeObj); + + return retVal; + } + + + // + // C++: bool EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + // + + public boolean trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods, Mat labels, Mat probs) + { + + boolean retVal = trainE_0(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj); + + return retVal; + } + + public boolean trainE(Mat samples, Mat means0) + { + + boolean retVal = trainE_1(nativeObj, samples.nativeObj, means0.nativeObj); + + return retVal; + } + + + // + // C++: bool EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + // + + public boolean trainM(Mat samples, Mat probs0, Mat logLikelihoods, Mat labels, Mat probs) + { + + boolean retVal = trainM_0(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj); + + return retVal; + } + + public boolean trainM(Mat samples, Mat probs0) + { + + boolean retVal = trainM_1(nativeObj, samples.nativeObj, probs0.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: EM::EM(int nclusters = EM::DEFAULT_NCLUSTERS, int covMatType = EM::COV_MAT_DIAGONAL, TermCriteria termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON)) + private static native long EM_0(int 
nclusters, int covMatType, int termCrit_type, int termCrit_maxCount, double termCrit_epsilon); + private static native long EM_1(); + + // C++: void EM::clear() + private static native void clear_0(long nativeObj); + + // C++: bool EM::isTrained() + private static native boolean isTrained_0(long nativeObj); + + // C++: Vec2d EM::predict(Mat sample, Mat& probs = Mat()) + private static native double[] predict_0(long nativeObj, long sample_nativeObj, long probs_nativeObj); + private static native double[] predict_1(long nativeObj, long sample_nativeObj); + + // C++: bool EM::train(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + private static native boolean train_0(long nativeObj, long samples_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj); + private static native boolean train_1(long nativeObj, long samples_nativeObj); + + // C++: bool EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + private static native boolean trainE_0(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj, long weights0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj); + private static native boolean trainE_1(long nativeObj, long samples_nativeObj, long means0_nativeObj); + + // C++: bool EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + private static native boolean trainM_0(long nativeObj, long samples_nativeObj, long probs0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj); + private static native boolean trainM_1(long nativeObj, long samples_nativeObj, long probs0_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/Ml.java b/src/org/opencv/ml/Ml.java new file mode 
100644 index 0000000..104445b --- /dev/null +++ b/src/org/opencv/ml/Ml.java @@ -0,0 +1,13 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +public class Ml { + + + +} diff --git a/src/org/opencv/ml/package.bluej b/src/org/opencv/ml/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/objdetect/CascadeClassifier.java b/src/org/opencv/objdetect/CascadeClassifier.java new file mode 100644 index 0000000..54970ec --- /dev/null +++ b/src/org/opencv/objdetect/CascadeClassifier.java @@ -0,0 +1,258 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.objdetect; + +import java.lang.String; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDouble; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfRect; +import org.opencv.core.Size; + +// C++: class CascadeClassifier +/** + *

Cascade classifier class for object detection.

+ * + * @see org.opencv.objdetect.CascadeClassifier + */ +public class CascadeClassifier { + + protected final long nativeObj; + protected CascadeClassifier(long addr) { nativeObj = addr; } + + + // + // C++: CascadeClassifier::CascadeClassifier() + // + +/** + *

Loads a classifier from a file.

+ * + * @see org.opencv.objdetect.CascadeClassifier.CascadeClassifier + */ + public CascadeClassifier() + { + + nativeObj = CascadeClassifier_0(); + + return; + } + + + // + // C++: CascadeClassifier::CascadeClassifier(string filename) + // + +/** + *

Loads a classifier from a file.

+ * + * @param filename Name of the file from which the classifier is loaded. + * + * @see org.opencv.objdetect.CascadeClassifier.CascadeClassifier + */ + public CascadeClassifier(String filename) + { + + nativeObj = CascadeClassifier_1(filename); + + return; + } + + + // + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size()) + // + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * @param scaleFactor Parameter specifying how much the image size is reduced at + * each image scale. + * @param minNeighbors Parameter specifying how many neighbors each candidate + * rectangle should have to retain it. + * @param flags Parameter with the same meaning for an old cascade as in the + * function cvHaarDetectObjects. It is not used for a new cascade. + * @param minSize Minimum possible object size. Objects smaller than that are + * ignored. + * @param maxSize Maximum possible object size. Objects larger than that are + * ignored. + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects, double scaleFactor, int minNeighbors, int flags, Size minSize, Size maxSize) + { + Mat objects_mat = objects; + detectMultiScale_0(nativeObj, image.nativeObj, objects_mat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height, maxSize.width, maxSize.height); + + return; + } + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects) + { + Mat objects_mat = objects; + detectMultiScale_1(nativeObj, image.nativeObj, objects_mat.nativeObj); + + return; + } + + + // + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, vector_int rejectLevels, vector_double levelWeights, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size(), bool outputRejectLevels = false) + // + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * @param rejectLevels a rejectLevels + * @param levelWeights a levelWeights + * @param scaleFactor Parameter specifying how much the image size is reduced at + * each image scale. + * @param minNeighbors Parameter specifying how many neighbors each candidate + * rectangle should have to retain it. + * @param flags Parameter with the same meaning for an old cascade as in the + * function cvHaarDetectObjects. It is not used for a new cascade. + * @param minSize Minimum possible object size. Objects smaller than that are + * ignored. + * @param maxSize Maximum possible object size. Objects larger than that are + * ignored. + * @param outputRejectLevels a outputRejectLevels + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights, double scaleFactor, int minNeighbors, int flags, Size minSize, Size maxSize, boolean outputRejectLevels) + { + Mat objects_mat = objects; + Mat rejectLevels_mat = rejectLevels; + Mat levelWeights_mat = levelWeights; + detectMultiScale_2(nativeObj, image.nativeObj, objects_mat.nativeObj, rejectLevels_mat.nativeObj, levelWeights_mat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height, maxSize.width, maxSize.height, outputRejectLevels); + + return; + } + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * @param rejectLevels a rejectLevels + * @param levelWeights a levelWeights + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights) + { + Mat objects_mat = objects; + Mat rejectLevels_mat = rejectLevels; + Mat levelWeights_mat = levelWeights; + detectMultiScale_3(nativeObj, image.nativeObj, objects_mat.nativeObj, rejectLevels_mat.nativeObj, levelWeights_mat.nativeObj); + + return; + } + + + // + // C++: bool CascadeClassifier::empty() + // + +/** + *

Checks whether the classifier has been loaded.

+ * + * @see org.opencv.objdetect.CascadeClassifier.empty + */ + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: bool CascadeClassifier::load(string filename) + // + +/** + *

Loads a classifier from a file.

+ * + * @param filename Name of the file from which the classifier is loaded. The + * file may contain an old HAAR classifier trained by the haartraining + * application or a new cascade classifier trained by the traincascade + * application. + * + * @see org.opencv.objdetect.CascadeClassifier.load + */ + public boolean load(String filename) + { + + boolean retVal = load_0(nativeObj, filename); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CascadeClassifier::CascadeClassifier() + private static native long CascadeClassifier_0(); + + // C++: CascadeClassifier::CascadeClassifier(string filename) + private static native long CascadeClassifier_1(String filename); + + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size()) + private static native void detectMultiScale_0(long nativeObj, long image_nativeObj, long objects_mat_nativeObj, double scaleFactor, int minNeighbors, int flags, double minSize_width, double minSize_height, double maxSize_width, double maxSize_height); + private static native void detectMultiScale_1(long nativeObj, long image_nativeObj, long objects_mat_nativeObj); + + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, vector_int rejectLevels, vector_double levelWeights, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size(), bool outputRejectLevels = false) + private static native void detectMultiScale_2(long nativeObj, long image_nativeObj, long objects_mat_nativeObj, long rejectLevels_mat_nativeObj, long levelWeights_mat_nativeObj, double scaleFactor, int minNeighbors, int flags, double minSize_width, double minSize_height, double maxSize_width, double maxSize_height, boolean outputRejectLevels); + private static native void detectMultiScale_3(long 
nativeObj, long image_nativeObj, long objects_mat_nativeObj, long rejectLevels_mat_nativeObj, long levelWeights_mat_nativeObj); + + // C++: bool CascadeClassifier::empty() + private static native boolean empty_0(long nativeObj); + + // C++: bool CascadeClassifier::load(string filename) + private static native boolean load_0(long nativeObj, String filename); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/objdetect/HOGDescriptor.java b/src/org/opencv/objdetect/HOGDescriptor.java new file mode 100644 index 0000000..e5490d6 --- /dev/null +++ b/src/org/opencv/objdetect/HOGDescriptor.java @@ -0,0 +1,538 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.objdetect; + +import java.lang.String; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDouble; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfPoint; +import org.opencv.core.MatOfRect; +import org.opencv.core.Size; + +// C++: class HOGDescriptor +public class HOGDescriptor { + + protected final long nativeObj; + protected HOGDescriptor(long addr) { nativeObj = addr; } + + + public static final int + L2Hys = 0, + DEFAULT_NLEVELS = 64; + + + // + // C++: HOGDescriptor::HOGDescriptor() + // + + public HOGDescriptor() + { + + nativeObj = HOGDescriptor_0(); + + return; + } + + + // + // C++: HOGDescriptor::HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture = 1, double _winSigma = -1, int _histogramNormType = HOGDescriptor::L2Hys, double _L2HysThreshold = 0.2, bool _gammaCorrection = false, int _nlevels = HOGDescriptor::DEFAULT_NLEVELS) + // + + public HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture, double _winSigma, int _histogramNormType, double _L2HysThreshold, boolean _gammaCorrection, int _nlevels) + { + + nativeObj = HOGDescriptor_1(_winSize.width, 
_winSize.height, _blockSize.width, _blockSize.height, _blockStride.width, _blockStride.height, _cellSize.width, _cellSize.height, _nbins, _derivAperture, _winSigma, _histogramNormType, _L2HysThreshold, _gammaCorrection, _nlevels); + + return; + } + + public HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins) + { + + nativeObj = HOGDescriptor_2(_winSize.width, _winSize.height, _blockSize.width, _blockSize.height, _blockStride.width, _blockStride.height, _cellSize.width, _cellSize.height, _nbins); + + return; + } + + + // + // C++: HOGDescriptor::HOGDescriptor(String filename) + // + + public HOGDescriptor(String filename) + { + + nativeObj = HOGDescriptor_3(filename); + + return; + } + + + // + // C++: bool HOGDescriptor::checkDetectorSize() + // + + public boolean checkDetectorSize() + { + + boolean retVal = checkDetectorSize_0(nativeObj); + + return retVal; + } + + + // + // C++: void HOGDescriptor::compute(Mat img, vector_float& descriptors, Size winStride = Size(), Size padding = Size(), vector_Point locations = vector()) + // + + public void compute(Mat img, MatOfFloat descriptors, Size winStride, Size padding, MatOfPoint locations) + { + Mat descriptors_mat = descriptors; + Mat locations_mat = locations; + compute_0(nativeObj, img.nativeObj, descriptors_mat.nativeObj, winStride.width, winStride.height, padding.width, padding.height, locations_mat.nativeObj); + + return; + } + + public void compute(Mat img, MatOfFloat descriptors) + { + Mat descriptors_mat = descriptors; + compute_1(nativeObj, img.nativeObj, descriptors_mat.nativeObj); + + return; + } + + + // + // C++: void HOGDescriptor::computeGradient(Mat img, Mat& grad, Mat& angleOfs, Size paddingTL = Size(), Size paddingBR = Size()) + // + + public void computeGradient(Mat img, Mat grad, Mat angleOfs, Size paddingTL, Size paddingBR) + { + + computeGradient_0(nativeObj, img.nativeObj, grad.nativeObj, angleOfs.nativeObj, paddingTL.width, paddingTL.height, 
paddingBR.width, paddingBR.height); + + return; + } + + public void computeGradient(Mat img, Mat grad, Mat angleOfs) + { + + computeGradient_1(nativeObj, img.nativeObj, grad.nativeObj, angleOfs.nativeObj); + + return; + } + + + // + // C++: void HOGDescriptor::detect(Mat img, vector_Point& foundLocations, vector_double& weights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), vector_Point searchLocations = vector()) + // + + public void detect(Mat img, MatOfPoint foundLocations, MatOfDouble weights, double hitThreshold, Size winStride, Size padding, MatOfPoint searchLocations) + { + Mat foundLocations_mat = foundLocations; + Mat weights_mat = weights; + Mat searchLocations_mat = searchLocations; + detect_0(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, weights_mat.nativeObj, hitThreshold, winStride.width, winStride.height, padding.width, padding.height, searchLocations_mat.nativeObj); + + return; + } + + public void detect(Mat img, MatOfPoint foundLocations, MatOfDouble weights) + { + Mat foundLocations_mat = foundLocations; + Mat weights_mat = weights; + detect_1(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, weights_mat.nativeObj); + + return; + } + + + // + // C++: void HOGDescriptor::detectMultiScale(Mat img, vector_Rect& foundLocations, vector_double& foundWeights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), double scale = 1.05, double finalThreshold = 2.0, bool useMeanshiftGrouping = false) + // + + public void detectMultiScale(Mat img, MatOfRect foundLocations, MatOfDouble foundWeights, double hitThreshold, Size winStride, Size padding, double scale, double finalThreshold, boolean useMeanshiftGrouping) + { + Mat foundLocations_mat = foundLocations; + Mat foundWeights_mat = foundWeights; + detectMultiScale_0(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, foundWeights_mat.nativeObj, hitThreshold, winStride.width, winStride.height, padding.width, padding.height, scale, 
finalThreshold, useMeanshiftGrouping); + + return; + } + + public void detectMultiScale(Mat img, MatOfRect foundLocations, MatOfDouble foundWeights) + { + Mat foundLocations_mat = foundLocations; + Mat foundWeights_mat = foundWeights; + detectMultiScale_1(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, foundWeights_mat.nativeObj); + + return; + } + + + // + // C++: static vector_float HOGDescriptor::getDaimlerPeopleDetector() + // + + public static MatOfFloat getDaimlerPeopleDetector() + { + + MatOfFloat retVal = MatOfFloat.fromNativeAddr(getDaimlerPeopleDetector_0()); + + return retVal; + } + + + // + // C++: static vector_float HOGDescriptor::getDefaultPeopleDetector() + // + + public static MatOfFloat getDefaultPeopleDetector() + { + + MatOfFloat retVal = MatOfFloat.fromNativeAddr(getDefaultPeopleDetector_0()); + + return retVal; + } + + + // + // C++: size_t HOGDescriptor::getDescriptorSize() + // + + public long getDescriptorSize() + { + + long retVal = getDescriptorSize_0(nativeObj); + + return retVal; + } + + + // + // C++: double HOGDescriptor::getWinSigma() + // + + public double getWinSigma() + { + + double retVal = getWinSigma_0(nativeObj); + + return retVal; + } + + + // + // C++: bool HOGDescriptor::load(String filename, String objname = String()) + // + + public boolean load(String filename, String objname) + { + + boolean retVal = load_0(nativeObj, filename, objname); + + return retVal; + } + + public boolean load(String filename) + { + + boolean retVal = load_1(nativeObj, filename); + + return retVal; + } + + + // + // C++: void HOGDescriptor::save(String filename, String objname = String()) + // + + public void save(String filename, String objname) + { + + save_0(nativeObj, filename, objname); + + return; + } + + public void save(String filename) + { + + save_1(nativeObj, filename); + + return; + } + + + // + // C++: void HOGDescriptor::setSVMDetector(Mat _svmdetector) + // + + public void setSVMDetector(Mat _svmdetector) + { + + 
setSVMDetector_0(nativeObj, _svmdetector.nativeObj); + + return; + } + + + // + // C++: Size HOGDescriptor::winSize + // + + public Size get_winSize() + { + + Size retVal = new Size(get_winSize_0(nativeObj)); + + return retVal; + } + + + // + // C++: Size HOGDescriptor::blockSize + // + + public Size get_blockSize() + { + + Size retVal = new Size(get_blockSize_0(nativeObj)); + + return retVal; + } + + + // + // C++: Size HOGDescriptor::blockStride + // + + public Size get_blockStride() + { + + Size retVal = new Size(get_blockStride_0(nativeObj)); + + return retVal; + } + + + // + // C++: Size HOGDescriptor::cellSize + // + + public Size get_cellSize() + { + + Size retVal = new Size(get_cellSize_0(nativeObj)); + + return retVal; + } + + + // + // C++: int HOGDescriptor::nbins + // + + public int get_nbins() + { + + int retVal = get_nbins_0(nativeObj); + + return retVal; + } + + + // + // C++: int HOGDescriptor::derivAperture + // + + public int get_derivAperture() + { + + int retVal = get_derivAperture_0(nativeObj); + + return retVal; + } + + + // + // C++: double HOGDescriptor::winSigma + // + + public double get_winSigma() + { + + double retVal = get_winSigma_0(nativeObj); + + return retVal; + } + + + // + // C++: int HOGDescriptor::histogramNormType + // + + public int get_histogramNormType() + { + + int retVal = get_histogramNormType_0(nativeObj); + + return retVal; + } + + + // + // C++: double HOGDescriptor::L2HysThreshold + // + + public double get_L2HysThreshold() + { + + double retVal = get_L2HysThreshold_0(nativeObj); + + return retVal; + } + + + // + // C++: bool HOGDescriptor::gammaCorrection + // + + public boolean get_gammaCorrection() + { + + boolean retVal = get_gammaCorrection_0(nativeObj); + + return retVal; + } + + + // + // C++: vector_float HOGDescriptor::svmDetector + // + + public MatOfFloat get_svmDetector() + { + + MatOfFloat retVal = MatOfFloat.fromNativeAddr(get_svmDetector_0(nativeObj)); + + return retVal; + } + + + // + // C++: int 
HOGDescriptor::nlevels + // + + public int get_nlevels() + { + + int retVal = get_nlevels_0(nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: HOGDescriptor::HOGDescriptor() + private static native long HOGDescriptor_0(); + + // C++: HOGDescriptor::HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture = 1, double _winSigma = -1, int _histogramNormType = HOGDescriptor::L2Hys, double _L2HysThreshold = 0.2, bool _gammaCorrection = false, int _nlevels = HOGDescriptor::DEFAULT_NLEVELS) + private static native long HOGDescriptor_1(double _winSize_width, double _winSize_height, double _blockSize_width, double _blockSize_height, double _blockStride_width, double _blockStride_height, double _cellSize_width, double _cellSize_height, int _nbins, int _derivAperture, double _winSigma, int _histogramNormType, double _L2HysThreshold, boolean _gammaCorrection, int _nlevels); + private static native long HOGDescriptor_2(double _winSize_width, double _winSize_height, double _blockSize_width, double _blockSize_height, double _blockStride_width, double _blockStride_height, double _cellSize_width, double _cellSize_height, int _nbins); + + // C++: HOGDescriptor::HOGDescriptor(String filename) + private static native long HOGDescriptor_3(String filename); + + // C++: bool HOGDescriptor::checkDetectorSize() + private static native boolean checkDetectorSize_0(long nativeObj); + + // C++: void HOGDescriptor::compute(Mat img, vector_float& descriptors, Size winStride = Size(), Size padding = Size(), vector_Point locations = vector()) + private static native void compute_0(long nativeObj, long img_nativeObj, long descriptors_mat_nativeObj, double winStride_width, double winStride_height, double padding_width, double padding_height, long locations_mat_nativeObj); + private static native void compute_1(long nativeObj, long img_nativeObj, long 
descriptors_mat_nativeObj); + + // C++: void HOGDescriptor::computeGradient(Mat img, Mat& grad, Mat& angleOfs, Size paddingTL = Size(), Size paddingBR = Size()) + private static native void computeGradient_0(long nativeObj, long img_nativeObj, long grad_nativeObj, long angleOfs_nativeObj, double paddingTL_width, double paddingTL_height, double paddingBR_width, double paddingBR_height); + private static native void computeGradient_1(long nativeObj, long img_nativeObj, long grad_nativeObj, long angleOfs_nativeObj); + + // C++: void HOGDescriptor::detect(Mat img, vector_Point& foundLocations, vector_double& weights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), vector_Point searchLocations = vector()) + private static native void detect_0(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long weights_mat_nativeObj, double hitThreshold, double winStride_width, double winStride_height, double padding_width, double padding_height, long searchLocations_mat_nativeObj); + private static native void detect_1(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long weights_mat_nativeObj); + + // C++: void HOGDescriptor::detectMultiScale(Mat img, vector_Rect& foundLocations, vector_double& foundWeights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), double scale = 1.05, double finalThreshold = 2.0, bool useMeanshiftGrouping = false) + private static native void detectMultiScale_0(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long foundWeights_mat_nativeObj, double hitThreshold, double winStride_width, double winStride_height, double padding_width, double padding_height, double scale, double finalThreshold, boolean useMeanshiftGrouping); + private static native void detectMultiScale_1(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long foundWeights_mat_nativeObj); + + // C++: static vector_float HOGDescriptor::getDaimlerPeopleDetector() + 
private static native long getDaimlerPeopleDetector_0(); + + // C++: static vector_float HOGDescriptor::getDefaultPeopleDetector() + private static native long getDefaultPeopleDetector_0(); + + // C++: size_t HOGDescriptor::getDescriptorSize() + private static native long getDescriptorSize_0(long nativeObj); + + // C++: double HOGDescriptor::getWinSigma() + private static native double getWinSigma_0(long nativeObj); + + // C++: bool HOGDescriptor::load(String filename, String objname = String()) + private static native boolean load_0(long nativeObj, String filename, String objname); + private static native boolean load_1(long nativeObj, String filename); + + // C++: void HOGDescriptor::save(String filename, String objname = String()) + private static native void save_0(long nativeObj, String filename, String objname); + private static native void save_1(long nativeObj, String filename); + + // C++: void HOGDescriptor::setSVMDetector(Mat _svmdetector) + private static native void setSVMDetector_0(long nativeObj, long _svmdetector_nativeObj); + + // C++: Size HOGDescriptor::winSize + private static native double[] get_winSize_0(long nativeObj); + + // C++: Size HOGDescriptor::blockSize + private static native double[] get_blockSize_0(long nativeObj); + + // C++: Size HOGDescriptor::blockStride + private static native double[] get_blockStride_0(long nativeObj); + + // C++: Size HOGDescriptor::cellSize + private static native double[] get_cellSize_0(long nativeObj); + + // C++: int HOGDescriptor::nbins + private static native int get_nbins_0(long nativeObj); + + // C++: int HOGDescriptor::derivAperture + private static native int get_derivAperture_0(long nativeObj); + + // C++: double HOGDescriptor::winSigma + private static native double get_winSigma_0(long nativeObj); + + // C++: int HOGDescriptor::histogramNormType + private static native int get_histogramNormType_0(long nativeObj); + + // C++: double HOGDescriptor::L2HysThreshold + private static native double 
get_L2HysThreshold_0(long nativeObj); + + // C++: bool HOGDescriptor::gammaCorrection + private static native boolean get_gammaCorrection_0(long nativeObj); + + // C++: vector_float HOGDescriptor::svmDetector + private static native long get_svmDetector_0(long nativeObj); + + // C++: int HOGDescriptor::nlevels + private static native int get_nlevels_0(long nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/objdetect/Objdetect.java b/src/org/opencv/objdetect/Objdetect.java new file mode 100644 index 0000000..83e8768 --- /dev/null +++ b/src/org/opencv/objdetect/Objdetect.java @@ -0,0 +1,105 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.objdetect; + +import org.opencv.core.Mat; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfRect; + +public class Objdetect { + + public static final int + CASCADE_DO_CANNY_PRUNING = 1, + CASCADE_SCALE_IMAGE = 2, + CASCADE_FIND_BIGGEST_OBJECT = 4, + CASCADE_DO_ROUGH_SEARCH = 8; + + + // + // C++: void drawDataMatrixCodes(Mat& image, vector_string codes, Mat corners) + // + + // Unknown type 'vector_string' (I), skipping the function + + + // + // C++: void findDataMatrix(Mat image, vector_string& codes, Mat& corners = Mat(), vector_Mat& dmtx = vector_Mat()) + // + + // Unknown type 'vector_string' (O), skipping the function + + + // + // C++: void groupRectangles(vector_Rect& rectList, vector_int& weights, int groupThreshold, double eps = 0.2) + // + +/** + *

Groups the object candidate rectangles.

+ * + *

The function is a wrapper for the generic function "partition". It clusters + * all the input rectangles using the rectangle equivalence criteria that + * combines rectangles with similar sizes and similar locations. The similarity + * is defined by eps. When eps=0, no clustering is + * done at all. If eps-> +inf, all the rectangles are put in one + * cluster. Then, the small clusters containing less than or equal to + * groupThreshold rectangles are rejected. In each other cluster, + * the average rectangle is computed and put into the output rectangle list.

+ * + * @param rectList Input/output vector of rectangles. Output vector includes + * retained and grouped rectangles. (The Python list is not modified in place.) + * @param weights a weights + * @param groupThreshold Minimum possible number of rectangles minus 1. The + * threshold is used in a group of rectangles to retain it. + * @param eps Relative difference between sides of the rectangles to merge them + * into a group. + * + * @see org.opencv.objdetect.Objdetect.groupRectangles + */ + public static void groupRectangles(MatOfRect rectList, MatOfInt weights, int groupThreshold, double eps) + { + Mat rectList_mat = rectList; + Mat weights_mat = weights; + groupRectangles_0(rectList_mat.nativeObj, weights_mat.nativeObj, groupThreshold, eps); + + return; + } + +/** + *

Groups the object candidate rectangles.

+ * + *

The function is a wrapper for the generic function "partition". It clusters + * all the input rectangles using the rectangle equivalence criteria that + * combines rectangles with similar sizes and similar locations. The similarity + * is defined by eps. When eps=0, no clustering is + * done at all. If eps-> +inf, all the rectangles are put in one + * cluster. Then, the small clusters containing less than or equal to + * groupThreshold rectangles are rejected. In each other cluster, + * the average rectangle is computed and put into the output rectangle list.

+ * + * @param rectList Input/output vector of rectangles. Output vector includes + * retained and grouped rectangles. (The Python list is not modified in place.) + * @param weights a weights + * @param groupThreshold Minimum possible number of rectangles minus 1. The + * threshold is used in a group of rectangles to retain it. + * + * @see org.opencv.objdetect.Objdetect.groupRectangles + */ + public static void groupRectangles(MatOfRect rectList, MatOfInt weights, int groupThreshold) + { + Mat rectList_mat = rectList; + Mat weights_mat = weights; + groupRectangles_1(rectList_mat.nativeObj, weights_mat.nativeObj, groupThreshold); + + return; + } + + + + + // C++: void groupRectangles(vector_Rect& rectList, vector_int& weights, int groupThreshold, double eps = 0.2) + private static native void groupRectangles_0(long rectList_mat_nativeObj, long weights_mat_nativeObj, int groupThreshold, double eps); + private static native void groupRectangles_1(long rectList_mat_nativeObj, long weights_mat_nativeObj, int groupThreshold); + +} diff --git a/src/org/opencv/objdetect/package.bluej b/src/org/opencv/objdetect/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/package.bluej b/src/org/opencv/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/photo/Photo.java b/src/org/opencv/photo/Photo.java new file mode 100644 index 0000000..908a275 --- /dev/null +++ b/src/org/opencv/photo/Photo.java @@ -0,0 +1,347 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.photo; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.utils.Converters; + +public class Photo { + + private static final int + CV_INPAINT_NS = 0, + CV_INPAINT_TELEA = 1; + + + public static final int + INPAINT_NS = CV_INPAINT_NS, + INPAINT_TELEA = CV_INPAINT_TELEA; + + + // + // C++: void fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Perform image denoising using Non-local Means Denoising algorithm + * http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/ with several + * computational optimizations. Noise is expected to be Gaussian white noise

+ * + *

This function is expected to be applied to grayscale images. For colored images + * look at fastNlMeansDenoisingColored. + * Advanced usage of this function can be manual denoising of a colored image in + * different colorspaces. + * Such an approach is used in fastNlMeansDenoisingColored by + * converting the image to CIELAB colorspace and then separately denoising L and AB + * components with a different h parameter.

+ * + * @param src Input 8-bit 1-channel, 2-channel or 3-channel image. + * @param dst Output image with the same size and type as src. + * @param h Parameter regulating filter strength. Big h value perfectly removes + * noise but also removes image details, smaller h value preserves details but + * also preserves some noise + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affect performance linearly: + * greater searchWindowsSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoising + */ + public static void fastNlMeansDenoising(Mat src, Mat dst, float h, int templateWindowSize, int searchWindowSize) + { + + fastNlMeansDenoising_0(src.nativeObj, dst.nativeObj, h, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Perform image denoising using Non-local Means Denoising algorithm + * http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/ with several + * computational optimizations. Noise is expected to be Gaussian white noise

+ * + *

This function is expected to be applied to grayscale images. For colored images + * look at fastNlMeansDenoisingColored. + * Advanced usage of this function can be manual denoising of a colored image in + * different colorspaces. + * Such an approach is used in fastNlMeansDenoisingColored by + * converting the image to CIELAB colorspace and then separately denoising L and AB + * components with a different h parameter.

+ * + * @param src Input 8-bit 1-channel, 2-channel or 3-channel image. + * @param dst Output image with the same size and type as src. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoising + */ + public static void fastNlMeansDenoising(Mat src, Mat dst) + { + + fastNlMeansDenoising_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Modification of fastNlMeansDenoising function for colored images

+ * + *

The function converts image to CIELAB colorspace and then separately denoise + * L and AB components with given h parameters using fastNlMeansDenoising + * function.

+ * + * @param src Input 8-bit 3-channel image. + * @param dst Output image with the same size and type as src. + * @param h Parameter regulating filter strength for luminance component. Bigger + * h value perfectly removes noise but also removes image details, smaller h + * value preserves details but also preserves some noise + * @param hColor a hColor + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affect performance linearly: + * greater searchWindowsSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColored + */ + public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor, int templateWindowSize, int searchWindowSize) + { + + fastNlMeansDenoisingColored_0(src.nativeObj, dst.nativeObj, h, hColor, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Modification of fastNlMeansDenoising function for colored images

+ * + *

The function converts image to CIELAB colorspace and then separately denoise + * L and AB components with given h parameters using fastNlMeansDenoising + * function.

+ * + * @param src Input 8-bit 3-channel image. + * @param dst Output image with the same size and type as src. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColored + */ + public static void fastNlMeansDenoisingColored(Mat src, Mat dst) + { + + fastNlMeansDenoisingColored_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Modification of fastNlMeansDenoisingMulti function for colored + * images sequences

+ * + *

The function converts images to CIELAB colorspace and then separately denoise + * L and AB components with given h parameters using fastNlMeansDenoisingMulti + * function.

+ * + * @param srcImgs Input 8-bit 3-channel images sequence. All images should have + * the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Target image to denoise index in srcImgs + * sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex - temporalWindowSize + * / 2 from srcImgs will be used to denoise + * srcImgs[imgToDenoiseIndex] image. + * @param h Parameter regulating filter strength for luminance component. Bigger + * h value perfectly removes noise but also removes image details, smaller h + * value preserves details but also preserves some noise. + * @param hColor a hColor + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affect performance linearly: + * greater searchWindowsSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColoredMulti + */ + public static void fastNlMeansDenoisingColoredMulti(List srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingColoredMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Modification of fastNlMeansDenoisingMulti function for colored + * images sequences

+ * + *

The function converts images to CIELAB colorspace and then separately denoise + * L and AB components with given h parameters using fastNlMeansDenoisingMulti + * function.

+ * + * @param srcImgs Input 8-bit 3-channel images sequence. All images should have + * the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Target image to denoise index in srcImgs + * sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex - temporalWindowSize + * / 2 from srcImgs will be used to denoise + * srcImgs[imgToDenoiseIndex] image. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColoredMulti + */ + public static void fastNlMeansDenoisingColoredMulti(List srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingColoredMulti_1(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize); + + return; + } + + + // + // C++: void fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Modification of fastNlMeansDenoising function for images + * sequence where consequtive images have been captured in small period of time. + * For example video. This version of the function is for grayscale images or + * for manual manipulation with colorspaces. + * For more details see http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394

+ * + * @param srcImgs Input 8-bit 1-channel, 2-channel or 3-channel images sequence. + * All images should have the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Target image to denoise index in srcImgs + * sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex - temporalWindowSize + * / 2 from srcImgs will be used to denoise + * srcImgs[imgToDenoiseIndex] image. + * @param h Parameter regulating filter strength for luminance component. Bigger + * h value perfectly removes noise but also removes image details, smaller h + * value preserves details but also preserves some noise + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affect performance linearly: + * greater searchWindowsSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingMulti + */ + public static void fastNlMeansDenoisingMulti(List srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Modification of fastNlMeansDenoising function for images + * sequence where consequtive images have been captured in small period of time. + * For example video. This version of the function is for grayscale images or + * for manual manipulation with colorspaces. + * For more details see http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394

+ * + * @param srcImgs Input 8-bit 1-channel, 2-channel or 3-channel images sequence. + * All images should have the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Target image to denoise index in srcImgs + * sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex - temporalWindowSize + * / 2 from srcImgs will be used to denoise + * srcImgs[imgToDenoiseIndex] image. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingMulti + */ + public static void fastNlMeansDenoisingMulti(List srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingMulti_1(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize); + + return; + } + + + // + // C++: void inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags) + // + +/** + *

Restores the selected region in an image using the region neighborhood.

+ * + *

The function reconstructs the selected image area from the pixel near the + * area boundary. The function may be used to remove dust and scratches from a + * scanned photo, or to remove undesirable objects from still images or video. + * See http://en.wikipedia.org/wiki/Inpainting for more details.

+ * + * @param src Input 8-bit 1-channel or 3-channel image. + * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels + * indicate the area that needs to be inpainted. + * @param dst Output image with the same size and type as src. + * @param inpaintRadius Radius of a circular neighborhood of each point + * inpainted that is considered by the algorithm. + * @param flags Inpainting method that could be one of the following: + *
    + *
  • INPAINT_NS Navier-Stokes based method. + *
  • INPAINT_TELEA Method by Alexandru Telea [Telea04]. + *
+ * + * @see org.opencv.photo.Photo.inpaint + */ + public static void inpaint(Mat src, Mat inpaintMask, Mat dst, double inpaintRadius, int flags) + { + + inpaint_0(src.nativeObj, inpaintMask.nativeObj, dst.nativeObj, inpaintRadius, flags); + + return; + } + + + + + // C++: void fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoising_0(long src_nativeObj, long dst_nativeObj, float h, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoising_1(long src_nativeObj, long dst_nativeObj); + + // C++: void fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoisingColored_0(long src_nativeObj, long dst_nativeObj, float h, float hColor, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoisingColored_1(long src_nativeObj, long dst_nativeObj); + + // C++: void fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoisingColoredMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoisingColoredMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize); + + // C++: void fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoisingMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, 
int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoisingMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize); + + // C++: void inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags) + private static native void inpaint_0(long src_nativeObj, long inpaintMask_nativeObj, long dst_nativeObj, double inpaintRadius, int flags); + +} diff --git a/src/org/opencv/photo/package.bluej b/src/org/opencv/photo/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/utils/Converters.java b/src/org/opencv/utils/Converters.java new file mode 100644 index 0000000..49c0844 --- /dev/null +++ b/src/org/opencv/utils/Converters.java @@ -0,0 +1,724 @@ +package org.opencv.utils; + +import java.util.ArrayList; +import java.util.List; + +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfDMatch; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.core.MatOfPoint; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfPoint3f; +import org.opencv.core.Point; +import org.opencv.core.Point3; +import org.opencv.core.Rect; +import org.opencv.features2d.DMatch; +import org.opencv.features2d.KeyPoint; + +public class Converters { + + public static Mat vector_Point_to_Mat(List pts) { + return vector_Point_to_Mat(pts, CvType.CV_32S); + } + + public static Mat vector_Point2f_to_Mat(List pts) { + return vector_Point_to_Mat(pts, CvType.CV_32F); + } + + public static Mat vector_Point2d_to_Mat(List pts) { + return vector_Point_to_Mat(pts, CvType.CV_64F); + } + + public static Mat vector_Point_to_Mat(List pts, int typeDepth) { + Mat res; + int count = (pts != null) ? 
pts.size() : 0; + if (count > 0) { + switch (typeDepth) { + case CvType.CV_32S: { + res = new Mat(count, 1, CvType.CV_32SC2); + int[] buff = new int[count * 2]; + for (int i = 0; i < count; i++) { + Point p = pts.get(i); + buff[i * 2] = (int) p.x; + buff[i * 2 + 1] = (int) p.y; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_32F: { + res = new Mat(count, 1, CvType.CV_32FC2); + float[] buff = new float[count * 2]; + for (int i = 0; i < count; i++) { + Point p = pts.get(i); + buff[i * 2] = (float) p.x; + buff[i * 2 + 1] = (float) p.y; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_64F: { + res = new Mat(count, 1, CvType.CV_64FC2); + double[] buff = new double[count * 2]; + for (int i = 0; i < count; i++) { + Point p = pts.get(i); + buff[i * 2] = p.x; + buff[i * 2 + 1] = p.y; + } + res.put(0, 0, buff); + } + break; + + default: + throw new IllegalArgumentException("'typeDepth' can be CV_32S, CV_32F or CV_64F"); + } + } else { + res = new Mat(); + } + return res; + } + + public static Mat vector_Point3i_to_Mat(List pts) { + return vector_Point3_to_Mat(pts, CvType.CV_32S); + } + + public static Mat vector_Point3f_to_Mat(List pts) { + return vector_Point3_to_Mat(pts, CvType.CV_32F); + } + + public static Mat vector_Point3d_to_Mat(List pts) { + return vector_Point3_to_Mat(pts, CvType.CV_64F); + } + + public static Mat vector_Point3_to_Mat(List pts, int typeDepth) { + Mat res; + int count = (pts != null) ? 
pts.size() : 0; + if (count > 0) { + switch (typeDepth) { + case CvType.CV_32S: { + res = new Mat(count, 1, CvType.CV_32SC3); + int[] buff = new int[count * 3]; + for (int i = 0; i < count; i++) { + Point3 p = pts.get(i); + buff[i * 3] = (int) p.x; + buff[i * 3 + 1] = (int) p.y; + buff[i * 3 + 2] = (int) p.z; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_32F: { + res = new Mat(count, 1, CvType.CV_32FC3); + float[] buff = new float[count * 3]; + for (int i = 0; i < count; i++) { + Point3 p = pts.get(i); + buff[i * 3] = (float) p.x; + buff[i * 3 + 1] = (float) p.y; + buff[i * 3 + 2] = (float) p.z; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_64F: { + res = new Mat(count, 1, CvType.CV_64FC3); + double[] buff = new double[count * 3]; + for (int i = 0; i < count; i++) { + Point3 p = pts.get(i); + buff[i * 3] = p.x; + buff[i * 3 + 1] = p.y; + buff[i * 3 + 2] = p.z; + } + res.put(0, 0, buff); + } + break; + + default: + throw new IllegalArgumentException("'typeDepth' can be CV_32S, CV_32F or CV_64F"); + } + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_Point2f(Mat m, List pts) { + Mat_to_vector_Point(m, pts); + } + + public static void Mat_to_vector_Point2d(Mat m, List pts) { + Mat_to_vector_Point(m, pts); + } + + public static void Mat_to_vector_Point(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + int type = m.type(); + if (m.cols() != 1) + throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m); + + pts.clear(); + if (type == CvType.CV_32SC2) { + int[] buff = new int[2 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point(buff[i * 2], buff[i * 2 + 1])); + } + } else if (type == CvType.CV_32FC2) { + float[] buff = new float[2 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point(buff[i * 2], buff[i * 2 + 1])); + } + 
} else if (type == CvType.CV_64FC2) { + double[] buff = new double[2 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point(buff[i * 2], buff[i * 2 + 1])); + } + } else { + throw new java.lang.IllegalArgumentException( + "Input Mat should be of CV_32SC2, CV_32FC2 or CV_64FC2 type\n" + m); + } + } + + public static void Mat_to_vector_Point3i(Mat m, List pts) { + Mat_to_vector_Point3(m, pts); + } + + public static void Mat_to_vector_Point3f(Mat m, List pts) { + Mat_to_vector_Point3(m, pts); + } + + public static void Mat_to_vector_Point3d(Mat m, List pts) { + Mat_to_vector_Point3(m, pts); + } + + public static void Mat_to_vector_Point3(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + int type = m.type(); + if (m.cols() != 1) + throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m); + + pts.clear(); + if (type == CvType.CV_32SC3) { + int[] buff = new int[3 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2])); + } + } else if (type == CvType.CV_32FC3) { + float[] buff = new float[3 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2])); + } + } else if (type == CvType.CV_64FC3) { + double[] buff = new double[3 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2])); + } + } else { + throw new java.lang.IllegalArgumentException( + "Input Mat should be of CV_32SC3, CV_32FC3 or CV_64FC3 type\n" + m); + } + } + + public static Mat vector_Mat_to_Mat(List mats) { + Mat res; + int count = (mats != null) ? 
mats.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32SC2); + int[] buff = new int[count * 2]; + for (int i = 0; i < count; i++) { + long addr = mats.get(i).nativeObj; + buff[i * 2] = (int) (addr >> 32); + buff[i * 2 + 1] = (int) (addr & 0xffffffff); + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_Mat(Mat m, List mats) { + if (mats == null) + throw new java.lang.IllegalArgumentException("mats == null"); + int count = m.rows(); + if (CvType.CV_32SC2 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32SC2 != m.type() || m.cols()!=1\n" + m); + + mats.clear(); + int[] buff = new int[count * 2]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + long addr = (((long) buff[i * 2]) << 32) | (((long) buff[i * 2 + 1]) & 0xffffffffL); + mats.add(new Mat(addr)); + } + } + + public static Mat vector_float_to_Mat(List fs) { + Mat res; + int count = (fs != null) ? fs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32FC1); + float[] buff = new float[count]; + for (int i = 0; i < count; i++) { + float f = fs.get(i); + buff[i] = f; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_float(Mat m, List fs) { + if (fs == null) + throw new java.lang.IllegalArgumentException("fs == null"); + int count = m.rows(); + if (CvType.CV_32FC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32FC1 != m.type() || m.cols()!=1\n" + m); + + fs.clear(); + float[] buff = new float[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + fs.add(buff[i]); + } + } + + public static Mat vector_uchar_to_Mat(List bs) { + Mat res; + int count = (bs != null) ? 
bs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_8UC1); + byte[] buff = new byte[count]; + for (int i = 0; i < count; i++) { + byte b = bs.get(i); + buff[i] = b; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_uchar(Mat m, List us) { + if (us == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_8UC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_8UC1 != m.type() || m.cols()!=1\n" + m); + + us.clear(); + byte[] buff = new byte[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + us.add(buff[i]); + } + } + + public static Mat vector_char_to_Mat(List bs) { + Mat res; + int count = (bs != null) ? bs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_8SC1); + byte[] buff = new byte[count]; + for (int i = 0; i < count; i++) { + byte b = bs.get(i); + buff[i] = b; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static Mat vector_int_to_Mat(List is) { + Mat res; + int count = (is != null) ? 
is.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32SC1); + int[] buff = new int[count]; + for (int i = 0; i < count; i++) { + int v = is.get(i); + buff[i] = v; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_int(Mat m, List is) { + if (is == null) + throw new java.lang.IllegalArgumentException("is == null"); + int count = m.rows(); + if (CvType.CV_32SC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32SC1 != m.type() || m.cols()!=1\n" + m); + + is.clear(); + int[] buff = new int[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + is.add(buff[i]); + } + } + + public static void Mat_to_vector_char(Mat m, List bs) { + if (bs == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_8SC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_8SC1 != m.type() || m.cols()!=1\n" + m); + + bs.clear(); + byte[] buff = new byte[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + bs.add(buff[i]); + } + } + + public static Mat vector_Rect_to_Mat(List rs) { + Mat res; + int count = (rs != null) ? 
rs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32SC4); + int[] buff = new int[4 * count]; + for (int i = 0; i < count; i++) { + Rect r = rs.get(i); + buff[4 * i] = r.x; + buff[4 * i + 1] = r.y; + buff[4 * i + 2] = r.width; + buff[4 * i + 3] = r.height; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_Rect(Mat m, List rs) { + if (rs == null) + throw new java.lang.IllegalArgumentException("rs == null"); + int count = m.rows(); + if (CvType.CV_32SC4 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32SC4 != m.type() || m.rows()!=1\n" + m); + + rs.clear(); + int[] buff = new int[4 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + rs.add(new Rect(buff[4 * i], buff[4 * i + 1], buff[4 * i + 2], buff[4 * i + 3])); + } + } + + public static Mat vector_KeyPoint_to_Mat(List kps) { + Mat res; + int count = (kps != null) ? kps.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_64FC(7)); + double[] buff = new double[count * 7]; + for (int i = 0; i < count; i++) { + KeyPoint kp = kps.get(i); + buff[7 * i] = kp.pt.x; + buff[7 * i + 1] = kp.pt.y; + buff[7 * i + 2] = kp.size; + buff[7 * i + 3] = kp.angle; + buff[7 * i + 4] = kp.response; + buff[7 * i + 5] = kp.octave; + buff[7 * i + 6] = kp.class_id; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_KeyPoint(Mat m, List kps) { + if (kps == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_64FC(7) != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_64FC(7) != m.type() || m.cols()!=1\n" + m); + + kps.clear(); + double[] buff = new double[7 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + kps.add(new KeyPoint((float) buff[7 * i], (float) buff[7 * i + 1], (float) buff[7 * i + 2], 
(float) buff[7 * i + 3], + (float) buff[7 * i + 4], (int) buff[7 * i + 5], (int) buff[7 * i + 6])); + } + } + + // vector_vector_Point + public static Mat vector_vector_Point_to_Mat(List pts, List mats) { + Mat res; + int lCount = (pts != null) ? pts.size() : 0; + if (lCount > 0) { + for (MatOfPoint vpt : pts) + mats.add(vpt); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_Point(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfPoint pt = new MatOfPoint(mi); + pts.add(pt); + } + } + + // vector_vector_Point2f + public static void Mat_to_vector_vector_Point2f(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfPoint2f pt = new MatOfPoint2f(mi); + pts.add(pt); + } + } + + // vector_vector_Point2f + public static Mat vector_vector_Point2f_to_Mat(List pts, List mats) { + Mat res; + int lCount = (pts != null) ? 
pts.size() : 0; + if (lCount > 0) { + for (MatOfPoint2f vpt : pts) + mats.add(vpt); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + // vector_vector_Point3f + public static void Mat_to_vector_vector_Point3f(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfPoint3f pt = new MatOfPoint3f(mi); + pts.add(pt); + } + } + + // vector_vector_Point3f + public static Mat vector_vector_Point3f_to_Mat(List pts, List mats) { + Mat res; + int lCount = (pts != null) ? pts.size() : 0; + if (lCount > 0) { + for (MatOfPoint3f vpt : pts) + mats.add(vpt); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + // vector_vector_KeyPoint + public static Mat vector_vector_KeyPoint_to_Mat(List kps, List mats) { + Mat res; + int lCount = (kps != null) ? kps.size() : 0; + if (lCount > 0) { + for (MatOfKeyPoint vkp : kps) + mats.add(vkp); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_KeyPoint(Mat m, List kps) { + if (kps == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfKeyPoint vkp = new MatOfKeyPoint(mi); + kps.add(vkp); + } + } + + public static Mat vector_double_to_Mat(List ds) { + Mat res; + int count = (ds != null) ? 
ds.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_64FC1); + double[] buff = new double[count]; + for (int i = 0; i < count; i++) { + double v = ds.get(i); + buff[i] = v; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_double(Mat m, List ds) { + if (ds == null) + throw new java.lang.IllegalArgumentException("ds == null"); + int count = m.rows(); + if (CvType.CV_64FC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_64FC1 != m.type() || m.cols()!=1\n" + m); + + ds.clear(); + double[] buff = new double[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + ds.add(buff[i]); + } + } + + public static Mat vector_DMatch_to_Mat(List matches) { + Mat res; + int count = (matches != null) ? matches.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_64FC4); + double[] buff = new double[count * 4]; + for (int i = 0; i < count; i++) { + DMatch m = matches.get(i); + buff[4 * i] = m.queryIdx; + buff[4 * i + 1] = m.trainIdx; + buff[4 * i + 2] = m.imgIdx; + buff[4 * i + 3] = m.distance; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_DMatch(Mat m, List matches) { + if (matches == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_64FC4 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_64FC4 != m.type() || m.cols()!=1\n" + m); + + matches.clear(); + double[] buff = new double[4 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + matches.add(new DMatch((int) buff[4 * i], (int) buff[4 * i + 1], (int) buff[4 * i + 2], (float) buff[4 * i + 3])); + } + } + + // vector_vector_DMatch + public static Mat vector_vector_DMatch_to_Mat(List lvdm, List mats) { + Mat res; + int lCount = (lvdm != null) ? 
lvdm.size() : 0; + if (lCount > 0) { + for (MatOfDMatch vdm : lvdm) + mats.add(vdm); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_DMatch(Mat m, List lvdm) { + if (lvdm == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + lvdm.clear(); + for (Mat mi : mats) { + MatOfDMatch vdm = new MatOfDMatch(mi); + lvdm.add(vdm); + } + } + + // vector_vector_char + public static Mat vector_vector_char_to_Mat(List lvb, List mats) { + Mat res; + int lCount = (lvb != null) ? lvb.size() : 0; + if (lCount > 0) { + for (MatOfByte vb : lvb) + mats.add(vb); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_char(Mat m, List> llb) { + if (llb == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + List lb = new ArrayList(); + Mat_to_vector_char(mi, lb); + llb.add(lb); + } + } +} diff --git a/src/org/opencv/utils/package.bluej b/src/org/opencv/utils/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/video/BackgroundSubtractor.java b/src/org/opencv/video/BackgroundSubtractor.java new file mode 100644 index 0000000..dd40a9f --- /dev/null +++ b/src/org/opencv/video/BackgroundSubtractor.java @@ -0,0 +1,93 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + +import org.opencv.core.Algorithm; +import org.opencv.core.Mat; + +// C++: class BackgroundSubtractor +/** + *

Base class for background/foreground segmentation.

+ * + *

class BackgroundSubtractor : public Algorithm

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~BackgroundSubtractor();

+ * + *

virtual void operator()(InputArray image, OutputArray fgmask, double + * learningRate=0);

+ * + *

virtual void getBackgroundImage(OutputArray backgroundImage) const;

+ * + *

};

+ * + *

The class is only used to define the common interface for the whole family of + * background/foreground segmentation algorithms. + *

+ * + * @see org.opencv.video.BackgroundSubtractor : public Algorithm + */ +public class BackgroundSubtractor extends Algorithm { + + protected BackgroundSubtractor(long addr) { super(addr); } + + + // + // C++: void BackgroundSubtractor::operator ()(Mat image, Mat& fgmask, double learningRate = 0) + // + +/** + *

Computes a foreground mask.

+ * + * @param image Next video frame. + * @param fgmask The output foreground mask as an 8-bit binary image. + * @param learningRate a learningRate + * + * @see org.opencv.video.BackgroundSubtractor.operator() + */ + public void apply(Mat image, Mat fgmask, double learningRate) + { + + apply_0(nativeObj, image.nativeObj, fgmask.nativeObj, learningRate); + + return; + } + +/** + *

Computes a foreground mask.

+ * + * @param image Next video frame. + * @param fgmask The output foreground mask as an 8-bit binary image. + * + * @see org.opencv.video.BackgroundSubtractor.operator() + */ + public void apply(Mat image, Mat fgmask) + { + + apply_1(nativeObj, image.nativeObj, fgmask.nativeObj); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void BackgroundSubtractor::operator ()(Mat image, Mat& fgmask, double learningRate = 0) + private static native void apply_0(long nativeObj, long image_nativeObj, long fgmask_nativeObj, double learningRate); + private static native void apply_1(long nativeObj, long image_nativeObj, long fgmask_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/video/BackgroundSubtractorMOG.java b/src/org/opencv/video/BackgroundSubtractorMOG.java new file mode 100644 index 0000000..709afa7 --- /dev/null +++ b/src/org/opencv/video/BackgroundSubtractorMOG.java @@ -0,0 +1,106 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + + + +// C++: class BackgroundSubtractorMOG +/** + *

Gaussian Mixture-based Background/Foreground Segmentation Algorithm.

+ * + *

The class implements the algorithm described in P. KadewTraKuPong and R. + * Bowden, *An improved adaptive background mixture model for real-time tracking + * with shadow detection*, Proc. 2nd European Workshop on Advanced Video-Based + * Surveillance Systems, 2001: http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf

+ * + * @see org.opencv.video.BackgroundSubtractorMOG : public BackgroundSubtractor + */ +public class BackgroundSubtractorMOG extends BackgroundSubtractor { + + protected BackgroundSubtractorMOG(long addr) { super(addr); } + + + // + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG() + // + +/** + *

The constructors.

+ * + *

Default constructor sets all parameters to default values.

+ * + * @see org.opencv.video.BackgroundSubtractorMOG.BackgroundSubtractorMOG + */ + public BackgroundSubtractorMOG() + { + + super( BackgroundSubtractorMOG_0() ); + + return; + } + + + // + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma = 0) + // + +/** + *

The constructors.

+ * + *

Default constructor sets all parameters to default values.

+ * + * @param history Length of the history. + * @param nmixtures Number of Gaussian mixtures. + * @param backgroundRatio Background ratio. + * @param noiseSigma Noise strength. + * + * @see org.opencv.video.BackgroundSubtractorMOG.BackgroundSubtractorMOG + */ + public BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma) + { + + super( BackgroundSubtractorMOG_1(history, nmixtures, backgroundRatio, noiseSigma) ); + + return; + } + +/** + *

The constructors.

+ * + *

Default constructor sets all parameters to default values.

+ * + * @param history Length of the history. + * @param nmixtures Number of Gaussian mixtures. + * @param backgroundRatio Background ratio. + * + * @see org.opencv.video.BackgroundSubtractorMOG.BackgroundSubtractorMOG + */ + public BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio) + { + + super( BackgroundSubtractorMOG_2(history, nmixtures, backgroundRatio) ); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG() + private static native long BackgroundSubtractorMOG_0(); + + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma = 0) + private static native long BackgroundSubtractorMOG_1(int history, int nmixtures, double backgroundRatio, double noiseSigma); + private static native long BackgroundSubtractorMOG_2(int history, int nmixtures, double backgroundRatio); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/video/KalmanFilter.java b/src/org/opencv/video/KalmanFilter.java new file mode 100644 index 0000000..2b83749 --- /dev/null +++ b/src/org/opencv/video/KalmanFilter.java @@ -0,0 +1,176 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + +import org.opencv.core.Mat; + +// C++: class KalmanFilter +/** + *

Kalman filter class.

+ * + *

The class implements a standard Kalman filter http://en.wikipedia.org/wiki/Kalman_filter, + * [Welch95]. However, you can modify transitionMatrix, + * controlMatrix, and measurementMatrix to get an + * extended Kalman filter functionality. See the OpenCV sample kalman.cpp.

+ * + * @see org.opencv.video.KalmanFilter + */ +public class KalmanFilter { + + protected final long nativeObj; + protected KalmanFilter(long addr) { nativeObj = addr; } + + + // + // C++: KalmanFilter::KalmanFilter() + // + +/** + *

The constructors.

+ * + *

The full constructor.

+ * + *

Note: In C API when CvKalman* kalmanFilter structure is not + * needed anymore, it should be released with cvReleaseKalman(&kalmanFilter)

+ * + * @see org.opencv.video.KalmanFilter.KalmanFilter + */ + public KalmanFilter() + { + + nativeObj = KalmanFilter_0(); + + return; + } + + + // + // C++: KalmanFilter::KalmanFilter(int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F) + // + +/** + *

The constructors.

+ * + *

The full constructor.

+ * + *

Note: In C API when CvKalman* kalmanFilter structure is not + * needed anymore, it should be released with cvReleaseKalman(&kalmanFilter)

+ * + * @param dynamParams Dimensionality of the state. + * @param measureParams Dimensionality of the measurement. + * @param controlParams Dimensionality of the control vector. + * @param type Type of the created matrices that should be CV_32F + * or CV_64F. + * + * @see org.opencv.video.KalmanFilter.KalmanFilter + */ + public KalmanFilter(int dynamParams, int measureParams, int controlParams, int type) + { + + nativeObj = KalmanFilter_1(dynamParams, measureParams, controlParams, type); + + return; + } + +/** + *

The constructors.

+ * + *

The full constructor.

+ * + *

Note: In C API when CvKalman* kalmanFilter structure is not + * needed anymore, it should be released with cvReleaseKalman(&kalmanFilter)

+ * + * @param dynamParams Dimensionality of the state. + * @param measureParams Dimensionality of the measurement. + * + * @see org.opencv.video.KalmanFilter.KalmanFilter + */ + public KalmanFilter(int dynamParams, int measureParams) + { + + nativeObj = KalmanFilter_2(dynamParams, measureParams); + + return; + } + + + // + // C++: Mat KalmanFilter::correct(Mat measurement) + // + +/** + *

Updates the predicted state from the measurement.

+ * + * @param measurement The measured system parameters + * + * @see org.opencv.video.KalmanFilter.correct + */ + public Mat correct(Mat measurement) + { + + Mat retVal = new Mat(correct_0(nativeObj, measurement.nativeObj)); + + return retVal; + } + + + // + // C++: Mat KalmanFilter::predict(Mat control = Mat()) + // + +/** + *

Computes a predicted state.

+ * + * @param control The optional input control + * + * @see org.opencv.video.KalmanFilter.predict + */ + public Mat predict(Mat control) + { + + Mat retVal = new Mat(predict_0(nativeObj, control.nativeObj)); + + return retVal; + } + +/** + *

Computes a predicted state.

+ * + * @see org.opencv.video.KalmanFilter.predict + */ + public Mat predict() + { + + Mat retVal = new Mat(predict_1(nativeObj)); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: KalmanFilter::KalmanFilter() + private static native long KalmanFilter_0(); + + // C++: KalmanFilter::KalmanFilter(int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F) + private static native long KalmanFilter_1(int dynamParams, int measureParams, int controlParams, int type); + private static native long KalmanFilter_2(int dynamParams, int measureParams); + + // C++: Mat KalmanFilter::correct(Mat measurement) + private static native long correct_0(long nativeObj, long measurement_nativeObj); + + // C++: Mat KalmanFilter::predict(Mat control = Mat()) + private static native long predict_0(long nativeObj, long control_nativeObj); + private static native long predict_1(long nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/video/Video.java b/src/org/opencv/video/Video.java new file mode 100644 index 0000000..4522e73 --- /dev/null +++ b/src/org/opencv/video/Video.java @@ -0,0 +1,731 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.video; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfRect; +import org.opencv.core.Rect; +import org.opencv.core.RotatedRect; +import org.opencv.core.Size; +import org.opencv.core.TermCriteria; +import org.opencv.utils.Converters; + +public class Video { + + private static final int + CV_LKFLOW_INITIAL_GUESSES = 4, + CV_LKFLOW_GET_MIN_EIGENVALS = 8; + + + public static final int + OPTFLOW_USE_INITIAL_FLOW = CV_LKFLOW_INITIAL_GUESSES, + OPTFLOW_LK_GET_MIN_EIGENVALS = CV_LKFLOW_GET_MIN_EIGENVALS, + OPTFLOW_FARNEBACK_GAUSSIAN = 256; + + + // + // C++: RotatedRect CamShift(Mat probImage, Rect& window, TermCriteria criteria) + // + +/** + *

Finds an object center, size, and orientation.

+ * + *

The function implements the CAMSHIFT object tracking algorithm [Bradski98]. + * First, it finds an object center using "meanShift" and then adjusts the + * window size and finds the optimal rotation. The function returns the rotated + * rectangle structure that includes the object position, size, and orientation. + * The next position of the search window can be obtained with RotatedRect.boundingRect().

+ * + *

See the OpenCV sample camshiftdemo.c that tracks colored + * objects.

+ * + * @param probImage Back projection of the object histogram. See + * "calcBackProject". + * @param window Initial search window. + * @param criteria Stop criteria for the underlying "meanShift". + * + *

:returns: (in old interfaces) Number of iterations CAMSHIFT took to converge

+ * + * @see org.opencv.video.Video.CamShift + */ + public static RotatedRect CamShift(Mat probImage, Rect window, TermCriteria criteria) + { + double[] window_out = new double[4]; + RotatedRect retVal = new RotatedRect(CamShift_0(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon)); + if(window!=null){ window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3]; } + return retVal; + } + + + // + // C++: int buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true) + // + +/** + *

Constructs the image pyramid which can be passed to "calcOpticalFlowPyrLK".

+ * + * @param img 8-bit input image. + * @param pyramid output pyramid. + * @param winSize window size of optical flow algorithm. Must be not less than + * winSize argument of "calcOpticalFlowPyrLK". It is needed to + * calculate required padding for pyramid levels. + * @param maxLevel 0-based maximal pyramid level number. + * @param withDerivatives set to precompute gradients for the every pyramid + * level. If pyramid is constructed without the gradients then "calcOpticalFlowPyrLK" + * will calculate them internally. + * @param pyrBorder the border mode for pyramid layers. + * @param derivBorder the border mode for gradients. + * @param tryReuseInputImage put ROI of input image into the pyramid if + * possible. You can pass false to force data copying. + * + *

:return: number of levels in constructed pyramid. Can be less than + * maxLevel.

+ * + * @see org.opencv.video.Video.buildOpticalFlowPyramid + */ + public static int buildOpticalFlowPyramid(Mat img, List pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage) + { + Mat pyramid_mat = new Mat(); + int retVal = buildOpticalFlowPyramid_0(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage); + Converters.Mat_to_vector_Mat(pyramid_mat, pyramid); + return retVal; + } + +/** + *

Constructs the image pyramid which can be passed to "calcOpticalFlowPyrLK".

+ * + * @param img 8-bit input image. + * @param pyramid output pyramid. + * @param winSize window size of optical flow algorithm. Must be not less than + * winSize argument of "calcOpticalFlowPyrLK". It is needed to + * calculate required padding for pyramid levels. + * @param maxLevel 0-based maximal pyramid level number. + * + * @see org.opencv.video.Video.buildOpticalFlowPyramid + */ + public static int buildOpticalFlowPyramid(Mat img, List pyramid, Size winSize, int maxLevel) + { + Mat pyramid_mat = new Mat(); + int retVal = buildOpticalFlowPyramid_1(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel); + Converters.Mat_to_vector_Mat(pyramid_mat, pyramid); + return retVal; + } + + + // + // C++: double calcGlobalOrientation(Mat orientation, Mat mask, Mat mhi, double timestamp, double duration) + // + +/** + *

Calculates a global motion orientation in a selected region.

+ * + *

The function calculates an average motion direction in the selected region + * and returns the angle between 0 degrees and 360 degrees. The average + * direction is computed from the weighted orientation histogram, where a recent + * motion has a larger weight and the motion occurred in the past has a smaller + * weight, as recorded in mhi.

+ * + * @param orientation Motion gradient orientation image calculated by the + * function "calcMotionGradient". + * @param mask Mask image. It may be a conjunction of a valid gradient mask, + * also calculated by "calcMotionGradient", and the mask of a region whose + * direction needs to be calculated. + * @param mhi Motion history image calculated by "updateMotionHistory". + * @param timestamp Timestamp passed to "updateMotionHistory". + * @param duration Maximum duration of a motion track in milliseconds, passed to + * "updateMotionHistory". + * + * @see org.opencv.video.Video.calcGlobalOrientation + */ + public static double calcGlobalOrientation(Mat orientation, Mat mask, Mat mhi, double timestamp, double duration) + { + + double retVal = calcGlobalOrientation_0(orientation.nativeObj, mask.nativeObj, mhi.nativeObj, timestamp, duration); + + return retVal; + } + + + // + // C++: void calcMotionGradient(Mat mhi, Mat& mask, Mat& orientation, double delta1, double delta2, int apertureSize = 3) + // + +/** + *

Calculates a gradient orientation of a motion history image.

+ * + *

The function calculates a gradient orientation at each pixel (x, y) + * as:

+ * + *

orientation(x,y)= arctan((dmhi/dy)/(dmhi/dx))

+ * + *

In fact, "fastAtan2" and "phase" are used so that the computed angle is + * measured in degrees and covers the full range 0..360. Also, the + * mask is filled to indicate pixels where the computed angle is + * valid.

+ * + * @param mhi Motion history single-channel floating-point image. + * @param mask Output mask image that has the type CV_8UC1 and the + * same size as mhi. Its non-zero elements mark pixels where the + * motion gradient data is correct. + * @param orientation Output motion gradient orientation image that has the same + * type and the same size as mhi. Each pixel of the image is a + * motion orientation, from 0 to 360 degrees. + * @param delta1 Minimal (or maximal) allowed difference between + * mhi values within a pixel neighborhood. + * @param delta2 Maximal (or minimal) allowed difference between + * mhi values within a pixel neighborhood. That is, the function + * finds the minimum (m(x,y)) and maximum (M(x,y)) + * mhi values over 3 x 3 neighborhood of each pixel and + * marks the motion orientation at (x, y) as valid only if + * + *

min(delta1, delta2) <= M(x,y)-m(x,y) <= max(delta1, delta2).

+ * @param apertureSize Aperture size of the "Sobel" operator. + * + * @see org.opencv.video.Video.calcMotionGradient + */ + public static void calcMotionGradient(Mat mhi, Mat mask, Mat orientation, double delta1, double delta2, int apertureSize) + { + + calcMotionGradient_0(mhi.nativeObj, mask.nativeObj, orientation.nativeObj, delta1, delta2, apertureSize); + + return; + } + +/** + *

Calculates a gradient orientation of a motion history image.

+ * + *

The function calculates a gradient orientation at each pixel (x, y) + * as:

+ * + *

orientation(x,y)= arctan((dmhi/dy)/(dmhi/dx))

+ * + *

In fact, "fastAtan2" and "phase" are used so that the computed angle is + * measured in degrees and covers the full range 0..360. Also, the + * mask is filled to indicate pixels where the computed angle is + * valid.

+ * + * @param mhi Motion history single-channel floating-point image. + * @param mask Output mask image that has the type CV_8UC1 and the + * same size as mhi. Its non-zero elements mark pixels where the + * motion gradient data is correct. + * @param orientation Output motion gradient orientation image that has the same + * type and the same size as mhi. Each pixel of the image is a + * motion orientation, from 0 to 360 degrees. + * @param delta1 Minimal (or maximal) allowed difference between + * mhi values within a pixel neighborhood. + * @param delta2 Maximal (or minimal) allowed difference between + * mhi values within a pixel neighborhood. That is, the function + * finds the minimum (m(x,y)) and maximum (M(x,y)) + * mhi values over 3 x 3 neighborhood of each pixel and + * marks the motion orientation at (x, y) as valid only if + * + *

min(delta1, delta2) <= M(x,y)-m(x,y) <= max(delta1, delta2).

+ * + * @see org.opencv.video.Video.calcMotionGradient + */ + public static void calcMotionGradient(Mat mhi, Mat mask, Mat orientation, double delta1, double delta2) + { + + calcMotionGradient_1(mhi.nativeObj, mask.nativeObj, orientation.nativeObj, delta1, delta2); + + return; + } + + + // + // C++: void calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) + // + +/** + *

Computes a dense optical flow using the Gunnar Farneback's algorithm.

+ * + *

The function finds an optical flow for each prev pixel using the + * [Farneback2003] algorithm so that

+ * + *

prev(y,x) ~ next(y + flow(y,x)[1], x + flow(y,x)[0])

+ * + * @param prev first 8-bit single-channel input image. + * @param next second input image of the same size and the same type as + * prev. + * @param flow computed flow image that has the same size as prev + * and type CV_32FC2. + * @param pyr_scale parameter, specifying the image scale (<1) to build pyramids + * for each image; pyr_scale=0.5 means a classical pyramid, where + * each next layer is twice smaller than the previous one. + * @param levels number of pyramid layers including the initial image; + * levels=1 means that no extra layers are created and only the + * original images are used. + * @param winsize averaging window size; larger values increase the algorithm + * robustness to image noise and give more chances for fast motion detection, + * but yield more blurred motion field. + * @param iterations number of iterations the algorithm does at each pyramid + * level. + * @param poly_n size of the pixel neighborhood used to find polynomial + * expansion in each pixel; larger values mean that the image will be + * approximated with smoother surfaces, yielding more robust algorithm and more + * blurred motion field, typically poly_n =5 or 7. + * @param poly_sigma standard deviation of the Gaussian that is used to smooth + * derivatives used as a basis for the polynomial expansion; for + * poly_n=5, you can set poly_sigma=1.1, for + * poly_n=7, a good value would be poly_sigma=1.5. + * @param flags operation flags that can be a combination of the following: + *
    + *
  • OPTFLOW_USE_INITIAL_FLOW uses the input flow as an + * initial flow approximation. + *
  • OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian winsizexwinsize + * filter instead of a box filter of the same size for optical flow estimation; + * usually, this option gives a more accurate flow than with a box filter, at + * the cost of lower speed; normally, winsize for a Gaussian window + * should be set to a larger value to achieve the same level of robustness. + *
+ * + * @see org.opencv.video.Video.calcOpticalFlowFarneback + */ + public static void calcOpticalFlowFarneback(Mat prev, Mat next, Mat flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) + { + + calcOpticalFlowFarneback_0(prev.nativeObj, next.nativeObj, flow.nativeObj, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags); + + return; + } + + + // + // C++: void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, vector_Point2f prevPts, vector_Point2f& nextPts, vector_uchar& status, vector_float& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4) + // + +/** + *

Calculates an optical flow for a sparse feature set using the iterative + * Lucas-Kanade method with pyramids.

+ * + *

The function implements a sparse iterative version of the Lucas-Kanade + * optical flow in pyramids. See [Bouguet00]. The function is parallelized with + * the TBB library.

+ * + * @param prevImg first 8-bit input image or pyramid constructed by + * "buildOpticalFlowPyramid". + * @param nextImg second input image or pyramid of the same size and the same + * type as prevImg. + * @param prevPts vector of 2D points for which the flow needs to be found; + * point coordinates must be single-precision floating-point numbers. + * @param nextPts output vector of 2D points (with single-precision + * floating-point coordinates) containing the calculated new positions of input + * features in the second image; when OPTFLOW_USE_INITIAL_FLOW flag + * is passed, the vector must have the same size as in the input. + * @param status output status vector (of unsigned chars); each element of the + * vector is set to 1 if the flow for the corresponding features has been found, + * otherwise, it is set to 0. + * @param err output vector of errors; each element of the vector is set to an + * error for the corresponding feature, type of the error measure can be set in + * flags parameter; if the flow wasn't found then the error is not + * defined (use the status parameter to find such cases). + * @param winSize size of the search window at each pyramid level. + * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids + * are not used (single level), if set to 1, two levels are used, and so on; if + * pyramids are passed to input then algorithm will use as many levels as + * pyramids have but no more than maxLevel. + * @param criteria parameter, specifying the termination criteria of the + * iterative search algorithm (after the specified maximum number of iterations + * criteria.maxCount or when the search window moves by less than + * criteria.epsilon. + * @param flags operation flags: + *
    + *
  • OPTFLOW_USE_INITIAL_FLOW uses initial estimations, stored in + * nextPts; if the flag is not set, then prevPts is + * copied to nextPts and is considered the initial estimate. + *
  • OPTFLOW_LK_GET_MIN_EIGENVALS uses minimum eigen values as an error + * measure (see minEigThreshold description); if the flag is not + * set, then L1 distance between patches around the original and a moved point, + * divided by number of pixels in a window, is used as an error measure. + *
+ * @param minEigThreshold the algorithm calculates the minimum eigen value of a + * 2x2 normal matrix of optical flow equations (this matrix is called a spatial + * gradient matrix in [Bouguet00]), divided by number of pixels in a window; if + * this value is less than minEigThreshold, then a corresponding + * feature is filtered out and its flow is not processed, so it allows to remove + * bad points and get a performance boost. + * + * @see org.opencv.video.Video.calcOpticalFlowPyrLK + */ + public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold) + { + Mat prevPts_mat = prevPts; + Mat nextPts_mat = nextPts; + Mat status_mat = status; + Mat err_mat = err; + calcOpticalFlowPyrLK_0(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon, flags, minEigThreshold); + + return; + } + +/** + *

Calculates an optical flow for a sparse feature set using the iterative + * Lucas-Kanade method with pyramids.

+ * + *

The function implements a sparse iterative version of the Lucas-Kanade + * optical flow in pyramids. See [Bouguet00]. The function is parallelized with + * the TBB library.

+ * + * @param prevImg first 8-bit input image or pyramid constructed by + * "buildOpticalFlowPyramid". + * @param nextImg second input image or pyramid of the same size and the same + * type as prevImg. + * @param prevPts vector of 2D points for which the flow needs to be found; + * point coordinates must be single-precision floating-point numbers. + * @param nextPts output vector of 2D points (with single-precision + * floating-point coordinates) containing the calculated new positions of input + * features in the second image; when OPTFLOW_USE_INITIAL_FLOW flag + * is passed, the vector must have the same size as in the input. + * @param status output status vector (of unsigned chars); each element of the + * vector is set to 1 if the flow for the corresponding features has been found, + * otherwise, it is set to 0. + * @param err output vector of errors; each element of the vector is set to an + * error for the corresponding feature, type of the error measure can be set in + * flags parameter; if the flow wasn't found then the error is not + * defined (use the status parameter to find such cases). + * @param winSize size of the search window at each pyramid level. + * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids + * are not used (single level), if set to 1, two levels are used, and so on; if + * pyramids are passed to input then algorithm will use as many levels as + * pyramids have but no more than maxLevel. 
+ * + * @see org.opencv.video.Video.calcOpticalFlowPyrLK + */ + public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel) + { + Mat prevPts_mat = prevPts; + Mat nextPts_mat = nextPts; + Mat status_mat = status; + Mat err_mat = err; + calcOpticalFlowPyrLK_1(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel); + + return; + } + +/** + *

Calculates an optical flow for a sparse feature set using the iterative + * Lucas-Kanade method with pyramids.

+ * + *

The function implements a sparse iterative version of the Lucas-Kanade + * optical flow in pyramids. See [Bouguet00]. The function is parallelized with + * the TBB library.

+ * + * @param prevImg first 8-bit input image or pyramid constructed by + * "buildOpticalFlowPyramid". + * @param nextImg second input image or pyramid of the same size and the same + * type as prevImg. + * @param prevPts vector of 2D points for which the flow needs to be found; + * point coordinates must be single-precision floating-point numbers. + * @param nextPts output vector of 2D points (with single-precision + * floating-point coordinates) containing the calculated new positions of input + * features in the second image; when OPTFLOW_USE_INITIAL_FLOW flag + * is passed, the vector must have the same size as in the input. + * @param status output status vector (of unsigned chars); each element of the + * vector is set to 1 if the flow for the corresponding features has been found, + * otherwise, it is set to 0. + * @param err output vector of errors; each element of the vector is set to an + * error for the corresponding feature, type of the error measure can be set in + * flags parameter; if the flow wasn't found then the error is not + * defined (use the status parameter to find such cases). + * + * @see org.opencv.video.Video.calcOpticalFlowPyrLK + */ + public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err) + { + Mat prevPts_mat = prevPts; + Mat nextPts_mat = nextPts; + Mat status_mat = status; + Mat err_mat = err; + calcOpticalFlowPyrLK_2(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj); + + return; + } + + + // + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow) + // + +/** + *

Calculate an optical flow using "SimpleFlow" algorithm.

+ * + *

See [Tao2012]. And site of project - http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/.

+ * + * @param from a from + * @param to a to + * @param flow a flow + * @param layers Number of layers + * @param averaging_block_size Size of block through which we sum up when + * calculate cost function for pixel + * @param max_flow maximal flow that we search at each level + * + * @see org.opencv.video.Video.calcOpticalFlowSF + */ + public static void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow) + { + + calcOpticalFlowSF_0(from.nativeObj, to.nativeObj, flow.nativeObj, layers, averaging_block_size, max_flow); + + return; + } + + + // + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr) + // + +/** + *

Calculate an optical flow using "SimpleFlow" algorithm.

+ * + *

See [Tao2012]. And site of project - http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/.

+ * + * @param from a from + * @param to a to + * @param flow a flow + * @param layers Number of layers + * @param averaging_block_size Size of block through which we sum up when + * calculate cost function for pixel + * @param max_flow maximal flow that we search at each level + * @param sigma_dist vector smooth spatial sigma parameter + * @param sigma_color vector smooth color sigma parameter + * @param postprocess_window window size for postprocess cross bilateral filter + * @param sigma_dist_fix spatial sigma for postprocess cross bilateral filter + * @param sigma_color_fix color sigma for postprocess cross bilateral filter + * @param occ_thr threshold for detecting occlusions + * @param upscale_averaging_radius averaging radius for the bilateral upscale operation + * @param upscale_sigma_dist spatial sigma for bilateral upscale operation + * @param upscale_sigma_color color sigma for bilateral upscale operation + * @param speed_up_thr threshold to detect point with irregular flow - where + * flow should be recalculated after upscale + * + * @see org.opencv.video.Video.calcOpticalFlowSF + */ + public static void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr) + { + + calcOpticalFlowSF_1(from.nativeObj, to.nativeObj, flow.nativeObj, layers, averaging_block_size, max_flow, sigma_dist, sigma_color, postprocess_window, sigma_dist_fix, sigma_color_fix, occ_thr, upscale_averaging_radius, upscale_sigma_dist, upscale_sigma_color, speed_up_thr); + + return; + } + + + // + // C++: Mat estimateRigidTransform(Mat src, Mat dst, bool fullAffine) + // + +/** + *

Computes an optimal affine transformation between two 2D point sets.

+ * + *

The function finds an optimal affine transform *[A|b]* (a 2 x 3 + * floating-point matrix) that approximates best the affine transformation + * between:

+ *
    + *
  • Two point sets + *
  • Two raster images. In this case, the function first finds some + * features in the src image and finds the corresponding features + * in dst image. After that, the problem is reduced to the first + * case. + *
+ * + *

In case of point sets, the problem is formulated as follows: you need to find + * a 2x2 matrix *A* and 2x1 vector *b* so that:

+ * + *

[A^*|b^*] = arg min _([A|b]) sum _i|dst[i] - A (src[i])^T - b| ^2

+ * + *

where src[i] and dst[i] are the i-th points in + * src and dst, respectively

+ * + *

[A|b] can be either arbitrary (when fullAffine=true) or + * have a form of

+ * + *

a_11 a_12 b_1 + * -a_12 a_11 b_2

+ * + *

when fullAffine=false.

+ * + * @param src First input 2D point set stored in std.vector or + * Mat, or an image stored in Mat. + * @param dst Second input 2D point set of the same size and the same type as + * A, or another image. + * @param fullAffine If true, the function finds an optimal affine + * transformation with no additional restrictions (6 degrees of freedom). + * Otherwise, the class of transformations to choose from is limited to + * combinations of translation, rotation, and uniform scaling (5 degrees of + * freedom). + * + * @see org.opencv.video.Video.estimateRigidTransform + * @see org.opencv.calib3d.Calib3d#findHomography + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat estimateRigidTransform(Mat src, Mat dst, boolean fullAffine) + { + + Mat retVal = new Mat(estimateRigidTransform_0(src.nativeObj, dst.nativeObj, fullAffine)); + + return retVal; + } + + + // + // C++: int meanShift(Mat probImage, Rect& window, TermCriteria criteria) + // + +/** + *

Finds an object on a back projection image.

+ * + *

The function implements the iterative object search algorithm. It takes the + * input back projection of an object and the initial position. The mass center + * in window of the back projection image is computed and the + * search window center shifts to the mass center. The procedure is repeated + * until the specified number of iterations criteria.maxCount is + * done or until the window center shifts by less than criteria.epsilon. + * The algorithm is used inside "CamShift" and, unlike "CamShift", the search + * window size or orientation do not change during the search. You can simply + * pass the output of "calcBackProject" to this function. But better results can + * be obtained if you pre-filter the back projection and remove the noise. For + * example, you can do this by retrieving connected components with + * "findContours", throwing away contours with small area ("contourArea"), and + * rendering the remaining contours with "drawContours".

+ * + * @param probImage Back projection of the object histogram. See + * "calcBackProject" for details. + * @param window Initial search window. + * @param criteria Stop criteria for the iterative search algorithm. + * + *

:returns: Number of iterations CAMSHIFT took to converge.

+ * + * @see org.opencv.video.Video.meanShift + */ + public static int meanShift(Mat probImage, Rect window, TermCriteria criteria) + { + double[] window_out = new double[4]; + int retVal = meanShift_0(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon); + if(window!=null){ window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3]; } + return retVal; + } + + + // + // C++: void segmentMotion(Mat mhi, Mat& segmask, vector_Rect& boundingRects, double timestamp, double segThresh) + // + +/** + *

Splits a motion history image into a few parts corresponding to separate + * independent motions (for example, left hand, right hand).

+ * + *

The function finds all of the motion segments and marks them in + * segmask with individual values (1,2,...). It also computes a + * vector with ROIs of motion connected components. After that the motion + * direction for every component can be calculated with "calcGlobalOrientation" + * using the extracted mask of the particular component.

+ * + * @param mhi Motion history image. + * @param segmask Image where the found mask should be stored, single-channel, + * 32-bit floating-point. + * @param boundingRects Vector containing ROIs of motion connected components. + * @param timestamp Current time in milliseconds or other units. + * @param segThresh Segmentation threshold that is recommended to be equal to + * the interval between motion history "steps" or greater. + * + * @see org.opencv.video.Video.segmentMotion + */ + public static void segmentMotion(Mat mhi, Mat segmask, MatOfRect boundingRects, double timestamp, double segThresh) + { + Mat boundingRects_mat = boundingRects; + segmentMotion_0(mhi.nativeObj, segmask.nativeObj, boundingRects_mat.nativeObj, timestamp, segThresh); + + return; + } + + + // + // C++: void updateMotionHistory(Mat silhouette, Mat& mhi, double timestamp, double duration) + // + +/** + *

Updates the motion history image by a moving silhouette.

+ * + *

The function updates the motion history image as follows:

+ * + *

mhi(x,y)= timestamp if silhouette(x,y) != 0; 0 if silhouette(x,y) = 0 and + * mhi <(timestamp - duration); mhi(x,y) otherwise

+ * + *

That is, MHI pixels where the motion occurs are set to the current + * timestamp, while the pixels where the motion happened last time + * a long time ago are cleared.

+ * + *

The function, together with "calcMotionGradient" and "calcGlobalOrientation", + * implements a motion templates technique described in [Davis97] and + * [Bradski00]. + * See also the OpenCV sample motempl.c that demonstrates the use + * of all the motion template functions.

+ * + * @param silhouette Silhouette mask that has non-zero pixels where the motion + * occurs. + * @param mhi Motion history image that is updated by the function + * (single-channel, 32-bit floating-point). + * @param timestamp Current time in milliseconds or other units. + * @param duration Maximal duration of the motion track in the same units as + * timestamp. + * + * @see org.opencv.video.Video.updateMotionHistory + */ + public static void updateMotionHistory(Mat silhouette, Mat mhi, double timestamp, double duration) + { + + updateMotionHistory_0(silhouette.nativeObj, mhi.nativeObj, timestamp, duration); + + return; + } + + + + + // C++: RotatedRect CamShift(Mat probImage, Rect& window, TermCriteria criteria) + private static native double[] CamShift_0(long probImage_nativeObj, int window_x, int window_y, int window_width, int window_height, double[] window_out, int criteria_type, int criteria_maxCount, double criteria_epsilon); + + // C++: int buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true) + private static native int buildOpticalFlowPyramid_0(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage); + private static native int buildOpticalFlowPyramid_1(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel); + + // C++: double calcGlobalOrientation(Mat orientation, Mat mask, Mat mhi, double timestamp, double duration) + private static native double calcGlobalOrientation_0(long orientation_nativeObj, long mask_nativeObj, long mhi_nativeObj, double timestamp, double duration); + + // C++: void calcMotionGradient(Mat mhi, Mat& mask, Mat& orientation, double delta1, double delta2, int apertureSize = 3) + private 
static native void calcMotionGradient_0(long mhi_nativeObj, long mask_nativeObj, long orientation_nativeObj, double delta1, double delta2, int apertureSize); + private static native void calcMotionGradient_1(long mhi_nativeObj, long mask_nativeObj, long orientation_nativeObj, double delta1, double delta2); + + // C++: void calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) + private static native void calcOpticalFlowFarneback_0(long prev_nativeObj, long next_nativeObj, long flow_nativeObj, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags); + + // C++: void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, vector_Point2f prevPts, vector_Point2f& nextPts, vector_uchar& status, vector_float& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4) + private static native void calcOpticalFlowPyrLK_0(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, int criteria_type, int criteria_maxCount, double criteria_epsilon, int flags, double minEigThreshold); + private static native void calcOpticalFlowPyrLK_1(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel); + private static native void calcOpticalFlowPyrLK_2(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj); + + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow) + 
private static native void calcOpticalFlowSF_0(long from_nativeObj, long to_nativeObj, long flow_nativeObj, int layers, int averaging_block_size, int max_flow); + + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr) + private static native void calcOpticalFlowSF_1(long from_nativeObj, long to_nativeObj, long flow_nativeObj, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr); + + // C++: Mat estimateRigidTransform(Mat src, Mat dst, bool fullAffine) + private static native long estimateRigidTransform_0(long src_nativeObj, long dst_nativeObj, boolean fullAffine); + + // C++: int meanShift(Mat probImage, Rect& window, TermCriteria criteria) + private static native int meanShift_0(long probImage_nativeObj, int window_x, int window_y, int window_width, int window_height, double[] window_out, int criteria_type, int criteria_maxCount, double criteria_epsilon); + + // C++: void segmentMotion(Mat mhi, Mat& segmask, vector_Rect& boundingRects, double timestamp, double segThresh) + private static native void segmentMotion_0(long mhi_nativeObj, long segmask_nativeObj, long boundingRects_mat_nativeObj, double timestamp, double segThresh); + + // C++: void updateMotionHistory(Mat silhouette, Mat& mhi, double timestamp, double duration) + private static native void updateMotionHistory_0(long silhouette_nativeObj, long mhi_nativeObj, double timestamp, double duration); + +} diff --git a/src/org/opencv/video/package.bluej b/src/org/opencv/video/package.bluej new file 
mode 100644 index 0000000..e69de29 diff --git a/src/org/package.bluej b/src/org/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/package.bluej b/src/package.bluej new file mode 100644 index 0000000..0c9a2f4 --- /dev/null +++ b/src/package.bluej @@ -0,0 +1,22 @@ +#BlueJ package file +package.editor.height=400 +package.editor.width=560 +package.editor.x=265 +package.editor.y=121 +package.numDependencies=0 +package.numTargets=2 +package.showExtends=true +package.showUses=true +project.charset=windows-1252 +target1.height=62 +target1.name=gab +target1.type=PackageTarget +target1.width=80 +target1.x=90 +target1.y=10 +target2.height=62 +target2.name=org +target2.type=PackageTarget +target2.width=80 +target2.x=180 +target2.y=10 From aef77dbedf2ef6d3b427f43abf23279455eff324 Mon Sep 17 00:00:00 2001 From: Charon77 Date: Mon, 4 May 2015 09:41:14 +0700 Subject: [PATCH 2/5] Lib Moved to Library Wrong folder naming from the start. --- .gitignore | 2 +- lib/.DS_Store | Bin 6148 -> 0 bytes lib/arm7/cv2.so | Bin 1093329 -> 0 bytes lib/arm7/libopencv_calib3d.so | 1 - lib/arm7/libopencv_calib3d.so.2.4 | 1 - lib/arm7/libopencv_contrib.so | 1 - lib/arm7/libopencv_contrib.so.2.4 | 1 - lib/arm7/libopencv_core.so | 1 - lib/arm7/libopencv_core.so.2.4 | 1 - lib/arm7/libopencv_features2d.so | 1 - lib/arm7/libopencv_features2d.so.2.4 | 1 - lib/arm7/libopencv_flann.so | 1 - lib/arm7/libopencv_flann.so.2.4 | 1 - lib/arm7/libopencv_gpu.so | 1 - lib/arm7/libopencv_gpu.so.2.4 | 1 - lib/arm7/libopencv_highgui.so | 1 - lib/arm7/libopencv_highgui.so.2.4 | 1 - lib/arm7/libopencv_imgproc.so | 1 - lib/arm7/libopencv_imgproc.so.2.4 | 1 - lib/arm7/libopencv_java245.so | Bin 701221 -> 0 bytes lib/arm7/libopencv_legacy.so | 1 - lib/arm7/libopencv_legacy.so.2.4 | 1 - lib/arm7/libopencv_ml.so | 1 - lib/arm7/libopencv_ml.so.2.4 | 1 - lib/arm7/libopencv_nonfree.so | 1 - lib/arm7/libopencv_nonfree.so.2.4 | 1 - lib/arm7/libopencv_objdetect.so | 1 - lib/arm7/libopencv_objdetect.so.2.4 | 
1 - lib/arm7/libopencv_photo.so | 1 - lib/arm7/libopencv_photo.so.2.4 | 1 - lib/arm7/libopencv_stitching.so | 1 - lib/arm7/libopencv_stitching.so.2.4 | 1 - lib/arm7/libopencv_superres.so | 1 - lib/arm7/libopencv_superres.so.2.4 | 1 - lib/arm7/libopencv_ts.so | 1 - lib/arm7/libopencv_ts.so.2.4 | 1 - lib/arm7/libopencv_video.so | 1 - lib/arm7/libopencv_video.so.2.4 | 1 - lib/arm7/libopencv_videostab.so | 1 - lib/arm7/libopencv_videostab.so.2.4 | 1 - lib/linux32/libopencv_calib3d.so | 1 - lib/linux32/libopencv_calib3d.so.2.4 | 1 - lib/linux32/libopencv_contrib.so | 1 - lib/linux32/libopencv_contrib.so.2.4 | 1 - lib/linux32/libopencv_core.so | 1 - lib/linux32/libopencv_core.so.2.4 | 1 - lib/linux32/libopencv_features2d.so | 1 - lib/linux32/libopencv_features2d.so.2.4 | 1 - lib/linux32/libopencv_flann.so | 1 - lib/linux32/libopencv_flann.so.2.4 | 1 - lib/linux32/libopencv_gpu.so | 1 - lib/linux32/libopencv_gpu.so.2.4 | 1 - lib/linux32/libopencv_highgui.so | 1 - lib/linux32/libopencv_highgui.so.2.4 | 1 - lib/linux32/libopencv_imgproc.so | 1 - lib/linux32/libopencv_imgproc.so.2.4 | 1 - lib/linux32/libopencv_java245.so | Bin 921855 -> 0 bytes lib/linux32/libopencv_legacy.so | 1 - lib/linux32/libopencv_legacy.so.2.4 | 1 - lib/linux32/libopencv_ml.so | 1 - lib/linux32/libopencv_ml.so.2.4 | 1 - lib/linux32/libopencv_nonfree.so | 1 - lib/linux32/libopencv_nonfree.so.2.4 | 1 - lib/linux32/libopencv_objdetect.so | 1 - lib/linux32/libopencv_objdetect.so.2.4 | 1 - lib/linux32/libopencv_photo.so | 1 - lib/linux32/libopencv_photo.so.2.4 | 1 - lib/linux32/libopencv_superres.so | 1 - lib/linux32/libopencv_superres.so.2.4 | 1 - lib/linux32/libopencv_ts.so | 1 - lib/linux32/libopencv_ts.so.2.4 | 1 - lib/linux32/libopencv_video.so | 1 - lib/linux32/libopencv_video.so.2.4 | 1 - lib/linux32/libopencv_videostab.so | 1 - lib/linux32/libopencv_videostab.so.2.4 | 1 - lib/linux64/libopencv_calib3d.so | 1 - lib/linux64/libopencv_calib3d.so.2.4 | 1 - lib/linux64/libopencv_contrib.so | 1 - 
lib/linux64/libopencv_contrib.so.2.4 | 1 - lib/linux64/libopencv_core.so | 1 - lib/linux64/libopencv_core.so.2.4 | 1 - lib/linux64/libopencv_features2d.so | 1 - lib/linux64/libopencv_features2d.so.2.4 | 1 - lib/linux64/libopencv_flann.so | 1 - lib/linux64/libopencv_flann.so.2.4 | 1 - lib/linux64/libopencv_gpu.so | 1 - lib/linux64/libopencv_gpu.so.2.4 | 1 - lib/linux64/libopencv_highgui.so | 1 - lib/linux64/libopencv_highgui.so.2.4 | 1 - lib/linux64/libopencv_imgproc.so | 1 - lib/linux64/libopencv_imgproc.so.2.4 | 1 - lib/linux64/libopencv_java245.so | Bin 905430 -> 0 bytes lib/linux64/libopencv_legacy.so | 1 - lib/linux64/libopencv_legacy.so.2.4 | 1 - lib/linux64/libopencv_ml.so | 1 - lib/linux64/libopencv_ml.so.2.4 | 1 - lib/linux64/libopencv_nonfree.so | 1 - lib/linux64/libopencv_nonfree.so.2.4 | 1 - lib/linux64/libopencv_objdetect.so | 1 - lib/linux64/libopencv_objdetect.so.2.4 | 1 - lib/linux64/libopencv_photo.so | 1 - lib/linux64/libopencv_photo.so.2.4 | 1 - lib/linux64/libopencv_stitching.so | 1 - lib/linux64/libopencv_stitching.so.2.4 | 1 - lib/linux64/libopencv_superres.so | 1 - lib/linux64/libopencv_superres.so.2.4 | 1 - lib/linux64/libopencv_ts.so | 1 - lib/linux64/libopencv_ts.so.2.4 | 1 - lib/linux64/libopencv_video.so | 1 - lib/linux64/libopencv_video.so.2.4 | 1 - lib/linux64/libopencv_videostab.so | 1 - lib/linux64/libopencv_videostab.so.2.4 | 1 - library/arm7/libopencv_calib3d.so.2.4 | 1 + .../arm7/libopencv_calib3d.so.2.4.5 | Bin .../arm7/libopencv_calib3d_pch_dephelp.a | Bin library/arm7/libopencv_contrib.so.2.4 | 1 + .../arm7/libopencv_contrib.so.2.4.5 | Bin .../arm7/libopencv_contrib_pch_dephelp.a | Bin library/arm7/libopencv_core.so.2.4 | 1 + {lib => library}/arm7/libopencv_core.so.2.4.5 | Bin .../arm7/libopencv_core_pch_dephelp.a | Bin library/arm7/libopencv_features2d.so.2.4 | 1 + .../arm7/libopencv_features2d.so.2.4.5 | Bin .../arm7/libopencv_features2d_pch_dephelp.a | Bin library/arm7/libopencv_flann.so.2.4 | 1 + 
.../arm7/libopencv_flann.so.2.4.5 | Bin .../arm7/libopencv_flann_pch_dephelp.a | Bin library/arm7/libopencv_gpu.so.2.4 | 1 + {lib => library}/arm7/libopencv_gpu.so.2.4.5 | Bin .../arm7/libopencv_gpu_pch_dephelp.a | Bin .../arm7/libopencv_haartraining_engine.a | Bin library/arm7/libopencv_highgui.so.2.4 | 1 + .../arm7/libopencv_highgui.so.2.4.5 | Bin .../arm7/libopencv_highgui_pch_dephelp.a | Bin library/arm7/libopencv_imgproc.so.2.4 | 1 + .../arm7/libopencv_imgproc.so.2.4.5 | Bin .../arm7/libopencv_imgproc_pch_dephelp.a | Bin library/arm7/libopencv_legacy.so.2.4 | 1 + .../arm7/libopencv_legacy.so.2.4.5 | Bin .../arm7/libopencv_legacy_pch_dephelp.a | Bin library/arm7/libopencv_ml.so.2.4 | 1 + {lib => library}/arm7/libopencv_ml.so.2.4.5 | Bin .../arm7/libopencv_ml_pch_dephelp.a | Bin library/arm7/libopencv_nonfree.so.2.4 | 1 + .../arm7/libopencv_nonfree.so.2.4.5 | Bin .../arm7/libopencv_nonfree_pch_dephelp.a | Bin library/arm7/libopencv_objdetect.so.2.4 | 1 + .../arm7/libopencv_objdetect.so.2.4.5 | Bin .../arm7/libopencv_objdetect_pch_dephelp.a | Bin .../arm7/libopencv_perf_calib3d_pch_dephelp.a | Bin .../arm7/libopencv_perf_core_pch_dephelp.a | Bin .../libopencv_perf_features2d_pch_dephelp.a | Bin .../arm7/libopencv_perf_gpu_pch_dephelp.a | Bin .../arm7/libopencv_perf_highgui_pch_dephelp.a | Bin .../arm7/libopencv_perf_imgproc_pch_dephelp.a | Bin .../arm7/libopencv_perf_nonfree_pch_dephelp.a | Bin .../libopencv_perf_objdetect_pch_dephelp.a | Bin .../arm7/libopencv_perf_photo_pch_dephelp.a | Bin .../libopencv_perf_stitching_pch_dephelp.a | Bin .../libopencv_perf_superres_pch_dephelp.a | Bin .../arm7/libopencv_perf_video_pch_dephelp.a | Bin library/arm7/libopencv_photo.so.2.4 | 1 + .../arm7/libopencv_photo.so.2.4.5 | Bin .../arm7/libopencv_photo_pch_dephelp.a | Bin library/arm7/libopencv_stitching.so.2.4 | 1 + .../arm7/libopencv_stitching.so.2.4.5 | Bin .../arm7/libopencv_stitching_pch_dephelp.a | Bin library/arm7/libopencv_superres.so.2.4 | 1 + 
.../arm7/libopencv_superres.so.2.4.5 | Bin .../arm7/libopencv_superres_pch_dephelp.a | Bin .../arm7/libopencv_test_calib3d_pch_dephelp.a | Bin .../arm7/libopencv_test_contrib_pch_dephelp.a | Bin .../arm7/libopencv_test_core_pch_dephelp.a | Bin .../libopencv_test_features2d_pch_dephelp.a | Bin .../arm7/libopencv_test_flann_pch_dephelp.a | Bin .../arm7/libopencv_test_gpu_pch_dephelp.a | Bin .../arm7/libopencv_test_highgui_pch_dephelp.a | Bin .../arm7/libopencv_test_imgproc_pch_dephelp.a | Bin .../arm7/libopencv_test_legacy_pch_dephelp.a | Bin .../arm7/libopencv_test_ml_pch_dephelp.a | Bin .../arm7/libopencv_test_nonfree_pch_dephelp.a | Bin .../libopencv_test_objdetect_pch_dephelp.a | Bin .../arm7/libopencv_test_photo_pch_dephelp.a | Bin .../libopencv_test_stitching_pch_dephelp.a | Bin .../libopencv_test_superres_pch_dephelp.a | Bin .../arm7/libopencv_test_video_pch_dephelp.a | Bin library/arm7/libopencv_ts.so.2.4 | 1 + {lib => library}/arm7/libopencv_ts.so.2.4.5 | Bin .../arm7/libopencv_ts_pch_dephelp.a | Bin library/arm7/libopencv_video.so.2.4 | 1 + .../arm7/libopencv_video.so.2.4.5 | Bin .../arm7/libopencv_video_pch_dephelp.a | Bin library/arm7/libopencv_videostab.so.2.4 | 1 + .../arm7/libopencv_videostab.so.2.4.5 | Bin .../arm7/libopencv_videostab_pch_dephelp.a | Bin .../cascade-files/haarcascade_clock.xml | 6202 ++++++++--------- .../cascade-files/haarcascade_eye.xml | 0 .../haarcascade_eye_tree_eyeglasses.xml | 0 .../haarcascade_frontalface_alt.xml | 0 .../haarcascade_frontalface_alt2.xml | 0 .../haarcascade_frontalface_alt_tree.xml | 0 .../haarcascade_frontalface_default.xml | 0 .../cascade-files/haarcascade_fullbody.xml | 0 .../haarcascade_lefteye_2splits.xml | 0 .../cascade-files/haarcascade_lowerbody.xml | 0 .../haarcascade_mcs_eyepair_big.xml | 0 .../haarcascade_mcs_eyepair_small.xml | 0 .../cascade-files/haarcascade_mcs_leftear.xml | 0 .../cascade-files/haarcascade_mcs_lefteye.xml | 0 .../cascade-files/haarcascade_mcs_mouth.xml | 0 
.../cascade-files/haarcascade_mcs_nose.xml | 0 .../haarcascade_mcs_rightear.xml | 0 .../haarcascade_mcs_righteye.xml | 0 .../haarcascade_mcs_upperbody.xml | 0 .../cascade-files/haarcascade_profileface.xml | 0 .../haarcascade_righteye_2splits.xml | 0 .../cascade-files/haarcascade_upperbody.xml | 0 .../cascade-files/hogcascade_pedestrians.xml | 0 .../cascade-files/lbpcascade_frontalface.xml | 3010 ++++---- library/linux32/libopencv_calib3d.so.2.4 | 1 + .../linux32/libopencv_calib3d.so.2.4.5 | Bin library/linux32/libopencv_contrib.so.2.4 | 1 + .../linux32/libopencv_contrib.so.2.4.5 | Bin library/linux32/libopencv_core.so.2.4 | 1 + .../linux32/libopencv_core.so.2.4.5 | Bin library/linux32/libopencv_features2d.so.2.4 | 1 + .../linux32/libopencv_features2d.so.2.4.5 | Bin library/linux32/libopencv_flann.so.2.4 | 1 + .../linux32/libopencv_flann.so.2.4.5 | Bin library/linux32/libopencv_gpu.so.2.4 | 1 + .../linux32/libopencv_gpu.so.2.4.5 | Bin library/linux32/libopencv_highgui.so.2.4 | 1 + .../linux32/libopencv_highgui.so.2.4.5 | Bin library/linux32/libopencv_imgproc.so.2.4 | 1 + .../linux32/libopencv_imgproc.so.2.4.5 | Bin library/linux32/libopencv_legacy.so.2.4 | 1 + .../linux32/libopencv_legacy.so.2.4.5 | Bin library/linux32/libopencv_ml.so.2.4 | 1 + .../linux32/libopencv_ml.so.2.4.5 | Bin library/linux32/libopencv_nonfree.so.2.4 | 1 + .../linux32/libopencv_nonfree.so.2.4.5 | Bin library/linux32/libopencv_objdetect.so.2.4 | 1 + .../linux32/libopencv_objdetect.so.2.4.5 | Bin library/linux32/libopencv_photo.so.2.4 | 1 + .../linux32/libopencv_photo.so.2.4.5 | Bin library/linux32/libopencv_superres.so.2.4 | 1 + .../linux32/libopencv_superres.so.2.4.5 | Bin library/linux32/libopencv_ts.so.2.4 | 1 + .../linux32/libopencv_ts.so.2.4.5 | Bin library/linux32/libopencv_video.so.2.4 | 1 + .../linux32/libopencv_video.so.2.4.5 | Bin library/linux32/libopencv_videostab.so.2.4 | 1 + .../linux32/libopencv_videostab.so.2.4.5 | Bin library/linux64/libopencv_calib3d.so.2.4 | 1 + 
.../linux64/libopencv_calib3d.so.2.4.5 | Bin library/linux64/libopencv_contrib.so.2.4 | 1 + .../linux64/libopencv_contrib.so.2.4.5 | Bin library/linux64/libopencv_core.so.2.4 | 1 + .../linux64/libopencv_core.so.2.4.5 | Bin library/linux64/libopencv_features2d.so.2.4 | 1 + .../linux64/libopencv_features2d.so.2.4.5 | Bin library/linux64/libopencv_flann.so.2.4 | 1 + .../linux64/libopencv_flann.so.2.4.5 | Bin library/linux64/libopencv_gpu.so.2.4 | 1 + .../linux64/libopencv_gpu.so.2.4.5 | Bin library/linux64/libopencv_highgui.so.2.4 | 1 + .../linux64/libopencv_highgui.so.2.4.5 | Bin library/linux64/libopencv_imgproc.so.2.4 | 1 + .../linux64/libopencv_imgproc.so.2.4.5 | Bin library/linux64/libopencv_legacy.so.2.4 | 1 + .../linux64/libopencv_legacy.so.2.4.5 | Bin library/linux64/libopencv_ml.so.2.4 | 1 + .../linux64/libopencv_ml.so.2.4.5 | Bin library/linux64/libopencv_nonfree.so.2.4 | 1 + .../linux64/libopencv_nonfree.so.2.4.5 | Bin library/linux64/libopencv_objdetect.so.2.4 | 1 + .../linux64/libopencv_objdetect.so.2.4.5 | Bin library/linux64/libopencv_photo.so.2.4 | 1 + .../linux64/libopencv_photo.so.2.4.5 | Bin library/linux64/libopencv_stitching.so.2.4 | 1 + .../linux64/libopencv_stitching.so.2.4.5 | Bin library/linux64/libopencv_superres.so.2.4 | 1 + .../linux64/libopencv_superres.so.2.4.5 | Bin library/linux64/libopencv_ts.so.2.4 | 1 + .../linux64/libopencv_ts.so.2.4.5 | Bin library/linux64/libopencv_video.so.2.4 | 1 + .../linux64/libopencv_video.so.2.4.5 | Bin library/linux64/libopencv_videostab.so.2.4 | 1 + .../linux64/libopencv_videostab.so.2.4.5 | Bin .../macosx64/libopencv_calib3d.2.4.5.dylib | Bin .../macosx64/libopencv_calib3d.2.4.dylib | Bin .../macosx64/libopencv_calib3d.dylib | Bin .../macosx64/libopencv_contrib.2.4.5.dylib | Bin .../macosx64/libopencv_contrib.2.4.dylib | Bin .../macosx64/libopencv_contrib.dylib | Bin .../macosx64/libopencv_core.2.4.5.dylib | Bin .../macosx64/libopencv_core.2.4.dylib | Bin .../macosx64/libopencv_core.dylib | Bin 
.../macosx64/libopencv_features2d.2.4.5.dylib | Bin .../macosx64/libopencv_features2d.2.4.dylib | Bin .../macosx64/libopencv_features2d.dylib | Bin .../macosx64/libopencv_flann.2.4.5.dylib | Bin .../macosx64/libopencv_flann.2.4.dylib | Bin .../macosx64/libopencv_flann.dylib | Bin .../macosx64/libopencv_gpu.2.4.5.dylib | Bin .../macosx64/libopencv_gpu.2.4.dylib | Bin {lib => library}/macosx64/libopencv_gpu.dylib | Bin .../macosx64/libopencv_haartraining_engine.a | Bin .../macosx64/libopencv_highgui.2.4.5.dylib | Bin .../macosx64/libopencv_highgui.2.4.dylib | Bin .../macosx64/libopencv_highgui.dylib | Bin .../macosx64/libopencv_imgproc.2.4.5.dylib | Bin .../macosx64/libopencv_imgproc.2.4.dylib | Bin .../macosx64/libopencv_imgproc.dylib | Bin .../macosx64/libopencv_java245.dylib | Bin .../macosx64/libopencv_legacy.2.4.5.dylib | Bin .../macosx64/libopencv_legacy.2.4.dylib | Bin .../macosx64/libopencv_legacy.dylib | Bin .../macosx64/libopencv_ml.2.4.5.dylib | Bin .../macosx64/libopencv_ml.2.4.dylib | Bin {lib => library}/macosx64/libopencv_ml.dylib | Bin .../macosx64/libopencv_nonfree.2.4.5.dylib | Bin .../macosx64/libopencv_nonfree.2.4.dylib | Bin .../macosx64/libopencv_nonfree.dylib | Bin .../macosx64/libopencv_objdetect.2.4.5.dylib | Bin .../macosx64/libopencv_objdetect.2.4.dylib | Bin .../macosx64/libopencv_objdetect.dylib | Bin .../macosx64/libopencv_ocl.2.4.5.dylib | Bin .../macosx64/libopencv_ocl.2.4.dylib | Bin {lib => library}/macosx64/libopencv_ocl.dylib | Bin .../macosx64/libopencv_photo.2.4.5.dylib | Bin .../macosx64/libopencv_photo.2.4.dylib | Bin .../macosx64/libopencv_photo.dylib | Bin .../macosx64/libopencv_stitching.2.4.5.dylib | Bin .../macosx64/libopencv_stitching.2.4.dylib | Bin .../macosx64/libopencv_stitching.dylib | Bin .../macosx64/libopencv_superres.2.4.5.dylib | Bin .../macosx64/libopencv_superres.2.4.dylib | Bin .../macosx64/libopencv_superres.dylib | Bin .../macosx64/libopencv_ts.2.4.5.dylib | Bin .../macosx64/libopencv_ts.2.4.dylib | Bin 
{lib => library}/macosx64/libopencv_ts.dylib | Bin .../macosx64/libopencv_video.2.4.5.dylib | Bin .../macosx64/libopencv_video.2.4.dylib | Bin .../macosx64/libopencv_video.dylib | Bin .../macosx64/libopencv_videostab.2.4.5.dylib | Bin .../macosx64/libopencv_videostab.2.4.dylib | Bin .../macosx64/libopencv_videostab.dylib | Bin {lib => library}/opencv-245.jar | Bin library/opencv_processing.jar | Bin 0 -> 412965 bytes {lib => library}/windows32/opencv_java245.dll | Bin {lib => library}/windows64/opencv_java245.dll | Bin 352 files changed, 4660 insertions(+), 4713 deletions(-) delete mode 100644 lib/.DS_Store delete mode 100755 lib/arm7/cv2.so delete mode 120000 lib/arm7/libopencv_calib3d.so delete mode 120000 lib/arm7/libopencv_calib3d.so.2.4 delete mode 120000 lib/arm7/libopencv_contrib.so delete mode 120000 lib/arm7/libopencv_contrib.so.2.4 delete mode 120000 lib/arm7/libopencv_core.so delete mode 120000 lib/arm7/libopencv_core.so.2.4 delete mode 120000 lib/arm7/libopencv_features2d.so delete mode 120000 lib/arm7/libopencv_features2d.so.2.4 delete mode 120000 lib/arm7/libopencv_flann.so delete mode 120000 lib/arm7/libopencv_flann.so.2.4 delete mode 120000 lib/arm7/libopencv_gpu.so delete mode 120000 lib/arm7/libopencv_gpu.so.2.4 delete mode 120000 lib/arm7/libopencv_highgui.so delete mode 120000 lib/arm7/libopencv_highgui.so.2.4 delete mode 120000 lib/arm7/libopencv_imgproc.so delete mode 120000 lib/arm7/libopencv_imgproc.so.2.4 delete mode 100755 lib/arm7/libopencv_java245.so delete mode 120000 lib/arm7/libopencv_legacy.so delete mode 120000 lib/arm7/libopencv_legacy.so.2.4 delete mode 120000 lib/arm7/libopencv_ml.so delete mode 120000 lib/arm7/libopencv_ml.so.2.4 delete mode 120000 lib/arm7/libopencv_nonfree.so delete mode 120000 lib/arm7/libopencv_nonfree.so.2.4 delete mode 120000 lib/arm7/libopencv_objdetect.so delete mode 120000 lib/arm7/libopencv_objdetect.so.2.4 delete mode 120000 lib/arm7/libopencv_photo.so delete mode 120000 
lib/arm7/libopencv_photo.so.2.4 delete mode 120000 lib/arm7/libopencv_stitching.so delete mode 120000 lib/arm7/libopencv_stitching.so.2.4 delete mode 120000 lib/arm7/libopencv_superres.so delete mode 120000 lib/arm7/libopencv_superres.so.2.4 delete mode 120000 lib/arm7/libopencv_ts.so delete mode 120000 lib/arm7/libopencv_ts.so.2.4 delete mode 120000 lib/arm7/libopencv_video.so delete mode 120000 lib/arm7/libopencv_video.so.2.4 delete mode 120000 lib/arm7/libopencv_videostab.so delete mode 120000 lib/arm7/libopencv_videostab.so.2.4 delete mode 120000 lib/linux32/libopencv_calib3d.so delete mode 120000 lib/linux32/libopencv_calib3d.so.2.4 delete mode 120000 lib/linux32/libopencv_contrib.so delete mode 120000 lib/linux32/libopencv_contrib.so.2.4 delete mode 120000 lib/linux32/libopencv_core.so delete mode 120000 lib/linux32/libopencv_core.so.2.4 delete mode 120000 lib/linux32/libopencv_features2d.so delete mode 120000 lib/linux32/libopencv_features2d.so.2.4 delete mode 120000 lib/linux32/libopencv_flann.so delete mode 120000 lib/linux32/libopencv_flann.so.2.4 delete mode 120000 lib/linux32/libopencv_gpu.so delete mode 120000 lib/linux32/libopencv_gpu.so.2.4 delete mode 120000 lib/linux32/libopencv_highgui.so delete mode 120000 lib/linux32/libopencv_highgui.so.2.4 delete mode 120000 lib/linux32/libopencv_imgproc.so delete mode 120000 lib/linux32/libopencv_imgproc.so.2.4 delete mode 100755 lib/linux32/libopencv_java245.so delete mode 120000 lib/linux32/libopencv_legacy.so delete mode 120000 lib/linux32/libopencv_legacy.so.2.4 delete mode 120000 lib/linux32/libopencv_ml.so delete mode 120000 lib/linux32/libopencv_ml.so.2.4 delete mode 120000 lib/linux32/libopencv_nonfree.so delete mode 120000 lib/linux32/libopencv_nonfree.so.2.4 delete mode 120000 lib/linux32/libopencv_objdetect.so delete mode 120000 lib/linux32/libopencv_objdetect.so.2.4 delete mode 120000 lib/linux32/libopencv_photo.so delete mode 120000 lib/linux32/libopencv_photo.so.2.4 delete mode 120000 
lib/linux32/libopencv_superres.so delete mode 120000 lib/linux32/libopencv_superres.so.2.4 delete mode 120000 lib/linux32/libopencv_ts.so delete mode 120000 lib/linux32/libopencv_ts.so.2.4 delete mode 120000 lib/linux32/libopencv_video.so delete mode 120000 lib/linux32/libopencv_video.so.2.4 delete mode 120000 lib/linux32/libopencv_videostab.so delete mode 120000 lib/linux32/libopencv_videostab.so.2.4 delete mode 120000 lib/linux64/libopencv_calib3d.so delete mode 120000 lib/linux64/libopencv_calib3d.so.2.4 delete mode 120000 lib/linux64/libopencv_contrib.so delete mode 120000 lib/linux64/libopencv_contrib.so.2.4 delete mode 120000 lib/linux64/libopencv_core.so delete mode 120000 lib/linux64/libopencv_core.so.2.4 delete mode 120000 lib/linux64/libopencv_features2d.so delete mode 120000 lib/linux64/libopencv_features2d.so.2.4 delete mode 120000 lib/linux64/libopencv_flann.so delete mode 120000 lib/linux64/libopencv_flann.so.2.4 delete mode 120000 lib/linux64/libopencv_gpu.so delete mode 120000 lib/linux64/libopencv_gpu.so.2.4 delete mode 120000 lib/linux64/libopencv_highgui.so delete mode 120000 lib/linux64/libopencv_highgui.so.2.4 delete mode 120000 lib/linux64/libopencv_imgproc.so delete mode 120000 lib/linux64/libopencv_imgproc.so.2.4 delete mode 100755 lib/linux64/libopencv_java245.so delete mode 120000 lib/linux64/libopencv_legacy.so delete mode 120000 lib/linux64/libopencv_legacy.so.2.4 delete mode 120000 lib/linux64/libopencv_ml.so delete mode 120000 lib/linux64/libopencv_ml.so.2.4 delete mode 120000 lib/linux64/libopencv_nonfree.so delete mode 120000 lib/linux64/libopencv_nonfree.so.2.4 delete mode 120000 lib/linux64/libopencv_objdetect.so delete mode 120000 lib/linux64/libopencv_objdetect.so.2.4 delete mode 120000 lib/linux64/libopencv_photo.so delete mode 120000 lib/linux64/libopencv_photo.so.2.4 delete mode 120000 lib/linux64/libopencv_stitching.so delete mode 120000 lib/linux64/libopencv_stitching.so.2.4 delete mode 120000 
lib/linux64/libopencv_superres.so delete mode 120000 lib/linux64/libopencv_superres.so.2.4 delete mode 120000 lib/linux64/libopencv_ts.so delete mode 120000 lib/linux64/libopencv_ts.so.2.4 delete mode 120000 lib/linux64/libopencv_video.so delete mode 120000 lib/linux64/libopencv_video.so.2.4 delete mode 120000 lib/linux64/libopencv_videostab.so delete mode 120000 lib/linux64/libopencv_videostab.so.2.4 create mode 100644 library/arm7/libopencv_calib3d.so.2.4 rename {lib => library}/arm7/libopencv_calib3d.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_calib3d_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_contrib.so.2.4 rename {lib => library}/arm7/libopencv_contrib.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_contrib_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_core.so.2.4 rename {lib => library}/arm7/libopencv_core.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_core_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_features2d.so.2.4 rename {lib => library}/arm7/libopencv_features2d.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_features2d_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_flann.so.2.4 rename {lib => library}/arm7/libopencv_flann.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_flann_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_gpu.so.2.4 rename {lib => library}/arm7/libopencv_gpu.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_gpu_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_haartraining_engine.a (100%) create mode 100644 library/arm7/libopencv_highgui.so.2.4 rename {lib => library}/arm7/libopencv_highgui.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_highgui_pch_dephelp.a (100%) create mode 100644 
library/arm7/libopencv_imgproc.so.2.4 rename {lib => library}/arm7/libopencv_imgproc.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_imgproc_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_legacy.so.2.4 rename {lib => library}/arm7/libopencv_legacy.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_legacy_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_ml.so.2.4 rename {lib => library}/arm7/libopencv_ml.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_ml_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_nonfree.so.2.4 rename {lib => library}/arm7/libopencv_nonfree.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_nonfree_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_objdetect.so.2.4 rename {lib => library}/arm7/libopencv_objdetect.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_objdetect_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_calib3d_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_core_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_features2d_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_gpu_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_highgui_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_imgproc_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_nonfree_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_objdetect_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_photo_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_stitching_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_superres_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_perf_video_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_photo.so.2.4 rename {lib => 
library}/arm7/libopencv_photo.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_photo_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_stitching.so.2.4 rename {lib => library}/arm7/libopencv_stitching.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_stitching_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_superres.so.2.4 rename {lib => library}/arm7/libopencv_superres.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_superres_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_calib3d_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_contrib_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_core_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_features2d_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_flann_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_gpu_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_highgui_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_imgproc_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_legacy_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_ml_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_nonfree_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_objdetect_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_photo_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_stitching_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_superres_pch_dephelp.a (100%) rename {lib => library}/arm7/libopencv_test_video_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_ts.so.2.4 rename {lib => library}/arm7/libopencv_ts.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_ts_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_video.so.2.4 
rename {lib => library}/arm7/libopencv_video.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_video_pch_dephelp.a (100%) create mode 100644 library/arm7/libopencv_videostab.so.2.4 rename {lib => library}/arm7/libopencv_videostab.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/arm7/libopencv_videostab_pch_dephelp.a (100%) rename {lib => library}/cascade-files/haarcascade_clock.xml (97%) rename {lib => library}/cascade-files/haarcascade_eye.xml (100%) rename {lib => library}/cascade-files/haarcascade_eye_tree_eyeglasses.xml (100%) rename {lib => library}/cascade-files/haarcascade_frontalface_alt.xml (100%) rename {lib => library}/cascade-files/haarcascade_frontalface_alt2.xml (100%) rename {lib => library}/cascade-files/haarcascade_frontalface_alt_tree.xml (100%) rename {lib => library}/cascade-files/haarcascade_frontalface_default.xml (100%) rename {lib => library}/cascade-files/haarcascade_fullbody.xml (100%) rename {lib => library}/cascade-files/haarcascade_lefteye_2splits.xml (100%) rename {lib => library}/cascade-files/haarcascade_lowerbody.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_eyepair_big.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_eyepair_small.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_leftear.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_lefteye.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_mouth.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_nose.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_rightear.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_righteye.xml (100%) rename {lib => library}/cascade-files/haarcascade_mcs_upperbody.xml (100%) rename {lib => library}/cascade-files/haarcascade_profileface.xml (100%) rename {lib => library}/cascade-files/haarcascade_righteye_2splits.xml (100%) rename {lib => 
library}/cascade-files/haarcascade_upperbody.xml (100%) rename {lib => library}/cascade-files/hogcascade_pedestrians.xml (100%) rename {lib => library}/cascade-files/lbpcascade_frontalface.xml (97%) create mode 100644 library/linux32/libopencv_calib3d.so.2.4 rename {lib => library}/linux32/libopencv_calib3d.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_contrib.so.2.4 rename {lib => library}/linux32/libopencv_contrib.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_core.so.2.4 rename {lib => library}/linux32/libopencv_core.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_features2d.so.2.4 rename {lib => library}/linux32/libopencv_features2d.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_flann.so.2.4 rename {lib => library}/linux32/libopencv_flann.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_gpu.so.2.4 rename {lib => library}/linux32/libopencv_gpu.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_highgui.so.2.4 rename {lib => library}/linux32/libopencv_highgui.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_imgproc.so.2.4 rename {lib => library}/linux32/libopencv_imgproc.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_legacy.so.2.4 rename {lib => library}/linux32/libopencv_legacy.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_ml.so.2.4 rename {lib => library}/linux32/libopencv_ml.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_nonfree.so.2.4 rename {lib => library}/linux32/libopencv_nonfree.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_objdetect.so.2.4 rename {lib => 
library}/linux32/libopencv_objdetect.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_photo.so.2.4 rename {lib => library}/linux32/libopencv_photo.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_superres.so.2.4 rename {lib => library}/linux32/libopencv_superres.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_ts.so.2.4 rename {lib => library}/linux32/libopencv_ts.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_video.so.2.4 rename {lib => library}/linux32/libopencv_video.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux32/libopencv_videostab.so.2.4 rename {lib => library}/linux32/libopencv_videostab.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_calib3d.so.2.4 rename {lib => library}/linux64/libopencv_calib3d.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_contrib.so.2.4 rename {lib => library}/linux64/libopencv_contrib.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_core.so.2.4 rename {lib => library}/linux64/libopencv_core.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_features2d.so.2.4 rename {lib => library}/linux64/libopencv_features2d.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_flann.so.2.4 rename {lib => library}/linux64/libopencv_flann.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_gpu.so.2.4 rename {lib => library}/linux64/libopencv_gpu.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_highgui.so.2.4 rename {lib => library}/linux64/libopencv_highgui.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_imgproc.so.2.4 rename {lib => 
library}/linux64/libopencv_imgproc.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_legacy.so.2.4 rename {lib => library}/linux64/libopencv_legacy.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_ml.so.2.4 rename {lib => library}/linux64/libopencv_ml.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_nonfree.so.2.4 rename {lib => library}/linux64/libopencv_nonfree.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_objdetect.so.2.4 rename {lib => library}/linux64/libopencv_objdetect.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_photo.so.2.4 rename {lib => library}/linux64/libopencv_photo.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_stitching.so.2.4 rename {lib => library}/linux64/libopencv_stitching.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_superres.so.2.4 rename {lib => library}/linux64/libopencv_superres.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_ts.so.2.4 rename {lib => library}/linux64/libopencv_ts.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_video.so.2.4 rename {lib => library}/linux64/libopencv_video.so.2.4.5 (100%) mode change 100755 => 100644 create mode 100644 library/linux64/libopencv_videostab.so.2.4 rename {lib => library}/linux64/libopencv_videostab.so.2.4.5 (100%) mode change 100755 => 100644 rename {lib => library}/macosx64/libopencv_calib3d.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_calib3d.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_calib3d.dylib (100%) rename {lib => library}/macosx64/libopencv_contrib.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_contrib.2.4.dylib (100%) rename {lib => 
library}/macosx64/libopencv_contrib.dylib (100%) rename {lib => library}/macosx64/libopencv_core.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_core.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_core.dylib (100%) rename {lib => library}/macosx64/libopencv_features2d.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_features2d.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_features2d.dylib (100%) rename {lib => library}/macosx64/libopencv_flann.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_flann.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_flann.dylib (100%) rename {lib => library}/macosx64/libopencv_gpu.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_gpu.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_gpu.dylib (100%) rename {lib => library}/macosx64/libopencv_haartraining_engine.a (100%) rename {lib => library}/macosx64/libopencv_highgui.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_highgui.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_highgui.dylib (100%) rename {lib => library}/macosx64/libopencv_imgproc.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_imgproc.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_imgproc.dylib (100%) rename {lib => library}/macosx64/libopencv_java245.dylib (100%) rename {lib => library}/macosx64/libopencv_legacy.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_legacy.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_legacy.dylib (100%) rename {lib => library}/macosx64/libopencv_ml.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_ml.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_ml.dylib (100%) rename {lib => library}/macosx64/libopencv_nonfree.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_nonfree.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_nonfree.dylib (100%) rename {lib => 
library}/macosx64/libopencv_objdetect.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_objdetect.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_objdetect.dylib (100%) rename {lib => library}/macosx64/libopencv_ocl.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_ocl.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_ocl.dylib (100%) rename {lib => library}/macosx64/libopencv_photo.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_photo.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_photo.dylib (100%) rename {lib => library}/macosx64/libopencv_stitching.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_stitching.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_stitching.dylib (100%) rename {lib => library}/macosx64/libopencv_superres.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_superres.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_superres.dylib (100%) rename {lib => library}/macosx64/libopencv_ts.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_ts.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_ts.dylib (100%) rename {lib => library}/macosx64/libopencv_video.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_video.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_video.dylib (100%) rename {lib => library}/macosx64/libopencv_videostab.2.4.5.dylib (100%) rename {lib => library}/macosx64/libopencv_videostab.2.4.dylib (100%) rename {lib => library}/macosx64/libopencv_videostab.dylib (100%) rename {lib => library}/opencv-245.jar (100%) create mode 100644 library/opencv_processing.jar rename {lib => library}/windows32/opencv_java245.dll (100%) rename {lib => library}/windows64/opencv_java245.dll (100%) diff --git a/.gitignore b/.gitignore index ff5859f..4b08cc7 100755 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,4 @@ lib/ *.DS_Store /bin *.ctxt -*.class +*.class \ No newline at end of file diff 
--git a/lib/.DS_Store b/lib/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0;k*cvb!VTin=)r%mAYUGtLYwYp7UM zSZk3{k*!6AMY-vgn~`lsxfK=mP@!Rw@;s4IZAHaXSo_rP^S$?TXYM&%R4rK*41K1%VR!x+u0g2r2VJiDuec!JbVq!y z4WsF<4LN5RY77EJiMtiYGW3?pfnJp5cIB@U0T@?Z_VAf-JsE1-&Z}IRIe&co-FXk4 z^@oQZExG5PPrv4pH%?}qjzl~S4>eGqfbu~-3%gOIqZ0ZzLfNm&U_MO}j_Xa(cR_ie zz6t+Fp}q&VD9rs(avg-Z3HrZ+PeD~fKLFkV{srzILYOlE%3J;+yW(69XJNJ`=GuF^W88@Sv?8$4%l4)o)5dvKrfe7SOc*82kgEM`QL&5 zE-1M&!T*H)5V#xaYjCdsKLB+D)E=ljsP{pgZw7G%;in2=&SxA_*zHh{LfsB^Km1(| zH39b*!Tb>X55s&h^dE)YCt$uBDhB-=)c?TkUeFD-1L`&?xh^tc{Cx@S!Z5uSa$X7Z z-yuV;w?N-O_dd9N8v6f%dJFXR;1lroIQ)fSo`HEM!+js-HmLW&POfz5zYG0!pj@v4 z8^B*d1rX*BP?yr}KcL?R)d2HRgn13rF!aN)`xoWa!d!x|SHk=gs6Rn}1^7YOe+>Ft zpzno}>zxSqcIZpsCf5MWBQU=hssna$n62ymu=^d9T&IA4hx#M*9`MTye-6x5(6=Gn z6!hv{?F$6*M=?d4#CZqFdhY0$qK_HP3}1Jw!98Ya~{k;hx!}T{V>aQJ=_9Na!o?r2K`^) zb{YKaqWvquKf=5nY6@<@20snEG$RzuunR(81iKj2CaC{_vaWB#-G?ymg56fA55l1Y z=Bw%Fhj4#0^uGklXg3c1Ec~>CN1%dG_rrW9!oCcC`k~JUn?Sj?Kz$Q_z6xbruwRBg z1Afkdnufj$c3*(wZ$K}&5%!OOm%{B;uq$QQZ$Y0y{eOYQqKEoC)PA^q9O2#uw!j>M zelyg&p_-w-2m6nLzlYm*p!Y(>p}z|vdSU+r)DK|yD%gD->SE|02g_l9E7Vy~|AODg zVD~<#55j#AoQ3@e)aT%K1(*Z7&x5saKLjP$JurU)_Wy*+gt`!Bx%NVToPItAyGx+1 z6+5U?q5my-5%^{J{Vn`8!8{E0Iq2UH`|m*iVeqRkhoF7|b2iK&@UJlc1xl_{VV;J4 zG1L1(-U0PT*u4dIUuJmmS`YVQu$QYD+y=8;e}fjSBL7Py@a?uYq< zP#=c56Xu%{ZV2W(q2^%z1K0}nMdcWL%^W3kP! z)utEiotpWRHuK#!y=d!cev4+mUSsZacpO6?u`Z9LS%VL*VZQ_RpM;#V*KoJ2ucPKZ zGGsZ}_yF`@WtygGpAI(K{OH;?SX^bZ9D#nf%`DmjP~V{46Xc&s9Wk55VW#CV@>Gl^ z*7X5PL;e8#PwH2|_fc=hnGAmeEq8(+wtJxd+cvx3L%#vxBrBB7b;Q($9-XvHmsPUJSd(8E!x9 zw!(Ze)Jvd#4s#jQN1zTv$u$o9`{92I>dnx94a_!NQh!c?{YEIc9)jwJx{v02$PU;Y zf&NWUEL&v-V3+S8a^;ikpBgHxt=v) z{Ebro6xl~VkAq)?`V7sR!SBP*BF(4Kzu>K~dmq$`p+5%R3;qdw2HXM4^&ryr6!lp! 
zf6K7N{R{bB@HKRYSJz*mCSbma?%#+0L(~iY%ck#!TOQ0m1Z!;ezk~iCPUOoyM> z!EHA<1U_lY(*rP{3Hw(vtOWZT)afvfi2>IksA~AB1n))I>*==&=AXj+CGfSdtAHQ5 zHkxqt(%%?)4S5Ir{Sbb7puONZlw74y z<#78K+gdnfFztD5#={xICEs{r<2f_ewkC!yXBhX)Yf z2=pJfJb^1Pe+A(_2X!;l7omOuyZ1wV2zt5x0sUQ2at(reXnz6N1N8=|G^iQKzYgvn zfjSF*e+pg*`^%uZpe}-v>vAv`YLaHLYlHr7s51~|4r&zoUqQVcdbu`0{{zTB0P`m3 z_ka&W{Q_?9h52Jp*7X$p9fjcKpqJ~vVgD_tR@hw(UJbjO zz z{{b)umS@7Rb)5|TKVf(^*n==nL%$9Fc0s=a-Ub#yeF^SQ!~Auq7|gGtfEUAl9(M16 zoppTfKNUFn7W2M>aWcgnk~X67FAudKc8AP!GfJpTIZ;{|I_F?DoR%r=bo)zYlhD z9RL$hKY`tMp*{n9`SUAMMw|FKlu@3;h=m_IxP0-e|)0F!3AXQ0^P= zaFrL#f6mxWs&eI&XQSu$xw5fD*fQu!&v4!0&+c<2E^wWccT(ReAHbk~zH8`6dGm|z ze9=i+7v(;k_A-$rh=4fBrvbueu~0O{q^j}fBTa^o_WEJjhiPPJ+(ai^t}(A^nhpa{-6Bu;IBUM zwL0qSNpH*cX0z??c}k`{kxT{Pd3pOWs)- zn!TxUz{vLc+JN)PL#c>qU!Nur~Y92 z@yoBh;$5e{`l0OB&HWEAp7w^_=bv-g#=pP${w>#jH(r%<-Df}kZ2#LXc;shSzxwX4 zl)Y;F&Oe=b%c~l`QyI!0|C^nv`!lv16mhTGt-9ZJqiUXzn;c%^{{u3~ z@e*@~mfr7$Emt$WqgwjYUZ=W$IA1k)x2fjoM%8>L`;pj6{BN-qEi?yh1BJMM@)G>v zB}xCC=D#4r`{hnhaqr;hSaB<0a<5X!SRJtL19?KX!_0zE5i}m&l~eOGJgO zK^Jyq$4v&k3puWUE8VxH2{|1m9ny;}X=_i{_RnZEP1 z_A{aNzc*{1dZ z1aG?3GMVYg)%wq^TU7U8L^YQNRr7Y6e2MQ}TKj(66{>%E7HeJN|2nNcp3%yGm)4%^ zo2<}g`hQoUns3+I$IrC>TlQAff2JmHRP%qaR{nR_tKmPbji;Gf{AX(I<)oLW;XhKO zn(x=f!_MUXyeI2Z9I5b8~>WM@@v(` zpQp6>=CloJ{J+-X`_M_M`-87n&A+#Wm!|MHtv@6W#=uR=!>x@cmumClVXrFRqt*Wt zn*T%Edg!4_HGGpcA9Z!B?kie;&ei6Fm%Ul_e^47=zmibh4{PoHt+lHAMYj1+vi3G@ zKI_)n%X_r?EKFDBeM1{xj>*j&FQffw@?%>2xma6|rD^Ni_T6x~7_3tLFKR%}A_tf)M`Lds6T_XPqt^Vq?@$Q&b z-}|)wlB@Oq`D{zNS$?0ANO*}>_M@yz`p>=E`2LKo|BCycS}fn}PkUAKSb=K(t2UmT zWs@iIci*7;&(rGb5pDeYjJEzcS6k0~J4cmwxvl&pzARh&mHh0`#?zOdr-py*64hL- zwbu)@`t+8m{x-aTK$Y@^5?br@HVagt!VSdSy`(5&ujhj7k<^fSG%A3y(E&C zXn$^7FG~4$Yvb9BR$jO7utJ;d`}Mb|<`-S3noG3&{XL?(S8Mah7OnrDthLuWHF*cM z{#>Dz-#fMP`@Pnl&b8%V%A<9gn!e9!<^2lH{Gb-!9a{gW)7B4fDploYY4x#B%l{Mk zs{fGIzpvBcFVouVllpj~mEW_srQ}Qf2SX{dB<&)018iRACmUR8d_wW@h~L&_}iZLp0mVtz{N z&(GAQ{EPf=XzSN2wEEqljo14m5HHbwe!FEd%lmJQs=2*cHGf9S-z{2u`A=>9d@QDh 
zAJpc{EqSW@>$Lt_rmZLcrnUD=+SKsFr>o|NZ1pMmy+CVkFV@zRWmRhUpKn#oE1LY5 zl&kJH*!qLScSIX+-zAp3L_463FPU0>`oorQvwo&7SIzHzqiTMM);^20`D^e^s{c=G z_3@{hRrk5`Rr9n~{=Ym`bx+saGi~iz^6Sys+b6XCcyX;NFHai}zpJ(Hd$sYVS!;jw z+IU`}mH%Q;jW40eFTPZDf3?#x?_wfMhtrRx4j zyJ~(y1_@pg-vzrYlNsNK8&vbZwDr@cwfeeCD-WMG9^V~P<$YIcZ)a-h{o}}ueJAD zt^9LCYIwJ{e!Bh=)qOx453iI)$4j*D)%we6ud!Us{`lL=RPzV4^u9~0?~65a?DcB+ zJGK7%Ev-Hx+Whiztvt@r#)rqX`u&%-KDttykDu1oSM9G;)3^63)qJ^ay(8r{s*Rtw z2UP!`)W)}ywE1qs4XXdYWUA(~weVwF`f4?KCu!@23$^xde>qJ=r)l}SL+hXK*4opO zW;R}!Vv?miKBSG`A#Hwqa+8`roPS7O5?;=BS(li*1o=~wf7}$y;G~z=lRX5{H@yfEAJgym(;Isvt=^d z(>=AS`ODp^`68`FLns_s6PI@}zv?TK~UBn_ns`RR8yB z>xn1oRrg*kJ#*UpB4_TbOX5FIYrj*r@mKO&+-LbV&-}{SW9t(4E88rSS)XTX?fp;M zdhZ#{{o7i8aOfy`N%}gp^`v#aOFW7D+qLn*IQ%P)xWxTUTKoAQt^WRYjp5b!BmVE# z#>?GWd-?7as{dN8{Xe9YZz4za-)bArrTnV3^yF*f`y-nB1>014mulnhN44?n4lTZa zcBXG!-J^}yPo1KMe~s22UZKsOOWJsM z$=lTM5AIOS*J$nWJ6d}_5Lf-zYvuoYt^IDZ^%rSBm0EsJ-mA)cv6kLDwfl$3GS$Cd zlebG-51p#zr$;N_^R)CoT&>E#R4ZSP)}DW&tuJF*`O5nu)+PDht(DKf<(8}2A3vhC zpH`3RewQ}?riE4a2ekU{l}^M<;{TV{em0+Jxtj5RSep+rBoHsren89bTV7(hn&CfG zsG2uv@wI8=c_>@;AJyi!*VU`;Z{4Jtr?v4Qpr!AhT6^+o<*`Rg-|w~d7S`sg_iOTA zt+j_yZ9IIVHh;LZ{C!v(fA(wb`+M4W70*}mf1&0+r?vM-wf5GbttWnbjwv^jGr?mK=(%Ro#m+JqAT6=q`R^I1p z^>xMdYWPeo{MB0f*s0aes9{`2UYX?wEF(7w!RvxQ2l4v>PL$9S6X?0T`TXrRyF**=ALs~%3Yjp+M6;_iF9!{*dbbZCZQ& zwj`35XkT=>Wire6Qf<5&(b~hqT7Q08>%R|Z^WP_%Rr$sT1a0x>)~W9QN*Rsz^3yk` zEF``1KC^X+`5z^g$xMHiHeXL`{eM&&zkcyXHN5X!)qL*-s`=>Es(C@{UnQ5R?myS^ zZ%e)_8(t4+>ksQ4S&<_7E7khnJzD*wY3=JQt^a(`Hr|N5FKPYr3N3%vZk3qLOZ@*@ z>z}si)p&>g46QwULR&9ymrlY<OLd;gx+pP$n5f46Nsm-4<&bH8t!Du2w@AjJRwX!4)FRrP+QT`h|4-s2s-5umI#;@D>rFU=_hg!@o#tk=|KoTcG!5qST<5qxe+%9{ z{xS+3^(g+Yfc=&DScfEA%t6?nhZm(~SP}EJ@c-8PPB*@*DCVbc#5>maq#56+6SGXu zcl^Mt#Is#zxE>1Nozjsr40&haZzbyQ>kp>6uBW`ug3ltpY?@2oj`!hts5Ha%r+>|2 zedsSpGu~Ym`Q2#mVYth*c9!dvt_9?01o25*IuBp=y9Vp+9dA$Ne;)1Yf-qP=1nLDZffBy*p6QziCg& zKY;w+`<0aWw`kAbdmv>#Ed=?ol(`N0`wcdE8zEofyBz-gpG`B~9T)RXq;C-Um1V5t zH-z>boJljjuPN>~!`)tg=l0>gj4aq7Wt|Eso6=T{67Nu 
zU$x1fL3*#cCFQ?X%m0^9{{DBT+`o?Y_lAWu!u-{5o^E_|LEPVq z@_qKcRQT)BKCbz2nhVXsDF4r(JeLqYo$0$D>z$82bee1OZ8E;5yY`|yKG~BBU!s-A z0Kz?KYu{3D=kWdPv+;iROSSqw1i8UGPIt-o0VI7FYx#Hid~F?(PG3CXk0pFQmoL;A zO1ONj0GwKS{G#=TLpKLqzPEJy>Vj>dcp@092>au4%r(AicNVmCy9;*s6Pv@~NQ*xa ziB%R9(8|n=BTB1>3H0Fl4+DK=cZ)+^laaCtL)Ycgchz0CxIunL9jhka_KCeF( z4>oi~!@ZRrYy z1H1j8_C)c>52y>V6#6Jq%IgoIa#61Vh&1b!$E&%SCj?cjI!?Zp-5ZWX=C^? z&4_Aeq%(+IceNxWDLpN|hJDc>N+S}3_qLuMUo;qtN2Kx+`+VK`QYAG~ily)yj3z>n z&ME}lR#RJ_@F#-4s?K;t0rRfr8-gtfpJc-Bzb=T>#wCE%fET5Y zcH>m~67#Ccb>|0y5GXAsSmh0Q+w0wEeAjmFy#WH%NntD+@n7GO=sF}%emq-`g5eammC3Z$STVugsD%hsBV4@+^a^u#RnNPIQ zWUYh?0+x&ddaHoa9bo6O0;*#NMJTcPyJPOTT{k@svRkVyJ#ya9?h~cm_Qqh z#B5HbmBDyREEE;no&=hP>Rg<1#^BV_9<+t^)Yoin5K%@+7YEQCV-XpU%&L&AC2M+z z(MnV18e@DkLzdKsIxtFX2}hc(1a89M;qQo7xFMq>*wGT*hoI6yD$rZdF^zx)`AAR8 zjn)2GV6Q(GtnZ3OBe6uPsEeC1;sj%8_= z=98*QV|W%nCFo{>P+kPrs{RKDDW)w{N(+KQ2TG=ut3TZeyb zUy_nkAfuxsOF@@dY*e<@K#jJA##oPSd`GaO9(@clT<9<1U?*Ip9r-{`1~xB71Q=T-j+6&A2@HiSU=r(+`CEJk zF_9!CR`{dR|5Fy#e!sc;2(^Z=4)MoZke`&ZcUOH)Lo(rvV0S1Gj2Kyxj3;9a#)6S8 zJB-;+x87EvS#S4;JN%vO%ez`yx?-`QO!#I;!`x$Pq-0t;qHr+Q8=K?WYJ=0q=D3j> zv(OQ@Ilp0?+F8l^fdF(tpVT|GSWIw2uAmf|Pn@ONh=WT9RwLmix;m^|@m zYO$iBlR1`iEn`bUG67Ca0u>yI?L)f?_%V7Q`{59#QbdWN1G;!sPtww8Sjo~x-o(Y4 ze2p|13#3Ya!Y?ky^36wkp;=&Y8Lm4b-7-wpC)~w8U!px0+3Rce2Ykj9ApN^OQIKyrpaZle+I>M;aCx`4 zaETTR8o}Uqr!wr7wgnTrgRw-g2LsgVQO_8?cE#EpM=uOsr7~!5#*kc4)!Hh;WwdSS zY7W*#B8jS2X}boh`v?hE~;Q6II333DnE>+n=jIQ113}Nz8(H(6vMm^JsK52i}!5&mN?6+XC96(rU9xy`OX55#EsX#)R z0mKTX&9RnRV}O9(Sc`Zg;eBn9&IXjY=^0|HyTV}!x;Yw+MS8?X(g`tE23sK-_t?Ct z3KWEIhC9kgS>U5ojbf61EE=P*lHQC3IYLRt2W0iS6$v%vn9E&nEP^{lBZnfJH(B0@ z6d5ek`vKU=eG;;V!3YL(d_{$6GdL!Kl%urE)QoBcQKw(imeZ^iRQh{r1z>uq!0;6F z+iaU-30M547;g7ahe*BDWqp?{IxxD?pDmBL`EK{)=FZ3*);i%r&YGgf{E)QA3d4!!T7?rs}R#3rCtNL;g0?TSX+y1UN*yx?B*}DhaC;j1oWx zsS2aJ#e>pRv5YnTw#W!B>&YFe5hk;m)HRoBr!LM+J7fMwAh{96j>fgYSVUxmvAJQa zhinE*YYlitKiwRY>1m5}AB+#|<|t7N{;)UOlNW2XkkMz+|E+|>b#r%HeW2Ua;IFYJ zvntyh`>bi9pt7bjj!pt!SQy3o2KoctaAbaYAlC>!r;}>dX{XM6iA| 
zQdEQS#gJPa>1u12p(rlpEUl}nnG#?yL~l)v+D2}TdJ;>NL_FRc!Tlrz<0e8Ps*EJi z>I1SpB-IvIZ*gHKvlC_jX?kMfyn$^YlzS}HV>Cw*WR6Z1?YMCgqYlw_`M?{8&;5m?u%^+GaQWGeKGn#S2R2r zjSMb3(P;xoe>>rD_JRFCbRS;T3Njt!(?YX8o}-z z^w_(Mw@1R_+tjl~)JMYIK~74BzM`u+WUQ|u&A~8y*LPsi*Djj&$X;7AWFR%#E0UBP zV5v8nEcM3l0=*H`9NjFFIl94Q3`w9Bj$v-9!;MIH(764O1O#be>XUuSn7v~OSrQww z0=81KN>ctGh=Kpmn>581(kKO zhm!27u-b|dS|EdgF$ACsdP6b^SM`WP)lFT-6vXJvq@w{~h>+b~yD04ZAwXq?{H^Ft zU>Y&*08Fv6h6=R>J25(lvk?{cv0@=fmua-z>5R9Uv&iN^zz~S3fE2l?%UE!8i_sUO z*xnaAh%i(M2U8{tZM3aZ@IC)8JfDT&Q7doF%gO(5{QM`y37d(-N0-b zMm;n{jE*St5e6-@{rfuu=H%<`O31nh64;u&6`RF?^(>dx(E4KyxHmBp>BZ<2l}3kh zx8-%0QAE3>gp$1j`W;>2L?{~GXXY0sBa94Qk{_7sMOLhCQ;A(YnzojeNj? z*{Z#&0S4S<1>3NyGqh36l2Q^ftt4v|w>PwcU);vTLoMbuvC)KU%!xqims;pBU79lt zG*arx?hNylUH$;d4b@<`U87Xte{*XqCOCUh%P_&)MOlboD!|S!wtKf#AnLUow@8tQ zqtRVs{%E`OD{1|lBt&c8FVUhZj;A0f(YjDuC}5Aj3JVh{XD%#7XACO4p{tDf_d2d; zLb6hmjcL22$`GrpA{L3qQwyGoh_T=x!twUBkT?rYMndho#C@d>3al@{ij56kZ zrD&}?L}|4R5v%HDf<`ph@JBToH%*3((c(oXw@c`57#a3rPN>~3l2mIayK1R@&EHl( zmU6~pL(|Ee#BoCskc9#LqX#Ajx^PR?j!GFpL{LSf+aE&{!b&dXbqgM#S$))aj%fOY z5St6U+e~I(mTg1W1M)WzxH=eWYfl(G&>UCH-pMf#evtPa)+!6_0t*rhGq5-HM!PpE zI<=5QL*=a|wyeyXykzp>W=#iI`(sgCQ-?^gF`GCHSQi+bP3nIKHb=G?Pg3AuHh1Yt zq1Js@b)t&QZ4$|lRV2p3R0>d9xh(CBu4<-*9TtvC*QP+ThiesRbQmO^TXT9MU`7Bl z6=oa-+}?1@ZlUocxu#Pl0%NK-A4b&2H{r=U9$;71ZLc?;zt_h-cq|`v=lgc}0_HMb z-262y#-oq=c%?jx!j29-Vh6L{hoKG+g=4`e9{d|y`i5WZ5tSw%>l7mmvIkvNSD)C_ zjdF{`YU<;VTOUt7R7rW4B%5WlW&ns+HU{0LG3*ZE88Vs`ipkvdNj-B(ZtQtvN7BlJ zv0d0=%b-Xt#uDyo)EW95H#3al38kKQqQfU|8%#H21JOv5i07Sx>10(yzN|4#xmp-0 z9H$7akn#-8+ACmExa})37P6jfq8Tt%o|}**%oa+5Qm&FL9;|$(JV=Si(`m!m zcyuh=(JA**-1_;Ahd+i7iQdUWbub)V%LB5=DzloRDnTAaTRCu$4_8|bR(p^lPUe8c z<1Xd{ALIOq@dU_&Xa4~!iRKe!w`VJs&FGntwoX*K@!;2Z((6H?VDc~y5nzAI2qpWr zmUIuIba1D$t?(enmUY=NO+J)Bmk#g1Lx6}?C+4$Nx4T9ju9`hWYR?`%xsB@J*IfY5 z$xfMkM(p5ojW*%n)2VGac+=Vtk2{zf$~b=_o9W1fY%rkp=%;#0jE5$e{P9R*dni=r z#sBvDBA?gT*hX92WR7g|Y~W_Agp(&z(91*8s!+7}Z&gr{@vsHehJ9Z9DHe0PI%UHV 
z&%MbU$z_YloJzddp@xpp6+)|=LotR)BuyN6ipgj<1-@Nf39KA2Px|*EX|m5OevCm= z8od}cwP4C=h?uE!D?zNm)sQQrahtDGIReE5xuwl-^LJr@_jhg&V)_qPg#uRYC3J~I zB72O|cJr;>cVoU#ZwU)3wS9f8{>@@4kg>_EH~S;kst__&B%+NcAL>rJl|Jbp+x$44 zz{Dv*0&-c2b?3KW{ed;V6q9#*cXM;n#qF!zj;Mx_j!6flnOQLu1iAAqH#GJRg{MnOn(^o_WXmNc6{#*as|&>?Ies8tMkEUK^PV6gOv=IGRk_pz1!sgoq2>$N|ZT~=r^^X zD_G;P*c;X8bd0K4_O)=Mv}P5SU>n!iyf9j25wcl_%vz;Zh`ZCh$&FsSknO3ZB#KX! z;xVM;`;1OjEKSHC^VI~ZQcrq4JL;?IT4YIsz3}bx+_V!7Sv;huch5w zB1wyGwR(tCaPyF{OwSnOv30i>xvP(xBeYXrE=Gfv6?tm%7XNCC3CTdDG`*0OgiQfs zqhY;m1B(_-v^ja$gH65|ZcdsmjnwYxN+IoYLJMSp%i+?bn?#V~m!e!y!Gz9O&0d5JIIUC@gbMkiONT z*eHz!S}-8v#OiXfWv^A)Mq9gB9U6;0XGw{rt=2BJqpVf+t(A}*r?d@jn|zqEt+kv- zcF)!-GmIf8C?=0Avz+P<>0!^n_5in=Usx1HG1&}qio>iCwW%Ur{KquDMiVKFcXc?) zvbu3Xjvk!AgF7E*eYsKPKDJ^g9)VYg9x@}|Z{~!vt?UC2I#T)B!b5$-SCDS2J z;uc5ENAl|x!$Nj@qV3o(GB$9HW6@@zDdW9ak5+(UH2@y=IYnuP@g{GP3bFbyHs+Gg z#muMSPGOY_!pIOXGiyJKghh<~mY~^oj>t(O(_Ugd7IyL@;#0j626usS6Jq5*zcqP$ z!%2+g&E1hY%ya^OCFU;bmL1j_MjeB!9hsoxUBlUhEv;DM@mkHy5(b z&ekeHj+SlP(jW)s)Npce5W7Ti?mb#-4mmzp*Ra~FahguG-Y%!sb}KCsZB)&eG_!pD z&QHqDjbtORD$x|^mi;g6$Qm~!zP4B>kleg+7nr9&%*}ADwvEFeX1N>dTB}-Ae@dUR z`zw$;KD!b zbsQIqef8G=P+(Q^>o;3d2;Y?`YL?fsoMN_e`kWrowhg~BfYGyCIY#L)6mhuLx&!7< z^S{)xU7V*CTiCl|*gLF~lOayYU89FN$}@K4jYk?zk{pY5la!lzzfq7c4}scZGFIU+ zqs(r20$|-T6&a7!*DDv>LRLAu3#|REE#a=%dIhM>HMPl9U_ThKUST0)pUPUoxbss7 zuhuI8`AKf4x!tMo-WMXEn8)|5p$qM+Gawh?17q*5o4<07k9xbjNz$so!NIrz{L9UYkr=Xa8lPNAt z1>fyocLRh3YjNO~1n`NWQv%i=wU`5sw|-~c8X@VWGaTljTqiM*Y%jb~&ykjr zlnm=J{d%PYY)L8h>u00ZEk){+IV$uA9OvCNY6I5Ep6W)>6J>9}htSU>{+<^o!PERg*CrTjZ9C9bLn>{K^gPcBQC2*v)YOx-BHX6h5BfM2 zmt$euo{$x|MaZfn$4;V9^7x-x`IJaV^;D8mur*SUYH|f05o9|!l|1?6ByNrR56Jdj za$$lutW}Z5S*$h0tsxQJ3;&ghkY}XYjdpURa~Eq-)^AO1Tgd|<;{`F~TAq?ZS>34PR`2S+siIE3(B+h7D;1JCZZWNyMwcxEHrwC5itt>ex ztkfG_PEst7rRHPlY8=qU5z*w4I3tDjb2r)pcLpzM;TsB0Q8?EpLly>{7wL|H7^a{o zVB8g9(BU0ma)Wri@tn=?ZyWgs?3LIY93Ei_*OX6k5V*i3<1fWSKyW-39C+(vk27LMd>OI zaan^?5sp9y9d}KdopOO9U{)tg723oD%jtWh3FLvmm) 
zD6j;nr}NhkXXUyu*t72YC@aYxJa(7uL%xLJ6oXTVStDM7@ml;&oT^02#j*QHNM7Jx zuawqg`zoq<3vV3IEhb;$C>`7n{yFpBx!u~UAKaQt`5l{M%ThV1XP@rm9V zTWh8?{8g&K@enOmq*e_162sM4A8eN6tg^~lTf8cGEy>9{bY$IlJ#USyJLNkyKqKe! z0e}}chvA!j=1YQ3aXUq9RoC+lC`&DttV$mjE9IF&NIm~O=2l(G`sRcW98amu~@ z%#O?v)|a+Q5gacHae8PV$_x zfjw1wczl?rrG2M=r}F~DnwtyqqYkg4uQ$q6mWqr+W=`SOD6?d(Z;HwR4SBnO--dD$ zqOM$$VcpTxTlG%C)`%Nj&Js}=HI7X~zIqpa!c}+?jv-YT`&j0IcBiPFYAAW%{F#jUc>#j5ku80y>|ZvgU~r1k^rG;UvUNwORsbR>>O=Bs^}uO1G*9 zI0c2LRJ%ciGc_e~Jok~WYd9rA?ggA=SvkQ0`l#91p5HbC#3DJ?{fs=Ov6531m(TV% zrDctzSOMMnMo8l$KTg3IR@T8+oz zEZ8Lxzu z$AasPmjT_y_}&A~T^dJ#)-g3XWx+}V&P0X34K25gX_*ovwmtAoy zDYxMWuDnWUjf7aW+uxJLS;|!*ZQ~|ZrQKLPATslQ#EmP<)uZrtmfeG z`vCAFJtQAe${Ie= zZsfm}qEh*ItM;1;;?KGxz^ge%Fb*vZ_V?PYS}t@t7`|iIHUzZZNiH~nA8E=IRk~%X z(N*vrcUu}#1zzmqE6cX`^HZ+cmobvBe;O5s5>9n)dl+mQr@p3Plk0YR5!!lX+U{@l z4@)@sDk!pEw#G9;?Q0~-{TLk7Fy6@C8p6jKV{7{In7(Sac&l|ksPbf4Cf@^gNRHK( z9loF9)Tmc~WyMyv4l>|sDIbn_NyCnJFl&;K%f~`1j#s@#g@L5jX4x63o2{0KA|TRrsXf08|8g6hjgefrrDCS%E30- zSkjX(2RaB&y4hsf?I6zHVx;8r&36s!w{6+sAkgk?lc`xtD$PqTic(X$yaJ*%ObkCM zN1JHH(pZw=A)QRSTEdeHP6xqBH=A6$9rB%esZ*2dE-*f+%@21w2vDh6+ZC~^2WV`OMEZp}IA}tA-f;a~ck5TlGU#4wG=fCOJ}Jd~r5teP7!8CWw_E zt1qpRSB3AOB%4}6DULJa*R}o~VQgLQBJIeBQ@KjBOn#onEO+&$&wTWHqQq;B+2M;G z4y9@dq;KT|Ar9W@t=Rm1teVvK)=;`uvvwD6m7luWg15*6skNt(CoMLm72#nJ+7<@k zofy{LT0&$EU!N%PRhnO4tnnt7AywTCvJ8>T%T1IMi;<-s1m8F4bDXqM*9HDWB_a^EOr`ECT>G%m#1F8M%(^J{(xviiT$((TSp ztv>k*a#G{?Dzeq|io0S0detu9k=f@5oVc$^5WS6~Ql#-+xPA}6uWS6)(F zar+C(ToHCl+GMBB){tzqD(&b5MWcV{ag({xr~I?VD;@@=VqF5#=Pl8j2;0JMKW+NmAs{l~HDlV8(VUByyvsUeaZrO$hI z4;hOB%#%t=Uo@AR$W*Joba9(UYC51Pct|q2TjYVHNj_L>hXnDa1w=ltKKQM>z9r0 zriiz?LeZAGn)>Z-9Gb?kzVvDQUGn zskUAdKuf?^pq_V41+1DfCcRY6%WB&;)l1Wt^~&>>FOz{xOS0xRAEByoq;@0z7o(6x zSdn~LCMpMrjnCHNw!{{(eHX-vR}LCleaPJO#=TD{g6BWDmGj6WEmRe5-T8r3v;e#| zXbxSfH;w`gyY(+ti)Y;~>3Ju`A=5N`z+P*E?wc3)u^KNsvS@dox z!+@D<(Xv{^$td)gks!lYJP~NQNNZm!!QMpziW8yn%zb3&=B9AC7`W-W`8*K+t@wO@aF{+(#o7t4l!CyM6? 
zd&+aTyIjwe#^-W(xlVn~I8NjtmBRm-sC#Dm-T5VVlr>53& zRa32+EI(`WH1?OZeK@&Td0kIz&Z%~i`tRDMQe&lWEs4+NZichh(n+rW@BX=rxPNfF z%awtIWkY3#LT9*KX|7w|Zl+Yy_Ai_JRTu7iiOY4G>)kf@|F*fC;ZIezB{8Mc1 zC%c|uc!}{R58Q|%+~9gY!+iRI8g%ZHU3b%*xacmJU*x)v@eiJVhU*mwH$)DT9*=21 zP97mA$fM*GIZe)zbL0ZKNFF1X$QAN9=_)egO(WCE3^J3?8Zh0dhY%NDh(1#4Chs-7ONDo;;mXYOTHCan~$p*5KY$5|> zI~gStWDnU(_L2SMesYi;B8SNXxA9t`hbWGM&sIGs%r) zHknK2k@=*DEFsIuO0t@)CB0+=*+}joo5%neCZlA6>>+!}KC++OPY#kpef3$fM*WIYrKpv*a8(PcD$h$R%=_Tp^E>u2R+)nNDVqnPe83P3DlfWIpL3 zOUN>^lB_0c$p*5K+(R~z0kWNpk_obh>?8Zh0dkNWB8SNla+Dk+50Zz-!{j)5gq$EJ z$tiN0oF(VTd2)eVB#)6xeNT$uaUEd5Anrj*}DQQF4-;B4@~1a*muQ z7s+Gf61hwsCtYu1{g7#7I+;OcksHZuGKb6~^GOd`MwXM6WHsp}8^}g-4;dia$uOB9 zd&pk0kL)M+lY`_CIZTd_qvRNQh&)V=lSjy-x8`D6)MMwXM6WG(3>8^}g-4;dia$uJoud&pk0kL)K0 z$o=FHIZPfPN61lfj66slCdbJmm8FH4KCl|;?@))^9E|bSe*X67yGM&sI zGs!G+Bbh_yl6ho4=^;zVahAt%VA4V zIb<%GPkP7_vW%=GtI1l@OE!>=SC zLza+bWI0($){^lB_0c$p*5K+(QP) zb}~#R$R4tn>?8Zh0dhY%L=KZ9u2l2ha~IYZ8pbL2d^KrWKU z$R%=_JWjf{uztuiGM&sIv&fBPHknK2k@=*DEFsIuO0t@)CB0+=*+@2#0WwTR$pqO$ z_L2SM0J)zWB8SNla+Dk+50Qt-aq9wTAT!AqljIaRP0o;AqA&-+AwsHI;)5%OSi`+=nGC}r`y<{KRPY#fS{PF9lDWG(3>8_7LnfNUqjWR&b7 zd&xespBy0flY`_id4L=t$H;@^A@VSJgq$Fcl9S{#IYZ8pbL0ZKNG_4fhAt%U5a*CWLXUJJ{j+`eK$z$Xaxk4T%U2kE(BGbtXGK<_u zW|KK&E}2K>lOD2!EF&w)YO?8Zh0dhY%NDh++$Psds z93u~rhskmB2suGcl2ha~IYZ8pbL0ZKNFF1X$YpYcJWjf{v);%wGK0({v&fBPHkm`_ zl6ho4=^;zVayjFBNxa;@))^9E|V+dane=G`XkfGbTWg?B(unDGKb6~^GOd` zLY9%`WHnh!Hjs_v9ef3$fM*W zIYmyBv*a8(PcD#)DtMDM5d7$WG0zKZX~nGTr!W$Cp}~dSw>cp)nqN{B^$^_at|3G+sQB) zB@<*X*+=%11LPn%L=KY&$Wd~PJV+iQ50m5M1bLL4B&Wz}a+aJU7sy5O7`a3)lPjca z7wd^kBQwZMGK<_uW|KK&E}2hy$P%)gtR$<+TGC54kd0&$86exqFc~F#$X>FK>?a4v z{p27yL=KZ9GHDP$aFG; z%p|kOjbsj)OXiUtvV<%nE6HlImh_U1T75IIa9AV;AqCXbV@D_JjO8ks?6l38RnnM3B0`J{&| zA$#HUmJW5WI zQ{)UeOU{uCDTqak@8^}g-4;dia$uJou z6J!tBOZJidlhMa)LZcPLfmPG&xJok@MsNxkxUN%j61q zoOCs?Uyx8`J{&|Bg@H3vYPag4P+y^hioF-$uOB9d&pk0kL)K0 z$o=FXIYbVV2gng}j66slA`g?}yjFBj?Ela*n2eGMvWM&?`^bKBfZR_Gl0)P$IYN$-W8^{d 
zFgZ?6kVnZ$a*CWLXUREofm|eyk;~)?d7N}z&HhBDk?CXxnMr1m8_8@khs-7O$b8a6 zmXKv+Iax_oleJ_6*+}joo5%p!PKL=S*+ce{ePlm5K<+08$zk#UIZBR^hseX^IC+Ge zAdixhAHsFBbiQSkeOr_xsl8!bI4pWpY)I=WI0($R+C<`fovq3 z$N$V#%B^pXu^BiTd-$aXSHCdeML zkL)K0$U$<5941G|QF4qtL>?t4$!T(ioF(VU1#*#GBA3Y((sdojF*1$JAT!AOs0_;WG0zKW|KK&E}2hy$P%)gtR$;RFWEpgl1*fQY$u~+ zg6tvt$bNEw93+RxVRD2#OpcQiI~gStWDnU# z_LBqTAUR5ok%!2m{*&oR)GMmgHbIE+tLza-`WCPhq zHjx3cos5zRvWM&=`^f=vkQ^n)$b;k|a-2LuPLM~*DRP>eA!o^Xa)DeVkCDsd3VED# zwXh$P>0}0(N#>Itvh4pM?S8-;t;#gupCp}`LOT#KLcxk*2oRuRfXG(pVjN;9GZiaV zwqjQURt!)vVr457j99Tk#Rvf_RE&^iD-_INfev(-s9m>WbpuB1V#SCRGSjpZu#igA z&UZh}-Od9u@_pC!?RAa6JNw3<3-4Xve4>ZWzn zL%pm?qDeYJM`?n|i31*3$;+r;W6UHq%zxMuW7S zcF-{Gq+K*d`)QmG(-E4YV|1LRX@*YHES;wd)P%Thw3<3-Ep<{i^-wQupnlp&n`tX; zqwTbVhG`d#&>q@HW3->f=^!1V!*rCU=r~Q&44tA2v_vZ|Yz2Wmb$2$)=@95 zrw!Ck8)*}5p{=xyw$l*ppq;dfcGDi(OZ#Yy4$>i-q{DQSrsx{}(k|LfduT81 zqft6UlXQem&@`Q-Q#4C+G*1h3ftIMbg2yqfrgmCG>!^p;(+29JjkJvhX*&(kFzuvW zw43(OUfM^aw4V;pI8D$Ynxw;Ygl6a@&C+R_qXk;g!F8f`>Yz2$NnO-UJ=9C}ozz9$)I+_rp8BYtHqvI=LR)E&w$l&|(@xq&BeaM1(kPA5ej29< zI!Kdrn2yjC9i!tkO*3?oX6ZD|(E^>P3)Ec6^{188P93y{I;o4gsfT)LJ@rvPZKTb# zg|^ZlZKoj`rk%8lMraT1rBNEA{WMM!bdVZC5}rXK30_0&iGw2?N`7TQXKw4H`%n0C@G8lgS3mquxf_R}~`&_SA{ z!*qnE=olTRX_}#vG)t#xjuz-VU7#k+^{188P93y{I;o4gsfT)LJ@rvPZKTb#g|^Zl zZKoj`rk%8lMraT1rBNEA{WMM!bdVZC5}rXK30_0&iGw2?N`7TQXKw4H`%n0C@G8lgS3mquxf_R}~`&_SA{!*qnE z=olTRX_}#vG)t#xjuz-VU7+SFu0O4$cIu!t)Ja{`O+D00>#2|WX(Mf>Ewq&eX*&(k zFzuvWG(vl5FOAX|?Wb{?po26?hv^7S(J?ws(=j#KTz^_g?bJbQsFS*=n|i31)>9w#(?;4%TWBi{(sml6VcJQ% zXoU9AUK*t_+E3#&K?iA)4$~2uqGNQNrfG&w(kz{(Ia;9ebb*>LbNy*0wNnSJp-$?e zZt9_4T2Fn{PaA17ZK16+NZVZTs* zrS;TD{j`xb(-zuFgS4H7Xqa}=E*hadw3kL{jP}zwP0&G_q{DQCrsxnd(qTG6Q*?}u(=^S{Nt&h8G)D_`o-R=H6|O(6q;~3{HPlI6)J;9qOY5nR z`e`F=rY*FU25CDD(J<|#T{J>_XfKV@811KVnxKO;Nr&kOP0=wrPSZ3)Cux>W(;O|( zdAdN&HC%sMN$u1@Yp9dDsGEAIm)27s_0vY$Oj~Fx4bpZRqG8%eyJ&>=&|Vs)G1^b# zG(iVxk`B`mnxbQLoTh1pPSPx$ra4-m^K^llYq|cklG>?*)=(#PQ8)EaFRiCO>Zgsg znYPeY8l>$sM8mX`cF_p!p}jOpW3->fX@U;YBps$BG)2egI8D?wB=jj49 
z-CTcKN$u1@Yp9dDsGEAIm)27s_0vY$Oj~Fx4bpZRqG8%eyJ&>=&|Vs)G1^b#G(iVx zk`B`mnxbQLoTh1pPSPx$ra4-m^K^ll>$v{3lG>?*)=(#PQ8)EaFRiCO>ZgsgnYPeY z8l>$sM8mX`cF_p!p}jOpW3->fX@U;YBps$BG)2egI8D?wB=jj49*K_@8 zCACurt)WipqHgM;URqCm)K42}Gi{-*G)UWNh=yq=?V=IdLwjkI#%Mo{(*zx)NjgkN zXo`-}ahj$XI!Uv1n&xPM&eH{IzRLBdmDElhw1zsVi@K?YdTBlNQ9o^@&9sHK(jaZ8 zAsVKgw2MY)jP}zwP0=wrPA6!N=IJ~wQ4=YzPbIZe2d$+p>ZTrAPaCM8HqmCZKAETjfQ9k?W0i|qy2Q4 zj?fexrxP?or)ZYuXo1et60Nv_eNsEEp|#XWUDQYYw2?N^7TQYNXpn|z2MyCs+D#+0 zhxXD09i&4vNk`}?P0=wrLDMutCux>W(;UsydAdML)O?-mNvo-yI%q9*QWteo5B1V| z+CcrZkv7q0+DhAKkhaqZ?V-K2kH%;}9iVYKNQY>W4%1PZqGNQNrfG&w(kVJkb2Lv2 zbb*$r=_#*!1+Aub>Yz2$NnO-U>!_F3(+29JZ8S*RX^4htC+(u$w1@W6J{qO{bb!Wb zf)3Fn9i}5RMaSqkouC;yNvCL*=4hT4=sYb^^9`;$t)zD9pf$9Xx~QAhQ4g)B4b(^d zw23y;7TQWXX&3FL5!y@pXq3k20FBcG9i&M*Oh@P_9i!uPf~M&touXMfP4l!s=jj49 zH*(!+C9S3oT0?88le%dg^-wQupg!uSjkKAz&{o<;+i8e)&@kcI!aS?oKDa*&Cmj!rzL8>$#ti8>Yz^QqIJ|my|jV)sGl~`X4*pAXppwk z4jQJNw3|j~5ACB-8lwX=P7`#9Ch0I8r71c_Cuo{x=oHP;X_}`6I!{Z~^m4svHMLU* zt)))tqIJ|my|jV)sGl~`X4*pAXppwk4jQJNw3|j~5ACB-8lwX=P7`#9Ch0I8r71c_ zCuo{x=oHP;X_}`6I!{Z~e2eQ(tErtjXf1V87pZJ|TNBy*kHq#c`MuW7ScF-{G zq}?<^duShx(ik0}ahjk*G)af)C{58ZIziJkL#Jq#PSZRs(0N*-<|eK`t)_PBptaOV zU9^sRsFyZSANA8F+Duz$8x7KS+Cjs#lXlYx?V)`%N@H|@#%Y2M(Ig$Fqclaw=mbsE z44tA`I!*JmK<8qp8m9?5M3Z!wj?xqzqZ2euGjxh(=`_vL0-dKNYWleTw3^zfgVs_fbqp8m9?5M3Z!wj?xqzqZ2euGjxh( z=`_vL0-dKNYQD|&r`6O>9kiA@sf*T85B1Up>Z5+zM4M>~ZKFZjPCICrcG7Mdp*^&Z zMrn)=&^S%dA)2Jabd;v(7@eSLnxRuPOQ&g`7U(=JQF9B|pH@>lbY-lRKz-Ctn`kp_p=~rs+i3?4(@xq=BeaM1(I}14 z0UDhle%af^-wQupg!uS zO|+S|&^8*R?X-i2X(#Qb5!yrhXq3k20FBcG9imA(Oh;*oj?oF4rWra#vviu~X@Snu z5;foD`qOG^rw&?6ozz9^sE2xK1NBiqZKBPzg|^WkZKoYHOgm{ejnE$2N24@G2WXro z=nzfPVLD1vbc{~WG|kW{nx)e;PYZOOmZ-Uv>rbnxojPbOby63tqaNy|4b(^dw23y; z7TQLGw4HX)Fzuw>G(vl5AC1x&9iVZVphGlChv_Iy(J?wf(=ZC4OM?KU_8>o-^X%lUxEwqgW zX*=zpVcJQ%X@vIBJ{qMlIzZzzL5FCP4%1PZqGNP|rfG&w(JY;&d0L?Jv_#GATz^_k z?bJbQsgt^B9raKzZJ<8tr%kk(w$L^jr0ukWhG{45rV-jh`)HKL=m3q=1RbJDI!s4t zijL6(-JjzaQ$gDwNnSJrB3Rib<{(>w1N7lpEl8E+CtlCkhaqf 
z8m67Jn?`64?W0i|qXRTf6Lg3s=`bCoDLO_cXqsl|6wT6Unx_RiPfOH%kLypXshv7# zEp<{Ct)m|5r47_a{j`ZT(-zuBgS4G?&@kyKEd0L{TpX*Pnshv7#Ep<{Ct)m|5r47_a{j`ZT(-zuBgS4G?&@kyKEd0L|8`&@roP3_b{YpIjE zXdU%XFKwVc>ZeV#nYPe28l>&CgNA7*?WPghL;Gly#^?Zz(*zx&NjgkNX^M`~37V!E zIz_W|n&xSN&eIY#cXIt{HMLU*t)))tqIJ|my|jV)sGl~`X4*pAXppwk4jQJNw3|j~ z5ACB-8lwX=P7`#9Ch0I8r71c_Cuo{x=oHP;X_}`6I!{Z~+{N{$)znTMw3a%li`G#O z_0k6Fqkh^%n`sMeqe0qEJ7}18(ry}|J+zNTX^algI8D$Ynxw;Yl&0tyouFx&p;I(V zr)i!R=sYb^Gr;wy)znTMw3a%li`G#O_0k6Fqkh^%n`sMeqe0qEJ7}18(ry}|J+zNT zX^algI8D$Ynxw;Yl&0tyouFx&p;I(Vr)i!R=sYb^^8>Cwt)_PBptaOVU9^sRsFyZS zANA8F+Duz$8x7KS+Cjs#lXlYx?V)`%N@H|@#%Y2M(Ig$Fqclaw=mbsE44tA`I!*Jm zK<8qp z8m9?5M3Z!wj?xqzqZ2euGjxh(=`_vL0-dKNYJSM|r`6O>9kiA@sf*T85B1Up>Z5+z zM4M>~ZKFZjPCICrcG7Mdp*^&ZMrn)=&^S%dA)2Jabd;v(7@eSLnxRuPOQ&g`7U(=J zQG2|+{tjA8UDQoIw4OFlANA8F+Duz%C+(sU+CzJ3l*VX3jnf1jq)9qVM`(&>=p>z@ z3v~2H<@rbMDeJ$tY-OVC1daY@`EkpAWo!Pc>=ca;mLIqOxUBpBvU!?(p!~RVsBHa% zWlJ>mQ2BAsPs%nX%T{kIo2Idc%a2=sTDEq$Y?dY-DL)Q9TDI=7vIROkQhwaEy==qd zWz7?1$7t_p`Ek>Zvi2v-W@!J;^5eEtS?ABnPSe4kmmhchqO51E>^vR$@ABjBUzYX# zPuYs`vg5SxSLMgePnC82y6hw!m?%FE{-&(!w`Fs5=zq(P!|Af#r^_zT(Pzq!Bfl%_ z&y=nFec1^beYX5Ke(XkLt+SbgxYK5m;(u99)VtBV1CGe!%{CL4$8by@-)l1qbX@$I z%}j_xHj@^=WHT8$B|d92S@98@X^@`-r{(c;Hj@({wVAwlpUo7+)2(J+OxjFIbX$yZ z!JpeqrF{R9##D=^7-JXx#yG@AV`{`t8B;5M!x*P{AHH487~>Y7Gp0_QGR7nBGNxXv zv>2awxW)Lzf3=uKaf`(?i66C?X7K`xX%U~XnO1SaX4=FbT1-&9-)7pyyv2mX$84rU z{E5wk#cHeR6gOK_VU7%{DVE-i7tWK4C2wGmL#g?7}`FHr!j*V1JOuW3(6hggkD-J|Wt% zKZqIHk9|TOw_%?Uo!B45X*!5~LLPTupAbFRAH;b&f_*|BcVnLreb^_&3hWQ!IPJqe zA&;A}Pl%4;vXgWG`-FTvh=R-o z_6Ko-MzK%G;}+}_Vh#2Oaf-&VPsro;UzBxYe~`y{n#4XKk2|qXi1pYf#1c(mpOD8r z*eAqB>=R-&_6IRdW7sF;aVz!-u@?J-n57Bq6Y@BOeL}3m{va0UF!l*~+=YEYY{--~ z*dOHa812PAA&*Cn+h`8A88a$Y7?Tp$;p@cp=tx zz7su(wOA+dT{up~qj8*w@5a6$It#q-g>_!0CkHlu&> zqv&6}0R4*}L;qq6`WHWr{>2N?{|WHl(7)J<{>4wAfAJ#pFMbmJi*4v%{CD&(ej5FY zLG&+PjQ+(-(7*T@^e?uffAO>EU%V9ki=RXPVhH_-$DOk6#a|eMgQWh=wG}I{fja5?}oRd zfAI;tE)efP|KhJ~#x8yj{fqtRU;IA$7w<&>;$7%p{9l`Kiv#Fi`~mtG??(UP(>7Bt 
z{t*3(ar7_#2>pxqpnvfxn`sj7MgL*~{fqyJ^I!Zg^e+zL{1<3NJzqk|qiz)Ok{tW$#KS%%KFVMd@hW^F>M*re3(ZBdV=wBR1|KhLE zzxZps4iuk4|0|Etzc_*Z#owTR@wez-{9p8cB21%y@oAj@;xp)9{2k7J@%QLod=~wS ze?b4@B>ETs5B-aO#Nj0V3H^)Dqkr+w=wHm@{1^X%{>AM!;}c&%|KeZKzc`KaU;G>T z7hgpG;!EgX%%OjAH~JT6(7%{R|KcqA7w6EwxCi}o(aWW<9kW>P%ZVy46@i^+=B7Bekww3wWDh{fc^LoKEt+AU^Y ze4E8Ah=*BBNqoD-n3JHxVk*RUSWKmOgvC^gH5OwRkF*$vc$CG|i0`zRTCvt*oZ`DI z#w8wYF>dkQ7E>oWEyg1rV=-QFlf~4F$68E-=)!e^cpR=1#N%%Cx|EE zIzc?yVp_#Ii)j(7$*t`WJs{Gvi_-`WMeb|Ke8kFE*in@x$m}JRkjwA3^_OGx`@l zhW^DC^e=uK{fif(fAQbYzu1cYPllgB|KdgHU;HHc7u(Rk`0wan{1o~ZKaKvyAo>?C zM*rd^=wJK{`WM^LzxY}7FJ6lN#m}LCF@*lb%h11gIruO~`WHWs{>3ZNzxW08 zFNV>-_(k+DUWNX}FQI?26a9-{M*rf~=wJK~^e=XyfAK5mU%UqWi`Sxmu^au1*P(y$ zdh{=T75$45^e=u5{fjrCfAQ<+U+h8u;y2L0cq953zlr|EUi2@13;l~X;rthGM*m_T z`WL^A{>59+zxXGcnG(N){>9(pK7bfS|KfMizj!P97jHxVVhsI@x1)dY4)k9KzlZ+C ze)KPXAN`AWqJQx&^e+yefAI(CU%VUri*fWX{s{ex_n?3AUi2>}(7*UNp8v)FME~M_ z=wJLV^e+ygfAPoYU%Vgviw~fGaR~j352AnZA@nc)1pSLi^e=8h|Kh{wU;HWh7w@r| zE^!$Bi;tjx@lo_IK8F6q5%e!^NB`pE=wEyS{fndMU)+KI#V66fxD)-0DfBP?4E>8g zNB`n4(7!l_{>A@B|KcyvzxY4sUmQpO;;+!Z_!RmVe~tdd37r4pZ_vN^Tb%#m|Dt~} zjsC@_(ZBc%`WJtP{>2RX7k`ib-vggT|KcCezc`8h#s5S9;&bR<{3H4or_jInC-g5q zkN(9!qkl1r{>8tbfAIzMFa8zhzc`Kl#lN9{@kR76zJ&h89Qqe`p?`5V`WI)=zc_~b zMB*&^7w6EwxCi}<1@tezjQ+(}(7*V1od4oH`WIhC|Ke-tUo4`3aRL2{|3LrZ>*!xx zME_z5{fkTJUtC82;$HMG8mk!+Emo5htyVKE+HfCGtgxCF+L#-w!+N~xpzRhY1;$gTCD8Ail7DR{D zl*D&fjX4D#ZZ#F+5mr+v)>uuoc%;?X#ZjAah(}pXjrdNhsTFIj#wotbYFy&cR^t}m zZ8deG(`r28F;?RhH(5=+c&yblh%T$~iBH;$Up&rg8pY$Srb#@(YMMp2)wGBwT1~5X zlGU_{CtFQWth1VS@jX@(5>K(34)MKK6Ba$VPbi*>`-I|YxKAj0ai36pAMO*1XW%}e z_%FCmDAwctt@wW2FBH$Tntt&ts~Hd*tR^o0tJNgLv#n-O`~dnFedu5OAo>?Sg#N`8 zuK&dV`WMed|6(Kh7tcfg;uiETZbkoM6Z#iFY&BEj`RHH#2>KVB(ZBdn^e6`> zf3XGqiyue-;)Uq{z3|`AzxWCCFJ6TH#ZRJtvCV26;=iMR@l)tu45EMWV)QRwg8s$N zpntI){fnPP|Kg?SU;G^U7cWEq;^kK37q3A7Vh8#cKac*!E78CB1@te5(Z6^V`WL^1 z{>4u8FMb*Qi&vw6@juYN*oFSZub_YN8uTw-i~hxK^eAIjzxY-3FGkS6_%-w| z-hlqaucLpl2mOoRK>y;6=wJLM`WJiAzxXYj|Kd&PU%VOpi+$){{5JX*Z$ba!chJ8W 
zMgQV=(Z6^r`WJ6Q|6&aNi?^eH@ecGaeh>YN{per(KKd8$ME@Rm7y1_m(7*Tt^e^6x z{>2}ne=&~!#UG)6@gDRq-i!Xl1o{{M6a9<#p?@)BGhT5J{fj?F|Kk1VUwi=li$myN zd=UMM521hYC+J^HqJME4`WGKY|Kd;4zc`Hk#YfP;_$c}pA4C7*2>KVdqkr+2HWLva zNB`mz=wBQ~|KbkxFFuL>#hvJ1Ord}AXXs!2IrA@6|Kd3M z7k`ER#i!7}_-phpPN0ABH|SsdE&3P#7yXNA^e;Y*{>5j|zxX@!FJ{oc_b#XR~KXVJenhyKMq=wB?LfAMAXFTR5Q#lNF} zaUT7PucCkPHS{kQ(Z9HW{>6Xb{1;zG|KcM07fa}0TtffiGWr+yqJPog{UXtV_wz(6 z-Zv6$xc@6w;C&-;9o{z**W-O7af8i_h?Us?#e-}nB_53T1H~#_|B2OjA5h$g_W{L2 z@IIh;sLf1@Z^QGyco^Of6yJ{5|Kh{A|06o^KA`vxyiX@kpC7 zr@^Carb2ut?*ECkHd8IW3-|xTqtU~^@x3Nu$ey5 zhx826Uu;7E;)l_{cs}|UKZ5?n zX7n$96#a`ApnvgW=wEC>|Ki8dzjz_~7yk|Yi>>Hi`~>9IrfAN3Nzu1oc#ck+c{4DwxFGc_2=g_|xLjU4r=wG}X z{fk$ie-ZDm`ozzpfALE6FMa|2i(&LHei8kPSD}Bg6a9-{M*m_0{fk$lfAK%izu1NT z#jl`$@f!3mUW@+4ZuBo+hyKOu(ZBdr^e;xxzxXxuFW!Lu#jm4(u?PK&-$4K3jp$$e zCi)k9(ZBdD^e^6o{>47@FMb>Si?^VE@jK{WjG}+>yXarM75$61p?@)k{>9tTzjz1w z7r%%8#eVcJejojdccOoB0R4+UK>y<1=-&%}i2lVm`hUb)`16w4S&4hr71{Uvts?uw zza5hOPPVe}OcV2kxslFGwOQU~V&7CTDsNAk{o60RJH>vDbxj|*G%5^Hc zRIXLop>nm#6)Kk&_lqYVUQ5%1M<6RgSCN zuX0r7UX>#%cc~m!Iizw>%0}gdqFR5Ib1G+5 zo>V!l^0>+=l}A)gsywK2T;+b1qbm2R98tMT<*>>jm4hm`s@$w{qsl&&>s9us>{i*S za*fJ%l`B;?DlfdI)?ekE%2|~sRZgosu5wD{5tWlF52_qjxnJd|%DpN_RPItata3=@ zpvtW(H>=#JvQOoDl|3rERd%Xeqq1G)N|lYu3$Lp6S2?G0R^>^R(<+avoKks2<)q4k zD#umsS2?P3ugVc*{d%QSeXc|0c9q*yZc({OWxvV|DtlF~Q`x0*t;!CSt5vR0xir6T z-RD)#t30jpl*$>ECsZC&c~s?Lm4{SLs63!@Oyxe6dsOaLxl`p1mD^QrQ@KUuCYAjv zH>m7YxlU!5%C#yxRIXOJLgmum)%vTPS9x0HDU~xSPpCYm@~FzgDi5igPha;M52Dz~fLrgDqQO)C3UZa~)Om`8oit+G?)8kOxTSE_7OUU+3+2L+XLDrZ%m zR5`8kxXLM&M^sL#Jg9P9<$jf;D)*`!QMpUyu*xBogDSVG+^lk=%089rRraXtR@td? 
zjmmbFD^)fsFTDK5j`MiDdJQqePN+Pfa!lnum3vg~R=HE< z4wc(gZd18MtDIMPTIDH~Gb&H0Jf`xf z%EKxTshm)GK;@XqeJb~;+^uq_${i}VtK6n?i^@$Z`&DjG*{gD$$}W{_Rd%Rct#SqP zqquKX+FM?K$@41bRi0LPO682o6Dp6XJgV}r%0ntAR31<{rgERkJ;{*qVk`Ac5C=P!Bnp1@HkDgcZc^E=a)ZiVmFrY?sa&hFL*;6fD^xD=-t)@(tDIMPTIDH~ zGb&H0Jf`xf%EKxTshm)GK;@XqeJb}L>*Id)p1;gt^`5`v)qDPuSMT{tUcKipdG(&Z zWVz?Be{O^NonDpeRCcLctFlAoYLzQgF7dvz{O);`^D0lPJf(6*rUcKKhdG&t3 zq{@RT$5rlEIjVB6 z$`O^jR1T{gQaPw{tIEwPH>&JYxn5T`7}yHu`K z*`ac^$`vY?c+XkZeO~3f%F`-Oshm-HLgg`)M^zqHc}V4i$^$CLRPIx`N9AsnJ5}yb zxn1Qpm0MJ9QrWL^gUVi&>r{5BT&uD}vDbxj|*G%5^HcRIXLop>nm#6)Kl_-+5*IRnDtC zt@4!08D#zZG_F3EQh7w>q{@RT$5rlEIjVB6$`O^jR1T{gQaPw{tIEwPH>&JYxn5`~dRvQy<6mF+55s%%tVQ1_n;D(6(rsywN3 zTIF$-Q!0n`4VmRwLdr*c;1NtM&ctH+Ie|LVPd$*cGJC9mG=m%MVXAOEyM_>p&< z|Bj~X{p(-7;j`WT>i4+=dyBE%hi!ES{?hukt$%AhDX>)Z&)Bzo=#{R|{OlF)vDf$y zyW-Bxj-3_9nq$8dtlD}~AlrKI*1xt^o^A^swDm8oJ91&)Qqldy3)X7W@X;z$c_{w9 zfcQoVYKZNfw`1ucgy2_k}$Lks^AL;V1 z+iDHrYqnM3du`kL&#l}MI={+Xg|8h`zgNZ^KG*DQ!N-RV@bLzIJiXwy+Cr9 z%c?#0U`;^2{}&Er+AS9^nQ;c}J0?C-9?$-tzhLF_L;QT@BkQ)VJMUWmSFke6 z#T#ekcj50t?>xy;ZL+U!Fuu$779SZkJHH;PK}wFM@V{Tc=gsANi&wxD*7I{P0zV@k z$LBu{eP7*M{3OKpg;IYXHx|#{;>T7V_v0(h-|_WOE8HUDI|Fdv*L|S;efSM$;^UL> z4A>7(6Y=$@$j9MHP#Qg+AKwJ!=N&B{$JZYPE#4vG@vTt4{uWq+kKZKX@r|$*-T>w2TrZFD^RJP|_`0j*F}!NDerw^8 zYyF1?4i6mq>u)F5J@}D=IpjABb=i$CyP$z?Sdslk?oe}b;FK-(fm51J4P4`|da&DX zy?v?JFlT;xaR1)og){2{#_dd3-SxX|%SHd5b%Em=5B`3azv|9|?p`jQl-qQkHQ)(+ z#lKX%V3!+vhkf?l*d2}v)dVc39TnV|EbMqE9#`F#ed(yJN1gZA*7EUteBK=>yi^f5 zBp~1OTHYBrHDEc>nqDrZXI_74saV1ebdBE;vW4yn^`C1AI0MVY>OAJX%Q!PHVcxrz zi;vHAv{jkM|GvQ-|KTe0-1{mG{z)LyjdMGTKS6x_?By!+o?V4=zMgzn;E2G%1K$d) z$8Pt^jOCaJe&XI@D8D6e7Jjq2{ln_z`DSpjq)3!_ze|UHJlImV^)5{^SR)*WAT%Z#ZUfB$hq>9-2vHKmvQXxExtap z2S2X|Kd=6gjrhLkOhw>B+YZ7$*&1>mWedH1d--=e@jdnUp2DcCSRub$bkCTbM=h1t zp~CsCWOblad~T2Dymj}lf8Zp7w z*s)J}9(m7f<;~GqbLY2`(OJv=-<>s&e|Oe$lRaQL#(@>RWahECU)}lG+*9{|EPv$5 zkInu1@o&$X2OgUnzvSLw zcF#UGm#V@VUGy`d^Pe}%izIHA77|Ax!ZqoVDWbB z3^N1TBCXLzwctPA+? 
z%z1j#x03yv4-Itt4-ZtNkHn~t-;Mv5sq`sZHojba$e$O6Q`h((4B1aB`M(s{@YHV> zYEsxss(-Uko7(tt4K%4+avRL40T=cVIi9Y;3f1O&v0I!ndva)fVEy;qfem;55$CZ7 zKika6ez3Yf9EAO$2Is`y;_Z7Eupey1S+P`{+bzdJc|RE6CHukruKkaRirve_YxWrD z)Jx07tM@DyAKuk9R%K4X{9LV7=KQ4%<|wSU><5WG8_X%MSDCk(^U$vUzjMB4M&>-6 z-#6!XVmFZa+_?L|bAG3qbElf~`73k2<^XfP3HzSmoJ(`d#rj=6zo;^2HdYzWzg3xw zN*m0Xn5)b=IfwiYIqw%fQW5y`g8ACUz;^=+{=&Aix3(_WJpG%GOfMC0nH%4J*4AJM zAF=mtJ_jFhV;w&5^k+h!{`?X1rs~Y6y^ZGuq@R12Td}t+e8=LH|8)k=!@2d!?hV)> zK9K%;sN&?ar_9Nx|H-}8xbDX;i=P*{HuS;3CqI9eWy{XR(5s=3Ex4L3-ik9qA>W6N z!0(yQ#j%He^7C)UZ#neoTK<+We~X+uO?iv+8?&ZLexE!``14PM?%jNU-dgqa)LGao z&kBBKTX{6{71jI+A6YglBfK_Lzp{&#zryU?a&CFA$YX8dvNrfLKD)tG2HqBU_m=P@ zHzwcx;cNW12aM}F|GHR3Y~7IUb`x7Fj_eMi)p6{%>-up-l_?#xx~x0BtE-OD*=oPWVlTZ;x;b7p4&<9Gpox{6hJzStOe z_jwgKGY`kJ>e#8hn=7$eZ8`V$cj7#eXN0}Qy*u1k{Yz)e6Gw0JAYC#uxgF=g%yh`H z%@wLBY^cEM?ULsL`Rb05rD992dcTphVK)la`BMdRI19fVLWrM zmB(|!e&caqJjbq$=dqRX96f`7Xz%IcS4LEWyy@xXVjafRgl9HeVS{b`$w#NxoqS}v zwzg)c#kEv?Jy#y#a`6*8mx>j+{h#q27|~5DBfK{>aM|Bpx;Hd+#S3`mcnK>qiZi}@ z7X9ooJDV`xiTnoBgz+xm`S!%~WM?}9>*sE=ygK&9-p$?q%5b6BZ3u;90GXASL{y%d63B2X_4__PqX=~$eIKcRuF#ezC%j17CkN)PSo~$yr zo`ct3Gv)C=Ie(z>``5<*`kY?>0gm6Q#$Vbc>wg%>|H+l{mx?d$d8_q*bgsPqrQ$Pt zR@Z-OZvXYq<7~Tx+{|mdz>woqE$A8!|_MGdUa0e1Nd#7`S z?d4tY`P?{;o%!6oS3a?A;IdNjskzntWoAdIn3+A~O*`OHaT>ebMHo?d$Mol4#J)0v zeI<&IHDh#rvu5Yi6F44rZ7}(l-@Fg%aovJ(ZCV@GsT@~-&bW`|Zg?y5yRaMHmpe8j z&mVVP(SL3Q_LowzYi*3*#~5#zecOIxycOfATp6S6(o01jR$H!`O2v!tz4AP>TwK__ zRPoO5q=CcctiJrb%hw(?F`GrMuWIk_>XyYaeIcEUR`_Gj~EXYBDRv*iQ07lPN4 zvKMw14s`NQnPP0gXr1}hGti1 z@MwPjz2uj;vTEQA>M?`Pe0qD8x$*2Ob3DiY@SX#8Fm-XoS_fBQ2KzdA2WQZ&W^nb& z3~qYU3_kUiGw`gMF%>tV-COk*D8~EsoX(fuF4c} z9KC<#IQmxdEB<;MN3ULdL|}DCv11Ni97nkK^bdJ0f7%vL;Gxj_1E+p?-2>P7%?a1} zH+*m1?He9k|2=bisd(6&anGbT{NUfWorLGpA8g)`me+OGJ<07~DEQ8?MJsN@IQE<# zvfaN9*8%IF`XtU1InrOuAFZB$pL*$?=eYy6`zvt$x+Kp??#Z3?ft`QKu3S?t6(7QD zwelJA)XIGYc|Oj}y!rY8Xa5Y&eyqSfID;<43fwx=G=lq!n7daJLXb8qpqCU z|9HF}bD78Wv9wvp;mUl*_gIyA?W`*NJ2&MnH*x%L+!NNTJ>ewGVBhiho00MiylMug 
ztju8Zn`UtMTh1W2a)mE5u&&MEkeU6D$D=WW7-ta03?_EtUge`z<_yeX=x^mY1oH># z;O%Q2JhfYQ(7_ozGrO;YiCO93g_Y~leS5;AGjG+wjcYTwhxaH-#i8B%cQA_U(bG7C zQ!sE)tZ^Ps8 zBb5OM&W_I6%5Cxt<=Q6K79F$3U3_|}7@1v%6`jbHpG7XjxXSm6=AV$O5%iBSm(PJl zd`_MvO2rFySMIkHUxK}Ba?fvZh5hWFCA{u3J9qCcKi6$qdk!vNgOrL#VRet@^W1WA z$4)tST^Nt+X?gx#!fVR@ZPw<^KKv_6|7_i^f5X_0Ss7a|#&#Un z?S{O0VyW2whOu>EY}V>2{M*pJnQD%1Jw_Mawf}J!!DwE?73J#6or1@6b4$gOb~!P& z_@8lqlw;fXx@xsk561ScwN8Kcq;z^F#&#^n=FJzdZd=|kwxf4v@NZbw<2{dZqmMsc zD!L9ZE-%J$zZ%yII4ADGxSr4TJX~ceFs@s2h@jlE*HPKGOjsZ@8Yf+ zu6J>t5Z9J+y*u{zhyy!3uXk|=662ZL?|K)n1n~PWQRBH2k1xV_?#Si0Rhf?GH<;y@ z{#l0u7|*_A{{t)IxewR4tH*wPMvkvXIU>0NFBNavm3Vo*trf?YjSU`$kwtU+A762d z1phAhT7&1~@gW%57TgVX5d*qYL5M`jC}Z>pl41@?9Y(M%%V0_7hzDV61okt^6A6w3V^m zjMrH22v}Z~Yi1ip8{C6`yewU}#W>9|zuC3kf;&_{sXZ+bl6P~?TDcqRQt#;0th}T1 zO}qm21e{w=#Jf4AV(YHRwxwcdWiCIMU0>x49Q6IWE?=EdJw}Dsfw?NYc6$d_Wx4ot zN$$gz$9&StnB_ifsaUb+^_Lclp8f6%l!`vg`B7Y#PT^VP;d}{uPwt^Ab0@A#gE*(g z{7#&a(tUm0I$G{y0e2x+`*;Oe`dH6C#cn7+ZO7EEVIp)|R;~;{MTUAFUX3Y_0-h-h#0n{1#)C>;HXYZQxk%QDYt8 zSd+VWrMEWLp0%-V!dUwbFxCN#^)NNoEo)=FbZx9wjP>lf)(5N1-Op_>pZe?m>ne9E zWL^Dl80(8GSEX`SaNoKHIM$0e*45*Dv3Lr{if7@Si^VezFxCLZI=XgMinEtjrHe)T z+E|BXO2ypl&=6k3VXP0oP-VUzx^1D(EERvWQ(oU}4D7qFXnFPMiybT1`HRID)%Emp z@mm~uVeNVv`~S-IG@b!gucvv2E*8_go?a?`0V98Izw7A|-jP`<2Gz*>*GB#@ucw!A z{a;$S{-1xK%8cNcSolkM?2Q=vIT$0>E`S>i$8 z@G4jVuY?D~%i$sLQfP;lz)JWj=z!`yvtef>4=K8t8d&d~DiGnSX#=av1g zV{ui1KD%%w{v39n&fTSA64$ZK_v5_~9CKUnC-8?2#+_a14=c7<{1f_`Kmi^O}x^7^}O<@NVs@eYhsUJGF#9F^DKrD6xJ!pr+i z@5(Ixj?v$mfAj0_Qt=+#hpgExufGq+*e}4?FaIW|F;4E=6ID^59Qv>v(L-EBiAQ*<@bj5spa~3 zR@UdzUFCgx{o48@R_>YO^%<|x7mEwLMqkAB;r{D$7{=1OOV;N`jO6EdFMG?%@0Me; z{QIZpYH)p2K8EBP3HJ$cm*G`(G>82Yjp28fkGY@c<(QMNl&%(wlTVhrx&!a&$U8TS zc=js4bAxAxnf=EyfxCj6ca38#K8&RsV|nsV`_>^or;n3R zBN)d#t{p3IuD$l`29uoHw=Rx1tV=c5#W}YQs}bk<^BS*TaTcsxzv9Z4*RPAk9Isz- z_V0K7inAZ1=*r3QaW%&9Fvf6_`rXf}Yv)q&w=37qC0zYWCpGvDOZ#WUFaER*U zDITZ2y!Kx#CRV;m&f|v;Fpej2_3B;e#uzL zYW4L8t&OL0Wjq@=9`9~>c9Gw(So|>lu0%k6&p#W}QM(Q2@c0vlsjrv&&iLCUf%5xr 
z@5TMXlP<1Vx#uabXXSO>8R@~0ai5hwaf>a0uMaq%I05g) zmGHNM@E`8OIdm}Z)|B53S}Z=lyH@RQ3p?L z%uadNZe8G4xKsF7?9z8)f0Davi^buUci&g;lI+2=Fs|!s6L@dPUG8QQ-N?RYe-52y z@Ol7qeDBKhqP)j=%7-nlo^f&Qsn_A&y3FxL%<=ei9CJJ|?cO52|G4#pE%#%V5&S-x z`>D9>+N=J&e|ruP5J_t#EY zo7;OZH$U#LUFVmX9i?V=0%vyo%FGVq%xcxl4(H5H;>`ZIHnZiGYjv5~z~e`(%`En0 zd1lMGx9>NzLolz-ctv@j{)!%6D={XB z`~Uj5JmSeW9+&51Uo0P&AXc>u0P-&6y_<%>f8}oitnMj`#W3GZUo75)S(dwVV|MMhKQaEpDs#PS-~41x8U1~giKGv- zr?g#M$?jgq{elDUDT~Dm@%veuxceHA6VNTi`$uAb?=giLOyK&urYd1RTJ;oRQIxxK#f?tgi1e}8h{+%DtXGXD>8 zUjr9amHvO`g<%*FMnoM54FM6v4Dls2Gs5u(NNVc#Gjsb5YVC#0WzBUB&G02Nvob_Q zFqhO^MN}v=-*wf@tZXQAO{G{h8D@Bg;SS)||9j58a~a05wSWK5=dYj3z4zQR-{(2+ z=RD_mo>O;c+HRsvO?OFKyHLtjue85J`LVR6B5f;?HZ9V2JiIe)(O+X<_*+kD?q^$<)T<2Io zOV`nyicwq~bwvbRgx-jdShsJ*k?v`E{ME@`{S*6w1t?X9J0n~Ss+V^4h2W~^IW(pK`7 zHyQSgI6v)$x^>3VCvCCOi_z zpEA1Zxxi9+C=9$^beIBe3)^ee_P?gL3p=q&$SK2I0vsxKQc1VEQA-rNjLgfVdEIgYzMxw1ao}<%R*&~F4dx#mE0#ifZ1pN3102R)8?}w2Rgl8^1boqZ zBhGH!*n8hs4%6Bd_+2#jKah(=aEguDM#Dbk#&%&toj0fdXo}3u@fwkvQ&?S)=&z9W z{QBMn{<}3C1JnSHUHUjmkVji7KyMm{h0zC|>4dp~T5z#>pyhGPS-}L7>D* zkKEn`fw{p2LExjO3m<)JUxJk8FPKG#;}@+qMNCI|JLC&}?kKx>$SZeNlgMWfw;FNu zH9hZ$6IN4$u}59wF1_u5PQs5U^HtSfR&`XhL;pP&a-AT^A)>$VCTctvo%bf$yL`#& z3(z6Lm4>SU^>h=o+Wc#ZRsT9Mmn9fN6aJ<|uA`|PFi_g9T+4+VVvmBU#1 zl_MEfT4li5jVJ{&0KfX%SB|NbK^w_{UK>{;ER2hXJl==axnU@oyzV6=FR(p zsBl(#&%$l#y$Wxn_b!Z>iS}Ld0rI>DVn#{jGuh@oukL{6=xyP$g+#tXNB=wY3NZTH z>QDmbELTwiA->#~P-p%-Ki;6su5t51HOTiO(qTITL(lVXb`bGjFoy3)=5@ zZ?ckVJ^?RSvwh+tj`=|w-bD@(_fO6)%^ferwbT3x^lkoNnN zB`L~n;ibcvFoUnZ0ON`+2_Ic>U zz*=F^D;ik(@ZhsU7+uffH^*zcA(TK~U^(l3N1TByPKg}H>>-YU#VL^Eob(V!!{V%x zjx*mwoZ~D`q8w+Ahd3oH&R{tX?;%bB zixVoxndTwRDi%j4$4T}OX90`jsBxA>f`>S>S)5yPoJbFGCbKvfxX&Kfz+Q4et(wY1!`CVR6FbIHNtpS;OM^%W>j7#K~rHZrhwZ zM|+6Fvp7~cPLPK6 zan{Ol_IrqPyM~tg5;@LJ4{@w4&Kx<;b`NnbvpCb`I7J@foMmy6e>Kj27H5PUXO#z> ztt?KY9A~kIIBQv)KsgTYAqfGNIMZ32YjT`n9^xdkIOpX! 
z(H`QAU~vx0aSR^fM6x(L%aROPKJULFw={xHoVR4qpajtoYQ)i=fBTJ5R#zUNI zEY7oXoc$i+oM&;;-$M9JL&0 zs)smPEKXCkv)qjy;ylaZRLXG%Ax?*I7&-|DaUH<5{~)zem+atS^Xh6_N_Pmhv+pN) z(IVFU>w4^E)8F6i`)l}q8oMHDZe1B&=bUK%-*={1$o&dv2#&Q7#k&n8ptZcnrc}@` ziWqM*i3sCvn6{b3{fXxe7V&ym<358u|4P0M`%ZOLUFT5>JGGe6n%jg^&?E?B^5rZ@ z7iKd}f)3$4haNOZB=-zK!*u{Yfgi3C+PpW-%1-G5gHa^TPj%QY_Siyxtmx}68kxHsV6$2fr zIL>>IVtux;P1s+TRn{(?flOJ0RCyO*CN0$#y8DkE;~J{}E~e{2mvo8y|6;mkvUI)e zAzfP_Wek$irANBbYSu!=k=h43R7lh16W(Oz39%Wx?W_U=!Y@|Rem(fzW zj9~oFVCyiE|F7FR%O%>ST$GIe!5;V@1OE3zXMmPVX*FHH9o*zg&VujNxD2?q9{;s+ zslk4Od%3K-r*g5>(sFTNttOUBn{ZSr7pnaa{W4o;xy*7&*HL^GwM!tMfzJW-7I&5fsEz)p9eQY9oHy#1s<;EIX0hEo&(g8R_FkneWxAvp=c(EPF~#uUOcqFM`cET2mWJwjZbDg4y%4^;)ztx<3OQEJtT)Cc30L zgrz$94$FvkXpf@j!@8B0=6OrIpn;A-7}7ln>8(5FO#*TE$CZlbi?HFkbKi;X`HC{| zHrVq+PnPXF4RG0aqAPnU-ET*~K%xh--B2bKgibBwL~QNd2AQ6i2RGT;)`1>*`BA5A z|F*E`pqi^d$=$NL%ZS^BR;+dBAPr9WH!WYM{JUM)Y^C}{Gmy4?taYPT`jQ`XqWqig z^(GvE{2S?V>OhHYj;_%ac3<>_I`o8j)&$X(RMIDb^p~yF+l2zE8^u=YD9>7Qq#bf- zaeRrY7WJRnpgrQMi_#|KqW@lY$BMmO$VDo&NQJ21LZ!;>!We1Q-Uiz>an+90*0&4s zR^tYYHAhh{NA4VJDBpBUOn`L3IVNJ{gEsnhz8zL&&M~oF7zOD9y5dtq*1CLc3r^JEYQuHq;*{pIgD_57led`;w+3-ek++yYqPy zn(3Wdxw`sh9{OPL*?P#A_@=5js>`8EOKh?|d9q2V2zvUoBR|nQE%525pF8c? 
z9M&$ZsWD5M@%m#rtuM(#UAl%#J6;c8ETPC}KUPmA0c_p>vvcVcZeOg+PPK z(E?KAK7PG4z=JJ%n=sF2LSMHGDW*C+&XRcwTw^q>_J$;#oHz*o7fi&AVaYA5`%I^BhpXIBKAt z1Hj2x_zHMN@zX`o&L8`HV>^VWkAtT$=8vTeAMUye=)X9iOQ9}M?pEe{6{vDG z1(>m7@4~wfBXxC=X;N9Y@g&?fEj&GhzhR=jZlGC>63~vv?nsJ_`ME)Rurxz%nhE;D zG`?hZuDItz{gn7oo<7dO47S*M%%Vko%|GaXM-D?8iF%!T4rNoJfy%0rY|~(IZ!swz9-Q9QpA6M~ zp>o_S@6tY-(po7~nlDjWSl5*K$s7{VIM*A9@I%mAO#W?B^eaO8`lSbR+tP2O&jb}c zG$MG(XH7&IiKsKhrbk1&#_7GrUggELf>su2=|5gpsw~!W-BL(Il0*H-%SNrzdpw>uGxm9%*=3x9cN+R*d9pPDu?Z7Vdrc!`RJ6F zjl&F7W?YPM77|ih{}dIdgc;i5Q;lc6C|#R1918<|`lTKEdjt66Rej|5IJ_UI3X%DX z#{1r?-tv1G-gi_5$?pcdZ>j1ozw7b70r`IJ6K}E~*LGaFxK`p?f@=Y;xw!PW)VLfN z>uPXS;);vYlECqC=&N3n^4Ocaz^P92Tu2Vx)g!z8QU)TILs=RVSQ^u8S~-p1SZNwR zWNTE~7o7y=1YE0eWkH6V{Yv13*Z4Wy5@W!HfHOQdl#j@27X|{iTT1^1m69TRHK`@+0M3H5c&?A-bNfaK-g)yI_~r&jh==4(wCW z6&@WUsu522JH5CZ{&nncsW?)$JY#cGPSfce1@GhK(=9Y+IO=%!hEZe~`~OFmzcgeH zSt5NAzYimuy1e%xV_zV#q+wUi6+SO1r{RMfLk6*_^=mDC$ui6&NnRV~7#hskyxETE zl4KIK(C83G)!VE7nGqR78a~fS#NQ1$3lZA^D}vqp-;#s*g+@h7;*$<|5!lI#6qYgy zuY3diV6u>*jcpeU$LZHUnkn_^^qXx3A-wae5BoZqeVxd@o*`>38uGuOCB|tJ{(Xod z7+IXhoN;Oin>EAdQ}!Pa*LCLH2K{CYx1oyuY6$z);BVA6qx!xKm2HJO55J(XF^9`c zX1_d}N$1;nwMv)yw{lS%b~rWMOI0N5FID|eH%3{uvHJ+iUUrYS9A)>xmZ#aB?w_04 zQS$7;mwS!SFF9Cz0Uww(N%pWEHPekqXFBlMCS1h4>SOkFp}rpd%2d?l3AmKF{Bd1@?W4vH z+syr_=O1~KWw^3&y$yK+t@EmkB0iW~mQj$@v!V_9^OMj5NUZnOei>oh_|S-STD#%z z?syR&k+CcT9>dZLl7hMFYa~p6jQ*asgEqlxt@J2wY7_nlU43Uf2Q(|h_R=Oi=@Q-! 
zuVl{faW3Hwc=B?FkL(h@N{;`4OE@$?nEsSLu}$db65b|ElW}^wgtx;M%!%*g67CQx z(Yw)VvBm%z^p+K23CGmQ{os3vdUWd$ZX6;f{C+@x@*$hs zK`-b>hb2e!>4Z5%b~mxEbY4)x!3*`X#ola!zkShKJ!5y%Vc zkMNtBU#6Q^MZZiWHq^}Kg|8!a+%qp4l-7%S=AQ}k#K${?R}T5Rye&B7>+<&JL$02i z8eYdQUSIGZ9ie`kHkYAZ;Y!10U~_8JkC?vZUhw?6DXE)Ti&Qn#{u1r=J&D#<{1QRJ zX8lcn%=9-}m)I4dh(ie0mx-8d8csm|7^gp`P1U9pviVw@aK$Eu8Q_@{bI02{ruR$d zYjL2Ng*np--hX^}#v2px*OJP35y5Tzkda*FR&&_SP5vqTCT?vkkesYeFeTt+l86c`G@qFaewEyh@4BX zdD$;RN7W&WuHMSu;JeSET%I;9MeZdW^B(Ven87KEAvdVYIVX!nW~4O}DBxzXqAf zUT@NbOUpf)(tx$qm?y;574@N}+3S_3$s-$(63Bm^HYcQd?^5Sy!&6A{FC_X&vnF4e zr^<^f8=9)zMMMALfZ~+dtB}NwGkfPdgs>X#%_p1BDNMX#2yM+L35mFF&<L@Qu8c-g=`#1g)uiQjEkOily zY^Rh0Y=9RV)myx{i;jHE`dh2j$SVQX#?8!MZwUVyS}$GEQOP0Y<%$iQs1!rJ)|IjH z7FTy|!WpdKYf#>m$b;(+9XX5peq7lr#JcH^=d6!cl&W{lgTA#F_ZZebG_5heM288m z_*NCz_@2C1gSSG>8TeT<6>$cWG9tpHiRs2>az$hZDIp({gXB2zCeKGi%+p5n#A@@? z=5t;XTTaf3NGA`Zl}};odWVo&JCi2^P3b-rszPMd^f4}+6_L@(JyXjh9c|+YEWM7 z4L&?u!S$P<Q&X+Ta;BG5N8SOw^VBm?>laT1$2OsGMhJ{pgf176<+(P@^XCia3)g zht6YbTq&E#;N9WN@a%%sqGgx zi@y%vfQgQXu=lF|iX2n09DA-RLw^2W7OrIBPqT3Gc?x@0v+yUXXl{$ok5?&1&aYCA zgw@E%k}7Qp>%GaM9)!wT9^YOXx$9Asa<-9{@-MFLismzn8DMxf8Zs^mW;ERxQoUr%gxyrfFh4twd>VflscKZ{k{#L-^p(}i z#$CO7sPUIa@t(G`QLVL>SKpU_aei>!KdQubP%Pb_As;nut)(!iB2ka6lOe3KLBq?l?g#96KvpryMm9Ynj zc`~zWeh>119WI5nH0c=M6nm}*?D;bG&lXQ*`E%W!W!WNy$=GIw?Ov8< z57=j=u+b9sabPd;z_&rh-YR0Jcg>Tn9U^;DKttFxZWXV@>eVbAw~ zohgN_kg(4Kd!+}ykIL8_!*;KOoCoaDQdqQveF?ap+RocDwt->W-1s(l!1kBIzHHF3 zJh=kw0uOwfBy5LZt*7HEqRS z675sZVCLJk9zN>Bd7rNr_kn!iq_+D=KgFWC!N@2I_baI(G^W7DH zfs79eIEjC?8~)EV9@@VYwpqfbbv;+&-*uees>OEXfR;F`Gj8Q-_JDm>3LEW$?QBP` zyc;Cm1&lUM-j!~=7kR+nAca-dig|Mie0v%zEEXs4^BC{$^=6(2Y+ee>k+AR9FCLb$ zjSSnpUo?8a9wmiEx?p?i7uU$xmro}va!OgEm_zq zziS`xgA_JL!k+MfH}S{iKJLfK*oPT*tXuzb*aLQ{6sDK3p8{^62W+K`y+*`#tA}em zU@w!x_S?ifY2D>bPT}gxw*Wbwvz|Q5u-(V~XFXs~mBOY;*tdZDp$F_8GIkupb{{L^ zJYYvjVYjU!-9wF~z9=(_|+m*UWO2?7V3*_H5-l6B12Wd^5|P&6(xI0suHXF*m_ 
zBw>FE+&mB1sS-A9;DFuN4f~>mXI19n}Vm?uXWw#E&6p$F`_GWHq?dppJTfPGTJew$${-LO+VU?}1$FH|*ISu&2w|vnA|7z;*0&?N7vZSt4O)Gi-l1>=7QY2g%r43EKed zn;x*2NZ8P7L!Ri}u(ckrf3}F_OHP#B67xh2Y_kXKNfLHnhOKqOz9Qkc))TXgJxRiD z0Pax_*x?d(17wve;b*C9S(bRf-YR3a{3P;i0q#x@*ex|;S;7Ynu*2Q3mw3RQFJtFR z*fg(8JYb)du=5yph#U4K57?ZHJzB!P4BR3Q*!dFn^9(!84LjTewn4_Oyea0%m%z>Q zfSoB}!&?FJ#Lxv>5L%>_WoKCmSQ9ww$qEVkByd;ZazIucjPrC7J_>$f03-BW_$1IO!I2w7y*ncv_^%*dgMSE6mVx%Hm-70aNcG@c*S)F9^lZUOtE&7NY&>s#iYb?iB)TvU4B}s(_SN zxrM}jUqSj0fhN>thhNsu!n+Q2VXMKb@Nq|fq8R>OQsdRnaoz*P{@ie~3Z82?A4vu- z2-^B1RhUb|2T=}p0DOns(2<4M3!wV}f&ZXqB*8{HBTPy&iG9%gQ^p_5GFOp^Gfhbm zf$D-JJ+#WtG>wfYNa}8m$cTW9yp?)rh`0fr6T08#5QZGuok3z7Dqg_f+KO2GwNk$| zDAi~Q*Y{AXkc!{zHdR!KdH4l*(Uu9qOGmRFr+X5yvVM(2I8#S8d0L@W5}z@IPi*~6 zwcMnDO!|}wdssLnP7^zdX{Zbz9v2rIt0s^M&eJ7oE`>Q{w##<0j(W$clF+M^|5~uG0PNT(jNIaPoar!3e3P4=@)|T{ z1;JdO!yRnInSKrb$hqv$#^sYQ@E2>P>Z`ba;UFZR2+$3q-$?8i*n4~l8f0-!&{%VUmI7Q(Uhi*~@AB{E}0!`dm@Hej7BnTUz z15Kh|gl?MJrt>}t3MJK4Kg|dk{l`UzF`IYiHU1zrgeRe~fptx|6@Gk2Ty{ zOB^YI=lL++Zy0T>Mb;0J9Oc8{NtmuO@~v_3-ayZ63+=L(HX1Frpk^of3A&SZpCXj) zq&bBBmQ{zS<^tKYY|iHfXjatHeKm1EtsC1zgZ68?P~|HB$}eq~{30yBmh-CQr}>4(0$vdAhn~`8{Fa7&pRv#wk|_v15jI8&dm3Q{8G_)8 zuz}8a_-zQn6bS2uumtp-bpQ2GSu)=pJGTmH48CFuE5rUlgn^qfJK>n$ct7b(IRTn} zbX*sAjKwnsG5$+oGN#t&NA=eMnmm^Mmt+C(sUGG(UB|9i|H5! 
z8a2N{W3NnOog0mBBs{8#;H+2YWEvc3ERbj%` zqCxYDro(BsC+aLv|FYs-`xf*xBF)>d8(+y6m`L;xv)Jo~v0fKD^|h1AZlDkBk2oP2 zWMDsN8^tmm2C*+r;%Q%e(n|Z{SW{C~x8xz%t#$~H{ZfP8cV0E^eW&wc@4Huu^<$Nn z6l?G=-(xOhnGJgo@c1*XDbUG(tyBd)1uvZXwjI0ocIYYF_&&U1I5gbymDsyiVxPB< zw0o~(yS*8I{_CQpudAhfdnNYm={~izZ$B`}tcvgpn`LSh=GLkTRNSoeCo}xQf=%yc zJZ%bOJKBo)0(>>qstTv`Md?aYFm|>vk5gd&7oKFg&M!0F$M)!3Vdv=FqaVoj=mjCO zE;>0z-lK07o|5+HF_)>2WP9|2Fx~0}eRXAomT@si=0XcDaxwTiCzBW&PTl{HytwUmi}S zYma563gj|9FSYS!5}jdI6o&9LXEiZnSe}j@`}pH&X>4}9pjV=HL?!=T#$&t}v^`Q^ z3{)t;o6dij^oaQxtim*oM6OjhS)(YJg1Ngl_SC2F7)K*Xn`ao073DsqtC@_ZWV47&_>^;e%4Z{x#J>r{iT%GKr*h=>DJWRVYV|V#G=5zJMH& zo(_-$RnVy!c)a|N8Fp$x)1k4ymW0#p3O_Et$zzU=^H49rbIwK7uR7!bDWc=NW2}a| zf8tv14enK=w_W|5-|&C2wP&# z(xxS0=?3tfYMEBn%*SW+%UEr2wM(FO5m~%9Y#LgH1gy(y%28{fgWwPj{-++i+)RgV z{om;txHoq{>=*7&?S>Ow3#a8LMVk9eNQ9-DhW$3gvh$DzZPdim6>%Vt?k_j-8fee7 z3VwL&Ufv^lBw8BiVFk7N81;WQ3hOC)+TI_IkO)PXFV?@Ep*Ri4~Nl{Y>bMdz=YPQ~8jyn?ycVVLsLywPJw_ALlSH zZxz-^oM5g_LMljP6sKnMv7jrM#p|6jvO;M_Hp*p2HV%AL-@}Yd3yW5288kRe1q&AS z?mst>aqwG))i%LudyK_W7W0Iv5NB7Sq^9w9oYY)erh3n7lZ7wlRs6fN>FLEJHo~k% zi?T88Cf75SsO^xdP?UAhUq|BPDf-@+PT%@sy@V5x-?IQo#JO57u6X}d)k)(?Se#HA zUzImhY31><6=w#(CsoxX*rhH07ITaL?X-be^B&9BiE8ehCsuLqOn9yI6=NdrWk31c z5@T=F9lKD8mDjW7Z#)qOiA)S?cpB`~#+9#p!o-UywNt-N(V698_!eD5qNkd}xREHK z?a;9%(WzMBk?6!`YtB{<^TXjJN?R!it6`Z`#4AS7HQ36e7p_n(*?g?dZtzzeeru;V zqSht@OJnx>Ld%1;HM=ka zsa#_=A#WPf?ZQ)ddkral46RE<_(kYQDUNWgg*6ruuGrKA5-wGKD?O|F0kSr_;v?iB@$p9f zjj@n|UE^TKgTo&~%`X9`0QI_=zv}QS@@-W6Qk<9~pGJkxB;;ehRmDw0K3*@szYBkh zjtoUz8fyBeOi=*-*xFYeugp~x6eN{ijlefo+a*<@m~-h>FTq-edhNVX_2&j(B`wu8 zRo9>OC0oDIks{QmgLs~VdlqCt({Rt*j5RMV4j1~8iGG|h<@M6F(4O{t_hlnZUz?Gh zb3G61E3^;OA1a^~-G5?(LZ_>QjL5Mf?jwxyXB_JH)eotMy_YoZ3h9@K@?2T7Gbyct zrk<|soB4k^{7R=d`j-wKe-%0XG)~wpND44dLk{eKCg*Z)=Lukxes?7i^+gcQ)=e{- zOc$zR(5DN~I92Ud7I$mZODD4jv2#2yw|GdwO#>};y|QqYNnJ>}@xovA5cRN+`1g_y zT}2*3LNL@6ambl3iZE|+S6fV>OC9r^Y68{F7THTPIXPwsL4*^mQz#R!efNB4;>HK5n`=WX8JO8!;t z3DT`>qV)~0+F537pz=uQp>cywsBAvoKL&r& zm^1OAZw?Y5=xFt7xS+pXTYam_E+p1C 
zKKABTWQ=S2!<3U5|HR*8d|1e+jKvu_`pNe#$t1QMuU!2Zo5s(}=G9YaTc)09Euqi- z)i!mIU*j*M@jMRCk;FF6YhW0jjHJVFlG0{WE;AH{yqOgFZOo7a$oGbfJ>7I#m7*OH zlFe`HP^h-rc4*$yz7YCRQn$;0EK@{HY`LAaIMWVWLZc&!C`NpoWdFu-%yBFM^@D`} zg1M>ca;6Z^QO*?NnG;jUY2*}|I#U>p6pnI9;hqqr zFJhu~o%W#ZgMc2FsXWi$tcd(`gGxhF`3mMjMHa=dolmOA6pgcgLuXU;qH`?;Qu^i3 zKUMj$l+Hs+NsVHV?t4Xl`s+o|VafqlSII}NGLL28fM)JQA5mP@uhs%%X% z`FNpmd8w-|w;d$Mv7xCZCNXHws7pVV1o z`+PgRJE}SFe8|LYv@M3Pw7v}sebK6$YM?uWo^8&PFYL_!zv!EwRY-=FKFZ(betK!E zu)>Cw5cFZLR0%?>rB#TxEu7{{HsnK}>rpLPvBaCq!L|Jr$opQoyN?1p&a}Qa9|h=3 zSPmq~J_-c*I;Ym*sKu~n5pb%&ulp#l3xTqa0=wWR`zUY-8)P2^cA<`0i!0y>1k!TY z>Dyt;?|yn?D{RUIcuS=|3ZNr^)bEEK|Kk6_S%Dj2|Bf`z#g&O`$70wuE*8)K@YQ{S z^I7b|Rm?MSUW)W~@fglnk)AK$SvJUe;IJ{AL8SX!eO z8S|K*!#7ITVFyYDPlxa+fjUdpM{p|;k^nCzLpW;doQ=L@Qq8I|+Uw&aC|yOZw@!_o zk)D%_6EaYL<87$F$e#(dKCEtv{tl7XlD|W{5C)4#>c`d&4*>3d8C!88YAf>q@M9JF z>iSlA03at;UDlDh_1~ z=mq}l!hX~Tk-zI!k-wketHkLM!3VYE0OsTV*h#`{(CXyxTJ@d$)u88?2`wzj-?QMa z0Q}7XZ#Oai-F{iDBRVO!KgSu282`qqIZfufrDX8Km`f_gq-SPwQ8WJxO3&5@e68L4 zTqbn!{R7VWH!8fS?@#LM^EmpyXzVn=myC}cr$CYc8m!lR8nr%~aGJdx9xoLS9ZNdr z@FQAiS2taQBqQ8}HEM6}r;MW~24rl_c%M%&Ob))!u-b6LwAB>Hb*Cfx^7n9pj~yq7 zXt18zSj$tK8`BKn_sg1SuBmGFB^~kqOh?YGgO1b`U$O#okwN#EhKOHHLmxQ}7pz*B zG>CQevdx(WtTN!ch^C>q`mHiOOT!GUAY6xqI+ahg2)F8JDxy86B9o;8x<^>Yrk>Ri zHPSI*vY3vgNpv+!y<&u$$MfZw#Snfgf6k`z3Jlq!|Jb%QAozRoz&P{Am>rG#W*^#a z3p^KVxISfcrjMSwHjLMn*`W*75FBMHGMU(JWy8`x@uM(HTA!r5Ode=4?!G>Q|vI2i;o;O(fiPm9Qi43%O!Jl*s zEDuy1i`A%AE<-#6r-OBg0pq41y!;;({0ykL7I?e$F+*O0v6h~lV29@#x;ASD@0aS` zM^DVE(Eev&m;N&Pf@zp{PLyegBD>bMoNP9BU8@gQvDyn%IZTUa|3JG8FQ& z2w(Iprdr6fF;0S#`RtGWiE-Q$sr?`a)NM?g2s!9(?eJHPYIu?M+Ngt% zao}61&F-*rx*`~dnB&sm#{#p}#YW$aYWOks+DL2Lv#d2jvRtiYEzOs;w0W$yDW#Uy z0=xYYa&4PmC)T!wkb1b*whxi6)M^bw(J>T_8;YSm3Mg060yiUXXv=F6{E;^?ICZ86 z^2Sn2y@R(1L3Qrl!69Wr{ue+u;8m25x~|t9{wTB!`ffT#hD_Re;U#sUTW43fQ*1Iav0l;?Z#Q~RP(8lN=zQZ&i@VG0k1UuNsm5s zjgB$7&TuH1dlEBXeaAMe%N1AR`Jbvp8HVI9=XIxF6wxl4u1#^iHRRtHVK@I2?rFHR z+!)wVzFzuBni4WTIybTllOgMgGb^`@1m{`2!rnc-1=4)lwg#9#=7&SZugX{8By>CM 
z^5}UCbUi{aw^QiUyS$29;KgWcs28V-4QJXV%7g(4F*wa2m=EC;LxN2M*uFGI6nGD6 zI2M`QpJ^39AEF&+sF}a63QrD~Lbf5K1|ih9fFNvcCt*KTd2Jzs|78~a|KId!72yBh z9*T1uTZE}LCAZS_bycs4>MgGt{{{a8tvETwo7vH}KyODoex*stQ9fX42|f;2E!>HI z_XKPx$NQ4O$P;=t%2~|I@*yWbj_WC07vr>K2kwIq#~+uwEv+wXX(8c8U#q=(@dV_} zumE4-NQ8|ywX3B&#Cm9BIc;gt{+T|x{@B9fowl?(;CsgPbHg2WSKNaVZkP-1^*1|l zH%Yd?I>v7$TC2CrZv-sXJGu3C;dYIL3g0frI{7{5g6p&?7V}|0a9#5u`mX$%BxG2G z$oB55U2wbdyQ&Mn+wRKmFqvP23+`;-(s@KL^xAD@IlP8TEDK6S*$jPuS`VBBW{2-U z4Ht+y`X2g8D!G}|h2BdGpxGUk#Y!{lm=B~`;=ZgMzEWrjQ@!uwM|JXv2fAObc9#1tl==#udMcphgW89=gKPN=x@-A}8UXrZ<|Ccu zlMCDiBg(Z1HA9XZu`2eSFOjHo$y5Hx1MZlVV>Zs%-!-=l?_B0cnAamOC@&%}GOvH0dP9p)T9aSaFTbelk?}U< zjov&urCYK8?ts0>n+^KCiVb!l+g6lx`2=jDaUuhSPA)^#roUKdyxvaUzr zed~e>L)S$Xl6CzH73(4j>H45=Ng~(hJ@c0>H~}C8{dqVy6nk*AEN9dmmGV0h`P~h& zW|S}TTbvcA-XT@7V^0`sXsnQnz|WWUC~0fv=Q=X^u#PNwL(J#FxD#CSkpEYpuY_@R zVgNUU>GNTl$R6oYYf`&tr-zeTum=$IPCIgE5?Ox%63 z@!v2By1JA^DA!G+oYp77%h5EhuuQq@w(wn>sVp12Hp)#8^WC3R58IMddDW@IbhV7_ zg1plrnCimVNZcxn|K(PdI!qCvTGxV;3Mh_podVC{wTeP&0e2c!#h0p_`f`e}3d|=` zAukJG1^-RB8|d@+Ecn7&RZ6{#3^RwNlKutL-c1TL_f1aa@54Ti9_<)F8~s#t z49>qi-Rlf&sqZ?k-@5KAF8{W^gbyai)t;OjsSi#{e9}@ki^8RI10$GWcG_%~q)&e_q z*pG(9AsSvtn=u>JH);daoD$lp%i-C944Qy>X{F7ZiwkSUxu_cay46Akb7nQ(d(|u8 zsX=9;-lzXyArFQa$D#jO3$N5TA{b#e_887F>NtGbrh{Ms+j!UrHd z^c25U+#U1?=qW{dMKZnn&Eq&qaTF-Va6e#fO;18?5kl&k**v=ez42(voonVa^uVWT zN#tHPzT#^yp}*K)+XAm8ML75UDD;&&%PEkR)ALd}`JtTBrE-eXP<;fo{Y-i@#t~1Y zTs~P?)oTZdh_y4PuikJteUn`~J8uwD#S|gSc`X{VBerqW`&(`vtrNq@b zOQ{(vdG}IkfyEqT=rwni(z7+Rl&;sbU_T#cn8*2&UeL$tEGOz$wzEAYpeJ*-rxuMJ z+VN`pHF)Z27M`#5R8k36u|1Vw7S${au5p&s+}i&?CDkmP$13#K+tVeRdr5t1bC%Q^ z=m7n;l4=%m*}Av0q~_IhmQ-Hdoh7xs&Mt&_D5=#pT2g)}spPt8gRzF4#mWim*Y4a} z^j0I7cEd<%{n{c-fDW6ue(i^G*0h$cUrj{=xXzVpisZ|y>&g|QVZGtdB&PH9(QjC} zo`iht{BxqN<4NA#r(p>{qim*&t%X1OUD4Je06GiJRHG>(n%fP3{t1b8;gy5iAp35r zn1#Rf70{%Pt)af6BO+YvEShnG8aKgPR`u^G-xlFPH@xG&6VKZXZ^Q4z z`w?Cf^{_?~&%$Viyzlp=_Y*ff4tR8YSi`+PVFS0flnhv6?6&932t{PTg{X}KHb!p@ z_%z@`%*KHmV>gcA13ooz`Kf>5y6qlp$~H#r8MKkC9{^jh=sn8yF?&?&2kud?kKN<7 
ze$XDx`UmzrkmtR=Sw17SJ;aGkm%{===bPK zlFD_SHZQ{-Lm%jf;xt~^*rb_bHizNcKzw@&T3{Gqv1-Ic zmI1M*ng5y!PsR*kHxOdY5hZ^~iOrAP9S+Y`kV)5=%X;mZn=&ZB_ipNq(Xf|b-S>9Q z!Lq(v6oY4)LkqVgsmjCfmo(((JcsplEB5Y5q_%Qa@+^sFH_*HW9m_CoFk1ss?t?7+ zp&7}4lw#a=6jeO~KJI6H{Hdb^Yn8rvbUdf~>3#5Yp^mOn&R~`DRu%0vTk3ZnnwUIE z`u#P>`)qX+whps)Xy*Y5xw)VH=2QG;CHoqVuZiqyKYZOTe|@bgtWXiVsXV+uF>ph9 z=oTjNg`aCp$^8W+w_kxGcRAL^>#IQjzgh; zzi&$dI9pav9>iFQk!NR)!wB0pZXT~4(JX}4Lk2ut$@XQZRv$}hEMHI+j(mHyW%fR0 z_!)Rve+l_E2lH12q!kyf&5!_uKn4sy>-_gTa?EDO8OrqRNN8JGT7*;95Xe_HU_XDs zRW122R!frbe0&jXnQ+(RS_|3T+mHhgJfA{%4X$WhD{T-<;^brO7dcpw1bn&y`529w zG!Qa^L3@=O==q{qTs9;!`rU)~AkFLJ&^qGLI)>9{ICb5tW zIkQt^cMW18kF$`sa!jcY>>A8M#;}kTIe$!ja911)8IF)e=IN{CjWJ@1rI=SGNP^pUb$BtiguAtjb%ISPC2+;hz1ue=1r)pNM~5RR9BZ&H^X9( z?q@jb>hbCpVHo5lKVan)hZZuY8da-EI9obq*dZOcv`a@WzYN=JT*bI{;@W|$NV+!T zPOmh`Ri+kbNeRwm8l!>^&tcSMtnsOQ%kekNxv)RC`DtIWubJfB$NWeVto=eDRitu> zFtb6DOHfa!;IKyQL3G*3GxW1gZ<25F-gWv>4tO>`Q%k=ccKJpn&z=5IAyHzs_K--Q zSo{xB{;)=pr(Om>A`etL5@ilf^~6J_#Df}iaEi7^9_Y7)F5ldF(91kHc68-o{yp&! 
zE%BfM9is~!*LdFKCM$bEOeskwxh0H_cwyyaw>7IDlEb-7Cbm+NQPCBkU z&~L>q-`sgfl6jcn#>4IT_mU6OB_4c1XMqbHS03m$-sPJ+4@BnS^46~Tu>GERFi1RT zK?hp29`k{Ii*otq&cku$osQOrsct-^-4hR&s>S+1`@$wj$UKz={dUvkn>!DSWgg7$ zcg=^Im+qx3@+2MtK&RA&j%!)aZ(Ciyx$`he<{{0EhrD~@VY0+SAn43>q2tN}{bq9c z=FWqId6lDOajvLqK8(I69`q6qexNhRg^nu^^jlw-Z|*!Cka;k=@o;nAy_CfTi&!6g zK*x&tj;H!Szg=_r=FY=>nTL~q@0t&5?umyr5)X7-+UY{aH6Q4=0+(;@JjBa9jCSK; zkj%rc&0$g{9%vu%ybB#y9_Y7eF5ldFXpr}z4{qt24>eCy8J3zmeUCDI*Zq@(#Af0C z6Cdz@D6>E(uNkKiMzi}uXrML=^Ko85lduCe2mkl`Da}HCm-Tp)u;h2G@0*3s-0+?P z-ilJJfF{0H8c^cT_4@n7hJo4@7Jm{{*rTArVJ!V7q{5M0^iP|wLbWzmPwhS4)c1am zS`D1gt8B}O;!$cfP~k{iOKe0hS`84Ez*kaqrA-+~k#P^l-H3aV0;hDS zY)Q(zq@5>%xTE+x8a4|??L_M;t;VKRO|oKM6F-5xJiHk8YZ*6d)<$zW~y8)xhva?;{T?J9>1t zLZfIov6v=1BH|Q?Ppwv7FsW=yNwF!~^r-OiKGo)Geuc${gmGI<|2DB5&WZ4<{rC`$ zrh#&xTz5eKl|y|pmo#ICk)|mcsLH?!geeQ1=@!%W?swmgeMVc-q3ZEx>pIo?sxQPh z6@B}tYCY%_^Pgf5l%AdWB-8a<51oQZPdIf7UV%=CVIYT^euCb{Iz9|vb}JB^O*5T zybXJ(!_1GQ_!j#?gQ*$k@=(2kIIg^+87BbxLa(6vt}w1!arbq8g@NlDg%M0T(j@$V zJ#4Bk@Lp}VOz}iSf0q+k=!u9i;IT=#R!8kE2C(N#bvvq>g&Eb#-I_hvg~d*+`o4fK z=sH~5f5kqmmhHoSRqLDiRjpt4QS61$^Xt%#nT`UrNWe}MEE1Z85^JEFwpz1r)Y>A% zW3O-?)_{X8&CvXwIsj*&6ym&sH0UYJ#d^ooq0$nDW?@!42 zp5~VCGjM7jJ%h$BC|JI8F8OYp*o|s*M7?U<&Uf3`eP8zt{w&X-*%)mxcSoL&mh${z zDbF7;kCO8|T*~urk>^j<(mW49o(C2OAkX#ddhBk-8j#AQnxMB!b3GaQn}PXQNny9B z2kwbhs&Sy=qF646*X=_t7hCQ8YB?g9VW3CKR!KZ)jEFu{`n^ye%2Mj^ZIaupqA4%hH_n+Hxr-R%1z zYa%gmuvDO0RedC_s!(WEH48~N4Jd?ZRrNIokI&?9pzZlWzp6>duUU!q&@3E~*QHs< zcJrFLwWAK!-CBfii<*U43wr6^i5J_IqACdx5 zUeVXwTas5$eW)g3YRwA9$2^&q{@9x`ANU47X1jcIALnn&>%uQLbRFl@?umX7=xdP= z@h&u6>1#nh(&d{w{i8DdG&lM!8KV7o0Qb}cHTTpLON_4=hoCKuvKT%Z0{LhYba8WF zZ<}8>X}lteY8LsMCrE8|F?!u|w%4IM(hN&LoG+PQK78c;U_m6rQqkEjXeF=Y=*n9a~tGaOPx#j*WV+De;*>)0Qq z97o95;S%-`V9y1%8rW-OY*8DQ#9k|RYQp}js?(n`sn%Rj3!LnDUPL8N}s zt1fTKMcZ87)Qire{?{OnPp$7P_sw{pBHTOp8n;aQP8s5t>!($lN`pMHv`7fHT zW}LGr6mRH#;InUa5@~uj=L?%w7tBxNCt8bj@9{tIJ{`LD`!l|E1fBfWVOTqP+*b~L z(aDCtF8$D^(}kG5@@8{5gXE;PF;q2~3yt+Pk9CBGj&&IP#yWy#jCJVejCJ@eR#oej 
zp+gm;>dI$v+qqEgyzRX1^f;|;weBlNP|+EOx@a;-8fusN0>!s6!S^eNq4cy(>kG@6 zoY)4HudXpv7s8!3uHp_ih6dbco(vkPxF$OcxEpW}!aWFgJ??tk{c!igU5mRGcQx+n z(9n2_G=1FF@(QHlG0J%kcu~SSL^;zmlS|_~b00)Phdwi|p&T}0)T0=m(v;HgWNT!1 z-221#ga*$&4?*x&I{c zJ{`8i@Y_KiNNm`LncrDWcP@{EHLoUgMCjlka|q8lLI+L9U&EjR{0$m(1%LH}LNYjq z-=OLEs~uE;zv@AuBNQWlDqlWsCZ|0yn0v%bkdL78)M=Zq&*`SC&y&UkKd;76#YF2S zA5Di(TTDY@%ICJvHGa00exWhgK*d5uE*UEcR$>>`{2RzvIuF+`Anqc`|AH2d+Ny4iVI909^^QRxIssbfO_&ke3F|KM?Qo@OD z@E+1xBA?nm^**3KLBw)-v}M@7e^=2IozswZs2=$UDL7q2Lk7pYD& zWb$025N@qZSHx-A8Buh+%&xCYZ-pOB{jp}@vdj9nQ)j#hRyE@9$%*~%h0hO-{O| zyU=Zy*ZODP?o4mcZ=yRFr~A6nJzG~K(~Z0f-QzOdG#9#OX8w9UcTI16by%13U0oL@ zm#?L|QwHQ(zRP5~7vJhk?|jguI`)0pd~mQd9}Jb|gZE)Rh#3bo>VnyPu&0^&w50Xz zdnx~^4OgRJwZuvM(Dx=GsVcUrRZI;Z})VuIiC+p~+T5~5~`RUGbA~6wWgQTS|wnIw4YdJk@ zF+Jy6KMvHIo^#gAAep}Ym}@!hW%Q{`auDb{Z4!*XiEg-so)OpC4zp`ba{agrtp}%! z(3S2yneP5KJL%3Ickk(KfTg5c`>%Cr|97D)`v0Tp9qmFlKk2v7Ev`3p>35X1Cb{3C zS{|O#EBgPVbkD5rOz&mmZ=pN4zH2)?ThrMNBkw}@xYgOurn%7FJodNHjj!+8@2tiN z*kZkEf*)$n>0Kt%z4%6FdJq1ld~U5Xb?NUW)R<(tC3m6A$#iG9(5-yzx1{%QT~U|v z4Xi1W%C||#x(i*MO!wxh&h$n;`&;NPsp~qfT(UXGl_t7}@3CH8koTbHxX_JD{4I2o z>bj2K1vcmS-6S|%)|Ia9SM>i+>vPNNo#{3G@wd>`)D?BN&QBXaP%&8pOVwN z*oCh4H_<&`+cmx6F6qs=3*8WzuG)oe>5Si!-nVPRX1Vs0x2$2aobAYU|KByAe~{Pi zYgTsV^Nim_cXDmlexumx>^GX2o|~usO7#Cv%Qwh{uKqXC?S*l`HNA7KCb@jSblJmp zP4Dw^dN;q;ncjoX{FZ#aQq#3w#alb;)!Mtz9VpX{bfG)@H__dK{>e4Ht<|0V)l`pk z8-+S~kGNz-XL|L&iSEprqAvC7aCMPfuMBsg`=LyCmk&s3;LJK8_ z^X9V3ess(j)(mMBX?SHR^}rgUB#pb4{-_|0Sxd_wwiD%CM0IwkeIn~>&>t~%ef(A&qt4x*l;T6M>!-@rZz{S_N`3q zqD*Z$s5z=%tJQ#7ue(swFluXaK9{ja*F{Sy86#mIm9dY>*fb^Qfvp9$#pXFBe!!k- z-Y+BTWn{gCyhBFbAtR3f@@63GfPDBa$d(-P;BfO68Too`n8fqV8d^V!%HENY*UHF0 zl>La8?4+1CKL=!b?${Zb&FFJK>@?iFYz!=@L{MPGT7!_)A5UQ7INcVON@ z`>g;@fc#vLTLOU;^jgonr#CUt)4z!~T zZCTDq87&h1{NnOs@_T4agOu{V68eV>o$k>dw2&wgt(H6rJUm~~Ww(Gd)Bt^odAE$! 
zYI9;atYR+yUB=4og0<)ltY%<6VBRESnQaz{uWux*H8R$mE?7tIz`B&fm93JIO0X}P zQSRJx5KDQdg!wlqCyR6Fs6@w*?{erEvN5M?DaT=Fgle0M`xdUd4X?JS_LaNVe#Gmy z@0?r6qx(*vA9&u9fL2W!-oXha=n0}adx9VKcIjP!F&G%MCn%|1h~F$0djgkV)a*S2 zZGPIWldylPfmdClw11*Er=oYGJK`^y8zk-j&uYE0n;?gA?)r4L^Z9Z+pUZldAj?A2 zSvT(e_nE-G)^ZS>jJbo83wu}Lm-T|9-huDxa`eMk+tMCH@qb7=7x1X6Yw@2+CX>k| zCRmGjbQ`vm)R+)LNtO3QmffteMVl8Y{1a ze_j{XoB>h$^@l)G?FG#rV(qPE@*brKv=in0qn!^%=g;;Q8}nx~Kibq;WkC8uU6nES z0=R0UF|&=C_lubmu#ARA<{jhg(^hO`_-+-icYdi=zFjWAiFB70nEkNh+i!dp?ZYd+ zS$fRZ?Q`p~zqu0BNZS-Fn-zi=c6d?tC&jQ=X?iBLC$~_SXL~X8LIq zsx?6ML-Ge*d|oBzx!L(hbe>LkzT}*+b~Pk8=MLus#ypO?2cum1B3!xH{}Zgm3!EbA z$v0#F3Evd&{bzhr+k8c z1;8o!w+Ev(50rWKY541V@K@y&?pj))^VjbC=||0|V?<6tm zGGHgTHnYjU>f@(zvMxcg6>F-<$66h&sZ&6(O(qK;^fSTw>$u5ZJWF#~)L-$J+4u?@unJyvWlrV} z#_#EL{_sA|$WTXB``}mUhjBss+MBU8J|XR(|Erv*yr8A^nD3RZIG>BVrn=61hNzhT zYVf_&W4_5>alV&(%vbak=lf2N`L6rE8ou}RnD4`{INzWi^KJWz^J(4AlZUj$Uva)a z^_XwwSDf$HJ?0zx73ce5kNHx+;(Y(rW4`lW={)%b?WP{{?fZ)JCH9za%~zT)tbN+; zJb8#4D!$782y4ITF`xPs=lfZY`38Q)`5x~v-{r4V4*Kmeo%e?`aF7)g{bs&v<`4MT4zWbEzS8zkM3h(I^^ix$4sXOQ=L7~QMN<3 z=Z0X{>E30MbIQbiTXLzl4r_&I(`?y&46nmWx6x**qN$9wP#G;wev6LNA?>kXBbHyk z6(Ma_Fn7fuN1ahmoZ=jB%vnkwwEkQ2l}7zG_i5_iqnlYst0bmX zjE#4duJNNL5s3`+Tf49_cb!FAL5jQo+Prn*Ogp~_-K-8bs*6LMfnSA3Tl+5|&6JS# z2hx;a-ik>5w$S6k=N9$bVQmBb^h4VFzMhrEVXDtBQ0vT9O4u2bJz;H`@7zM0vhUkA zrBKz+;I@oUR7CCeM(m9#jI7c0mL~^zW`L;Q&Q@=$PE$WUwJET^NNC^rZC-FinyWpgU%s|b+u;wMMbC)x&d33eK!1+2|7{)o{9Oli5TP|p)>E#~Q z7Wfymjxb=J2h0b&I?Of+a|=Cl3fe`DiozKHv#kB6Tl|~^Rdgp6CjuVy_Z>mMwQq&A zQ}9Hh?KgDLx4=*Bbb{+kkG#=z$b$=l+=B(BC>7J7NNqkP+^%@P+_<1bg!TE z4r_k z_Svee%HC>P)pu*$s;sSH+5c~bGoyKGb#H?j{|hx9bT$V1*ct=bxs8Fo!!sONYX$tt zTYeM#ZSHS^?+y=ZikrF&D0z>w(x6hiFQkn&sI38aAGpt=&j>+V@Y1z=YfWW~-9GUaAu%Fm=sNkpc+HqVAkNv2n4 zFDX+}kSV>SOer>GN{z2o@}{;Qp>@lCe1y^tpg^m^aBpmmUe(LrxW%P}yT zDKUxzBWt^5ef;LWRm!Rbm8tKAw6cI%O$Ij`xIG1KqL*l7m$76HVE9|y1W5KOC 
zsN>cY6u51eZ&OM*pE37*qH;DTSsB?MpO{yHV>LLgmN@nT$EJDd;Fz&Gb9HZtVV0rpV&5quU%8g&=WQ*M{ZjAleqamc#e(sC4Hl?q+)h|qbw?LrLNDR^5@PEc-jXATHXJbA6MQ^AnIiv#rW$UMu*l-wcwi zPJpgIm2~BPsY~1DS)gm;YTIh7r0aLWp;&3%dYI?KhWUIxY3|Z~AC?WEc+|lu37)_)U7dQ(I8rOS*)Q{;j z9w}*j+#@(_K-B<)#{Wx)pO2w%A{4ej;R#TYAe%L^+JNDBEi#;;3L#z z&RP+n0Ov8zB1FOluWRlg{d@$y zLgGdOcye41dVPw8An0X-UP;hv6SfWagZSfK48N_@>nU#+y>g&ek)+qOZuEK#dQm$d z>2=aC=vDPy_9yhoWC(perBAZ57<;F)7kzZHg!~k@wpi5{TgD+Zw+rVQb3E zoQ`}sq^&?7j&s_Yzh=niPVI+un7P^6tML}BIXnH`fA(k)i(4tqcW$=Z9I5LBuhbEn z4H1{?G;E>}^?*TboIxRKso;|$C=`*>4V=`=KuYCT4K^rp;Pa+7vl?^_yth}QuprYd zXwG1NX+iGii3|NB>}EQ)|Gvg_%ZQp&E@kk3gL>R8^<@ajBd9kzm>#p|Dxuz+=!@S$ zz1=>+Ls?JTl!4IC*$&MDHl-N4<&7iW{!Od$9G~YgnlfcC!@HKNlc6S3A8J~l=E3T?LJ9Nu4#Cg-8wj`kQ;n4v7f3$h%>oj;T72b1mUa^nt7t}@PbV=s`bk2y<`Cy=n z&WC)lbpFVt6ucFsa|Lvs5)gELE)YxS6@f4^P$_&DI>$*mAN-C@dH+^=2r-uUZOXeD z$;!^MM8!7Ks$}!o#wh1IkmAon$4z;mo3*fW~&crvS zRVO)a#RKWoz6Qm{IBgA6pjZw3D7a%F6dU9Gl|8v(lJTwJ(#*i2*4ONbjvJub46OE$ z_6oC&kQQm4Jw(sOpyu|Qq#s8w6{vS2C~n}qz3O&@E5Fa30+WwfR4Wusgreo#CZ8fZ zrj4x{Yy9SMeiLviL-)Iyo(O4^JX@b2Pch=}bQ>c}&dv9Q?K#|OXHH+?PAUI@bI+E; z{q09OmHlTJ;%xMpPILQ_PHQ{&ccGR4P+P8IT|OJqj=JNxhol4>&*IzqgsHTl;;mEP z`d07OQ}+6bKYvin-$L3$p$}N0pUM>~<93IquVVrF#R@h1V^?TDe%e^CQnZd08twJC zR-m8HeT`mYKGuP;C>>eK=hHK+%CZ}fr@a4=@k$!!gQZ*zQ?VrEps;$Y+8*%dp!cdg zLaM**PA|tF+LMWF6jb#Gg4zUws*C)Bs>SSo@ym3qG85MUj`9b!8UDnWojMoz&U<`7 zch-ad=O4v$ucs!{`v8v!U%VH%?hh2ViP*j9%E?OX$uOR`ljm8C=S`B&3$vDG#JyO2xVg zD%%S~+CuObxsWV>Tub`fCoY)G4%<`_WIPT;LYKH@RT0h}$)4U1sW^ye0ukm^V z_O^`4&sBcdS(3so_gOAg!0mp|etv7gLrgXxDFTuO1|<9NOG6?%ksm+~9vDO3hcVr4 zQ#SSzeqsNK?8Uo&+G62e59?MN&*#_v7M^dr`xB4#@lQOKrC2fkx;7J zYV+ifHrzkG)zm;<)?d}uo~DdE@lJWg{tG50tj+KVdM6ok?_qA6G4~WX_ho1;aGQ#M zawtXKR0iILrd@GpmEZ(VDwPHEnQDeTV?3D%oFyDv1ayj2K z{*tx>pB*ySTi@YZehM6<|~2Ue%z8d(X>_PxNW}at&Q#y zC8SmR%iB!11EtyQ`#q3yDh_WgGVgfieaV>j1b%dgScJfQ4|?oqvcNkt2Jh#6RT9p8 ze_7ii@42}feCOt(4ZX3rXENh#W5z9V#wiBwalpEeIdxcjM`4}rt7%ml#X5EPs@i^4 
zL7pw&l)vX4Q(0JBBB70}(>eCvSYz&ma_$0S?iAq7m+)r9z&qSnr$h8{FpX~q`u}zN^qyt~FyA?)eGoa!Zu8YM-+4StvF;<~e8-LX;+U`Q zALcvd)%j?FoNvRxFgHiV((*~>+h@#Ymh){f=G%_lTJjI`Z8zqdBInBoKDQ@!zDnj> zW6XCN4>+VfZ_GD~`P!3u;=9b4uUN+y_>c-Q`zDY1o-*d!Dd&6Gm`~W%EB;}=S;l;d zjxX>zy3N<&4N^0r({H()Z?rMryV$Db|1jS;W4@F0i#Vs$FyGv6^S#e}1C9A+%lSk^ zO|0)c<_lYsmB)>E_3`hfso8js_(a?(XsISYvqLSohck;&#zr*pu)SlZinbrhh~TE*8}Wj~!EPe3l*rBE~E% z3-c3#Po`kSjdD($m@hR&HJ`>f?=e1A@acAErSS<{t$5n83fc3&16iF(&;QBvC=X~^ z+%qU{)EZwkUfL01?XK(Gt4lA(H(i6(J+3nOZ6r_apCqy%u-T5+ zizNZC?sg5V8cf_VtbOrCiTZ{sk0@AJ)4nKHH)CN4%{>*rr_S9sMsu%phlKZ^?;yfr z4u*(4mOf@vo++W9!ryF)%4;b;%2-~}iL?7+(o#@=t3vB@{AAZ!z%9RCkH%}>`=9O4>NNd8A zxs!D?t5;op?o|p!6g#BN@dUKJhLYtR|-z>zDxRAiW^tW9{KLAt%f zNxw7w?cTpOmb~ zS-@}-jd2wc9SmKA;pcZa|ARC@jzC_cz$`e=Qr^DAbzQ! zL8*NHDfmC~{IkITPl%;K0nc|B&#&kC&R{lv%nF`8o6)rN(x;$YQ1 z^y!IDR-BCRu9ws?_w`j@Zc&PUP?^}AaLN6>v?Hl&($@4@lXf6c?v-@*N(Or+)7UF> z)#3E(=`Ht47JH?S+$+nd`|0B-ZY``%rQOC@AJ`<| zc?#(-)}BfTEi;(%s7*OMg4%1wp-*f|7Wlg3BKApsbPcSb6_OVN+6ugfp!Qm*rmdfZ zusjXg0pWL%i&KG5SXNmEd@liCJKnV5jUwP9#sh~$c;iGMKxfph_;`>T1IdsX@KFiD z%_C3VjBN*8&j3>uL%-WL{?cWstN!&3$OT9${RZR!w;OFBYMDsn={nhqpjH{0+twf0 z6E4*~I33v%4kTQ9jyvoE^kH07lLUSH81Rn*{yGExgX~*md<^{00RL-&Sh=$^fW2+N zUj+QK0!zMWQ`$?(Nt`BEW=d9;FoqlY_S$z-xz)zdw{+A>;=V^4w{7Vfx1hQN-z*u8 z8&@CDo`U{KO^JuF)!2(z*1?*#TyXe09t)A>z^^Wpn~*lZFLYo8{-ba}z5ze-5cpGK z;Lig7if-^v1OCJ4Ex{iL(P(jj12b*PI{epHh%a;~$;u&yRn4DRvg^mvzX)nq151z* z#7VfJ0?P`xbiSxZhE@a;E-V5r6}V#g;(6dYAwA5Xl!rlWKim=Fi$nf^cG(}x7gpeU z&VZ{HxOV%4oFT9P(+uD#`3IO<448@~Oq067)aln@y5{W;QyDNVGhlKe3xeG7*#*;c zz|`CgrhVinOAVMxg!cD~GY<3c{SSRGYU{3A~ZzP_`RP+YFet_kbzL3Y8l$ zEtW8ijDe{Xm}Yf@=^0@1BK^hsohJ@o;6F^h`i$46lnQwM0j4|yrezYQJ%QM@n{B{k z?*Y?BV7e^*#Gp1_!gLnSimaX27tnIL!89D0=p7(oY5=B_zOlem4NM!Zv#%L~PP*LJ zeO7dWA1%y%mPizlV^|i{<^*(UAK=_(}B3gI@5FPc!O8c|E zkTxBNgtVUqL@RvCJ1-4QR8oQE>=!me4`;}@MG&4q3)71!pq-XGuP~r%;k_}wso>%O zxj|jhZI>`SB&jklj=YVWQ;(%b;vqpY{Ycb+@?ucC=3mnGMexwKMBk(A;0oK$;2&PR z5KO;Xh4+Wu%yV?@ru(Zw?ywAKV~uAml+VhS_~!Dgf!&_f*TA9JzrlFcLBAvRS>qzl 
z(*4bx9BlD_cwRHlyDmLMWT$+dn|1fOwwV%@ZP&;lNWWW;^PszY4(WA6R}tG7GBm8* z-B6Lj-GTNz5&Lme)Jxwsq~-a9Owj#W;wn7lus3j?N>CiKtk) z)m79Oq}*;M%OLKMM<#F%Wl(k8=6L$eYuX<>TUz7R)5pb;YtKx`?3FP5VS9#iztaDS zxX(?dZ{+-3QhoxI7q%T76439LPk*~E_BYG0FqU~;di+Jiy#m?-1IB&qeKaaC{uLOH z5|>@Hz^ZKg+$LhM_pfd#?q+SrVQr@lNyOS#tZ7F?%=?HZed!TT=F%JOX$Q(waZ~xw znnb$#rGQN;HwC8SWo2SNU}4IrQ3_F`RH8=dOF!{sZVh57%x%l4Do|r1MnsXe2DFER zeOg4{rU=yUvjYYiP|uc7bDJYkqf^t-NkbBqC%>>L!K-2)W+T^^%7_(u#~5ZOcK{6?`;6H2o-t59qhAl1 zvWYCXkSVRf{mXU+tCoo!mU|2t=8H&{_lZC1J57)GfdNkv`F~-*i2OfNo+tT;2G0<& z+hw1+#tkNSe?`!)e~;%qYn=haHvFmU?z8%jMwt5`*#GhruWdxRuN ztx7W>dQ3v(a-|q{*c>3*E4^zpr-Z0ULPWpkfJLza(-LZO)=dSj7?^TnU~2NL0H(e% zFf{|y6a%Jl-nqma_m_mU<8B?22yF&SNc>US6amQs1Co8%+Sqs!l2YJ!=D1C{o{ldC z9IsIq_0MQC+q1t1ZS25NXTWjUv!Tu0Q9fdk_t@!j9+p+_8$B)&|Lz*w0LUT$Vs?`x7oEe z4*B#@bZr_uT()5NpQZBYr&ytL$iy6;Kb`0M)9BGT1t0Hn7foW<=9Ihp+W7Hny4L1N zk64>J*2dg}4i8F5y4GeokZh5VATh9WgIc|W#No3jC!IEB7H~WR92NY*Tmjyx2D}al?|9(VEh+YY$WQLE%ex3@ zi^)x1Lvo)+{&b>=@V4Jc2e(*K`vYj*!% za2wbSx0P-`xppBr1KMzj+tH|J8qhur`ZaSm+;YI}B?Gr*;C3*myi@A6DEp}y3SPD- zwH-EPGNXjM(uMvnmg{D{JpuhMayw{*P*bb9LEQFUV_$T$pFSrEoCe%rZ=@?g!rB0x zWrf|Xy5|x@!E+8KvJ(BO`WX`EM(PJH5tnqiBEEGA7(8!aa7g;DPl3VTWLzbHZ{R1w z&{bK$^T~aV1cT$OPB9qF4lc{X_WKmLd3}bl=QEqKoDtcBgQC0_(58p#TJx%(P)(Ba zO0QS7D#TsZ;J*-w3<--9$%*BgZur*t)$qq?13vCjf!hbhP`C*Arp5Ty0phJe(QR3B z&q?%JR|Z)(Skbh>oB7w} z{!cosb%{h^6VydDoX}PLbUB^gda^S`iMP9{Mu@LbhKzW$Sj8gaPE_#=@$XKMk@g=G zpAw0|`Zdw#KL3IpVC{aTasc!^2{ilN@h&_~9aH3ZnpG5)Tw=!wDk zl%a=Mv&qn8S-@`ao8^d2iGwC5F9@C!w!6-EC34NuZ;yxX1Z@(q)X)fYy@+cZ4)2M2 zj*U*P>`eN9`VO8I+6m_RxTwTx_zq!Vq`kt_mu^nCvXi8{O8V(ty7Fc2 zaud4JYv{@VT{--kUwW7UZM1|}WEdj4vfb;)n~c?!`M`^{C3WRW;63WiKZ~t+(xw!^ zKUIw7#P(_!R<#6Q@h2_uXe6`LNCE9#U;d5}6U_~x=2v8pYV3ZZmE6g0O5mJ*yu-95 zVJkb0wVTXNFQ_uDja#>%(ocs4vzp)#(oozHFaV3puN{zfK>w=#h8=K&#O((4V^@wL zZ%2aLa|Uh)*sDm?7~JN8+Xin}U21?{D(HU$w?c5M@n(Zh(?ROs@J~BflQ$Ws87-nt zh!HElM*PFRuJ#>j9X2suVq5xXjXe+BW-=L7@Ks)D6+0aNDx^!T54qb;-{Fr1A`&aI 
zZb9uW>JC3+-OfeVZ6BPJZs7Ki#O)fZ_g6!gn89tdH+J1Bg#9`V4agnIW%O{-mvBPxG_S!?mabE+D#NW`ji z2FzGGx}7ZIYDi$|j|MbR0T3WZ$o~x}Td<$mIC{F{W$iWePe5d78i!uQp z@j9`NQNS0+2=7W$OYh63)}DAw>JQN836~Pd>>TMVCKqZV7h2k`M`uRL=!}`@jHu&D zMh~VCo#{n%hSm3VwhpaM+H8~i{J*a3r{dR7m85xaoGPrP$9%okC$CeuTfwX*VI>KB z4lU{{X)91|)WcrZv`$f9AHlW*s_Xgx0J&3vt=v%tw&TFo-VNJYunnVS1h#%+*u|dF z_t=ymKDr&huiQgUhw<-uEXVtgJeEfuKbFTnl=)EdYTgKsmHPjY$ClzRcjvL?;B!WL zrvXk32z(B+h7le+j+L#%oFfR3(GO@#z(?@dBJkPoURgw4-S2G57S`|Do#eWyv8rTX ztKM^{r3!E0USrn?skWW!>W7_oH{4_R`obF9jra55u>|$5iFZkREU1kE&U>5*&Bdy^ z`JOF9)Ls|r>13^Jpeg`6aI~ydxck`6s@?9>mT{^c8Q}SmTE=UwN4cp^?eobPb;CEP zM{;xaU9D1m*ewrt5=YZo_{KEGJT^`(=i8wF?F#R)#(>J1fP-@y1NYPxR#BU{g6LLf zN}YaAAX}VUdX^{ntAxe-le}s(_TLHYKj@zz?Z3`nN9EBuIAE+n&xOL`r%QAfJ%>Zj z>F!iy*Kh4P*uzgEfkb@%7}Pw2WuEI+-l_h?qSO&HS@zdNWg|S&%n0He=ew{0&x^gQ z+kd(~)Az2`(}Xs&Y)*vBFKPDA=W>F|ver*7Izat@Ra@NVepLw#sqZbQRM-f$k*<9XaTq&yN>DDfFtsM>=?Yw1T{}?&$ z)8-K85PM9yvB;US8L0)ocks&_juA@`@m}HwDZ~@vc%MZqG!j>^@Htz=MoojNj(Dtm z%8~I>+ueS8qYc2b%{4R{nKi?+JsP!$4!C`sJ`(HU*JenJ6)@JdwO_M>@z_w8mFm~3 z!FWG9?>OtR7j4*yZ@%-CMY;G6F_l#ON5&j#yUv`mDcc!yW!>ipnS1bSUVOn}&SESf zMe@!;cHSVT$)ODP;eT0JkA---`{_vG6WQTW32LuT2IB`4eT5HfA>Nn9aXP}MRrz*u zqi`|(CFZ`JKy^q~)a!c~jGCn{?5C%p@cJBXy*dQ>9Q2|2V^>hrBuw`!Lq7taY2dR0 zeBS)XqPz)jr2zlgzuAtqEv?o$iPxnB`7fCoGV`;Bp8lqqv0*us9OdUeYdo(Uo@w2iBm{z-Jk<5c^N; z8)L_mfze~Zt{vmY|CAQE+%aCXWl=EyPV_K#$4nbGAo!$5y;JM^ST7|R~)btW8(?0g#_&JjyPa{`3p-lF5%}4iw}QZ=Pr@0^lQfj zj~TF^3izmQid~IdU?&47VPDS7hXN~!ZEsAXR**WTX^elTyU?`3p1RAY?F)3*U5BLZ zLa&A*y6eNJx7}qQjo=(+tnTttSK~AC|5%vpdB4p6>$=O&=_4Qc|1SH8wMN2<{J+p$ zMc^_k@Zlih(tFXztV8*2uteO$)8{Ht+BjOto6AU)BDh)#oyWEFAFCxSn@P&Mu zU3w3G^8XP`<`NYZSql-(%9U0uxe6-i1txztEIr~L@JybuDzhXe7FH#KNh+9(?}o_~ zFgb``5tzJ-2EUHqeHi__|9zWM$J%USc&Vk@$B4+g(GrjJ+xy}FlDIJzgm;-y6Tt7Lm^D22dHIgMj(+fC zF!-ObTmFB*PS^mk*v$jG1JVQbYc`49Hn=u|-EN-`Up{sPJCUbP8Q9GMyG_I+#$wYv zpFobf-J*;Gw^T;@t`#lmtYKGd+>iIVV8=)*veG1f&Ih-orc^xjdgd7w$azbg{1ld& zIQ=;UuQ76VSwHm|IZ2(uNzk}wbt|uz{Jl6yP2S*HpSB{D9<_&ve1qFV25t>V3u2-% 
zxK)AM{BF1{6S~5{?Hcx78Mx(qU{OYI<;;~C8BZ*4G$UQcDRrBu%kT1Y0$>GxmmXWb zq**?nj!op(Dh!#pmpXRgGy1ei(aH)vD&*I;!9|JzsVX7mc48x1=hr&CK5mGMmF1;C zx&=*j3Le`8q-VWl$1Td7Z&;O`)ITj@lo)z7P1Zi?^;_uGVV0Lo!xCS%Pyr?M>JW99 ztbR(A^~S=AZl`80qFF!b6q+>=?n|Ls4jab5plyJxMVny_XjWgsB{S7>{MECF`_m?0 z6`EC#>Z1X`$RRymyb^XiF@7+L@YOOf+7sjP5@p6WtpFpjYbJnEt(TnRtu|!}HV=1P zD(iM*FA%TW&4}0x{|UFEZn)L?PsUgc-|5_7;MPD*4w*Hw{CXvz5>K4Pn?T&$r@i79 znDy<3*-7|jxPjSRiCK0GW&^=&Qa8+IBIks?d)g&zaH4-+e;M^Soc$bml^EA9VlUuy z`jqY`kH=45ZTQI+{NzmOCs!GM@+4}8E!08AE9tM_QY|VWsS~GWI9?G|5h!G%Bp&IibAvotI*G_Voh!Na&7bC z@Ls?_9{dM_zc`Z}iQ5FU`-p8^az>&yXaK0OQN|3=K-7_cm+2>p<w+p+`uMGNaL;oCuep`rwpU2NUeAJ?x+mxtW z#6Q|gZPIq$*B-DbZxZKwi4m4{Lyu2wSm72uapKi<`$x?15b}+zTK;!f!$WwsX4Y`U5sUI# z5xF8_m)o|pZVYjkO)jryhV?HtMcgm7LLu(|q0m7+eRB3+j8ER79sE-F$s4F67Eu-5 zC(o8u1`)k8&*Rk&7#^xk;?e>S>QWjB9rDs)piA!%dtt{u1>{1O%|W|v3hnu$MNx_M zjTbO6BJpoMZz6nhvI1^r>tI7KUeu<4;gGR{qjc#d?n%{ws1g2irz26;Be33HA}Nb% zI<({~qDCZQYvuv#QxaC6wi5}$o{xcb7O<{}iEiLA04sL~NLaIgbxJ57Sf@_4Dr$>O zS@CC^GMf>Bcc`>Ab$C_x$<_h`-VW{IFE&Vc9a2u7axXCS#0)FaE2Q(bmt4o9nhV(zeD8G;gj7ktOLW#Xta-6lZ)uOQLM@6o5?4@^EFp3N;UDlfsBZ6c1M(t zu-S;n-0#e8zHMTEM_;LfbBJj4$bNXIC%UM*r#Z9dS3QMe5nl zO^^SM^SVB|`phMQ<=4V~pZQ=C;PQ}xO9K@ONa0Yi`ZwyO zQvP63CN}L8pwFID43Y+)6gbBol%u3;|JL8#1912AGt&%IS-Av1rEF>y-nbiP@gBx z_BYoXpJCL<|H8M=K4?)kZY3`YozFA2IBkmfd-u4LUb90)IM9hruh$WDXm4HHPv*|6 z4GifqCod6;gmtFD%FiFnqhpthJ56CF9}9}OlbK9vhxW*|uDBCX5+K}SKp1B4`#6ix zB_m$(d+CH0t254EQ%plc3Lcq)wX@uR79CbX9hDWhPyb2iGChZ-_m8s5nh&4$d_cs? 
z^t^6|_RVW8i^DH7&5hY6TX*F8ev6=IBXZsEWaXn<`y!i5uRBAc4wvc$nzz3!n*jIYwQ7wQZ)cyW3x@E zW(a#?54LsopDfD!qf##4sap{uGIj%4SmF6Os)^J5KJ8t9)ef{`H%tz4gP?WC>-M2w zVS3f4O(kA22Quw zKaAp23_i66KKqd_K6(28tgGd^MR_id{8STl8w`Pu8$MF8e>WZJS`$&TRvfb?GyOSW z(tS;4fr+!{ny~+fVigZM_I7KowCB$y5oh(8FXeG+2?^zSL9RtsRUc#j9+X+^Thvco z88KF`-<4+U=6B(&ECUm9S{W~2S|rLluf1zgcJ{}n`!zb9F%mqMVFL=BP80i!@K;Y< zI?2iR#N`@Uw3FbnBM%O<#0xI-{n+w7u)*iacDx(X&Q$UO`8(V=esad~~QKxoDCXn?Us2Tv(Z?dbu~-wl-Pz!}Jo*PoaBT z0@eT$Ab2ai3rDER3eiK)tIff32*0gYCm=^(*k>zzWby!Mhs$^0W{W)E|A$e>exk6cIsbQ1XxJrg2w zYD$_~o9P}?jQq4fsn4{|FQ>HTt0Ir+#p)Ju zFFV!|_p!^!MkI&j*Jk3~pFP%Lc&9#ex>wsNy;CeFDk1U5FAb4cFXvRe`So5G z@~H|?+Lc0EeRBN^SVkk%yY-XtGUAbp{ltFcWc-89a-$}oL%R=2a<4PlA*zdSgC6Kx zcDd*S>ZB6VN-vH&1?d{tVgF@Cf|~xxF!j6Kak9?z^>=RR_nG~qJ?9&yulclvzTyYW zr914eR)juQhTPp*T%vbjzpX8DQ$t+)P&^jv7^{PRZEL;S8mw$`YTh~{f+9SFT957- zc(jEf;TgQssdwLr@Y8NM#%fS@vZU-qxGG|YUd0YC?nYUWJ>8DZ`q<^e|3^;YGY%X^ z-=}>RrCzlvV{5I-wL{3nU&3R&u3_BWZQs2r_g!E1U4Qr~Z}k9!pKgbr`b&NqAojvTAe5N-CDjWb>=$QcU$t-`?ZDe`egpqNV2I`)5uA5 zUU^*PP#0E`RVA}(5t&uKF`L9b+vg|e%J~{uzs+kVe~IHYOZ?(-eJ?iqMYdnxhnxI^ zV)kJ!bkooOLl1ltBD~NovJYoMx8*TC{@_=~I+0WD*z->wq}LKdk0X^$ESscveo=-lz<5mh6k_VjDRzR2HEqL%l=nn+N>@k)ZPF5%nQMa4v? 
zMjjh-|F(X@S`vT#VdeMw{RO*%409+^+7%w|a_QgAN)ncJPF0RE_Z69qH#x?rkte_F zr2fET$vEgrH?(ty+e4;O_=0c^Qp=~6uoF+YM&sK(l*L}!U{fA~FDj}1FXFw>%`4%X z@b9T{B2H;lM^ZEQf;^uqs<=({!apq{J8fz#lJeL@6xD;rZN{r(%mw3`S3V0Qav%dmjxJ;g`)J{^7mTs@TCr;Io`qVGYBoPMA0sTm1G(MqN7V8@Dg%AU)Yi3cBF?5mQt>=?Kreh1zPJ1)zB>tP9( z0$h4GG2}mRjg86Cd$lUy68)~y-zK7p?!6qCo}u&I>33~P3&X0G!gHcd{)Lvv>47-- z&Z~L3Q!%dbMuYD*!gt|-^)0hHeBy9fi{;n8^LbNiF>x(%a+7YN*F@c~0uOF;i>gNC zYD}HHm+mbIM!q|*N1gmyKv2OM9MezuqTA0^*Ql~1JZ7u^`}a8jWw1AxUq zEN&7w{t4Pwf#agUwky=Qyv~`fx2SPpuT3boDhD}}()n9@@iDCG4aS+j#Vuy)Qxjx; zY63XV4LY%Lbl>(f?LH!l`FM;862FnxYDA4H_)ReIyJpnO6RmSc>az8?keB?bhwPv( z3xq`U%F{G@y*zsjENWsRA6{9L;6?MC0*mwLq=EpoE%;W`sBM{fhgBI3+!jVyuGLFw z8PPBLThiA;R6h`J>c>>#UMbCKYfMx>#?_}0`SP%v z^LG>**3M_zhu7S#Mb*ihQ^58!?N9i}Y3P~)+415t?GLPOk|P=GHg&y^UTXRZ*G4H| zb4NOx>bgV3y*%hLu-e)%`FdGVN@RttctU2qqm{G@A z&bhRb{p=@(E?bsR7ejAT*3VL($oh%9Y{xp&^DT`Y?wk~zB#A)`D4+^SROBhEsn1I= zA%8f3fuu>#PkhCLb`U(C!0(3BW03rfMOni+4vT4xSF;D4)XNPfujAo%H7_x&lT^$V zL@VaV%5YL*?%RW^1{sp(@6ww-M#mt%Yp!2A%{P8BT%xago*k*m+Uxm$`~_qwu@mU> zu|bz2=rS#)W1*LLDD_t8k;8DtNi5_n|5f~<3Tgp!8E@h%O@+4?GRhhA7&-4y6R=N@ zlZu`Fl6qH^TSTmMWRF;BFFNDu95XH&DeZ*?Cr&e`!7Vn)E!JTEj$RF;p_Sfi(!-gH z1htbBkf{x$V!l0%jqGmygdhwyZ^7q(*JDw<4x4g>ttR}y=_yrG5(o8_HzFJWK2&NZri{Mw(e=q@=U+L-7%YhPsGCeAB+ zwRCWcXyY7kQ)5m%c*#-vG$-<3Xyf;>RIT8)2K)0=6Z;0aG0%l9&$zzVrW|LC#ts$y zAiSfSrFZnRXwNXwrCje{GCbNbOh^n}UU;~B;3QE$y;}M+ZU5B`NQQ{4XuE1RWP?ZB zYehk)y+o!wQ7(X&xPb(vA!; z_`Y27eG(ahJosMtEd!0*x99;hh#o-M&6fI{=0v3CA|e5|5J%|0IZsN;aaU`03~0F7 z*a1Jol9@r3zmPA#;0$9XnS`wV{pnqsJ!0Ux#z4+(gVuXEL6!j4r+w{hg_8#~i2jx7 z7Hl`Kg&wXRJPxwLx4ckupk_$TV3}>mtICr*!l&)Rqwa9tpdNGeGvvu0_`+sT6Q2RD z5%~o@O1RKnxf1U(y8OJ}!3zbe8Yv=jglEL;dG}^2`Jk|Gj6HgJ`K6E{UnmC(8idZ=z>TgZFMC?icY}SK^Bdr^aeGIIC}2l;eAB$|lCYw%gnBqkr``TOzwBZaq7N@-JU{ygNTQ^={MHhZbx#MWMQGB+2}qh}wwr4%YQa4(0Rs@P38e>`&$%fV6d{ zmEs(5ylPvK;&9W8LQr=*-XBo|YOS$_-N~=m8h?aM}dwEbM?yYGUl5c+*#o|mR~hd9Y;4r4>dsOZFF|zP5__wZB7w)IJZUh z^E{8+r~vS4q5^;jFWjlusCzhj3uadr~&H9p# zt<*pJ*aNy3F&~a-uI{$J@YXhV<$2N{|~~&iF*ESPzv)ZsKuW8Eg+V175oB 
z30=i51fL1ua}m3>T)@qkL)>x#BjWG9&+kuCPyWySy~A#|mS;SDmVCOEr|bS6`=6&z zh^eRW;5&J#kQKkcS47KR58C5(f6u?urtD{kv*L556|gvZHZQ*3%z0=9-A4BXVodRk z!Y|iPM7y;PI)xAC7K74>rPA;7Y2UpTZZ*FmEJ5MBZ+zNKRdfDZ3F@ecHyOW}#VJJPe6Vn&9Ro9&^&a3e4hn%Ca$a6H@NPPW6SARp_ zl|z%2(g*fvC)vALfbiL$r3DD52i@AvU`ABmdFV*smHn;`up(v9pI`>Wo%-c++kCi8G5b3w5UBH?B|MZ_CzZ7MEcT!h9yxQwV($!55ngf zge{SZEs-v5Xi+V6zq6uxz~+JM_>0sVi{0J7`HqQ$9C>obd#GJkxD_~S>i~zciky9N z_Es|V{Y_*d;#TF*ML%<^*!LbH5*~4;ulN?S5}C>PCmt+~4wpX_~y7u_9S}m3PLeu@qp4l)aYT3DwR3ZX7 z9LuiH`eL;=_8e`@S7ohdm!jQ-Uqgl8u^T358aADm3I<`*xwY4PT{c}`DMKT%D4+I6 z@=hJj_vksRdy(Wy`NoIe^GAd01sA)iJ`y&wN1KU!@yL1_y+eYq&fT1CiskxAc&qQC z`wl^KVJmRg+dFC+vH*ThUv5)6UnJkpn0nZvG@rz0WIXi+_x%vd+sz2?N>Lx;)(5E0 ztq+OZ`Y@SWA51A}N7G-M0JgGJo=kgq3*`m$+t z@7Mdh9Je~__4w6&UN^6HYv-ZQWcXjNwz){mNYq$nG~Qvzo1a0cy=1pV9m_U*A4e~4 zWw{m0F%`>EM1{(C^j;Sy>s6JNS!lq_EkEYD25hat$_zw0R#@AIR1Gm~>qMx2l+%XAs_4}j@#%fq=U8`pj$I@=sCTPC zu0n4;|L-9lTFjb^4@UUE((fkw-xVDc+Lsy(k1|y7eP zlpkhzKY@xM!4rb7lcOxDsF2hR-sUyl~l4 z_;m^P8fU?_{SjOC&(z^i3!O>L*m>fB|6}A|rb!EJ6&4z&frs87zA-gzb-H1pJt8f% zfrYh|mii-|S?#vMjMa(tR{WfV!X%;~3KHBz(t}14U0CXoQ$JTLX z++QRjSjc}k{J8Wu;Xt3TFZcQ*+<(xE#o&$QehVwddDkK-{n1%Fy(!7mBd}Ia4!0^t ze_~Uzd4CG|mSx zp9imwChCwA6FC>Xf^v4SXe6lVsjz!&c(?46E#^`jeJHlk`pp3%PGdz&43{0pgny50MLiThEJA^8W&^ zlkmD;|L>9Bim<@D;2H>Ale)n*6Fl|$e{xpD%2T{kfUEwB4i`68D5tjwxNb*Yyd>oX zbqvUhl=@!8eib?cnd&X@F**VNqm)R79@V+1{h6D?ZZ@Ji7qz2TN{IJGB6~u5gvEW2 z+#|z1jzl#q{Y5;O9D~$+^L;0YW%P>by#g7_aFf02k`bxMh)7%%FJF4{!_~hSkqqLV zP-2txY02TR*RiIt8X}zKz!E>`>560^@1ZOm=&>uHH#en#mr zd<`wI;abJic9l=-eo z?;B|+w3KHLGOAtE*K8Uv6-&5ol6*euKAo4ShL=`(5$iO_Ib3GCW?0D;;4zDnGA?dX zB)$!o;XC^_)QG5>z3rFwk6ZJ(bvFOeT>IZ-y;r^}_JNs`xBc(c`?7nqSHK{Qrz~{W zSdXwOPI_B%V# zVO8KX_s2$VlQ^WPG2evW_7LxN-($r;;mI_3(hN^pq?L@vSbVE-qK$|UKg-qio7D$CipY8QAFFw zvblw2=HjNKE)GqX9ID6KyxNztPu+0!M3h70pxQJUUqJdG8_4k>Ng^Ct1(oKzV>xsQ za^t%6Zdrf4!Lja#p+$MKMcH$Vz8lXY-xw>YH*=ev$!H$BHV&^vrr;M_E^ZpN$4(18i|Bt$zu&afYWoZTBe1@p`WG^ z7b@q~&geK!ti1^x#yXmLKZ@b_l}&k)5$StAUD_T--}Be?E+u*Gy)NX)x};tg@_043 
zzkm%eLLG!HGH8;xcX%PVP4d*pF6dZvK=Q7;tgT;+Ch0mOhh}xVG>d`RB#BuV9@V9i zo9cfT_X~ENdUk8$n6Cw0a~jN=(T;9v(O1NY{2DgYXS!_Kw-Nqi{BxO!y%^GEriVP0 zhhF0UTF=b}y4})mcXNY>p!;hv&}{*_lQEU@Zt~AW$PDOa0G%Va2;Z>zq7GL+Fm?P= zz%_}u^sib5vsVh(D}(C`>W9=1VXwI9|DhC5civr{zj-iw;ez)6h#gLG=DmLB!~(~F zt;0FHG?3p358cg;O-ga4Gl$yxA&!1>pX6>+))2km`?1((X-ZtPh~-(gT2>8OpO0_8 z8VlJ?uR18pZ*HPiNz^8#($m34eUd`=$hf<2s1p4k7Erfj71c<0GoQClW9H`6_sB)6 z1(IK9dPl3bsY<I1ZH?D z@8n}g?hGd>XBHA;1Ad`@3c>04FN9CKT)j>Dw71>=TxC+T<M5FG{&#aY!oZbO(~Y%4ik3&E1vvWy&W#?`B2R)H+?n5 zX)57$3N78&(9&*d>0R>le=)LZ7PqaUg+qFO3^YFUn9z+LQ=!Kp?#2Cxl|F!`qBi)Q zYuA&M8!uk}ayT^jdG42AzK`$kr}pSwh8|mairC85mb8ma{XX(2OMg(AOoZ#v?M}A$oi$`XignSq_=A zVbH1=THVlWtZJ@e_N z*51Y4roE%FOQZ!d^^8X;I>m~f0@`f9IG6Ek*0y{8TD$R%b6+GWanyK^9AH)6#}7Wl zIEVjhe~)|??{kT3hN(N!*Lf1_{Hiz!Y^BzeHGWb=(UN|5IsAQnt<#sLT{KG`$Yz~I zhg|DW(dRs8O)jgPMjUNCt8CqxEwdQ?SLZb6QB9KDJYY>69V6oLkIkGGP9S?~VT~{1 zBi}^zVxP^~at%MdQodu9(M#fjcJhjqllyx1?1FaeinCSJH5SXd#tYheS5~y_%!}-9#Q; zbQg%#>~CV}&6TsN;eV{@jY$p9pK zt@At8y*Fpc&wcnj&)K)}ThVBki~9m}tL|5I+w5uYe_Oq~)B3Hn-z!7vkx+fMh^U*D zTk10$z1Je?!C||Mo|T8!)b(j>dcuVc6j6g8s}n!@ulq~YA6AZR@At&gem@VGWCnAXK3&tbfb zFyurd3`af~j9=lVGTx-V=W@|u@K$2?qOTP)37XGG@Anfc_p?trr?2>^Ge-f>5U(Z5 zi09SfPXv$NNpCK+N8qE*yqewC1fmiyt&o0R^IMg|U4D~I^j>w*{XdecB9r9aM3>QpjVF0Co}TUTGBs?)2b65D8VPNfg6h-|pEOm?7t z|DPNGUvSVAERZHN(0Vk`bo_`MZ2W1|A1o(sTAFKBirH(6|JSBmT;Ty~`#6hX z!IE3JUhv33YR#T+88AuQ>L~2F3EXaIa-1hdg9hv2V~YM(5m?@bU-j<)ZeS9yh<+Lo zYcA94!uROPJh`>0Nc=-+(95fF<*@!#STw0|&o?eCA;|$EPnxE;JbW4jf+b{OGUOzYP9?c#u0u zv3*bXD~E%JB5@g3Jq+rS$|Z`sX_kmUZPWg@URgRgwy(evCl9Kph)<>D)gh<$lm zyhA{K0Jy^1wX5P(T7>$kz+EFfd=Zln)US?u_|O{2>tZ~7H$6vKAA|Z9Ag>@6@tgMR zUlsy;>V(9AABZN8SI3 zK6VE#-aVwY@|T=7c&{^Mt*CdnwY5Y|BcnH&T*PzUlWi$$lh-RFl0H#}KF~Q-F^yqA z29wvPtZl5g{BcIBm9@@>vf_5@Z&3}l{t3rDCwST)UE;=hX#v8++oU&*1&B-#5jdVy z_<<{Btyy)t%<55gUT>%Il){wt4|WzRrc#%-j^{M+loYvs=b-&Ew+~6PJK8}H+lif8 zPJ|+5?N+#HeDGt}g3C$Dl!F%Ku2ElpIgPkb6=O8}b~tu$Ds^DnUl#l0ae4pi)|O%H z*J3=1VfDrJH(?)FP!6DOH4Qghj@&3?k43tVPBmV=bi)IzbuB*eo~O&CQM$dBXnxp67Y* 
zy>C*n^Pi=k&(kCq4DUJbdCq=*htI#=Evo4EYTVZ0+cQIv#YsGAsNiotd|L7|3j#zJ z%r$tZ<+-Ri%8WyjL>1qT_Dbv_F~?NA4|Hj3pIth}D2(yu=ubws&hXyrrMG_bB>nTS z)}3u8;>Is>MArjC(e=)@bVoElFlYrHGc`ns=S7aBGcroYF^x><8DA&Z&)1~>PWJ}< zsBxhEz85z6=>U+(rBOD}^S*a;vrP5`&D`N!A_k*^m2i`)A4q{?{lSY-m273+|*CH=0nNZ z1+wXfb-m)|&7Mv^Sz{^?8CkJC|LIyKn2e0>9%XOjhW6??YDb1UB0xIB_o zGu;Geh~@+vAJ}y5;Qa+e?;8%R(j3Tu1IGVrR9SG~K}Uq%Nc0(?f+gJq*a~b@YR5rn zd-{2>pZL0hlMa37^2r;q|HTD9l9?ZkJ0LrDK>vQ#Dv)e>1hu!wfa~z!+0k}4bKk4w zWzk@_L_QU}E$#-pcAn9-gbjfWZ=`g|IAs3@f8nX{z8(JFs%Cpm8lMXq-`4SIBhiYy z<%8U9nUhY(E=Jb5t@K_lokP#zx#fSxKV7T+)5K3Z?Knhy)FQ7U3!Czx{VC2;~|5YH7Q8V=K$|EFM_gdw)*Z{8>Z1DcGJyvO?0!qrI|U zrk6bC#fKrIGuIcj|L)G{QRBT*rxcI&z0P^QBEFXh^JZ_~;sM+J#P0NHAGg0b z=o3+|ZtYt8snE5bfI1df&y4u!nPYV2lPn*e=kC*S;G`OlI%{T@{U<- zSR;OqR4>EM?k7$azq@3c{O^-HANs^?2uj#8uu#W-+0=_-_p>=wqu_HV!3i)kBUTi5mS}w{7W0Fa zXzI$5TVhJ0w#4G=Y~}65QPtnozI%a4aWIF`rf$0`{obaR8=cqgS9vzwThLUEZ2at+ z!yqCFw@2-Kj6drik#pwO znYY?+_1Q~Vhppk!yJUiKK*Xdo<7ZB=|77*0<7HN6%!^O)CutGm&y1Zp*8Y?4UILM9 z1T^q3jyoOQ5;H8CXysf#JRwl5&;2ID>lZAitlwb2hz44tSN>~b7N3>y*}u}GMg-j& zv+|wBY(Cq}XYZVrcz9)N?8@Ia=J45iKKuP?iOQF@#;v?F_76-&8+n@s-#60?E zer!~$$%MUwjyZ^Z*mw*%;Vk96%$du1jx&X`l5^&H>Awo&tql&%b~h3+hB4R9sk|$K zxh9@J5f8Ht&djU$>PD|OGFRhQGwtV*Z77n13;ODfbRYS_9uH?;d0O(BpQ8E2S8vCc z9NBte9X&^tZtH>9&75;C&zSZF&zaukIXg8rTmY2qM)s`U_&HzVInV0|xlv8g&*?*I zO-#{9Kfj+0gY!AFc+Oq+bC&R&AGHkPIkS1reZMl#x#Kx%xg6Q-BC~5FbezNV8na#z z%zBZtYYq3}oudan+>6J|5a{~Si${z|HgGOa-rr$qq+8xhlQb$XPhM<4c~uMhUYIY; z@&=!LBTs(B(Hq}L{I7j*&EaW0`6^=j#lJ%~D%{re9xiLjo96jze@?#($MpLDtaMaf z93BE4&OCoyfxAE|&C%G;Mykc%IDObmygf^AM_P+F0av*XncYJvBfT8@d~VU zE7fQxJv|)sR3Q`DHa*g@+^F|y2I(0BJ@c zkE8U#1fT!yW@5$WaITiy!}G7QpMM3?L^peMN%JVFPC`#JRkG*xga;hb7}DeR+}$)!epk2AVa zmFRdY$xvnDe=@jeWin3Ok|0&{plW>=s)~uZ6KtwHP~~kNeH1+dO=-}y^qb>zv2DtMMKtwwx=A+I2IYhUuvEt0`c>;cL&z)E$)p^~UwZWqJy- z2Xez*Io7iIjSR9( zItg8}8*6r&xM~Og;@PHY+?hxJ(`~I^LH=tU_~VC-%q`{XZhDuNbMWQv+T@*-SLoeb zTHRT`M*1P5KU=fV8WBCwli8V@c&bNH&!S+37aJjL=I$cmO*^y0YRI{N#_VZRi!JAYPI;qhw#AjOnNAjA#=3R2+`K#!iI>eFO 
z#3bq?1*MbnOwZqs7g-}#=O(_NP?S^@?Dvbx96P&rVZuRr*>%^S)JT%Jk-fN)$z(fw zlkN1AO%iR{6C@3ZrS^6nfqv8VcTW9d=NfP$dln~x8QH5O84vgnPdXRM2RI~Ja3X{6 z&I{$mI#WQ#C?M}{U2{%|@=Zh>M;%l8Xn*eF44d+4P`)Zq(cre;{>W_|#J)bgi9Y@^ zb2saM>amV+BnH{!oeuRoOYekw7afo5p!o&OGU=`|!E*5*>4=F&Oj+(@{eDy5%p zobco<9spb_#r}(g@5jCdh_9>a-PY0na9caD?~nd0)~YBqd^)zCJI`@>hiN-M)VA|X z><;Rxf!ZV9ZKfp6H8~oPPU*hMTaa;4FOkC;VIjepm&pMK4{aNYFdG^#2 zHpFGybdd?crZtmG9qMnd3kqC}j{U)K(uvrYfu&v6blVEvgMtD2CzpZ`oSwj zW1x%hnasWPtEtei#4$w=PoUvEc1377hs7BU4JqU^E_}$n+|O5H9J!ZXXz0(${?*S* zLu{W!Z0`nD$tvi9ltpV>m>ejDf*?H|*H)IfUy&R|9Ejc6bKb+t-FgRl zI^IJeGz4_i?w1a9X!roT7Np_h_6DLL=X(&1xW%U7IcRv7-hr9efh=e!e$_nxTJmcT za{gLw!cVj)&u(%P9qI+1-SqX5nh%IWs(vR5`JLpePAM*G>zx%G`|x#QVa^X&T^ z8~^<381IuFT+RP%p8av2eGNK&(FbmN=ghO8pogi16MS~TMjVzG3GW5dDo*p>&-TB) zcuxl3xP#dw(~l?^$9~Xyh}!c*Vh<*6U#sn24u zMQYAq%aYp5;q(5}ZfltR%t<_N{aTUxam2qHOS8Mly`QG#LvAwmU__xGOo$C-vD@S9T%@19NPe}hW6c{kIdqQ)k0Usj7*KytR^YSpdfVF| zQv!9zb!3cAf;vANK7(Bf==-(?=Agr!QbZ->_m)82hfuc&>RuuauE3|gR_?Z5yUu*) zZu4H|A0W1S5&H|Bv798^r;?o~Lu|ycDBg}Ai3soU>QF76W{;O2Y-~ShbzR?!V7nP< zg0DRJqxO^UMbr2-zHsm+Jjj!;b0F^fK_qCTSJ1o{7f+tsHti6xI?sLbn$evrpCOLt zxb#k}t);i*^~9%5&^~QX@TwB=X@eG&+sB-BmYr$rP(7}S4;1}MFYGAz@13Qgydwxy zqvDGbsBC*~Pb^8=&W+_?KSY%3)Kk4f-Yu~>GIc>msc0olnU6OUq%6?dpi&*V3BMW% zWxMedMdEf~fzQ&nz8PJ+=-=H|`9JfO??q4TT0?z<<6;6R`&TcNj>(IMzMh)C_<}@g zpcXVnVEK->r5zq&_wjY8omWHyM{4|bwsV0FF1KD{C_48>^`Q3ay1geJo>)xy+KsYa z4$3A&*#?`k_qo}ArhLMbt%kCF4y*w`{mPBZ3L47B^1GL`9zTd*^uF6VzlRu+?}e^+ ze#9&+M?7{Di}VP^B0ZRw?g?FFY}=Bc$>fPU)PyTCCmpFL`apljnmcI{io|PeU zpj*5fjzs)qrW#a+P0=Dvk;g$%G89ekLeX3(qE?{a)d_xgB=2hfkI9+6=e9DS>2>an z(Ddok9_s{0a6cp-na~ZJ#Sdn?3)~Os`;`f>kH$eb~x<@`JJ# zoCqLi)Y8B#f4F+jzfb1yekgty>CA)T4J}9TSw22RPsG>2jr4BAZP1N#qc^IO8685RlJYOw|%KwA~Mw|`44GI{3T+?F-Z z)Iir3(m(t%IZ$Znq}M4(Pgi_DKNFjKK`U_55)Unlp#^KeZ!Y;%{#Rz6Vw(v`qY4L(7o@FeIU6 zK6H%b7+-t}zWCPC+wjF>@WnTF@x`TUMf`Ekvx!AP54~5>WF1wG)lubGqRPYrf&whF zND&1de580h@doyboWtKXTO&Hu*DpzAlBD%(hnjmy@;CY;SM&B}+Z@?UR6-O+eKqKHPlF?mcj3s>=$rF8u9SK@Z@>8gV(O9ax~)a%)cPNh 
z3;Bhy8#ABsSm~VqTYjW>;m3y;o%}d;u{%FJ@M8^?b7!R1l^@IC$J;teg>#x82Saf- z?;n4^-VtX@PC_cya?}0PyCh>ggBZL2AKcc1=&dtPgURu-;YZFYj}_1PzvV|hxtOqg z+}nj8xqrcrwa9d5^B3_WfCNmj`LTw0j3~^(kFoIMp)UMb3P0$O)BNzESJRuu6R&$t zx-Bm{?42Ldi}X*1A6ZX&tSc*n_UR|&%B066oO>1ipVJ6UvYom^H4udl55;IzMB%?~ zw|czTmOemounkPPpmT}B2RgL4#A>G6fd>dBAL;l~o=P(L$3gE1`VS}Z!Y>5Ev)z8B zWd%wqc(bda6Z*m4@1v*j;Z5(UaUz}*tFXbYp&{C zOsqK)^fNzGl4QrA;PazRKa&6r#0JL390gO~&kTN;g2hl!7;r{8d*O(n^9Tj~`P~l& z%JD;UPPnc1^X7SX(EGEDBiJf}02VOLK)OXhmfae zo5I=XUF>-%7Ulim+YWQ&0Q@Qk3S~o&P`D2Z>?ao8ODw(ichumpIdh((uW+}a^(cA2 z=Qy8VJE4je2-i;9)p%W#;GN)=F6x#62-i-Cp_=N64dk6P$ox4^EE6?Gsv5`u)OX1M zG_a9SdzC7tQm};n|09&~tY^=bfC_SLagC*tyi_7FwCn6FYP7J9m95+0??GAb#Z>myGH% zGOD+QGOCF>qiUwlC7;>=mP9z8+ODduh4ZQH>W{?S!F;M;?IyFbDey)4RN-Mg{XT~J zln$xk9StGhV*%RhLGus$G!4uv`!(K`==tSP4-ypd_GoDL?x2@rWvsPum!bcD`lW_( zg0c5-V!Ppa(Zt?@p&qGY?*_WXC1+sjG41Lf?3{tAz#!>Z+0i%58E~t4`epXhzs?Q! ztNEemoA{5X-|UFK@fx`Og3v4XB$cOsfSkdym#Brk?Y8pHndjc}eUG(>b4LghMB*OB4PeXRk9TZ-W3WSugTSOwI}ENCnH!?@ z^URmm+w-{S6{Izwqh85MSQ!m^`=6-}G|*{Z54vDjw__{d2Q#=-2gt!;@!ut0FCpfA zpE;QM|B5fu5oay>vhnYB@P3bR);>W#h;!~&&w1~gqY;{hnLP8>MTzhC$?u!rFW+RD z<1E41Ne8J{=J)SGst1BkJShJry6&y~zVuf0qe>uM#{=o`5PYV3W_f&NQ)j2V(g$1D zuKpQp!rNw>y>9JlH+xTSot^edKmBLhF6WdDD9$OH0Jf_9-4`yq-WZGy6-?S&bPsa7 z`l;s7O?)zX_8|SYp5~L5^7QQ~#i``22W?L)iJ(I-l3fe+WF_%?+n2pTN0!Wh==l$j zXk`W@LiSbT12(;9M>=FakgoAIuh~EcuhY?cztNQl^fSDEz%k3;pcZi(D~MM2!|StH zuoQT`oEUr+wkT^zob@!h>}U`BpZGbQlfXGcjwJPaU}7x`+or-B*rvEIv`zIYz9Vd# z+SS`v!nTRs)F6EXZBqhK_!C6wpKqHQ)I#WfUB|Qy>?9DncOz**O~1Fbo?5Dyy z7U(G558_gTpo6~^PmJzAMt&M!`OtD|FeT>h4T8>3zxzL@wXoeuYhD*x?+MW=77ZGR z-)}@~J8yBiP3wA1>qrN!d9H)_4ZFJBrga&#?x&VLk+?kpTHnFP-P6L3wv8rV;idnlKgZ8>>8<>R$8U$0@etBsd}HS7jO=8;Qj+z`D0^Oe9d1umi{w4PFm*T-UfJ6|!GY|3n8@oLJE}lQdetxMI z|fBoNktofW^Y*_N&)YEw_PMZ{Y(W$2|hhjtGCgkH#w<>x# zHpGWTqAuvzuz{H!xjjClF8H1Tc*LoWOhg0o|HeX#yzM~moF#tWMC`lh*Yxn>V~qa} z^`|X{)?JIK0dP!i<{l;-eo>m6_rqMz!I;l>e2eanN#gYyA2;#6cRu}^dr*>2*%&F_4-<>VTW5nWT*pbgllk4=x!( 
z|3L-1A%!fCv$j6mQCn|NvG8u5&AVOH*3m`ewoI0f{2#nq>4>X|O8Jel@#;_T?i#vo z3cM>Mre5*7+j@!I?Z@?T*5iDYc=ge5d#uSE=~I_l@3=haQa5_XUC=}KJV~7w%hi;0 zc+{-8eePlxUB?Y7t;q+SruNXL##}4L(#3fy+gu(j9#xd+QJ!KW|9hI&bjest?RS8U zDoU+P&&dC$UA=1n*S-#)*67(x^e1))c`=i?_2`~@y2-=UJ9r1R8?logLhX7i=-VCd zkkj@43SaFNYV&+AbPijKUE%xXi!5O1JaQ$bM3f6Q~2+!ZTh#K-H^ZfI9_OTqv4Sbc1*Dp#f>DDRZsSU_e3Q36a>o`v(>bD!8d&J>M_$-m51-WPee={TWQ?57O z149d<^9Pe37|K*~9atV|ho{?Ffi_ie*;jUlofT+P&k;4;Zf{#@BmQ4jmP0OJpuU4^ z$Qay2#$b}p1+=M^WDEqQVKDiDao|g6eWNo5L-}OP>>>J-Z|RJ|pzX3z-rAb6y+0X) zA=@LjCNasEPS@f~corY)tU)qagCsj^kP^xo)KlO5H+v^VJt*pR)aT7zcnIBt4n|Y` zi_RL<({W!10-*CY%;pv!$G-1_^KW9kI$P(xh2KkG>Tb?1$JY|R-@@4>pFdAsae>Er zjU)Ylb8I(5VmQR9)~wN8hAk-VwUzq0j|#L@@xm7#IPLiVmaYh=&&chJjmu@^#ej#(OK zjM*t9FuH(ESy09O2LU~}8-n{!W+2%4!~G{S5Jj(8TuB`pJnBIO>8b(3UO%}a-ju=r zmuHgqC>4g_4a$U$xsJ*{nio1MbcBfqhQy?_T0bQ&JHY+_CimZ_A&uv6efY1@V0EFP zl88S{!$(B?h6Z9@ZtA8lpy6p~DA!RY8cEaeW++NVQd{cSZR*ql(a^BUreOs%9BP@m z9vuJ;fk*KPHkkJ?m*>yoXt|^MD{^P5zx(VyRFS%<5vI;NI#lQFaUc=I$*yvDsLd&N zi@DojxqF1W9n5vtfn)*=-Le}_xpP75e4EzYXtH|cbQ< zYIl4OsdOio^hP6jxhI>cb~}3;`e;Ac)Bw42$fv3QqyIHGr2cRG;CgkU8U5cJ&J&Y0 zh8t4<7e9Cvwo&T;LHvq`h{LD7$~*@BxNrYwoRz{?;miDoumv2Eg9%#yOtAIO!7lnI zYD7>28Cm#z4YZ%!P_T=k(OA#CYdv_G)?0bR>e>6;))0DYkDp~mm9Ikc!(YcA<``M{ zzv+FbQ<;uhKkwgB+ip`Ib*pXT3FFyr{DNA4Jr=!=$iV32Wa1&R@S)na_~+<`98ww5)``4=nL$QB7$Y6DB7jSB_-Q4I@9s46IP%wwsKM9|5 z{9a;Wp1<(N=J_|$gY+>P=(^{2(yQ)^OBBTk=P~>l~jFmcF1RI#mDa7ec#+R{4BDcG_ z6BWd+n~7VO{4>3#_y|+J0mjl=L+2j)kydd8CrWy)N9M^!9uvF97Ptyr*iJVd0Wl%f zNASL1iPav}i*zyuyOrBiai{6{mCjjrNdxn#^`KsMwV~nZsd{!;5Y^fgeZ&o`XJ=se zCa!~`LjmVodxzgQ#inR76g?3*g$>=f$8A0S5Ic{bF|?c^C%BjM`Mp$l08P$bs$P%O zu`j^%QepM6nCF{5uPe;-QenGwjLn3j-Ae^uk*={e71FU>&je1em#Q8-zdEXa&ZzTI zs5p!5`GEIwhP#=>d)b#qKkP5CqvK4i?HoDhRB8nY&{6x~jcK{U)^lfHSRI=hsnz<{dzT+>>}{mA#)n3(p8hI0+nk_Wji#PGIy7@y4=N-Z{v6X^wIK8DW%_HlJ^j_7jxvWT z&oc-8c|6dywky4dgSt(AwW^M54R$t%`yZ%fQd!^_CbUFL^U&=5hiBtZ(VqnAX`7YtHTEBOD%(H#aS zsZ~95O(y%|=@d>FAjn63@Vd_CaBKm4Q;CDXsw}uBVeZ^y~-~``q*B;C|wlF&H 
zTCYcH)ogGCqz98KAok~M#~0}hgZ~%S8+9N6*4hYwNF`cLZd3TKMMKN@F0^=|MGyc* zZye#@18wiECuYW4>;92=XpG&1N#44O^U(wB!{!)2gP{GNye>?$(pWxcg|_3I;LuB*$rl-{++fQWPeFVFk@S%o@lG3%f3X2YaJ(83!BD$o))^#Xl>@K>iMx`)3VF=F17{}S%H`=w@Nn7QZXvm58CP zA<^E284;T!H%I+J{IQ$8%ZBV;P!CF7%aT_fU;KE)<)}~Um_EpS#J3>BD;?K2+1u2} z++T9N5$*jm)AjF=Ly(!VbgyiGZ$>W|6ks$MveHUSoyGHv2VW3bZ2wqi6v+R!)~W=@ zj0QFi4iz_B!~Ot=^0C0>)Tq~b!JdB6ZM{Afy)X)_W$9z$h;G<)zsGu%6Xeq!@aYZ* zpN@hNHyu9R7UI(}jb3&Od=e=aF3C*^aw$8+rB7Q8mo7B_C71e#xU`@PmsU1Axnwpi zG^m~-F8z-FjL+wi3ogyKxwN}gxRe{>(j2(7y49&YpTirAwz*`%rKzp+?*Z$4m)m;o zc8|4mB)eK^l2K)!?V)?f?E6KJid2{gpIiiPtU;1R8c+XY_6K{i~KYMfGdrdYdXGyQxCLmDEkWe!|D8alZv{YQ+8MJhO+x~F0e^mzBccT zFX>1+Z)Tpx&uGy5|IRZrHG`bt%-JLLyJ!TFrn&qcKDlRhE}yh24_U;T@{tF>!#>6S zJ-{mc9oS6Nb_R_C8TodZLlxRUalUtF+pw?iY|DJ?ZfadwPu)@M+M2uF&E6Q<7b6y< zPQ3&M;c$>@6C~dVQf)HSTM?r`sf`~hD7CU@JMtcpApU+w-<>J1Bxt1CVe~N7f^QZ? zsuh&WS~7M8jW=mT4kBOf!ATt(*Qo@wC>}WS@Dp2?9Bsz`Ywi=mfWczn$vlAv1rR~!Pc(#Fq z*aK{7PBbUB2cM4O++K%IM3$2LfU`eZ>S!i6f?Y-4y*3Xv(N|ojvO~QWWM|>QB1dk7 z+k3A7 zxURHHezs>(LMT6*iyfY{K=#0P&(GGXK}f`{2Wj1YbmZ*k6%Ah242Px_ZH6P71*uoEYUOVXbMQzIbb>pA>eU+DdEb(|x78B30K(=u?*iN|KsV{&XJv)~+&ziD`g+e*bl zjK|;qZOGs6ZBOImAbatz$}~T{qkS<1fng7|;%aeVnLAzyz>=e)yX z=&zCLCm3>9GN|jDuFh7qt}{^9mm2Q~YB-`BSL>NUgTqGu>!@DT_dU0#Y)dVvVP9_@ z7#3Y`Yd?4zyshbQYpIUD;T)Pszfn66Uq^IMONJqw$wYUerP6c`p^hEgWM=~ViMzKH z(kG8Ce}FiA`3ib*z87a*ApRDjC&;GQBOT1GJ1@a7t#I97 z4`w=(w%1ZArLIaO?9^nk^(Dxg=bCEl`ad~C&6`umlZlpUssE$J!0a74iAOp4%i^q4d==g-_tFEv5#G!~hPIc^LWX3|)Ja>0B;OtE z2JT(drzjPa1$HKQr^BU0TraHUK!gXk<#L_JmY z&%bZA;9l0KES;S|C+Ubjg~dmpRlX-^@R?|gy06Z|;x zCF#wIvsUu`a$6Tg!9n;q`lRyrO&Ym!zHnbsMj?II3q}YIIHlL=zrxk6T z#Hv-B-PStv&@N)vs@;uyC1e~8?HF-+B4nm+LMmE zR->LqQ;hA#o3L#|OT(KdbRR)29RtFfhaJ3G1aIV2qO2=>%X(tnrIINYsq^yUGJ``~OJ%Z_Oi@=YEY%>R zg(d4%Lv`NRp&U(r6%rDZ4S!1w9R^M6WOL6v^g&j@s2JZ6S6wb zUe3Y?3|gJFm!+a@I!ap9Q!9gToiUJ-Rp+yX-3Qpm{$+^Jc*f=S>lu1>@K zY3r1YcTfxD{QvM|bT^*Fb>qo(@H)dh>Ad9R$tP6#!#ruG%HNeIdGKUcw>Z|xlg-?L 
zS{=v2JI#|*$VpH_&NSDk$Syodg(sV^S0W+w=Ygkgy=6U4Z`aS<*3`RVtt|L6gp6e?(_fLuGlkHR8?b=nG~JlHrZ;U@7^) z863?69mh|$vUE_tTT41 zX<@P3n)xg>%6ZiH4k805Vy!j%4G$JiA^*t{%p$>#$?k%UlK^^*pySliyFL?n58m@N z7bTzR(dcpk^>-ks$y|FP5hGhLs2Oz6X2yfjjV$vtL!i`IBN7xDlg+GEDNwq|rgT4d zoOg`$2PvHgrRyDa@>+0gYe5SVxvynL4(TAjkiVE1c>@?~3)qGCE%H1bDE(=D{BBG&MU14<3LAYaAW$wN#+R3pcV5 z%kQ264}9pl2iLo;C+O2Fe>%>3n6H~3F?qZvZei}f_w{$|yDJ%?$>rnho|9(vEjsDP zB1s1StXX|SbH>;WwhF_Un{6p;R`*>$$xRCKaOU;RWj(h`cZ#(&al4y%zsGhTyS}Ka z6Yn~G3(+S*&a?)6YMSOujn;_fHU&A84QC$i!kH)FOhCuGM2pxS&_~~zyNG^*b>!oI z0meZAbMp9@^~Cve{@L86v6I+i&k;Gg6*<~kdMkMpLCpEsmLuuYH64>ukB!lGwC4fI zFC`w3{E{H!NPek?X_Bmv&)uxvzjh8OF*mDO{r=h!M||6?-qQ2-o*sfgmQ+%!{K)_E zZn3|3-NE~`2woh~5iSzIJ*jnU|EmF?&qw7k>~+@Yu7pQzUOWshN{Ib4i2GxI?6xw{ ztA)%wcZxni&x%eNGSOpQn7|y3tzRq9uLg%>0yrEpeNQc*Fz9vPazn4XFc9{-o7FmW zYH$Zcv-;r`r`O%Aen8i6(CeO#bvRGt>eQ)Wue%0Z4WG)f>3>?&KZLsyl!cMdKd%e@ zkE25bpI2;46?Un=&`*4S=^1(u(5EMEWghq&an`T}MxP9U_6y@b&zGqBZ}<|)nhU|3 z@dvVJL0^Kc^2DV z)ZR89{<>2N0^G2NZ7$4*3x#cI#O_ztkmtCc`Vji`8oB8s?;06M7y9WPlDjaMyYP$B zx!i>$?m~@y7X}s$vO5(bGH)u%C>m^cD%{J>NP5*Zd{yp{f}uqN(IkV=BsXz0GPoIo z(W^ra#4u?VTi}zMOx5~Mdp4k1O(7eYQ=WEU>a0O*j+CvEH7bv2HoH7$R{woBv2|5G z$j4T++-oOFu4Xgnh zw}$@zlW$}?>H{)=Fx1}OU8i0F+yArj-!*2;9IWdyRdpCK=lF}J@DdZtjwcbuCNG(WXrlT*s4kG2&2pTQB z3sQDTM}Qz<&v*oR(L3uWm&bnMikWDm<~FMzU75$O>gZR}G&l3eq}+mcDEJNmbq`en znKKUiK7BMwV&4)tOqDc&DrwSVY?gb9e-Yi1*1o0-mUe$~mcjj-R4SRd`mFq4?J5g- z75Mmo&L7kWB0)xtng$=uY#bgMd|ciduCm}Wl8>OG#%x@z>O@OSY^_IUEq$6Cln3l# z`btXpIp5Y}%ZL-UaOT~@ZeWht_fSyk%}a!LNt$=!A0-#`Mr&Kx{|IU-;u|uLmUTz@ z9U+a~PQ*B=JpRCxS-pyT+Hz^}cgB=Yo|U{yc16v9WZ7cZIG>7cjiWj>vfMRUI4a&` zqxvVRdIjtQ7A__rnXvK6x^?nr|nO$P8N| z|5hXZP4oVny%DHJ&F9AN*U`7+B!rj6cm}3og>Go^k=+b$1fnKEtnwhd`~$pv0A4=V zavb>^|3mT-e0}&E;NiJFRu12bO|Zr>6V3Oj^l9aA;>WT}gMKGdC;l1J1;M=d>fERT zlM$C$59wE}0Tc3m{3e5F+^oKIS?q$zP&BLQOb!LJ;?3$drVEDJ`H5z5kCuh9;$-~c zQ=nUnDV?mwE)rQGFQaV^JfS&|8p=P^s6lWbuf?em7r+7OconXGmV(Rx9h zXB7e7@XZ;kiN-Ur8g~x9ZPu|lRRf~Z8n$VMEy>zupDK0q;-fu@N}0jc 
ze47m4o{$^`ak+mL`^u4<^ADlRIKs0J=wsT$5j%2+_Ra6GYg0|1P}a7@2rvvQNc>FSf8LH6Gy zl688XVV}H)z8W9B_LAd6BWdomhGKU76g>7Jj!p!L8Sue$BwNFb9G0Q!6fx|fC+T%W zwszgetn$rqmY-bk!Uv7N@fv+h8#o3V^;xjzy5A+aNj3C@BpuES-J+K-N=JL}7VV^? zJ&4oMq~2#<*Hn|Lv30!tZjrjoP=($;^szBU)Atej0A0l033^lOpzn|)Qs?c%uPHWt zlcDd4rV4TqTUIiUJ(irrSK{aeH*}r3nI3SCy!-!YZQG}o()kdslQgSg7sGXuM)irK zw%x2!FFNZa?4~4V{Mm;U zt7zo?!~5DmIdSXj|3Lm9TeM;f@fiBE_5t$$Up05+wJhF0NBHzVuWkF($6adM&FXn9 zdNBXrtTtfL!?DOS%npQWZO!V*&TwtJhEAj!P&K;Vjd1>-s0!XZp(FbmK_ovWqpY4B!lU@x)irWNG>k)cgrA^*=g@;BuF`6~LOcmz3cj_IMzMT2ZF z6|F1LeJdIy)T{Tt^zca!ZNjrDAskcLE)YkjG?-4AvT>kqVVYg}o`611q24W(?^HX| zO>$JdeeC|4j?FT9qe-=0cVV;4o`ELizg}L}k4S&Gu41E`iD~hLdf*EU=v~}rTi=p2 zV%qsoxVhVwfdRU2EoiaUG*yEQX6{~^=HD?S%=G;E)QOI2;vlERiiLlxv1xxowpL>A zU+dUNJp1SmnEyqlT%*7RyFbo4`helxyiEKa&R@z^`+uW`>tn*mSyyUO-*og$G^vNK zhvlkCeYH!^M3efmqi4cLwome2HB6a6EvBkCN=ZgGJ7`G4UJQY0Im_`I(-xI>5O&q z(bHGW_7Niq8#INZp)e>36QS@?M}NLgt$;#7ycX>nz_JY^*3X{7Zlv#lgL@w`!0~?B z*r&sYxtO}sirsB@Z=hizfZjs>c;nhJX+0U zQ&?+|GenLag(vdXL0nQL%q)y0?vI5hAE0MFb7HM%&g48h|1pI8$6%v3q@Sor_Y=kb zGWN3zi>%@Cll2yxf+XUm#NW$ph_fOx`>@5vDoi@qyWpp~v*-@=YlNOmi7f1WFsd-+ zV02-hgE58Gpnpg#HQDyv*o{ogKc;*_=$7PM%_bT(-dmFzceS%@?7h8r)z8~AV_5l^ zS<4nnw6*Aw_;J;0PFsw3bJ^j=DJ6RU&6YkT5nFnZkA4ojbEbXe8$%aocq1kZT_E0E zu$RA99o5nOC~uwJP2>alqMF$w!%wML`k3&cr$~1b`BWtQ%yysw_}EHN14fR_fY2j> z1^krW{WN0vE8lfnZ=!n-4kyoeZ=AKk*2Aw~W-gcSmvBDFz#rzgytDMa^PjQVDdPp% zlO{RXi@EOoab#iL1y;UKl@np!UVaz1Dh^M!iG7Jy)`aqD+$)#Y`%=~9bOzU$u%+h)}1qYef<5! zcX@|+zy6)#h}m!bd2#3BudsP|=s>gr|8Dc?9-?WUpRhRM_6Z9Hyz%|T*?Q}6)Lu4Z zmcG{+y*u$FeSuIvi2i`|d%xz`6;)$TOZnKV!Tr?ny-)2$@Azb%gITDEncQ4( zwa!r=_tD{6qvGN0aX9PZcjvaYB9|{MCB7fVuG(2~RtfrhFQ18Tu<|B$L2$mS9pdNzGmoP9IR{o65!ISwm&6A*F>aF@ zbxC}1(?8jyhFvNrZ)7@In$*Bc#TtP<+(XHn!GLOQP2cZUkq`G#a?6p* zu7qaR$>u@(ct>A?j|_lMy{V&LYA|Hr>F0lIIy&jKzsHt(nWf+y%_9!ZHShPM{>%V! 
zT z8cO%`CgydE^JTwo(36@Cr8{&S>mw&2lx}cPx|vAim?O@|`fxM7Hl;J5^ywyxJNf!z zxApNo)D=0gHjS>1Pk_cni)n z%7(XOeCPB7F$Dvu5mYNOd1S=KhGMU*i!-TNo2kYoHJI*&n`ifXrN6d_{mF@pm_2~3 znApqS&J45X*qlNzG8-vi{oj`_;LQ4 zK6@XOy0GoqM|Vb8pZybE31Qo{0~CXx?b?lR{{Zye$advIU!jiMeS%29jbF>X57M_0 z`i?lJ<*7--ujw{@;<2v`1d#Jxi`-T%GEu=Pv~ebVeZl|Xh%Cqkz@pOWc2srUmW5tL zy+cuyJM*?Kaa1q*Gi0tI0(3&NV?^?OSQk20vO^y8h|VOY8cff5lgfZ6qd;*>(I{>{ z_QgrACjsQXM3DR92P73IGu4bN^Nvn}+~>sd7SC|dOoQZIqYmIteOB(Xs}qv0|KaKcHA%Q6`2Qaw1>%JZ{(l8A=-c0NTPg5p^#E!))4?g3 zZ*=35@%S>_qob+d!E%IW|5vrPYV|(Rd$`sXQ0u7RcdfPIe^KRk*4o6EmQRAUHov+9 z&*8sXYvcE!Efbn4`KZ82t&MmW%QmEAFY)bj3*FdU>O-83+{Klg$N64tRAySNmB#leeBIb5 z&YIYVdnrfns=84=maJePd^g!rd@X!81L{Anh3_W2(XR#XW)<_~)IGaR*E@ZU70`Q9 zN4(W4LDMS}HRf(s)2UoVFMrtAsHXcL3kUcd8xjM%Z^#FriF;!(?=`>2K(v`viOHKEXtK9i+nn^dxR2y*#?N)bx)A z)Yq>`AEoIl3#hsDJS39ekhoVWiFxcukW8aQzVdS}c>seKl!QjkK_gx2)~z1SG*;91 zQbldhNWnTZ5&pD;E7@@6VMkX#RztL~&I9;VJ(e$nJiv6~-ONY9UEt1@ZjQ5LmiQ>V zn#Na=gR#Bo|KOPZx##Vje;!>YG@UW=h;r*Nv(3^Zu{O z?jO@L$os!4xgFUb>E70(q$jz-WOVG>Zg~&s8#UHiq_|3*&@pMXiici&Fk~Sp1xMPd zl;1IN5AX2Nm)Z0_4!wJc@pmKRo=1>_RP@X==BlO|+cPbhxkt`_rFK_MH(I!M7f}D` zsNDrriKBMM``6tJ#v|kXGYb^-#!QUu>DJ8PA44Z&J*jk=3} z?R@zk?MgA3Rd`*jqw;E1h30^pNSMiYke_L-qL)8hDXvzh@Fr%!>kr`dG2AmHs@wkGzw9DvsQzkz&^O>wZ|dsE<7iHEj*-V>3Cw+R{%d#e zT}S;qz&?&}{XD=vj&S`vptd>c=K*$;IPYLR&o#3f&CWYG1)4YMI35W^msB(36V^FT zL-T$|{k)pki7JUr^LTzY6}q>w&^fyva$BpDxr^Mxbk4`O8k*nj>9KZk=G@CIyh}Db zMSnBF+sC^><1<9=w?@Pk#r)DLiv49o(K_zfP_~t1dv9i=OO;yIwybQ%teEYR{fmBN z9<$Ww3$Q=3%VLV77Dr5oTHum=)iVnRM+|X!tH52#K{Kz)jrPVAvsa$`*Ipi1JlH$Z z8@nBULf$?ZlX0hSx$j50ai2vmil6+)&WBt+<`>5CUoUH_>`Yk`#{_qbcin=sozYg6 zN^O~0cD6H~jp$k4mIZ?TVGSMi$Zs3vPvyUfB~EESEkDIE+g(MzAc{_qhSUB=gNbBr zR)vl=@k%s*Pas(@>1-JOS#uSW{9&Xma+2_OJyz}o?%i7K#WCvaCy24{=YAb&q|dV# za{+u^67R95ab!kO^7IqB^>n)Rj0iTNC7YXLvhw(UmpYlu3|g-rUOHZuH7g1<>}ail zwzNn^D+(_<+9SQ1ynFoJtk$mGJt92=XAaaoDK)C6-f&oF@4+FiMw2YG>D`C^AY%)? 
zLI3$d=w0WioL7T~g0~30;y-(!*9*Nn$SXWB-)&7!!e&p3v(DPNzmX%Dh2_sy^+29D zU+k+@tC1~EUoG~M(^tDroEVIw&^Mtts)}x&YU=+tileIOs;N?+>X=$Q7@@F=_Zqa) z{-!FB(4D?oCUUm}yH?Q%wea&y)9%~Qn_mOBBas}!#5ijoKa1`xrl09?&R?VJn4>2E zY%526*q{zO;=@MuPN*k<`U2dY@db2Egs$~AU8lI=ItmZbwFL|CL4P2V8 z(a^QH>0}PO;pb7COdvj-5NCbB&qCL6dW!Nne~qpOy3jSJ3teC8Lf1S8U2}O3I#_NH z_g2$EQAOs_co_>cUEJCbT?x=NwF_Ny_Mu+_dpTWu}G{P`mhfq{C(wGR=@}&tV6s|NF{PcaE_!6uysc zDg~czH(O}?>bdKPt;y6It-?yMud1nguj%4V$Sh>gm++|p8atsjbgi4ZWI%a3bLedhYQU&Y*#~Ac zU?YCT$+D5N+&jg>JPle$3Kk|EKCLNxb+O5%!W*7=4`_bQ9D-yMgE<5rGw%}5PVhF^ zS%hlP|0?bI|7tz|k3WUv8E*?e1V2C0kwvHm{~t?%-TeR#t;Zt2-Ej`zYSCBO=>re$ zf1O=m$lQZ9hA)wP*2&#k9t#El$N0BiByfA_J+=hKB7rWDro;a2JDA5JF!S{r{C@ z7{Yd`iizQBbw8XEyOal~HngV`CtF`}TeFe1{*#C!Il>>7qgiL741ey_QPiFAC&A`V zy2djXDe4D*5^Vn5I=es8!!(vdR6Y=&oci zpRUUml33l9+@LO9JYF_{%x-LtROx>lkRmGr>H8acxF^$JFo9SH9{Kqy ze5#5B^M>=+>pE4S*M;jkjm-at>pG39jF>uD*I|z}{(P{mLp}t1wV+!LAzas~QV&4m z%Q~heXM{$nQqP5A>MHvGE7d!W96}Xc19)gQjkCoAfX4F~vYE)=Ikh!&zTE2Do+zn^(@=Imo}++g zq_TDNbPTxF-nhoz}LpJ$g@)nCY^Z8!xb95v#$MW+)`t0~S$=AuNU1Jtm za;e(in`&Fz1nfkL)`Q~h^$FQmnJ^Hz#D@=#EE^jXDiylnqAyp1*Yf4Efxzr(eU30s|`QpU|b)Of073(-3 z-qFj9HKmKlL@nH#mP(LK!?}$rPzi|^;Fnkj(K)M#^Ur@B8P5ZAejNH?RUA0O%m#3T zSG8B^cjcJ=g`eZ2x#({J*G#sM(A(nC^M$g7iL5PA_*gvGpci)A<($J&2X3LKC0fr} zRAG;-teeVXUhxpe7hgSI7F`mxJ@RtYpJPk5&PYFPF+CVp9I-WKyHq8r)I;d4kJyEG zlVg@baOKkND3vMX8)UgW|hn~hWpGKRmr7r zpIM{YdMVsz)`(x>>@y=1gq@$xeu5kInNg{LE9-RpUZvip{z(p&xqzU*wW1m8;;4<2 zHN-0!3s=P78VOexNxp*EcGqlj6l3V)A!gsfd4IUkhet1y8{^0w`YJYLTj^KrT467{ z9ue-i1jR73XHoCMsLTYrgICX)9Y_Ix(kkdvB;Kn;PJ)Fdo6V$dD6vx|^F^IyciG)# zjr9K?FPmm}mo=)$OB>4i?7Yf^6O>|oZaXdC9c`cf#^jl|>Z}GC*6SRT+kZ3RcIQJqCQ`OAB5mp)EwYwGsiOt@M@BSE#Q zQt#vKuWwoT#t?^emTQE*p8U6LC*r?7Nv1a4wnfX3ytj3{T17vU$mKz#^Vha5dY#Ck z-ht9rr7mzgAIHY;X{@3{wo1L+R7wn9IEx(#=&tG4W2~dY;;iLq+>}h>tiwi5pWyqP zOW=!f{x|J;AsI2JJ-_#&)1K#E{9=3laFf%XS2EpL1@7e++w*FAY`fd@_mR^Kn}>_g zVpU-LhV6L@Je=V`@6| zejd*GPGVTj7w0vo+BoX@jp}VjUZYX{OP9RHYoR<;C04RZExBP{1FZ**2X#HaiirfF 
zu>@%eYV?F7v#MP9_km*xpDsj;aZuW`Tua9-mgww59jIq>UY<}b8dchjPCegt*lZl;klM~ zqdA?i=#Ead^ zg2n(eMndDIx5<}(6-7w+{L~((z}d-dJA6E`snoJlS5ME;u6eO7McK^nPBE+ttu_gnIV8-qcv` zrss|rZ7OH!AVc57=ZP~oF7Lg_?9WSQYYbmg#}@CaM}c71ibfURE9$`AA#baa-St*h zX8H74G5b8lvH1I%`+PE$O_@FPrc7fgGxhGnn0;}tqM1M!$LA(~7X`(s8C#`%ka^@)elV(1tKN z690Jf&E80_pe9%1VZ7g%p>1WQ`WTrQZFBSqbQ@JzIC@OmN-O{zoztEgn#rzI3($KX zVF?bv(GRe*3GJJPfH5iJV9>v4F{m4506gdHE9s;7dmgA6#@(fopgMnGe~!x4?TR zu|U$RR>7*=biCq$k3F z;`Ib)x2x44{qD-`IJuI(nhLN-j1~fYJ_C-Ti%jOXQY~()Aj1%jlPl>+AOi=#KZM`? z;rD}W0b=A+)7{ns=-Q9r^}CX5uz9}vJUr&-9iK2$$`QXr@=b#6V)E)KA>InQ3zIoI zj|pb-fm>(yCW{%)#wCI4BIquC9KNAw7n5%?xFmk?I3?d?W_A4FaZ0|)OdTZH^o=@_+A4b$8@iHrtyc6n=sy5>O2!^?zQ z8?a<@-`8Q?j}xPAB0gPqC$j{|;4$)YQ`6(DY4j_3I*gnyJ%=vf$p1{N@h}{Ffjwx8 z`~7E=^{m-DnEI`+<)zqr(B5m3iIyNH)?B1;lFrdcw~s|sY7lc}qNPF|)Y*cbyYg&2 zftSBmp*9hlO+OvuojGgf%qMeK(z(!se9FY~gctAOZ>il}0%(R*6E23{bDfm$93D; z;#YmH9Bm}G|4DqrA3L)m<34f4%6s!%dApI09s5()t@4?5<=w_%whlRh)OzfB)v zb%A$v%GM#*TF@}9{T(`F8dmHz?9#vRmP+wNm^_mI`Mx`S?w9eIrtvNYQ^%0J53%6= z_}#I5pThSe`M&mJat_FQEZ>WsIo_8U08Sz2WIMPV9G7Hl97LgaCm^uRhtb3ey- z%OFU1lF1;51XrjPI$lQdxzm+wbPP-L68Kc?h?nt6h{`kIlSp$qe45v?4!%4+6?uIz z&hibS?!(U~`KtA8o7-B>ae0enM&?T|mG&Wj(DzkqM8EyX2NMoh`F)B8?C)C?!DiBa zMUnZv_YWwFCeJmnC?-E~|Dd9{{G|O+g)VlL#um8Af_ca_jf6Yax!s;BIAKH6N{pwTw$=)@qLRz;;fAZp*750Qadl>X5g=+>DSAx4U zczf@2(XMFk^mDJT8B{F4Z_}Deb*g>cjOl#0ggx4#nd?*ok)CyK+nT}K?>-kDHF)c8 ztAgpgwOF_cutd^JqF!C&EkuV|TU-ZkDv8XG*djZI#d>7ye6KTY&3m2cyJPg$*1_9T zic`16Z%rfbO_azjc~GN#%s-vsy=hkkbrEZHocCKy0cF2%F}JYITJr1m6H5x)B38}x zx+ed+{r9WB(waKz=WW(g6-+|i?2VWd>6KXjLf{u{8_w`r!z(~!i}j{;##rRdvA*&* zviTc+{>Go}zj38~jrVA4gy+}o$2}G3dh@rY^S3_WZ+&ul%*@-o&PpR z27^*M39YRO+atCLvPcEUfR$|4?AqNd_*sKyn(Zu93+VS1)O(X#AnJv``8qC!8{ExG zy#XL-zc&zLI5z-LG2&mC2KPRMdsE=vil%k_q@Rh|V{)SPoM?XD3y)s1@9{7f`BJ{G zXks@K$K{G2(J z?_KwFx*n6g9|nJ`7y54v`tMM;`G8dFyrvQAahy=2GD^VW5*xJEA;+zso-u&bs%kF z)Cj2#m@PHGz2JM2%0N@9EYpTV9S$w{Xk5F{5TUemAxar8U73WgtW;}7iN&D?^oX{Igw z>Y+}l01wpq&9A=N`Tux(7r3bE^nd)!Fn5Ll5fuRq0g+pZTBc@-NJl|eU2|Q#Y^!B9 
znx&St=9-O{%FGM_0l6rd+oIgNs<}yOu9@~7iuQoS8Y#_5EBv&qo9Aa~GS?^piN#VJ-^+{_O7=2>j_EIA|w_iA;B84Z(D# zFyPOjn9kHFYCt8&E$iink)_z^@;g>Y<#;FEP1aaM2I1I!QOZa*A@|rQKIRtfH(^f# zWXRx|8X&XHhhyme;^*B!5EyYa{=%u>xlY})Xr=MNw%IODm<@Ei~ zvw+E-#2DFdxq~P2hrf z1f2)2eY!4)2X31sFc9_3wYZa|x8wxaEPbc7e5|EGEM_@@Mw*=l%4A~Qii9s45$QD` z_t>2ifRBL&&j)21L7BPm=E;bcgApq$?v?1W0d|ji_~tZA@@N2E&R&pcLu-7tvo$_6 zV|Gwi@3?Em?ERp?aOWN=UcixhbdVED@do~;ova(NzX)M(?LN=U3*wfWaUbe;jp#y^ z4e00RB7%V`qZ4Bo7DSfkMk(thr|FcGCk;owM4K7Odu16wo4L4`M(pJ0o>iqv-(9z$ z!V9AAW_?%ua@5)&`Z+1~2<#fNY|vyW=MPx~#l~Fhj$azFBVm*HIXqAWD0T>V{pU6X zVsG1M=qY;Bs?CT2zegPyeL+cx7fFZr<9XwG*ghQXAK%?7e`_oMFyv}?>BER&uy4TC z!H_Ejs}-S};Ec2b_(PzcmhAveb@W+3-+Xak#=n1l^+l|lLVi0C^z2te-36|mjDe6b z+5s*IQQ{zIXrow?({icbfyz zosU{QqwtI9PQZGp$XN5BEG{a-JPQ65F@ViKn|1`@QZZL8HK;(IdN<@x!Fwe80{sNH zmbS%hfnT+nvG!Vaf9Qhyu~O|M@IYS6U8VoB%in7M*OOS2oxLP;>LtYO*ddG`lv;sa zqI2^H@&i)8a{4Bc;0AOlQ*23Sfp{|tv3w)L;jjR04VZ4nqnFux1&gAvE` z$Pdvy&O{GY2oA~zwF9?E`JnDSo%Fyhw6ei{*Kb$^#Cwy?akxV*yWdgF^FH9L&R-B0 zA^$Matj0_#htR76ynWSt+7j9o$4SxM1u^hH-#Tl;jadKR zAci~Xxs2=_n=V}Elpuf5lXsiY*N7F%$dSW`p9DQm<2PtP&%;e8yQ1CfVZHqTb})@djBwKL;p?=TD2i9^Y{J@*M)d#pw0*Sd^LE>} z^@gj*0k|5}NoR3xBlbff>d*=Rm+jkP2Or$#?n&99I8cXd-vm%+wcQW>s;7o)xz(7P z84t{I;%nS{9cl-_d?a(lcx?Vb%YB4HgW1Hi_I1o1=<>w+E?`EeJ0(WnMv1wM z62rPEQF*PK60gIH)wcc`N~l2z3CT&C%034lDr;?Wn6#Q*P`F!KxtI?|9q}X9{kzw|yZ8QpRR#a$PAW}f zRBD5axTy5AtpO9?yE_gW#WA2#A@fwocY*`wZI$COzdRbft-u0nepGTfp6C%%VGStk z1<~n1Bj#V>AUgfuXMZ)2HoNV!b3xpWl|`=EU%*;!{Q(!mjpz^Pp8a(}{QLFp6?qM) z^*18o?n#^O6?u*5t7#BdvG}}^O+#s9`wO{fV+3u|-Magc6F~Gm*h!lJ&}N>ka2)bq zQRtJ>W0p7cWDAbNN;P-IsXIBym94-b>CJY`nDm4_Rj{~GinYDqwN!{1HHaA{WI0Bl zb@WmP*#*%T69a}hW5)}~P0~7gDRR6ZDz4K`o!xUyrL~BSn5l*1mfra8$@YkQpc@Vr zH#cH66luW_M+4U_6@FQy?F$)6qgRM9|S4i>4 zUgQH;nRH_VqXK->dXdlENp8OXyUFX?y?XB znmp&E3FSlKPu*zp1Za}%77v3Supn3?$7tdQn#^v=LoB>0Ld!+KuWmxm+&Ubk9%{~t zN79qzI@I5Bxa!css6%T;58&UyYL{O>lez(u5WDNp7eqPgBd$911+lY79r^<13cKpi z$cTcA`;igoy$;=oslGsHEHZ7x3Us1CZC6I1QEW!6VQ`!C5A6X3GFe6dSs3_mGwRS= 
z5tA++hFY`&SmIA=E+4cwkLPs%+iKBkfTMeF2b%M5V7fpP+oio7C}%@^J4l(`3t|dZ z%DeWey&%4YZUfie4wyxc$v3yImGjK>+KIX(?)9Eqx((blARPC)+Ig>^lSYeAxZUeg z+-s&=w?QK;BJS6DuY+-~kKkSpK<=LlNA0Ik$(@#=ryZ~9KKIn14v*u%_fa&WXQO*P z&joR?TOY**ae!MN1?+!UA4LNYe4}{cma232+G}8LNUF|xXS3Oz{S018y4`?UdxIF$ zG4o^uNbJ9r`*Pp5CXi0`>3I|B0!E1q0FtBA26Py`CP(-5ARaY05C#8Jv1O$D-HM zJ8b}ZJ-M`j=mPb{j=O%?aW?>cpMmK6Y!JUiv`sT*rB#yW#W!x!1P!SV{k&+p>GNS= zrlBwn>%T&H?*gBqm(fuL{BJESqrHDIIm0uz)i3aEJnx_BzcU~UHW1Pq(lQP9z`b)b zh;lV=OSzgx(YK{uT*Tt*25BBDwA|IL-+=x3>am)?8(~0Zz#9W-*#JAKBYb|b9I^G@ zFw{=qGbhw5xleAwPk^Q*Cu^&)4h{$CIe9DpAo61=pr;S$sQ^7gKuUX zLwcCe`^yD9O(jzl#9-GKeW4F_C^i&oQAO}6)S-&dr?5}D2dFh3)Ea>dqohOU#r9v~ zFpo^C5}p@Ze$fD{OI5=2VgOb)^g$dQov7WfL$$mCQz7!oblIdw*xevYi(KLi$jDY? zCqjGB%9m)h{w`;Bwn4_JZtcXlXxIbcH!bxLmYFTm`;9n(A{8V;_(IlP`&iI@fl z(Q+IA6lAj)v`hvqwV-8Kk6z^P^kL}{&R*m~){7j8UgXkpqVEcRK4ekL=o?-b3i=K! z3`>`#MHJqhE>DYuOh!Q_quqLu8!!ohiX{esKBpG=~bSL|xJ2_xFx{@D7=khb?TmJhLha#G)ERp=s%&4sBYx#J)9m*ljGn;WnFdud9=-hw~mU?t4OWedn!r%t%^5N=G zhJLozqrIz|U`r&})k4bR0t5Pdf3v zpycweDWMli?)hJo+=_33&2bQ(cm{c=_e!5}=AFWkcha-G)4&YsOrY%3V73}f%0WHR zm4g~sbQdyCfknZ{I0Y3AK?ctk8N9*BIo*YvQ!sK)Ly&W-XT2nUHUFv0s=N&f{MkIm zSvNZ`zJ}Mc%zi~P(XG&rWeXb6fx$&Q3f@aO%MAt=&fze(6g&>XV>$e!138BSZA!-TOL&aKxpMTa zO~m6Vyw16(6x{lf4m^%4YV!TIxou{AScD!C) z?ch8b#gq2Qd_7h)%Xd%aX$AFaV?8?a%>V;BnGG)wA&H zgYB_jLMQfXIeDd$8;r-8|3Q5o2k0|pD?bfc?cCC-=s-3gs~w4~wyU1v{SD0(R$+g4 z4G!I#-d`Fp_0ogWhCh(h&ut29zoIiO`ZsBg0cz7&-GtgS);9%UkJv!uZAt1d$piMt zwKDU(c(ii?yot*%e};;Ebe9I%ueFy^MwV7Rt--DrCQQ_&6;HI@iB>yN)tA@nBT{T&wl` z*=oH2+Ck+0Agujtz{s2vMxKW^); z)-<4FutBuK$2M3Ruv>5gCY5tXwjt(T8h|wiu%$XPY~)q++rUrm#S!H6p~I2Kl;eR&V^iB^Dicc9TqP=C-lh}j2P4;lckJBV2X1+3mU ztn8L1!C-HCR?oiIInnOCScZCbKlJzoX7+b_&Eu%MhMEU~zO<_+S%w#|Dn`nut2~21 zXHDP$R9SV=1DSo`V0Y3W%}K)M9u+cFWI$h6e*pSK&>!$!#bqY~Mcxw>-^5~icqFm| zY`%~SfvyI+f8TBG0Wc}P19%Gvv;jMCH=s*70P+0K{;(JDTY2B0ck&8yU+5yq!9jQ> z=d_ah{Hl^W{1x&*I0&aqg%y66Sz$j|;bYF)BGF5lOGQ(6{Mf!*w8oZ@$za&pL6BWJ z5Qzuw6`6;K1W;6(33y)g0YxXEZ&I<3*4O?hHo|}1gB3?EY2~gNnY4~b8VqWl7ahpP 
zLsns9r5#$z+9u<11?w|V)LGXO$nk#mD+LeBoMH6JnawgJEf zz!W<-5B7$yMg=akqZ# z^WtBys%n{eZsOqmk(qY^xdkILHw2lv5M0@S$~7197xNz=uThK#0pP)(%&H>vs}=xx zukq>_Yw$jk+d5s?vHb~m+#Y>D30|?PM@}Yr3=HeEqK#F#~8i5aOeV zYM%*D28ui-M;C$&ag5%??WluaY(Q7v>>kaYPhtUA;wB}_k_>^jyW zaKSc)M}Yr;`1@PCR~^D$!RGIR$E|~YWWg6#Uo5%InW$Z`>!0bRxOvDOjPH+q4&hg4 zKu=Bp&xO16{T6_am7&l-~Uy627-8K62G$)5@@xiy$;?Ic* zsKraYNDYwsI&mhWC2SR0ZHmI3RhWA0R9=U*{N2%RJy!oC2MhaO0a{*xACA4a7IE$q ze&|JNS8{Ebv6)n+<>nqijtTVpyhGBB!FWFVODz|ML$dGlm{0u^bVCE(7!LdHkEqCH z;l0lszvQRwk2wiGZ&J?($zXq{?Uwqsr-8zl+l{_$e^?+HtUhLeK~HGKKgCDbFAcil z3o1W|x!uw{gmdCeNdE)o`<*uboG9Y;y66?*mr+mrXO^vo}$HwVW=2N zHtQVr0#84CI(1zP6uE**x=lOS$(sy4i5XB)uHzgSx@WcgJEtQk$kIb ze>3Fy@r_r^-#DwM4dOlE$LlQnP><`0AJ2DXAL>zUuS0&JTej=5VvZ2FWcy>l4`bVS z#Hxe)A_lpxQ^)syVCV>T#4G!&(#^?H_OBS0MvY!n&QX zi^A2|MFZ3Q@9{nzm_8`|E|z&1k{+5KhG&$W8k#O;BxsE^^>D4=7b6F;c!nM;%?F}` ziQ5sK9#|BE+VY?xWFeT3pq{2saAzpEGYrvdBzl^n(bIHykq+E~E=Q~}&vRm(b7BN& z^St>XC)PP92IKWo^Xx8O_~Z2xux&1G_~7*;=A@+HeM1;`Gxr!Z8xyl_exg?eI5v9hVBVm}E zT#SIGO1;qa;wWg{CYG~+{K0P3LkC^C(0Z(WtV1l>-3txO2&*|9IJlAgKd{5%R(Jch z7qRTOuw!%jp#KVWux)r=4_Vj)x!8rr>(I>voEwiL76;k9AROe=4u1iBfP+5o)9KLM zsnYDQZI~UF&t`{(7J7AL>M@zV+^Zu8kBai*S)n}55L>|)B}MYDCUD_v5{9MMi8EM6 zsL7FO%1>-^23o%6HeJcw`ehY@jbbYFAU^z5#8jBkxbzf1H2F1n>+sc|sYho+*(<(x&u|7h`8vm}~p-)4Fd z|8F!cj_zZAYx=gXD+jj@H_9dsUtlQX!oRjWgV|!#9dp8R%&5ToH<&chmWo;JD&4^j z&U=jcNh23sd&Hwe*TjV`SmIFrl~r(kPEs@%NcMq7y2fT#{w{;41g(9~x!jDc_=)H+vt0I@jXvf z?7_GA8XsA3c1gfOE|k7)^umAR?qtz=vpo9MD=(L1@yV7GM%i$MSw7s~tQh|4mE|QL z@UM5#;$zFKcBO8eWzUjx#yQ5U_Wll3g`zINj$%(J}? 
z!Td=U8Dy>(e->nsofUPcgAVD+I@Dv2I#iWgxbO>jmmPogvj@8UHQavDQil}=1KiF9 z*)Kq2Qd+7kotEpCErX#S-iWg=BcFqu_X@~=GmdYt=BN-y7LLp;yaz`ku=8ddr$5(n zL3rGP$7ML?;~;!MbEtN+IaJhZb`OgfDN^*!pe{zqpZOv(p_$y0KO=OFb^7pUeqB9g z{_tl3u$Zoi+;y03O!n$NXV2|9F#`yD9D16i?#6TCJ1mA7+2!xv!0SkKib^rgIq_Aj z{)l$s^K;^A%M=8XEcoZ&HPxUV^Z9`=1a}Ov02vc$_dn$4@Yx38F~s6=dh|I0OGltC zJ`{yA|k z<^;L+lsPBnUQb3>xFS=QJ*o?)iDp$`^h;4>sKw_I!1d z&0-KOET+A zCMWt~C(rThToBF;2R@Us9l&GPB0i+fNZJAQoY;=M5oP=21#jd33>;J==*K3fDX_j@ znd+adVqN%k;xf>LR=tN*(2N=v{-r+M$Z1h5Ta11<)`$OZta%vbJhKvK?B@NJFlR;? z-HkYN&RpQkDA!{uWgVvTQ>;;sN;LW6{qVHkI%ACw5Q#LmbZ3<7F_RD&4Bq++3)SF| zoo`8lkNrT4dK&nWgX3@uY6UpbaEwL1coYs6lh-P_qu}2bJZ9o?H6By&7<&ly2ONZ( z`|+PAT`v86rZ*;YKD%!nX!JNVoYvB;F?tjtVq5>vvwho5OStx2(^x*9AHts#{{^(d zweK@=L!Qm7!|bY1V4i&$iS!${v}WZx%?eFMLx*i-=T+P}%`>haZF_z3r zFu8RlCb!m!5w_>DpIopan||d={I3}4VHrF9{;tp5%RY0IW%Ts>y3S>>a}hWvdE+aP zY2Hbdt*|)IW*hyI0g(AV@V;ZMlJ{L?jd1g&FG2t8t@J(*_}O{Z5VyY>t?kf;?^~PU zXJwI1@Xfr{-8av-HsgE+&X2ctuM-CPvsU7|qd*70T>ReB_AkVjYV-{MfHgx0(60l3 z(Do_T2H_w-9RfXiAI}NT`r`RSJlErB|3u4~aU8($gJW(O=~2;Dk5_(h%-BSFw00kM zXX4L_2ch{~djmAdvvBP$mmVF49*tspG!S-RWx*9RA7X1YpT{9|V zyGQc>c9=}EF>5msPh0=(;AGM@+uB}_Z)=dAZ-$lzFSx6z5?tk?%P+lSirLSFDw^jw z6nAlv<(J+t$wqIC7b_~kZw?xI+BC!M?_B$F=ie2Ai8DJ)vC!g6E7w=fG8)+gUZBfb z*ebfaPLtV*JnO~17bWCbi)aI%yuQ+*S*haJLU*^cV8#_>uudG?^2$Cgblv{a1#2PC z#TU7dwVC>TvQRDsKEFi`-8qezF0Or#3BNsf-OhtEunShF{L&*Z?`G}-)>bQtEjo(`;7N- z$}()i0Tg3Rfi{r6np&|ob6H89=+m;Q_;vo9%O?%q|6}NoDZEc);GKlsK1Y1uZw|({ zpI=>zehU?^<~1D}O=c_SN!~$ruVaDYzQXw8rpuOBoWJbe8~?QAwywh8%`GbaU5f%3 z--udVEv;3Gc=gKa5~5GX!di4HQ8kW``E!=#U5GivS}U$%S%Nxb00=QZZe3@Ig^hsq zy*SB#Icx--J$>=jf_3{%SbMb|^WMl-)FC&v4mQHXY{YtIBX;~`G}6T7TI}ejV&8V7 zxCXIIy||~1CMrV$?3E7Aq-NaTYMrrr;69?gnN}0N z>l^TPaz(I_i|~WKtlfFY=pmR4&=K!#OEe?72+a{$@OKVV9M`F8pSv)5p=Q(z4_>So zGs9>^HwpVi{%y0%1mi&CM80#uD_OFU0~Z_>H1hEa)owIXfoAp!7sWV6vGWz@Q75KI z1ogAd_Mbu-?H0h!1eoi@>Q=I0wdm!ioZ=Tof5d40VYRmkTzEa=hgSUU3MQzz{-$Yb zx71>ipI_IRUs^4&O{;`j7CXFu2)0Loc`fh2*5Fu%gY45%;Ii0{kQ)O=%lZ(p!`qn2 
z1Z?~G2U>0#ju;#PILIb#EqJxPk$hNQu^fdDd`F642FCR_Qe9HofYRYgpqGrl{9kSD0XH#JG0xgB5Cx#d)S$Hb|&5QY|_2^ z#;`Nv*qOIY(S=JA@5_u~Q}60n?8>#jW_l`V?7ngAeM9lSm(5Qkjo)`adp!uRSDOEt zG-2NZ>~%l9USfVMY2v;~?6ntOFEl@#6t{0Od#%Llhmlo^-#3N5{txW%^nhuzfULWAIm8S{_B%be&-eQN=)oG%Jm{ms-k`=-e1eo zLwCv3hwk!7AGS-89=S`&GN}Wz15okeQU_x%WB6F$pQu*T!J%$(O*G<~kZGaLD8_*A zxr!)8n#TeiYK1>%{RYSwAX=#tXCsR!tuL;_1b|vhLhHs^b(k0c?gRgR2Px2zoC*|+ zhy6#x87xZf25N-6q?dz8JhIL{HPBV_XB;7JaYN3 z?>uDXMqurLa*CCklhV$4|6K78K_;iaa8W-t%76r}w>4Q5{AT#WTC83R<|p&ANS&F! z{e9$KPuSq0V*WT7U1>oVVE5bD-T;0^4`a45b1KDxK%|K8S6R8Rk1Ri0&Vp{wS%0u_ zNBB++w+!@%DnV`tk0FH;E!6xg#to&MF>HE%{NEG$m_w5keAav}D$*R1gkL>&*ZjHY zmZG~Wjn`KE16XdXQBww5U^1?Br;7W#?@S}Zo%u?4_Ab1kbZ6(-oo#LV!Qz$V{&^nG z&#Sgbcg01~eO+PiehGeU>~#(Mt@OLWx2c=Zfw+3RhUY^4_-hGVs*RVW%2N@s!5&Vl zh$#!^ZMgphl!*djU)8Dr*KEKVr&@wR&1piebu)J};D`^=7L!XxDEZ--fDo~s3v(nW zngyfmp2P8(e+)Bo+3#}?Il?&NG8e5mLWqKFZfnIqS1dJBFP~t*w@=*EHO^BIM=KBSrG#;)#J@lBne4jGIcaM~x$AyX@*)3;p=b z@z)YO%+O|_;-}29;8>B-##8?o@hjQRedwZ%pZU=)+Hmd3run^4XO#c!Gt zQv-glwJZ1~$g-0R?6DCAX4~93j-G6A{1`Z{xP{{fd*yf~K9@MY-^ua&%ar_H3$1xY zjr12Kf2b=Po%t*HlglWnYu(w!AI`}iCF9SyU+ORcK!i>!TBw53Q=a@_`?j<#vb0!y z?!dhj;2vsly>QQLZ-xFx?EuyjyY*>n0c<5|E;I8bjQ1x07skiez{f{t$*vw>*&ked zm2q(|BBe3rKTlT_Ryj1LY5aej7Sjh7bHCs?IJhhpG~X+jVKEO0nCZjI^6SK4yOEde ztHY!|nbSIst%wCzHnm9hGaIrV0-m|sPd)6Xl3|MpjLts4DEOSvp_PH7u=I4uKy9tqpu$corxj4!S9c<;o-msjvTmZlJw4Sv*zY=>U{Vdt$ zL%F~<`y~6k`TcIbsGwgGUyhGVc5*@Hi7z2;d?5>m*|p5C5ntvB|D7)h|M|c0WwUTczNo;LCnUbSe+yrh3IClh z2HdTmo$h3=HJDjGi7z_vB?^2ofG;)TJhr~ToiDYR8dyhbU8i}!0S_2@?NMy%ThI1Cm+pwWz>p#*vM$JXJ&rcL%>m)@qG{l(wu(HhlYIb z{gA0P%u3itIngAmfoW1{{2YFdYR-FF?nyVEO$5&t%t&TD`zzy_ z#>q3{(ql;rcrEy(0iU>3U4bmslkrNI;R$Yyfh~+OYkF|YjEXq-p+4UW+zM{f7U{vQ zTJiadF3JaiTf81X&JAKHpFMGJ$Y5i*2?$fRXHMpF@nS^J$N<4b~~@a1ub~Bh|_X>hs3MGhHhTH z|E`2f`|%5tww5lOL3vOvTz+-Z6KfOX1++_m`jj&Lv6=5byb+ar@Jp^rQSX;K7HTn;**N;DXpbe{K(#u zPmOHOeNR5wQRBn3`@M(3+?R=VJQ-Mz({G#EqE=qjTA=Yx~)GxcS_VN~;3L3o- zTD5Q{*+Q~}zgDYifNgHqsxa`%rB$o!F0DG*+O1W!==pNjDmmlU+}2yUb-PyG|C=nJ 
zq*XE26PCMwlLaKL@^#~uri)vWR(+0K>GwiUZmI0F=BFpO`h#0rTe`Js4kkai(1?$WMDU=_LTE1_LAw7LT6aNL)Q`4Yd$31QWe)? zRe`ElvyV_xn_beb{P@8Y$U?W_JD^>J7=sZ*mbS&cRfB1erESwZYhde<yh8` zKcXtuA(pAdoFwFD-S~D6o-0ZqzV+|&T=c&2sL*1CUU-J(2a6`ZRy=Xhoo_{!T6nHJ ztE!mzHV^UnM7zYd)79O4%gmwLQ2Q4Y|K3Q@UyWNffY*2U>$mYgIGW$hEIkaQaXsOs z8OkCq+&kT?BTn0heE1^{MV$AS5(3eME>H3<_!4JVAOai5_hI-$2cPC!;E}2L6%bI>5H6E7I^NJYuHtTd1}+AMWTYW8WOb? ztNHq(ezVIa=cnQ|-CTsXU{JbPCRSupbVn7!{gjvzUBR-GZ zR(qK$Uk#6Vv_)B@Vjj^?aCx`@aMNs8!XrlD{`Y`;HJEgFtA$m%`9t0B@Q0@xHK6p` z)|0={A8K*cg4S+-I1e$kgfW%yhrrh0O^h3F2xHD|p)S>)yjcXlS>GmESQU8F-s;X9 zvaq$7|F49FU2BziGWRja81@U?4AmZLpM|n`bQqTC0cOt zJq+6BMV#C?5z%*@I2{>}tCk7yf4?>c|B~6?6#EyJ;kc@o7`)fG;TW^OFWHZ{=|;~O zoRAK1HZsX^RYtZC2UlS&hZ*? z_C+1|8)YZ{dZVsegPj^8_;K*X_KIqB9#a>ZJAdI*;F*6$tSIsK5>O-Ycj5-D-@2gT zez`31_atgYq_@jZ2O|E4BR2+HI%Ttyzofeoe_gs;jmhQ`e@S;2ClZIDp}pwt+l~>? z-5{pBD(J3P#|XK*_M)2y{8d4FyZKwnqJ2V6un`y4PVJpzb!jj0*I-jqwC@-ZO!`}m zbn+wRO?HRErb01a&;{T(m3^tW1k z$(bV{{+ilu=WjCXxvGo5a=V)G_i+_J#z1u(G0$G&DIQ12;Z0vybnDvN8dH!a63BYu+=CeQj&%bmGM z`1WyIjkvCr{N;Sx&%n#1qt%#P6U%he|A?|!03UPM#D8~WTq733dSkci@GTY_?C@Yg zS&Y56j;vdU+221(I;t-Q)}gu`_;=!z#K(2p86T&<$?s02oai?2G4~JTV^hG#SKwo- z#hHjr7MXW~kJT(!{YPF^QaE2xtV{*Yl`B$IDXB?>dNIv5WxEp@Rf|c`?(y)c=CZe^ z3&_FsH>*k%;OAM2L9yo&r7!YpIx^4Q)b~ytsh#Z zvnv53uXT-F{uK`S(fUb0rjgt+pX+B&(I%{&!h*0FVE6 zS-k=Y?@d;>BL40vtM!6QR%2QvUqZ54BU;lGT9Lp0ZkMcgZUB3f*JBPFBZU z`@hL*wYd7??~~PP%x3vLvRaK9=D$x?2^Y9zm2&SStA5BA^pw>a@#IDF0w*rkh(Ezn zO0o*{eew6p>LuIlvMNVLeKqn8B&(}nk4RQ^Yp{Q1qa>@FP+L+#MoCsfPAIvkWJy*d zw*K$_aSw3mulJAD;vVD?ZugJlkwfVAk6Uam|Jdk^H}m6zA)lG8-Tv_6|4K~X6Rg1gqJ^2|f;TD^3o@z`mKk}O} z&linaTs6M+Uv6HZXXHsbe7>~?d3)mN_lP@Pm?r=e=Sk;Xm?wno9@vwoqu{A9fh&A7 zFc0VjY)F_VqQ6BqX)lS}b#obV2;I7Q z*y`e^p^KlSo2P))B23v`b z*MS~fd>jKz;m*g2EQipOk29?`*dyDeo8_=o#K)sAXt~#`B|gr6PstgMN_^CnOME=~ zI^*MHZ{EnT>lR?w#n8*XD_uE+D#Z3?U4N-@Y-eqka-AfGrRkjJ5wliebaYOx(! 
zOj=o5HlJywFEUJ9kSXwCI$4d~OuRb$G^%|RH^;QNbTX9uziqUU{-#DJ_GJE1tSzNj z@_?0RxuC(W%5k`R6e4ByCINxgiVyyWIQc;Dxu9N=^ z^S2`+zDV5kW89?7eYH3nyFpMkP^hQ^GPu=W0#(8vH`|Gmc4%b1oj$DwywuIcOtngh zm(PKJMjWJ*#~+cv!uQ3++@{j~6p$QRJ` z0Cm0wnRq{KwRpab9@WUiqjq6+p}4<**Oyvo^)CGt5fi%bsZUS|zE_Fmh_%s;X;tzP zpY!4i%rBx_hGL6%ZjTUpMfxeo)>Vsnwrv*IXG}uGS7={^_0^015X)1JlM8z#L6zo_ zrYe;0;L;EuT346VU``DEE(NdJi4{ZaHwEK2QT&gL9)8nPmO1RV%(^{x=rzaxCbQm; z*j}?Fu-xNaM+yW8zs_Ci)S3DubYM>Q1sZDo+zi8HZJ>$m99`@53$=56~NaS0BRTgri0 zu|}y8$2R1`=HY<+BA>a9*`i#??^f8N7gwrNqWobBcZ$sm0gNu{1A~ZW!9}f zWPh#(ITd7BG#~os4}|2;)l{SRlOFwH<*KpX#Gl2_`SE|L7=V4Y2BEi*I%)=*A4&At z-=~1y`Eo+FI2^ei??x`XpYz?(Kpkw3*E@e?8e7GpZBz=ZO^&!`x@we>a&OCHWBD_V zY0Z=QCPzXu?XaG1z2j!xD{t69TEIpg4;Ka*tb0BTe|t0g=6?W?_G zm%$6S^L``OCTx!PfTdi8NMVL~ffJ4X^%gYx*ez%j?`ORNghpM}XTqNb*4ZJU(Q5HA zpiV-g)tJER@_fOtOxTswiC(I)gFT^_DsciW?>_YkRJ*aKzyZD%l zC}EyC-$;4#r@+m3=P!aBSBqN&6EDRYgsuKDe(1;4UHh1^@Uc--#d>QI&`CSQWTfuG~}z@?c}$-m9EPrC!2p(sPb zGn1LVH#rE;nD~M_>1(h2JqrF-A^!jTPWsv_e<{kS5>*%P9A&8TA+_iatH!FuYRt=2 zmN+bGr%(C`Q7~!j73gX^+sCdacR6tPC1e2JWQgW@tFok?e8UpdKTd17O2RNWmf=YI z6tkobNPJGlb5;jIUh9A7a}J`1-uRr39@AgP=QL>it$ZGi%zzY4R%7+Ei_hk+ya4fe zYpXk-A9eEiG2z$oxkhYZ*@553=izO?htIxk|DDf=Srz9{8_;@-JD)Gvy7_#gh4w4# z$!9%q2s|X?Gw5|u;`6+xv1ad7Pd*oZ3?6?X@i`yQiO&;}6UuYz-FguHkhwF`SwCdX zAC$%1FfDq3Xay-W)MF<)e{|@F)T8==IUwkUl%;yXhmYnxGrTgY#6sx(RI?WL_#LLT zeWA5HasxDB&yeo}y-jb?l^B@bT5VDXYd+T2rid_C{cxqB^$SZPyRw%YdavKO zx6u#RU9#s{q^_r4a_GHUrAgx4l0LYy+CHnxYMe%VPF7=tSyw{TBdlbC*8`nfQJ=AP zccHv#@y^$kAm(bdTde3&@BT{#dT;64YV7Lzz1#VoIrJ*=eKxDGr?h?oZxw^g$Gyn0 zN3==5;V|sd+!_rx=A`6zYCn=>l;krRd9HV-r%{jf6zZ|&pWMvv`a>$4>hwxN?hKCt zbcmIx(qkUxGPEUBL)WkliHxL$JYp$>CVfqUx>)L>2y3oF=Mt@!Qz4E@X4RbmW_7U^ zcS)1_X!eROU%y{E6K7Qy>lkiGb5_^$n51elPnNA`*YTg__np7{pnm0?ybY8OP) zms&iXRUASbqXcb)e|5zhLLRFltxq`D$ElGMJe?zhNUv-!1> z`emaxabNhtGS7MC#@l854w6c8a|l;XWmhh^J%YM}r262xT|%B^+?}dDca&5wTxk+! 
zvHT0sB zF^i!xfQddtE%NwEswuycpfFcrX9-%5oN6k@nkLRroTM!IJ7TD?W|C-BCrkCC3+L;K zy*lpEVYd?2_Y;HOfe1uEvCVJ5`f3aIB*meFE|K>HUXrEiVRi3E1ac4Jqf5y7VU~BM zwJeLt+Ojm4RavkAZLqKx2Q5!X*+cGz|G zC&(_~H}0DaK4`3xHKywinyS$=)&T4!z0-t~C$u^Mr}4Z>niJ7st8uawfx2xslg@F5|wNfl~_wY z;x_3E0Sd@OO?(+FFR~rthViPJO8$2{6y?Ib5z6qR3XX!Cd73l`zM!37-H%3uGnrLlmAr6|348EboaYe zGCcox^Ly0qrr+c0nJK5fC`dp0-JU|f8xV%!j3XE_Pd#t0jwtMK%`AF4>v^jb*Fck| z!h)BUr8w~d>TI;?0vLm=ca^xPRb8Tk{jCx|X>{JX}Jr1*qDzUFdEdGyZd*K$0AzAMH_-Lo) z&S&y0S#C+5OIR-9X(wv<_3})%_==U}nUG2^ovyud{4=c8K^$l$d8X@ry*w{1AzOUN z`V`q!LIChk)#5TB2Sm*+Rlr?R3{olX1#%!Xp)gBSvSCeWC3XzBU7`;mD;tZId1L}z@;s)sTb>V^ z5Gl6R!A84edK{!Wdr3S`QF0}Ay8G=i{UFPQ{2rOEM1|t_$aE!YO}|~HjgaX|bP+!3 zlxdZfWLl0M=4z3%R$>01OQy$TW&iCm9Vgr_)3cFzJ%I@Oc9}lpv>Lxvrbj@gE5%>h z?rftanFhAL*i)vTK;CBXzcidbV*6!~VH;$bWcv7Xl1v|8#$HcOvCDwr*f@7s{?e6<U&7c` z;&H)+vE$l5hJ>$cttSb`yVe7P%tlRXJmj0Ob|u!O5ZBwWE}8mQTv*#>u_Xk;)}}BSawq_~YA_;kV(Bv#|2> zkyCgU*18J&XET~xD%~vYKYt_o7|kR*xplbPXDo4hZnqFpufoLEciqk=Tek6-M|4QT zC442x_3(d6a{bdYl3d%6&w{Ogi9FUzmO{(Y;BQ=ScwjjG&~G#{%^lH*@82gv_axt`{L`D~6~2)yrudfi_t z)+NcZ=^ZpPBN>R|km=nAmzlY+L1oh9h$qV?mW{?_;L!^L558cGWwU*Qc`d`hu89w= z7P3)$QIYr9FUPlEvChU?dO0#93D(6)a{Bxg^!W$kpRABp*B^+VTp_z>#)J2nZN|&S zIhbC72^K4+U|wP0cJ9fY6+!&x_{=KwAF26=d08f9E>|C%4SZ9H{_;Q4U970XJ1WKg zcKWVraZ$@<3q2c;b7`0WkvM%ek9P$h3`mNPM~|Qk@*X4n2w4ekGh^=9SWtMYC5};t zi|VER)~o#QZB*fN)*;U^id}j3c5JIhZ{ks@HxWM4Ia6d~qY~FWVSm*^-0^R%!t}px zjL~Kp{F^>)l6ZHC8lN}b9$_JRx7)vSlgKv9X}4EW|Z5H}es*aS?Z!mrkcEPacs?? 
zaW)^H)px;U995W;b03r9M-zwVCt?2D%L!HJA$~YLww9 zqoKp{WvR>CWwPP>M_|Hm6{h+R%l{5Fn_>Bgo?B)l#b!V1kk6K8>-@Q5gfkBqRWaQ8 zY@azH3GcvKunD)@&0g!o$&L4d&%ChJG8}xqATlH|` zJ@}*$;eeZLeB<`K)iQ{Ep6>S5X1#viJB_39dDm@kS?KP!V5I})R!87h?icdiKDWiv zVYzHM^^u0V^puu6jw9h+CFgip$t}hs$soz!hEkHimePMOQSo~IwI#FKqqN=`+pQ*_ zkxNkdGJI-ma!fGw+5T0aPsT|>$^C7~EL*X`|F@+YLF3=#7+b0r z^g(zY;gx}BF-?wp@?^q#HJ;BvJ+%C5L8n6nG_TU3&%?W>sRi`_I@jcwfy(aeJcBUJ zFw3@Ihx5Z`*$VVcj%lU(j++y79XH3*Y(ku$$_djsZKuw*8T+znJN2G2VX9`$?qdn& zgag8(-lq0G>#yP$$0dI$Xg$1lWczps8)Y&<)(6KB9FskS$+CCixl4yOoC^GR2d71r zB$?~zzcDh3yIi%wy8w4lt`!oM{(?-FYSkbQv$k9(yr%T(SfLBrfs9Mupwu&t)y-Uo zWkX~ncX`Ka8ey8wCzI~JPc|oP!QD(qKJegi2bYKGrOEGM-m1Q|{8Grq4~^A$R_*Xf z{#x*mS3CUjQhemXh9198E=-mm^5;6@U#H(=drd7&l`n6PX`z9||ZUeiBICdA1GBqt|q zPN2yj{!OXKiRwqwUZDDWr#~n^Qtwd2>N*rUkE=Icei5%59g5ld4uxKM^~O5ry5^?} zeJ|EScfqf}2*CuP_tq2I2}ct$%b3{UU7D>ixL_E9OLQ+MI@2sRb$ zo&uj;!TI;3d+$=F-kaWM*O+v}uKVy#EVzx|U{*k2`OCE0FO`PIp6hLU-XI z#C!Y@$aU<0Eph~RdHri%yFnWd8MymWLBjS2(;**gyuZd;n;)m9(Nv9u0?bymjfirL&{u@5;i1d!Td~-cb$c($} zvmx&1kyDOcI_5FU*0y$Ir(yeP;YS&13?#i=+u5h(F~^WtJtnGxe(NAbS`Xn(?McB? 
z@6}<_l?r+eSvzphlsp77w7EHSJJ)gVYmpJ$<&;gQJ2(SArPMonJ8uwX8kE8|!(o!5 z<|iRRnq-oqxXZp9RUQ5w)sC<{$fiHZ5tAW>>^FTEPniYDBx{}}3xkZ3g4lm*UwEg1B(Y=wHN8|j6u z9OiO1|Ifv#hz1JIZ zFAaFDQ}xHA!jE!Xt@ji*cXr@)rN z4yd8ODzbdlXhK072k?Y#r`GVk6cr(myL z_Bh+8&yE+DxGc9}hr8`Q75M7a$f5_Dxv2Y_T-s9>-l{*xmn zHfktYa%E>wh@_KS^^*N2yH0lvyQ&P@@#@uqMayk6Px{1Jwnsfl$KUJpELd*K_AEII zozZ&jOy|9`mC)z@KP|V(_0l^Z(@XEv+n3vN^hYeqZ3Y?rB~c@tm%Y2(wq4eG;nEbt z?m3!&cFLb$ZcCB3{p6J+N&Qd8mkORT$_mdo+MB(bL-G{JX*(|%1#=U_n% zTr*~gm_LbH3lFqzXc?S-7<@CAL-R^`AKsGA%yrhaKJ=8z@!O;ZbR zC=`Oi4@Ve|1htT$@Y_9MAlET$V`L0>dDRBon}!oMYv6xPU%?lD1TW=dA}E-JtGQ93wQf?3U7JHCXpug&+rgD`g*0|d3^sIJRhb5UCAEnQb>MD z=gATb$cP%I?o{k0RIml-`lk)Z@e~s9i|3dAHGv!9-=xk-BkOXHqrq2!~JdJP0V@3UUe7xi(^wuTo!5R2!ud63l_uVnK3lkA(-_HP&xAHLI1~#!I~1NO zpu1at4{6g&@Rbpz{$sp$`{rhYJ`XJDL$YEbNql92tjK$}VHer0lMc;_lMelYOq_q( z_`dPHktjR1U>xky8Ao%ofByJ%W$KIB_os2Gl2k6faZ{Z185jAgIHQQ{BU#5&u-dZh zqk`ZGd!6@)BRUy;(djw^`_OCc)f+G9gtvh2CzF@f-4y?Hh>M83oNIDje-zhq0$oqu zntrFDGmx9_h>q3ZI%wL<94JH6x^Ie)e8$DZT`uan_AIVdyl_-F=i$3MDnu=8goHwZ zr%b@i@*98am(e%PLzv>B5H@=FrM_^1i;TOxiLPaGe-d&pJ6JBXXrn@O!kd~$9Fe6b z1sx~v95@ij%Cz36&u-Z`Pw0RlgD{}HQ`X@LzX4fDz9~-qjEkB>o@+8_ATtkG-zi^T zE}=-xw85IrJ|T}dhLmbLxpp0JBz;QX?ZEsd@>ReT^8U$wDL$CfdGjDQB9uO-TNgJ- zx`pTdq%WQFV^yy-qpv@pYC8Iva=I z$t7y?I%Q?#%l2DN(pmPcahg4y9Clhjwm@bDb^-t}W zf>*IqcKW42HlD|)r1;|#GVT3y{2=9hTz#}y|wjwSYn5|AbC3wn)=D^nL{WIV6 zX>u%UChaQ^(50z!ppP1pk9Nm#;Hn|Hx*SBo;IDr(#ledL^=Yact)Lx9k#O*?!MpK1 zlHy&&n((Z9HG2AM;^jctxMWSo&9OMA^~}lk(RSPf&eg}Lc5p%hBzL31D~B|k(7hZQ zE)x=DQDKDh6_^4fyY9-?>>h)dZA2d_^^KxOMHXOa?naRUa2$$vMd39Jp%e|$Dlr-L7Q@lMs zzPWNxf@0T+nU9(%eYZ9b_a2oJi`sB2Uh1?EhT2xqEMzd(W7Mcw&#f4<4a><5& z8!&Vi*>L~t6rYy@$%+T$5~BFTM>bI-#AygsxFHI;qEi*-mz5fykifMsUT+KNHEV## zbt@v*lj{fW4%}5A*z0@bdqA5}>m#G$E`PS3!~Zcvn{GR_5_U)vHDsP6Vuh^JKLlQD ztqRDHERR~)VBlgr8VCI-o4~aPuHOmIqV3RYD;^~8!aPedc@|jhHTuy7@j}H5M#UKD z*^56}24?$b_22nzfM0fCPKwXh0Rwj8Zn#=%nD}B z49G~4xocp!mbI&0No^~)+!koE7wlA=a;%Ypw(a-Oo!mz2b9vuzRoAN*tLR6D|F_D1JN}o;|4)D2g?=3!tuoQm z>J%4j7Gh!xiOHXMPBgN#r*lBiJC3|IPD>%y9nZCA=+4 
zezoT{SIl}@;BqbaQ0B2KIdy@0YslCf^h5BoU!Eb-oTrSH($}sDR}!mbzp03x+f8(; zk$#I_N4rAiZR-AJSnlUQ?h7*UPLE~X?C|F^@OkP{Xbx#dcdH|>YdmZo2CxZ4aR)p0@2BJ2A(J&~}e$8GH;YngY=kEmwuQl&)~UEGT?0w;lI*T-Ucd5KCRguwZT?56X|*1MKk_))p`p3ERj)TzJT+b zMh|5YI_LUO7x;rIQuwd_8JwGbnBX4~^xx&YK62_gO;&aibJ+H7C)kB`Fw?R@L)p8x=7@0D)VUt9^vfxO_t^G1@*0YR?@+e0R z(cqQ%=SdjytEBkefQBs}2@n_f%4#)vf=<>HY{=|3c`RKSDoJ&9RB$C9c(CK`F*llY88tYs2r_US}5KHwOiE?1pI zC)RoDJaq0lsm@L3g?YRLkx_9^u&2-gTY^4S(H?$=*n^YGK_S+0`t^;1$f2G0Rbbyk z^}{RaroXlI+r`StsVGmz1N$ds{PWxxKfQ<%>xBuzvzUCGkatd0<5xv!qs|gkK29%H z2ba4s?&|U79_o;CPjzUymwMDUkxH%CoHSTHOoL0J^@_!QnpMIS=XLJ_hFE}Bkpbf2i^ zU#3-6$PIiB{Z5dBQae0*Y7nv=4Q((W|@wSF2lDwh%-&Xl!U;MByK8!abV?43nkQ&&RxxI3F2Ene+b#m|aiHw-q<$+e)%6D?TUp!c0k0p;gTkR=@t@dd z=7te+#cMP-kbIN{PHH+bOilN(exT8jR~)35GH&1l#o2MVUvSn9Gb$s>-I?)v55|9( zmO{!y^`7dmaxX@vo>1qOGZUCF*mM4Qp-ki;XB*=vib@HO>G%PiIR9={cFz=1i$hGD>R*4Y&0) zWKQ~D(eOKeNW*7rayp%-;hANXjXMXw&zs#bf`3RB)Dt;VtCM*SdI^?a(>dS{%M{S? z^n*c&?x!$G`f1EmeJYa-Do!%`ol0aFL^I!R^NNQvEv{(+~sZJ1@y-`z9JH>+*mfaVKr(wg(8 z8Aqg)L@TAP?s3w*W2=}VbiV_S(F!+;{1Fe%0lIEUG5rxg^DVkwG@#r-FhOt{K?Ux$ z@6z5aMC-@E?=4r6U zX^5N5F-m|sepbxXNn+;!QEcf^ zHO}Vwxs=*J&Sv2SUn+kJllBdD%zB)anIJduJd0WR3_uI}RZi7Ixvl@%(+6B7)mcdrb8kF}*W`7FK)Kx+5>@ z%>vMPB<6NQ=TmB8BVcX^Yy=4dTnT&O6c-O!_~(w;#$?zw(Rlty$0_8Y%_)%J3S|cU zgJp~S|D!e+kJjd6O2JL6JM`+%cl+mw85uGl1kmGmpY_K5s{9X z?x&X-M8J$r#7Gc;KW0;p)4PE>m+`Clv7JnZD*Rcj7tyc3?*XLa4x}^kn+VeR96C3Q zP(LOoiS<;1g;# zse_YPI=Q>hpP+-=Vaq|t6I;$`y2k=Z&&H#TACKq?0uRawS zW5855{#65Br5b<^_*Vw|3WHk}DAT;yFbzjcPUL_r zs7*x&=}vSFmy|88a@F5cO=$(iD-1%0268L5r{A(|^rGrE$fbE@bC}u8EM_L~emXOs z$NMZ-(-(h>((TXeRGcNbYzmXeBrub4mUt#*@GSfL!fx>_N;U##i7cDQ5bGlpX9;7X z2G6pwPj-uENnrhPmVh!}hS*MCIE#!496XDnuakQVyBpClldl5{=KU^w{@Kf^*=hXD z%Tt^N)4H#)zjmiW2i~x3HjN|b&(dIW!{bkuc2f)38Kq`xJar(8*l^e--voasx2vM*NDy<_~s=sZ}pXDck!J>U@Ein!Q>~$)6Nc zA-EAgQN#V1Ri2C9SF^!KxPgxr>gZ-=RRa}sa`_I$0a`e)WSyucc-n4-tS;Q>deZCC zKB=cBk@hAwPqa)c){3+)T6e9RR;ra~U9}$C2<=Bpg&JXvm+mQkdJpOU5vh(qm+UB1 z`w1f61p^t4fXLBK*o@>^ 
zG%ET!xucfo!3^Py{w7r?w;5dn_DPHAsB#~}0<7ur-oXE;mt9A5z?fDx)xS^W33_qo zy@wJ|i!1c7ka2-d-g4F#_Ya?SfgdWgT*PZ$uVKv`P#&n4sDsK~8L4_)xf^^@PEXV* z57d}=;C40y-x^vLjL2yaVyOOkflSySmG;}SAsai*LfFqYxTj!eqwXa-a&SHFM5W@b zeFr)&0S$PTH)M|dbpn<}-yqN*K?BbSYkXk8pF&4O9rET+!MeX-BHw$8+hcyr?C|@we2PFz&^xSI=B@;Eu^X| z3>~}(jmR(&uagtv>RFDpgWG!gN90wX1J}?!!J}};U5IP8z<2N`OmPv>K8Ahxbc20l zruPw3w1H6ISGG3r7nyxRzU_o59KHqNK4%S!Xq%I-NN#rL44$9R$GSR)^pAT3Sk$m= zsE_s2s2yiH3-ASo5(AHlsy)@RYA@hXf<}evSC!ecHxp4WV}v#6Y7O7@(Ehl>_bF%I z$t}$xoE4$gDV_c0!b^52CNj6va&+$qH+FERQK#}GD@Q+smlyxY_mzvNC)fWF-w)v- zHqsDPO^$SM@AXC0N202>6Z&_Ef-?FKsmQyJTOv?xwE!zJnEba?*SD0POg^SvvYx z%*J*Dt&d8qV}(k+;wO}qnw=*LPQ)jU-5IHqWOQ^fvX74s;q zFjAQP{&3CKQi0|X%!S`l1hE1mA%;58;lNi%_W*^I+9Oz*J|r*c;^7 zUvMlqW^>ge{IM|p*iIai2Cl2#QnnsQZ38CGcW{Y3e{C+>T9(oHt-}`mIOeUN;Y9rV z*B3o}kct#UZGPpPIlZRN_7C3`4{qc01WK=RfMB zDWmlfsfzl<>GiD{_WDx&sYTPE8IqZ)OcKzPf8ONs83%>&Q<%6C3fasvp^>N`J&G2T z&pMc1HknB%i7!qa)=6`#W~1K0)+PJrImh*;#NrtSR04D*l}uG9BZv5S6U(I`xO|U@ zy7VtqU(OST9l+Eb+#7a#&Njo7c=R-Ass_aXEWInKx`+&Z07yr53ntK=(ZDpvKlN4<$AA0_+>WSg!p+$c(c!D23 zw&of_@R(#hhyB=y$n&A1R&#-XN~HShRCm>xu%qInS%_9uhe z!QARj#8;>2$>gz7nviIg+nuZW*Wp!tf=z`coCZyJb6uw(0>E;!EfYs;(=m7E9yS{L z5>pn*L_kJ|V`nBZv4a{r*y20-EMw~Z*=!KbGOi37pYa1i_0RKVf(OrX#oWQILtXjw z4$@nhMK{^qLzB~94i$%bi4t~SiLRfZ4=WcW2w6e=?kf}O^}(`(f;lf_?!GdveuXK{ zFat6*+inZ3z$2XuP}I$Yk4_YzCtME9dSitXTHdx23& ze+B&iV;d#suSNcmsW4|#E;X6z->*81+@&F}!CFL25#jY*Hj0L+$@4i${jMr0JQQ0o z0p-{3)q`hv?tgKHZ|ipQXIP0deBGUcs$wc3ll7fA^!>N@Q1e}^Lh*tg>JEmFFk`n} zNCXnOBV0?}Q=qQ2o-exL*Wxkwr}kXP*zTTf_|@O{Jc0WzXqCw{LEdqoV1qdX_ivaz zaeo-7gP`+q@SF5vj_wTp3}t2ukIm1RRyss;?+MRDrvC}@JWO!u*LiqlD}t1k*d`96 z_6{z@hBY=*@(})$P5gbU={W!R`ENeE*Yv*WGyZr3B=$B?0_>Dymx~eKD3Vh0En@1% zBZzC*#MFuV#MGyFu0xG@P%M1Ecus1@4o`K?^vwps130FWLr3jst*IMp$@W~tv}W=x@Ah=Tue1jKg~BOf z>ib0JIxQY^o!qfbcl~ai0!FNp1=MqMouq^76gId{EGOl)dA}9wbnu1Dne0D9#PB-` z<^S}-WH+oBa%+TGAqwjw7Zj7+W-3}jGW8?f`Fwo=a=TpAE>#{SB+JXI$`kb)pSlD` zdZ|T`UdXJFsY#B(FWlx|ohz8#^VZ-B-i;M(;8#!$+I@3)1*-=q-wmzc{+DT_8@-dsb1cn 
z;r57bi*7H84wo$NSGoN~_pI(Y-9DXLY^%A@_`FUKiJc^jWrkZ0bX#T!Y@!qdPCyK4lRBA}2xHKcxlV3dqlbmG<&)L`evCg}!yi9T^elh;K;YxmTlwRa z{PE|Dp5u>i5BxLqH95bAKmKgdKK}TIz$dD=^T+q`$DhIPS`|peP|?jl_5YcQ`g7S` z4nbvwy+Ge;4$rEv7rx+rNs3JDM_;Y5KU2NG;*mrCrSDi>WRJb7st86Evp}}3JfQUP zSG~1cDgsNlzUr&-(FT=1@v8Ub<5^#tsjPF3qb1`@x4-(-!E=uH8beB-e|21OusXC< zFiFnDFtN-eCL=1WwACD$^`fJ=D~=&`{i2S)<6h9g&_y&AImdE!_5+HA-oBh8jy!SMMd)Xs zq8`n?S1q_C6|SpVQ?j~hwfaxYT3AZK=5>&Zkng>|hx`ma6|<`Dt8VDc@z`m6QiGh> zk|&Wd+usOW5ej6krJ@Cz_j*qzv~w%%Ve<3{pU8#Kj=`4u4vOf!@|7WezAnhMg3V#Q z@1Sp^6s!AK@4V3nXUtgFR(2r}j-qSVl2fAyrYX*dZnu)8X|dho~*9Ql%cxcklQHwTaC zaioRa(tRZRuY*U_IPwqn@$SRfhX;>5k0T$jN!fK=jQu)R29;< z?>GB&7r#$;R^6?+Q=QA)gI%I~?nkt9_mzbD1k;OyxU10I&V2~Go#;5x7ce!yq!A+?I9RW8@6@qht zo4$8;WT-=}Dvz7u=W~ba)c9PPwW9l`4f)jwd(vjg=k4Fy*jqj0<8>uC;}+{?^i5x8 zkb#nJKyxPIx5{nP=>vuiAo&+@A8Z>!WB2iNJlj4C{C3_pL@(cP?>hujT!CO>NAwVE z#VV22nLCWRSY%mtz>h=8FYuk(PXJS`EAzsv;)9M3QyY)4PUHT5{@BZoFZd%LnLgnk z--QS1JsZni>7|a5cw}n}`RkuwevthAbAA(iL1|A)sXX{wb#F?jDJ&W6B-1>pbzTPONVkd~H06Y?$_8x9W1#VQpGBH*7xP%JGf-Sy5ObPLL6p#H1 zF;$1h^LRAiu?3HB;PC@I=Hf9v)`|1ZN4rsb3bwrSQo|h&sUKo8vw1A$Sxm*MeY}bb zXkiR0C!yEsfe+wxHX_IF*M6)2<~Ijv$$e<{E|{RF&5_x8VYfL1Nn(SK`DE!nLY zU*|fo>H!kf@oxA2JlRNUwvx4R+R55yd0o9-`6#jpQj95D(m!#UFEkn-KIjv!VYZ7V~Z-$ODrg4@?>=uk~&w6;u^=Iqb3Qo(g-UM_e)a zhRMY#Wx0*F^XGSESLVc5C-7(O0unrZu7e9Q#~~h@RF!C$R-VXA)=y#L^+`;EekzlO zxbJkmfQdnFM5>;6LI@3?>5b9H0#Pr*E@HoUTEmG##v<#<`l(=CtvArRm(f!f zh#DI&^*BZpCDZw60yII|V8*}154-rlRUdroD4TdpOH{@N!6ptwte9A$zKE6h=lL-6 zuRKxTj?B}?(US=Hu-C@#;b!34LL6!50<6yUD#Tvq*`4UTo!iyxMCbh-?cB3=pWh*J zmebjZ_l%0pHrbGeh&ZwJ-=;DZW*t0=_(Np2f;}1s3Hy$d-QLrX>>CVoJYA}&PqpA9;Fu^^1q^5=u(m2 z;Zq$_6|4?3P?6(HWL4mR$WY|8$<)r+5Gs%0c_*3I;~<-RECqBJtVY)U---CI`@@L8 zkdOE)&XG8J2bXRC;~0NCI^T+}iLZ#tB-ccjSahc(vEEzTUmyPaJ<3LA0(cJU;U-vl zk%N5tlHFn&JNMu%$V3Y`w*wgfWhPJh0(wn?0Mm#8w zPCexJ@PC#^;{Q<|36an6e_tLgG2fI&A$F%cl7ecT@~F-XZDif@j;29MBVtI)zym&@ z1UV$bpE`&I4BU_BcXzPJ77)?C;D|V@%-~yfhErYh<>f{NdB$uZs4E{{%xj*3){|h_K;|ijK8`vTph_fCwo|K 
zyU6qMKG@ZL$k-Ui%E~aC>lc>Wg}A+#_AQfPa*N~_@_RJAwQb{V$DerF9c(t4+JOwD zqj=kZ=w~=$`=5cYL(Hd5TPG0S{r#G5gIpY1(=B*T)^snViZAf=4YPAizq;yN)4BYb zuKX{o>2>b>ueZ)If4rt~Bi6JR@_J}Z`>oD3Jr7I!=9+G}&Z2|t%{5(fovf*ax4%cO z>E<4?roEQYYkI>%d}q!z-O_W~BoA}i+O-4IpZeC}QzfQFYEM)|5qbD7D)>Z`SEKJy zaVFJYugWOMeArh=U3weG`{QstuOU;)reH2nJLdej=t+kts((RI98JYm>MHAB(`~Cg ztb1K|ME6(S8@e}j(9?ElgyOP<+C%)`@@Gm4s%!9f@-6q)ckHErz2zLIXx)<%0 z(51;e+iGihk5aJp;nAM+@K=U&?AC7@6>}O>pHck}JaZT|B|{Y@{(0*P3g9_kpC6G& z{n)i<6Yn`cyXW!p2M-pOt+;vHYvaoM;^;Q6tWO{pbwzCw$f*Qm8=;1O`cuRnajv7G_BE{K$aQ2n9~)LtfVGKO zU9zM~8Yzt0sJ<8L_kem?^%|_-UFzKGKVkjWmXh_mPnTD^UbhaJEqAN$(dFuvFw1qz zbW3%sba&`h>Q?A(*WIa;^cK6n*W0~wjbTIaM#KHZaWqzQXFED^twx3ku!QCI(Q7#( z_k4`C9MjiL_20GU_iMRicr8yp&*{R{OTT&Tv{xa ziy{P(;u1-ToBB)i>-qboiQd$YnS1_@9&KNutH!2H@D<>dz-BHaNzOc7@Gta_Ihs{-jEWV+3T6vWi{?{FcGeYjMO6P4 z)JI9>q)W^40^v2!%cbB^K*&>Fgf;~m+^ECR3^4IgU`X5Gc zlnz%Em0zZRJ^!Hohx~GV)H+;MzFto_>asm*(?a;aa?^Gy?k%tf-=9wTU0y!ul zjnsUsU$?AVPA>sIdaviJ?l^cA1#aY*Nuq=iZ&iB@NS-diuTB5nYryB}VmzDsJ@!rZ z9O0y-XYtzS`@fVPD1D)Hf9bx`=SyG2kruUz-ivtHi9PK=F3;=lZ5*sunyOSFW)wkU zM$XEwTj!FTG1g}G9aQ00xQlgn(^d4_bV65dmWUM}Y~zjth1||Y6|5C4KK*)ONP!@J5qoceAYn7x zT|lOg6Mysfg;AUQ3aD#A+l1&*Kv_SRyfQuxddJ85tex@@%p~1f7G=Tv;(kB-jl*+A z0qKS*Z2dB-;9k+9!Lu&p&r0=IsD{pa)-vk6RCsV4zVWbmNE+e%JKIdLo98Nwo`rQc+NQffqH#;tn zf7&gIl&Zy;E>7r(^f9;KtiUFH^(IoAdd?Bh^sd!Yf?lMtuHA003`>cm`Bx(A{6MS3 zbF!}QneLm{{2=NkrSi81sNLs@kJOX+y@_Pm&7G<}aS>9fSW^6jDK~pH?Oe0m>&P0r zARN4o5AcgwOc>T&AdesP&FUt8R_FK1QD+j|gt%zen4jN~<7l^oI~D&f zIOSNCLS_su*mU%wkWL$XmmA)-_X2ubLh3lbi(JD4CQ;<_ezB)iO^^#_x((3vN0gKs5lyc-oe&W*2| zd}EcET8rCPE5+1FfA~xAsKa9b9!c!Ni?;JKcK?PbnH9al75PD!{jdx9J8hghP%72` zWYK#CWUH$ynq#6^}QN%tRS%&om~PNnxfj)0v3kC_^+8SscZT zD{QnLlE`+iODt$p#Lt$PZv2NbjutX0^bW-wtkFrx2soUI+^R)t67BY- zKU9)$+@biD`;O069bc4IK}{j^EZ$w*o%;@S(n~7LwZwbck)c8!$r&!G8gfW~&vve$ zZx645e^I3zq<{L`%H*=<%l}??#vx7i3_0TC${-rf2(O7Q6aW2Wyo$%I`6t!maSU0h z1O|C(nHiUDs`8LzLk#*0t?@X^z48m`WawIc@55z+RmV`pFFpfVM4{(FkBV7NMTwc+ z3JD{hkFFMr&pDb@X>T9RjAru+-UBX}#b0Y1=Dm^Y{F_Q#_F92T@qNJ$n@&6A4X07J 
zdKw&1xd~lx&M#bE_{Hh|6-mR#< zBzM2t;e&pfO)B{C(RH8=Sq}!qj|E}V(V40XRn|>kI|6*Zb_C0t9G-aX+12Eb;+V9l z3EclReshz5g3bI+ypekj~6bLEi#+n8`JRe%5%nRf$Td z=v;hLk|h~dBE0ezQCS-ohsdt8`|AuuVmr80F-Dy7)ZM0R!szejpZq+JbbQ-V4u@=aIG&Tm@5f74}nyfbYBymd*| zZjIgPJu-K+p_>Sqp*b>pydBk0B&J41AA$D>ej4+arg@l$emT!MC+iA$zVi|IlHfP8 zy(HmtUzg!|ja5o1S708qMM`~&+eshPN4aBW+Hx^94L3h;r%gr0TweA?!vUI#cv_*O zB?EVDlo>r|1c6(V-NULO#VRRzv>I`-M;Blm3Fm0$d5?Qr! zZ8)l_;soApt>$=CUM1p|go>;MMjd8vHY>k%B)&~*s-DBEOk*A`z%;Lg@EQXw_%_(m$Ol6?WvDFjuN?cf*m0FNtWUFi7-=amr&r7s{7iby99 z27SdUsVE|!h&p8~GzchZ7 zIe{%G$juV7_uzWlxbG2ZeTa2C7+2_y9@w@5>n2zpK<@#SRhx?Q4F#+6J_T3%;NG*+ zH~)@&(wWbr%>9x8y~^S$&!MxZ`=u~EkH+)3$~{^CM2|9g1DV|;nA6NluPl~aCFvA) zCTsT<&-##>OyxpIuLw}U4encHwa>pIsGp~dt5P6--NEG|7CrxpqpltKz*+FN&8w1> zMZ()2XKdqUzys9A6(ft`8aEd?1{b?Ye#~|&*G)2Hwpv3>pIY68^9(7*keajH+gCT3 zCTEA#Oy;En%l-Ikg9$y&l5_FPi7a>vD23vG#}VL@R_=12lXkLEztD>b%fnu?q z!3~%ASyJTNS6H2McP>IMtmbDW1`cOG5@cIwY3I67+g{pjgCFH=Ux&&4A=E`K6;t!p zNGZ#Hr!1&jBBri%OR29Hi>YmM#Z(d=18}>F`XYh>Wc0^;J=tf#0PvXXA=4Ge6le#J z0wjEVJO&o(+3($rems}o;IkWK1G$^2NiyU_k6Fr>(y67%r75Kg zO4Cbcm1ZI;KEpV>v<7D;{d(t=zUc6NG^3s>omMX>B|Y;N7)z9x)68T-7NkKI%s`#x zLZeISWLg4=5skh4&?>Fj{yr5m75v(Uc}|J^<9^g2v?7Z#Zm@fkN*O_CfIA7T{1aJ} z-Ry$0yvALmuRQ89UD+Ku>D99k<7nqLV@>{bE))OVI{e?QIkT(h@c+%je@~oi=XADn z=r|{)$>fr3%-;2WGy*+Y`nwXV&p9q&_U<%~^M$Ci44rDMu+K=jiDY!cueK znGFc{I`maP@D9N}4j%V3EFyHFuq5iIz}tEs?`;jL9bZhckD}lWq0mVO*+;P<%s3sQ zD3UOO!D54r!D>0G*h5Prkbg9>G)Nc8gqKVxxyH3)4tg9`p}KdfJ{f16!~Vl6lm*pF z<&nkcBrN048R=qe69^}f9ytVok1K^&$2EkUc~Tg1_fVhH#h+b0Q60eql{(LS_!n3H z%)5G%pn+#Y2SW?m#Hg50fOduN^9JxWG-Mmsgeecl_-7xOU1^hvT+g~*=U%)flO zsF-Z!?&^gFj5%E1ICC53b~(9&Xw8-$k{kS6?l{|n=zcF1;{vUT7%3_@PxI$a>t#(t zam{HLrIamKN`3zT9&=DnGTRx8nVl}C!f>PA#8f73XOZ(m;-Q3JJn0iWW_}<1@;P$e zKg=iGLUP{QP|r({S?VBWo>z2?$IOknA0ukq&TZ^b(H|+LRX!yorju$E4y<11S1+38 zbtzajL+5(Zw{#{b#HWj_tkZfH$2;pXDZAHM3<6M8G1>0 zqPK(xI!h$$QgqY!I{aN-5jwdppmdxrNEfIJ(~(|SGf<^66ICj5x&pJ!Los16L>H_Z zubZH2;}Ur;|C@?jiB1_{K+SUTqqG?OgDU#jO(zn-DJWgE!2?2zRJa`tByoWIj 
z_(V79?wY~R=V(F1BkWkRL0lZdlJ9D;iOc@Q4O}H>thRwSdw;|yTe%~)zF}PIgAV>F z^!KJ@NK})ll?(186m#1(Mn#cu;>Rg3tfZSk;jX!6L>t*yV)eiV`cAo=#^IPCHN}3 zLH{1CbH@%~m0G!Es|fU-XvJFC!2|XH(Duk_nr++%_IA7rMiY9j$noAF>v>eMJdZi3 zt=t~WD$1cxo6dsr!>yg9UOQh(B_c=V&{WJ;S%^Fqp_nR1o{Q7Z@(7Qy$c)X0mkS0? z!_Oj0*^R!KBJBMt-kz^eRq~#eS5+H%t?_>D{iq#l!<;|Vzd38Ds)j14;^_O}+mLz$ z65osZPfD+n6jW60P{>f#|ND9>lH<&GDMjs+zp<5@(MLX+`#u%x!_%+Cnv36KKrOv< ziltI{4;{$U`VB4%7QY2_liVZ5%pFuLRTKXd(a^$PAKu>g9mC#l!&G*sz26FZzpIjp znK{bd&w#Fw!`^S@VsF~}u=9C)A9j7P{F6LLSf3$le?dNMeZ2C4-5=Iflz-FK|Ei+P~Z>&(=Vm$)1)_oePk!^pu49>I`}{;HJ*N>tCh(em_sw z@mbZa*zX$n)$IAjgMPJEZtY-oHkmU|^#>LGbfKj5E4xCNdF5H%&^N9JabfCe?UdI^f}2>aCG!yfhvf zPmPZzM-!=u&;*tSYsP7UG!ryono!MnO^7C3lck|Gi!|P?N}?ZuCq9Jaey?i^q+zyk zuQHpSp0@xJF$X7aved1$j#T@J`p2|ASVrZIeW>@ghV~B4rfv7cot8?^cXbU3eWdVpOp`MRtrbba6nNzDtQPE zA2^f45+=j3q0}_BdBImS5QuJ!EA+ z?`eaSYULEx&{3Lt6@17i5mQgabTrZn&5M^~;e6iVY17xB^$&W89Qz8sqVlCqTK^EV zz5}*o%X~4Fy;DL>ix*QL-Ui>%*XZ(#ThI*=TX{QqqsjUl#)08_c8P zSfJubJStufR2&3UJWdy?Yehd{D(+#_Yx~I7Dq}&1kMdq5g2uBwe&OE#l{`{$d2B8y zauQFczi@T%Fx(A_EQ7>$0sfW4KiZ0J3`E-$TID>gMT|*nCwiTb*N2JCy~nq3}S-_>KVo zX2T20n!&{9jk&^F*OL;= zIuC?Bv7+e^a2Kd5@g-2mP@Q`Qy%|$-2^K?r&F`w){ei*)p;aG;r7vx2g)d21OU2D# z{mYsf*YVl30j#jLos(d4BKgl`HpnmzsB2YEKz$JE*xOL47sS&;11L>cdzX@6waK7F z)I$AI46AXqHK1Na$J1o;b1UYGk~K{O2y|^j?UE|lO`(82IEs*)`L8<=Jy}d zZF5k4=*ep=Aegy6Zo=ywoqz$8XgK84WUB!eu{wEMu|SnU6|3 za(TdRbw?BwbALF4_4dV7H+w-&Am9&BC$J#!l*P}rc3T63L>v$7M)!mAwI z$MD)*WGBMgGLxOocPL3^$-K?+!o<^O1YduN^|dYx6VnsfB zmkip!D!e)sk|!JAceO7X=Ljj7z|+_VrU;&rim!|553t&sp-<~OlF&V?4ZSu_nFO(4 z@>+m=@+@Q}IsyT+ldF&8uRag^-Ozgldwjl^V-W<%_p;nA<-jeuM^T&G?bqe5zb^ohP>Amr4~ezy6jlgf<>c$P&}f^ zlbASEzvnPY(8g68x)6Nlb?LZgU|m$r4TWrA?Pl4f02%SN6TdiZd?7?G6@G`Kh5*?WwTrueEB(RX5nNW=@ z(D_WmIZ#c8ZLT2&5~;zu0RD$o%!gbIX_%wWpl4Rit)`d-z~bfxbv@z9IVBz+3ni2kA=+`pT90sFxc2 zKwsOtd_i9-SvF5!y9Vj2SSC>i@buLH`r7U85Bhpf=8V6%5f7M$lnJPHWg=?`m3cyr zc!1(^_}YBZQ3u_0KxHBPRFVmVia@84ptDvpZ6uu)2&F~wlorR=A0{x1nI)jLD`UQ`12JMF+ob)CEy@hWABDKy%W@sz7-QuLTt(K+a$_MGK 
z!cKb-nrm2uZkV{PGW6#~yvRnPB3HSLpIv$CoBOj19Wxlk0OCRfLN`c%5%4%1GLc`G z%l|qF|LcVOucIuj+zOkM{v?C+SAg>m(cfb>LVqJYPVg$h6YDTju^V~%yKEk!zh2Ce zcEBcE2H&X^w`W7el%yWC=nEayEu!N3c=}tUqhC-SpkG*0*|0;gf2nX_-8yQLblNUO zkkK1c(7i5sO2?Ppe@|#B!Lwp?~2gwPOM_!95#p5%>HPNV$c3y6-d0 ze?FYMdNl4Cb87T_ZfAWsKDVxK8<5X_ptTk*7oB`==aElHeFk2i2NHrGqlb7YMEDcZe>D zP*SXq?)Wkyo5&OmJi3z%50U}ms}0tU(*|h+kOwjz86a=m(`ufJ9FI67LDGEAhr{O+ zk|Q{j`vK(bk;P|>(Aq`XI%qlg>m72uM+*EChpdC>Psq_sRLr?hZ$kGi1Yc(vU&5;$ z3epGAg_#Szw+ON*8(nvEjIPX5c(Jl+DLP1b9DLC+^Y#`*K~^;S)E-o3f}#k9cGq6# znqkE!cEGk6#KsxusTw$%Hpk2xf08A19fE9;k$uesY%?$!a(D?vzPzyPy~d|_zVRpe z(IP;O*%z$Ru(%pKXi?NEtB8N~s8u=q{YO<omMhG77Xg74Bs;D&S`rg~-|Q>}uvdN7tqnPGnum^Nkbf6{1U8 z8)vZ;Aws0-S!TT5=HroGyWX5Bc+oBk9jYHp_W-i9RoG{H%qwo;qHXhw-{kphjM>s8 z7Vws{HrF^jEqy+BTm4e-%Sn5>Nl{ByNY(3J){NTMR_>7bX0@&vy8d}ymcIpAOza2~ zbp40s*ijg=#oUT%-BgS_WNj-tpDJoknL@$AW6WvMQ1$XcyLpY4QKr{^WkHpEkY5 zSG)YDUv~y;zxb(dC#Ji>*V7=q$$uNLn@5ThwDEY{-bJ#nCL$XvMjMS>tSD5G&S3EA z$MM<6Tg|hhyxZ@p-O2>a25{gtaOp;H=>y=>0&wXjaOpzh1G)n3 zCT$C{g;q3bl|JY{ngG5e*MFV60efXLI5XvzBW3TXW`ZAQlvie=r#rHslA$Y7pevfu zl{9MfS;mY)QT;mAWSk`qSzAto9*MI=@n<O5XH1QSmfx1oM^?IPu-G9u;LxPQY7HUG-$dT;<{`1({W1{&MiyTU*; z(gpjSh6UjAhkEbRFE*^Gy0coS&1drT>lk!-WeQNan{RC99Oxkrgl7&n{LIzB#!E9L zLJIVtYkW8!FZA5cONFkUXP~S0^%n9{VKt<}+TTcp)sPC|mJ0hCkNY4Mf-QN)MpFje zjM-G-hGulu_QCxcdkZ?T{)`$+F)}~Mw;o4ansk&mcQkWtd`xc`+UK z-G7T09oGK_FMfsYjichq{|CJI7Odd0dGTY|7^8XdZ5+KdUIc1`kLdTj7|8Q7;l*8u z&O3Rr&gSIBaGR4C-?05LFA`5rGI(ZoAK}GrSj>bMTe-+S!i$JM^buZcMI|rc#ZPVj zi5GKG;qTl=8?dN{cv0H>dy5)8-Ag>=L%g`( z`labgrj!!Fr%$CKZVKCZqnCs_6nv8xDL*l_6~4E-z8~VnZkt7dLKn zhh8LFEKyZQhZ6s565>Fr2xBxMVHShSPa%m;!w`LtCIt*TCw2zSW$KoAP<06}`XMP@P&psb5zXY9B;r zhr>(f8Qj=MJ0xR-ZWmbC87WD zsJc@(oTbEhmZ}kFDOJVeER)M~vq+Xk4EzfIdC~k?JS{=D$dlVy;-N(Tt|}DhndI$^ zgAc<$FNi-+4`Koj^ZIeMaw0mAlL)Z$H(!DENO`1F`b%vfORBo!(dhpQR;Fztpg@DxP-Htz+`_>zQ@%>~Dg0EJXL@yW#z) zwx)wCH`^zID?@)4)2XH@;KIA?S*D42Txs86A~xwV`yOzm!}1WXA=f}d=KV%PuJLH* z4)j*o*Lvg?H$#)=7aL3~a9-9t5&YO@_Qm~4a|`-&Ik{2>?Ay$x4ObL2bMtIu#>#Is 
zLk&99PNsuF5i88W1|Lww3E1e($fgQIAL1e3_lM{t4O`CEifE}98;Ad2 z?P=kZ=rZ8+12w~UP5eN8CeVjf_B$^Sd?)2d}J(SL8CqMueo)=LKbHWKSQYzDE#Q;`>v?&;kYT#H$dO-JbQ z{A{vd98Gp3xgxEO6hN7iK0TSvn-yEJ(6Q<1R%r zcNvy9xq>^Dso29Deh+2NJ&Ycsm*|+tJeze^Qy4OTTTO8iaUnT9hnE*=u$*rjw4CP- zTF%Mr?7>-3z^1UAKRY*58B#Ta-|1zL93Nq)-?dLg{dGzeGGWS7FcWGTGZW}H9sE5D z{2c-Wd>nal&Dg6L-WGOFFeH6OTe%I^x4_dfvlM-Fr0ApL9vWZg-AwueSJ>mh&qrVf zCt^}kGdxhq#iT1H!L`kpA3>%;2qsI|Z2BbPSS`pt;^4mq2DbdZrws791y&NW{~y42 zo$tG7qG4;QAHxb#z;mE*HAOj;;6|K#}q@`qJUedZnx@?`(tCNFcn4<)* z!@L5gN9SQa8#9M)L*1zY|8Y(v%YwI=cy$(mma;)h3qec7t1}6E(PAYPKZzdq@tzz~ ztwt(S#@AfqF2K^sM#WF~E$X5B*=T5yC}e1oF1HgAoAb{LhYmW)z0vD4`W(bF@S0O2 z-kh$uXt1+TM%KvALgXBK5v5qn)8+V@>BF@t*SHc~WeuBLm!ixW)I#pyh1WrckJ(&N zm6{GIf28j=L`<5|Ynaxj8*Zyol)LCCW)Ubd8x)zPZ$@O8$f6mjV{>rdqig_^`bsqaKKrqSn+vV-RxcHuj~ci{c=Vzh zEIm$q#PcGgGE1meT;bn9eh{&xz9c=gf1qN$ z)DiVJ6rLrrfrmGPTBNwS9i>SO3pAGq@51weweTxEKd}aRxzBqI9NGb2ARgb?iCQoq zr)1ow;q@y|F4cJrRBk8KMY7uicxr2PEa9ctc-=T;_JpBx1lo7&mg^>H!!?U^-d~XK z$wn86JMerdAH(?jvKM=#qItkn(&DF4S>{TuH2h5ogvk3CJ&+0H!erpT*`J6|TY}0Q zm6*95^cG=69zdDbL9!yv+)K!5N@S~*{#8qPRC0|o{vv;S8F0rNd}n$u_OFmSKLU49 z{okuz&&jH$dE~JKR=Ll)+aAtzTNiE@npSEEM@H~I)+dxN_E0W6 z6vV^o{Qc5Ocm*3ox@P3sH8pO*8mt&xgUP?B8p&^?*@tpw8mA+!_oN~n{@GdJ;Tgb2 zZOE}mKkILxqEB)UA#!pj&l8Dt`N|bM{kL-a!4WFZ|7A-SIzHu8`NKb2VV~!bgE?gx zp)}@{WhN*={fj~UOPJdsn^%Gp?f@sGf<8$MxtVL|$y)4DVTE3HO6Lh(iFHclvcYe- z2j4&>{il|d_=f5DhH1!nyaRj`%YRzkGh?yGD@3b1KTY-_$Zi0)$a`1uXl-`gdNU2Q z<^wOMQ@YQFbbl9f@aB4CLAp1X>0&Pv`P@}oGpFn$&#vH^0v+KPH6Jr4pUfQmVI5sM{`Zj13Io}(Hij*QIN#3fY&A-x}e(M&qkf6i_k$?%z6& zHqPhu$Zb50UQ?i78Z&P&3Ls$e==sJR#KAM5Hx@u|OaUKLuPp$@zSToChTQ!=(~fr3^{D5kuyVyv_D`SV?N%P9tj2Vf5?Q^jI5FK zPRTh6;f+|xb4O6^bmcsF1_|E1#%;${*03qc=&Jd^EvP7jKjsLic{O|pl7p$x7Zfu5 zCxcS<*%s(Yj(;?`Fay+WgX}&D??4pqHypyO&mmW)1#^1KF!>H1{=Rp4T@iJvW z((WaG#+{||>JU)h7LN&~bC8)4ts>YZtki4ZyAIsaxNX;ONZqQ&FPO>IWQ+MXFyJ9&Y+x-oS>ZGoWbxsHWa^{ zv)Z&UnDoZerdbEqz=JP0*&VdiA(opY2Wbp+UQiSJ^3U?!_^WWLk8pk=XG;#TGMAei z4_4rBDsXNE-dEy%CEi!!{T^1kxKp36oCB$|3%);PZoCZnxEphI59Vq*Y@7_(I7{{E 
zIg-H{IkjR+OWgT4;gjsNO4+UON@l{gc>;RYg}?tLL$Xmzo7ox7t`%b%{5t#mPn@sB zLTHI}r96gZ*CL`7sBp*gqgdBgpxL{uzjgqAt+wvvT~eUpV+295i>C357eziPQV`ac{@I**R4k0K|1^V96nFJx2@=k zcpZMbmu@xI!0v`;fN4XHHoSE58t(ozlcdLX{u=A@$q<AaY!;pi)qxkO95BDo!#F0(y5<9A{QQ^W1LO%QVlXqE)7?R-Ax*uG8jCuC?Me z-~^jpU&^cnmcK1CmF0NT6~l2w;`68cs#@`a$qTQ=CXx{Mu%08DI%|(fO+Y`uIPiHr zQGjM^61Eie4FL?K7IdU5wNFHJX*N_M9B|~lkHV&Kma|zeC4!JR(TGn%> zB4_q7#U=Qsd*nf2JpFDh)`<%?*D(S`kYn9?3pW4MpZTYr%s-_{2AhA{-aCzD-sxCj zq!1;H!x>G?Lv8P$!Z(G7ihA3UiGpWk4(`zy)C_N(L3%n?|Dx4H9m_n_aaeKVvEn9R z#Z9z&sI};HYw=LmE5~34lZX1N`0?-bP*-&6q3%Y#|70fDW8tCBvwEm=tsd(1x;a)4 z^@}r`oTap$m1sRnS(jk#3VsCqXns6wrU|f_CNh7P>>$z$J)0ui`Ll=M&t4TDG@iuVd#4M3wibLO7Js%D z6^oIXI-Hx?<+-8k+>w~0LFTB;cMRm|t}|W{|J-2lf@{SutzPh5ST?sB2vP266WxK| zy9RX(6PQh;GTwwvd1siEyVD2pR&ZQQz+ALpR#$(OaPMF{0h>m0mCZ%u$h?XinNsux z-XWieDuXR6z4G0n;^5z=Ige2|)k2F5CUj&QI4)LahUNFcH?b}O*?kMVE&bb3KTTeT z59$bdWlw=;o0~pZ7?K^9-8YA74Ekk}LSV=z74n>aow;SS2d`CgINAmvAfHVH?-{pDVxO z8y}Znzlh7=^8h97Hp14G{HhWA1Ig%~{JIW5_J1wEfJV2IU$7N1%Se9x*w{V!RfBwP zdaCc6I?Jz9rjwW(ik2tiakaqmZSw1S1Ie#iF|x~ZE7`duze*b{@~Z|+Sy#lx(Ab3H z)uP7Dg5r_fxJ3_L6c@gxmf6NPfAA60RO|5o|1+ zyvi~cxk3M&br>Ph<WttF$p`n$0htGXzgdA zwP)&S{imHjt#s2p1DQq?z-?F+W_IHEWck&{6!35>AEcfK9c$&q#$H7jQG&m~UbPHgOjy5kw zKQv&hO)Iov*hiRGYo*6)#i8bnCl_cJqnC%BhT}KET3L;!oDb%Y8nLt`Kr4!ykOA&% z7!56y*Aj}JUa(VIrlITC71Xe;f%S4lJkzodpKD-8x#2aYiBQ-uyo*F`t~9qs3~v$* zcdfZ4HDWfj9%1lzKd%9{=?QJpK5_=T8EZWhl?PuVa&|=wYPxAi2|->iI6}f|CEVs_ z^qa+c{pbe=ZXfWI{isPI3{+R_hR!9vnLYF8B=lDdrigXuHu<1Tslr<^Q|VGb8QYkz ztR{r2M7&Tt06u!k%!XevNbe`OWW)ahY8GARQV_2sy-zs6FX9$>JwqTTm&5NLq6oyj z%xv=42I^|Wtbkvf6}j!q!i1=6yP>xzu z+y891Z>T|jf!kv`ycYHZ{3GBCf5bgc1LyRCDhYQ4_g8`a_rR~bO8;~Bxdupk=lfg& z@0IRz9955O&$yh{)#_0*CCNHnYV9He- zl^uh>K3g&@dua9$@ZyZfj>rzr9u9t-rC^a>n7u6L7|va!KZd--2`n#h33N&iU@gup zFL5zE!e#IXmqWL#fNq(jU!J`py9QMz3Flvdep$pa^ul1XQ$MQ>u-TuW5uS(^f;pRM zgea_*$F;#0(-XA86XiH}=ZIAsJkCp3BhCdMT_@!7iE*lpR&DS&8y#dL@-0%CHb{gv zSe!c^vWEWKiTH01)P)tul}jG_1JO7(`QGlLrMYS6CtGt7rFE0gSDw1c*NWGhisSc) 
zn~Fvkj#I8+*!xxSBv#Ypd#j6TMLS?MZaM|zR!l<@(*fs9$-pgvS~M&;sn90rlJo4c zpW+vuOyQToV_gm%@RXh`|5(V4k|xpt8`{9##$>;?DVfRsb4|}O*?+d_C^(;L#Bu0u z2pxy+hR^^eXaHnA9s(Lu1MZqgmUG_-d9Q7JydtAk{1CWj1TrN&n6)<4^mj0RTsK_n zCq+{d@T$2>x#}|b3PuO+)+P&v>1LL2=^S`gQ8UDH3I9L3R_u_Y(Q&Qx2YT|KSu53u zc>mAWN;UdU{ZH3QHDkx`yjFe|{{RoB<68MyJdI=jZmq;Oc3vwEY<$QSt7;;o<#G#D=+WX>{s0FvI6`w}STJdPKtQD6=S}WDiegWEQ@obA5 zUeB}uzlUXESu0Xh*HL{6trZ`J%(VBkf#^WH@*G;XJ$*#sKguc7TOW|w zo1dn2RF6Rfmg2(B{9wfZ(lCPuBeD|)cC%x`s=~g4du!>AK6uUB?t@$wFUEc1hujv= z9ACj-c%8A9^vekp27m{Jcua!9krE_~0tPr*Jry|x^2|AWsE{ed3S)#}IT6Be@S8*f zX&lK=#~2|C+$qz9u{jfj@jxG^2ywz>VWKceh!@oGWD?GMellN}Crn3&=%@4=#Fdr8 z3jUZrQAiMG3$uioLXt2;NEUdUoBrlXAyrtAlOjANEEJXrO9e~Ur&i!Z9RE72=F9}5 zoCSTPhCbq zEZ0&oIS}JGo(Qsza|#vo-TlAH(tBf^fLu#;rVQgao$$4evkeG6>+y$}$u?y_-RlZfCW&iJSLvDlwAUaud9{V8KxeLw7Tjs6Bt>;+@2p&#U1 zgxM3XL(Mgav=76(P}B<(GY-j1H4u7=nz$evs~TLp)A4;zbmglSeauF~kA`Y+pitIK zdvpgrBP3=|GZzw9|4)MveRn^FB=j&7#FV=8qOx=&sn5=!jbN_fSi`0UQ-WQ+p}=NA6-=I@QSbrp%F3>BVWZKa>76_$uB^ zS&jOo8ZoLRL$L~sb#7VK++Yv8)*UM&dEY3H{8QkkPLMK`JKSBcD@4a9R1kM&4I7Dy z;WXe!o268*ZpmY^)wKDcdOZ^O68I}n=ZE?tun{;R*<(6bFQ``?MKWK&9pp@QgVVVyEP6liLUnILT`Zz~BiWq&S#$z+@Es!HKf+5I zspyYc8DtJd1{9u5Qr~+ey!l|Y_+iVCgt-c^?!c?vCI?0UPc{&< zX34L1ir@sQAFqLb zw+!OJK0s(PY0Vw5N2h=>tQMUdzc#$JTgol8xA56qN1ok((C9H4y~SB)EZx0OKV($7*hJ2=2t1*U~_yEii4 z`)iUk5rXFxLh6EZC@z+$f7Qx^NOjzn`tE3jr83Nd>J3I`!f;KE=xbD+-wBR@L~C`{ zd?{V|XK^^ti1qM>!n!RLv(qCm2ZqBNvhg0sRAEeKG&clv_G6yt4ZTzsh%p90d)c@Z zWU2r*#?=P5ZjQ02&Ie<7LwDJD8)PasHbx$NYYST$-Q&qhrL_vn@*iJ-C$k^<=SQ5! 
zDwFVyMW*VQf3p)dn;Oh{PP&npz2%LgQC9_@rExrFZw*+9h`>qcR6oo{M&gu^RaoGq(5Sv zw=>whoe7KSN%I!+p;>i)FXk;hDZ+XsQ8A$>VeSBgkyqoUUwq9J8%KL>W19@Zmi% zrxVbjkGoU{zr7jU9SQ${?)_TAxwL}ct_Z#fwl4}S`ZrUt8#xPq#Jt`k_eIXa%MWuF zeDW!0As5ta`?SYJWJ3a&+fG;G)LY9@_ z@4Q$U_3)WJZ^=p+;AxlVZ}F|Hix=aBldOL9OmkXBM*=jFx1rv?}qjCQ{a1c z1>+be2BG?G;=zAiKVKtmG4F*1>V^CJj?oX*CRO6*7Qdooh5wlxW4e8{7}pp97TkD6 z66_id<0x?_2v6>A4UAV8H853(e}*ag(mZ(04)^szLj${y$#_1F>^@SBLYSfoyngTz zOoZE4i!Ke1_^{q1GC>7KhvyW-^^}vb zL++a0CU?n}7C2jSHNcJT4pcb;nHZtqfeZr@OnDg`n4No|-p`I7kN+vZG2y(lAXPcs zifOhn{{Jx8G2PAIe94->c{aXd{$`bU2=$SX#n#^K84<|hNwD?|aIKr3I9NZ3*@LH_d$yoi_J7ans!Y#A)+@ylNn+A2b`+4F_Y=2hG)skD9slA2iqO znAhYt?1N_0x}#>hogXyU<{dRl)}!w;WK5_z3^_xCbV1hKv}&=x*-kM43^?25zwi`u!2#AKpZ!BKgSC1l#cHfC7c^J%HO$IiMaRQB-|w6srU1Msp=2|*RplT=E}KZSFAl*reFR-ti51s zOl=@rdvle}1(eUSLryGfE-a#E>o^s}2V?CG)cXp~STlvVv*YTVQ5zo&nP^#kwf}wK zy&Ca-c%(xVy>PGVoBL{e>2Mdoig_A$_olfY?r?wH;Q_eAfw;rFOmv6Gbo&l(MJEJW zsUDpr_7g2!pE_UsIvFIkrE{}8&;t)h>?(0FD*Zwp&M#*?l~9w>8fAlpXMGly)%rgC zTt}m2wMH2`uhw|T%nwmF{~n&p1v5IkUYhYDdak2h-Ua&`jS?>Jo&WUxZ${q7ejAq{w*Bs{@7gS@$uqvAw6jb?f%8fXD!Qjm81w2`eF4#`vZR}M~|K=F_8U^zU+4d;K{8+?dL%-tX83$g@vKA@pEWE#!lFt zfs!x-c*srM;2b95yqYXH_|{CwI6LI{0Q2tuC3N3B)V;stz$sA!zw*D>bDe!y@o6Gh zT+?dsT(2fkCmtvDf%u*12uII0j{xZ2qbucusOz@NiQ2qE_bM(!)fg^+CG z^+wiX3Gsvn02L>UydO~VAmH62dCG;4(!>A@_pF-AI0xTGz0|1^>dLi4&haS5W5#YoPFi@ky{1poqwvf}qDGIH z?1qCeQKkxK((qbC|89>`ly0f?D}JofuL_L2UfmugGrcE%iyv}(I@bDOW*`qfk=fGA zl&)HI>I$BU%v|YldIEI++t3Sv3)>41Y&EK2k<>k=S_fRUM&V;hU^g+6-~$Y@ejX`Pn?)HiGAZAVV%%gaBEJ$I{ENt3D!?5 zV%~O&2$rc{4Q(z_3}iihYQ+=in77X`820LI^KjH2)*yRx25Jv$#2e=Is9LWQw>Bi; z_4x*AE^OHbU=*lsJPd@c3J5be1X*mUO1$0lIE!>Oyz9gzdrP@B1_{@O9P|WmX~Za?y>H`by5g03hnL1F8^;E|uCv@7fd`@&D1 zsC3Jk(`EOCo_sIdOEB9DRmji(eKqQ1y0p(#@a+99_Icq@B~?L>Wb=LM$LYH>d@UA=U~(pm-EZUD1CZFN5%8u~K1_YoKhuAy!n7J|8qc~K;P&zK;e zQMXRaJM`C5SR3R;Rf#@LkNeq}eRxK>@UxHM2?-{uTaVg-zDi~%?(A*k)ME3+M$h8i zHxTnXW083W6?&%2s&N0+&}E*f7EM-*%53hAbH=$OZqT_jDfE~Z(_=lM#~g(I;LJ>r zhU8HP|yb+QcXmm6W%4G8zJh^(2MnvcP-Zp$RXA%;%_EaEI-zV 
zFaR8Nz0s3!nuc`Q@NCLoq%4uCKt;j!&Ch{!BUF^?kcqwdKnXEs1+#3&FffG-M&={t z3#wLtfQAh2wgfqoJ{-?49DIhv`x=IvFPUmElV%%?y}M~2?jaW6;Ql8*jJL6V%2d7C z7)s+4P=P1fn5kA&AZ}VmRGNU$tR|B>p~ z@(va;uVE zoU$!*x_$RA)YiA{N~b(2Hm|Lnzo-)5(&bB$9nLiPa1CWcp{MgL&no-i14`T@yq^wP zCf7@jbC};fpr7qfXK&A;h>p0IV z#V?zG?>tX6 z92P|#J4*9h4UgSXHwrVbyk#7?W2(fGmdTjmgt1INIZEe{I}Tp{WY|Gr$X~Al!wk)H z``>Kyyhd!nd&@kJZLrMqm%D5 zT+2K^pjgXvr=mTV-c(>X^ zmVbeGL&M3kv{i4h-V6nn`RS~m3onA^XQILzGqguAI($K&4!|4@z#Q$1IqIiJUk`!0 zdQfk1sw9@87apw)SD-&$n5!RQ%@H?DcMw$Hz*7FR;XU5EdE2+#7Gne^{( zsi?E9cj8VsN;w}qHv#b*(!YvK3D=Nqd4|Kyc85I`uQGY{HslrglQOFgma?Z;p1vE= zWiZcsz@r4Sa1eiCoC6}s#3@Wq&t2$O;038QLkf)LuA7S^@10MWr;VRiioE7vbY|f&s0&Ov7{Pp3F2{3Eq}K#UP+Ac896TmHy{gWsp@Wcta}a5WCjadFMw1 zRkF#1H*o9>=E$K2nj=ooKPieJMW6M#;5#pFjyqI^{BGcLE>+@|2I8H*Cvp#n^?WQ) zw<>WdavaiG)EYSy$c4BF_9*0O8V`fD7&+`l9p;R*j-)|(6G?-TrYg~^<-Va3)lGhw zDeXPfDr8Sq<2)|pDQGn})I_DjvyH`F_BX}>(fI0grf*23Ts$!FcYuHA zVspusZ~?~zOyq`*$O6n<-^c$@jH!LOW416HmblQ&H_ zSv@n$y>;)lc{*2N3qQLERUjG53gh|p1yjI+9!pqW&=`I+tGgJ_k5Z3C^~EIqMSfOJ zGC!N2s#c)epB(u`N-&;J2aEm;ek;EuD^<5NdpW<1pQE17&*SGJiz$VFieJDl*7`XT7C`xG{1^p&8P9(_-gbW`L>NSZp?>-a&vFpzwk72QT!XnbsnXQ-x9T#dhAYy zzf3hX^&oT$Thweh`6V^>r2H2XpJJa@l+jh?nebO01 z?~Z4TZZu)nE>RQ^Tv{&F^S8(N8oYADxH}(ZP|xhi_x@Fg`Qc|7H^>}^aX)0As^B?U z#_fY~wWGiiTe6gKy3*fLM~(nH?yhX_^&Z^a!VJ1b$MMzH@jt`(8?591*p!a(S6j#b zvC%TV8FpvFC~0n4%JFI39e-R+KI*GBNnjb{r!i!ZrM=B~)a--kTy{epdT+ zo%OZczrE~wx$IMwxYv5UeYir}!|T!2SGT*~L3TYUuGbUS3+?iH4%X`p#`R*Y*Hboa z#{G=6UQcPg-hzzZl5GFoD|r9te_M)cRf=D<95wsnT6auc-OInFTjs{yg4RUKf7Y`p z?Y*Da`zrKuvplz<8Mhx^XDQx4y3Po8oin&j0Iu^G-n3rlU_piTI)(7#*8(Z^XaVa(349x8 zH(hejUf>Bgus&|At3oWxC3lo=`^m+MqsAU~cD!q@^U1T*Dc`SB{LMt#>?-sv?jm&H zvLA}!{nd!8fkoB-umaJU!ln&B%)vgd>F^Izv45v2?}t&?ztvRp1MX8~0_%S}vHsEf zwe8*?Z}+kANDG$u(e_vR=-z~T3-e3dhXnUBfSid{>p#no9(_2*!gKaU9s(lM`!fi*~X5A)@LXL z1AV)QeLhlu?cufi?@VXkG_Y^pqgUtM$g(^LED;ti{CZ+(_p2k_C^fm{R>F>T#;Xe6 zd!@M4I10xX8>_^;hBN?}NPr+q=N_&tX$yn}DqeG0a=oGO_K$7K5$l zYZ7kZHQ3;X+uPtLnJ-nutfjjR zMSY3<)SWvMh2yWdsXp0(nlS!Zyc@!QL_S*Eg>gmGjTv(zci 
zV>o=WV11=%2ZVqu_U>l!?X+g`p?in5Ly{$nuR=67ch_@UX7${PAM@NQ#7g+~BidQ) zeVwN&iM2KnwgcUh`{HNt+5Z3}Yre}QCG{O9#QnZlj-#jQ!cXlwKbF-tor-tCss_4` z|8D5#bdPNJ2XYK(wnOgl6R-fm?K@YobbXlqtkt@&1bR)@eI*cOiDC=u>RB!J0G{Ntmlk~n{Ja7vM*?2IXokgLqUR}M9Z_ea zN_4mlWRb?k&nrc_$-C3qm~R4x22XzsJpC%%jlUb-osXVIUxD@RXD4nxu=2B?fF}c- zEi|va_DZ-!63ExnB#tue<+;wr_=^xRAKeWZ3o<~np9IpF3e z_~%9H0@XfQlk!4!zUl~A{JA@0bbF9lJfpssV!{A z<6vFJ!iyLKJM$G>HT?WccnWc-@rdWAz-yS!E8s;;gAcKfPsv)YL*(ntCGZtysps(I zXDo#8Fdu%zJa`eW@@ujX>&kit-ojG#3ZDFo)$krx!jnjaAF-dW5brmA+gAP07mF|} z+-9M7($a!L^hP0U!@@s!v~57CXVo4SIWs9aFg*1D!;>X?X2x1Aatmx9x3Z2C*Wq0l$as*UD6>u(vC~EHtn4C|%h8 z!!j0OloVvx+U|)=HII!FV4l`_6zV9MYGK8EryWnUTSgK_Q6k6IcJF1XX>63XrZJsI z>B56Br%%KvzGo)DpRqk*nQ9Ch=UUT{ZjbXx`Us5Ub0z}g*yfW=HH3{*+|;Mr;}oP5 z|C{VgAjYxHTA8X38|Mh@7I&-|LJ2l1d+PS!%pLMs`AZA#i#e<|zIRa?INUPY9Y4&D zyBDoeM(OspJHCeM)Dx>`LWy7QH*Jxs^b^!iD-M$C4l*`?fg;xU zc6~JLHdv0pF18q!0uTM7<$V1t>_2U(tB=FJwxy*$3j4|y4?_s{r7aQQq^lAyw#>rk z3a}jX#OwSfpm5Dmct^dpD7%lY^AWB?Rr@Z;N~;j#an~!*X)X?DO=zN8jS66!k29PS zZi<*Xp&+bQV^;n@4WB?Z#x~tB_@Ia2g;wBD!1vRxO1P!ilCf<84jPJmPi*~PmT-T5 z<&pk&Q@p|S_j#tjw?KclrcyuJU(p@1LR`r(kVa^4RVg>PAk9VpK<)$Wo$ThH=cMs1 z3{(ddgoDSUAa8asn$Km8yOb^k-Z;; zObp_=>z~sP*%$%H%@~jqku#KO?qN)GA8ot1`Dojv&7`@*^`{WA-wFvdoM~-h^o-C4 zf+ukbszWZ?+vB>I>?O!KtrYu1wv!fr0vqBJWyKyV8=|(;T5#+(8)B+8o38k=Y`O~Y zpP?^%J1rjQtWbtyZd9$LO_Gw3rj$ESR#=5zvX18hc3UP0=}<^jJ-6oqWg zu5R9m3^`=6G#_W$d}!Vho5H`BtD>>5(>%(vHmB0BxS>)=LP;i_!(DeY4{7!gM{=kFI%8Jts<0ogybtCv0S#x#!p{3*a7&Re*qZnE=4*Gm7P{G%5)tK{CWf-p}BycK2 z0u!Lmrvnu{z%R~HK^|>*ld2h$)r1AkVOZco=<@}@1rPFR(B|i|*7NK5ei zHT3x^AcKc^$R{K6x&OJHY4c=g^W~ki`3Pw9R~~BfebDBwKGf#>q0L`|Ha`Gueh}LH z5VScqt2W=RuRwi~O`EUkwysfT5VK7hv^imy^lN3RRZPt>mF^g>XM%87gjAeE2VK*T*nz{w8AZTMWw~=^7f(*C%6N-&j{4kNx+JE%hh~zh=!bw`lXFxa-jV#(12y5V@74&9603 zOrw4D3sHs!WW&Pyw`lWZt2R$I-hgb3S4z2hY~-XvgYKC zg@l{}33+knbV#vJ+EkE?UqeFHVSj)7Xh_IWOhSh9^YVxBsRhtJh2-Ciz|4(+WPFVu zqCUXn+8DLP#wEXIBG$+3tU0;~d~((TT^zqGYnARGlWo(~7K@tvnZU90eLrTAz9WyE*{~xLq;02cJVv; 
zXVuU1&+(fPMcIld%nSS$L`;tI<)}pizFX$r`pfQl`KbjDzkC64?32i1+Ou?;{ectgOHPlASVw)P9A}rJPJAaGNj}8Kn*Rv&X&$=w+p#8Cu0Xz z#&$?Y+wYXAwqWI!BR0OK^C;Ap^Er!`@`rX_N>m1MSgnV2wA~GvY7HCZTysk2QM!<8 zvW(>zWf`QS?XJsIDQuLz&C1TBbnzeEOHalqJ>Qs(sEh5{%2Y}=&hq97oyY0o*IY}d z4wt>&n1XR^GfAeJz{VNZ9MSD@E~Hbot^XU*7{@kWWvU1^PEXh@?ya6oIzF!)VwH9@ zFP<%I6$4pDgn!XShGd~-w4Yhc z`rX3Rf~CsC_=cn*UB%3f|4{gB;Zw>ZI8Iq{t>}e7KF`;MM{@%pC4-sF{41nnc_TPK zJtRf4oODI+H^@()oJO(|HS8MuY~P$|V2mZNhrVgQAA{QG870(nn>uX|(cMHIS?}D} zPTXh<-~8cpC+3knk4Qc6d!WN->PJCVR*A02T9lRqPOiC`5fgi5xG+i~<1 zqQI+JE&=)@AeUe+%Owb8xdatr6w4*35NBZq9xw#58CVP7yApW=RjA+QkvD)00pod= zH;|0^qHRjR>$#>1@m}+NLpd1Y9%nbes4bN^j|*8rc>~TCE>)Uq4_Nj4SC~J!EN|e) z$x<%+wiEX*xDH(Y1YG^GwBYfE?L%T?wp|G#6toU!`9J-eBS+4X$c-}pbDFRKwP z>5zBQX})Yg#Y5-$l8U1=UuKvn_rx||mLT`UGGEHk9gzIl3UFjSnlJCdpQZUyDL!bX z`9hg`kLF8eBh8m1jTOl5rupK+5SI3SYK0ikTq$NYcAhV{87m#lm-I%;Gbx>joNdHk zvi}0V^?mrQm@{9mgKhSF*Y?;W`Er4ItpC3~RxWNr4$J=zdyMACf6^Y~8vYaZ*bUQv z!XEpx=|5wSWt;vp_So~NrS8Nj)P-I5@9eQzrhjLTr7&OdN%mM0`{qgZ*d+GNlkBmP z?3>Q^*dTWNaeFL)9ktkF*TBs}*4bYRD@gn`*<)U&3Q^Ta_Lz&wW{;JNTbtV3 zW51!Ez8u~ENJq9Y-xX&xK5mayi2EBMW8u4Yw#V)m$sSXWN8Q-BPTa*0;iLXh!hM6S z4%;1Y$h?KU8ry@XC0yjjNBZ@E;wZDn^svV$r?Z^ld*!IXCw%YI^!-UxLl#`LQ$ARb zj+YfWXx>qHUU4n}{v6@0@yQ-o;ER0prEW63_H}W=>wH&dL5gZI>^fNCKjVAa9BH(xi50QiE)=~FS-lN+@kHv~zW0hg} zhBDCZiG?_N3Rr(Gc={ZGApaq9j>0Z&{`$oV%4Uhh97t;vkZoJsoZwoH{1nILOc%<} zxX?WP@ZDywU%U~!-3@zfry&^jTBZ0SqQieebQt(><9(-O*>;=XP=WKfkQuPu`XcAk5;>;%Kv?rv6eD3g_$x<)ukRmD z+$PMeE#SlWGq_#?R-w*e-6LIG$FPxqVP+9BVw+iaU|Vx{>=`Z+t{s;7EO3!+Zr*}# zUbwEW+B3`juF*9}x0N3XWF&J(Btw}#Y1;x!B!Bw=esy7g;39qu7xCql`7%DGAb{lx zqlSv_!~3XT0_s4>UZ9$~9SmgHaVQ^xjS&svO;Zx<7m7@@X!se~WL z09T1dEO-ZSk-l#dVmU!=@r4OLjt9~_1+n0r_;!94p`uE)#p5RII2m}#Y{Y?g@hh`d zWfSt6qPF<;gs&_Ip0W&4;FozkDQMseKt!(YR_3Pw5$OsK*{Z+Y4i9;Wt&45IL$(7C z*#SIcC-9J6z(ZaJ92rWL2R6N&AmF0)5ZU! 
zP49NVX<5>9r#6o5MQ_7tqJ<1S0CJERc^#E=H`IrFIV;PJ0Dj7nIII;PLK^lWPiN8B> z>&}Cptx&?P#+HI@SAm3!z`j4Wh07&e?bA#Kf`7v9@BzhQreCwIQZSF@!`NeWZGf%( zQr$cyymk*RxG)V2Fa^>mARYNsE#Xw_3x0P`p6VRp6iMLBa|Hh$<%_qkmpIz?RHQE| zUEJ?JWlwB+?i%|7>ecI3QV#rx3nO-t?)sBstoXy(c-s|ZDk+<#6Hw#c)28V%>J`dGj$sVtVqetNeP{?^Ix829de`79 z0>N#u)REE?+R;>wXuGZdaTyRHOaJ3Cbcb_)49PD?Px%V)1X3L-;si}p$6|-xQ+a=N z;`ZWs9MOUYVJR@E)fRo^smMsA%p}U2BRj|P93*%ST81P1C4C3Ooz7XSSHi)A_c<0{(7XtNN@y?G#^xRUpa!-uMv9p*7;ixN{qZj~-cX?9?YS_xbyqTgHfX;&D z==>Cm=iCMi_^x;kb>IgX=7jYR4511bLWTHsv%u!Xm(A~5YgNk7{hI8Sb&cg<|G#f2 zLx;i6tG3OII1w-eRfk#?tng+_tqLOK3`5v4T*~$T!ioDxk9GS7VmpH!xPZl|IbLa* zCqEU_Eb-4$E51sks?108L}2)VDueRZ#%p_JjzdJJw=gnqvTm$;I&2m7&Y8N6h(Eix z7H%Jp*V=7SuvA7eOC^$@m_LS3Dj3Z#Djda+P)Ebgh~+oIiU>SUanXtDIF^%UiBeC2 zouS~L#lsLq`jzKhjN3Rnx zjFq4$h4;XPKwj%g-2Fu5CVWHM*Yb^?eY2n_2`u@~ z;v15t^cQciM#Qv;FLAO3$n^ z(Z$@8Y23S?mx0@ax{-I(ts%gTPoQJqSKw!;5VK7;4avi#T*7Bg+$``m<-a50PGLKS z?cKK}oOU5%3d=3)a=6*;a6xO*%d|S9vv^eF9P#V8KFgWM`V#XiCTZvAt<)`5ufw|h zXvbz9|p z)ReuM$|9a?V9`I#Kf_<*w`J`n?;&F+{De(vig`ZEVxC+0=MndOi7(;zXT6ql5IH8h z)O+~du=)2Pw{$O08LF@Hm-#aEp63Exg3B^Xu|_X6)aKU~%xm%*=HBYRd$ma&9HBU; znAhYz>}gYG@Vq9UVQWmsf^G3qE|~JN%3&GUb3FpfMWNxGVxO6FebvnM0k6O?I?uhe z|89z{TF$Lt=i1@i-A_C>q}{od>|A@CoA$(WhqgPnik&OLxpSX*Ze+W2tJ%2@ICnzB zI~{&6#m(L?yfgDq{>yUD%Ggm9G2?=dwhfNnk9VG*c$V9?kHM2}&lw=GE+UKO5~}Hl zQO1rhfwZ~|X;lhus0`jvIlQ3?@P1V4o%t%g3tz2w#pVS!^*k3k8$n}`A_03)}3Hw(Q8VT?xOXWoE=LkdPTWOcgcEe96L6vXpeH2uEcsQ zk{y#5UCx~ZIrbi_q?=e&mYb{lTufs!Zgf$3?iqYaWuHbARpg$-r^W13SW#te9zM-u zp9U6H+I#{X!cXZ>o6r2^wE66n)8@DSfq8HCyh@0~%{J*j2}Sv&ts3=6nQrJ_1`J{} zss~eUxwj78O8HIgDm>)T!d;r175RG@FZ?XCy@lG+Y0I zZ;G2|E{Q(lS$PtlPqz)88jWlkdv5d@&re6Q>{(S_Ha>6CQ`C4a{HGfwkwQ!^{HHj= zhe-COAiuFf`~?`$8}(t3=E$H$)Oe6#7o_&M27Ub|>_<1$)~m3OYB1H$#Xh1zYEWrQ zk>fBIujS3I?w=PMp8<+Qa(w}!&$^=l3{11*yOm~;m?bSy~;VOKG8)@ zGCb6pGgBsh4-1&Pvo+(p*@QZJ>i5Wd@teo=)4OWMXa~W|S?w|e{8dhPcglH~J_(+V z>g+o}2nmhsElex)0{(bS>M2YuK<3{q_q(^Wl6UgiHneY7wN`*XL#!BRC8ogE_4+hI6ZtD?}O 
z;gdGW<#>%;n2L8dZjBShq7%equvblFOje$iNvzj+5u}xcZ<*DbAS*ALS?3`+V}f znMFhLoX{I&R<1X^YWTNIrrl;4?vv5EZVWiMdIZmA99%z%*V*xCNR!eA*AtG&_;uIG z(xfN2OxN5V>KP|)tXl%qHj+it%futh(}hQgm6@WrBSt@Pp3H=vHUOYNlS3@Z=C#uym^{MHke^^MuR9 z#VzGx9pd$u*lMmtw>s|9jj4#uS2}Skv3{G?h|BkJ;Ofr+-$h^Neb`H|T}N#Y@zEbt ze8@14%kZs4Z=!Jk@~YW9a0#k zACIU2@zKp^d~`j9m?C!}s(_g2!dzXkdZ}`odbM~Uz90i%TyQpZW}(bEVxsHKnCSXr zHTT0>j$}MG6NQl)VxkKSJ_95rN|=QHEm-|vqjTaC6h~NJ^$GeR0yF ztn;I7zr`JG`&S%#9|Li9zxx|9-X~$ry5Iemy#pTuE8-la6*?7#JR$$Ii}iMv<)21k z)ppE2Ek)P+CuE;?F-iu@KJA=)S}Lw->~1%{mDX;2anm|><0}>Ag!D6iCKRk`VXjiO zzjmr}k-`bwbmOrueiCQG(|@JTUq4A1iZv$9?5&u9;~dsl977DZ7KLJsQvHp$Vl;He zPht!^z7FfCzv*<_Z(dGdl{?+`FXYX0S(A?2shv*r$XcXWx=zX}dSfxe>wyuJiw9aH zirI!xwnBCTBRG$&P|DL=f-F(sd(ArxF4&he=dxM9xRKVtLZA($;`~NDANV`OSWzlo z#AsAM+W0v%pqaY=w%2E6$eJn_X8}FvjZFD+QQ7!+th6xKuMPg}4mg1~&Ld9Bm6tkk z4^APbb{zcn*nZ4KU%?X+?#nrB9ZV;@@1SBQ!e1ckTSvv8BLUx9m;Ot(WmK)pd}U*rXL6i?uNg!*;F{Q7`L?nx{u0mz>p z$XIVYvioXq2jI8=9xjxpyFimlVX=1?L!90Pnp6so=Rie}O@_Z_UlgOHdZ@7sPx?s= zfhDLz2BwGcIxau6!{~Rdr~U4wWNdh$`mNNrLS`GWHnDOjQ;2(I>(R8l#g!N>H78 zZ^aC(sGr2$?E7TcInJpSudsHE$Abg_0qst`8&$)UmD6$C%}dLomnp%j5iH-y`^0K$4*>*4&pNc^7gSs zV4Kgwj*%lyjT$J}EmIFD=CQS@!P=ZNt4HgL&k-xxZn-_y=NZ`KX?0w1QJRBJ_E1*G zDH4>P`R-YidFicIDDA;$^<*jS!^(vfiUk!4Av8Oump7F>(L2U)@`4mqpkE(!so(Wq~{@j6hy5IGPRy~LNUD)A%$HGRZ z`#pu-@5FB2@9RJUiOH?I_j`Po_xmDv`F-hrBkDB__q&JneqTk-Io`J*8l~t0UXBcC%hOo*-*)H< zLLCZ9fdM)w-m^Z9Wc0iIj6IDLri%~PLlmCIUhwMKp2jJRWqBI!7&||W_aV>1@iY$N zX@ucvaF@2CpI6`q;7x%4G`hlp`z8U;WfJnRrnBcU{h;E_4z^-cYG0NMUn(wVILKXC ziw=42YMHKstym-|`xTx`L)=(O)}s2~tcrnrH>|~+zS*T%%{FVXQ&wRYUWhimFXT@j z0_MSSG$>wl9zgZ{2!k z;JTI1tXfBGlVF6q<&x=|B~`oeWJPX6zF&(Nf;{`_h7cwVyx{%6#%$Kb$ZH_EywLbO zx@HW-e^%Ra4e!d~|NoI$vGxVhx;RMd?-7;1EZ%A=MTG9Y;j*~2lV@Lw90Oo#@a$y< z`vS6J-NATM3YJ^yS$G}u^jedB!F9uzQBrQl^N5vaA@}|{)ULmdK7;aqKSwW%ZtyR- zJO5A4(I%`4i%0W6pQB$OLf+j`x}KwjuuZy}qko3aOLO#`$udVTqciXSDRcC)7z#_H zMf?K3lx>b)g#S%*v`mz?&>SrjB`vl&x~-Y!Xc_RHj&n2<49b_qV~wTYD5W{-()>93 z1kVssma_lb&(X`w|F_K1y^SHhe89_Ir;=xW(~oj?U%=-Jac2?5;0V 
z?2!`^Ts_8FEkz#TQrESp77|?i&_~H%Gqtc6KAm&(!0T0Kcg>W1H@v$hrQTZJ=%(eR z@ztbhf_ds>*jLjBy$^}~djPn;`)h*0JDO4qZkSt$x1R+A%&liMo}aDIs5GgXm6~<< zwj8m58*VY^$u(3n1pN<*7koIlzQZ+Z2k+|I ztqB@SUF~?ySj{-iB+Ukm-Jp${wFR7p>TJ{W11NEBpM{yhcY8Ja6^aSq3F68>{bPM<5+4zp!iUK>EuLa1?9&uKYQaM<+uH6WKW+TJ{kG>|AE7#*2(q2D`h3xp~qEd9hc2v0PlHdzf zHzaF&>mZpMR-9a*9jsf$t4^-wQ%^q4uROViFGX*r#RXGfPnL>d4I54-X_IxU^3X+> z-+0oFU!X4)haywzvN*#$7{AnJ>pK`mVU0$adl?2}A7LJiUJGUDV!Hsn77*bvZ^7rw zVu{HWuOFF8MYpEGc=sW))JeNsKwSSWWAvwy`@{CK_+XdCuqNV-YTp?RzZ*z&Avpc7 zzz3sDft!Xk#ZKJwhb7$B*PXccp;L_OEgTau%fO4W3;oitWlyrLUvOt``Jp&daRe*a zS#KQU&2vG$aDQjG3_v74;p`y1-{(A>_tk(yM>8VdMN_D7rp{gwQqAN-$~5<6o$)OF zOjlryrDswnoqL(4YM#=h zV5Kk8EY>7z=4j?>=4p<$y*`IHZB&ZCDyN`&P=z^>3Z!^GaF3N(o2!6*P*=CfSewVp zap$in{d6l>N68<>eE8ewX7W2apoV5dU|o0afC{Z*?0zRnr;I?1G60d9j_hfdMWxZL z+xpO$Zs~%W{8$%MXbo64PqX!usSMVstpDvO{I>_{QqgVX2;)_uZX|~ipUO)C+es;E z{NzQ5WeAkpW?6Q~UEP7*E&f+(g=PW~gZZfh{FC!~%^vQo`8+NxQU`w4h zY{N&a-_jFq{K%7bL!J2LH=<734O?#-m7=QzHg%p z)dM4SKt&sLhLJep+gO1kWuh1A3LF(9wVsL+QOmB>)5^WzUKgw_5zn;v;q^p|6JB#$ zN=2iI7?&EL?|oQ>K}T5;)IgGt8a2fYBDvK!3FOIHhAZ37x2P|X7|hPpFIG55(cx* z2pl@^_236%eUHbTeh0k(9gsOa$l43=u2=>ylrRUEZX$q->b$TLJ<+Sp9lYnRyfdDs z3p87axDS{^81r*O(Fbtc!#;px69?$~G3^tGU(z2^e-iotR@@wnK7i4Q0eXM-U2hzZ zf-m={o26nJmDXo}vAx7}YoUV;VG~YW*r;8Fs3jyj6^1xUopT?imD>4^od-K7H1j;E0 z_6K*ybj0*~sgv?1BHlPvh!>_H%0C<&8*%7`1>Gc{r(TpdKXZ?Ep|C(mM|x zBqKv$Cb9_T;+z@sRQ1}tm03r1tA$mibjO6FLI%H2ctzMN925=+uL-XT`-MX|ME8N^Do@Y}r}?wO8^Rgk zZQ-2oU&5QhTf#dyD^31K^SqT_a}b#z4TR|v~7;zg0I}a^Fj$axq5#3k?M3@%~$T;dSEptyozt|Zj~gu2lD?8u0k9a zpI~hac8dmoS?i%yi09(<+qSkfl4u9KKfDTz0ywf+;TDZNlhz}vz^{havFJAHjCV&@ zxvz2%S-WyoH=jI6>-{Kt?J7nz*c+*I7{QG~tkW0YRU|9Ij+{CoVn{CU0z z;}o-TW;DIk?Qyp3)nXj~8%NT#zuV))>?Qt=fa8ZT4rN=uhU`k2YCjw2R^zsAkK?;{H^%Wk{xZg)oX+jY z;gqSiv2i|XT-WV!?qyKUuFvsjF%D&qu1B`0Otp@UliRqo+v8lzSc!3D$5Szmg++7; z##ze7+0rY2F@S;w}IB{&8 z@W!ZakFzBs7UT3hKIZ=-@9X2DD%1WCFfhYA3L+vI@-CtxDx#tS;s~QD85x=xnVGq* znOWgW24-cg6*9v>zD2Fu1I+MZ+sdmjx^A_lMXj}MTj{#$yboH^O>JAh?{%Luh`c<{ 
z^n3Q7-{<$oIcGRCocqj~>%Q;n{$Ahfd*M2w`s!3%Cxu@}0W9U#M*p7TU(A~z>;~y<4L{pkr&G1y z?8DO8BmAtr?kh-56&*Vy-M@d&v-e?J>wC-V$V)iMt2{oWw@Y!tR@mZ zz+UU+`jQ%2>wRajG`vzne!({u|DH#R4%g6n(1zDrx9Mtf@cIf~Z`vp>d+ns(_k9n{<}DKG;+zs?OSt@KM!(%$5)`3bewgbT

-yD-Ywi_xlp14#z#H zMc0C3@%LZ#%BIVdKh@mb=ec}tLQV4NUf{5n+;>;s*$+(~rL&azIaCWg5)!Dh}CmF6(ftv_E+HFpe7g^Os-2vDE%wmrHQmXMgwdV>s@$8!tbA;~u;G z%3U}X+vBg~;<(ek=*k=%i|h|xnS|q3`}0>`flnM@DZ$>y@)-6C%LCZUEO%k=Zpp>| zC-WTae>6|RzSn$`@S%=fx?-At+jb4W*0RfP|l9TYw0gz_CAb$!xp`U+bdzJQU2&7~eCVHX4lW#%3e=+PmJA3rWSu z5q}2yr7-^h@D^m)egapMeC|VqGU3Rhp2Ah|8oql7b)B$%g3W-f{?E|$xmPAUoPvBe zY%gxddLFq~#03jb>UoT*1@XPw#Yd-7M0E#ct--UsZp~Pe{_o_K?M&l_8gKwUIP0o25o*A1GCm5zF(nkuIxD_=#%l((89-g4ATC(qdjzH^}Z zrQY+&{@2bU2QFIaS?*OA1dGZhivr3fG#c24cAWeDi>L!T1LF_%UTbgHv6E2Bmf;CU zq3q{tNJw2&G#}cp>?`3FhCz_odou254)a1k&;3 zV(2&r1Xb(+;#T12(Np~xA$m^|wBC5Q6_$jaLQO^095k_As^$*b1?&!?yLp9`bjtop*OH zOGQnrWThJ_>u%4ER^s{KeR>iqXo%+d2nkcW7WaD^>w4*I+V2;E50@%Ep;f)TctT44 zga{dW!kmQ4WUwzVxXvfX<4Gmeo&;-(o>c&z)%1SPDi7z%Ob_l%kYy1d(!3Xur-rFe`)+C~K z^&z?NMUhNc@-ML;*bd5tQ%|D4_z%c&zbq5Nk`zKOa#qOJ{gH)|A0h{r`QNbi5am%G zy)$q=uW)TtE$p#zrZyGFYQrRLl6ImtMLSiSjOu<77J6k(To1vYrou^IL&edjWcWCoCj}jKV{|@bPZQ#2{xVIg4GFO|UCEbR{ zb)Ty63q#@m3Ed(s;nYv+mTKKm-RL&02Qt-{XjR(9+GSc9GS)qjvF-(g%)q>O2F7u) zfEK&#JtURf4%^HFdL`z2#vzeXz^d zX($fEb;eXuHTw{ZmcfO=YH#T}tHFJIijTAps{(2s=e?V3>MDDPE9cFFrd|Yc%sKYW z&)>t(r!=fpB_hZC#`)X%`M8FARB6aDzj1ycKOc^IOZ$o_%Unddud-SEd_coVR4(!_ z4X9a-nJd5hiOT!xo;(;)<6D}b^ivza&0Mcfgb!kX`u4Bt)8K;`P-Ag(Lng2Z18a2n zRIaY^J>Yi`csJ&258!wa;7$8uemsZFJMaNbp2~=7JKOnhFJ&T*MgNv7({S91m}-X7 z&Nlzst#pDig|n^&O;NUiy^k68k5{H(COXCLK<30IBVme%%pYAz!?DurxRQvY#_Vs5 zz_HYvWDLM@pLq$8@lF=!NUQE-F^)v+BT;j{lZ|r(U?1){$u`25@aOmT9PW+g?#mw= zSgDD0Nc*#v;D2I73q=OS(?HBmvLMIDs2%#b@f>Dqp)&$?_&$79F06kXH9;N&=Nenl z=bl0mwj$IIO#&`7!tOEb3a_O)xCOpZU>TPBB-B2FRds%&|F!qLN5eNOE(+Bw1p_-;>BrF! 
zUrk<}k23++_i9fk$}$>N_eG zR{3IO@j~Tho#ItDWUjiaw*ZBI3@Ch_E?1L->f(!l%ixy>(6pZH~)8c0R;!fxaxy~OyC-KK~7T(Ms?_?WouJgzL zAo0iZ7T&B~+{u)-K$piHZSg34qY#XMH#5l5H^2{koR2t#DxVt4Zrz@pRezOD<=GUO z4R=EonbyhT**JbSwc##kB0tJU75eO0es*HRawsBi={p<5&qg+6)!(Li3}-1j_A2w_ zXGdY2ahMgQJ~b;a&P3Ypt`4g)Agi^oIu6Huwg>8_WBp@5WtENksoV?MZS$^NQFC`~ zp^nBD11bY4Dm#I2$6@ri6VdMJINts*Pq4e};JkYPLm#h91a}B1I5IrGx-yxMASvK+ zJZH>;hqSZX3^n7Etf*T7p883)wL2a>^^ZSVE=;^{u%r&e`JevvUSM86GxCvwrP-R9=s-uH`ft2f0d6QSFJZjkOOWCVO}|E z4`#|BI&(^cA#?7lMOyEBGGKq9603vX1kRsJ6CR@?tt!1kc?FD*3O) zLOd!EOO>_HJQi9jRUpQgfeOU63&__lZC3_UO@gG6@Na$$@UEC#uc!#o%PXRgi4|rD zKotT%R3-35O@eHE9urcz<>IpdZTAUTN z5zB#23(awnu>Z4Ig(u+nXRz+$d9_gki!;lVDqd&%6l(xeM2|Y#;4s=(7(;NpZhstg zwokIFb~V;aov3IUiTz%S7c6~`1+%wpH|h^l3s>!VQvG4{0UI$y`e%(}6RX9E+j)BjBRq$b#fo(-UYhyXRv(Wo7kIC`E>_pMbhZ?rkU1n z--8=KHKjW7egbDjV#FB43^1@H8}KMF14{7A9Wj4x+~f<5x=nK79599r*lh4)zC&zp z9gb%(CaqhEb=Np?TsqJ5L_PPLb1h4@WqXym^(5Z6Gh5VG?__5X7Z$GWDyinbTWQHf z{=5By|8A$P^3n^)tQb}8iyTr}sl4ahlfmkc(lNSK$duUUJ04kFr+wmep_(b`HhiKqLuS#TUrm zna=&4S@3?+;QIvrX;tkUR0q4e)~&2THL-BD&J#?1clA9wFJz8p>N?qauuc$5fG3mY z@)+ePo(FrGi}e$!p3OoOIk+&5uUS;4M3=AfQE@V^G6m}=Qdyg*o`9<)@~b46B3!;o zV=+}i9uMz@N<+x+kP^1lLOP$2DXJ0=O!_UuYz*@yFKY+|OFcg9}97f;!+O)WOG<=Vxro8H@7+Y1QJN3j? 
zHLz@xpi$PyD;L-1pXmu-Ds4TK7J)b*#Z5( zWb99OkFM@Om3eRMJGu#@{N5N_f6e$UXNl7N#Nd9&|G4yVC;0yZGo&zDU7u=8QDZ-0)yy&SRp`VGJg0i>^e5V+wveAiKythA$nzZ?`LiDkM^SyboZ(*c9 zWA-aWeE6~u7hZi(`%`w1E&z&UVQQb!gxay%RNWXvFOQ-sWOjA9c3drCD~9b9+4~X* z3zz{~iuU@JrdEe)H#dyZib%_7?ICSkSwd}qI=(Eac7irR8=<}#X&JAL)gIPPFPmLE zqm(d~MD<{V<%l*5tDlA11!Xz4+1fPqV2tH8ZC=?5ia;MDghi<)WMz?dsrELl6FaHW zt|?nrd!M#Yo3Fl0yFz=XcD43y?J8}7cBS?n?d#fy%O25g(ymoM02F8E>!`oDf1Q?8 z2=3Qz)IKC(HXF3-wGV24r9D_LXb;u9aqQ->?tE`p4Apd+pgaN|RW+UQX8=y}ST=*dRb_Z6xD_y?I+LC*5l{FY= zL}jA_TxBJ{%5}Tzn9%|WGb$K>8FjEOyX%-yOi32TnFWE|JrbyS>Yj+(tx-$uVBLAa66fne!68Hhgkqd!tKZWoZF zgZMRQyj#li>{WVIn1 zY6z9tU7d(yq~rOzG_3Y$6|+yTtRc_yMf?8xe4tC6>^Zx(VJ#wDz}W1F1J$@`o~m`% zqOxLzU!?(~u|HN{Z-OI_SPVQ|B}U_=(5uVGQH6~DwKytyCjWel#?!DK>||9yq&8hi z=c92VaHso>Ycc9>HEq9=kK<<3t5=jbK5S|R!gLZ>IwOJXIq4+MffFii{DWYcPsO#MBmaqK2zCM)8nKLTqR?p;U69=8jz<>QSZPwT}R) zD&mS&b` za3=SR^7QgK;A^LWwLQ1|v2tKkM=N(fcK0EDUo(P`Has-wjr7;Q&Y-oU5i2Lh$h?Gvm@Qfi zZv%P-q|EVmpjQSK=J?@C9F8H5F+i^jz^2oHUKyC5<1V9j=@jlm9z`{-4)%s64*MgP zFzgRme6X*ypbzu-HQkjAcO@cN9jF&LFfIq&+-`t+nGo{=ACX?Rd~Ey-W6>Ab3!MR9 z%Z2yW-WZQ6cguxz#GKz)3j7Kj^2ykCy)P3wu-^Auh4{1&YR?3T2@&KeWwX z!phODCj;w7%DeAiZ}HXsr^Vu`@qF=feAW0IR*judfA=iivsj2tqH2D=JF*pekDebM zOToC4qre%Tzz-tp&zh5wXA;C$j1<+SsBbVn5#~FxJVftZt|*sdlnA?kER%A-a-VWv zj6TzFrU)@Q$|nKlbzyXQX!$59$0V$LOgTl-Qgjb;HPC4~8CH63>SWF|y`{W^ZN`{M zs-R;AGfRDu+jdN>O{MFrP4#e>h(tAJ-Hb$aumZEINYqjZiON{$jIsw8`f}eZ6>*Ij zd{sr?d5$UtJ%n`4L-gG&6BtA-u&7kkG|buK`RZsU&Q$@oi0TF-qe_m?Mpb_SgyNou zI#G2d9@tYS>o9v(M%BdE9=0d@5=zm*t^h%{F*UNvpWzvDC)v|r44ee6WMJ>vUIwmY zV5e<*qi1Oxa0#ll-N9Cw3$QOR1JSY4y)k14`@~9-m;U_YPSgxD;8Q}(3gbV4VURvZ z2dhUeT`n@B4XAzcxpDiy8iI0svc#Afm5wzf!!-o327 zQHazcb77xS-sAS9Fg{J~U3=C%-Hj?0oi(SrjjtO~Ho7JRS>(Z7Z_q~_jmqm(nOmVA z1(gqZ89ilRuo;4|DtA^}_lO!?Zyo%B;>hyZWiR!1CkK86wLG4&AV7`AO`QshnFaL) z%1BtFdQ|CwC@|>-`(NvZUQM*F1Nk8Z$Eel2RK&s}X22-?YU{zyC;M`>0{g0ARP{Sh zcgj@;eY&KopD`eVpo4kAi%BQ@dVZ9CvT9bJ9R=eq^{}I#>|Xe#DE2lPs+43sM@)zD zG!m<))Eu@?_dRS+9XtEL33i;Xa!+9mYh{mMef)}%IO?_5Dlpm&U`kPzFR6ia0QJhj 
zKHY>!3{>$uShI!lTl!~OcOpNB&@mzAVd%m{@QOw+Vs+lZ%B*S7x+S|wHhpfi?2rqq zfM4~#;3+i1FFFpaiz0JEblI{$Hfnix%RIHbNAaX@Z76=17@y#>G05!|)$IM$-ep20 zIHlL}6|j)H5$f3bAgb!?8;HHk+h09eAAr~{ainN8ny((eUl>o3oOdhYJ|~b>7%=!6Lq^kKwJz!r(Xu*Vqi~L^u|c#2^Q2XuRMWV*-p0FOiz%WTMzV7U*Pt$1xhrzz*x-i z*>{=;YAnDaF`BspV_62|8@BKG)cDRexiEPpw!7eW2grmEr->R1rD^cX!DHd}7I(gP z$;A&E_bUV7Lkm#>J%PJIp#XB$oQ`c?^fY28Mf<74ac-Y70vbojJ~4>RCRGVhr@vY0 zu@H5o3l(vQ+^jB6Xc(g?!mBJ@4zLLND;LLwJbwF-N>DHA(;rUP6#|RMRn9~7RtY2` z1NN5%Bq9fjp1DvDNq{YyEM$wxzN-=0n8xQG+<9X-Y|#zBGTGv-F51z(U9qsE7^rbk zorp-_kimtwayvR@A2IkEKA3G^7iZhrzS%ajYc#GA3R?`}JhdQPV+6lO71qy-xh;n4 zQ&h@6Tdc=AmPWElC`9*q$-*usv%| z&cROBZYPT?zzl3*nRXbgv?Wta~d{cub-w@72!33krz3# z!o@#g_A6EXgxN2kXUr~P{Cu?!X1`Ro5co=IZc%k3%ff0#VeSjzbDys|roo3sH6L$2 zwkZ%rjPwpthhiQKzzpcG9)YndazF85LXj;XY6}oT907YBi(0$E@S`%hHP7aj1*_JH ztR#v&60Z~tD;#AwUcgatf&8mXU7@NFb7H14gO6;pVOi<0tQc670bG5$ivo-o;=C9N zYx)9FWwNFrYMKw}cdZ_#ruhkW$mT!TnuZ+N<_%NR`~3*fr8lBbRgBI&7akF_Y|S4nB)Id>w2$W?C!z2jbi>p&B{-f720E zBZvQQYVWh7Mq3WXuzFhu#-8ttCy-}Kb7TMLTnC$tv8@rf!baFpB<99q<%`^>l9@92 z+}Md4HlG`9TY)bu2cKoPr%<}VQ#c?S&W6VSKWRfpOhegFGZ^as85`Pe{omVA0lcB1 zZ0G^pCD~9Ol>0>+Dz=F>bk#bv4RtUZV#6ocNW@sItP^WUvLQro9b`kD>^sK|8+zGF zHnh*$!7Bh_o_7qkp)dzxq~J2#Y(pp5MTgUdr~+X17P+t*{NzK$p2Cv#p27-uZbO;x zH@a5~F{2TW%+Z`ct-$lWdnPV~ue0Y@{@1z!wr9_)bMKk3WmmTR{j3 z<Hw!kuB6ySIX)qqL)i$zRrz!0dw-mrwc*=> z1NiIHXMskWUb4%4JDJ1MbQUMB(c8J}y4jRt}*RFSOD+WGnenD_5U6-}|uC%0(M}mqMwP z3s$j}KwLRZYNZ3OllxlPgH|@86$M(^J7g<~QY$CWil5XtpD< zcCe#TyUWp*VV3yq_TpJI&vHJ)JJ9ZAwCjgdi;R^6SD<9#zluE5kLMwaw zTDh|41k<3Ek!a(EMq)QSSFME13E60NL7 zDl3Mu#T1k*vd6P$-;_>Ywq%%i&yFpUBhopA*q20!r;xoLA*VQwf&+rJ^ zjYGR(Xg7PvcJG(k-G+ACM~lyJBX8wx{uk_6YUM#(d5P4@NxaVPYh^oH`Nr{*JshnB z4cW>psTCz!DU@1CRlNQ()%jiSL@Nfg5`k8}HVtJDzEUf} zXho1(k?~e~L&Y8>lUfPKm5ZfTj^K4sUn|LIWgl7@hgNEbY~`!nqSajhZhw?tw-X9} zWDk{6D_xjD$R57KFBc}Y@-$ir>TAUZt*k;Tk!Yo8$W|JpR_f47w$#cI-b%XE$`PrR zkI~B8Go&7bR`fHRPwetD`1R3B6k5q1vXv64mF;N7G)nySALgxm$^XKyFSVjaD@&wS 
zX#CCYYo!ydT(`euk3lOVhiv5zsg*@&Wv|pq8gHdoY9&o-MiSzAXUw04n>&Hs1grJoqsTD8YN~F|^OlqYNtrSbGe1z9UeXSfpD=(v!1hk?b zvK3Y${`&tYKHqyLM0{eOnCaI)&Hn=0238sU`VZmC5mGBh@j9fhl`Uvx6>$C&Te4=}4 z(3w5PSET#gCEe$}CHF}8*L(63^N2ob(3vrPXDTi&yXvO+ znLXFn`pLdFy8B+;a@pDL;=Whg`dXmiUU^(`{>Rk5SNHV2+Ixk5tFKybd_VUc z?fjfq+w#qFAuCTV96-!w7q-pV)?%al!hP5R@LIs8zYV#D*i_iku}#MI1=4xXU@OA5 z5nCuW1-7>rqm~4=<=Aqt&BkU?As-RjK5VLb2@uXZm_JuG#lH%69zB;#-lEdgaDBFPzK6 zxu4k|`#li_&K9-OuanyEvwk>R!#}HR6Ms2*zyArqyJz{k8>M$A_Io!7?_R^ZZ(kAr zGq2x!Bk^8n8{NYT{6BB&*JcRbo5SC$Z@Y2Vp*vpSzt~@;mMZ&QEe!v$hX2REq<7o< zy&H~qcjMh#CENQ)zxN{W-hbh}Z0S9(el3o}duQ<8L;Stl``uwA-uXsqX#zg+n@czD zGit}^OU24RNY}fq-}Pd4%$3eQ-j*lz@)x20E+nk!x{deoN4l0sJ-iSjQJCc=#F{!- zstseMS6ZhK7mc-6bm7=z!sMr~C)RDceE)_>uAxhRN7b#rT&&dhyOX#bX;OQeagSa7 z-bvW8rr%$QJ6@DN_X<8|lU}z=-_P3b8%*BuS-<~F+VL9y?EFh!(7dPLSkC|6$oG}~ zKAF7ZAb-E(l3Uy%jF0W?Gb{N%ZWDSxZin&@Vaok8kKm0vIv5mkWB!e6Q^qAma>Pnz zDK7&jJY%KW4^L#?=%bcz#GKDJT;l3dpe^6n1>If!c+{Rx2f}m}dg;97n>B z!@1(lEkKo%c*f79@+rtx{2It!;Jewn-KqlRyLj#S>h^2_%HnQ7irs-Bdmv^CZmuDg z=Rqd&+{ien_e=zruAO}db-bvoOo+w*3K*a3*#a*}*#gyBiD14a zpek%|VLYD?tC20>nv&B-DMFhwAXlKBJ?C<+z!E7!n>Qdr+YYT;)H>j}Gkwc6j*=7e zfWGf$eBbB0qV&`I@Mt?*k8|^{Kh@BIN+Vv#3YYB)P9Z8=2-ZLFLHia&XFJ#r_EJP=!PBtU14SH-2wOXQ*^+_% zi(vk@0~0ubDka|nE6h_+J@nro4${sXs86?2c?C5~+F7q{V69Kg$Mz1?E)!xBVWZnE z=U}74eRAQ@e3>x)I}f2b3iaZ`J%!Y(+*T2jg&%y;(6~=2jQ0D(h$EDMNOV8)HbUWMX;FzM% zO4S_rU=(!=#c%jBBI?6nibG<`_Y8wew*&8U6;qr)3@+V{C~TxMgRJfOP*ej=mvCu2 zn}fU5W2B9PuXQ_c=@V>)g)qev>>cx0y+2+cTygG!d8of7#?=V7M+6!1uy!y=$hI_y z{_g{-2yD!D7^osfCtI6@OvB2}dH7_Z`2=gUeg~`h+Bz_AYr!mNW3auE8Jg5Z~iDiKwOw>%ONaFcVnYyQ? 
z{HH7)CtHBg?lI_i6dR%|qAJEC_bDFPPcap-$b)Lf>M9T8oUA6Zat`ceWL;#voju+w zstpcN`+7)=5w7mzwXYMAf0A%hYD_^}3yfn7m!Q#S;A;wODN-vX8nWzi9`z zvk_I`+7W&8RNjM{(;d)Cc?316JJ=rUOGX)v#a12cGPH|EvM}fs&^MG}tQ%>#|8DDdgeY!TGW)^60ELQhLpl0CnMvfd^~jPLd!GjM}enw z+J{tce^mHlf6mVbdkyM}#9WxBC$3~5SW5xW!@V0kC1kmjN8^Yr7jTrqpoBXPY{{|U zEk%}3L5;N4c+XIkQa%+&6|k; z3k=FM@FxS`xsm7cs%jeYT)d<_7dg*!X-EBt!eeTlzrmS!xZ7!Z8}q|&Db}QQJ^RHV zWxr$$$bM1xXOaKziVLbOMu#w=lr+9$8+Ys zEaVw3{h!??o|V5Ac`O?5`N(mZh?vjzY zHALvj=ggvp$Tj%6UvoYF{yZ;?{^)({kYR5JyYK`ngyP^P@Fu*#lh|<3Q#kVva0ka= zEr`t>aUdse;?PY#ZkEeFuK%@WMSwO0x<>GE>wdY9i=2(({$B30|5sk_Doft6qCPK| zGDL0c3GfYmnU7m0`M4PiZ}xH9SgPGsANR3geB3tF6?T<{c;_%aE=GTN2%?W0qMyq% zT*+4H+y0Dr_};E4%m>7{p!vYYdU$^LE?Dr_P?;eQw;in3+1z4_?ej4Mv_ree>EX7s z-LR~7RCgoGy&a7I1RQVsk*ZBm<%o8$$Yjd9xYxVe^bq%YcbT5&Uhj6(@AyoR0Baoy z{zn_KBfVhHe)e|gbAOAh)$4se?^Z-m#hIXu?dl#_PXb;g0V&$ z$=7J!SfeTMezQUj`wh^F8QV83IOl&@rR8FkM%=a{rsv z8dd-%(TW%GwVG(f!T1e-nH8&twc>MDSFL#KuvYw*)m1BAF{~ArA|5X?gvg3>B;T2= z_z)uUWW}>ogcyrfT!wS=uagz;gubq5#d@1)#Su2qiot$i&p_Dxe{LTQ<&aQ$iw^!d(TvImA!a>ZXQ+n576n&Ibj zmnsS4I-waOFw({2S3pgd*iaW&gauNS%BlfXhuWCQ?EC8*&GR(Z+rvBd zk#08qCRgA+>;+=h##Y$Upt|rkjH%y%c`kHj-6j{FOG3=T8!-)R>0il(-*OCt6w<%E zpD+yBG$(>V=tYLN6TKiLBdT@;aEGM&po(^-oGZC_aV*2LVk9gg$PiiQ2lW=$F^q}Y zzz+z)Ae@3!TrzbfL!cOg5sWtAe|g8Ayn$foBVi>Wc=j~=yN+P&mk^A`0SHDbdb_I# z#+G3aj8^tmx2p(7{xAqeD>UyT3Bf>i?MQt(&!YP<4kGX4#Q0%deMgYJPkHNm3&R84NuA~CT2q=x)`q9(#CR- zDOLb1;{03rBTUmX#thyDPfOcq<0?U90D`FY&QI-Q2d2+-nTFN zwlSpInMI{9Tn(Bc7=|E+lO~4-ZG)AL_51|;NIf@a1>ONzEXgpFrxomoEj5@t6t2RZg z0tddmYAP!B5S9^(yw8b1G7^AiOyc>Y*GuQ4IuBtO(~$Q$3kXIkaEv)Te>AD&w(=$A z8Nfvr@M=GS@BA7W79K*2DyJ{^vrxH7_b4UEkf>RHu6Su%i9PY zI##bz)cl&~9cJjQtOBcVQL${H%bD|6i>WHQ3dmcIdXZYG&IW3>5U5!zTj7}L@>M=A z79;;T3vZ76x3cMu2`*pdt>P)Tm&wTDC#{e~L`;GUC-8d-a*TEPD(YgY790t+5GNBn zxNt1Lio@eTaj8GK%P0sSOsSQdnI>Zc7I(v$ro!F3}&s33)8LddItcjnEM8B;9-(OLa4=>;o z$I9x^nl|>KV-4nwHt_%NuTH`79mj^c@#w2+N)6LHTHED(RZ3_DD8G8)&@7Y7K 
zf5)!C{uI`vBcb?1HR9WVaDtY3U7br?AxC$`)A|mYJceQcmu1v^GU^iIs6Qb?N?|tEXtnU+x*ejzx4AfkH<`u1XlmeHM`cJf0s5=eQWq)sI;(Y2j<;BqEAX9rc#r-aGFH}bB%X4* z@8vbQ3Q?2G1J!?IsKj+2`mxqrcKb2ajiig+MexvcC`MVe|>F3ygo;DuhhH5r|QMuxs%t~%2E~L zJ&JZ(*$;fZoQs}!$?SVX%^j$r zQlM*NpQAUlV(uDKZDM&)0HmI`wXZGwg@YI7Js)gADb`s3R@d1aK!7WXS&k*BIv&Qtr>hN(wkR!#B=SB~ZxIss~5L`nU5loa_m z*H$Zn)vqsmJI;)bkPdkKx&faq3w0m@+Zr*jcM>9CCOl53!&)1{ph9 z$U~gX^AOX4K=~mDG4P}L8z>X;K}0|WROFdiNVz#~(6@3&)|Ut3pfcpJig`Kvt7v7Bu!&YiO1>thK(@s`ZeiJu!`#9$9L~6CEBoBy zT?rr8(uOLm-x*uka(nQAH3gyr82LX(G@at2Nyz+)>C5~=Pj-lzU+wH23+3TwWnyfP z1ZMu3hmZ|TY*UwLPk(OQ&+RD!_7nqqTF31vQyow{26NwQmNvI#m?TK<+W!@vy;rdaqk$Bikl4McgWnz^wXLjDL$aP(S_g5FkmS?LM zsy!&9(M^f*9a)XAsR(XUf$wDUae5=h=>>h`bheH%3m;W3gk5F9u9T8p&4XPn;+ci5 zSlKx<3p4Sninc|Wg;sVBt5mYB|7~VrD`tlOZDwIB+X_!WM87Gs(7AH3vJ%{-9@tEEqcuzQR5gB*;f+0AQSr>ad*lr zL=@9bnT5d0?as`?R#t8Hg?$}0w?TW9>?_JP*uGM1?fBeKnT4(3&N}T&YbN{pW*+j@ z$3es4oQGiQMg7?i<-!qZ-73c2H~lwp_oO;;-P*1U;%inpU$c5uj6~c$Qvb^%?-R=h zu3C4xv}$c-M-X@4a%0t69|@~z`NgZ&=cQF^^`KSjc9-Ms8-`uA7PuUDR}H&rO+{2h zTD8{C8!neh$RX>fdT1D?S4d)26ZevjnG3MUJmLujq6*jpPF?X`fj=ojv zhhPDVrlNxdGzitm7> zeIOU!=)+P*-v2JJ6hB}ovi%CE-v(i{%kd5+{kGC^cuktP!4)Hcqj+_0hwLemB5sLlC1%WQNh@Tb#0Uwgnhd-J47fqMxa8*% zFT|^c_z++O)GjxcU%8+yBucVmDK&9LkC;+$` zQ@KrwsSH%gZGi$nf~1rig6Iip;u3mC-}W1QCm7q9B|NOsm)y%!ObcCgbPMFa)>4$CWu zptK|Zzm8jJva*E*fUDBNe7e1=={v&z1CA2X#|eQaZ*g)$TG@840?;2vX=5*9v=vY> zF%cNykI(=Jm;fAQj9fU1*!A>}JcKjfqHYU(J>pfT{n@$R`e>=h4ME;xnOD#0^d0+# z3b$w94Q_~=dbLje>Qh_^m2^;Bq1nCRSOw>YkUAaRGu^R;{R``EvGTF&43R%c43Vm= zoAps!SgYOjNXnbTFhp8VSIG59%Jai8L|RZ)$kj;7hG7{ZE$|Byq)19GXNcVV^j34! 
z;y&VtD6+T?Dy^x+CoAgz-9b92-s#|ph#B)1mc{>P5&BXRkPhOBkjF~#lvb>G^03Y# z{NYaUMOs;2A5WwO@k8=G9Hzv@R@B?a^nr7NNsP1KG$(v5#n}<*L)IM439X1ph;zaQ#M^03 zxM~yUgf?~xb3zNa8iVHqu$a1OPG|!HKyw25~Xsa=Q6z2qD zvkK6L(O*2<=mXwH8Ww&W!7}=;ka8fF)AOnD}U1pPV3C-^cspW^Ge||rn|3HqvQ+tNE{-XAbljEPMoDV!grY4TRs6EpHG*8qG5jp<3_{~D~ z7IsL&8-}pFAsO4lar`km{;xUyEr=`(#qqbWT#h#&9t4g*>4vl-_k=k9z@PeY{E6XD zJpZ(@#PP?h4FsTteFv=9$lj1R{%xp5O&tF=_C+7ZzXg@%XeQre#{3Pep#_;I6qmSp z9Q@G=mV}ez-={v)!rlSLpOo2vc5#mXy2)}OF95Yr{^%hTnP5X5PCvQprntn@h)dj4 zUayq%h{Pz^hsHOwbnL*GMA40y1gSrs^B##vV;~sRA?0g$41?kk17i^sy`WeH#VMl7 z#kd5;B!-GdP#j`l41!`96u*#S6(tm>m{*S4@iUyUi6j4GY@&tb*b9%9^VkIWd>mhg zzkEMd>LT;V_1MH8rPxHqfY?L}a+O_=O&lF2Hi6mE073L0z@2U%d8l z3!84C5xRf$p_OH0RBuI0-wIE;75e{_FPNAl7ZiTT(?vWi)#xFtx(t129QCbEzr*V{ zLhSFC{r~gP{*i+oz(wNb!WBB7}FUmm|_AsFxtp=cc{`h|5xVD0x7V^D&y=iHxlo zI?VIdf@3?-x0ds`AKhoX-pu|0lmXsIjO%@(Lh2J41LAef&zY}< z!_^+KN{Vk342W+uvlPT4?t(Ys^&4Wa&^Mi@x&z;uW-$8BbMT#??TUvAW4;uvGqb52 zZP@{<4@V|}Z_QkI2}`=OVC9GHOMcVBl--M9`G@UGFq70R1;x?h|aa}>VH>Q3F6_)x#hQ;*Kx~lHt*(^|F*db2n=N)HnSxTvUHV$`he1b zUT2~V#C6`^N>R5KRu7M77SBLzVa+DW%emR^-!rwcdB`S+fYr}-oHH5{kTa%0-Q3F_ z!njKwLe53xrZ>rjZGAZ5bN82_X8a4v--C09e69w$eu@@VWUgf^ZiTX9M)64PapeeN zU{5CYb-te*`w(9*Z7?!qQ4jfoA2@g3VBH09?{}+VTzMoE6{(7BD0p@uJWqBEICo*Z zlEW0;YE=#}!P}KndCWu94~{^k&lZ+vAs-{eH(xacej&v*g7FJ&#Cn}#8euNR%ksO% zVf?xU@v^Z{;v1tLjo57%I2g@{8~C_*jnu9nTq6+iGOFF{tMf z7k{JQ-~PGr>sYWHy@0>9c?cmFWWxJ@5XU;TvSjIt%9chUc2{^dH=^DhviDHM5GfP1-8d9P|tAyq}D%0a0Zt)g{f zfZUBMM>XKQ7W5*r3Q_}}A@$o)unJoLgO53s8n8F?FZYZ3mo>~|&hs{t0Xh9wp*~eR4tFHe@(85k({78e|;jdvIqy@j%|*@40!z6K6n^`Yn%$ZQV^+NUMX12@P0*=!6Mta^q*nKhb z_GiEXH^=iX2Ga3+JlUPr%kVdjkCzL3+>x*Qg@-WvEy<6Mf$CU(U6038;ouKG1vOI7 zo_!Damda)XQSBdIb2*_pK^I?2wITLyAEztQ#Hr(zNrZ01kJaSYhoj=@(IV9Q>e;uA z>V1W3MrlG&@hey}S`$#Qjn^!Vs1R#?QT?wdRQnpQiPdb^P`zcUvYe%sMbFecg$$Qe zO=86)O_F9JYMoBiB%|ur6le+V(Ch@ZjcUuNV47Az^-X7?-sybJJXHLetI5>t(mbun zQwN?!B&p;M&2kMG5@(eaRKGOvog!U1>|qhApx&lgqEVrm=`u~R=A~YFmZ|IXQv4E=9aD7JX|o zD+F4QakU8yCD$wL_)8UbG6q!GX=afY*DLHeiifVs+zi&K>-BX$8m209GyB5qdUc(~ 
zLDhA}t9>|Au$dhb1(IO z%Q{rbX2sA&9f?%#daVU9t;4V} z9jJQK%4{}&VbB6A}4Tg?&JZ!rg8zscN! z^}kr<2EQY41F@z=GrMFZ?m>TblNPWNTA|z~#2n%4lXd3)JjFGUa$&gu4I+%mmoRpm zUgIeo{mDa!Bo1fy_gI6|K z&)>`@;x6?-HA9h2L3;l1{<)ri3p;MQS<8RU!4~9uCS#`f5}v?iwijz&aRxta6=!gR z^~MZd05yNCa7-;==FyDuiDh6;1u$c*VG9uTch2B)=sM8#^^%%D{6C&k5gGwJ{YPj* z9P<#;KLhtkC(SjnXNHKadX@%bmAcQDSD6HYG@iImlPUAQe|(j4@0}6WSTF)&z-K+< z{fjd15nKHaWZp~BRjM%gD*VKOnfJ|Xx~=e-rZ4k8797?y-X^vT$i*+sP&+wH=6w?) z^+w=@7E0*S8ZZ%_GR8H`!esH&+Zss|E_I_MYUe4 zElC_1@u?o=?+{LO#+!2Q#dWlaRr2$>7{PZ-x%co%dG38PVxD>McAJswK{3^4_GY&; zM-4p%(Ot~&;2>URSFu7)Ks*)vF3W7hQ(K|3vJ&yrRu*F^LOd1hLW>rEH?daa-Z!!1 zjuz%+js-^4#2Ouh5j8Q$q4zOVn}HxX8LG`Jit`n2_K;IBVpmI<_gJSP^IquuNg)@u z{(xL%@CXAw^$=42A`{l&xVQQS-^-(CB+pYKZjGq4GHjkw-LQE|^}jGr>Hm`P)r|4w zCdQXOvfjiDSgroDJf-0nUrne@=W3qPaEz}e#Kv9CQyPx()x^@VGI!=F5#vkBQ^L2+ z#kU3H3w4N{Dk~;7ALoei)q+Y@BIBzWdasnH)Ql_(Qdwz+Dw&w4)Xd(%h}+DrVve}X z4q@d^j4wpv5$7L?<2l6niP_a<5*c5~u*40xnVV! z*5JgQ0WNPN?hGOiHgTL!w29;VTc`;sYOG+)2ivWQxj*xSfN zAcYkms9ZZR)I%=3eH}4?E)O9N+%(l$neYUTRd4+N)2Et%E)LbFj`7ts<^Q{Gk# zVxNjYet#1@qh_|jCibb37O_tSSZ?@EP0$B$`c6&ALK+xfYX&|C#0$9@VxN*($agyR zvzM^-nup+Z)G=QZ^dQ zhftn@;ja#_pmk8s8$1V5*)U9RV&n0PA?;p(4lFzz?27tdsZ{P0*(R zBg1!YX%pLQpI6$BOi}p%@SWiQo0^%ImF$8^v5zJ;j%RJSO|X941ix-z%%qv60mH4w zih4Zil(nFGz+a8){pEti<|S-*cnEL20et0MQByVxb}I`MgfJYnha~%rt6BC@V;SNs zPiHUE%6lpw4A9ADhN{PCrzyiILKYpUji@F2EriBa9MrY^uT=$vpd#7N>A|Q)=Bz@N zqm3yej##*wIA=H4ALeIDCtvEFc^@ej z$fBP`L~RM|R)AiS8@OriKz2RUi($vN={%<0uJlxI&t3v_H9^W4}khrU`2YhKuA&Dnj{JhLk+H?ua2fATw2(Iab4 zLWM>@uqK@`f%^esWWO8lw};qF3UWyuq^9I$-&$M z?ie|M?qOxwgbHtw%1cZG_4`8PA+@kB%NFDzA;M=Vgql3gJpzzOd`}0*GB{0J}02=uo5~rjA&aPKG6EaShL~ zyj7bEmL;;`dfSVMGfB4X6-_DxdsT~J$1h7GbAeLiSvr-t$fAjBITXWErEOx#P$eh= z$5Pbe3f0Hd8QEeW6SUHGonfhw7?v3WlmL!H8OBwf+vCG9ERQpb%Q^se4a2ZJ4t9DZ zF)V?|hxXIsqHlYSuXJPj>2Vp^`_f9c89p#EE7LIlUjk!+EI!|n1%@R&bI0O?Y1MOU zvXED-I@ko%o$R{^*#9LK^KVk*y2K*??NsDyLLRbAc_&zwSeu$31j`aUee<*6#kC+? 
z>bE?fBmwq(1^0QJT|^fDadrV~;AF5FLi?2Enozag$+B!hcGEybu4c9tBmC!B14qKH z&tVPxiCiuSW^gKj$C$v|*;NV+nM%CYinc8Bh8vdsr4bQoA7=*tkp~s~{bo`1+WD$CqHz=yGj7gIJ`S%TL3xKWHv7t$x^z{Z6fG9N_ZqDHp8=2f zkRnxeJKiB4v60<@v-7VP!JAHphN5rH99a7%dj_}ijrN7$5jU~*_Bp6wvJkaWRj6LM zWbAR)ff}i#h;$s{UhfBzhCuZ6OjU)q( z*t|5(vHJJiwhMgxBz1~6VB0co+acVx^Q*#jOJU2U-chja1a8}-@po51DE99uxLy1B zaqITghGFDRFvQi7Wd~vG4n(EYWw7aerKC-Nwztqg8uw$kWsifWatrFECihu4^@7vI z)A;D^fzewokVW4PzF7*aTrRu-jwji5r2$G_;K9dpyH3(3!nUVq-MD4DtCw*_3gV~T zj%d+zcqmJts<;ES?w4{a57WnCT-yNLftYC%2m2_)*mz~}RCqYa@RrHKlc4AlTsV>2 zc!Kp77j1l7S2V5>1y$#pz4qhqJBRSvhqUp#xz|oM9s(P`*>lI(2oC}_j*&b}KVP*> z!Yt_oQr^+x9c02(iG>z6t7_X;l@6r}%S87*j??%nweAEoaTm`T#$%=RjA?uO^?{ ze*!#oGuE5r!%TA7`qjnLV7V#az$L3`j8B5~PvX{JYj#x|V5?MhIAftx*FdZ~e4Ood zSsP%5dS`@ZXA_^vN>CFs*sE2Q3FWDjwKNxE9R(iVakj@C4r||SCck`_87lx-cU$;}TQgGh+(ykz#0ytpT_HLE828qcom9wGTvS{ANJ)zv>0$SiIZ3GaTH$Q2}AtdniIG&@URt zXl~IQ)+E8F_Q!usDxCuUPgF&$hFC+1oJSO=i3d06h-Nk_T+b<+3;s`X#WcdT%y&l2Yyqu`0r*xw}N;>W{ocJ zo6Y?1MVczjN=<=gNyRd7hR&L`y4y7Q8e$dYX>Qltsd-)Vpk}>hRmHgYdo-&xcWc&a z?!)y;Pd=b6)O^*OT(w4Xucj8gGVtvEnm05@`Rj80t*-c;<`Cw$!i$3DFpldkx@nH! zxNkR}AhzTxcg>6V+-tgjWA*&+Wgb`~SLp)KFJDK$d;|URx9FEg(Jz08eyP>@s5fBF zg@1tc^EaxHnnQg3Y-D43KEgkMmEP-e?|h)-0sOcBk#z?ev(~EbQx~e&pnu+r{`m*? zMfdH4uQTM#B3xZqau=@iRd0IZ3d}9}>O0iS)p@wm?fgn_cP|)xr6Jei!qu3PTwE#W zNDl5u2?Q-03MavZ3;1U+Lj)*E9J4 z$usyz9!bI--K_5Y2h__O)!x+@zCObp|-A~YeQ~j?W;f}npiy$iKp4u^+zl7j@_TD(UoBpwk=x? 
zU#N<|^KpF>`wV&iAvIeu@_CuIRYzgub2sg(POc%3^_l8m+ z(D=2kyG$pmKWx|f27chCIiPpf{PvJg?x7)kZEJPqp=~w0YM;>^(7bx+pyqdn6q+Vv z$)(}{wp->^|C7c4A$sRXE=IlZP@rXM!>c%qe^Zb*uJVS%ncZ-=e*;1jjtf z@hkIiTw?j_6~a|=EbK}Yj@gzlEzg6O zLA)uSu2p#Fv||bOryTRJ?*KFN8;rE+=7<4oD)@{Tn@=H=T0PL;c=&e><}wZ8SO zZ#CR#INv}l0m7t>-w6571smpE+p%ZHxLHG*t|tzf;df6a^uO#XXugmMp4d+YuWCFV z5qNAl;VQUbPqou;zwIiRYFvfs$hp3=x6LPMU1TxEX{&$%!X}UPxNF)Nr1VV+Je;kL zDVqq70peNNF^*nWsdcAssdYu9G4pms5?+VKGWp2v9Df);Tw1MzN+{F-W z(CaQx&V_&2n4nM7WtIO$5nW0ZKs3K~|A&rIhSicECa-4qz}Kh-`!!m#&{&P$Nmvz| z-<~T(rbj%R9-`@UxB6lFq+xP+T%I^P(d)!T;zBV^O%`RAs1WCh8NlS{i1T3eWN;Uv;klqj<#@#^LYQ;}Z)v7~{ogb+!TZHv8hB(Y1Kr zIm^<0qwv@AeKFW(?Sm$hW!1j%*sk6;4%@Z+$nPHm4`_q9Ud$15#f{<#=(DT?Tba(o ze#OH$3w>bub8r^i;i=iZv0PaFrLg+TVfC}LKH^5LFFdur&~)&FqMkp_nf;8lTJruF zjhXo*KPy|{R|wJ7@O@+Dk8scCDMh$^o#RY;0EIEvKCT!gf7o&42Sln@Yd3c|+ob=0 z5jlan#`V7;9(0&Ix@1$>B@sn!wbsE@htVE2Z!&uvZm?DE_z1RRAe8V5{_i<|_ zY{nbk%7n0sGGX&)uEHifq`NX<@4vvm#A6eBK4zS?=dtGR(1hRPAr#=jdviox zTxi#iv{#P>=XxF}gXY10Q z=(SQ4_Xbuvd!^=lUa8sf+}(0ykw@y}9V<0G>)BWBS>y|zsqHF~_3U5#_*~qnr=gWf zm6{ZjP^D%K`ioJeCgPtaQjx4-tD26szB2K?j^qiBV^u?|Ax!=i8~eRG^drAwF+d1N zKa$q^JwONsqka=it>$q+#ZG`LH6N(h2`KeG3RDb{S@S+TuSR6=gRQx-h9%pmeiL^; z^2@2%U+Iuvu11YpTYkA38Yo?rPGD1PCy-?kM%00`0frRMFYmu56JCb?TIffvLY^LV z_HUs+I^RuLwHsWheEa=ZAYZ@mko@<&t|<_8O{a2^eVgo?xb;r{-~ez#Lw)_v_q`q9 zbg_IR6)7C>DiCV-9*?bsP_%i9&CLr7WzzD ze@Va^|C(z*RUdJPHbCW}lBwKL^W>xQMh%n~Dxktp@#G>7!<`bSt%lB%5EBcOGQcZU z5#iuV)-&Pz*vcAaMKn8CJ^)v02(HL$0RuEg(K|g1xa6?G@N%2n5oK1hDp>6kP^Q7N zjIh_Yr-v8Uuv2Ci`FO{*sloN8YM34vbUk8Y0S?Wy<7u6Hl~uDNmKb>yU%802mS<~p z=+f9%xD;LqlYwvay#-k+D^6`2v+LX39rUV{W>Yb-`20Q z8VmrkD-~uLERM-s!vvr+9s6}wv;IJ6?EN~cSy(rOrUn^9tpDHxjK)lT!+Zg2f2K_) zybm7O`=zeJXBxx;e?X1+^B%$ryk6GcXVLZUd%f4p0%Q0$HM2k^d#st2dajwJn(qnP zhk#H}#cV9En4O4OIR#iJ)y(1?SnV~lEAH3K>Tj}I#GBNu!r7YHm5!R(<6UZItC+II z*_zoYz0}NBL02e-YG$o2yaqc6-6~>%_mSS+P4*nVX&Ew4xyZ&--E0kNo}+6}EnRzdrkaiCJr1#YK#iaP)w3s9f7DjEV;|_Kt%#9Vv9gvdY~O0BVc%NFMvdrJ zTTuhefEv6{h@#qx`_;46><6B0{~U2?U&A(YUJ4);&h|Qul~+ 
z13o6|9?iFQ;hqGtKsA@}v0Cgkmp~BUL03b`XE68b?OJDlX>h@LUW=}_GW7PK*Ykj< zw$(>gp>h(@J9rCp#y45IqM^VGuxi|D7NUNGb8Bs;`pDyEnehA>nXnsNvDK&SG54Tn z?dyX6?i-H*4c+vT2`c28o#_MQvb!JT9+D3(h}EVPWRxX~>oo(F%eDOro-7+69@2y; z^U;CfNLjF0rU_I|*Y+!TyG$-#&}3)>lyk)EWgh@JtxNY8Z)vAU!}XJSwc`XJn3I5E z#-Zjh38>%|!kb>o!LCX11osN_*RgyojtYB5yc=1WiRK&6C5I<&#Im*1U zbl{t5z&mFG?L60NPG2D{=arB1fM7lV409psAD7@2Yx)LhJ@Cs_(&Nf!%AP1Vq{$U` zD)Y;>i7zVml&zGqfn%-(!nuayo43-RlAe?vk@A3kZjm07HcOkNN2RS8=T7=A)KmUS zdPaI0RhrMCs`G$SS$43XOtV`oRvsxU5KEND%eG6;N-yBut?92xhooOid!(17{n9>Z zuk^C?iu5X8xtD%aIwFaZQaUUZNrjSHk|dS%I$jZGYm|nv(t-~($HW@t+hscOgz{9` zUkk2l-dBE9rj^Qo$sPyh`3;H_^+zmxue|K&FOkJ8(cQ8Gw+sY+_d z&)FYJA4n&~Pn8$S&WQg|epYs^piWs|c2WE<>6G**={@OP>9q8Y^k?ZWc(?EDe@Yjm zze^uUf0JsZkEQd{S?Qeg30?`9{grfG`ds=<`j^zKyj6BXY*G53zam|hE=m4>^*{fG z^rdta?+c!NTe>ARNcGZ9$tpETjgnci@ORj-?R)n~jvBbhukm;sR+XN+#rMJfXpRei zcD3~mR8w8?_pux|zD z>064dVw2b?nt^177F)!Ev_7>}?3DFOr^j)7sSe{@E4m_H7B7ikV4Ok4UpmGqvYvB# zoEy78#W?>csue#L&x>a<&XD4Bj&ZhG|LpWQ@9%yebByC@ zRXIJ*`rXACr=+MzEELtEgmD6kRgQ6PG`-^VIO)3&V4Qo87EQMULHvLLH5PXFRHj&YVYEpmFC_jfJDILnF_iw__NvJm48C|=|k zC$?#()8o9kOOA2o6cJvT280rsg5sHuaeSNNogQb;u4IgpQbg!xf*6N!1{cRW#v$I| z91}3&$0+acm0~Jv!VQN__=d;v-|DX@6gX;j95wT!P5LhsSvYES9KFns*6IJL$i>lH zZj_U_$v)*r|Ed2(u?IF5eEkA9>-tJsaBt&XFo_|ZS>&nOCT^q%7=@&9MssnY*N zaSTTVcl)?+@uQ{sKPz-N>gG6F!jB%)pH!U0QBTLw0)AA~*C@{5sITMbuldov`f|lZ z91U&e4+;mT)z| zsatF{6c_s5eQ853@*nUQ8~fhfvwmBtZ-O0v0Q#Q0yOg4fOL%4f#)49}B=TP_;@QEx zW2F>D)Z*_$kLXIhZu@#sjC%&pm77kMX5#Pn@VE5nnNpwIZa!IfeiF|MvoGQsRN=3p z>>H)N_yz_#nnTZNRmjirjb)`?1^3Fn*0{wBsy-57##qJju|fgU4+XYA#8idK^r1yn z=y8*)xz4tJw_Cmq`$xZfNxmEVjo%F|D8N4FJ9qgp>>mQlUx)pJ-^t`Bv0w3>hx`oo zOTNpOU&MaVce~^_u%Gvxtl$Cps*F|gp(WeXyMA^xJBFI~zuve3U;7!`ziwQ_{z)6V zaR&P>wgER!V!z3jbW?|Yu5HE5W7w~={pw}`_G@j0H+N&d%68)BHtd(%{&h1K`^7eP zGYk8LwgL4D>=m}8`l;BW1nXGRlQDA#V%QnVf%%p0Naa}-Pm5RY{RzJl8fzm zOBS{tSrpiwu}sDGee^lU*|Ai!K2F8CIZg%L`^f4H5-TO7! 
zwGKm7vkaul=3KufLel@IeofSm>EHEhdKj<&&;6R#@=TrpoV|~@PP-0$*w7OZd?7p9 z?k47N#GP3DUa!w0B=UhORNG~jmjH8L3rw*MbHBkZAe#QEn0sw6n0qCYTAat+%X`7x zD-r!WkGU&*!Q3kmn>vfRKh-PdUJ3R8F!=_Ka4rDm{!piLm&kMUq>ea;AIoT+#iIep@q-e5!d@wv!^%D6hB0&7&Vc@Z%8YL;ML3WTc)>P;(wcvV5Y z2?*~*g=SsxB50KO3QNmYYO8>P6JGH?{DEuiab(+5vEPjThj!c@RuDP%fxz8s(5Gh% zaQ7N^tWg1+9q2^kBf#B(mNdSC=as0P^ucy)3syo4;qH~ld=c(m$s#)FCRVXSh}j9r zz5PiQE3$OO-K&AnAy0^Ef-At?ktO81iGg3r1Yhv;G)G;9I&kz%h}Dnaaeb4ku^tAOSa7Ut`{^nT}7-3_JyYC<|A&US8H;^^F}>C(Bi68wK> zJGV}%^+T4oFPNj9JGWMX{~t!3Tg|s#=?DF|ahOXq&u%b={ICxEpX=c0Q`c5_1}!7_ z4BFjHU0Z9g7RF+(7{Hsm##TWqL5+T`xc-(upkFJ{GRt2b>R_1V&6#xln}N=o$+o`9 z`{Z?0tAghO+dCSJiKl>n-S5|01(q9G=+ANO2Q?G^wf|o-A^1ZIYvX^; z^-kt`fnBfnVw}le=6VO)e*X3T8hCb3*ZW7r{&c+?!BV$h??KJ>>%HGb*SngH<=49g zsQWdx4w?h@>wU~@zuv{>``0_pM%R114Ob6pftBci+Eq&io)auX9BL`LUhkKAuAHuS z1k?i2SLUBGA>)sz6M7WdL*Sqv*+p9Af}#8?c#&Re!al-8Nkfpdh*$fPzD%2jU{?jfB&?jhDG2(1 zkXSZ_>jx%r{lK+~`0O}f<1XNjlX~D_bWkPzz$);I+Oz``9l5s1MdvARw4a;WNH>-o$+Q3qQ;&a8s+S62B7g-quz36T?t3-x+1 z=0-fuI9U&8b8aho=@L}Q?ld}^b4%%^OHd_Lp~B>oxW5t%P02XSVf$)Phv=BB$4AKh zy1zaWtHr&hv3#}I+;p_{eAq}-F@7977FJ|6Jd8@FL}vdQn*rqoC9-MNY)A9&kWIt- z(EM)us^Qn>%nHI!*dIj5Q> z+DJKvR;IRIX_bioWAQ)ESlr;xzHg~w!RXC!zgHT3TZaX(Bg}_Q3@FaHDdSBvt^n=PR^_%jL2I%}$fl$2d+HVS$36_fbP3?iD z+6zmyx(?d6A57x4fa}1mO5*uk>du?2%|zG7m2y(QO+hVGly(U0UJ{h`-J()dxXNG| ztWbFAJYcIP!B%;^J^*tO*c`W2L!7Kip3cWBBngo`JU!I%Frdfx+VO`WV=%x4`H< zQZfb{=A>dlOfJEdN1nJ6>HuN#I%WX_fVw9kdu#n2RAG?6x8Ao^Z<{?}t7^$ z6~}A+JKL(XCR~5y_O~|BdAo0`e4){2U&G3vp7-flnc#xl?vQn;LwXtdx2x^`tq_^N z=#)tft%?`ZlSGfZS2lVfwY2_*`M3<`Dx{_vqWg+{g1D5{u%D}}Sb^;SPxEiOD=StY{_m))nC$RvzG6G^Eooll zA@fhlidEpOPT=#x+dSnrq^+1zJQ=k-38>`(hX7X%J}&&EI`$>_=%lO&eg{`p1UlHR ztXP4peulh)XC@sAT)m;V>Mt;QQvzcH<&coGYOYNOR*HB4fMV@NBf(4Y$TNyHUZ8Gu6E-V)+MqVz)%nw9|Lc}O^+C94YU+?y9rNb+2@83{hLL9 zTP=TF@#&M@p_wK(Aw@?Yg55C+nn`>A9wh}sk2F8-BB14 zte>k$g)Jc#(N`=B-&0#V6V)~8yys<6nn&%dxk+WosIRGFKbTEvM=qz~)oJaoPMw=l zHkrTLY!+e;UY=EyQJy?EK^w15d5qdO>)V&QmE>PDJ9dy$tBapX~_5I0!OI?!X4JB2j?p8eV;OK=HB<)=KT#Z 
zuvLqp_FsYA3B_+T3m6zS`@pcF6)qOGk@RdT*y(OP04iB6tehPE0LO#LUWF{nzZ!Ob zfO+{A>{c0cjxInS;YmBMChOOzbHO9wC0F5tWsbQT_4UGUOu^70-w7RZ+1>hFj{@IB zZ}4bB%K|YU_xgmvGe7MeUG^BXVf)^0^@8^Q-J@}sEi59PU54SZ*pG0;j^>BsyM&=;jy%O7=%j%RHvcYr zo1ENNC))jr#r$X(;*+W9Cg54R6nE}s>q<@k(n@Ha ztp&DL$<|wQG;!EpXv))vL)`>-Qqxk+%F?xEIa&d;Vmnas3h4jGY3kUU;KS3(avXg4 zah!8q33juayrn)BKFh`CJ`Hi$f7%=dJrv|Hn`b}|1zF7Iwa`PUVjna=4{aZ;a!qlX z3bwjw1h%W1`eVDi$rIb9O_l6TWDoqo^{zmTo4x<%P2BHiY=q)<^l2-hhJqX#;_q^x z^HogIbYC_6;$LKfANZ;o#M^%0R||M3-a@Q{hhmAVaPa|t&(AC}xoOY~p}?@RawH&s4bRsjCImg}I5Yu)Fqd2_Eoc`-=mhFBPKgi#&D(be=u~H!)E$ z0CU7{feXJcD6c}#Cp(w05?V6>`1N~m1(NZ*76TsuW`*DRQ?iT!z3>++plA>y52e|I zo(+f)n;RXR#0oYHpVY=ltbq0{a2U>1uV7)_dNv?yg7f(n`uJ7jY`)P*oW!%sfPMbn zRXBkC%b6a+>Ujs`$luO-F5UHRV@?5>V81gBFjA)+nf_{P*5Lp|e>L*&INOuhx?$SI zifr)8X3E2PbT^!{b%(3n^!^g5Yyc6h^FS{QcdYJ|@$f|NoW5Aw2V-3i$C*8k>VG$V znqqkS+SZ78n^>RD#`#IT@cVK910#&2v-VOL*22;ARzm5GR?3*UgUg0sEyVp{Go}6Q zGOa_DH4em`la8*EftsPE;l;25CBwuj7RT?6O<0NRSbs1RDiFaVe{1^-kfgrolu^MN zEtPCC&bkM9qBN_QqMPDOc}qhDTWjsA3js_KNaPJv3mn2VsIy$bHK=+=CLB5@6VCq5 zRXF*qz1C{{PWa)A+Whb5;eVgt{x|imvF3WAcTF5tsQF%mF8iNv>ssRhHg-mVJACog zzQL*x9d+RvQrEp-O+1eGhBgzL?Ehh$17A$wi1QlYi}4(Bp2QJnLYot?PF1jGMC!{G z9ze&&$m2MwND-MQuHjFK$d>iet)>EqpR*BJ67N>i535P%PBn;)5UD3|+Z&=AtB7y6 zoYg#12Ofk--Tig)cxo-wsiu;pS_o}UD4v8U{YluU3bu(y>HUG|NQlI6{jI-8mm13O z(>(A*bzcRmH@m~zzHX-Y{99c8Y~B~KvqgIxcw;@H^d}nVoV7*i6_`J5-u6YVaX;+8 zP^$}lz9$!hi}5Qr!EHgu9#$3L(bU0;x>xy26V&z&RyB4{dwf55+Dm<@ zi&e0X2l`jZI2WJ%?EiZmtUB`XiH(n|?_lFEyU)hQ)%SAXG|sZ|d+T6T4ur&6Hhym% ztjZA?-|t{Gw!MSZbymaUaa?@|6Cda^nD}({(;Q6v3g{=&{k$1#_f-}OgoZj;RbdUI z4pvnxv#G6vRXH<*gI^Bqq)NovbnVx5f}8c;bEC9Fq!|2lWyd(}VD!!zY4371GC+D!Pg)w^KiePM`sYK? 
zRVP>q1(VsokQ6b77KTaGS;wxC5iU(akBh{DC7M~{Y;A;;4*kVwFrP^qV@%w-r-3p30Mua?7d(Wz?4{CU;u2|2p+X`yI;mnT zKo_0);F1%YI=`G0FqRc&NyId_YhtVft9-3ggIC}F?c-pH=M?&%&y|R8PQ2(x!5x1D zJn}rS$WP!sR;WkbI=mD7@FxnlNyIaMR@x3m`O|o}304lQXk|I9+)Vk^*1aRfWLI!I zR|z{;1@E~U>KHYslRg1;j2E@ffPY@Wwn2CAYHRig|56k04s)FqAa+l^6$xhZGfsP) zhF#CYn|KcP>RIqoeNHSa;$^F>eY|CU1= zZ@SatOx!gK7$>HP6nG2W^EABb@V6%BxQ83I*baFNlqrwCm3 zaxe=?nInZObL>_OEiLC>Ke&*H^k>}phhe8;OlgJV*h+rvZ-yO;l+sGau|@pYpAF9_ z=9E@Bj?L!Bstr#nmX%gJj!ogmv{38FDXnoFo5+v-*04$OMCl2~v61{(p`j9*%=vQv z(!KEA)0z*$GpYdhPN~7SVFp{FCI$O4+Y$X7Fj3)u+J38B1|^OPSk<=-Ii+Mhi)~ut z6Q$%8t8IHV2TPS@g@`%q{Ll*ay*U-HoHP&BTxA>YdC13LpK~u?o`U^D_xcvh!T!N} z(5AzF1y}(&*e|&!lRts|qI<3dDe~zVY4X19yw3{u7J3p*sec0YIp4gXJ_q~f&713& zVZYt{YW*DSx0%c8Q?P&B{82sO^PA0$^+U1GGkZ7qW53=!x?!279NIT?u>Ibef^DmH z47PWyL$UqZ>W^)+wSrA=$-!TZ)&}q|n;V|M-#4sxp^tR4;Tq>{(r=p&?}K=oEuY{j zw{-1{gvbzU#wFyQuidoL3Sj-RA@GkfVJf&siQB>Zdm5M<9+&VqQjFSsJPzP-W1*X{ zw7^yPI^7=U5a05CtTU%utTW5$SjWv`My!+E9_w6Z;rEW0BU1|I1~Dt~t8D@Jr3HvQ zh*en*g+S5}MEv**^{Fd|?i7!A>=kr~c`8wz*2bzV2gmF_tFjzj{Scou6RWZubzWUr zl@;ilf*cw&2P<)QW2_YO1S2P=DFVOeWmiEtt7jkkE%?~J|4Tl0e7ldG(4&t%6`A|4 zKK6CC9C80I;bWHr@%y~fZXtbI9{Zx!gnLRZ|-4K)C&0Aw)%s6l5R zyPWOCj3q=HITB>;q2GR7CWICOKbV7h`bXV_<1>4n3t#^)&V~E&)+9a`;ycA#6Kt+;D#YBg!Q*(uO^{|deD3J0t)}ZAbH(T%pcw6U_uDOjVum^t8jYkEO-j+x z+7QZVcm+d~(T6KWd#mG&l;=1N-e;R$^nJDHxroF5om#X4YSE;;*rpfl|6vB#S8P*) zCVj;$`76kc&C^Dq^C)SHj)ki3TrH_I#z3cW9JG+;s~12|mlUN*7u_AIcjf5nmI*a< z&r(tuM?X&}Mqg)fxGFzQFD)?9wah{e1d*U+ zen|>+$7Ug4vH)s8^Ge{;!k(0~)j$TWBIYI4=&Q^fy%tAgOoWO^Il4Q#$(H~nC3I*5 zP*UP=yabdK_<;4dKn?l>Jwl9&?^@2zm_4zkzHhEz!KmuLuNqyBwX02M6nB49S3C*W zAQ074Koo|-uDxw0{>O2>Oz?*8#0P1p<=E&ZQOvjR*?pZ&Y=SMt!v3=hQs_1uh z&p{Rau3<+%HSd{snl#b-B9|AaUZxILjZjBormsFtn!2G-izaq6;eKJHE{Y7^a=dy9 zJdx7{r0=!3j1mk9(<;W<+uE`VId#Yeg?z%8JuzFA>&;B>6DSU z#zOE}Gwv+Iee(nFf=BmFta&;4S%G`T3N)=86=q@bD{L!1 zlRiz+LZ2*e@xnH}WiRfGDX{M*c-M4%GC!W&QUM)SdLGBWVJx2Kqx#<7<+&X70c6kn zjIzPYe$x`(C6iFWsw@?#h7h6-AamIEfAabN`4DK$kd{vx{@(Cm!(Xt9?6GuTMI`)h 
zZ~X5+;JvaI6YjB(fYD^Y{|?*eDx4{m2|HhN6&B_q_qg6osNdi!1Ymz4#Y0$#2k>EqFj+@7bo z#{Pmh_6Y6=1FtiP#vPB`3|LsBB`gw9u72HX8Y{F=fEcCq%6z?G5j? z&h_O$neDh=1^Pmg53=eHGNJxunK102tFXP=o(IUroC-reDm_%LdcaidT<3H#4NVSu%kzD zg%D#|XMlA|_`@X3-^pb&aPLOL@+RZhAVg5)2arX*!ruFD4`iaQuy_93RUVar_i}$^ z5Rj3DP#LOVU$7V9QxefGz3HU+?}hS zn1FG|AiG6;R1-8%$RFvL;Q(sV41^ST#z5H0$GOIT;O}L^mHnSXkv%R}4^`}q8F z=#M7YZR7v@wN&M7KVs~zwN%#__5Y)|>gU!{nUMegxwSP|8J^9hH7hIo0{8dYq0(|kv+?Af*+3f z+Qfd~@pZ>qs&eLsE4&Bsb?4d|6Z;HWx>QR=2szoa>2Jt{guTF8l2Bi~+D%A}gGN05 zVLe)zcy4d*faIx!6!aG3pvfPrlDF7X_PKX z90MP)==eUlOcEm2t7aPp!Mfe>9jKb63&3BOJc*Tat~H@9L=~(G0yifRmTUuT&qy5I zTxVzI5K|{y6%X#&`(Jyyp0i}b7lrDq$^Yx%DWusw99D3IDi_?FbCz}5N%)^{*4ddl z#N3$xuFiN=YHY-Dw|CQ|bk!{Ub*awI*O>vv^LhuPbpsf!x!UQxzBNN$jtbX}TF=)> z5kDK5g>{I@W^1Q`-7{5{sxm?4d3$-OqHpPXZWa69e$$(N4^_@zW7&xBQ*RZ4qcgQT z+T4pabzOcMRzd2-FcY}`3=upnsLlpuPX(@TV#6B8cOR)2j@zH}6ZlADkH^E>PJ|aS z0X-&yvd8n0S}kGSN9s|DPq=j~Kbns;;&>EBiWG?@3{NIIjE{7|64;&D-Q!3v)Q0kL zVvY~MIOGKdiTyB6ARosB{K2kByIe82bVJ+u^l_X|(>3-sV%`~dvW%m23l+Hzt1Ls- z{F3K2R>ogbp!)TaXNF=UUY*IU^D%yWx)C-RY=OR|^KqZO-S~iJFz&Ca#^o9rVviAx z52E_D9Nf(Lng>dkmp!OGYRg8?m34#3Ga!9K+{N%@D4*;umla)MHpGpoxGOEKuAFTX z$1Qa_6brl>l5lShvkh(-h5aDg)P}*>_qVNS=!<=z?FBqHf#c8@Ta5(>bra@{59W(W zR%AkrLkp^5P0%=G4K|0ap$T=ncD~K+PfRSJ8(M+;q{K3#8MyU^&O+ne-e3USF3U+KHT>pe(K7KKwAlKM<{N7iP zi3)8d{^vBzZiR)qWtB5IloXOCPT>8r%GnijLP<_}pNKzy^U!Nc%);<~>9fp2K*24+ zpM65dob~k|uJwoCbpPAQioL}NB`%}g#aWkPOI*gJUXsB>pIq!A#+7)A0VVzN24*kL zTa_JOoKWJWoP>A#C=-i)l}W{YmC40^O6U4Gjjm0>r%o=i>u|^8tRx_(O*uFt>i?5* z$82kI*IZ`dKf25Nz}ugISoUJ$`wii^=dzmo5C;&4Z5&>yK>mL|-_LJ)y`3p^-8uLexj~=7wGJ37Y-*f2a93 ziEYZjdj3xw-7o(ZvBP|y3y3g2XgQCyb;l8zuz$C!pqmJtj^%E`*Q5Vad?|Q{J=CSw zu^${P{GaPf;q3hEx^yEuhFn8;U#gC2ISap>sjYTj3XvzTOE&>|Ctu10bx*S5>wsWf zW`3y5u=`SqM!PR1Z*23Wj8OQ@#I^jQ8CM^b879P!UF(#~SspC+m2P$EKslhxNWK(u zVZ1K=>mr$u`XY1#$GZwsm$(UcVmk7H+`tF&jVDGBG4+TMG(qi;^(@`f&Ik%r*U0-3 zGCiKyKudi+unqQ+ffLk=KBuZ@ea=v|U7>R|x2_M!Cvr3)mUI4y5oBWZoL3+0v$O*v zsE!GaYyWc@K}J-DIcr~fV+0wIo4(Ho8q>}Qs$+U!1v*&3B47oi&1qt1nu!r)V#&>R 
zZB9h`V5sBj!^61D{84*Bj3DszEyM@{7Her^1mWs~8 z3Dta#x||I_WfM`eNAZ3VS4+>7hpAQqt#lFZSLeL{b*g)k$^+FoA*kv@e1PiouGKkp z>}y256l49|>Kr4hvi{ua93xXA`t4rTSI2&XZ%K1wKlP?=8EIpFu1sX7O^thuc^$H>lGJ&KI%BdduGf{uX^5kObu z5wiws$9Y84ovU+9ECTb8@>k{PM^CYqOeGVxJO_Vlw5u>;k(+Q~gnc~_B6lr3X%fPy z<^k73klmH#9*_qoCWutu*H1a1l&br@Ir`YCuFu%Lu5Z%6sq6EIOe{!3{U_D-C5s`E zDX3HM|1bd+3W)j8&(V%K(p>QnvgK=_VB(6IvLoFM^`Gw0Fu=XWBXFwiqiAC;e3B4d z9sBAhX1P~7n53FCV(||a2V#XE1i#0~-gG+4J;lK!P3^)YH9~TkRiJ9ib;;h zp^g>s2pnu6MBr502fy4(wS6Ww#7dQY=gj;zj5pb?Y*+0QeA8+lOoz zqVelWnV@(UT3#{euZW%w6%lqD0>5L?SEjzVw|Nqx=yllRuGdC%Nz4yIoc(zIV04}f z*REEL0$Qx|nV_Zk*&q+5oXGef)f7XZDh1JbdL1$&QwJjcuJWe(mk`9Cfs zHv?I$qli1qQS}mu=sHHVR+Yqi&MoJ)T$*bhL4}-TiMN67h+28=Kf7i&= zsBk~5=vTVh5ffH<*Rcx3)?47`$94CuJ>l8zIt2?0uHADP z*K065{edFY+k#n^-Jg#$7F7fGjtk2a!%Np-wnB#)*Qkyq<2U~rD*aX7i_mXj4Ccv> z^g1>UM`zsGVazTnDp$S^rpNBdw! zC_s%NW;qmKF0(vv>!&u1!=3P^ZD9kUSjTOT0jEL^$@VJN&-A8syub8feU4>0`b+zv zzx1y$KPa5c zm);SG-f)T9U|kCCsxY4*DS}tzXL$#st`_~LIj6s44ccVjNMOsJ4wnesLf79qdo7y( z2PwQVa+BiGMFL`As=iOqrlLk{uAHi^sRDlr?w1kh_SUg(&4|ue*@%k6eAX&bFmos> zhHeSw&LH3*^KifPkm>IciOO+g`j4k|uB?J@)KjLvN4#e?&-BZV1md3SSY1VS{cWtb z(~uo?<7(^l9pe;J@Qv+r`Y(uTHNd+J$n3A=mU*#d0%j-btY4jc6n41;`9}*ZHu9pQ zzjhRpyCu&f+q-VzZ_WUFrWJNSvVbR7yCN?1?T2s^(ga9^j8LA6>rNShLn21UZ;D?#Nm2(Ww@~~o&|gPia|QU z0i4bxs)mTJkx5v`ueP50?f2=_DdJM>Mf$R1Z)NF$puO713id=EOqO4fG7M<^C}MA z4UwPjG_Rh*ub_Fg7F`KwUK!b;mX7nvi0c2gd1ZvkqJ3UPfVlu92#j2sSAR#R`nGu$ z0|iCDi^Xi`E{=AZ!Xj##(KkS1$vRIJs9Q zr022qWlE2`{u@0GBfFCi zZ<6;0JJ-52e-Joz1)dS$F@)vE!2djuFGLJbWg7Z}dE@Deeqp!I_@LtYZvGA*RRa9Z zm^$he7J!V8KXN^rR8jCN;p;(lW)O1LVW_?y1{JNLD(c~F&-CQN*Yy9oF6DEJu0NX$^bJ=%ivRV#DIWghIZGan7u8X=dL%r}aj4K94G(sL>M^{h6}mR}j?YB42HB#z zBzT{bkh__JjL#-V#^+JQIh(bUdB*1&@;$j)5Ab_j(MweZ7T#mVt1<8k@=6e!*;bUUE{m6^alb%N*}a0ImO0&# z?}4wkBi}vrNbm9YZLJLk27Uim`4sz$X+XZc#RZpU0r^&3@)6TZ78Lu6vrGDlA??Vw zym(HDpE3jQ4p7c5?x$1~2P)?k2X#ljd%Ql$we#_*3yRvhQU+zufi?gk-v-_{$iP}# zyfv4Q^Ck|#C3gJ3UE~Si4s1_1f+OZ8ud4Ufq`|vi*mANV0qb9F%e(co4xVkPs}IHg 
zOv{gWhY726SOe`(x4hbb^9Loy_pq+{^L1@0Iv*O~JB4Dq(CUrte5(=s?r%{)VF21j zy$d_C35>`o;}d~@JY9bUK2w^;5B4h#j7S3%W@F?N<)7fFAJFm7u-;9!p6!&cS_Ll6 z*U<5FNw;$$4Kq<29s>SBkcU8dEy_a^7n1DL)b&lK5U^@?rbm{^?gnsW2p?^5fk7U` z?UPJ7P#dA#D-Rb(YWu5%h!E8xBdKG(@8g5*J#W%>yx`kU6%V^KqRy_BMLJozoc}^v zSY(qzxlI}hWeC!{+Nhe)Zkg6c`kx0A(iEqf2GxDl{>IO zt;bVUGw}D>IC_^rsGbX2Kz%DO;gYTIVd}s+aGJ=Xs1UB z*%gaXCLk6bhu-aD#8DV!G#}-o#vx9R@@;-NW_U!=eYG!H@etmFz<{cEKc~mJm>-03 zf{X4eet8u8ImX!y-2c&bF6JIZRO!a{^LLQP&)LR!-HVDx+HKEnesmdZ&t|9vfT;=F zbEIjNCJMHvsOcfto;=u|^{_og=GL^VbQSE*Oo!buu;-iQ-EB^XY@4%)=hj;KP4|D1 zH-6Df*c>m|9M~S%9B*+}$^2rnJicOZyUm$hEHCM+oP&4!!}8p>JI-a>l56MTQ|A{^ zAKW?UvmKNzM?ThWb1nhnhL3CWg3Z~4*q>}p?sqQoi4L3d9&+F(>%BBHVRM3QCmZ5n zOVqXx>!Yw&+Uo0x3wp@r4pf!A@+iK48At)n9oucYZ*vAUM#0X6g5_#p!Hr(n1~wYm zI#djh%{gT4-_7P610oxYnt|uI&6&w_sY{RKtGC|5i9m#B0;fqZLP)|QjQ46tNk z)&*FMsLubEu0OBQ?-*GjHwe5n7%Yg+{3ZkNBYRHH#M~@|xvzaeCcL>G{NF(I&x1~Q zcHg#GYWJddIOhH$bcQ_c25l6=rvPt$~FxH($kyTj=KvH-4*rhnEN!2r6!Yq`@s~4H#!a8XadK0NMXM->OPgD?gr+^ zxetW8Qzv^n>VB2QIAV5Z74=s2g1YO$EOfS>XlgI0yB?lJn0$IW>OR$BS+B5vA||JK zaoVA%XJogyq8_fT*^as=fP0gOF4D3%_D+G{B1yg_^!D-V1|gl1=L+^?u5G=zz(qE?nbaIKWo_X9OekL z7SDX{Dx3{>6DIp~%-aV4hvaSH=R1@fI_7P!un{=JKLxw#P02xzS{Y|!?B4RWdiFkI zZ1hz`51}Av$;Yu9?@_LL7koaOtMxU;w|v&geF>JBX1N zKPwYV&{EX+f&UoqCQSG07!&_b==Me*|6yBvdusu7{ z6LeQt6OVzFJ;%U3MpMGA5A*FYaB7zrSdYqd=VM?^zC8xc=n@0#!NKk!2JYp%?Bg+T z1pHyD(HMptWKi}{c(QtQcy=~E+*C`^Z!o;!`!S+FA0rLS1uz>OSoa*D2kYC<+(MkW z8FzAwvk(t&Xg@lOAI(BMJiVRic!eeNqYI2i7SZG`_byf7o>|<^aW zTbTw2(TaW|Rraxv2SuI)LI~d3fQD^e`|IqrpjxEF1xLqJgDx9shK1 zts(>bJj!GY*I!|4fMO6M(fJ%>lOxA?Jgsw%QI9I@{!Yh3XY+?(J`DyVv2%t|&%Ot% zy`&u3-?%n5V4o(JUhWKie$PEZ^z1X#q0QrVeYk!rsx|G5#4GG3;?Y;IzSmkja1N$m zjtaI^FcJ;y9%{&^l%|&Hp-B<bS9rMU3JU+s`0Y5^| z#<#d?zGPoOlg|~t>^Q`Xhnn9!m5pd%Va>>rHwjT^!A8uNkJG%0|F?sG6LWJaR@Z;x z=&PKKnC^W6_gsF{`G(J)0yphJS7DGBdWwy66V|(U2=BLZf&1sYYZ?;&ZU%9Ix2L-o zkY?2+qbHdE8J+>q=Sj~G(+7c{#?@iRQ`K}9*R0BfW|d6mhbqg^y8GI&)9`%z>ESBU zhoxQ!oi$=<&6%eCgM*zmog*N9Z~y2@9i&M=cDg!6Jz0IQ^~Y>ID?lYZWnPaera2T~ 
zWmtc-h}NlBhVu%rpE?v^HC+^7^=vabA~<_@ROa`?Jr)48vtu0*ts5EeS!VLPV>&9| zo}A}i{Ew`nG|$>6xT32J87l*GYc{35c^MWE`GJ-4SllU_n^KA+&=ujw*x1r3%E`r3 zl~c-qoS8@d+q=ctnx}_&%~Qv38iBjQ1NTY#rATaNUh)t}l#Ih&5mn+P_G`aeqKikC zcq?OoGWaS-fwwyvyxlRy{@oGG9&N~|+OfD>#uas}_ZsiGTWXMcp*mfvC=3+|*%T<0%1&o!?kuZVZvL(BkV^A@7W z&+CoMYLV$aZP)_t!17G=bX$R5d$D%jK<)(GH$Tc8HKO|vH!9mT*h4q^l=&u6o#!BV z2xn;diUUj0Z5OKBUQk{~|J~b?Uq5ltEqBiV(F zL`eUBh!_TpZZa|~nFSH*DX0`>s1$`hRzB795%VU+C=^aM_9>iVbm2_;B<%)dd|lCv z-OkqPdz-nsi&NphMQQa=|H~}*(yxIUySiu5=+t0|-tP=1j_$Jnh z_PF9AcnW%890sV5e2Y1g$&rVSTyO(03FIRXQ)V}K=^J2aDWj{00zfL(zb{~I&zL_4 z?vVJHOsHLocoL5bA#TEmA6$g`d%&RnKjPo$*{3|(?Cjrw3x{a)m+^0O%+T`l{Tm$= z*nhr%qhopSkh}Xgm)X!Lm-xM^q`8PUnCo|#fqCfM|7R9oElZ*-{ar*E>ovAcf*q!85>t>90*4IR$O0`l7<+=M;f zaeEE@j6a|jTZkm)B;kC6Hu}O|2P>yh^zJ!b9HAcsz4-_+4(!FH`G_MT2Ee;1^7BT` ze~fpiGE@gu4X(x+s3xskKd5H}sngX{)Kk^7)pB*DDhgT>gVm&;L45pSYO3lVTQNla z-|0iS>OHAtkZQ&&iaFveu9&eST@bV7=~`00Z>z+fgPc#aHe9Vo{|NGVV!WSM%!1ED zaYd-^GMfneWG;Lj=cC-09Z{|(t#g#CV_!QRq;ovS#lY&y7#2KEee0baWts++}Y ztw{;yGCP3?_}94i-r%|bW8oKn+B~IXB36JI;?$Bnwv8?S20nSV@U&7(Gy(7Jy>0V zMn1=yKYSF3B<-I&XPdU^oNf9J6Ii?Zz}nTbM9%!TV^KOVBWYfrM3n4pzJqZS&7>dK z5sSjQg%#x#V&LIWoug}e6PQ(%?JCxZmp4KqdyY)VTjVMnf{y6(euA(q8hX#*Djr8I z_G<9UD!#SPW#TJdHvMVgVY$Cnh}fAv0Nk=6xuk`X87V}OBFa#0l<1ad`rSV)LgYZr zUb#;}XaTBseU^g}Jcu)b2da{c0jfzl^a;AH^M%3yYFI;n7$wx*uf-*elOj`3^|(}Z zd(N^HdC(E6EY(V2N|X^jdpbcv&%F;_Bh{GalEPk%yCc1GJuYQAQ{}6``W3`1px&84 zy%!_fxdJnPCGe(I@K+wxy2j6ydq7#n6EzD|!F#ZE>jQdr0BQs;qV9G%&*RZ|zRWa; zw22e!e2wf8ULzY1eq!hPSXh5+Sof;d9{t-FYN!ja29>1A5!Ebs`Q>?_fAnS0MMRMlE z4x<4KSPxXa&cvNE-6luXtDYIG^T4Fc0*f;9Rj_d3A;sE`wr+iRw0zaknHo7qispg$ zyL8M!bGATekskTBWX$OIS_Z@FZw1p}0jgUK=uPtws#y)}t(F&1-AXLI68KFawKV%D zTL;6cCtLesn_$(mJin!EOQ1!~l6VB?|wHxPHlXzNAJ z4|pz^Dq7#1QL|nqyfNQZ*!iTJ@V!5{mSLD--^he#+hf=N44q}zAYL00?9)+a`7&GP zsEu%zS=k#pufw|VbJ2Mn`v2WW=kMz*UuG`6HUgMDk4?ide};ht5)NB;sHqK|&ja69 z$Myfd;QD`r&V%96Oz1oiLLeS(7BQhI3-63=(sTVk_<7y*3E)$}UXuP_=c+0l^#APW zJbB=hzdMOo?D|~vHD@kD<6!9j-L~68QQlc^{Xa(wQrDyY-(~ho>Hq1F8+SHe*PGtD 
z4pDDU`hS<1UH^}=wRZhKy+i*G2z$H!pMjmU#NY~({@)kv`hNzf$ddk_0TF(i7Nm~r z|LIU!3xo<$I`02&745kHt+@ZY>Hq1#5U|%z8BirabD?ITOfZAL=yKOZs2d3VzZQE; zu9jD!&vOmxDtDw$)Vkaqw$4rG8`t-CveyuOlHB7Xcy%LvQE8b0rG^BohpFiC>;lw^ zdOW)nx}xKO7kVDZ)E>}WPXJ<-mp@oF6dGs&g+WkB>jzylLa>6-=V+jM67Tbz0Cac~ z(BW*k8dXv|(sgWVvk)WPQs&d>N^q;~o|_9ix21@+gh zR17r468Ue@9uKF{DPz7bR+YB?7(v+{;;<4I(G^IK@C`a3@%^j$u3}ipsA4xUyu@AX z+m87}6i1fGlp|2b?Wv3^_EJUzB^?Qr)H#IovDz4X@~9%JkQo8Jh@k99aWuz#;2pse z)3GB~7hpMGungq=(QRWyh?A@B)Y`#VZ3h)=((E~PXh>v-0C&)_0<(vcWAt})?8uni z@!ZfDTYx(x@|?AcZWvG~1FL9yw!sB6IKg_OAprYW>pKm@upetZ*g(I)0Cn1V995!U z6=x-si7+V_T|d4K)?h>(`dI715^|Fd10tnkI(Wc^w@qRy)-7)<9j)6@9>TsOrux zm!J%|%HiutUh%q1tQ>K8>aD{+iT8Fo-_CnW?ZSK0qW_=sytm)x+j(!9U3hO=uq>VD zz3tAo_qojK(&thO#XD#DVL6;1){*n(C)!o4v|KmnA&x-H!54h6)42=6xtnok9n?}J zuNexet0N8xomClsWf|CTlND5ZMse3A_TPv#XWUU3_22`$%e_kFuvq=g^Pm~1hcaWP zW-j~!Kl3v92Rd~3mTTsfW|l2O)U^qIo(?R&F!|?f3G(-&zy||+r*TRHvAgy+t^)2x z`Tb{syBXN-#zH*TLf6j=+d(b({}%d&8FDT9%-=?)QOo>V7`#Ti7N8CckG4#s4hn$& zT{vNSCRy~P064;iOe5~VmdoTntda?*pt1Qui;M6KxTV>6Tncg%=35;16#6xomg?_@ zyf9ExAb$b<8r<)OZ8#wLPVy;$%2B}pagt`Fc97&Pj+76SXeIF~7^+35=({gJ=Px~M z@WWqk>L>Lr43zpy2c;-5QDdZNC?buN1{V&KhDt-ENGU=JN5_UR zX@qo0ibrj2LP4T5Ng4%>lrhq1X`&P>O+cT9@lu@hnluxdZRyf1DXDO>l!7h}Gtir1 z8hSNMl~SbwX|J>o*%{CB!_p$r|C~}W#A^kJ*A63IQzBjy(Q)Bn+_R?|Gs-U`K05$wO*7~VMDD4#zQo;{<@EZM zzH2T1_ZswCcu-u0?glGh>$70%weTwxPLC3`D-)wELT-P7I3N8D=3*2DALSWa54ub} z#%IDU*)Gy$>aNM8Mc*Lj^uC@nijr zIf|sx*Br;L@nasw0(5a&i;6tTeyy3iw`?EK9X)#)eG6Y?t%g;K*`-Pz&t76@`2T1& zELSWk6&=Uk;m2+m^i0De*-eNiKeKH`B&%Z=Y}+)Wu&=c}t4YHCtZi?}PW|lC!^dLe z`^9}FuZr-zZ7=DTU?s=eY}>Ehq!GlerQ6D$)%H=ls9YuBhd>c^Y`J;1=5yu(9>fyt ze*lL*F(Vw=0Uf%axygTDzXaAP!F-~AHukaRi}gv^k2T+|rzkka9L%HO5#UZx6dYz= zisxE(60xrqy>E5U9vy|hD$pU?P2QKszr)}aP@cOd(3JE!etkN)nIP8(QqC%t=LL7ZQTmws1jwu!6J^kQcJfa!_PGgi(h#)5XP%!2P zbKSNuRP_vpHxLUBQeGW=3d=gM9yh7hgT?fyYBR6YIdi)2?K)rL##4XAQCzicJZgEy zfnC1^dTnXEw<6FbaH5W59069!4c6*2RQ148eLNjZCioK{ 
zsy{9ez@qOXZbF}8LY;_N|0rzNX4o#O@Yw>}wH3B&9dM>}?F+c_tHHF_DSS)w_ZHWgXCpzJB!`Xo1vb9$5u`SBPf4j2=uxQz$fJ}7$} zA0@bHxYMIl=Z}EZh$iBNsp((aK^{r`+Rli`sZG4GQ5mM(-{INP*DGYEFzOw;<3EJIA` zqrZujUv4$+$`v=1Y=kX{ZCbYEN2vYY(d`b2m{5zU?rG1w1l z9@#Jm`$5g}24C`&nsf17%S?^F*cuvjP+pF~UmB=Okx%nxBeACl8zWCA09wlvVH<9L z0w2gm$Z^No8br=ErIJNFCn?b$uSF>l?8#NDNIH2n;z~RH#RyqHig53=beC$UI&p@6Xlo zNk#mupRanN9vb4eS9^Lp)WazU(xx9i53%yzg5H$0!=P?YwKL}|8MyajRiwsE8Egf= zUtdy?GB@dmQ#K?^J`etXA29GzJp?|r2<3i$Kjjnq0)N@zPcVtDLSBg15h8PW$Bhw zY||{{wNJO`fDnFzoW^R;Z;&5(75Nb@dkgsSN_jovUEBpteKDsyWq zR7jDVsE7!Nh&RM>gwYVqlFE$Kh&3}aGqW-@Gb?ikW(E*1ac6FTwr;g;TXflKD-F%m z%+hUJnHiYDO#QHJ+y36?^UQD)e!ub6{=Sz#W`^ehA7|z{pYu8AectDNR&V9(Bq2`Z zCrxVcGEON&qz$hASoCvc7l(kK6wLLrpm&eiio{Zi@R61cZwi2&MqH%n)jp`?1!B51 zO;(IlM`5}ps(u$me1{O9aIT#-#ZX{pCnfX9Hw9Kn+@w*c?xny2DT3~!l ztgb`UNQ%c_V)hN5(@j8c!Nf-tWFc*Z{`dStAb>NWt?)20@u96C*3t}23Empb)dPcq zYbzXI2;VjlJ#qOmp~a4`FK$<{uAEJ}Ie*vlSxgpy>=T0OUMLMIs}o%rqhlCo=2*1S?;U%=p*h{(4Le9t`*Yn&@3f<&=BV=ueVUG?2xu$xH@zsTG%>t+1< zK$Wa46uVfq_h_)DWK}VSVBGOyuOQ5N@#4Fgv@EHG^z`14`j>^kTa0LpYVw0G5NFxX zCx4){vlUzWvlWSvC@)K}&w4ppJQ=erzU>w+27E+y{auQD`A0Zf0M!gt&T z)YbLSg>hD0{ZYHRdiH!$;u-}WT5!Qg#F~0&-Z{%OyuD52Mdl6QMe0%WcCZ9OjI(;R z)$wWn0)$|9TNGxPk+wSdrmxvMobkR5`@eqSq^(ZT*&KK&_+HyoZH8`+ ze!+e{@N}}w_1GoPvhaV=Y?rev3_1(qY!?F*&fVn8!5wU3i_E_PyAV80^YdUAg28UC z$9_h$UGz}@i^TIetA|#PK65ZsOrn;$V>g?PYX6;FOWnY-O#^4U7@-+xWXDn4j{>^; ziIrx%97R3<0JwwSod@OwzHOVQtB`(HBAh;Lx8c~cxqmwxru74k>jfNlLZP(8E$tqS zCvq^(AxsQah8rmJQlr0QUJF&}Kl6sR!nGYwP(Sl|-(bx+BYI1II3*nh1UC>l*-A~c zYP=y1`U;eTjp9mPV~~-NCrf}&QzDC61BI_Cz;W;8v+G5@f70rjgbXYJ`Bwp-S)a~( zNhr6PfeehgNj|PR)_nli__-BvbIGypQx|9RIV%Al-NQSX>(R-)vZs^zUUZU3pzq}c zJoi4taq|rKX$;UnE;w>P;RA&$LWstJS$sx!;xQwR#0{|Ocj7D1cIeY{kM83(?#0V!EzNNbt|z_dcV7Pj+Lxx z8E4FfKA4Ezir;ED|IO{ddrwKT6b1MlMU7vx0De9P>x-rU`si}7z9v|6wIg-F*g9|H z>#VFZAMqmmf9HMsbF{g-+rX&F2f~+&ctVfbv=h;J(R3{@Ep-;VC!t-jI;_v zJFh?yTaT=awO_X5zItHH4%}Cd`tU&hfPq!mjLaWM$$8{Rh>OD~8-r)QCgCI}@i){+2%J9PBG%UPPbyAdrIR+M-a;pnZkv8DBFM 
zM-QHa-9H6>gSf9WcVuuo*LTd}c0Z~E^E;-9dy9c>#-wjT1@tVFqXGutLxcvLW$oyG zckC}RU;%m9UyLlmwhH@;5xU&P*k6n+#CFJ@FX@@lLKdyJ(Eie3F@Wh#`BGj_zNE*j z21mZ6XCtfwcNhbU$F6h;7{Dl4cctZ2d*NIUp%5CPci~Yu-c?wJ$JrASVN;uju${-q zg!GZd`>U%7$vV!WTIY>Bkvm}k3dc}r3~Kz?`#k8 zSS~NaF!dhdI;4qcm=bwG4g!7>R8?Y&2>F1L>z(6Rl8*c8|W?s^>I4kwv=~)*3nr!A6=bzKOIrv*B zChP)p#>gbraxiBI9THXQ$p4j@DS}p;4bbPl-mbUn|M$(L4Dgz$ks^C0mAVj~N$p0~ zt;d90vGW~S7&_k%O#)Y9oU35_LLz+9?JCrLE)fckbHBHsOuh+xiDu517!SV0BxoH_ zXWcDLo}WyU`|&DQ0z4bK;L&ME6ITMuh8Y9EkZ><^Eq6l}O^gXoa3*{);|()AiX${M zH*Q#Y2)MC9e3D!ka;s2e%+yPl0Jg*=&X!ot*%EqI$1`YO?`4VvtX*QU4a3@f$KKIC zh_dFOm66-HvSv8g0sVX4^q4B_Y){$LHh=8keuyS|`^uzz2VV)){zmojuh}}DK^xc| zcBKOYzHd@GFtBbDDIFLvA&~Az6!z{j>~r+6lhOeg%v|XJ^?@Fx13f!r%EMnCFd0}U zGH0y*j)6VwSp6MX{hS>@cY-o!@qS-H=1l72#DzH9zHz39a2k4-KS6^!4X>L&*Ty)J@BEkm^YlJ2*Y^YWgC71R@5qaY$Cq%Xz)0h` z=5y@d$Uq;4KXyKf{=*Pav<~|J&PUOeLqyR!unL`xqMy7Wik4Oe@+yT6lcyv5%w`;E zyj5|VJvN|Ey%V2$XWO!h{3F>pxrRlEcb|~mY8cmSUNuf?3hF>5T|_1D zLF=F>C}8%Yffd@$w4ZMuX*|=OI~`bDl&i4lN$BW~g+>PA=l9&fqp(PXz>gjK$z%QY zlav4D+Btgm6rX9(e@8jT%77^RC$w{PEZ_QTcN86}(}V3O=NR?>8xdV3@;-+#u&0$a z+EMht6=+96+=vWF&wj#=bcQvcZY}O8-ib6!0`B!@AQt_#bM%;UEAA+`|9sYA6l%<;#vph4R3faq3|#(058+S`2AjD0&wPH6 z3LS7B`nl!)=$Og$3c>uKnaT+KyhR$NA67o0G@&W5{4KdBpCaUe{`{-{?&U$vm<8J! zX{7l<-ssTxD)%eT!W!9BJf?g!Ub91D@AT$C!`eB$RG_QQ3h9(Fl;P=I-90dq- z4NocZ5Odxs&*Hvr9(bYV_rHbrf>;2II=Ar`wnLvf8TevP8Td7Kx~Kp56DI zW1buj*alB_8QuGCx=1-3TYn$9h7Z)c)5MVGGEv2BPibwB{f2G?L_!m9R2 z?D$n(o7#i$dZ4Sc-3_l5n7m5J8u_6lWKlYBC*9zUCUolIhktAjYQEaO19c+0R%utB z9a$q!Pj#ZcePEO|pcaC>eiA&=XiO>0Nc9lT-U2STFKR_y&@e^y@TvF3JzI!9(T^LM z1a73S^V~=;2RD+>NWKWXP5S?d8>w`T8+m9DZls<`O$B^vvdE2$Mb!S8^c;)kOof}{ zMt*ANMrIG-M(WsiH=zk=9D*B({r@I30bd@18wsm`BA|1K!TKS&k=Xx%8Hn7-g+1Iz z$9MM}#|Bol#2V*=8Rwe?utRXTTVU9Z4wKIkt|U&!PPvyDPCHvHy1vJ5rC? 
z6M1~qT(R5^bV1K-76&^Lkt#ZSbU+Ab{d@o|y-d8e0!<*~Y>Nyy7|?{#U`%2!wkDwp zYe1jgV$P7XS=XZqYd~-F9vth~PAKH*Sg}RV+Av3(R@znz;b!pu;1)P=Gu%nepC@jC zj&*hp#La*j0eRa9T;XrP&9M7(ZowgN3If3++KW2uF7(~J3Z39Xc)a+oM2KkZjgM9G zor_+zd+-O5MG(1NE_nxMq7-++h6$*L>^nXv3Pld*j`#$=A;r~VSVX%6wFLZq0 z6?J@#JvzQChN#!-zzK1-UOU@fugxAXwM0i%So!oG^nu>tWGI_Yvq{{-6uc|rpf%ZNTBRO!d(5=~2B8CUmm*3X6h_?TFQam6zzmy1sN5RZhn;W1 zLrdGpURIjjD|Bpy8NL>HgAOV$1E<^QnW@8ozY}8DQstI^kKfyoSsnNRgv_3S_2imQ zwmQarBz$?luuYWrcV$-r1c{u?k>nPG<=V|B-bPZ42VfS94rB;*^g z)dgibmJ76EQ%|){L|4zTBH#cwSFd|v2y_*9-{dU1x^@V3RmVI`&Z4V}hCo-fm>cY< z*U_g;?}My<#V(_wjcLeWoH&rxuh=EdAAh<9o(OybLRL|GG!wE4?_wfk6|tA8w_@i3 zkEDZkO{DxZ%K|b|0QA(rwsmX-dTM}%jS}c7V$+UhJ9?@E&z$Vk+vEX{CpGC=K3G(J z(Ni5;4;(~fo9NIVK9Fq!Y!H6(7gU@l02BDB>lAW+NrHzU1!q#o!2EYB>T;bDA?=T- z+1X=Tc(rk^*VaEu90M=?aP6>CcV(Djwv169E%tid=o^q!91$<#&16G_bQ)k z#FR|18{p4cqniRS71Iy#t#5f?d0Kg7SycI0M6SzPNp~s~T2v8;X-6W$9bG=Hd{Sw^ zr-|yM^2uD!W2)gvJL4dY$FF6^~e0$A0N3Ix-ydJ0pDD zOJ~R*QrrnI7u~c~_7w}m-C2tc*?1@S&?nmx5m!z^f9ymR;s4_hSH>f*)I#CeSyszE zb}xB&0INmIUb8vtCFcymYSBXTo_k5C3l2A=Dln6(X{0<2K79t*OI66rceh11P3}=T z`HGeCTzwntvl!a_?)&oP6JSjj!2;H?rI@^Pn*IAS&-k8pY$=qn<+8*{XRp*v|2FJ ze|=1=Wr4sF`o^?W8Rju9zPoVFqqrfa#c%xiZ+J|LPM~}2oj~Bi^Yb}aC!Ls`M=`Aq zH7$y1b(p+MF|7^?i?;)}(t)w+h-tOJv?-=-Hic=&B|=<_tB{L6^5pL%!fUTdgzVqH2EJPe=H({D!SSG@n9@GNI5W)D5x zv`5u4;PSki9QU@jR%%1?fGCZ+Isi=HVf_f$1P-ipICJ8P&e-RT9$-qJ-vIr zmZe(%e3UA{xJ5QXDU}bCFHunUVW7$t8gg4@9x8X08%O`VROs5q{P2;gQDBXXF!-xo z%R|wzu4PxyVUgD6+B8ow3UM9P?Y?4uKNbB>hZ>j%>UP7hGADEl(}wE8^;$4FX+?f) zJ!lQx`nv6-tp}}PYtyo~$X>U7xc0Wru!%2Qg$H%Oh!0xBy(J>fR{NaA$ph_`*Q%`kl~*mRK`tD@t9`DxC+n>id(D&faYCrYYM+(~xGQ;>7f4>= zsm_6sKXxu)juL$Dzo+$Y{(Jj5)JAK-$5Z0fbQpDUJZ84=?|s2=`3s8Ipa;Jf-%_el za&6NpcLOv8RDnttd4Lk%{8$g57K?nLjoIQ_a*NSTIUYSzsK`pamBX67l%7hd(pQ+F@4zlS^p<)Se)y^Zz#C)Ee4_f_wyT2=3={;YbrdvVH1tnQNr&lDHbIKVi2_9Jxf zUVcvt{s`$yd|Lcg(_Y2No^?JF_dvMZTC8Id@D#?&Kfzr{g=*sB=oQ*e5Y^?H_Gnk= zTBCH(s-^GfvyN}fdtJUUE77kb!E33@F|$YXH)b`Cy>VQIV}Bf1;5Z1!l{gN?@c|r< 
z!to&-kH&Eoj-zl~gX1_H@4@i|m*3<(Arnf9@a)E)r6mvGXJ7stSW<|eL-_NElKb&< z1b>b!S%aTr_;Y;8YW$o~@{PF;*G+V}4@XHjYQRyl%Ss%j;z*05G?x`PO2?5NM;R`6 z;%Fw0j5wO@vK&X5IBLdGmdi36$#8TSM{<{?ILgM+t2k1)EXL6y9390`uFI`B%EQrX zI9lqWz|k@sy@8`UT(WVLkE1tnbeGHgYWShIN>dc}?ykd?S(1ih8fXCbd z{=-pZr<*wc!7bcFu%MnB?+sn0Zi!HehZ2vycBM%P+9x^=A!N075hR+1OJ-0!<)W(L;9Z7r7C&{OeeCaDmej-*h=D~$w#FE#AI%?3BtIy%WP^>gQyE}B88*lBd07;74Z3F&6Khv-ix+?_o6+AUNmJ-FIqMj{0bm0IY5&a zLXRw8?gnJpbKf|y_hZWvz~7$$CjWRapA=9jo_wE{&4yy%V+yHm0k}+j0;@E}c$Vb@ zdmwypywml?J;jq@0g0$D(tM=}s(9cpWR~#d@eFC>GNJh4*7grjBzT|!NoetS$3M|l~1>z z9)#MqRF{q%@3L(c@IiQG8*wJIY=!M$_v9(*+F80Ra4!`55%D9(Lv{x~-j{4L)cfvf zkAc;%H9w5$uEf)O8Pi?$=+ZOTQNl*{lQmP`h@K{?wvk;$4k-hNpWdOPCk_7#XYo@{ z8vMWM##mu9c85;njpMK~Ox9EFwlL^Up@;J553WM1i>q)P^SA>W?Y0}+wD`o~5G}V| zrRTNju-#zg2*W~DG4h# ziW-xiLM|*8wtR6MpKe$n--)T*0%nQ1ro?Gnu&XS>)Cuaa6qWhn(RDpZBo6Njch%t=6ijaLzy1b`jX zh}kv7>1l7bwKuY#dHuX^OoMfV-Czf@;%M0PHq_6PhkFPS&_~VION8^sp=o^@@o$~O zAM4N~?8AG3#V+A)bP4;SOIQJ~yT@y+>Zp7iS#<(+3BQCcVesJaoBvFYaIa1G_6a+5 zQ+FzEg+CtHDZB_CIvXDPR^BO0y~HWKv);i|wAdf3NUa7L3o^zgPGy^TIjm6-Go3W;^u?YoR?!y~0{B zjHy=`=uAi7Ug1Wz4Uu9aaD1}h)jx^7!oY8B)GMrGl{V@XCM?F$E8GZ16Zz$Y4lh`6 zd0jqI!?8(2m)UIFNyHxafxvJm_+FQ z6n*a19>S*!7hiVsbxeyS?t8}SFAn>r2;xNke78sCBj7&4a^}Nb)6BY?$E>$>0x{WT_H_*N9s^R%*75*o+Rg|kS0R7 z-ztvBH&kiy&QC_k%Zff(@26T)J_)^Glgks!(~5{`nrqmm zK>taB_$d~nn^B@#LKSsn8@pSn?i9`IPVtz6G7dE;uy5?B|v^;EPeuO($3`s|vE;y77ass-VVR?q>7wL02CyQa|4$Y0Mz)Hx@L{!3e5ZG3GSj=e z1Z<&^eP=%0zM#)L+JXOt^d+=x9sVB=T@wDE1D2`U@%;J~mL#l)so+4R_gp>Ie)T+D zov!#Mzxr$DopyD&Mlh*Fb+|F8AT=^Q|9yj5%dP_x7yWmT{VyU>i*IBV{4dJ*`zyuY zeTTmK3+)%VclYg#KrG)^9|kr8M6?ur0PnPa*WQL5;3q!OzW?74YFdoH+1CCA*3VPs zKj3+b`6FQBl>vxvev$}DpJ5&OdkFFPITbv=I6NxwNXBF3A!y9tA+9R7m(Cpi{bC{3 z&oEr|j7*63yf*8BFs)bV3vwwq(VDw?%;om9C5XX;(*39jQ zerP5FW?rE?@#ErDL_WlVeo2un=RE0)TV(^8(gJjcy`VNhdq^%U{5I?%%MkzE&biS^ znntKRQ9o5LJDT3%S@tk;YA6oh1V4J8ogbY!pvw@e!!+8-Xnbqi;5~+osEs?zj$Sb& zJGueW|03nfu}v)yO0!A8iBb`dTnyegspQ 
zq9RgMX#~1MiYj7Mw1M*h`bR~@7OX+=0m6|9TL+hSyXdY?Uo+^FTnGY;f6)I;7P~6Ek9-+F@KwkaJz7HxQqZBDN`<2+$VQy z@(0%A%k{Ry%l*_6&IQ0uaM8Wot=tv63VCA>_+y{)Mlc(*(OWTH5sq)2u)i~G2EN-k zpnT5uO~eh+H_?Eo(1D~9P9*kfG-4W9p!T43giix_-s$+pj$6VP{@A1Q@IDkUb?hkm z%)}mxMqt-TJ*aB~li@p8bBqDrpI5m1vWwxL;k)F(7+%5mo5eAPMPBePW_Q62-0@AQ zZaF^!&cdI{a?=| zZh)@PujLXqp#T5Za)}$z|KGzUHWHUu%<1XV()_>OZIR8yB^DXRXV_nProRpT;FAuK zOH4UQ3ii!L^cAFoQ;f{qCUS`zq4-O7SBeb(6q}63@p}6Tw<`;u}jmc zPm(tDxR#gnab=#2u6fZ@=p*^rdcIyzFwFYFyLvhy5urlQ9{W)uA z*dCt4KMhOWj2xagYmKbIN}RPuR%0d3S|baBM!usPxRDhBk7|UzJ6UTxGWUDITGK%( zBis**fIz<6*B=ou}HDx?`TuWr;8XHN(TL;O44ag<`dbuwmgIIrgY|j*j1~)n)P~ zuyO-F9ou}NTU#QG8*Uu0`G+h-O>vQrKDczE{C=t=Mix+EMotcyIb-a(c_qhDJ3z|+Z5vw6HtBX47-4wG9LSu zv)SxCyYf|4R`1Mnf8d=BtO|ShBuDP>?1T#Ck&K0R@(Ck;ZPOQK717U?S>xKK<;3da zpfK6U?&uQY-~9%4Bq$rqm51S;zSSAKKMd2=S|Z2k(5k!! z{CxxRRxQiudL2>p_nmHrm#uC-FIzprBakybhi~nmHNrj-WVC%ENGv9T#BKe^&Vy}0 zekZb0JP^q@u%jJ~@KLP2yS)MR;ek4}ji@?<4Tt%CYjJO1?f83poy0@f@twO+YLf_O z-<0tEk>-CC0H3KVwfm&lFBglNAW^xl;lLY1sY00;Zn%f5fQ-=0 zYj$)Zl4c0S+UF5#kHb!x0Nn8uSl7Khi7Wpvu{K56Pz?F}Ui?i@=i$YH@iZxiIMhM1 zFpZWJLI$dc%toAk4@cYUkSni*z1_>T8Hu-DfEfHfV3g7dQVa3g_j331OB%rBDL7Ke z6+mh9Gr>8KlkM7un7Rmz9^M+aRiSu8Rdl+dA`m0AbB~cB_7pPIO-w%O4 z!^(nCqr#j_bk8GFz39vP=cVYN_k%XVvF;76w`gc?6gb5fNsla;&yo_g$C3>9B99d? 
z*)NetrI>C2B|2hvgqPtv;2eQK=+Gsh4nsP0LB=yI0{2XTEY;a4@=kk4nl`I{M;anP z(;!i#J9;CQh`kZ@J-rb#>>X*D13J zHK30*AJ11zjVueb{sv@_)X~s4Lutf3RAhkYsDB81a$t`Z9o8|TQeul~(IHD3Wb%Kx z3!!G=E?7yx!CMB6}cN<7I4?2YgNzz%>cILNuNn+1SZhxKJbP z>2GmY4>yWEE-73qcSKKTOE^4dHkcz3&{}#5Xhv}{b-s**=S;SDyrlAumwV9hk^*~> zpp%#hp3-7R;jz z2e&A=U=()ddUmSISr+$Dvq$RLt3Vv4aUV6@n4n0p+rw%04)R_j>_Kl8XOA>uV)Z2M zV-%)%%+*b;oVp+W2UU*-AOw-}FIXfH12q_2sPA>q>=7`$Ivle{>e2h^UQv%O>qcmE zkS(mW4D4=cMCVi^{!WN_q@BLAOFcWf3$QF|S=`IAdqYe(gum-+j#7p*l z8T#=dw>MpjeR_np4EphVpdT;0|DZZ#s<+bL5T*=O#py??qvc+u6myMGN9Y68aayRx zLsi)idht8Al4^X_6Tx_XYfF%NT}z;Pee*E&aP=W|Oz8x@uPRnKP93Knr6TS4aIPI6 zsU8je_-OSj>g3XN{Zw_TI$lM}@#DF2e4;u5>hV+5RqD)AxqhBHOP!`7<@jk_Iew-( z1M2Z})Yamgw>dz0U*QpCon_R74g)45_{$Tli>SN~0=zwEI>Ic+?YEqV8 zqrPAL8#QUmA2j?!Q2?*@2(;x7aliHo{8|-kxf)1Z4Uo86AaRw@XI^h;fWBz<5gU}5 z4{}8KG&3UC&b<6DXv^R4bY=a%?GNH#udRF|t4Q^Ls!+8C_Iy9L=XJK_PG2Q!JE@ng zK<_T;)90)1z*Uy>t7O}9oW4r*_QkkLF1mL~pI)KL##M6oRpKz+&e>VSZrcY}ny$yL z;JniG--l3|u4fmm&MQrSc!SckmtASP9twcYD@`}tpfv4mSDLP86^J7Iu*wLdd=NU* zhwRo+&vA!;G(P|p=~wLM_VaUH&8rn@x+?p*5`OOQ&G#x~x@!Bm=lQv>o9|XE)z!cY zZIzv7PxEspnwKjIbhY+#kMeULH8;Y`_$YmKk0Tb@YkLan)5tz;+o77;09NuYZ5m#8 z*j{Rq>BvJpWm6iL>S#~uv}&3QbhLwAvOcbTO1E9V3svDtRE1sC671tbr%wf99P?7` z7wmO(_ZQ&xcVN*^%O+L&2B-kJ$#-8WfOj*QDlaX?>-VNVT$17SqUkKOsI}mxu}krI zJ!=Yu_OuogY^Oqt8d#j^_VzSwJ!U7z3eRh-zIa|`MV5-%g`b@M@6WJHiwgAx z_}wz=QatBbWq8iDehY>AdiGftapw9~7aG`kc-E!BR5C7^QG-A&6cY{G+COXmxc%Gq zsDHQ%tI*GKr4ihpKY=UsI(R^@q27#F_3tD?>+f8J4bP(v{T%nB3xw!rb6-1LhPley zWXYIoC@=KXk4OtB4Z>XIVSKK#R5e`Zi7Ztn^-zszaxWd*;)kOUFJBdOT~%J_gZEdZ zsBS4C9X^`JOw;v9(Q>3J462q9s#H}H)Il?gMvG72-A{XE59_Z)_(K(2-f{mwH5pV?BQ=R4FuNgwUsvf=Z&Z<|* z?dnyT1JtYPp?c?-WfY7Kz1}@_INqhPEfrH4k`~^@GicAY#GEAk6lBc}>`i3N2fx7% z2&R6HJPvjpWu1r}7ugpW`xA9Z`lM~T`2m(T`)oaHR%O#E1N~!_#}9j>SrA`^V{53aCXaJGkfG7*Rl(RvA$`?%;{j_ zWw?0hXR5+N}M|7h=E2yMV~tVA~mzI=gsNi;-q07=CSTJC1!l1s(eZr@X<`@VtNXK zDjj)l1}tfo;dvYv$k#ys%2h=(@7+{$u+K%qx<>Q4ff3#-6*FO7#4{NV>zWQM!oSs6`bg^%Nt14S#_bOV#1f$g-S 
zMv89t+qsopx14F$|KKi^0Z%)*OCr2#fX+#?L`dB3u&w{8{-_=kMtE<~0DlyTd-0#~ zN8dTErZ==dY6Z40a>M)kqhrWUhxSJst>ljym=;xd(I1_&i2g_cy*-CNszVjN9vU}f zDP#WO@JDx9MSpaMRrE)fQCX~KHgf|My8HX1JJ3bo@J9t)7oopD+5p7%5>NusA6+z) zKgtD8aQ3*nu={I?P_+~LnNIXaS+E(mXQ)=Vw5L|6K&?=keBkhK1Jw(KSgIGII{uim z6+=mrpPIUd2d^07{@eOj3{SHte5?P7ieWt>3rEEe(TYv17$VAnx*Jst>oMCd9pCEF z&QSv@hTyOOr4}oO^+2wBD~8xVa1Wm9AXYN`{|)m1;QxV_CPHE06?p#%=+lU_x(nAo za~GP4U#}IvXZ6#V1KXoLq*(nCYOx_HD!E&UR6nmg5MDmpD-?a%GUaIeTr7>z6Z7RQ zxfk!yroQZ-{XNSQfq;%^4QcW(k818^!pQJVKUHjJ_Ov>fJ-tkt`Q-`d&%V$dSTjle zlT7Twem75rvVd4Q1Q(Js3AVK~U{0(j=ib1#Lo;AJqL6c@3{(yq z*lAOecB(EzkGcu??)Aw2V|g?)s^}KEOY*1!s>n2AdJ|P-Pz&gwiVR}Y4ywpd{jd%! zhj=_+>hjRiXKVmV2`b-Bj{0FeczllfVLiLULdap?`XMUi7K(R;*a^T9&Vn`Yc89x= z{wZqOXMqhhxC*;BxeBkH60!TsTl?DTB5td0|I2Lk6sp`*KkT*DFIksZ<^B)YY8^W~ zm_q$c+Uj20uea58U`PyZt6T7C2iod-oSSo%Y&9Qj3gU^?qqax3TF-WNk*(IV;x5rv z8^9JLTODl~%vRkjqOA%R(N=#p_u49{9MpjsNw&JzMmTJLTfN>>>#m2|#*MZb2!Fc^ z8UtjjJ8WOJ2Z4LB=VNza&6g74<62i?=cCXFwcDy}i-=>X1b9FB_88dQ{eD}B)?lm1~ zP^=*WGv}vi(z)XNbWJ_xN9G?{uJAH?0^uY*AJ2VZ#xGela&(G*oMq!Kw@>cL95A`3 z4wq@`epWj$vt%}6S2t*?g7Jqc&FLp{i9PM)%QR@AYr z)_V2{6#weLZ5pWV2;PJVJ1{c!hui6UbD*5{zz0N>vjJU}XWLg^au+V1M9uL@^!Zew zM}D(t8zu6mb9ZyRdz&m8wvk)tW%Nx;h7Bz8@%v$?cfj#x`7lxqPV~h7ntj+Ec0&42 z!-0Skn(eLWMZX=$cdBM8e#cSns;C<^o<2&(l-NvYJf#5f&XKQ#8d_=}#M`0qgw=-J zd^yLvC(0do_m`~Fo|`+1cRObB)47;XYp>wES~%U+D>&W1G%{a@1BG{NwNO% zC0oFA^9GiTS%>cX2%%YJCA>T3VCo^<8%zzOsbiB2@CC@A>yROlH8datr&&A=?7J?S z#RLD))oTr#JKbRo8$0XSd+3(0L$}R9%o|pP{ckh!?T77{H!jr!e?y~q3j9lafeX<*d*G65Jxju#O|$MFj36^THg8~)q=7S=pLZnE$Z5eGf;QH zZm$q@Z~5@uqF>d;vFy2FWD2wzi+O4>*XQ3w{ZVE zob7m7KScLd9h9M+?Rd!>qI(OT0a?7*y%h?5{A{}&(tY@nx#06YPyH@Z>faK(y1r!B zd0qG^>~|ND$y5JUJNq}-X%bs&N{a1 zhTOg$6XzRHkEZS|REax?)0qT5#oOo+D60iurUqPQ^fxYj#BRUQs@(VOSmSJ9jd_JW zz#0iN93{?QiSh+56yxoq8maRt6(YTHv`jh-YD0ml*p>hwM}m(Zo?X2BRbj>;)f`xS zL>10@k1Y|##;BrIVWna%bqo-rTYy4FU^+-9{wl9HN|S+^CDf-1%*dQ>xX}H<5!7Ew z0zR6}F{6LNiZfxwb6~}3cGT!M95qVksL@=YMzQb+Ze2yhzwutJpc&VM8qs^8oiM_V 
z0}Qeg`XCoLBIW{yo?O5&!%my!c(=g?5vWJhv2SmBhF#_jJK+reeN-f*b!hG6P4qE6p1DlKmBV?9I&@**gTY5T0YOJa*7-*N2B- z7S>?`fU~*5k|CIdb=Vmq4!iJ7l=TZlGCKb>LBx<$5DdIKf^+P5W--vtTiFJcN9< zf!R7QAc3@U(gmAML|*V%n$%#~aJarhSK;Xf^#u6Mt-?0#TeQzz;k zQ3o5mmE+{$gszW^P>wZtl?aie)j7?#prXBZs}InatG*#>^lZ$g{Grn)NIk54qzjt|NUC?rm zVv=qX{yp{4)A#y&etsELcss#!dJMlIp3^C&kSZYkEw;jZP{F}gZ@%PYVtdGuf%^wsY(f`Nn;Rns*= z{?I`|{jkwRy;S?KX1Q+t!4-zb4;-|v_1U12sO!Ppq_vuA8KXUco_9C7NBaz{%?Qhg z_DOghVVTxG2CpHO<R4{a zL2HrELrUyE9qW}kJ~y2H4+-Kms>=Ge<;!bP8JVQ5Mf5ZV&pT|x@LX)8431)=pG`Dz zvu|~_p7~;j+W|%Y*Dn1X_1H`3C98)5_g7fwv(F(m`cNWd{=pqd0JPn~C6@gebv8Vx zH-osRm&+x>>>@|JukFW&zvI7+5C43>9jniTtRZJDE-0&G>wy?NfgMW*-|d)=b&Bmq zE`D=dc=ZrFRxP3iXSwi?4Y6a@Vrr1HJJ#|ccC1?FhS&$wvA~g_2!SRpI(Dm5ED)cU zxbV|^^jl9c51gYeU~r|7gAx}Ws{#`j=fe6TFmZ7#UiVlX`mI2)kp)nucoet+Ffde& zX~((;Tm^9^7H|u6H{Q;7uTs+keD~UGdXDd2F9N#|X9(A#%A5&{j>OExS{7lgL)WE) zAMe4n;p^B={0|~OzLxFn8aNZH4mIU^76L?L46Hg3dJS2p-G$WmCBji~^*$~E@8Gwt z!Uu&Cq2PXQ!XV>Km6#8GmmO96{dGW#T3OPdxm(5T|ste1pDti<^(K(qEj97x6*_PL?E3s zp`sRDcVrp0sN7L)?lZ(bWEpw?l*l8qNgu2ZbI{!6FTp1Q!RtBzjV5@ft~X&3Qt&ow z*{IGuJco87eg|$*3#C_@h2FO=SI6q$??wT)C<1SjCLxK{rdqZh|0hjCsz(GxxWzW` z21;8c!gsKREIjmU;Nw<1;+PBFhnF>78}{@_9?R?oUoZcDuzIEj7)B`luGKslrkbi9 zXYf*btHx>P8RC>u)jaJjhTZB|RfBvY*Yy~szOQ8@qMNt32I5)&RDk-nW`9I8A?jfD zFvJ|G7lRPlynx7M9FEpq9IYnqUNqEP~Bgfjy%PNsSf&SxG8IJQ(xOumr627DB7 zjG`bpczcV~3&G!0sB_gX<9C}72W_b?P~VH67yb1PH8J__RtJ2vTR)NGY%g$}?GkXd z7ZC%!gcxWKVxX511MM|2Yvz9HhBszVai^VCpPHM#z zFpnl<#%ORsqRPvD9dyS#eVx|gSX?KrB3ebf6tJ5LB30h@>-buNoW4#~aTu;MqJo%F z!I(7~To9!4v0ul8Jlzkgg7%I=MY!&Tp1XM!nfjcoBU@G}#_L|}Icw%;{SmKC)4kMl z_FI0|wdGF5eBB-%-H|nZxu0$=yvlc$hqd8| z%)Yg3(vHXLIm;8;X?Q(t+14~4tUAQrmS>Gqc%+tzeuFv5YqbySHt7YZ+U(u$qIOk1 zVW`KHUpMZHR$(`+M<z^*o z$7`DP{H1AlO}75?(s;ZkTEpAJ@jAgequmd$an`$WTuZ&Lcz)XqU)VVwKfM7>XyVh6 z7mUIGEb{4+aAY*k$M3b~X?Siho4}{TUv!dx>l?8lrUYuM2ez>K(%;%m?ccW_hexYH zkHW?i?!tHA&@C;72G<^maDA^t==?3PEsaDthgaY2(CS(#`Ztae{Z&7CG$EXvuekFd_j9W*mqjbaJ%|3p@7r*^} zONcs39!!{i)F}0)Cd$`Bk+FrV>(vuVl5`W5L8X(xGmb13y&8GCIOJ-v;2<}s(@QdR 
zGt|>dX6rK4iKU`1Bmb5PZQyC+FYlOpzj}>2zw~a-Nha^M5}xTE^?mS8?_}v! zh7zNy$k3$T0PJl~2{Zz>7nTMbe-t_AZ`1*Q4meKiBtZ3ksCI^FE+w2YT*}k@QclMzo>^_G#I2RqrpqnWbnp$ALNm>u%GNB zKPiHAb?``(vE%_6$h<6U7h)cCdc|(pPC4cVFpk%&9#XASt;L#sko&Vh(>+dK<<9M^ zag|kI92cnWRjpL5z*X+SRcgSEU+nZ%GPd82t1N?uUaDH6%2VayDvSA5KIxk4^i{&Q zFThpg@X#_9WxbiW%3OYxhOQK+ukv&8bX>)EUpjKkG!=ew}a^NCJ= z`zGKz{bxGWu$5h5PQRbGifO8o|Go%Zr+*hs4f+bnvrc*1FBJq0ni$U;To`^K)mL3l;e~o&DTse(rel zDn+5L$$l<`pZfrQ_Lt6K${^i#_}N6$E^Q?I>IBnE+C;p@nUva@c#Se?niTM}wdlU9 zH0J9lGTYPjO0&MwxW5n#=7ZhuYz!zpXf?bTs6zhP<)YrM4LGn%_mW;|0Gj13Xk68S z2PEb0Wp3)`10HJRww7TPwa`*gXxmuMFHpV0>-=Bb<%M{i`-`hQ-R_C;H<)hs$fZJf z*v*z_FXiKPqowAO0X!-Z0NW8AHj6oj|Dma!o?K8DCtQ-C1 zHRv*r#Pbzf5T0$eIyM`-fuEe-#a;G%+Pkd~zq@G5$Md%~1)k5@h~-?1jAkIqxsDaV z!+(oBI-0Yb>o5zUm*p(Hvll$2Z{Lsz8K{}YfzMnCu4?Iv$eqzAvJ0=tEvTj9ch~bh zglGHplC8vSUbhsQ&MU+Pd^)jZa+#+wsO&X)0I#IdOkSC{v}_EYF}=*!1IHmger3T% z>L2sM1Xh1&7)C9{kg)=Olt8Um|>PM_)bqmFPDN`5&N1U&}rdIq?1T=)Yi>?Afcc_A!(m zeGSt(&4EuFf&*W}b^$-XQIGx$rs8#Wa63+Sw6LEu}|u-Tk%(Kl1AWw~8-_`896^tDj`6FUrH z0mxKOe*nzlRf&*r7hQ*%|7;{@&|<^(=xKwj0RBVE10b zkqgv=?6U}Jp~@iM`$+pNf*STB^lfUG0{e6uW2jfty{}`*d=>%r%AQ#SSp6Mz??33M zg+4dk`^+0=5!A9g>^UEz7TtdqK@C)V9rykX%py4Vp1UAB49pd4J`j53A1-y^RR7(x zRBPC2&M)sjOZ5wO*>)pwGI;i9J5qwpfs0QX7$x0p zg>_&rF=7hr1a0Ulc+`$biPO1jSPU2oqOWhY5GHlH)m_L3S3L6$i4gfDwEy$$cvS3# z?wM-t|m(gvtz-_iv&wA1qjqr z8bYAZy>d8>GqQj{X(nnWXh>IfGSp!cHOZQ%HJde$YqsGk(rTGz9$tY(X{LjdH%BvD zGfOi=GgFhPc}BAs9WftV32{AU-h_@A`B@h5Q5mWoG!uIX{&mP-DJI!! 
zI0>D`YgHH1kE=Sn0qU-@!Q4N%ZoyUs)Rh)$weJ--1 zzGxBUR52_U?po+m`9*$3 zmRH-IIh*y58|E9Ikkzs+oit@Ts^xYdN!xhNLU;5J{NIJO%s1vKp21mSjeNnH`B~qV zT2_JH1V7!~Sl=I;3xFBbvJcGn12ZCAX7m2F&C`&rY*9VF{|V$N3Fe2J#zLhLH5T*T z+5+AE`Yq@|+TlUqiNdD*^mFG$nbO4^>0^sQa17Mw#aCY zaFZVcN>m5@<^oV6R7*O40ZN2wN#`v+_zXR{Y?U1{0*#M0DDT#1-t34^@U62I$Ca>D7@T&Bjw+eKlmFRpSjb)xN)0Bjq3o$VQatF<*1h zp(R5T><813A?=ufx-q0XgK77;#?P%#9NkXZGP5}s*r6wLi0jD^e|Q&IfcL_4kfMy& zKGKvSJ(>G7HEdmH!4VDDlA*ktYsq+>WX~frzF94q_w8CTne%69S+7G&W&zid>HFQ8dHtQVWajqO zlEJrnp8tkx$#~6xmWyjj3F!U zx^I84l1vRtLswD_pX2*Eivx-f2h|v0x0ZCM#(<}TTKh=6-euVg)tEZ;Z>Vux4eqNC zo;z)|Y^Ir1W2({7L^H*z*>_N9a?BK~0Y>3aji~_wFiXwFQFK^>? zO~7$fX`z;y`a(0bB-8l3+#qmdc7xls=&;`p1-`_ykA#+ps2ERddoQlX^!6EvwnvRu5Oe)K;5XCEdt^&pYtt(`e|gIWOw4z?(V0N2`)6wQTI?Sl(I#Q zOxT>2G%|rec9KRWVo$56ky*peLyf4035d5oXGS0rq>)+6OkJdriAsGJX=K(yoyMV& zS&c}FeB7o^H+a2`oxmudI@qg`S2K;IrWLQZShTA&t_V3UQI7x2kZ zcVQ>E(-H-6&GoKA>JC@ISN1A%d zxuv59)7!4MS7Xx4z&y1EJ;Sy5yMcLXHEXlyskP8praU$ARd*qz3Yu5&KU>y86E({b zGg@{eI}iuH|>FVvCPvqTHL5 zkfXgcxlmomG={23QPQ!4t;Y@~&OM=iu`u2*=Gc)Q)+Cmu>SI(h(N&PFQj}ueNfqU# z<1`d$_MKs}0av-Iq-=C5l(14XVs@H_zOfmaM<@o}PWfdfG(YBPD9Q}@=pnv4{y~wg zarBN9$k!Vlk-MlKM%K9j`{N_nA9D>0G;)nhGas30HKPB#BTeWOd5C8>nb;W%In&`N z^3M5A*NQF|FGNhFKus@OwLm3T$q+%#=MiL_Wt!7hIZ-?dSDA^LUIt>&bX6LzGL2tF zVsU=r(5v>ImAnB{TB_MabGXy*MO8c&?VNQ#m%9C>ws#ztt z36F9wyI2vbTi^5TH*yBS+!lo*M)z>f*#rD+T8mtftlQ9YwuGOZ*pi8O^^u-a)$EWt z8Q!|m9E0Zyb10tGW+|Ra%{5?fi<4fe+0V!iN!Rf2C#u2o8ra{5oDpdB9xw;~$RpI< z=HC2p_F;G7^g)Txibw7N)Ee-}z#|clrL!GtyQ|;YUW2tA^8Z-di=biGe`Wh!E8g;7 zzqS{u2VdEL?6nhFsfK%!r;tacwW*Ynzl2 z==(px-r#lX%$7wxt6My8bu4Zz%Z;vA77~UIJ<=E^)JK+b3<1so@=_Kc&_TIf%1}LjeA4$*s;b#EH|$4 z7)uSSgaX0qSmUULcJ;3DRYrGVStZuFpJbv(BYk|oTHj= z*rmQzMLiqi%1N&uqdkb&)}0N-%|SVfD#y@amn_LX1)Eh#6(!lhs>=7YD0 ziNH`{ItNCspxPy7#qbHw!6)p-ZvO%>=6emdgK>11x*F;c`A1FwGup}7ZS;vxFzWx$ zy!Tej0z@!y_{tK15lnKT1)5IF=F zWb>a+G2Uh3ttHZ_7 zxK2#PSae5@L7!xB!Du_r5^KX6==7&j77xdDLMwt*L0~lo;5vbJo@Euf-U+Yv;du2* zpjgjwgju|sqW<4VuocJYcJ;Z-PjXhgU&}JZRNeD^&Ogl0yR 
zQI3L`lZ2d{V9WV=b2I9M;2HSn3bC(mv^@yK3kZ~TgLWKV*V-O~vPm^q&s&@3fq7HS z3T)39bAUrsV=`i)_CehS{bPtTT-2y2@QmQPX(YT*Ei?h%y0iq5#J8s7m*(O1oXL7= zDqc^Ue1VG9vXiEXK*4I+annMeVzrpu{2-32&|B++=T+8f@Gi&Urxob6C58-nxQ~%- zioC^a_^mwaJp6tU`2SU?eGzZ5ihbNgxkX>zVm0(ofS~~4*wn*YtYLENH|<-s?!uxn zaA9U)PFI1e5CJ}Z-~$ptyBYe*M5#tr2j^^4FE2Mq3DMVe0>3wu59BLF%@DnR8jzK<< z4!waq=ndQnjW(*3ZIy)s8H?21ck7IT>0tkn9w2E_#2TKKRWq5nfVywJ?p9Fm72R!` zYIcIHM25c*YCePX-b#U#Z5NR;Z4Xj*4_Bf12A^RpBKiLPw^f)7Gl&w!pf#IWT#3Tt zKrp7q_Se#{!d$3$yA}n#$H_qUT%t*@{{-{IyVat`bcL(`O@MVqStq0S1JP$^U$m^6 zNq|*VL6c(a!j7IfF?-s>C|d41i0)cM%Uyrss2J^cshwm!NuAZ`P`VDAX|_>!Qr{^& zK+bG6_&Xu?H$cT=fQpH`U=`bqcz{qbR8TpW|GQduAsZb2v}fG~$!g4~DiCWMRVfbJ zd3J#9Ow~l#bxUG=dv!~ed4*~QvkmY}p+vC8*BR{+(OPYg>03PI*Ci{bj_+; z1>UpmD&!%Lyi-06Hj@sU5p_(+ZsJV^NA6O1X+4!LcI^_;Zn8woVlcZI^nTxES1+j= z(6e2|+?<{zICBW~k}9yU`&&#JY+SS$8}eNPEaoh#>!e>&&9-$Uaf|Wj?5kf=jm|T& zm?~RjAB*`dx0qeFDsC}5Y;X5i%%~2}iYoN~S3{$eEJkf5R#ktCsk4GZfvE}W+P%QI z?X{Toz;Q*3@dbj>Q0*>cfP42#vAYltdsz>DH1)KtpCZ~znS2ZSfLg%0+bSDxkX&1f zKA@0fzaM6LO@q?SBB|#m6!ZA0Z!so-ap#Nf@O0%cXjTMp{dU)KH?H391wD5uW)4pR z<8EDXgqmgy4=X3__8_qEMnIo^ICy!Z%4jxJ0%k)^LU%=h{4T^DsEQxiD*LCbibdI3 z7R!74ftpURSt85A=?U+53^C!o3d;D-qF3o{gZBefq2DK7KAC6U!X!K+Zam1kJb07sumj=P%7dL@A6r)h$*2 zBAaWR3mv$t7iX6X>MC#$FCKwjGbXZ6+kT<@jkJ8OcpzL(!jUm7-)2=U2nLUf?s z-#DUllzwFCC^@V@%D*%opC>+UEV{b4%0?lkqUpJ!qAn&KLr?bvGk2#O5>$TCLe$4s zTwN!sE_DAqHA^?C6s(+5`gGEix!nb`YS>-Cq^nRnp~&X4iQZos-pQ%q@pAfvMaanL z+@9j;2huBEEgCoK-+`{BZ)OQsUN4^g&Ntn@twP6VuG3YI7P(g5{m=oIW$$?W;OqKh zkuV|i$N4+lRI)4Xz8AXJO~t!Tz_${MyW&G0{X+Lse@4$z>Cfoej%>yI+2B?wbbKnf zE0=b=8P0XP32vot;9D_m{{|`w=evEA=sR)4can+kM7aFhVz9Z5f%;Lp5v6(ZQSy-` z^u3Ig`RZ=|3*V83J{u31c<_*GL%k;{yf0s&{D@xS=Co}WJg=*>)YqonJy zkzvLG8Lonw3a0d=(l<7$D1vL*A)-K);G^bhW-+~gDZRoC-_V?^?=%ZBK;=wtULtJB z+mLE}+3JlcHNHKa#HscfHEH%4H8U`yX69Dx-B1;Glh48bIY%{ff2ImGPG0A#VkZ$9 z)Ib%+P5#?UuXcEP&$+s=C1Zbh_?sPn3lOe+Q7nv&mM@k292r?~XMr@uCF36_+&VsS zo35VLexW-uB#+<0-JLt|{&u&W>y{=-CT3oKdON<|q`M#5=W_Qu-aq*IBGPI-hwpcl 
zT=vc=OdJX+Dkz%w&?t%UgWvYAU4$QQ-F|%`K7FA0?H8k4Te!S%JAE_XW5Om?=OnP2 z%*%_mzi#{ZCHMW$T7?(A_OIxSUu87E%6t4O_~xK=oOwBfUqupw zh^uvv)#b%=-9nPfj8E`P_vxR-l7wfjMz^@`_xZuaXaD>;S8r)aulVaF|NZa(__3=n z@tLbZEyMP^;4gnJmf*;zC9u+E!ZTMrTj+Wf!st?AEMho=3DRdD8X#lbFm7wnMnC$onO@JHo@l=>!EOtdhUr_adA7ZR>CszW+BB3(j161-+Az@>ek1RNKDz`oI4VbMGG?Rdw%sZ^GmU zA%*}EB1Q}lF=E7s5hF%SaU3y3j5MVwrZ{4xDWx<;N)aPQno_isrVt<*CL!elmujTZ zQZA(kIS??-0U9ZXOD)BeQZkvJfo7Q9$btJ_YbIYg>U~bnd7eKWUat>p&pT`Fwf6e8 z_jj$e*M8|=(4TH~JZ@Y*Te_<^6?vSr%JgWu_ir3{id%q-iGcOSC9Y>#_Tm3Y;(xP#BxhIS0F z&9RMocSzYt`_pA#wa48TH)+kZD<2w{;$pjQy|X7aHtxaatj=k%p7z+}jxn|=wkhv^ zXc{et(1)f4*V3ZG{CL{s596|Ly>(nq?7+DDucySl+Zb9JusDk8MLU}Zd!hzDv}wxv z*o(^g+3NXHs>L>=Bi5gkZ~8x29_={fGnN~A)U-zbE#rp2MWr9|4Ol*+OZ78Ru6VxT z6mA{aP5p1$f7(8xH;4QmymHuQEI3S<*e+FL3w6$3qjj~jck#)N0qH%YVhdl=`z3_PY`kk1IH`b=tdau9p>3Pk+rS9Cq-q-3( z+x$7T>bm>+`N+a^m$u}Ek zzseq$_eL4>c<1b-KYR~WdjqWbZj_M>yZmFmM>a9W*oN!m87m)m;yp3OMc#W@&3h1D zSn7-@qa-877>m^TbAD~H9WGn{F8hH@I{5eJe)?sXar*#I)ECaX#&T~;v;EcnKkVLl%rxI1xq^W{=GZY^#OZVh7=TSC?Ruf}CuTv3YeS6$(_S8)y% z_exw9_X=DU_i|hn_flLH_hMWX_aa;s_X1oM_xwoQ^CEH2jl?}C68CH!_bi_pqx=W8 zGSKDj`Ws&%{!`cKt`ED8b$!rvsOx>6vD7z#4aO9W7yrsW$2VQ7h+AR3jlHxM&L5|q zA;t~oO;ux@+RwkFW*qzSW>@NOyqnATCerS>w3LpKx(w_4whPymL67H>Qtw7ph&@ zyJx*Il7?wXgmn(B_wG%WFXtc`d| z_Y8h-e*KJwU(@f0Yw<;I!UMaenIGoY-~6ts<>$QPKR-71h2slG&(p7G64#&2tm?ZN zVYq9(T1jySBV^+lJKPfTdQv`rrY~%ZM$g38N9!|XSKh{0X8)HJhTBv0nc8@o34eF& zK>nCtsPClc>szKut0Qz0`}U{U3;Vn0I@MK2+s6E2_|ANrQS;Q>&#|{kA**s`-CFM} zy1U*t`{8Wpdxi zt<2v%=ay%`_DtE2c_Q3i`MXg=YHzxy*SU+u9$H`GDg_g93tJfvv zl0M^(fu5BEqS95Gvuf^J$8)h|fcpM+I(rSQzwhly>#`Z6c!(=-Ro+|k;Lj%i?09G; zDYg8QDSeof8si?K+#h77`2qGA8g9?l<$lcl*z|nc(7JJ~mSTj65^`Qts}XAa)m=s! 
z{S@nTjeBc9RM%m9yy2ri>L;$>@&Al>f5Y1QF=`CK-*j;}?sEU}i+|)^JYC<5RqI^U zrOvg;e^8&RglFduuozPhs{TOLti+C|YIlbId@ZT=7oog=jNu-8#XX$=FS{E<(|kQ$ zTlj+9XTO}@7^-CK^9pPBpSbAa{9AbU?N8j>6LsC1&OQ1*+cC{M+`s4U*y-+Xr)~6m z_v?RuebiB>{{7?8@io!Ln=6@H{YsQE8y$Eo%19p*V>~ro^^nV~Z!{MaG=|o8S6G#$ z6WcTRhfhBFvULl4f`;vbXq$2O=nmta*q$*zd~C*ovcIx_(b$e@Gp0TH*W#D0DP{B- zzPqs`?zaEn-KjG!6my^VJ@RB)*~`{J$}WEzoB93z=fXdAeXA@(=Up~FyLlGMo~bO6 zyUSKs)fsxQMq+!Kt5iCS9}X-VG`{wUmdnP7=4ns#G*wNQxx)JQvOkyw_t!r0_p<1W zCoUhT`hz*0U&{ZVs|I8|(Q~wF%nW19nEbrUp0B=QO?zs&b>O9K#$hvk=&RPzPqti+ zial(06uxXdZ0=8r3SRkcR4{q*VKZe+O8#LpZD^4h)i{Eq+@Et)eb_Y15A%M28uJ@y zOZt_*o}aE=5oH+Pi8l7)mh=43hIVso;uheJO^Y$E-p=>B`=X7l>{mPJD}C2*F|S{* z;qPbB$7bi;m`h1f*yC$owMKIfG_6O>xJe(I9aE2(ma(t!tMR^%%{G2reO>)Z+j_*@ zecur?ecciB?02yMNsW>5zdk%-TJMXwl+KwNr$%3D<;>2}%75M%{AImtjk{Di@QB%y zbi~Xdbji<;nD(C?F;h508n(u!51DV98gnUUKwNO=;#aH#gFjw$*gRt?=e|+mKWv^# zzD6#UU-;N8S{fH)Rq%xRTJNl1er%eL7-K&+ zdtasGX1~ICN6$~t*Iy%6*BF!Eu6roGnAl790k2vq)!=T*rgBVNFpu)9dFHTrVd!D= z?3lx*vGuTNSx5Ok&%CBS`#8?t^!3 z=}qDHntP6z6{+gilH4O^X66yIw&{pjw6HN$<$qmY?I(Pk{A`J-M-$oaBGbBRlk7l8^R&Ot>7vO(I-VRH_bF^e}lwxc!?|xV?8D zG0Pr~3$A?Oh}rz`j?g=L^~Yht#eF;WQVS(*xh*cZ`h^wNm#r~8@f|fYhLdNhN6l)! zYjV_FGw!IFJN&3wb^nggPj%Ssggrw_P7(H0YE1BZ<*(qcsOG5Yy`LHqbi8oXG?I^+ zh3QAl>Dhd>PKSDvP)(_^mt0YC!EG;OVMo9FJhh8>Dyv_%UjD|Vc`toqM`*70y9B?h ziQy_SR1w2^yNV&@#iM3(wu<2`VrV6X5LDLOBz zr+jQqC(q3VDld8`95J^|i3`3*UU;YM2;Hf}JwmvQ+?e27gj@Ih1i~FPmpt&XnUZ_d zJo}>hb>@3V&D9UY1@{tf2k{No;ncT=&1ccWuTq!HSJ01DE$iJJ9h_i4Vs=bN`<8Fq4eR7%#zruA0x zp0PRd`)=}m6nRd1-_W7A5ITeOwh}s>^agc$Gm2@I532O`5Z9@Z^fMVp%+wtAT-2ee z2z7NzbTEb-Ke1Tl_z|-zhrF6XxfH8kF8tT##0C2Z-$}SxI$Rmyj!lUTo*}ljQS=Hr zw&t%LF{eMIVjF<(l3R|LO?Oew5A6s&s6##V4sRLFr&Ut&G4pm@N;57~sx~u@_O*J- zE7rf0it{hM^x}@t?b`nX#Ij^Uba1kr-fUD{F!6=u)`a`X@4H{79Nmjkv&dUYZs)Dk zD@w12+=$X4QwV9D5Eo2(A@%;)VDI-;i!Y}AXLB|yHKIlL2>UCJm<#CxcIR#nUDWF| zt~N0iI|sd~YEqH|O;P<*(t_xqkDB`r&QnRrP5uV9$CMS+qvUz1558hepy#W3u#lZj zuF)5|zE7V)8DDsi?^fyac5vQPcd^$KSJH9Ipx}tel{DU?+PY>+yPU8;oI@V{(oOfZp{Tc&Ogy{^M;B3wB! 
zrM!@Ee@t-L;+OGh-u$s?za=`jpB%6+RiuR4n?Nltn!G*qq7L^8;qpl7DN>qC_)(FR z?jNsGI&!gvl%{ii9fV5b8dJE&={n?GLMA;F6HKK}BoNDYb)Bew?TDH2s0urV^yS`u z#9VVPb?njYp?h?w#|YJ#5*=Lm<>+ALtmt5cU8T!=JNZ-`6TIWQ$yv`m|El$t=Wpiv zCoFc@w}*!6aCZ`}IcHEXbsoK0O2VX9sCD17x0z+cWsGYxD{gNyJ06V*PJF4&Ja%uJ z*-DJP`W=vXLRNC_Nu;wPB_{YFcMW#^y<|5YH>amEo||&-yqB%+=N~t-Q@_Oy-G9`f z0_@0Alo1nr_@(GerAACJZ|QQYn_6I{eOxG4^{bPdsZKj$o*|6~b(nusySS9`BsrKi z$xb-@Puohb?X#*{e~en6n)459d@!4~aKU)RnmWm5?#&$(w7z6BOZVH%vP_#IQ_e{&#%S@a~G;!?Lus9uQQnR?R)W|l9-_JLV- z&j)7ZZ6BD+j$76ZT)#c^-D?9ktG;R#=WG7apx{&FL(|AX!C5Oz@*yr*Q{HCg4dsq7 zvd!Fi-^HN#~(5y)4Z@k@RE}S$dIGfN*Xc1R}#a2p@29GAB+x;vJVPAw>Vk%H^&~L#yv>?yHC~TEba`Q55)(q z^dK2i*l|sV%OYIbgYiKdrJXu5Iygkvt@7W{YUpp`gI~dSD*equ`kT^+wui3kwGTI| z9Ge^)98GMcI<^nY1nxtflRq$*5Zec4`GgP5v?=kyufFhsSw?Js)#3i}*2iYci1^@w z7n1Lf4?at6IBHslky|5Tf`hp)H@&CG&ap?$;$cV4My~Dy9q!MBs~!;@jH2zVnI9iq zN=Tj>Qn+`HSYfqH+Ht9R;LFwo>PgdH`lA$jE^1F>&i2s%)ZyPJe98Fe;8we3bNN(N zr?PoJw{W});e23F@S86wR{H?G>>bp#7q^Fgti%1BaGA5wWYueB(!*vhRrkM)Rnfr` z+Ir_Bs_vyte$8sq4t?Kzh2C?~V(!9^#AWTglr)g@HIPpFkzD$Z=k+HZp_3e4P<@Fvb?u z^V3!IXVEdiZPeGC?aRoIoL~1@jbLVZ%sSfAOR=WVbL^j3@|^ZSOz_2WMT~6jpV{ijlD$3DN|<>swQ~oJ4_X$|HnS6$ zgCNh+U$M5E%W?(>zs+CU=mSn=+nGyiH%l{Ku?`NtxVW8rYrEN!ah#c&?V(2E*quH& zxR!R^oIE(VZc*YTOQPlecGKc|ndjqnbN9%0v+>UO;3fOu;7{hYn^zws^qt#7KOkgP zc6{)sw2yYPnlvqkEP?FMq?D(rh1se!+s(2EwuipO6;GqT`Ubv=iK+30!9fSLR>hS< z3uv*oo7v+B2e%N%lF996&iL)2XYiZF8PDi5p5@slJzG5|x0_iH(mKfv!f;o31b@l+ z8>;;k(<-we7Dy^2Ijh}FrNvFS`tSMIo*c}Rq=kL{UM6hZIQ}wxaIlFuGwvT8Y<_`! zf-Ss1mhUMKyZZMT@xgDC`|akj`!CJ-Yw@K5@+G^1ecLTp1Nrg6`Q%9pcP+|cqBlR` z{sdxj(VE)L+KJoQOENb266I=OT~!%>o1x0inRP+C&6N9j{=SS-vOf|ZoN51fM)=CE z=AOexyP3p&`9plX${7l{uV$D3QHOL*I-}jI^acOI-(TnXboHc|;K1@fXr4alq;_-o z`@5gSbEk_BzWz)+cfC{g_}~irzZMJ*o|u=MHR^>5YXar5<&(Dj!9mvx@xhnuD&^Df zJ;If=n@x|ZUnTc$53T(q#xXb;^}<$c%!B!H_y3(MwwW23?=#wNGm|s7hhF_;a4?y? 
zJM%Di82jEC$|h+yS3kTxRQ`!E?$^@?2PZr~%9eWHGTNU}*$|q`u9*$k`xf02ceqqb zOoJ@+N7qd$Q%Cdtg?}*)9&<$-qqjyIkG$V^{rg3I*9VsLU4I^TQapPMnSIwE;Q0Ad zeb;ZtZ8D>c_|e$RjnT&6zl^PXqVM{2#`gaf>bw3cet&{nj~jdC`t>((Yp|)mf4uMd zw>iFTf8X`zlsl*I`ct?wi0^US@5ArIJ@^~`hFko%Xk-1AzUz&*#u)2(7day~#t6;o zyZ)v3`mX;8_gUP-xGRIv#%0D4-21=Ucm3Cui9__pSI_{tOo%oAr<69iR-b-A##plQ~ zj73hl{TuxL_aw@P<9~gc{6`1*{p;uZuK$l0`>y{3{g&gue7*1b2d{GN zjGf#*k1~P%;3@J7_eZ#|;=Y0VJ=~w*ejE2^xIaZ-=XgEtQ@9&&w;(HPj@zD$b+FI3 zwSiq?)y(&iC(~^6^zRto7T>|a4eSH{FN}hPYxh-eTpDZA-3Ns9LfS4HMq}ur`!_;e z-X#2;bMFz-2?_f<>3&z}DQ{YYwz=OC+74|D9dQ3bNQ*ZQvd6tc$X@RN$Rf{HA-lW= zWWHyMkR4uTuCkba@@y97)V^kUHVIj$&y??}5%Q+?ljo@xQmILf=O;pz>71D8c|*t| zea+)NtAxz=rjVjk&q^UBI<92TYeI@NiTAuBB;PxTP*I*`LZ*3#LQMA(AvxY#AU*Ey z3d!)Gz`U|YYTY~ z9l22_`UssuCr}$YfcB#IP!rmLoMYu#i%__&j`!Tg)tu|*iQ~5Z_M7U7 zft`XS3mc|cJIo?1&Xb707Fcg;f7om85wJb53&MKcqhU?3Q^MSC)eg79T7`AG$HHo1 zyM>*0t6IGlwoTYE_x-RcSdFklZdH?(!>WYsb>9aohbf^x>dVvhUE$C=~kt_1C}N1Vzc4}sawFqnIR&`-EtXWu%W(BZDVQ+S; z+@A)k7FMZQ4y;nxQk@GEVdcUWYL*VO3!B%i`l3;=LSe<4CBgE9&FoffGX|C=%u2qn zo+UsI3Y*d$2fGYQ5|*Wz3uXu#-#rL_=U_dz^ygoyW~X4C!iIInhn2(fgza)A!{)=Xgl%)(3@d@933Iqcz-GacgjKsn!luIvVXIszu&J<~ zl>YpyaNPpShII;C>>34|0P7G|=DHP@25S*E*Odwz0c#dkjI4 zbKMRz83(8oHrbU1^TW!8WxB?|F2L-<#=6GBK7thrOL2{Zb-?n3CA!jKM`2mQEUr6X z`(bGjR?n9W$HU%(B?;>Z+zHzWGlX3XWWX9=JtM>USI<{@?}F99I)$AKWWwHrbqH$< z+ykqGwFo;9m%7uCT55u;>?846bb72lxp|CUlM_|>kJYgsNKZ31-WeIEb zt91z#ury)&`~zT%VM)T8{ZX(om?3P3e;{lwtmo$b{B!zM-&q9f6t>Q<`k{PShp^Q; z-aJ@~uoXJq$*^W&OLV-Muts4E{H2_4EUa3XUHeOcRSKJ})0YS<7gpf65Y_^-3!CPT zhV^;LKVdok7+4Q1PuN7A=8Lc_Vd*-}XJKi=M(MPige3_}(rIaf8Ny<8S`NT^lKb=T zs!ye5H>^|GWuGdKw_qK@Ts~DEjj$GB=X~ngHo}^Po${&jSOaSm*6vefz7kd~tVPGM z3|1*@kB(yztXx==PnCHo%r0!J4m$@{D6Ce8ErjI>TdU)>!Lo!^>9D!5G-1ni*h#P? 
zVdXmB445HozGkDnS0naCJ#|s#Ub5E|Ql#Td@CJnxXcFbUBxIUSL!ZYlWQtBfkH;+} zOULE%bO~W(inz{s{wid&Pd!VV_H+svp>zM3=d6$f9eby0gzvrlsQ`o=s{+>fZ+Obv>m512#LY-_t+RP#>z%ehNHa{>xk6;vH(y2ktA zSCAiFMCZ_HbOIekhtL7E2kk;TPy=$Hjc6TOi{3=5&`PudEk{eyVzdY?K=aW&G#AZ5 zv(YRx6XhctnueyL$!H=Pk4B?WXapLD5>X7g_D#}`{K$6Gg1_`p{&Ls?T2R#3W&v9@R3=!wg|#J*q{Wf%Qb}(|X?DQ?26!tW#K`4%-Ur z5N6R~_rY3(^|@8pW>~YZ9=A%%4p^hGi*9vib;7EJoyAJu!do%)iW5VoM;H(UX?H4v%vqV^vumYVv3(P5O znoeIIcHtUfIqoN}T<4no&}Bj=y1%1)+zZe$p<~_3zWWGPEKFJdJpaR{3scrV&;PI- zVHSOT`(YWv`mn)y{)eRq>%rdU`5zW1>>{=`&;KwpVw3Xx->rJ`8kkGiNo;2B0I)N{ z+OUs#{)e>*JD}rO3fn7ew~k{WY^N~3%0t+Buv%e_I&3j)wXlua-%QvtVQaKME38b| zO6_k7tXSBxZdJ3@tm|}Pi*(rWupD8fI(@0I3}JKh^$vrj2rJZK<6v>ZY=mtHUBeDE zBQ|M0dt+K)y)c)sNu;lV-8soWVHupSf%mA$KVhTw`A)<33QMLu8bZflJB7vTI1a&T zg&ACL17F4@|Abv}sd#t6mI?Ffx#n%KGGXUk>N*^-Vqs@o>N=`n(}kUI#lcp=a)h-q zM$izdfMp2Vr{i4=OA*$r<1K^53EQFLoeML?CUt6mMKG7Jb=qG(?2NG0I(>PtHeoAV zv7}`(Y_G5-I?b7|ox&FAG>?VV3bVV?2%7?1Eo`=HC@c}SOjrS99SwXhoct3ujd6_z z-asP%gypyf!FpiRg-vwD!!E*dgrzfP)4=YTQiLVxd})Kl35(JBasXyV zY*Hs9QL4V|hPi}Y4ybzY7VL~LS3uRjMp&D$a{*NkHp2D_JEhOJ2DVdJyJjn4wZd8g zs=h3PtroT?pz6ya*fL>FI^I%PnXs)oeRE*N!fLg@LfCX+Yjyf;upD7kI^JAZhOp&2 z-bt_&VdVi;PcvX~!sZ7?!$!l*h)wEbBudqTWSC3XtUw+t9(G39^ne;ArS)S8whEmZ zq00W>Ep!sJGOvNRk3O~HouLebV2dx&rsmd?(B(y^4u*iAapbLe@=xaIvD-m`T z8w>ZUs$b9b0Mrr*jnw+26GCl(*AN` zYlJP=*ER`OA*@`7&44WwHeZJw4J#2=qQfS`3WUwl`4SJy6*gU;&wynLo2t)u1sgL} zShhZ&7nUGwff}4IDxc55yuwE4d_Dm?D=fhuu1klYZ9=1|HD=bJA-vf{|{Rx>;%>^{XeWsSgTK!dj+gm*govo z2KG#)?FwtgZf(6>2%itn7QRi#T>?uN=FsV$1xprIt=V*#Mc68REmOV!9*2kg58* zuXz3$y{RTz=`qv9M*(q1mnsuIi66z$Ddjlf? 
z|4T?a_I^XC((@}JE!gR_e^0ZJyk_vGmHp)j+(G)ZZ zjYp%=2$YCoP!ziMVwBN`uAs}vkKE`YI*-mpq>evAr_l*?2pzaV>i3Omjr?tYgdf8_ ziP|Gv`8^u>tH`+3x#eifHW$Bk0y zQ@{Jud4^+^o=&8~UO?(RJxHbFDpL6uhg5zhqfs}wX&g^LSxA+^r?c`-cCS-CTAcgS zS$QX4msf538kS)Hth_Tc)uUQ>FRV9WsX9a19`)4chFuUgfz=p1{liWPOJf~HL+CWD zRoDntK=AYr+bt}Cng52+A=oxyQJx{Ny|5Z#Cbm7TA66yI?^aK*+h9wCUBC{1zMTFa zS}ODmbLK`~gP#5Gx8$9n6VOVin*YxezlX4s8}$5tmax6puMK+sKTX&!>{r@9VUvVy zbE}?2&Ho$19N4rCdj7vBVyim&LXqlC)ck*^uvKo=pG<{y2&-_H!m?p4!WQfFO@K8E zE7R#qgEb19>nX7m8txunuA8w4J&U)*|ebKHnNxv#@sNym|hEH41C#ews0!hLE!U*9hICLo4gQ zLRgcIMOps~g>BXTl=WXCtX4B+{TB#ZtC_O?bA?rDt65q9nZndsl=M-PSb6=j)frmM z+$&G1ZKo6TLilxY!_B>eW(<6O4v56?E26gSgSAxv-tI)LfCF$ z)mXdX64)kem1ep4s}WX#m0i#G=IHl?EyilE$KoRYgq5)>pguGjRw`_+YalEcRwS%Q zvv`Erc!p3UGw@}SqZJnz8 zOJQW7K6CF45&Jk1MAgRD(`KFmN64w&#tiKpRh#%RYvKs zQ^HDFQBWTm1q<8pD(sv<8Y~GGw&E2lWb~n)o&U){32S4NqMqGs$UkAZI?b11Rl+9e zu*&{lA}m9PRn~v0u+iGir~N~VgbveZY=>Ee#p%#3uxw%1ur=%X8Y(#`tQT9eo;_2@ zL1Aue*ZR;_m_=A8wrf3mrjUc$PE}WYTAyz%?1Hdk+Fupyl(0kke9K|2!uD!b4%;nk zmkv80woTYJ#slbsVKu@W+TVEk@G4=|`Z`i!ON6b`=^F+s6;`3+je`{lTddD_jXQ)@ zSef?M3(FQZSD()fOBYt8{i*wOvao#Z?=;LJED!s=K6DJ$tL;;jf0K2*hhP_kW$N?o zg`E;MR{PrpYZaEF**4g2VTn4d1GY_=MYC#Hjj%rK+4|5bSe39ItjzjQ1#F41i`bd< zp~bLLVP|#NGFXwYlRE5Nm{l0N_Yk%SmMx6$^1$+8>B88(2bKp*7WNjFDsy8ni?BwW zmP}XJmRGsQ{yq2_>k8ZQimcUSlq+n@E3!(bEXftN5Z*;y=X%PV5jeoh9$w!9)! 
z^?BL?VOw62Y#moiAZ*JklBxad350EVMaGgJPR8lOw!9*vd`~lv<_v8MgspkSl6-0S zas*nnJ*`NLFAuUV5VqzOx#q2etPZ>{etOlsznzhhz;A@P)%t)!XJ|>_*TOoLKiGmm zcs5?0?KC#MGc+%-OMIPBA@iJ};=o%G_RL?Z*%wr)<_6Vr>_m+bxBqw>zu!X2-G%m{ z1E?LHMCXtj`Oy^=Rf^q<;?W2+9!*4(Q9hc37NJ#W1zL&TL~A3i;+1IklTk(~u2LDY zqY^a5#(B^*G!;!o6VU`T9*vDC9XA6hHw`7DICS-K>`Qb3ok7P?3)+o#A}6XrYfu%c zK;@_m*-n)v1#}jjLdX6u>VPXLKT7?-+c`dg4xv40 z7us=yqMpMJpBq*mZVY>{T*Os;ESiSqpm}Ki4O+nQqKKB`u0*RM?h0JR79-_Pg`10J zqgiMsDnR)WSN$$T>TilGe|DteEJf-ZOOQHeC8|OyF6F9t`{P^6vGP}eHnS2%*(XkR z^<;Eb&%3J{KA-t?XQ&63AglzRm9&_P(5u=eRXz)`_8A+4d4<_@Xf;xQR#>j~r^e}z z37f=BzLT$(F}^J!gjng}YC2N0boPLe4czr%KPG2F+@Tgu+jngj_ z#;!atHBMh5%!>`~)Z_F8!p>t0JM}nyuCO!M;7&bGpDFAF_NJ3>^!Crqt9;>2R+t*6 zPY{3mbi8Vu{_3r2U7GUOtm9DQ^j={*n1^?U)HwZFVNUm8m>Q=)CTyL15KN8J?-#aO zUxyl}ZxXgbr$vp^JB2M_ZrvGDfoLU<-xq=~n5R1}hQP)UB>J2UZ|#E4HySG!d37thT!t#&=;O zR=ui2Yr9oGkAkI&zp8GP&q=TZVavNmz+zxmBNi&{zdHeT)kFRXn~w#}=m@Oerr(VH zugcs7J0|{Sb!WrQ!S)NA-fe}Qf;9=7+MNMwhdG61cTa=0z}5(x(47g}1FI00);$T< z1Y0O<1oQk(cJ3tqge5TA;SANn3WP;^gN z5cZK~^I=yb7V2hJqEz9p1m+cXl#u{6HcI{p+mAJ>#zx6MVehdvUX6{Cf5LWR;j6Jx z@=sU;Ywew(2{5Oy8s1f4b_}*g*qd1MPQL6*{t2se-3&{BEflttcN(0$r$_z?TZonJ zR=^`4VDV?3R}h6ct*!yXN6S+o`9*5 z`eVWtvtAwx0Gn{Xu(H5Rm>Q{X5;iyRG)#@uJB1aouHLCf>emR%59GttNPUH{yg&g= zjnpp`HaRc@rbg;Zgk=Vvf~k@E0%2nVQ($VOK37;upb(};>NABU2A+Ybar#tYmcYZX zYFL7>KK}~XD%jPCed=H(it0-$U|wMt{Uc$EVP}P%_1^?jEA@^EW4BtETBo;PSQ~Tu zPQ6aAN!S7Z12DBt&naxT|8baFr?*DfTmHLXYMow%utxu5Fttu^p|FkqdthpvUWu?Z z{`+BSonC>kmDs?}kXomgD{Ps63{0)l%M`ZAe+R4&yE9c-ssGEc9$12~IsVsR7h&P~ z`3#$by>Jyf3k%QBD`xZm0Co}c>aSe6n4e;I!ukzmk9G}whUGv z?7Z)X_*(?a6?Voq7FG(&6n4Tl4mJmtDy-Ev9##lT5Vp^EC(P!$98Kn zjO(KKSq#B84un_QD?bZ-%OJghkHwE&6K^2A(q8!~*5rKPjQE-9`v##t3WQhMD?c{h za!5xYyv|;cT;Ge3w!jGqm95EvK)a9$zGv~XC-8xgvA(ZEngWM~r1%y>wgp;*Bx&LZ z>=zQF$vUy@xdV8Y5Nfs9^^Q=lCY6ErB$QW^rDD}Pc*1>wP>TX@o z$c-+eE9ly16!mp%MAQfOBNgT}YDWjqKC}nDhjyX2&lTg{-)IL zSGg*zk~&Yn)OnRu9Q{&p_Up#D`{VCV!;SH;C5+OW|4pqTT)$RwyaFx%0xgC0Yca=* 
z&;m3+qItMVb0eCAt27(ULNif5vY}~cDw={Oqe*BYnh?==+<$b5L^y<*Ee3t$#lc;&rfi!@Us0O6JQij``n zMgV#u_NyZ_M>A#rcM2=iOxgb(!fc*bh(p=`Ey8j&Q}%zeut}OJ`@d0GhNptCYMoxS zu+f?+`@d3HvS!NuFBcZC)1vHuyD&rhQ}%zMuq*Db5Qnn=^MrYsOJ;5amL=@GW@?>Y zny@qOx%gA-^pb>~aNA+Z{x^iRy1xTc_J2>res!=Dg<{J7?-bUonX>;ogza#DAAidJ zZxQCyOxgdUJ%y(h?=jv5iu5iB$Q}%zQ_*=pp{AO+cmkV2=w z!tCxj_*3?Op|IKR5}308^Mn<+zXenFf0nRmnkoA~O<0a*%KlFhHqreOVU_)F2us&Y z+5h4BdUYM6G*k9}c)nh-B=Q?PT+5goMyV=1`6e$E8Qzg*Y^*7S4#huMXt>GLW3zfjl+9j~(g^Mobnd{Op)mar%thqC|E zgqhg1o3;I)B+SqH_|4k>H-uea)?bZ|l7A8V)xn!6D*u%I-zlual?GGxe}}N6*u$H( z{of*Nzbg->?EhwA?=efy+z6~u*iM~3W&c+TYjC}WKV|<{3afFw3RCufxv)1~s=g@u z-!80Dr$yQSg~FEVIF$XLCv2g<4rTvm37e;{SK0q*!isg8mHnS2Y$mHFc>aSK!mK(D zW&ihx{W?YaQ}%zSuq^FQ+5a8F#_M>M{of)iRmZFB|7KysbXaBoHwuf>Oxgd{5mv_& zuF7X+|5pm@W$t*hw*Sk8xdWMJ1Y*!$Su*&{7gl%JtVY9aXdm{F09dleN4rTv$3ae(F|7LCfcL-ah z<5l*5i?9mT#BbL2f3vW~0afpn{og38Ovj<@|M1%T3|n1ju8u?5|CQpei1Cii+Ws#W zme2bS%#PvDE-a6g51Y09Unp#{&Oc@U=LyTy*Q@OREMa5ybtwBkO<0O94`u%+2}{)H zQ}(|h%%ZPD+5bHe`?ZcYQB*xp_J60a9&BW0M`0bpE^2?u{%;X>7CUmYw*Q-jo%E}6 zQ}%zOur?jG2v#lZ0PpT^*7kp;u-*E6%Kk4G_7?97Fgp&j3v2Y}!Ib@9C~Tunv$Fs5 zgssu>D*Hc6*haT>30sCO&GR2DN!TK6bJoUSs~W;eHB3 zuT9zi<-+1MQ}(}In4#06?EgYxSD26I`45&S%&VEQ|FeXhXO0^C50)nEjAqLIPZD-Q z%}Qs{`o*%Z)o6c-X=@<-4!t6YT&;s$dcyC} zD`N4*L#k!gzAkjttFB~KAp8!!@^hIL{X889zWkR3U&GG)Z)!f_{{N!6q-748h4PUN zO+%B>Bs3n4MkCNL6py0NwPI{fNo`{ zJ`oilRWGZM1F?cAG9O>ZUK9hVz3MH!@Q!$@M!(6dxI@3C7hZd>SS7RP4*iy1Z^SyS z3oT`)+`(7pSb-&Mp+4Ugti)5o=6Q7MhqVeT*6ciNx3HNy-ZQXm!mK*p6R;X#Q#5OZ zRSC<|=i3KcB5b@qUo)&!SgKCn4p@<}VfuVdm{nMu4!aJPE$kXI+m6s`Sh}!YX5(r7 zuw-FwX3rh0jA8|ruuf*^)yOC-*do?xUFbA3;12pm@=w??W~UvY*|1Z>)EfW}zHLta z3EQjFG7Yv{n0f=i5z2vW6ShsqI}uhR%%RVh4yzJYt#|RmI)*M0TBSoL!AgZK=Pd`U zV_1=}a^8l(I>sin3Y*XVhFHh2Y+)rjW*017*eo5#Iaso=>H6wU!7Re2>Z@yq^+xQ| zx==Rj1stIk*acw|+-a~quv5a)bi7TlR$(J_yjx+rg(c|o)xx$3i(=gaW8<(IVJ3UV zIYL#iDq((XOsr$r5@8px9kGsKrNTbKj&ab^E<-)93^N!F7 zm|fTueZE#$p|C7{zJ0JfVdHh0n_*eP)EfZ5XYU9{XdAqLroJweq|fPq_0QDTg<@3B 
z=5harUX9qKJpW^V^0hKjrRM)#!u>9uP4K-8dP?X8mulmSV6DPFVg_4{jZ*W3bzpI- zu~BNCu%nt4!fJ%=$AV>M1Xd;NJyJyZ!)`8Ve0*VH9ATT z3airbCc$ciE!XkJz^a6m>v*qX4=xclU!U(XtW;P@K=qq0Sdp+-J1kvTHugUKKP*|;1bw}GU>0F~MPGfn}yEw&%ClB z>ByB2vm7C{{@*Eo$>SWYm;ask+Tr2%=re4!p=`fudo8f=d-RG;(DChoh2NuBEKR4Y z2^M~jUa=9_^prm={2skx2|8>oEJxyw(qY%aGK87hR;_}i2=ileQ~t0xVHdQGS?;

JhRRd!E+s@(S6dLrrmY3)!L1 zoaOqP5T{OehU>hL8lCFVu5&`(WNm^YG{W^KAuD~uAqlSkEo7NakHvLLNV)I6H13Uo z4k7b>_d_lR!aL%r+@0%7gSZ0W{qPhi@(qP_iZx%$Q*IRGbRfJRp7Jxz_nn>rMjgBK zgx{i9EQk0R9T983Hk75q?F)q8qE~*#`_!%tb)n`!_&s{XQppjXjRN8K=oLxk`gt-6 zgx{lABwqX37$9xXCVu^n#wm2X0t{eHKA>&5jjy! zMCx}xtBtUAxJnx%a^Nw2MO2789p#~1l!LNRCQ3)CCBov3Pvevx^xlkuM zi%z3+$c-)|e?%8?)!$B{6X@s-YL76*`~P-;W2HT47kUeAMGX)}giNO|%j% zk7zM&{!G>Y;4VNbkP81E>Q9G?Td5VP_>>Msq|&I;qx`D$`_W}&qCTXqKaqS;`7;@* zyqk&aXc1b1R-n~L)dMF|W!a2W`KoemL#o_QqO<5cVs{OH-^!dCIk0G^CtJB3wvz5!G3>D3BbjQz~W7#87bVP!hr z%dlm_=3;x({$XXpiaamE&cTX><$KCtr(n~C<$31A+F?1uChO~Mfn^9&`~TJHxAank zsr~=z^jmsy!cufvw)#Hpnbq!p%#Qf_S5ei5VsyA_-={tERcyvOMgaO(QPqZ&{a=Uu z|7p+c)_#`y`d3lahR(D4y)LxS_i4|3hPC;1%>VbVqN-($tIC@>zE6ARqu9W8p;^BE zk=9zi2%@fIy6@ATdAIhH=j&fZRU2x;TCEFB_I=tjZ`GkD`ubN<)rK58u5{n0J+s>X zzmDFqe-%|N&$#MJhxz))S!;R1Rk6qW`d3l$1mMoYlB#3=zki&ymN~8@d|mPOkF(Z> z=IJx}z5U~?wV^pWwhP|=an{;Uq1vsmurBnG_tT!)rsF&5?H_5a4NcW&YV-a=?lIZ= zOfB9WLNfKW?eT6EGM00yGj_l|1+LnhxNZw*h$XE?3nj8A?^6;gcW&T z5vF$lfaQC?FKiF~@;Fo1wtI!Wo4TQpdB! 
zbnWlrKI@DE@ps1kn5KW)_vnl#gkA|eqG`vz>>}6P-aF z=oo52d(kem6TO9+(0iyk;;P>}(6$@g296t%1J$B+s0OV;tI+C*yArqZ2KHv;Sou-M zYf&}Yh}5~1oDr$`RNN{ZDveurMCEG20#+Z3AQ?dQh9o?j2K zeXn4IZA0_ZJ@+J5SYzL@1Q(SiXAe2Xm+3p7?iroPcg}I+ajl(0j>NuQ;~HqhrGGPj zSWR^L4>?|L&EbpX!)t~#88t~w18RmgMb!*z8dx*D$x>6xI$h6Qhzzm*R$4Y*D202lm5uBlGJWb?p2aV@pZf6ta7UZ65LqzD~Y=_~rrY z6Mwgfr#j=Fr#tR0`?8su>r1(5gK^K39Tt2h`v#0ltd6z~v>*KYb3grx%eZ%x^>Ic( zs(Xwfp$`@NYuP`SU*kJ&awKm4n|-VOpYuPmBg4o(eL`}LLD9>{h&IN?QB=Hmg>_`p z@QntgmhAfWQ?FX7c^O7eRprPn|MT4iGd#sxrX_WJ^T}m=`+Y$0d(W?)Hn3w8cYY(| zdmXWt;s!+Rw2oC{l=F01Ul>xy6E6$lbundq#T8?lP6;k>M^VzX?9Xr9 zGh0gdW=gnFmvFjwz_=0rpOkQR$IXrr|Ai7Bu1k2t`kN`?<^D9=EtIguHevdGe3Q}< zy3`l!B9(WH*l?ihW+~Yblx(H@{AWsknfqV52HcfExlZQ$dN=DbPxGlVpUHXevh`5D zb9^^{u6!5xu%+BadP~=*wPCd9ye{pJDeIH&TIM*7gL|H)UqdH)EJg)=Nbk3zjntLA`GIbQtX30-E+MD-+M36?#dK$+!xK3itb zQf8@?SsGH!#E;d;Hjhil|Bqls)8UJ)oRKk$oQ~SpsHw#;y?irBq6LWUUF4qtv z=Iu@NatXiuQNFQhxpghas=S6b{lxsLeLzhzz1>ZhW?KgG|5q)R8rr#EwVNXy6_Te7 z-u^sobcgda_24S|QM-}x9`#{yAe^HYUG?Ty)9QT*gX_)NS<$xPb;i16M{UU8ZQS#e zw2GnR@DsXK)Vc?Z8z%W%sQbME81`Ri6?f@YF>L)%^0voY*hQM| z7`EYqE=vE7qz%KnhDcjTqAm2f9{NmMxa=DF8RBxi-!)W14I@;i>lk%>sBR}~JgS}S z^M?Dz+sLyTkHt1b|80f-TT|q3mpGIGogMG`u}Alm4=DcU$+z;j4hwDDejw6PA| z)7|sfT3cnoNLt;H%dT7g*loN~VU5{k2`(-*?yzJ(-7_$Odu(^Y8;qL8p+T*e(+3^z zPJCm?(HMP?O>P=;d9M!loqVI_=BDM=0X3_redV@?Yza-ryOZ9Cs!420-Y}56?s2oI zMb$mVP%@Txl z$t#Z3!&mAmh8$O=`E+kzMYt!5OE_*8(<9M89UXFfzK#A{ol+Idiz2*-<8+BeUVGmk7!%!!76(z^`n&fG2Rzr%c6cb{3)C3eK*G^*I8^M z9LD-vcp@9MIcoi_n+L8Rxw$TM!ewL@c8t?cW*1V1)fk!Knljy`YKkTDWLBhmDC>WC zGV}bCPiEno;BDPUyP~NnJLTETRko3@JbkvNH2OcS zDFxlCrYv`VT2qR;2lDiFlPwiKJD_SxMfc~Q&DM0+^35ybj!$b!Eh`OaciqOp#e<@a zdDGb=VpX(}Ti4%TR6jM+ww)(&Bkomed{cCA>5HmoIDDn!<>>K4jz8U#n{d24`Hdl$ zjk{ETa@?$>o;+VNq;(^eAKKLsqv$aX%ZkabV2|p54bbMc1S>#ng;yime&f6i4q8 z?@`Z#uUD4o{-q}xOR?J$-2ePWz8)6tT|Rx5f06nVHaRJl$^^w`IU4#w57MecIRD7U^p?visYBaic%q*Ze=<&1m|X6+Tsi zZqt3u1edBob2v|bUz6?X?`t;E533q9$2I(Oea!;;l}_J{eNAoXj88qEhx?b`J=(Wo z!Tu%d%O1U}849}#tzt2<%%`3Y|rK2g*~u|w}g$1B^@QYtkZa!pW?Cv 
zze`z9^?$ak=TO#3o)}vKWnJa1H}6b~w#C(LVtw9?7F!IZsw}pRyzx0;T&$Gp|2vB< z#u59)rTT?Wl3|N2c6|(`y3Unm8_ZK7>AEBK3oN#cp$6~oyJDnFV=2=bud>);DASFh zynrgxa?TPiQ=XUupO$HXFVMA-ule2dnKH#T2-Gr`cB93{GoE)7U)!@pM;nvaTjt8^ z(Z=`tJl-Ai zhH*y~&#dDpb7hs6rZ2Zf*9_e-gw~zZ`Kf)5Mb>SxmAyN(cGb?cIIHFZQ$87+i+dyzou(ITYPP33by-%=Tq*Z1V3lHcToot-LbZ(dtK!lSy?uu z&hmDWevYW}C|lj8OMa@h)ng)e1bJrtZ#+9p(6;);FS6CYu>B9}$cfnM8`-h3x@)2> zaYNgGu*Wy@Wa$0O6NJu$7cV^pvxHJE?>~ogZ7v`(cbGS0mF=VqUiO+@j!k*RW`BkMViy`Y1*Kg!| zjcVLPJ;(j{X`cF-VKBxh>nwgiY;ZZ&S#qM~LE{cp(}!&s@_ThR9KJz4H9uOWY_l}m ziv0U*CrqO}`t9Y`VT?cxX9Ox9n=FA5sEy1MDqHM$$5P2Pc4?2~+D8%lHuYfmiFhA* z*5nH3S(>}v7th@);nrbwSYnQiwAdu6#b>c>hyEwI_qh?IFU-A1JCYnjb>4l>{}<-n zXkCkku1_N8Ci$PI77wKsPqYo$F#a<&H_v^4SEBS?L+HCEyC3UH5;7Dr(fz+a6Dr-k zhx$I8ah(g^0e5}!;YRjQ%lKUFPU8-=kuixe)PBP^=5v2f^7*@JLsjlSVNIz~Db-%i zVuRmhJ9R^Q*yp~?Uz2p3a(KNQC+g!Gukzoj{qMcO|5ok)CR?Cu{tCW9Z$ujnk1|&L zYP3;W9c@@q&#y<=zQMhYUN~~ETh6^MWztQT(h|qn{ttU!9v(%N^AJnXKcf+v<4oXs&m1?3R~iqYhovs# zoeB-N^9+}0{TA1GW;nB*M&gQBx0d6pPeShw=YPPs%Nx zYf!^#D>QgJ>W^QMbGln#%YRA1%`WunL%SR2atVzy!`l;8YqUW^u%HsA=Da^sA*gCs zXywHr?HVq$SjG3^-_7O(IXIo@gJjXyb*eJUp0{qhoKu!Lz)bhUB5Id0yX6+0x+<)O z&XLYfdv$dNaMYzDIvR6cJ%2YMI&^RAqv05Bc2IF5c;ur`|Dm24cX!0S~OT#RTwx9`;xjtONIo z9zNQD=cd{|_q+2iqPhywb?fJl`Omk~n~uPalyh3nErs1SwOYoVos4)6{JnYwYzNqm z;_sQ@7m@}y@YS;vd`5oJ?q{{H&nBw4f!~n1N-H;;qgikh#j+nz$wSv zvCTmjGZ%j8RuyZ;Je;FE0&apuA+h6=r4GJ9|oQ`yL$Dhh|7Cao6K!;w{+rtg2n=>SXlO9Q#$Ss?mjR_&c5HA}OlN(Y|`>v{-O9&Bmfd3W3?#%S2WImEfANS6@Z`KO! 
zvs!4aKXXYT72HUxxhJ;i6HN*q)rWxU*Sn61vu3WqbL2lO_|L`8zdr4HU7UdyaU&O+ z<&!$7d%lI677Xgnv~V+o4(bkO)IGAEQE7&=2DH5+=iVOD_B3fKg}nS&E8#Klst-x1j94R-){FK2H-}nFs{O44Jf?(eE41)1qqE z8F9W_KINXlnc#>j@i-`Yqf52xbI|jH&nUR~ujE{e#Y5MIbPrAM{-c81Hvu^_-jQ+p zA@>q(FWb;T+n>&odujWVIYiqfpzYO+w%>xEf@}L;cQBhv2of^*U?H?YCHyt#e`hJ$ zH9?{J#_VUcJM-lw(07DcysDPw3CMqpBdRe=nD@MU+Yg}O{^rnzG6|L`Hb+#lIU=;7 z6uE&y8`Rb5_)d$}L&;T`ANFE?*kKQC(D6etKTP6{Qnc%O%@1$-=7*lnD6i@=d^5yY z>kGaaVwq*VcZM)Z%e!djXWSayJMCPg#_XBXau~18{Ez$u^Ds{w#Pj@TimJ@q?q&)l z5cf&X4EMTkh6}|^;o-J>7>E4f-(<7xg8z^N=V!kG{3p+20Fu*lpYkC2>pmo(CZ&32 z0Q>MR^UFTRU%EX1Eys*5vr>)YPH{Zm*i+tmk#Xb-`#Q`Uk$eX5eHm;36WP2G+7JQh zt5k}21XUSP&w#81=R5J1QF^#_V3%0~8eD5BGHGy$3z+N`2|DZD+9ZC(IPE9$-6dZrT!(TzK%DBYw9v-{@DFrtVTjhQafA#*8N6sqV{T<}2?;vNT zdgUy&a7)g&kh9cG&H^WKVNA~Y+9zjW4F&k_v`!5<>x56vN+p@f<>8^vbegJg$XTf> zlXM=kRH{nD_i5R`ZoTYQ#`>LERUUc`I>1cCRD?o`CW$W0I|_@tUs8hFbDn?r}3m#PY9a$dGAsOn{Dj@u+X?7S-F zeJdKVf;u*UU$ z$HQgd-L~T%zE$0f_$F*5?`{k@XOH;tY_zk3XHBR%ag}F7z_W>#t2}Fxa;?{kXWzPJp55ukvu``M zGoI}mPX1?#0F+&v2TK@HRcG6D5TreDsKBLwS1Z830yh%88t>I9hV;^6KIxuUr}feD z7H%}nVb|tO|9^*9r&_!6Dst;~;nh^@{4Ts2Z|%yfG1hD6)qjFlji?52`kHywC>?3N z%BzjAK6K^Pr!7HMsKj)|1n+HUUVX=tXTYi}^s+cWfr4lfd-n+KLBf7xN)V{C>@9X zIR|>}cTNSr)fo~NRu#U*B;`9affgD+4}!XfnxobnR<5Z~8$tu^Gu_&16PSHQBZNU} zc-kIX7svk$9Z$}8^S*9vxBYz5Zv&ECe@6T#w7j=k zL+kzt`YyD|xw9#V6I^-y#_j@q#WZ$uzp*n&`L6%_V`s8?#%|b^|9_o(O zoA{@1HzDRoHSBq92qFlcM+Cvo>D+MQyU1h2cjP?@Z%?gSaVF_NYoHz%!Q?*D z<4EeeDif*;Oi~P!bvyL9vDZx|{O|O*I*Uh-TcI>bEv+6s&Y5RGd~)a6_y6m0Ww!=n zj!AQLmJJQkFh^%S&L9nTbk^ex$RE*JHZ({Y$F<9bq{kVNDdpPrI3wx-bjXGV=?(bc zJIjX9-s~hB?){fhTI;$h#eFLpq#9RO+0cj@fF|@rhinK8c68`*2IMclDjVwmMY17$ zsP?Y1Ve&^F+3=?l8Rt&&$cD4?6kG;2k`2pYqxtXGY#1cu*72_8;65OnSMkV-vA%>w zbh}}zPlui6Nfy%$&oKUetO;wvwIq$o)h3EwxHuS*_y*)9C4LCGy6R#S*ER%PoyMXC z4AM@UoCnu9f)T~Xwe>HM#~Gx>h!&9lYP`-5fzblLV0-yV7hEo{xS0D!V6;HDwg|56 z6pI#+!z*Zz-f|xo6Osb`&jINe=pvNrxscY5OHCUf^6zta6U&LiwzCmiU{@9}Q zU8BM^E!NsD*L-8$C_d0{&t$srONWAIM;jcO+C98o4_#YHyQBsu8|c+X(8@BY0VOykPf~;3!)rX+u7T8#_!$9 
zc&3uYQ*49n4KcXv?=st`;x^p-wpG2WLQMQ$IhQ>}!TgRbwKA^%P!E2oCM&p~CVB8H z@d*WITXuag?2@G`hP}%0%P5^hgQ~`jv*ZN_3SP- z3oi9{W+O7n>Cn>x>L-qd)E|xbsF;C65#wh-weG>_^#u&)W@CN8kYc4b#;-fyZa3@k zy6wSi^WgF!6=HWWe%<2S2*}x7!VqCF`gWUwcE#-2!M z6AL~zNL8@Hcf^_+rT&(~_+>ErrP6)?s590U8SpMG-nAF+T8THk-D*UR^TG)V?wPOU z+}+4;ao`OZH*CCui=ho`$Lg`J<-eC(1nDPNC!W7m#qYWp^1KFX5RK9|j*NG?o4B^^ z1?f0C0c#J}R$gF0eQz#daaHe{xat^va!G7WO7%efh>{q7R5dsxetgNy`t<76h|Iow zSFAv*L3#<%!VN-yp)c0y4G`7{QF@&)Qb-X7>%k|&5Mh{*BqR#)LV_?{C=e!;OsSqI zXiNC&bRnxGyE;`rzhpu6O~RceIn|?t8-=mL&B7QVO&BeV6V?j13cN5$e~U0hm@H%p z(}fISsxVEMA*>Vb5*7$E^|uMRB@3&wg;~NpVXiPom@V8c+##$N@=6v73x&J&_Xs({ z?}P`12ZZ~DdxiUi-wS_&j%q-DgV_heFMhXrX8ql}e$v*pr)&0s+lk|8f7;ElwP&kq zkq03$4y#}%5(YG`fm9ic1uv?eK$yV>A$b`C@x^RP}0x3KfjH~DXCtrBQ}>v^Muho# z?>WU4F1gD5r?SP>uSz%k%q8zIf5{N(9q4gER;mFjyLd5AkmLa?44wqXePJhwOTfGt ze(~_CwS0JcR77e6^&nR|sUgTDD-AYw^eOEtu@z#Q$46DKDIbjfigg`xx4p6#9&+-S z$nh)(qSK!*)_L*E=P&QHo;Y9z=+@uOwe8cbDu15&%)5z>?H0qhwk%8HPE?Q%J82vbIcXdZIB6Ukol*FIpL3wNknB@Tc~opwRhU_?ea zif{#OI0&~>Ei1iItpXwkWC_px#$4Gmd2^_FW%+a0;H~V(iUKua0^8NBWemUTHpv&FwI3@uh=OGftfOQ$S z@+8qw{6Bf^2f%hDU^P1RaevmweOVtzu|AGqeXMpGP~lw8ud0f~oiV^^L0a2-V;79Y zGl8orSaA~Ywn+X{{7-0ZkhD$@n&)^|3;d%=fR(rFIz>n?xZeVObW96Lgy(?N-#MU% zV}>zGryVu0+E$4*Vx3qo?iSy`(^oRgKkhP%OaCF~7Dpq`K!c3ifvpVN{(2erH1_$} z45Jm?25h-$3NHI01*gV#J@8%QmrDS8DZ_WU7vEi055B*`@V$ja5>x=+UAX7B@O{-U z;`{T!_tn3M?`y6J-*vx;?*+e#?`wYr-`5EOvcn*cCGh>5)xVDKtAOvRe)#@8!}ry| z_cg$G9q_#X_`VkSz7F{Q1n_F6keunpKli3P~+HU73I%fGsZtFtiU&)aJ~VIe>MQ)TdZG*D||@)ERg)P)r0Yl zSH^Wgf3x-eE*O8vI?;#mn{0&fkZ_<+U&Z)j*2{^km*ZJ44`#g_%X&H52LF_UFqJTV zs*^Cj*b#{F+Z~X&vHBst3&zj45ylIMAMwcigz?KAff%pD{m}js(|j1;X2neGAdI(J z12KLD!}v=Mv$&~A&Xx7XI&;JbAHenwwr{F{?bvU?_SQ%RSA}hL3f3C`9&5<3T@Q?p zv38Z=8+V8E#poqS_sM@~qWD5NbT5N+0mo!{?8_{V8`MTA4r?feVa=F;=;W^a&9!~1 zGazP6$;X0PABO%eAdlY!roZ10{arvs6Inv~Z_sOiEcOd>)%b!5<>}q)@4s-zFw9$A zLb@;kG`q_2bhCLbH_U*V%AvJuw7mr7#nq4WsZ~wrElf7uP}|q^7C%HF4Sr~CA5)x= zRI4_Hw;znSQMjo#6o194M+xEW3qn<Qv4L2p5QcauPKj)%X;a7@PQ)?3H^52a^ua{eh 
zTgoi+pesYig_M#(y7G&r>^u0$-kBiH?YVS|<4^$r?JqK<_fW!MW?fKRwL#x_j~P;dQwKWGLoV9t{yFUqKAR7L-ZK?z z%@d#EI#)sC36AR`+fBkw|d9As{^Yp=Bt@iKcqMSdC$@?;@ibtAU@ zJ7nD6TVZd&R(>z?6=D1DF-tEscD6grRq>y~)<~GO19pa|{4lE=$4h`&3m9fq$&Jz> ztZEnX^oTocwsV%=GgSw7>M5NifQ ztQoLW5Z(wFndtWH66A3qJjA2HK*NHIRYK$`uF&=ZzZtidFXY6YnXpAuq zFy3H{HpUuf8b@KJ<}vr##_)EB=0;q#zB$pDV2n48FeV#^8`c=Qozcgmr`NnxQ)=P=;drhHO~<0L+O)$``^tae{0^7 zeGh-D_t(An)$?4!F##wz)BDRV{IV~UOa9ng1Pq+zJ-g%IT=IVNOAPObf_7T|%=OVEOju3&D^OBK>|9v|Wd9hk;9Qf&XJ2BN>_ zT+lVsf}f1k%?kf+v4d-STlZM`QqpmYd#>XOI>?dhXp4 zkwB1KJ1?$hmP_gf)M)eeK}0|wOuI0q0e%7KwbTpq(F=O%DaWJuUdk(>hwVU?u*8Ry zzLgf8Y_>hMT|QdAEfSx!o1sod2Sma;$sZ07CH$LYiw5$(Z)1d$hcmOmJm_p?7HR^{D~(Q z;>o}FA>s>Y$pMEyB0kFAk%qMq+y_$+$hn=t$i-bQy^FEV*kFso_PC*vM&Xmav(tc>L2cOhK-sma~ zE-=&eO5{UCzrfa_M}^f;#)J1TS@1BlSM_gDWq|fdYnAWttepFcZ{-}stdWf`+l!UxSzHVT^NWJoO9MxqY#+x=Y^jy0C3buqRh%Elm>yesiP z()F(DTO&wk?GX*VvBnY-sk66l4SrZbRM|<~neM%_ACrze@i|qxK2`m3=OM%#YD4=} zla?Co?Blz4LjfvUjHQ+GG*UME&kRXvv>e@duwg58t!1N?W{GH^Cq}}1J(0f+BX`IW zhLKyt(8K}#QsdJvd+5RLcHI8RV$6H)!Cqh{)T9`5J+evk@_y%iO%dL|rd=4=@Ubsy zC$Au2ePp*Q-KR-h!@z3HcCD9NI-Z%)rYh)o>eEeJq6^5SHoo0*6R$BAb&5XMOEH$O zy2yA@mcO<5N3nzcEw6M!t`ca*Ihf#(e`@;0bH>w4($%{c(I#gbn!&LCb<08^8?sG`}3=`0!OGU-r}?RS)CaY_)P$gPTt zVNaGocc3}MKW^}O%pp&~7n>VbBDEkYpiu-OvwWbRUTS+*g7Sli2^A@Pd3p2J#+47PxRBfP zXwGSCDa#4PW$2~j$g4PwY39)UP<`TXOH>1W{S_7&Rp0(a1oZ*VAYa8hjU?|Mv&JIB z;igAD*$+1~MtpJBV#Nww>4xV>35y zc1y^Rn1MGOEk5S{0#DpVGhX{U$Pq-jg7&doK{N{{W7Ly}VQrFK6P(rnO9Y~=G`cYj z^3t0d6s55ZA*Ev*l%<0kRLG_$DML%gHK5M+*C$0=)XZRN7*%8n}oj)_e-ovVpk=T=ky1sP7iMps#7I#38P{29nHU< z^R2rs39BA3Cr7c^*YSEtE=g#)ZlzW)jbR$zC%VyWW?s&o-4FD3#2fLyPnTXb5wmMF zbfc51bVU0L(h_Srn`N;EtPRgh+)dX$V&@)Ty(@TcS_@MI0*F^H0N zs2_NtFFh3*Z4i^I;PY~3^}Cm+u56I8wpLBOCU`bQoLon zw{_DlyjqXw>%<)_OTpKHvoJ-o={;Ers{OJQY^c8lczLgVT}#K4GTJ`Tal@W#Nh{0U zbAjbpQJF8#8dfu$je9C`UaXPi4Wsd^!~5%nS;;jc*sqkidyRClzMW-rof3qxYxSVa zGPdTye{7~*$gUSqYa@9}{H_rd;l;UeM$7i#Gw!nOXWW&$BT6EL668oS&*Bocx}~_h zoGgr$EO*S7h|0*V(-+>nr4f|g=#DBrn5S4+;2p0s;Ki|9&R7R$jjMPO$kTUwqdR&x 
z;Z(sQU{Ds2CvM$CDGYNmk@s(8Am$_){YEiBKkGIQGsm1#K9V6$!1dk65Kb_MLkFj5 zuoB4mht9?M^sS*~B~qRJZ|D5ul1mUbXgs$CnN}z&`rmlU zQ!utmZN_sczBi1-8&=u(cZzB*f!|=OnCiRcMqIPR{%K3+R^-~NTUM}Ej0v2_yK4hl z@JRh+v>?^Cuq6aG{0-pcW}9Uxs+|6u3<*Q-$bv_-1oaovm=38ywycvX;`pOBNXvd} z-cO1ps1tlYWDtK}fgZJmpefmjT0>x=I|*L_B0(XSpHzK~xjh0JWeFq@&$_2Cn@S)T z%L2|k@7uvuZDjvEXAlcfaJ3OOF3h#?@h4it{#| zj5~Qr#$AEUuMHc?DsTNk#+k6)KSaST%T#bF*mh1=aOtS>F{6jskvr;qq`#k(|HnP~ zOI3UHKw`?~Q6kmbl2JVb;}4&a9{!x%{1TcM*jkeG_dUN|A_{P@btRB(K45w=|4(fPt0xS&j_MNg4#2a>}6{}#W$st(-T5$;?=*Kaw_%UF52_n#B z3FEI-EB*yU&ng&KKK`1t;^|B)9*@}060GEl^T{7Op%tG_8eLDZ(a0AV8gy~TQ$r9> zI1}ClN0{;mJSM{@!y^){qKMNMXvK27B%}E^j;Bl;E451GvkVtBf*RUeqyV`h?Pgw9 zG6Gt11|$pZ_7KPm+-~#+a->A7fZdP;mM2a3`d1G~x_?n64b;SCGcNVMv?9jEYe+#Sog@%DLA{TacF5A_Vb!=DD7X`uQaxF zU};Qgbm@T7L8UK1ikkvkh9(hX$H{~dd{FU9Z5)3ykI`E08J#j`{+`R(eQPI|P3ILD zRW9Lx^d`L76Px;0DT-yyeXA8E3gASL6|v@t&~h!Op*fE??riyskN4(SJ-pXu@$eT9 zD=qYSYcqK7Y0F<)#%=B3(sb{Yr@U9Du`5Se$KwqTTB=)4W%sT2_}LXD2c##^jz(|% z6w$|1c8Z@O>UcW9+VKFWgGXe=bbdIf;~8sem0}BYQfp3JG1kq2O45c`r`C|oYLa6j zxN|lvA0^T<=2`#sys!cDZnwPoF!QYUJZt4d=5Zp=dI_vO)Wh>EHuP+|kz$1=84rjP ze7&~K;_2TEZ?Bn<$xM%U&h)Zk)Q|kcZzMK&uT1e?sbg2JwlwD#OZBZ4EfamueA;_u zfA5t`*_HX0NqA;?>u&Mt=sxJZlJj19H@k8Hd*=Gqb>hf;@?iDZ6OA^_WWFRhy!YXt z`NOb7lt_1bb^WPk1)lv4Jo?4bsa8Gw^Zs|HTIgC&^gl8ZVl-p0GLL-okWtuk#(1BT zf|!J2=+b117{`7$AX~se%;IBgw+GC9nZWvYU`Z%AR{{?)AMpDa{9c9MZ^8Y&ZPNmN zkH_z2_&v5o58J0+GJ|)ETeraGys34s_uILV8_m&Z=aJSI{CcAVD~-v1UBTwsXT|4z zSC_J@-)I$lnm;77*8EQQ?qv6F79a7uro43}dzQb=phPM}3}}X5W8Z>(4EEn)ufhHs?0>=@6@)J~V}AnsgV-O( zz6$%#u-}CJ5tqEZnJbnKVPA#)0qmD!zaRT_?Ds-$DVE;G|0PmHYd+|EDl8YC)_7V_ zg(p!9c4AL0*p5B5pb~p(K^gYc0t5Ecf@18c1)H#^7HsggV7<2mI&TYBd0Vi8wcr^S z*;4-{UJzSFn|KcE*uNG}pnl3x@nh6bd0(VDL;@s?{@{*pkV)VVEW5QHnPrhx5)y{{ zR+2DghY3Jf`Nc7Lv7?KnkbpRhyTA`8;6a~zVLn=xjT{rOL)q0#%2Q2hb$Qq|Nf~3Z_%Q`uFrz zNB+N#T*Zy(sZ3u_y#-HCWCvT@jvE3C(RDq9hIVHYezfOcn)!K2=>H7 zLD*9}t~fmH_z`<*$0h8k9p|y9cDS&ocG$3|cCo$VSANOQaVlQ$Hd|@Re-Nkd3&|RE$+n_AYZ-3A{nDkzuvPs1`!O 
zvjM-?xyY9qhm|Ln{brp95sCW+W*9Z*TA~?x6IK5k!Bd?V(t|Pn_R&}sOh=w^wb_60 z9?@u@^&Jrnoc0|NzMtqgV*T)$?}&Qmuh`ii@KNAwl*d`I*ZGkr(&6`8&x`ie<7^8bo4oxUQ~F2T3ZF&2p(cX|-O zcSK*&-*-e`5!G?TzCz_YqOS<@9nn|(X!DLQqv{L3Bl-%5?})yl#dkzs@s00@zTzZ~ z{J+A?GTtM*CPw0{-N?RysOz9@G;*+C{i1yxPhns5mhXtZsNQ!(U-VkX5&NR;z9agg zGT#w>k=}PiTNm4i!$!sc;qguq5n6EVOx7fo%w#$nP}Buwp{?N z&=Vg38OM<-lF$AEqyddlCx-U$smXp%bz9VUYKldMZ|Yf_qWtbY#dKb^<&zfTm24Mj z9dgU(A{}XQR3!b>Dzc*~?C4VK|FEO+?8wzha|?OTOOVlpX3-;Fjdl>If%4Fhhr9&y z9R0S>`&&Ea@3Xf?hh?!X@r#ffQuxTHz5k1sl!flPpx>@hSqXupAH%`5iB3 zE9t}ekHk;Vws8JU(G1;jKXk{P-(YM*PLYA?Rb zM?}qXFTTvi-%FZ~GallAFDq>Fi;;OiT~@Epzj`vE%WS8T&yOoY^t%Fmw$VC(@s9^x zbg0G`640esvb4hL_gy&Chz1$!+m|z) zXfR^SxR{ZqC)G-ay5|YHB>R-lJ}X{_UrvMmq#s(Y6lD7N@*sUed9XgQT&_*wl(( zk&-3E`uhF9)2jxS$6f<&MKauq6$YZuV{O__@i|4(DAzy4fj&P}Y*`U}t%NR~>*Y2= zml%dFvBq8E0N<4{xH8Jsxq@qvq;g&K%HM}99rE{g-g*Hm0rK@v7Sf5iLCCHJM<$+xa>_Ne-C754UYXjt}cc~K-Go9c`Ix| z%lpH|Q!VE*PRY2KehMx&8M$tTUmt#c0ddqF{Orlmwad%V`LJ$y>UhEOjlY*Rono{% zMWsj9DEY-8#QqjZ`hb-J#M29*y=Q=qP9RDIwjC#BS|QIg(Uu9`@gY9i>LE{e>sRI4 zdKFN%6`B>!1}Qj-}Yk)fBx^-=4uT-r(=hcq5D0 zV9^-c8!6jOM+`^opA(FUUaqG#A|tR$WF(H`cvwd=M)Sql;kEJgoN1+YN^T6eJptUF z2yRaTw+}NZJ1)82tn>DHiSz|3*?Ic>q&Gic z2gg^S$7@i>j(U8sBWYVa>Q=@e(@iX^TRA-Cj59d}{BIhzO=cRgEy$FzE!Z@2o7{wW zTuYpf=a0bOJ@hxmCBQb)?YIm!Q-n>ovP!q3;^BFUuYr}rGPVoPk9GV*^!I=lVcjY5 zyytqgB^EqC#CR4G#Zd6Pf94zDpF`Q@X%UWdjOUA`qh6i62#D^9QNGYrgq59Lc^-OG zYYAj1E>UbLf(^7Q&qHd2&J7<~C!HHoxmV|=95+SM0^n2;%l_xlx#QT`nJ#~x-^414 zd3b(XZw0sUFLG|l2^p8tOTi^4DmWFk|5UfQYmWs!RkB);e}}9Vq?OHgB9}bx6hm81M^syvHzrWaA>(xILE6V(BM)MRput zF`FuhhSzdQtF*n0;bgxwat66kVGO|omo z0hbqJ6%?0D6)DYUJXubtw=Q4}JoT14!2A3@dh3WKxPtmiDab2Sf}$c+2&qt`edBFP zXcqKds#8z3NQ+R3k*d^pc)b3b+4*;7s!`u9r*_+SquA_B73zzm*{qhI#yA4}7lm4+ zf64Elv1Gf4#uDMbFP7r$&7iUQwvo^wI(Pv$T;c?$H1s3$rI5W>5amZ|<(LeZ=N&e>ol2I-AJO zo)vXZ%DL_TmUCYmm!Te@C+1=k_D}sJ=d_5iNW^wMWc9MnYY15W&Bi}@c-yUoUI;sY zXAJ?YU-+A%C3dnk1aZNLB8BFw1-))(dar+E)&lhJQ?LnWK#MJqoQkBC)*zO9vx3d= 
zuR-e#_Qk(-SLLmn__uDeMsjVd3xdjnyN`eCmO{C<`+6%JUFh3zm_-m$&(5^N?}9${k^khI%vL`fjQ8W`?5_gHwdOcy-7>8fp)*eB<{~ zVD#WGj9*E8B*t%!Yg5Z2@7=k9QGX$bhg;~0hkMO09!}E`g?rOnO5mXia!@fUG!RB7 zdhI0(!3$R){|2$&LfCKpeYTV$DbAK1fX`_*=*X?yu#lFH?4m`e8^!14A46W}yJcK_i=0~zThJ-k zfc9bg@8^_4X_?>Jf{vWd`wXuzdGTMEQ|4eLfhQ{}ydU`cQOqgZS(bvc;MJ#Kv-sna zg{U1ywp4%oqngac5)ZotaqC{#VaX#C$JMe)PkT29^^wBIIJ`!^hnBH%E`Z2XV)dpQ%u>GV)`p42{|aG80z# z$`UNuo84>DZm50Hn*$||A5aU*M7;t}KGg8`HNjN5;FvoXasG>&kGb=)X|c_0E`oKD zOCDjq0hKNPaoasCD|BzjjZ;w-ux-!4s#wfJg_h`D1KB*(Y#qqvp^U7Iwkx_A#OkM- zk9hAIZazG#S50rkW5YHJiGIlGXy3JOA+kG-AIfy%BtFV;ml)}@p+uEo^qnia;K#$x zd&Ix8b-^^gYb|fG)oe7z>jR?ge-FOC+ujRut|krY%|^aIvUEa|v2^s*vcDqzTW;a?kjXeBcJrVN95cy z-$Bp*qi1basR zSj!wbnyp@=bu?`)x+19#SrX!_l+`hOWCf?l{63Qj3&{L-L5EjcD25*=e zalJ!9v4?Hn-^Rr!=)>D{;R~Z^)ZmK`E>hIXZzhizhxpX>h)<`d_i=-tMMg6>Y zKl%Va76+`TzYq8^-m1eYqCQnc(!-9vm=#{L^{I}pnT%W{u=ZFzcE-;bW-Nq_;c8Cj z?zQBeS`7B+IJHPOL zy&ZBFWj!H@jV9EW$t|dj3L0k`y_KO6<*z0SULo?=wgEMswiO`{E!EDUtkxJ?=evj% zEQF=IR{+MfI1mekxBiT;$g#mfun_AlmSZjGK^ZsaRu9hHjT+k7*d9hs>xZMT#v=0i zU`%;u-5)g>ueJV8A@cDO_M8c*qY8_twOgw4Qtwf;_6K@swSU9HJj zETucIS53xuuel~;wXY^45Xj-Fzq2W<7?or^^>@Z2db1FDXFR_Cj+*uVzXA_c;DP*S z>0E)Q5E&3}>7s|;Z@Ik-FE6lkuD}D_uv~iup3|tnQz-Rs{YT6AYpkSNDE;W_sK8Sw zwP6P9T!9C&b!!kXVV|`K88fa*GT(x)ZMLozcwkk6uDKk3drx&nXn?Hp6jf&|l!|Rs z86%`>1F*qhr3ySn&~`dk;Hi4RBWt8J%D7Z)RGaZMav__sk*pD|@zl&x@VC!d3%Q^e zmUyaPpUv#ekD6!A%3!tWiB>mc%FTt+YIuLRB?K zxa8lN?Wo-tirS6y|HImii+Z@aYZR;9_)u6qsr}HZy1#SH~9d)+3 zq>nn)a@-7yku1(C#`)^9d@EwGbYxeqSggM5AH$HXsTW5xa~wxjRK zI8+xp0G)L@<*wIgZr}@f{cg&;uBhXZk=3w> zK<$c1)UKc`BE68KNl~jtX1OR+FH<3l1Ah){6O&bc$d14ooR!+2r6I_e^rU$?B%3?k z^E8OEwJw8XlRI%Uaw&MQW$*_h`E|J;=5omoV*Wbf2;rmSHX{PQSH*Oma(r+?RK;e= z)k>MwM&{q?j-BF+rr2ll&~HY?23bP*uWCd+&9gR#MLwT{54C6W|BcAxpZqe*HRZ#WrYoQAA$&y7AvXzynLfON* zi9{6|Miuyuq)<>rh>r%8pnf}RcuBM)Ih2T0h+8r&OD-u70#~_hq3TQIk(_l85eT9jN|1B zZVI+Bpbg5e@%e5p;ahj}8w0A~L$A85oh2kRjNTPrnka1G6NC~zMu_1DP!7xZXkmEu z4f-U#ES|K3L5OsTh37jP^*OZ2Eq7B%nsE&3cqF6l$1s+!Zlo{*E5Szzv-P(_qw8;< z2}($DgjCDX{CiM1=i}im6pY`CA2T 
z#Y}L-ddI}7TdSwm%&y=tU!+2Zoe8R}W9#VVSaf_G|DLqqauA=!Wg`npUyXJmY#9-1}+{)7LMTjiu|%$WlR%V?}D_0n(->p4lcN4 zKO^!B`BnFFvp_~S}xUoUtr;G ziqUEhA0=p*?PObU{RUWJRb!)#Fx&q17t1IuaJJccn74`j6&x zW?%YAR)jTHKeW7 zzOLsvDmUml*68(2etMYMqG^TRW~tDrZszcBQUPp~Z^93R*n#8NxWv#N;^WO`B~Uq3 zuL`VJ+^wA`67w{z2@Exqv?vYFwxnV`*M9sTY3z>(kTBy~F=VTUd-aGdDwGm!G<)BT zY6vQJz6i1LbUxY^iu2jl54uW!)}l^&<7U(cT_$QOQ}+<6~{vRlwOvmR2zW`|%bD@g*05m*3d?cabB>idTdx%CrvIW2q{Xc38<$ zwHa&N$WrxBCY4?SlFqaKT|6#+2JNs=%5@&Yd0LqT+_Q_2Gq@{qZbPOU*)$^gR5P@= z@mx|=^NuC*_O#%nh3E4Al9!a%FSmKgLwV_A%Q9(J++le=Gupi9QEsH&Ojv>SBR2)v zLuBy3_V)N+U$KV-(J!);Q%!N8>IWSsMECpd_X{#_w7Taa?f{n9VqKG4uH!HL?auK0 zPtSk7NSVJj|0BC9?D)XBHhb9dsNltJwqSQ)X2A)Na`c^dH zdsRfYL7wm3ZdZA~muuUrdv(dMg)MonW+hVi$2YX3Cs6F0Y9Y5!FDiCeY&6SsW%O#Xd~yNJG&YkRFgdnvI`<9Ypz z3Vi=71-PDTJiP= z9-%LqeA=?)rJSJrnA}YCH1%jz*1JtxEKxFTR*{#UdUZ=rL`|Ep-Y0$R_N-|aRx}Vr z+59Q0QxA7g)K`q6#@fFWAI|mkWfiQQL}e-V>Ec}55!F|2dF~OrDp+a%Q2Dn^M$_ZC z#Hi-IOO&ALDGPtf+m|;buWzn<$*Me08z!};PxxEr4d&;%(6pixO)GpfOE6&dH$8 z_duPOL7kI9ozHpubb~h8SZF$^b0Lj|-|YO{qrTN#M4g>l-a(yx z%5Mix+h0Zc!nKGYqrSu!E|1UuVIV3c>k`_O z!N;RBv*aHT#S*nnyHH)v4e!T~_0r!f14FJ8{XMd1cn|2WcL4omf&Qp4n2Y^47P9k1@XKZ?_>BDZyPwrAoh!RIFRw2zAH7CPHnYvB(nJzX zi4Hb1>;>IJsL%Xv&SKaW$&y6XC?NZ^%T+Q&k_AJCZ$=FKByhE?GU(-fnKu$2rT-Ex zduaHiGkxsqpy95#+&v|Da9NMay0S{bpv|yFr15g58IQ#=wWSbiU$F`VUePzO9)zS? 
zs(Oz$LR$Y>8t`wrOJ|jp6NU%9Oc?$_%+J7Rr9?f|?BH$+;kTXPl2ZAQ<{$FY$IWmj za# zke|j^JX&_^3e1JkAJF^Nu)XY+*8Nn7_-z?He?Ovs6I~KC>ud|@75*_U@D5>=%!nH} z%j$ekn};EX%xL}}u+JPsofo<4AL1p{`K{qS_Pgz{-(6})O8QH3rfY}uO|#?}R{`Vt zt$T_fyZBOGe||&ezqE3omz&AeAL))@Jmttas6Ym!v0?CywnwQo0(|zmgR>Bw5frZf zFdLa@^3^3PwPC_&UIPoCx)$^Vk81`hM0+CnD1zeF0(qsPhhzSYayi$$gPJNr9>A%Ue>^Yq$VcIuhk_IaSwn=X?@NLfKPLdtoA zBS&!ACiqu9{=`_&w5PYvHf=(FxG7yI7&csB%rb`q9!aWKy;lmk{0(ULVf3r#KB{dS z&VPaWG^1Gj#gV{Valeq+HW{&Tz(HGr_?llFMY|{mSG@|ZTItv>4q(qb5co`rc**q0 zKRf;6?Y7tV!ZTOcD*R%~&O^?OK-_~_>+wab?Iq*-qAnNN%@^K?y4n{!zKHnSHe|N_ z^*QZ2WR8t8$7Rd;a^x9IFb^j0!h~%(3*7R_uOe&P?#$q(zhp{H!O#%olRIX_AbWST z!KGMF%g~}U+c{B_?f;A*_KefsFXvKbxaF}I*|jmYcJZ0%o;EE-8_DL@AGt?nkN5bCQ9@e>I?!`J}V=W$vs=Sd$D01Rk_ZKVv z1m6MW<}xQQM- z4t!oW9CoZ>ekR=vX`xH zIO9IMJAEr6t0#P950mTicSc>w*`A5q^X;18slw`$jcri{9?C-A?{%S-ix59jCY$}i z*?i4dg+X1(rDpsfA;vmq!oPFK`k-6OC8>;x$}{ff-MRN1-V;)}J+r?#wyqG7Gcx}0 zo{XxjnhdP9qIS6*VRi+V0UDg`%3#rNyOBHgrM0R``M5LgZY0zK%gmzNZ%yYW3^e;_{%vl`LVogOiZrS5(`bx;0y&`!H4KX?8881 z$+Qb=8YWeIY7Y+HV}CU=3A_@niXok zA9mnrpvfR7wLm_0C)zyCOPvMy!sUq`4*x1J%AR=lfCu^O{E+{VhUwL$&rPkkr$s)A zP>Nv>VDR?CG=khSF%;<>O2g;2@2w49Am8s?i5 zcts`k!F&8o_iabl2N6r(!+l3d{nNpqJchnFcI)x>Z$1$g>BIE3E-Em9$!DB`j|VaaT9FqXRyDq2JxQI@6e+AMwH zimh9S-pPe8BVC=o>-S;~^t6~BXZ>%k)1qZ==htUF(H$Pi;^^raIO~8n@oLP62%Y35 z=zg1|(^ywV-jNR>1GZQ*Ap`p77KYEzx*0i?xWoZx$%B~Rn!c--I5vBK&Je;guS^u$ z)Bu?%y-e$UFUd1wyi%B_4X)F-q75}po9NG!D6>m?rgWpmAB@jOrv;ojaZB?f<2~o2bS!;ulz#HZw!cwV=k~ zkIPWA6>=8IS|k%Tqvm08n2cK+A>(@6JbKuc-Gg{pv8p6UPnoCxT%hNt!(vFfCuyGZ zV?5SDeOAW#;({}7`F7aLdc$6piG5$}wWr)1cgC&S9vbrrbX3_7$7A%cq@7!TXIAP5 zr=o-H>L39#TCgGb>kkt?Se}ueI(&3bLmxWmU#o9&3fL%&FcrR;wI@0@Q!Bt5&IEjrk8IIwSO3V#LX2SamzD~ zxK(Kbm~`y1iWVZO{dKQ}6kZU^EZ^U~Ef-o+1|=`~65dyCsNN!$<55_D zi+dMqs*bsnb$yHbSB2>@12iJ){?nbnI2D z@RIzX6z+$;Yi&yqrKIjxfhrK&J1p_kZsIADZHaF&>dolM+@tmo^<0}xO*YdLS-Q5! 
zg3s)q1%J(j3LoMZ)B@iA1|*{$Yq{Zf!O{pmQ*o(NVeR%=8#TYmTg|X`H^bVEeJu8I z*vDa?h<$P+c&mBCJhC=krQVEgd26q(tF5~}b1$S$Mh#JC&`SEQ4$L?TOgZCD+u*$z+;G$r-9EDUj@;MU zHL~QTxUxBJaXsuv`Ac&bSLEF1mc?~&cCb+#50R!lN81B5}BTB+kjQBwe9Ju9qs9%hqG*EMFq9ws6AM5m)5?N zS(zg`=HI8NAN;J$@+~3dn+EQT{(dR*BKptMo$}Fp=4n~VFtf7!kUcCcsBGx6bunk) zb9nusM;;2ZYl4P9^!nn+TygQ8ZgtdcZcWr3`9UM(r-8$=S@^5qOa?y-QtMtJNKS@ z?z!i;ZwdN%oQvu@t^dgA>cVty!5oU3nDO`bTNAF;x33Ae>XlM>2ut0of0uex2QBxk zUb*}ns&|n!aSjnrWKEny)Du|~=TNbAuxrAtnqaiAWW!bc>>uDy50Mpd{%5l=VhynF zm&Pw$HjC|zUpgNJ#$dWXlp%*VD8 zUG81|EG!od;#3{MRSzbZb)MCIa5Z@b8Bsr*+OxM>EpAyuHmk*0UEMR`D@@C^%fpxZ z!=B|@WjArv@(Hk7kT1}DE{wIWe3O5-V}IXD+0ju>kC$MzRCPqcwh9eCeC_BPTCpb} zF~VL|0`7OkMMCKTsYhLnBFBdkuIL^_^oeB_{x@&_?f==Edk^Z(!|>+h4Qo5};8??4 z?}06W;M`P5T$tK{M=a45nZ?_EH?ebwI9MQq*05`GVK14L@+1UEmzKxQJW9ef*X zAkpo-z62RXEWiJZ-yIF$b4>B{`ZKuddKa|l?H9m_1|p`R1kp{zit+<``wYK!c9p;f zDbaf7Bb;eP+!nlqpus7@6B?TnQB$rIen+I*svWA=f{({lr_k%4ZIDr5cOe`vxms!> zyjAo#LMKvLkio5r8IY9GUf*1)QyAb!E!Tp~@@3=uA zp!z#`Q_@28YY;KdL1*RxKj*EO6DDlAPC}!G9)*r;O`F#TLnsv`UfZScam#@;)2n-p z@R_NR#7-^!5xA5InlVbK@#Icf}PwRo)g4F(Gjsy zZd}CK+27i!OOR*&N36R?o~st59dWxPa{AFMTEm2pdGI>N1S2y0yZ}7W{gBV6WQy5` z%|f>aSQp>Jyd5+?ms);=tx+)ZD{OO`KQ+3DeuAAM-Y8;r=>-8h^+$~^b}^79?CMGo z6Y;s8JSm;pP7HcrAqS!HQ-?;eqb?khe+fUejo1#Zw!9+UttTU!fSF%!WR48G*W%Ai zbe{Ki*p<}x{`6c9w4QO=`|ACveh02^XTCi;B0Xn20{S*NJho3P+>Yu{c_TqjBp{jVcP2 zr*Isz?!k(>`iB&a+H`Ke;&bi8TnaZ<@gME|sD?X6@w@f`ZnWZ()?aZ$J4)f+o1Y`PV-Y8e^uf`W8RM`$k+Bd}bcK;=c$Ny!DEqjEeUf~s@Ib|f+LPuU za5>>0KY?E}K3>K>g1WlPIVtxz>g+zot-#qD+;jX_RL{zKam&lxOXSSQq@rmi>h`YY zoceS=CW{n`s20SYuu~c8{7u&b#5KEp4*7CyQ0dte_<~=me1v3 z&+G($wL6z9;flE;u7E4#N^w?==Ov;oKubl_qNSmwqb);gLd!&3g|;VZ8@CnrNRBBu zjw|OXITg2stKgJe70w=rs;&6EUdz?tc*C)s+*{lZ?mg~Z?j7!JZWpJ<*~3u>xeu`4 zbF7JLd3j{A}Oi964A;p_gU=+*v>uO`bt&9Nv^u2y_v;PhN8*TI=M zBi9Y@-8QbB>%=cQ%mNd614mzf@(0(;^>Ej?Ke?+M&zZTuaP~BQ50{X2g4Gu(jKqwi zx~FO6b)|dn*NCHzz;A_EL+d2?{>TFHSsRn!-n%&?af&DsYv~DmZCp``yM@QWp+$<& zgD6rs9b6TTg3$ud{LzM?`JjcNoyAkvV804&7Vb;LUV;{nwhS#DEe&k}S}K|p?I+x$ 
z#(o#t%eY5|{RXs53F>=buRtq9D@DsiD@0SFohQ#idl@YZZATj382bpcNVI6QS!nTS z60{3ZrF=#Qpm!w$2tBnDzQm!zX#yO7yVA~j<}@psW{>03D}{6$pb}1lb+h+BD`DNl z*`P|%G$$OLS?Nr-$ZImq1xMelbfy1qkcg(a;pps25w>5ibjS9mm8bxNEh0kcFT!bF z*nhur2>nkqZ7B9Xtn{W^;k04c|G08E{ZBM)1Q!ClmTugE{3x?R0n<;PGnYBe)*x?3 z5a9etmrFvIaZ1cd*&+T}c)Z7Vi*k@p&G{T=X@Dp2U8)TAIk{q{h@^BLIhUB){S z{YIc)Ka4k=j`!BpNc#1m{eID59%zHP%V@9UUWcV&RL$R2JH$3f@|K!mg(>EDb+ixc z=Psi=o!gCm{I_*gHHj@y+Xm0-cG{1uAu`rRb8zHzNlql8-90_ z?xOviFewN2BLp8l$D)(p)zALslUt)Iudl9ju#U+_P28kP+Se}dV`<x>CVg9tom2&n{?i7G~7*ENaxDKw0>YecbWGma%8Gk>o@hyyNzpbldh+I z8H}q2_C+$@EVq2acl&+Axj8SYUaEb8j>*QKxJg&hevTU-8`#fX-YqC6L-j=MO4^T& zA99mEM*G=eOdZ(IUEa+;=OI-}{eyV7V7ks9;1+X>xP_dXG?n(X-Y6c}*InN2*X%@9 zQtdoCrUb;WxL-265Wdn)DyDs<8RJ>^-kEQJdwA|clU)gkyI_u7M~vRu5&_dRUdSBg z8N}vE#)E=9dCshGoNuEbPY9!sOILY_UjRRkxSn5Oa}6d}J%K8aj!SG;Jx%C-ZdqW( zAwHU3cM{h@qNntz#_=iJb+^!?AbRBiTzQsIEvlPdq&Zu%dQ+9UMbpR0X7+n{@F8|2nlB zzl=Li;7(zb<-XLYb7rmM6!zvlfyx{F6ZG20wCBuCj)kDP5mBT%s|c3Cuo0A=_nurK znzjxSS0*C2H}Odr_png8=A3ztBdHO0h*#5b&L;B{x*g{aSkKqc^HXr%6jG1#hpp$! 
z==l(wKO1rg=Z{*?=hE}OIDb6kOPoLMD4_kXrRUvozA5B8od3dleic10!1KXX$T0;AMw#BUP(wi&yEU|p8s(7-fvc-tq{`LJAuq^oA_GrHrKlX z%D`_*MG><#CHy3#d#_45A4g$0s**0z2qQ}PKpbzCK83SmaJ21-S2UvOVZf3j@ZZji z91X#kUO3+MQLY0h$3WY$K-W4#bLVnb93 zd|KUmwNGzH75NhA1>AcZo~hTkUJ|;d;{R6s-~8+$_^oQj!TDU1?LcLUd$i)S5u(jIm3 zhRIHveNd0ke+TheaIf%CdMogjh5%JlXMAxnH2M2Gzi=%^9B?-KrTPHhXwG53!@kZO zP<9df_skCL73|+O!z-&-CKW~;;J2Dl5wtfSU-kgM#q5OR&Gfw7d=C7%4F7++{NqH1 zTPyMSvKSd+p_Pov!r9N4f7o~S=Pt*xMeM=ErEEa;S_xAjP}nKSc)c**VP(be0g}PP zuJE9x?^5V*ZtyDnN1h7nE&lru|B>;OqKeH8{ZO74zI68hvj-%B$`u$K2jxtTJUU&uj43qN0 zQ`k#Q2QDnZewpd)h56X0o6HwxW1nX7yGU>xkkM)p&D5J+>DN+p+!4AjkG+ zL!fFu|K^Qx*cRW&!I?9LSFk;0cnVvcVF|Wh8|Gts!Y~`#&!DLq0dG}zWQ!?A1pup$ zoz5~V#r!^lWk!nm-8dq%QOs)%S8!*o;Uc#08otAJm*GoncNq4dI#)3u?e;qL1UfOp z1|nbF;J^8R;N4qgke~IpzTLO5pqCmBk+Zj5`H6nJzFOaa?X7o&vDtUH`sLfMdioC6 zEJF?XRj?c-IubhZL}LlBG%UyeX^v}}G0G>#Rg_>X<5Umc*4OL!pXf^h*u*>Z`qXV# zFNJQc82YC*Xz6G#=AuR`+Eg@0G-Ah{#0Z#UIRd5utqLt4ZLy1hiA59RcqsONIU|2u zwt)E}Q^0gzXP8nQ!@P>-69D_~*B0Bal0BAO%D&&kgshj`TW8mMYQ-<+zp??rm*QQ_ zlF6*4`e;POgoX(fLhZx~5*_VW>rozo=;$ekj+U}}l3!rME7nV!{Vg?mLYA7)zH96|ld)brC3skv3W2Hs^88id+nUJ75|`0i2}`Rap@ zDmyLngtXuRk2;bA*cBQbA-l_LIn=Y*Px@P?({-T&1awg`Tq#2fYweDp2e5&ik- zQuuYA(NCZru*IlE7|%|lnoVZBy~-YbX7<$2tV(t;LD?eIu1=m}jXRytaFG8B@^gu< z1d&7&8XQnDcnVvLyjmgfFDvFB>IzVYz{0;89t*|13_9KY{Jqy@>?``mk}NYtc!C8+ zd+K@jbfbWsxP5{O`qRbGo~~$t1%8$v6EsXSmb>8PFCW8=(b!!RULGwU4(;hMsy!`F z7%KPXNF@XxZUp=#M{(1?pU`j&*0dOQXk7%JI@LH-O}BKASanw8Zgl~K?{}&<$ zV&K8yb7Vq;Gosguco{tE_w$qgF5$m1Tm~Nb(?F_j^!Mv1L4BJNTt`;ae_DsC$s0Y{ zd0zjEegZsje}lYzpcBUpKS0;Lx^qIqw}817G4Px4Meg1=EHJk53gTc-A`}m8#upZN zk;7M2`g8tO`6;6jO2ha=d7UHLLv0irE#z zlraq>D@IhfDqOYh6gq}h5O^VV_`EA(0Y7O?aTQJ>o}bUEMc(-cRCz5=Vn0kqJ^P(A zrRycJ$XgMIycM$%-5rnU?m4JnE3R|o=GHlJp>^VlKPAIMps)M*q*?g!TtM8K_O;Cx zkIshqOZy-$roj~urv`5n->sw&=Q&VDxQpEK3oLS2VO8k~BwLvmVl%y^o)qHPj+mMm zgM_#U#8doJh--%&8DoXGpE}~%1S`b3^na88|uFRHm9dlggZWnARK}{yI_CA7-)_(q(zhvyQ 
z6bi}g5hyHfv-rP04k(cXRj5*ANaDq2=?RK;joxzVY51HB5-)3)kjU!{!N@&UFGtHQZt^5p~07X{~^!Q4?i7GWkv67cM#Zd(VTy2eBp~ue@CZ;N5rsjwivI97o50_W zSZKdVg_oIihmxhp-SPtU1{ZL#fV)|MyEwpIJm78);4ZWd5bi)AHojtb$Q(f2KEvAP znUuz9`_Z^)3UQr?_=)dU{6mO4XN9=b0T5SejIN0pB*eXH{HG9C!bfzS(MMY$E)W*V zxp#oLx2+KOV)862#6<(*mO$Dj5GQGi2EBnjY8Z7WBm3>>S>zURk47$I6&MS z3T;Zn`4*wtDuK55|CF)MQD`GGw@w0{)aq@%9MBfCJ^KGOv_P1_j&Cy7pZO_$WesKqZHd)6&Xe;X|ftM_Swy&+w)((5_FZyUJw8a{6 zOrh;^do-YJ=UgFEHdM%{U$xM@DiiVzzan51FAEq6Vz-E`^8YU$u?Shg?#?5oAO_+8 zl1DVP4W36l1B>oIqgW;I{=dm1X1D(n9`OU{hf0V=pz9`&7}aj!5hd+}M=U}nGy*TH zk>PMZKkDkOJYq32r2c=$BNo9zW8)DIw^?|^ZMVS7KjRU_cv}mPSb`e1cjFNYsZITE zJYo^5+}dc9jYnJ!u5mv<+FXphO9WaB#(~gcHiG+W>(3*mwe{x_-)*z;h|Ah6JYwu* zArtJ0eB3Wt;ALr>fZ6;B5;&lZ_{at?M<7!*Qkm+WI@44{m}I+OlDH6Q$}9m58Xt}t zL!k}hD;%^$o^q=7EDwh~buX2tc0-;DgFJOF=uvzbp->K6oGXY#W#JiJAX9l%5a?gq z98PJ?yZvZQXoCZ|y90=cjYk9oc{A5Rb9XwK7q~l5$Wx@|FUddaEp?^b-9v`Zn(>3A zHSvaj4l92e&*(#~w8mtJXD8o5o>EeI%0as-`Ccon2?ebQGFxE916Ak7vE!gy*lh@H z7^jH3=x{5Hd$cgInb2x+e7=)6e>pz*93rr8;uCE zv6O=wBOd_{Zn#yB@|F*S^h6}7k&vbQAxDK;@B%qbLER9l*bRz&9ZaU1!T{dIwOH4CBN*$WfsUg~)zop(x|297Uc^D9Xw{ zijofcVWB9Yl%f>!KXpZ5Zc45f^Ak`VwTOR+Rxj)?M-@Y}S&Zw*O8UOlTkW7NzqH2#dUn*`Gf%By ziNA@{i3sG^nut7D77c|Lpel@QK$f0x^;sx3U9W+eBLcn>rVPXyGZ4t6qYq zsMk^Oz~IsnbVN_VbnlbxfdA4~G1J|Q9L_|76m-I6Pe_3tmyQ&h)gnxXa=I9r>7*i>Lye!Mx7R+8xxe|E7X3# zCqXOF-C|$q23?68bS1I6=7bVxN)n+dvBcqhz~6^!NbY3#1DJ?!78#2p^d)oHN1rZ2 z?FT{)i=a74X9F}Lwd#2U<_v9-_+EaQDjJJ7NuR^Ex4zRS=`@I)sm6iyREWxcO~1FKGam4A<*#DC9D0W$NQWe= z=&kVbjiDH^Gh~HyP`yJTKLL6NqILKknyHOA{tmu`YUl3(F%ih@rFL$C?qTS=LgtLK zB~CsQ`n_GyMFc-*fgk4{hEaaVFs^7XzW}}8ej5xO?Sr9@2Eb4{&5K}zA>XY16o&Ru z7)mwduzLp$Lv{ZUh9beK*;zO-aLEcgrO*}FVCOuJ&r+Qow64&f5}n;Q zJB3WS!%eu6MhO}7(-ycn-pw$J4>HX051=mx?aAL`fg2IKBgqMQC$@n0Y)c{eM254% zDH~}%kwX5kHCy%pN_&2hZ2{agrz??tw+J{qhpoW>0pJ7HOT5du~9ZU~NtBBFXY zXo+_nt^(xEq%^{-X2_tpCd|%1me7gT{EXg*NtaKQm$z@D8tjb=a6H#M*IY& z5W7Ls_UoQW3ZOJ1+>i~=+eWN!Nf}w+UX)7wV0M-{0xXZfiJEKzPPUN#r*-kxCgMZQ?asx&$@m;8F|Xf8dXc7@P=Z;$)o~vDyz~75l-*mx=Id6td~S%#Q$L 
zla*!aoQk)#;eCmI3WfCSYclp_tl?1o-cw%~zo6PKjdcxsH5T%ZVaDIW8Xi*dj-HG* zmHP93iCo1;;k8?cJW|nEuYX@HLL3@ug2S`7klzQXYYHfWozAPVOaCWU@kdk!@1GY1 zn)41&04jrDZ*A9urw;oGm|tcj7(d$R)p!c)_T4vx%u>6Xc~O*C80OC>E$jBT%M7y= z&7+=Sjy7A?EYUkVXKZGXB_VjDWPH72Z}zK#@pDjDI8x;K4ap1%?bXp;tdD~5LL+fL z+|2`9gERaUk1p&Mirf_&VJf@i4!-zt`M+HGAD$)eV053RDB7e>&a+7;FF;pd5l8GjGCE)gpLvIcabTG zp1LnnSbrN0ESQkLXIP=*Si9GtrJ#1%2#j}lRcZoi+W@vbAq9Cs3M%AJw;^(d!k8E^ zwo3OFuw{)n6EH^J3|1#_t>wC{fHU&{I!O_-aS|RrC2_snU4^C_pl`!N9zH&ckhKZJVH1x!Q9CDM%j)Ese?P2s@h^O_}Rh`L%b;glU zkN7KlWp5-h!D|6y&Sf^QcM+6Z+KqMpgG)4`h+@(*=vHIEq=p`Km2 z)jW0zQ=U*2Cy%WZl(SXw^645>^DB?5l~fnP+mYlwDn#sj0I@omX3+dGGim;qEcRx$ zqar>NIq)3W5qHRTB+|`5_LzNqHDo}N?MP_NcJw}S$HXQku~U(Oybv)HGWN?>H%NwG zn1+D!GFW+9U+}brs8ID#UzQmr^mj_l$Tfr;TuXiXiI)5(??`4|OtnFL3$)C@7LV&^ zsK`2ZZidv?mX1EBwiWT&u#|hyH>`oBoZt+SVaebuM24lk{HYu5`d(mTyNk?0kd<3fT=kL zTGz-h*{B;&bazbfFXmMlmztOWC9=<#vW~rqSDA4`>rS>w1b&<#@MIe+aKE(1ASocwVtmk)dF?Ovq$U1IIiA9P=!6?yG=do&$zi4O`dqbK$&vAGr5S)YPFV7=r6=z56OBb#bHf(8xm5n1;D-oubzI22i7f&ppX(jdTl0i<4O z&GJEEr~+gez6*xBs}&g*UT8g}Rt)u`Vfdg&{G)6XM(hVYjSsXBBfxohONY}DXTui# z0>x07z)+dG9n|huBVGv{RmeBM${yFVrm>oG0!6UMyHQK>Q_z5%Gv_u-`(r5CQDk#r zf+lHVfTN=1(OfjeQTNFw14m7wII1imTz)SX$xY*?auJZRrvpdHfum|<;pzfB&5c^F zwo)n4f}aQv5PxNl3>vX!CivPdT!6H4DNFDZ;Q|iu_d(-vsl~0&y7wIA0fsiNPL^n% zQLTpsPK~b}*W=RoZ1Nb*bC{3yk^{Uy?uzTV(eS)V)x4a(yGrb#TR9*36Jh_X59PDkb3#8x9lcA`_NJ*{6GE@bSk*)w)h0h4i$C9m|Z#{^6O+K3++=I2KR zjL?%|PMu|#AJi5cRmvJulZvzZLeM*S+JXIC^gG#AfrM2*-x58HCGEwpGnXluF) z(V?Df3#)V8;yiwVVj+H+0E`xnihF5g1Tzs{$BptjUJ4KJJKl=>6$uJfF;9`ANL4IS zBr1}Cix$)=p(|pLDgEn(c{q}?UqSaO;8JM*r-i^gsra&sfO#g@A=@DG|Hn6flI*Hk zK=I81{vs^P7EUIih8-lH*!_=f-^Iw_eny`_@dUWm&O2D^UBLytW${+7?dPq0&}utO zyv=r40J}jv`vAog_XAI4=>A1H87ajQRiN++U5OP(jA$oudZmySeBL6`LIRZ*5UEF{ z1y|vJvvZdXuyenRY*1A)AFR9`9hPXlZI);~M?~vME!d$7*kR9GOz@w#V21?gkZxj! 
zFwA`am_24#DKF!$TLMgQExk${qkA|hu{Md~ zfqjtHHu3)8XiUi6?W&0a9zfr$^~&dJA4&E7WpN}QxR29Vw-lZ_2VGLfYTt7ix*^< z;ff`I-^Gdt6iXF|Au1aWzJl;2qZEM(!flLEj8zccL$W7GaZU0Fpgf&I`7*_$iZlg* z@->j8!h&NU%RB-&7jVnKWh?=#F9obW3RsV-dr(mbFGwjovxUJTO*(~kGL{3p39_k; z!x%egdi`Kl2Jc^OCHrZX_W%CW_nc{bRHbfyoxZtB?4{c< zpNvGDN$O8jiC62^C8gCq2;5Rgs?h+q7{FtEg!<|t&1~S76~v!>tr*f$0sm3wJhh*u z5V3d5s%NXknt8x7Y1I#L4^}_SJp?TC2qdL7uwUUVI~P$2Y4L3tHVgT)ZA3mAc?bE( zUAGAG(O%vSqbBl^gH=9aCPwNUSwHn&{`yrrc46WaDkm*%ld+0cKge^Do$t1au#f1x z)=I{D&~FMG25G4PIf>>1i|LGyQ7pCqSnMHSG1opUw!p1`&*<9Pk>ZwCM)1pnWm)~C zt4}S`6?!Dx&i(q&jh|DkOrLaxUkVU^Zqdvyu5brdn+D(5ook8A6n#|$oR-@TIouS6 zqmAu;*luX=wq~C#;0>_%StTvp)73&Spw9pdcimKkDp~}?jfQ*$8#?^rcdk7Pt^6Wj zyRU)mwC!K&dxDU={jxoC;$isQj`s}z_e zxV4P^1)R)vNnE{0@9~Yycqa6dKe}JD_y#U;JK2`@luH9*R+|V#ziPRc$?`Ij`{v^a33t2)b?9|5c~-Ho8EWz&V*h-Q%KR?7m|l!isngzbdhBEI`@ z7MYX57Z;NwVYPq@Pl?kyEj+;`ew!+lJM!}gi5Ud^PQzS=CJHIX(h!z{muD4y_0%N(x) zPPEK$H+YP=o`sEw;~8c-N3cU?hXcima~qv*!HVC(PDk+KG)*k<;!OE$ZZ^e>vw$Ie z*=bh&&=mPp*fo$ zTXaM%Ne|bi*QGS9laTe~VIo-XEG*LE&XfirD}e_Xpw_UcF-aYRBc0*lt;5w4O;YWW zx~&~)Zc8fCw=S&!zA*Y#qe?*FE!mH}%KGwL-QgSF)lYRum8Euw<@-5T4X?DDAmW_l z%#|->BWQM+zW9Hzv)C+!S6xUsW8yX`@T=H zdb$O{E1`fFn}X=-$mh>@Ic$I3>fIg7euve8e64hKY~srrelc&ba^UcFqggZJdh#0{ z>*rY-KYCH4i|>AZB_!^Sme*RHKreiZ%Ui$cvllgrE%u_6hJ0Ra^-Ne;@sWNJ>;=T8 zJO%cG0#qe>7xxZ_N7IY#bM(W}OHaF0PkQTa_t(Fk8BJ$~(2E7FBwzWW#*_x~-=S8U z2Wx}%Ig1+ek)4sy4s*-miq~72ke#hTYI61&R1(|6`&^H}cKCHCbv`H)Y!q!4tv=*- zc={5$;eUjD1q0~DskX5L=*DktpwrY3V%0^1ol>b0BwrKssAsc^ln{4dbI7dP7_RuZk*^_O-MMN?l4!wV>%P`OoSB3Ky z!FU%?5<*oF+XW6<3~hg$BA#ga=gd_|AVIT=IobpwT| zdotp>y~$F@#ze#a0&0w{m&^ryk$`8m>HfnZBNy;d#`xyCWM|DB$~EohAAl5Qk&$C- z;s%B7^Wk517hReg~tT9?eBsq-j;a-+YbeA?gdT1 z2Yf@krS8D?`8@G5zp3qCp?eKdUJW!z1bUsp*C})_Q5m@qJc&(4{)=Q_TO50vjO?Qc z0?Y=;1Gzv7v*Y0X?aTUDb!a2xBWe9Oe|Qs(g0}zBv(J7z0&CQx(;^`g8yfQ2^hwC8 zEIt;u$;Yw%vm0xT65oAY-+?`dGkMFVV}w z`_&@V5-MkrYCBuk|4xK{rnk`dLTJe6@=W=#9}6M-p@C~ivK7o83fdf8&vgzeALk(o?ruGn(kh)q6oQZ z&*%fJ5Hz7<@KIQM?h=Is>_Q<9+XE%O36t*&%b3S4xExr~Oziyk1xCxMydy5{L 
zN{E+sI1^3NQkK}s0w5vo=ewzdcr&_dKI+D7>WKpEoPt$G<}74Wk=3dYIj91F<9Brh zP+4uq(OL~*A}m@rT7{4vpX{2~CsFmb zl4ld?jQD32@M(s8Xp+e}LLuu1%2#wQexrH>zGYm+^ME&nekfq zk};js&p)r8Pl@TWf;k^PSL5 zGTnd6zD;Jvf15M{a*pp7yI%2%(`M%;(bOeo!46b$y(ID&h3MVP^WIwsEcyZ#M*$Wi z*w5juF>DL`wYKE*ztQRKwx$`>pRFS(cgZVwt~sbZr6uMTei(Uqf)2 zUzb14;hlszN^8}B2*9;pZ9iMeKtNmSVV(?*w zgoq~~KMCadc0cQk$F?9t9>pX};oDKfD-a>ltM_g==r@OT0B4)epXq!-Pcq7rm9OYq z`8J;=Z}4hQVl>2OX;{TFtose{rS${#S=LUtLf5Mn9c=-;kGxfW@bUZut6trHMNj6X zLmx{kbubf_=x7e@DYhJ*c18io%rT%=$9q4G)f6C#`E9+w+8c6EdfQi4ij|M*02YeX z0?65B3L5}94ba*KQ~g8|s&y9fW7^3W>hz=O7%mUGT1Z2~7>r?sLDR2VNOQjo+0X&Q z2j%d2duCskJzP$ z2<1uL!IciBc9}$2Xo3|%3O`tseW1e~1Fh~Th38k$!Cv%NT$cnuck9V4Kt>32Fe((I zO2_?Fdz%97?L?a8H3c@EWW{_%KHt+JgYNcpunR0TL6nLvb0ec5W7v;xO|%_vQ7ds$ zP4Xbu@|l0^)eb8HG!RMFwVd7NKWLux@hnpDR#-W%s_?d&xB0Lvu&Q}m)u zv5xQsV(JXvYI1@1$#3AJO)XA+mTFWKq}tR6w`$?25 zNv;GPTIp1F%qVbP*luvPzvgTvckbyV_P0BE+DH;ax+HKW@AIvY<}SDFNU(X@lz=Z; znHa-5RZW8qIUh9>WNcdNNbo5`jWw+<*n1eir*b>N%S@<&O7!%2{2u+0p6JGbI^gnk zd8mSA(E%q{gc6R%`o+k7i%`YjCicB^RiZdPw_%|spTE$t30ie;93JTfStp__PCdDttM!DJ7pZ&hQSQfv z5x5K__0L1f`@$k(5JS8$Vs|}dl}axM&{uC^SNAkktUB@S5ESfu19WC8Mg^pNjGxY zxMo45TwY#AGKUtFlMJMV$WFSMdlUIcOSv-qo*L=-N4WR5ZKU3Ogyv+MIS8IoCz0Lq z_l4bKJUow+>NhkiWa*%(j9YFwsEkLY98|_O zH{Q}xP?b44I zq8c*1>Ji_TO|%#x4am<9$j*f2swvU}*DJuQv3zi zK_kqrQ%&dhsH)5aP4OdHj=%Oo24u}UG{&r6zKd(r)NxzH4f0y>X&TC>y~n*RS94lO zw9jWB?N(*K4eF~hq8cvZ1*#y?x~NuB6xEN`^~*E?req;;W?Nb9yYBiWh1_Coe% z&HMDt4#{_OA8J0pn;n)Pz?0fmha;Zg6|xjO)_8|^bsz|Q<4QQ}rtH3tZ-DIH-E!-DUdbX~P*VR%atM4C!-d2Tb{}7f zE8}`%>OV@JhO-gYvzzJJN%aSl=iqFl^=uA38&tnHc_Ge5ThChZ|M_4(W(j4@?^-=6 z-E{Ry?!DQ_u$N6U?B(-5)W1PkuTFjxcgI`rUPAl4s(m~8S)7$v&!*6`o!afm>u@&F zdUh^7`@6O(xd3MuSkGGW|D8pK#8hEwvm(h{_Xj*9)=7BKqMxY;`U&_y4MP;$HO&!d zMBZ&buD%W)YA0;@vgKc@g;SuzX#WP0^7(vi`?vMeG(?~AZu?p791U67JKE2ILnQol z4f6JRMgG?Ks3w|7k;r*R_yqsRe10?fdTQ;nns{7Qh~ri3)@g_gk&WXQ))%1aYCgOV z{-{F^yh{Ndo?nq{dN~=$9H$W8lIw0Wp`UG3qWmeqcb_Ad&<*J{X0FUSN0 zk0M#jzwknqbIlg4bLR4PxsrQJvlZ)nhr9;syqd1_O0Gg)#cks@et*#K8f?22Sm7f& 
zE3m@%v{~S|@|G38i9c5Vi&-VJtZ<7(@k4Q3&w=`Dw?Oi2Nao!w+KUB{moA%b@ZEcur{Q`5O@d_h*vt#cL{ z0{dQvKB2e#f;JkMvxszbOSd(iqh|)OfV?1717re81}*TL)hIC)G_{ZMjjA zevt`Thi1G;zDB2^0NZxMI&2MwXR*Ctcof^;3=6US*)Rv&pA6HmJ!=TT_6%fGlDBU+ zpMB%HdN;r6#x-m=+{j0DzF+h|T0MjDj_@%gp27ca$wS03iQ?<;Lzs`qDbQGJ20!z` zcE~e_4Ik)r`mgo-^w(g+IRoq3pN4Ps*KvFj$32D`)Q|5k3;~ zPw=l}%g14#dOl7Y0G*ZszKRRZyI(qt$f~gO?!C(OxCZ~HxdCq+g=}>mG6z~TS|nOH zS_E1=S{N!F6k?x?mUa}G>9JpimX7u^+8VS~Xc=gkXlv0#xW@}^*sq>np*I}uK@QCW zGUD}}483*|bYjCoEIKjRsDQ$!;jhdf~T#k?z+37QW(Fd2_s zugihm_0M znt5BbfXM)i9M=EHcL0$r<%{z8!}PuKko5<(m_u{wUBRU>LlMVj~62{A(wCMbW-Q?zjeN&cPPth{RH%Y%-TBWZ32xJny|8Q z9;FFvKfW%~@4xvWZ4uw@hZKK#H?(f0uM@4?lsckkaW+n|uAN<-7WzPJ+Wk2o5)p#B zzqi;fh_9yVb&JN033|lr154aqej?_x^ENK%O{#Bmz#O}p=vqN_uDP`isz*Sf1AwdY z;2m)L9LM&XV^KL1oIPNU89K*~|8xe6yMy+OK6czuNdmckAuG)K<@bN>!0eFU-@SiETR_%QedOks7##hz2XW}Ryt0F=PDKZ(R2Sgi5HR5Rfq1?O z&zHBJ!&AyTegaK8r~e%}UYR536A%Y}!k#$|d&Lg4rHDbPoh*Qhn>~|`edi<#uTY46 zE?Vu&(38CbfAt5Ds|4)_)Jji5D_@L!SZIb%EjNm+W0U zkySbJsE2a6a+uOvIYc>BIYPNu8KxYM^Q(@IQI1ynD}$7Q%5loE$^d1sa=wyypDs|| zuY5r1`T6wPZHT&CR2K=~QB!CEj*({)_-%1rno#tMb|G;#Y$iRoRZ#6$W zWq$nwRYDdsq;cKKl-qHRDwis^YC%OFt;(B=^|PRAe)YG0)n4|-96LLs-B7eZG}MX{ z+c}qeVI{^Iowvf$aIXY7*8C2505@dWeBw_~GoPl7lG;lbT<2HY8{M2AFuO&ZH46iP zpIsKV-{YHzdjpKbi_(GkTn9e{jC|J2tjR|n*+p2zou->Ox$733ydc5QLbA$i%!a4= zC6SwB)mNEw&4HATE1nOmOnQx_DkC|Ku z{F~eXE7wsSS$&?|Wj4;=wqW~5u=hG`jiC1>n?zCj$$i_LDn>L%VAS#S&I?9&HTf#J z{LjV|e78LKU?Ah~g^8MJ^6Qd>Mvva18=27Q^<>5DVbhd^3yQ>wfWPs1&(9ZwqMr_q zXQwL@;3Kx{=z7V(H8NjmS!s)vDawV)RJvw9$K7^vr^+`Cj0h_St0vH327NCDeZTpo z$$0i6{=ckwG0neIhqbT<+1*J7o_CNvWWFY))=lnG4t+c%_=pzx5BE{|wa}IjDsS=l z&EroPZ=J_u##`s{pwU&ugg9XymSPqi&Lg^BNGSSK*PO~8ru5vgETlC><0Ah%2~Vzb zgY*fWw966tk7JO{XJrvRK?<>@P}%8Y*z;r55?H6^l+ESls);pUTsDLAVPlBQ6tG2n zUs*JC-Lc$k=(%Tc@z4_V$WrQw1^d3bFxCYldAaL8`fbQOepu|To3C+`r&OlYTC(i8lqdNn!(+YzvUNwxP2b&r%ivSe zlL0#Us=)$BZWTh-wKAn%02o5FX0=NNgC|cykQLPlmOMmcZRPU*)GL8+Sy;iquV;RJ1n?U9{)AeON1gPsddr%~ z@$ z?|+2cf3J>=t*MQSbJ-1ot!mo{wt5$%{ljP*T~U4i(PdpEIhbstTYr)C6UP3m|6czs 
zDE+p!-JtU>s!jSrj7ex3Jzz}3Fs5Q;?t`YLjoka8{^dCM8XdpM4|Mm~Ge1H0pM@5V zRu28Z<`l$uprw;IkMRP=3yt^zo?XQ-yFwkKU0R!!#UX*s_Q(U{JQgt5#H&R-XgCkn1{y!cCs5$W!DiDim)jHY=10 zpYk!azGb7}@!-K(ydj8p#4z|ncq6)giz2j~2^=rKN8!&gL7)85H1}lN#vp=~!>D(J z=cnX&H1(MXq#lGJ3YDTjQK%?VR4R%UC5lo-m7-cI}WnYCT`)Wklr`2sh{Qj8WHx#*iVniD-Zx5igB=V_I*&_J1+Y zX5_ldqR-0Z+q>c+n~r{?ug*{i$@^oe&q*v7Pf-2-R}BiR$2G89{59QTw;=j`0&Tf` zG&u6Oo`bMmTo`USN9cEhk$7sy^)xi(Q8_o)=BW+)MWo*hO#y1FB6Z_$<)yIj~=N!hR8v9SzymbzKf}Ut9dOi4DW%uT5+ilc3$t$h%dx zb<^ec(|nTq_wXMK~%;iz(P&v!&lJ`Sh228dR7=618-|YWrE$H-}1S_v3ne zaPJ=2e{9bZ83_***CT@6C?K0~K?S#PL5b=IFpH^Wi@C*=3tCi`1THAi$_24y^H6b= zR2xm^=A%+o{N){42mJ~V4awF)a&2d=vsuXn**)n?-T zmv%-|yFf0V-A3#JkdckqtcSXpzvu59)*t(jzYC}rPvJJ=#p7PK&+iW}`)iJWkp54b zh_8^3H4^k%X7u#A@D3v0LLRyT`0q6Hj))7m+_FdP_=C+mi0OV`=EN4$K1--)(5-$! zxyX}h(MH+&z0%jOB*8;hO#A%-vv;{glHmBm%{hNl=E%OCxQJcKx|6CpTLenrdXtR3 zbYUCSJXuz5E3$c#)pSvtO(8Pt7KjYj+=sOkDUaf!C`3+2z7b#K^8|ezw?#Ywal*vc zBm%x(laYla3igOBXxt}YCckN$U|mViSThX}o5W4H+{FI}Hi@3B+ienG)n^TW%u9H? z&Xx}^`V&i&!TX{I_B##JE}o?Ne@lKKi{3tmkEFVnt1a1xbUNsli_t`5H2H1DizJqb zaya8}dT=wL&spOm$hsuImS42g7PkF={o?KV{v6)U`i#7aqtNwVgasj^fm~(bE^Q;} zq&fPowiH&RQ4cIzkLy}(GxIYlNjLMQ*9x$%imv&qbFhLuusvnU#TnVPENtJn1|GGG z2~KadLFO^j@6-|yjTJWu(HF!L@Iy-uFz$rg_%iSe(BynXvkBbb|2WjHht}0}TyMje z+e{zn;kglZLI=n&!sT0DFI3pX3v(dq>ETQno^WFB&f#1LAx3GPu%e+IQGM$V?F175z zO9Iy<^!sN^xmRe*%FKfjS7V;TXGVLM$1%F_mVnIrCC<%+Uw1M(+mDw%A$?nl^(T%# zGLj{I72v5mU6{#dbn)Q%a!`S6T&AGF%}(k*N+ zav^%XWPEL*@dU0ea)I|hNB;k-U5Z|^_4UlBn5kN?ybr|eXGRy7#l9<~WL!g9Uh&-v zJKy?7GAAF3ueH3aFNd}%3HReWwy(h5t)xdp10o|1L(ZOR7gYbQw}W;)hnICRM@0Bu zHlEuZdbp#*h|T9cs)sv}E9l+x!U#kMz{m`)>O74Or~sAN0lmVmOkY&^3xt+S6&%I} z!>bU8QrIe$>n!g zH3A{ht$`SFAs>FZ(knU-dQ6Cx@~C_%D+T45Z$<6OBg}q0*5X zJaYH&@BAqY^3{BC+1`FQ*FB#JbdV>qBjll)k>x(+;WhILdwd$ecBxq?_1(TEGtPJJbG~-WrFPj50_M^mvJ)gnxmO`( zgUKRK%|W#{A}w5%-OAbdW!;Zh>e-EX&_WC0O>VF9uN_ZQJqELoh%VJtksbmL;-1nK$Plcb7Nwk6m@E zc4E0^_+C%7MJ)@Ac7&rASn~1g=*!2Gq8p;}s*X<}(fuMsX8KFr6hszKBY#7(4jD2d 
zMAPjn=bA&;2%srQE2UL+UR$`1~gm?=EZxL|Hi;U4Ogp862c16UZbDdL#7dI4R z9fGmCRMJh32j~iDdj|A45T3fZ@I!WFi(8)poaOT!@F}QiC7R8s4y~RE$l-%K zoLFY^o)-J6-Qt7i`sd=DqdUkp>m#fyDW%hq6+}$d#YzR!g^}dT-DntroOKRm5%^Z; z4Rie++3pr!4f%dGgpcZRZZwd)aVCd-{MtBO+Lp!!K=zKHsR#SXvjz2s{i|u9rA-)9Z#Q z8ScJ1H>Io6Md_vVRC*{yN_W^5qM`Ahri_GDAR0P?nRq+4BlZ31s`JfyYsAz@WX29~ zovMX5LA!uG-yEZ!30y|bS(pj_*#&BEOzZz7G;DiEo^$-NTE` zOxUy*CU`3S2Azh%$L@Y=joppEEp|5>^**Eeo_QI0F5>#|bM!Wp&Vz+`AAN&ufG^PX z`D^-cvrsoxn?rNMMF2DV)CQCX){e7c!X(Sdcu4iMTGKu_8@bOY> z6vWGo(<-M{=VlUoJOlXHL7@=8{{2qBsoEKJ1RJKcJJCq6O2L977cxbWh%E*EdG#C< zdLQdpGqpO+?ATQ8E9AgaGIO8vvzvVSCqeA#H^{pK7st|fm4F(=v^e#}ge}rJ)EtJi z6xEo^uK*rc#0C?)oMEaqN*$~T#8Z8s1&_u)0+f&VL<-pe#Q)|Y{y$2ejyK3fo`I%I zH{x>Gu{vSRaWm88P$Rfc81B)KU0bva--|J(KIMmo1bM2>bIW*R5D$Usr5Bc%i^+_~3s`mW>@mZuo_+m@U_&Y{{ z9l_GfggCypVYUXGZbx_P%zqeHwXIeXNFpEN+Mzb{(3~n)QapVUP-~8;1v?j_2d$QQ;yz- zQ$i?CX;OS*#VKmZWZ;w-;FL7rlwdc4Q=%wNajW&FI0aVDU@7z?FR(KyR!OB;WdyK_ z3l~#299V?llpz$SxC5&UrPyS;a+Wemc^`1jOk_BkMKMdX606FX{(g+g06ho1`T;8{ z#4nTQMghAt7zlQmt&RhBA!opQ8fK_x1G}WQ&+dctQec;0w;Rbbfn7+x;VA9^0lVZN-_kU?KiE0=Sg5~bBo@lMDn8-XNT_W7>y8y#p)Z{REaO`{vSD0#P;HI>G*8q+y zu4i52!6foO!84)lmT0Tz8}F-(vf`OoE1rqJd_cC-FG?FrF-?HsPM9YC%I^P%ws(Px z>bm-d4;KNE7?K#HXv{=KF)AwJ4KV^U3=k0r;4PXA!_2@;gc&+BAS6}>>?KW_L_x({ z6W_E?AHf7PP20518%>+$)-=VmPn)*6HI3XsVoir8dD8E{);?$E3=C2G{yvl8oU`wH z?Y-A+uf4WM;F^_$YfccZfiG>wHCEu7+CQ9-aZPO}wO}%O>{8(x!oC9ABm&zkf_`Ni ztRVM#cMPq3ItA97%iHFFpF7?s@XZMD4u_X&D&xf;{=QPc2x^4fg{inUVzi5PZ zrV*ZtYjBdO5Zn>uVBoY1IKMQH}$De!{ZdG#@W4M)!;VOaQ z%-scyr=&Z+HuenaI5_Sn?diZ&CH-f*(&fClWAg?*zq;OgPkS2jX7q3BB2Np1pH@TG z7x-!A)03eiY=oVMSWQ!4ztM;rO;dpDKJMS^4ZqmvO}ZFAN*jREF2ZJMK$^R{>jUtB zjX3@KSAHHH_*3`WDh&D}_I-**!WZ2=OFI%i^GNqPV9vSFh5e*|<P zaQ41;!lb%fREoll(J1?##uW$U^PeBv~hPn>4JZ~6v!O((CSx$rAW9z{VF zdOWgy+J4AP;KSfSAa!HcI&Jue>}M;B@lefx?s(=^>C_v&*Y5uh`T(2<(6PL_ z7yJL|$1|?7F7l0kZTFph16;u$yVK>mqH*>h--ivQc(nu}4Zp}*?cE(4z^mL3&2yD3 zDVs5?z<;xKekFWm?ch@fSdF+3oG_QE1J>F5N#m?zEg|kKfZm?&EVwq$5YnYQc*<&O zxDgb1N8n8b5s<_95se@u)iL7Q-L(Ro%=SqBbW66 
zO`ebWZZ6USd>8M29Y9!TBg_9wfd6N=-GTeR7RuOP)AZN7G`Q7fF}Tb{yl-?nd^yo6 zy1DRAy#Tns^|P!~DUe15pSh;Fb3>+tS>*U5$jgSPzu7<2VRmuC|koV`2ykC&@ zq?P6UILQ00p1(rg|C#3*pS=H5Powunmi3<)T~_t7m8!Xg7cCghnS zyk&Ln7c<3g+y1Tg^nkhYJGO0qpzF^now2Y3pt>5p(`2uQ83$kM*1@kq4&N5jh#iJg zT?v51QQU79vNWP|T}jBw@Z>zeEu&)#aN#AEiHkZeVMo8wJFmB&pD$k|ngd%;j@5M# zPu=J}45|79mF&E>OGwz+%`vLbC%S|NnzYf0K2HkD%^AA(MtG+cH)muXycg$d_jF0~ zZp01G6!Tnf79jO=_*6LUEg8wi-+M>)dcWw`!G^c7j=cBatM^j32D*9b_3>U^`vrI) z5nqRJ+n%r#Y=n+mco_+Y6=-9fuKn%qk?uxs#6a8_sxox|k^}q8FNR+(+%*BOTYJ54 z^b)1;^ora+>NfvAOv-&KOuA>H%pdQ@z330(p3A#&UreLKJn~a`5|4bm7ll7z_J_Z^g-SJ>2w8Z=NT3_!E3wyTz``6EcmUvT(Hi6km7aI6H?~LUllNji`Sr=v+WfwT=w;xMNWw0$9_l_QZP5R^ ze`0i7?BX*`QZ{78M(^-Nll7JU=;-a!P0#|jMMtp~_yg`W>3~H!53|m9qSWYp=i(6S zdw~xJq_Z;}etqwe)1e(Z^CswH;5GQ$Ju%XLPyuQ9oTnVOy&MKc{e`D&Lz<7LCXD(% z@Z91~X*Sk7?ta61Z;frbe*X>nNV4X?J)$Xme|YPJrkJKlP0>wJP4H0BbeDc@-H*J| z$OIXi-qc*UVOr~y1K}rcZkpQkkFBIbod#^`?k60e>!Gz6-J|*0bRTs~fIfS#w{ai@ z-d<`ujn+BVZ?e383$W?Ij1}+D8vACLsU(EG)cS5yKaKl3CP|CmISW2D>gM;P`OoXO zk3{a9V}-@86xJUZP4fE8Km0Xh@gEX5e6sbSj1}mg9sOH&KIG_4@I84Gd{3_Je64>% z%)9WroYo&BJ=fh9r_tX6to`!ES6P3xhvo1F{f(WGL$OCkhF*Q(jX~*ws82$o!dIQl zd<8Ofw!Q(k+7Un7pdWzL`37X|x6YIHtI_)uOXk4G2z&YbPlsN6ziHz1SUcxE8Cw!TXbodtsyW;!&|5laU+FYl_yBqPZfQnZuTgrw{4BixG`(i}-YL z&5ccxW?GwcyQaxXUhAT_ADG>-12#1GvJH*xK-x1=xZ&Ww0|wkOaX)mgxhKd*hCJH^ zdbQhm;P#d|@S;WDLvA_nTlmH&FVKsAJPTf+Z-vKHD|>;S32U1f>;<|6`m-tVUxVeV zjk&t}!PQm33dRg8n4QqorJcI%Kn34xVLH$VYeLT%x=}$I{sH*950lB$FVXbxfv*Fu z0({bj?abF1!PhlYHQWyQQiJ;^NWLR4a5t6AfOSU$yzpGTF6_Gv!jE0S zC|y{Cw|3}0Zd2OD?Zxhjb@<4p`2f16IQi z>MB@QJ_}!u_nq46_Mtm%h)lQ3`Z8KN=Nu&u+aj+bSA z;;1Yc-`rt1a-WYws$>pHy433tl+(~0d1M3Z6b^@^eR6%n8`g*>u@^Fxd83HK_Z^$C zEv!F0%4((6EPe6^=8i6So~AzYZph!k9eo46OdEcmY#QYo5vAc_=8gW?{uF#E-jDh5 zfbLoEGuXXffBDV!C+?csx|1zAp7nm8!+zN=%{D&$=*aQg%T67>>vnww>>A2o*YI!e zHjaO`eXn;ny!>r%CHfpYlzTF&wb9!)boa@rc*emCb#B{zt#`v4t9gWTEreo6Tr#h?vOvzO&Ht3>&*q$y4*V#b-x3 zCCYaLIIT?Fcz?cqyNc6#)*FW&51;>a*HlQOZ}k*(MdA5IkE5&8cMF(uTCc&oL*7Z> 
zaqy*X;(6~oS$V1fXEARA7lr)`_yY-98rk{Uh*Y3(T{m|u?1rrmOYHMIK0&$$Z^b|# zb6wH>#C7fUCUERDnd^GD7x|_4yC=aLLqzjXm$>U=cb8)E7>!c_A>GvTse{JDk9aSS zOvQWbU=;qw4Bm&oHx6ybI6VJ3bQBl=(M@w!8tm=v>D~@r=Q5~5@!%)jeX@V4M(pee zIsKjU4PN^g5C4tuK(ZIN1Q69Zd7&SD85;N%Wa)v|!=%Q;unPRQ@G0WV4wo+dui%T0 z<4JftCLOkuxPS8FFL0;cgOZer-)x+_2*B^ZGq&^|K>zH!TOgOKfj8@PSz3BuiCe@uN-S@nUrEp&SjdHIiH@BYc(CSU&} zxLJsNl;X^t=Qj9@A`ekc$m=ubm*{8VKB3tM9?;c*YU~6i--Wx0?6_U%(UIKejPlUM&MjlBRmy*&Nvx6wF29Bu2iJ>rx@Eu!)IaI@S;Y7U?pSL z0|WQ-bKk(MtH%1~#<^J4bAa`gJHBoznZ;QDME}*_RhHQp{I0SF@1Fj0Zj*k%eB5%C zSl@ijQa|=EAoZJNtRD-h|3MYj58T;Z-bi@g+~H!|e%c|gT>%R+4?&kK{96!WU13%6 za}AL9Xoq~o)6$$(Z3+6PO4HjuDiu5Avr9f0NsGT-Z!*m>J)rwL>>W%cb4vei{JwFH z(PT80KI@(OdHJawpaE0|?Z<3y{$M0_g*5wt)_a@oYtrCGT0sds>-KuT0UFTY-P})D zFOKyOvCu#COrcwKp?SGTx45mp9MNGtQtqP#wK6SeX%Mu)+6+Gm9-I}C?>&qHfA`Kv z%xOUphL|F3hqPRN%xWc?Aoj=W8)$!gAG8g-lcSEF2cIVmKibd$KZG>P9%>L}lIIN|Px`5}bF zCI`Yi6*VV~1M|;7TgHd@y`SBG%BX^PNplaZ5*}zRZ8E9gT>*H%2_JY3-Y1yKl(KGO zYx~DJ0^0B5j!lAge*4ZN_m6^h+NI0}!qfMktOcI_BW?y3(B4LtT{w?B7aE2bY2rQs z^RG8Z@rk(EYOd%0V_Qciq=ofQh$;akCcA`7Y?m;x;iU}<)FT1)7kO=mHU!Sx8yVt} zps|&pu`l0Vb?WDU!!FR+DyFgj4cUJEW$!cK6LBY=xpf0*?0tO&Cntfnj`VFhIUP?c zPA+vXyT5J2NvWy4wV*AudDE#7aF%aE&TW8?+v&jYt8kY8e=huhDQ%;7s|<(VJY!*v zhFP(U!_7O%aSK|LL1&lkU*5c&>1@*eJ3wa_$aHo-{G!o~8H+({7r{PG&{;F+?0mp2 zZD>Ad$osfUQ_*KAc5#Bv&c-Z?#hFObeF=>Q(b;M3<^Vcd#u|;7uIbR%#|^ezn1tul z!Jk|RMrTjTr`atsoh|3_`)9uex()1-4QXdi&>BX#}dPus5p%JNjH~~*jst{VefVS zdp}^kv~N28&g-3ozq5PG@i(E*e6rp9*1sFQ75)F{HiMG?i{!iBe|BG#DS3m}gR`ro zasLY_`NRFA&qFqPk3!cT%bJg$e@v7-y7$-JG_sd^UII^XoLe!_^NViWYJD~aXCY2D zg-KoilIeR!xHRAu^nD+mMBghaaVJ+wi1cMzxb)ONCFw5wJfGq$r5k4@U6QnQouAgn zKz@2{Y9ws2&ay4mxw4Q?x8fe%DES`U2JGjNv|ipQ?$Lb=R?sKr!A@rR8JvndJnz}9 zjX2>Hxeqr|yn-9OCQI|b(8YY3y88vt@Kr7C_^Z9NeED&lQ+WB(FT$WBp0}o%LLTFg zu$wg4cU*vzQxE7QSSN&FzcCbd0)}BvF&rMN7Pc&ES=_R^C9!2m%hHy$Ez4SxT9&so z;AW$JKa=JzW6Jn^Pa;2`>siRp)8L!@b^@!I{*zDDkrm>;jK}dlyh5$PZGcA+v4a=g z7WK@HO;fNhDyUNw@Ezb2zku%vo&tNVXjpJ59>%Z3t)geVul#%Asaeg6-r-hIr{gQ` 
zY`qP73yrLIcnFwsHE2=;yjat0d<$~slhBvf!lIq%Qa0|blM)VfE;_Q%hbL2+Dy3a| zpYi0vlc`wmFUpqbV{Nw5aOQeG>u?taP^BiOO8*B`DRu?v9;D$XfgP8kw`s#iM`6dv z#-CwD6?d#=h#dfwUP~4bdkpV?P}#kOL8hJjl@L2Q_p2bb_}_`A=J=$#*?^ce`Oen4 zfLOc?F*hJaRK>$5K^x$!SkUbcMv8eaq0x5&;EVPpM-qKdTPM@4 zPVX}3bAqK`XZu?+?TM4mN!Z$xjw~AmrL;?NJhzseT!*J!wGN~{P{;+&4v(6|4R^CW{p zQi&swa|sU}l1qG!W8+!QZ2*Ten*LsrZP{rj;T!T^LsD~>ic(I-&i0=!Nb@VZa8D9o zWhXt@sYt}%LqiKeK`Z{%0RR6K_pQNUPSXf@(mj>QxKYF7ZRBz4lcnAd&U?I< zFU}gZOF-K3M&haGN*4yuFZZDE$NaZ$k2m*XK|;86<0Im%;156t{{oxx65MA;cI9uc zg>L}pSIIW-8+a0(JdqMEJ%_vHci`t<6D}RPC`lRkJv#`$LM#0=lKcjk>-Ot^eR|g1 zdfhG0M1Gp#y6%(9lkOzmz4iLve5Omq`Mh-pQlFM^hsT~`l3&x1XZ3;{{X+enkY3?! z^K|GTQ2*X=VEPAu`!zYcb$=K+d|e~%kqHOIjaugMJ~tTqN!0b>`cUACy(PaYkro-i zOMC-X6R8@E8tw$WGddMs`zB!Qwhqu4^d}k?T8lwmZYz@h^RchKVF4q_7S4B!sZ^y$qx`t{5mq=7p~2WOB0&S0T_ z@qt9#eX``h(gVwI6H3y7silOndG7uRw+I%iSj?UN=^Aa(4CQ# zg!6rE6Q8-M>E^a6GUcDhl>edm3-nWgoo|M9F42C)a%eIvxzBiC|9sJ@+kBLN7Ue%!P8$Rfhg9s@|`tvw@ANonmMB?+){?QHUKTl6~U5n%$T#}4dHIhWC z2fx<260Kn^WmcmHx4{uL%V<=4IzHWaIy3Kl8pG^YXxl@SgEiwX;OoH3K$yjTCCsez zz6xebK3{Tbu8*S40n8Fs>RRIgvw1$4Jp`DY05$;ihpl)6?xK+r7M&-%(gxf`u!Ns4 z_Afe2Yia%Ex7ww-*tO_?z3YTE_Dq;{yY+C^hgOgG!@mF1wfe9UvgWV{o}`qS>GA$# z=u8*ZO#GUIwOu$f$DA1C1-{?#_0FY7miR`3c#3_npP$`GI?g30x1(LV2Niws*wL8n z8;vEZ(Ky0Ie#_u zD*KohYk`I@$b&QBn}fKK&Aq9b2%Ozn z(Y~_TXPw{d1>&RQSdE?~T! 
z)+60cClI{n3720z-fr-u99_$J`|3_L7&=!#14Z6oX_ZaxTa9+k9TaOWs!y!B@V-Tm zZ`WVG`I!dzEV$tUjq9S5T5yk5y&d498oYO~2Egv)qFyB~3U&^9j7!HezeXC5_xYZI zZuB{R?ZFo20t#6+Z$ufyO+EJvaZ}$0H}w(oTA^H`VlP4E6t-KzBi+NP&Fy3_;en62 z{_YgtT3?H`{!!1N?m&F-PER$bjg#wJ%_V4=kMiI~lGP=@kR2;CyT-}lVJTw+1b1%3_<(Y^urKaPD?F8Lfv{xiIv6Zf>?g23iWo8P}6LMAdl z^$={Kzb5Y}9ETQS=>=)tr}#Y%t38~uLk}0hM%x2>a&b;?G9<8MNR+^GJxSoOKJHzD zzaRB3I{Y2)^Ir^u8`|7I+V4|?x2(SregdVr$sdR7apN(@E3P*aHj^Ij%e^$-i{#S( z2fWwH{-vXMuLiFk`CmZ(e{>CiYr9VWH$dXKo(A0T7Oqd>R=(5sCxo^2JlFjrhRD92 z4o>+yv}iZWDG&Ahu=@!PJJfwW>@gDb>(Gur_B;;V#uv|pNj-a^Dd?A^k2K-ZV|{{m zduMsL^dWpc67TjcJc)PvK~lK%d=JhE;x~0!xU{lclHTn?{ETqv0sMw>fB9+rXiwuI z{a!vBI9Y$=kFurR%EoS#b^hbxy^=jVk;!z*4yC;kO(jZ+|I=B3lX$qFc4#(E z;!#|L{x>*}_h#e`7~K@Tce)2Vxh9-JR7&sc#AOFK|CoYN|6=HMqqh-hU32 zSijvpD6NG(NaVym*n_+c59K+C=@~NNOy$Ih!XBh|V$}Ie9qAM6ItVI122@-q=k?HZ z{qu3aWkVOcLm*>$yq^Ba8zugy>@ShdY3?-g!}T}TIn6v7^64z-oTe=M41T!aUGc=^ zPLKE9-mO1_o;#`*8WPXHCt#)+eVimTIIl|ce`3AvlOIg}LU;Y8Hy`{fMl$A8{q8L- zlVItd8b$i2>FvMm)#Ggab9K`W-HFpOJIJnmUR=v{pPqj3xrX-<sAn+||!+-Rw1IOaQeY)43l;-0usEb?Lfw3-v1AKLQ z+q5QXe~gR#$H#oSaW}PcdkggoZJ)DNAJ;-Be9+S7usN|U+nR&7^PZNpmh_g4mMtyX z7F|nb%hr}1+~RxDdV)l5%cCRpUn72-zUa9f{RorvZIAXuh3+^-+Bl6qtZ4yya$;Tt z?$3^FHeer(&J`^<6$&q6vG5W$<+KN}Q|$X2LfA?#_F7Z(<23 zwRL4Q?xv-^pagvFSgGEZ@ObGC)#T0@*bBO~jrM}(2kZqU&l`Vd;C)-gxZp z=DcHt&D)Mi&H0dc?uI^e>#>trB)9MAU3ctGK1=w!2Ap@)0EY82K9eqm#(!86)*pI< zrDTp#cPV_r_l=k{@wc8TF@3*OgPnq>O8#CNJEzQ8RvKS2$8>w?-;B?AFMI)Q2jFvV zP{3d+U{LRUesBXI@G|e0HNe78*kYPH(vPJ1_~H9xe)uHwb!jK>J5>)!UD#BT_6RnW z6JdS3Psk=`d|X|cimO}2TwNB;4(wwkoK~zTG8*b}qp8rg1=|o zxZ!~4@WPX)>HcYY0f1GPJa(}%G3$=QYOyrM0Gxwqs;OsPGi6fPKBrK zMWCk}pS-U1dn5J6b#0^<-KsBYq4}Kmg{Lt3^z z8QN0R5!SM)BfN#?GtC+?pDQ~5UuW~4&&6!sIv`ef`#>6Vs{4UyPYyf^?Og*npV55X zbI`VdkHW~+Uw(}_$$Ibo{bYIT@ka4!5A5}DtkAZ3pj}uE%Ude#CtTVcoe@L(?sp^g=4~D>BEA*Tl>v8tB9yk0=&==s$NxioeZ_gqZFiij7x%7A76(sEu?27}N)?ZHO zK<-CA;C-lv!qS_!qE&qyGo#}zl|}E@nOA%*OxN7qMe{{?p^n8Kkrf)ndhcKQMemmm zq+u=pWk9U#1o#-N_rBTh@y>+pG5PPThmRX!mD_c`9=nKZM{#-{V9f@ck(J`NKu=f? 
zUHjXzJuhaeoHMFZl>Qm~5Y~H-qx6A6#14(VSqBsYWt~7-Z(R6$H?+0=A9eo{ejq;J zl0E(Z#n0XSe?r`2{ZDk0fAFJ6=-(&n2mTQzJyDA@adRZ;{?;(*@&}UCb}~#V#PhlT z6867k(D_B-n|Q;&{2)Xs#QBg!{2qEw@QKnq==k1+jt{?mMc5m|@0Ho$5%D8_1YXxq z$G{%g(V1!WSkq=a+yCM96@P{&NC~{%F7S4{z}x8(;hp8|jGu$Id*5>iyxmWy|8&}m zg13uIyYz-agpM=Ex^ZY{@cj=?f&ePx28KIw}e^Wo>Op-OO#A&?;DWaA?hC9FD6F8z&|JaOq z!;ukDD}Ols(Ts@xUq;;68LeLq>&qEu=(g*-t=meXlA@L$M0z83?keNrha)FMt*8P2 z=cTaz)r&Ee#p7kZznKt*FlfXH~lxAkU{k9a=^h< zSK>?kF=1hc{~CM8G-=^$@Sr*Cy{A8kn)qLzL`_Q2m+C*%Kcx>pG@%8aFd}p_BKjxB zg!fO1+&nU|Bq}9pb;w%X$fS~YN5a!~mkgc{f1+4F;mNS0k4$?czn}5h z`7_Z;jU-5hBZevg&oW9dm|>r3NG**&UF1EyeX_X6f*<+Ytno0mmbkvk|_-@fSrrx z+`I6BK>jhpI_q&SIL>W$PHCG7FL}2%O*~3)lICpH-v-FcY91dpGyZiG-R_RFm(>4? z-i5HJTnI~P8U^an8~rgpduey~(YV!m+97>-;JdJDuE2?8l30!pdH|chb-ptawKQ}g z-S(+i^=v;2tDdk)&9Ljy9M|j49?(x}dUr%RO})`p-gH?CO&f0Y{2BVw9?uVXe=8h) zA%0#O{?fA*EgE52AGUtr?9zrq8|Y+4z4whl+(L0~!qME3P-)W9x1seHC6dkPV{9{e z-qYkOuY${aj?3$m%NqinO&f0WP2tv2vPAzPXK8E^Y;`cAuuW4PhFY%-Xd^SAXpPSn>54U~^FZ0Jg^mw~^ z()5X~F|&UHs&(oxZl zU!0ek8oWt8(orel$DPgk9HiI->MdgbhE`5H_om#MwcUI5Yr3akX2J_u`_^t+Cucyd zZ^WpzbmD8$9jGe?U*E%5$fP-Fg@4@YFCTCBTc{lDBB)3`ptgN!sPVIl9^B8RIl0IC z&x@Zh9TZYCd`Q@ZE&gHLH&1l#g<0WJ`1OLm5xoQ*%eqegUA}Mk=PXetG=+RR>1+Gs z`*uO^p|h2Se+&FIA^6;nkRwK5+ z7n`4x*tUdY$xF^_?@61!`lNSZA`$exdr-p zZL{uQbZlw!;$zF26OSb|FFCfnISHPHAn`XZJ#{*Z__#xT^N!7jB({J#*W30<^O|Bc z`))c(-dq|(rX0Mv30__%gvv0w8X`@^U?77k1(Vb}hH zk#m@1Kd{Dk&*DA&e(>sOi}RD~dhdmCzYr*Rp5$@p{ZbND|O; zba>x}F5siibj-2`e6y@BXJy+eo@Hd?@%#5O5bN^AI&)UHt>IXb|6j%&yc2Wq3(d0D z%Dc3fgLm>Ad;#}f{RDGxUw`D2Gy@kxFOWQGExgRFe)7KiR~}1kS@UE{^QvR(T2dfM zrFN`sPCOOTvaBPtMa;#5-i5~&VJ_Z>xp>PnpyjZ%BJV=C>;n|$hmm)odE)&1v=%x& zKfP%-ybHy(kOj_6@+;p==jWgHzWYT4=FA>=#7I201m&&m#F_lbw>%T2nX@myZ)oyd zlqcat{#zlT^KlX%cSwg!p4Otf6cIUttQF^r6Z+NKsO6RxQF6x@kzDe_gG;lQ zLFZVjV;rDQ?Ox2Ye*gbY&}Z_je-qw}&hp!9J(2ou$Vz+QS451}%Y$O9&JBvOdI7el zd%Pu(I!?oniC8CZz_S3YlMCnfU{`1AsP#|}kMhu=4@&X>h6Wot^1<`C&4%ho9efk( zArbOMPg$7M*%Btr{2lyjU=8dBzRlG`roivvw+ 
z-WKZ*tu>G!=G~Td>2Z&Sw8Oj4;pZ5x(l3cyf5WTbSxT{|JEfroGDKv5WK1Ny9+dNW z!0V&ZF8#0PdZfDVOqxDye`r%kGcZGc*pbkqA;%*02Ovj8^he%uKwp36_ao9icw*o8 zdz@f=6~EuZ)7Pu`{T^DESMiHoMbbqdq>HGttMs?x-q_{(2haRzL^}5?{Qd;)=~wvu zX(S5IC_H2Ej5!y(Dt7tf8GVpAVk3VyWB%~A$CPQWRQB;>(QPVap&JJnV`ra6G;w1+s~0deze_lSElyu z{Jzi`zv-V4y(x2QXCdSPu5%ySy$^Nb8IETZo>6$l;ECSKb-n~G`Go#Sp`|4=Zjct< z^j;foy^mc7yso*U1llRh{rDTDX~W+b4aN19#I6fjJ9PdL-6NULb$WWv#hybOVwdj4 z-QrI}BAFN(-Pyzv5hRjW)Z~d>PX7D;$#O~Pjin_dl}yC*AY_sU&zv8TPN1D9&X0uS z8IETZo>6$l;2Cowc3CWBl|INSl*eD+H#0`yPw%x25mqTWbnbgb$n4bK+EN#F3l2rHSUgGo%wiI@4f9I2$RqH(7Cx(_{WmaLAs~&q@ zRj~J5%Q*U6|7w1|0NVh+rxw6wD%qWbU%m*RX9?Y%YmP{BRv*SJ-xnR1jMHT|-mtQH zf95K9s_kOCx78<;!HY~AdXUe36`zfSMDV#)Se!ostBH5MNfax*d4%qg01V#zCcVw| z0K-NB-zCs#6>y!N{J!WJTcM+ajvU|9JXR~Dily(3eCRo1jq0C-dlIIcdnhBka}TVQ zh(dP5?mN0Yrfq8L6hJFZe>2O91g~ZK=%!#0n|QRt+x*!imWPhRx@M2}<37LN*S|EJ z2lrpq0c#0(+QUxsq}3C(qnD2b6R6(GwXOcxar36OlKS+=`Pid%RQn)*eemi*_)Dnf6S$W-cg9`($nr zD9mFaqD6Ff7PW~e4Yg_}N{NFPn2HZ?ZMq%pqn3$w?LoUD#`^DzLAzcO?RwhRt|{ZT z>$Tp`;W;LW>C^LoWV?Sb`Kr2Ywhs~L>YU+BKb8sB=a89ww# z2b|RLBWpOM&58!{e0(HiJ9$11jcgjW-ULZAOdq-LR;zN3@P{lDu50*U!{1P3-?;_Hx^P%%z3L@A1x+%ZrixzKHjG!WWY!n`GWMFzeU1 zKLDMm)ITwH&AvE{Ry}+qrDOL#v=cBYr~r-Eqtua3DgNE=XrF9+U;A~ChKXW_Lyp_h z9&;pQ0a`?g9&beN%r3=?u%-)C z9$X9Lzn*5yvwCqNKAPDxHByNZj|zMob)&IbKv4j{Oq~-8b3e2_&ex* z*LTMp2?u@58h02HdNT=ebzOIIjpfS|0;F z@{gsnQU6zx@{&w7YxEYQyV_|<(q&4^lJlx7tLoA#DjX)G+u@}8D9y5@jYhXGG*dE) z1Qv=nRaI@+XffJx+(I(dWanq;EN+X*y|KE&Z7VVvD=1Oz=HweGE&bCP?RJM-V{)Qe zi^fz_X>@DMHYc)F)UDN2yDS(<+d2j$mh!6olp54c0;9ul2ul_+?rAp zfz}vHxgHz-QKAIf0?FkP??`X58tvu5G9;9hEubIe$+4RpPHKk6T~}q%ILb6GOGTL` zL8)fGwx-B!bZ>N+Efw=M4yVTEc4^EOr>(~5w$)e`P}^L)T`9gqi7sOeIv*HT;h@eC zT#Qaljj^KIqA5dJMrV0-rN!>n%#U}?*VtT|3I|&3)VR@dP4ZQwCy?4vgwtk1JJ3X9 z9iF>wDZ7j|_XZ0-uGl}lt-_Lr-i_0_tIA)5NnTwFWVqdt7El!);;vA=lE4M>iljm?&8Y?o4brzSl!iWi3X8V6CuW0ke z;H|l0f3!8aVy9(1a9mAa3MoJ4X3&X$74>I094_}b*(Fzn%|v)2KZTPeRynKf^nDfO zW@UUu!>%HKdS0GkV{Sq4mf6fH>e-a&vbe9JO-7f=XtrEw#H*Z+svO_~TVNICr~)|7 
zQC5cjm!_b`O)g7*mD^Tnd%!|?u*PC?;T8Filp~B4I(=~wg-sYX~dQDZYtD|lNY3@ zEJepnlT@!#Sy|ccvND@31z5SjVY6enr7}T+F{bE;X=k?4Y2IaYT8gTxsvJ%vKv^lJ z4kxHhjvXke%25H@c{S@}!}!ZSRm5rHtL~Ii3HQsmn*&fbhY>xH15(o~${oOn)=CkO zs>0R2JVn)|W?M~?PRWyDH0>;RI;!nv#K;5wRdE~hvwUT3FjiC=?PwW#tmILz#(*$^ zt9^NWL|BBR2F1j3+=e_0U=3b^Ac1}Z%lgX66tAfaMmfE}g#3}3srsP2jK1V_GG_9X zC1V=jw=({6K%hNCj-1%0gDc z^hTF^y!wl5l~om%j0#6-K(5rW;f4aE(^%<}>(6r9%;{#(VT(Cf(WwP>Nc@#aq*Sqf zxWH6D9OU&b*OQ?S{MHi>kzsUM%z@!Wm`4^zhBCN-1AP}|$swuQ(Rf=fpIKT3ZOLIG z?`H6DJZVz(Mw6w`Vsez*frdq3D&Z9ojX4E8p}gl*RcIadnhh4G-BKavm)R9LiIkdc zG&=nRL~fa!wjjSKr#Qd#eqvR{ltuY+-)P>;3sw%x(yGJ6n&ND!8y&=P%A<7^_*Pll zw{a)VY8Y6U=7j zxfAm8_<2QK9Wai;JCRvi1*lrg8h16OJagvS8b;Ddy5&h_fowYZnqadn$gx4DVTmQD z60EPL%m`cxLY>N@O@c(y#Jc#Ut7|oqw0aYMu*+Gtaua?<(({1~ z3CuX9&|NmO+bS1owb;t7Zt=n4m`_TDkdGiCabM9}Jj(>YU6!wDEESeY4RIqRvc#LC zsqPIqc|iZ-o#3;G{vaHsNSo|C?T%e`&PUP?OIidQy0Q3~=b1Tl-I6ev*Zs3398?RMHqtCmMbZP7k zjjP&Z4M-}L*_wPHYjOsFhAD@`ye z^CM`Pb_|?LRU-O?GDn4uTjF?a+^Q zGf7pTzdTjwI0cSZ-m;+OsY0*1Jfi=kGq5@Ag+{ZjTD^({T)!g4m|A3RA_yEds9G$_ zRfUWxM+Rq*avUa8MYRj9)k5_(rp$3-$CN*QJ9FH@W*CiGu$XZR4oWX8jkP&|b|8p( z?v9NazvQuVVq<|C5Y$$6$oM4%o3FYtWvN51tt{=BveY5hR#s*8@}=rtia8V*I;KQr z8U>k0mDNeA<$~c7;}8&fjpYSxUy>^Hn#xj?>8CxV3tzYC8JYvpVz| z%TrG3ca@S)YH@7K6U6dmZzFts`6B!Ur}C75i|{uVX#Hy@@@LcR1wlCl;L$G zr22F>P)vYBH3{{NkgqFjM1a-8u`0b^*CUIys7y0{zTh%8lzOcGfgVkM3#?BO@r96W zcNpx(N(<|c2p(hSpvhr(I~^54$+g&zd*@AS1`|=eju2fwRxM5cf)5K-4JD_JUm>e}~&~bfVk>Wq7&*F4C zoK%kia>JOq#)Z!CCU63hA>+%(SyF#k}px$yL7Q+}BEHLzn#ReHCd3FVXS&*13 zRxoz`s30F?ZjTpxrHaPZqlS3!wfa>lu2f%eT*HewQG>Y`bug82J2b!x zH5MmZf>c43K@m`$I+W!v^nYK0n0z`qY&ENObpCdrkMn&<*n-_aCC*$RQkU zFfcp~F1NGV7LCaVLnaqU0&CO#})sNa-jyp1KF%FCx$weXSs+tv|c869z#(dg-TsAhMQP>Q}mnkf?@KEo8Go zx_#Ja0r`+L>sq9N>RZe%jl*71$Fz-gxn_b#N{V5lfwaepme^o)8%nVeOm<1*#bY94 zCtS5ngsq>=n98C(s&G=b=R3=3KOvohO|5VmKke9klCMZ>bDFAQ4;R>y@zbYcU;}B=*tFGEPTJB^B^#RtYB~#M z{3!k0*mPyp6%}gD0`}>!OGtAg-@h+uv)kM{nB7&|VR=V(h#a3!mK!vcjTJVtqW`Tc 
z%+FEb9EC>{Gl3T_zXm}=W7}4x?xK`0{$ch@u+glt(tb{_(Svr9#G<4%j%+T6w4gf8&Muw1lZ^G2?Z9=q}5K0A14nm0`u_woLt_*_OX zQGN5eWlmcQ6A`ydG64fRjo7UM&r6BT;M=thOIev~SwI|R^v5NMGO1e#9}A;0n`#;a ziFb|O8VQmHs@y7|zCac)NIK}Ov2m>*j-| z#dLIMSlqiT7CRYts8xMI(+5WvL12J(XoD{4;AuxK=OA?(Yp)&#&|3;Lbrmo$)?R&| zkoKzkWURenpJ?`CM>BA$sPRb|h_Po6;gGMnakLY~-%9}6!lXt&LH_4B5x&dG+_ z0aG!^SdpY{aM{W$1F$bf+W>SRK`+ctvQ(j-=aIY43#RwwunXWdn#vsIRlN;S4^_I&PudI1H+Lvp%Y*aMGJ ztOqF*ro&^lJA;e~=1~<{=hy0NWw1|C*(HoEvp}T}3I>w`l|Cj|Jdb~XG&r`7D)3Yl zwg+f$Fd${H^6WWk;x)D`%q-w2HHHk5hI)R5G!>Q_n7ychGqxNUGnNM=2?A?VooN(q zchyuH$cBaH`%bp!5}#&EIjp5&=KT$NdWdoP%D5cA_(>(hB9mp#32MQ8iH9s^|nd_*lBJ9jCM>w5>5k53Q-^-K*>%!dNmdb z=Kg_?NHJB|s&Z{?0xm)7l7Q-9mqHdqu#LtPGu12>!ZyCm_=g8S>5T+1N3~O;<03v8 z2NI^GQZaBH%T+7qK&X81M`Vremm^uW6TJ3lJ0#S>JP*K0M?Js~+3?%V%pcx&n-L3J z8UutuY8SX|D|kt^rCLB{QS_raT^0-XVRtUxcjpT@pr6{!8m9$&$rwjX-lkl{`ufjo zl2FNQJ+d zt(1_80iF*APz7Jbf^jTxQMO5(#=sbgGKejvI!i$m=&33v8-&qO4$IIW>@Us$1*Fwu zsDmYw%1D^RKTI2S*^0$u&^Dq?+_*B>w^3f2SNI2bm@%S|FVQBfJZ3Uw*>3q*qd-3= zjlEguXK1e-vLM?B{>SPG%w>!Y+N8`=kb-ge?79f80ElG7}>m2&@{E|F{%%$lQ%(|ChYYK=7vlpgz)? zWSoMzk5eIZ1ckoo$eLKAcoH750g+&?(z!Rp=X3iitys^Na;V;2(7sT-O`eOW)Mu$m zC1gkOOp-v}%vi^F zL0l2SF~9Ct603Cyh(e#-N)&h`>`HCMqW6byRi)XcdcTL>(O#wvn41>TefYuvNeqS4 z<}#Scuvf&_pbvsC;8{)cCf|&S?w>m-kLM22eQdsAMuSI$#*~V#aKP*qQ?I&8JaO)o z$T8_Np?5A|Tp{zXIOu41+sf*ECsv775p5F)LWYe>SAsP~eXk@7FqCPvmAN6P@%z_Y z)woVgLPv;Yr6=PsJPBpdH)aynx4D*cGt{|ns73B0bqdTS)hEm;@oiXcR+cc$2`o+; zb;1+nQWdIzojV^iCYVruWHeTyW`3gPd>1ky+ySMpv8G&xF~-gz_(d?aIBLveK3-nln`Z8yJ&<7-{8|v!?0OFDgybeMv#?^F&DPb;13*? 
z#SXyKX=3={$?&2wX~JL*;6I*5BpWkq@nqPddQ1@b?5uVp9cDAq(`dL!Q;VZhG%l2e z0EhL!d8y6Rdb>bwm^Zs-X>L8%6P14OmbE4g`^)vIv-vI9{nD zaVDKI(lG`nwpwti-puXt)hqUjtfX$?-KGkQy&Q)4IIl3ed4c)RUg&}u#*n2Mws#Ht5fUtSNdo70Gy}6y%m?6Utx8Qzc$@JX0|(_`TH4G7wpvp16HKUZQh*U| zr~Xg?9z1vW6LXrv34GN4Y`{nU<_CpAx?~(}t}=s+VV+i*xzBhqkAkNXk7e$b`BshD zVG)yo2OMyF(994EY*wsQXDgdY_S}Fu?abD0WvHJb;@v+pMu;-IMrj?DGwggL;0O_G9Z<# z=fHlcMqVmDm}1J(@g9brZ*LTJC>y^T7i}vjd$YXSu*~HC0z9PJ!9j8I13>_sdUu%Qsnc7+nAzImYHLUvC!oqCY;7w!1*a=8~}DY z>U?FU_*zVd{&O%^`R2edouitaj^vjHhT{mZ;15;7%*&AIOYI*Wo^7chrb8W$9m67K z?8k;vQ%0v%nld_^n*!Y9+vm5?UiR9*^-cjLlr9iPXr-~sc?JPAq3mllJP%Q$q z0YR7-yHSMy{JZE4rwy0`%7B0lqy!u#Rp2-#Tr>o@gO&*PW98%XcqjWhs7DkV9kR|K z1dJe@!Ca()tz#u0(>aduQkUy$P+{~0&UdqE-Ls1?)6v%~@mYdm64yu#NRPVHQZI$Q=`Y~$03wjTn zER$nND4ww=%yOVhf{!22iA0+V47uB8C&~h&r+M>m;DsXyfCyN(j+iPOtfi;~ISypp zMKQ}XyLT(;>8ve>sSDwR3j>lYMlNI}A_l{eT|Acm!5GveT6q{S2f;{?bVC0?`)LnB zt)m4_A{!&fcEs->A#`^fChG~9UMV~f(Jo(V&`Jw7+6Y|&?tns0kzr$cu|b=kSDa9} z_)dZlca}Z^K!Wc;a3C|~EZ$a->CdTvR<8v~KtG7tOF@mcDwidx0GoqSydd_1n^;Fy zL!(&=oJ~d=#gquF5{}#@VA8|W6IrS`G**Zu61K=3AceFnuu||^H{@ z&;}GN^BXlODVRf=1R`$Rc?B(AcCY64b!>RWv>40+M+OGp!ADn#2U$RElN)TL^-128 zKy=waYIK~kcClJz12dyWkaLC&){Ng@L8jvCB#~Mzjt!FymMpoEJgWE?aQWpi6MD44 zB7B4dQSk6QvZa`v#$8C|D@p>i{POm0GtITqQ3#6!ivSN@SES$MKO!hr#ymh*DLPS< zN;m-_&^nTyZKH#Cjp&P1kXNh+Ql!r}#d!6Rhb>~ z?^x5^g7SHELMI=G^QT2QpdePqE}n}_v2C`}6ihwherWBIFk=JTV`EP;!ne$3a!2Nm z120K92x=KQaizmq1*ynUUY7|8kh(#v5$mA9tyQ{f2`3Q#h8r7J>LD-vL-rJtqpEHr z42d$>iw*S$eNbe41&L4?vim-jZxWwK4P&WgEW>%ZQGlbvVFvLA#|1eI9e@tSL0jRZ z2N6${DcV#aHWaDd(C0(j0Gdej3q;YEhBgA&gone>a=-{y*G3Rf^tQT^C?D|=DbS)p zgrcGuj_}q~l@6~y&`1V^6+q)kE+xRPbDwGC0FA=CbvO%OS+%@&Il--v;_$qip429y z(+Ru@AbeX;n=?1UEtUnRATWM{Hc}fA0W&X`MN7Wxcr2;>z@S1_pb)rIc*6m^u|5LN zW6m^9)VBbAD{126Bq$qE(CT7R+uWIg8c;ew*=8p@ z7*WjCtbNix!Kcdj>k~^~PUFX<00DJ5kR_AxY!)pHpdo4x_7qvZHh|y?R3-cyj?RiX z4G%y%Mv;`_DsYzKM9rFesA{_51SAI8A9W>sy6xq3w ze@S331?((%C4sLnB}#pXl%|XJ(KalVDO>c>(hoU80T>B-SPZ^|RF~@80DTSPAgn(MRK#^2a|In#+27OSh8{}h#D{-TW!)9Gk#-S4*-A|`Z6nUNeSvC%-?r(9(@U( 
z$XAAH`#O<#swIF-?X@gXNI$Si$1ENd0j?j~?W=*0*o_C_!}1RbARi^G)zww0wD&SfQ5h16{LXY23{vB zfUPPYU1P#V^Gmp)OO&}FDsOBcXe^hfBvu<1d#6?_b)e6_J&Gz|x-ohN1#ko>Y8_E%c^-gA%C+sZRM@OwPf!Wb0Q%=k zl8JiBXD;v`blvoj?WOotX}?k+-zrOsnqsfGc7plb8*h$aH!LMs`a~l%l68>Xh2;SA z^jI@mC~H|SN7Dl8G)0GyQ2~Q1f&*>qA~ATlY)5sul}u)#k&yRz0YgMlyoeg8&@Hl`5Cvw{)3QnMCh+E`@Eza=a(ueHq@Df}g5JB&DxL0)8S9Kar51 zNX$=U;3qQSpIy#pm-E@HMLs)D^ULv^5AmE2@tn^J+ddj!TT1SxVk9I!9$>(yv8uvo zf*~%&Lq;-Mq3pFgN^LHw1|`*?6dMNL=;V0xwm_RsV*&lAvZq1Q#zx_483YjKv~n02 zCMsX#SLa2dqP#+$n<<6R7eO1(v|l7H+Dvu95{NLS(~5DRDGG*SvtUip4_b88ks>lT zKr5+l6vn+PazN|CVN|uT)Ww_Dl!m`yVOWVX9WX8s)se}e2tX6fP$Z2`R|NgGlW-rS zoD_e0!D$HlILtZhFHwEuR}f6K&n{fli}piff_K{Yp&`LnKz_77g{L+&;ST3U=IhO+ zBGBnz@j{2$ha~vYvQ;0@A4ITR-Y66OMIprWV?C789sul5g(V~A5H`m-4X*?E>}2uE zX{->Kn)?j?+UDLxr&En~Vv-2%z91<3L>=ltnNJN!Sj&NoS5OTtKfyDUlB3XCxjLSU zfCBh0la^+hQ0g(xp=n2BBZLA|7ES;dEAl~rU>w6TJ3~;;EW~&}H;rr<2q@G6IVzlp z1D<;9-sp|6RxHKTqJEH5KhYm*JfMcTGdcs@K;~{hBB0Zn9M}X<9(vMu0q`z;(-UVl zslBLnbO}Xzs)IJCM(|u;vFtaA>jtvHz6cK`zG|HZ=eDGrYC(9QlWTy znw`tdWZgzjvc}+VB;;>;y5)F0Ii9}daI)HvWk`}FNu-q9%l5G(nE{?Om#GhYZWQam zMSI|ocbCb}B$}T}yr74A06N=I>3}JW1xi>^?x-}yj(`*>gJ3Ia;j;$mrIZojV5>R2 zHk&PDVNWAJ8IOo4C<%3zGB6O7CrA+CC0gy`=v^im=|HY^L3guR>`x({(Sb++_qwpy z;dWCtCErGyUCGLE^61fNKA=hL=<<4@g(>q=SWTENR1kCz0yY>U-#g$>NwLqD=djbx z9KLxv0rGB3Hb^PN6?wx6WiZ`HG1cI}lh|{n9+}17wr@gVpGuxjGN$(l@_zfKt(ZrO zt3y2tb1p? z20{VXzpS>FXtP}PN$a+hHI-UAn^Nk4e`BqDOj61R`pr)P!%llhm}|Vz2{Dxy7{P@) zkJ$s(9%cY;i%_#H*U=b3R)q~FwoYLRL{%thVB2Cd`@0~@t*sL92R4$bT-Ty^*b(7!E*bRH)so53pmD<3tp%JEyVA%6u9(0tboY2}q<{G|bKB z^*kTTpulH1gJ@C{$}Nn@FeqSw1+-P513Emi;_|heFW&CQi09{6i|Kl-B72(6p>46@MiI@VKEWiJrhlzIw<<0M&t9^g4EoZICz+%Uf-*n3hXJ7zj9x zZpu!UUU0V5<(5$Zmj}6r+miwVUcjZA^~cm11kjBjJRi^wb9#n3U4G(vV&nlNk;G&z zvKp%xr4pW}a~wn_m3Pqxp0u2}m`v*gkBQDekfXFzozUzF32Dc@(qJSc@~&Tz)C+4QO-2B|Yd$F)#Z6@%MGA zj~b0Ct1AeRzz7ryXiQUqH-UB#!7GTE4CW`%ClTznkqk$Am=rr)57 z?@QR5`Xi^`gf)r2fvA~_tkq>@&_~iNp>HlneupRr8+~j*QeMGRK;>R7?xvt`YzC0? 
zU5-cLz;Xx>O=O*Tv4aT{HJ&6V=%WN&i6Ex19yFch6WZq{B!L~2D$!u38^S(qt8AyX zjbV&5jtXdnxCDCBR+rKxFK9oNfhKT#fgN&-FRTm;)fWy-=Q6T=7&0iqv|TIKgA3S= z`%6v4yP`v(Pk6x+eU*nJK$o)#=ge?^fev?J;&BR;F8leXu_1u@4m&F_qpcH}wctMS zCrw^z1NR9jut3XMe3SO@0qDLFpZ7|A&h+-;?2SzLrhf+_0_ng$a)&*hWWKJTUjT&?wi*BNp)re|fsZX`FqklLaO z%ffsr(x&HTjtbwDr_D~!%gWSIcy>-vv6wQVO#Ut^NG~kPvONpyZ=U{O|sJ zRGA28U?Lx$zI^nqKD{p=4-WF->8p?4m3;KB)JN}1 zeR-K%X&mx5<>?Ihg;`v;I!sqo%wYwEnVUzaQ->+()Om9=^~KbFby#6eRyIl;Q>Kzu zU8Xwi7F-zs6ZrH^#kkty9x+D>igWX|u!=9r(eYdp-}!llOkEaIh&njUX6%t=Zl$)! zX*Ly8CPQJSmKu|#HDqLg3gD@ip9O^?h{EvB!StR38+^l-94t3~UVqre^dizzAzkL? z^jxYhBX?7wAurz`7KCUMg;88q`lh0yob)_;-~jPTjcbjFH}AkB-%!=qA>AI^$@~P+NHeHQ)w5!%k?PX zO23sj<(u0?fhgYBC)GFAhu?~oD}3<*-y)7{R@#rW3>D>@+s_3ESO$Fi^P@!qj&gqS zt+ZdsFQ*qRl;KWcD@L_X`4(}3?OPG7eUw4nKKa|9-`_rwe^mPd(+9L~>o(|#MA=Eo zJD#FZ2&Z=%F1i0n+q6zCi6crL^*6uE-Jy8#&AEN~vx_$S@+$A>M-s$JU%2v4H|aSu zvV3jfZ!v#Gc@$qbI-ci*KOXsg?Wgo&z4+siKQNxxia%b|Z`hnC@TsZ|fnlN@fnj2P z`opN7)!+eScq{MJ|Lj~K1UX!J2Yh^er+1~DfR8Vn-hJWKHU4nCE8!^57f$a=I1_yB z=3>}>GCl=f$^?RA946j#bMi9N8J~!6?WT+zEw@vArx!BPLc6!57Zw=wIk~xp{7uEV zIhlnre&g_*JRt3&Ow?0AeJsjOFYw`O-?z3fzevD9#AoK_<`fVI!S#tS?WUsQ{Ebw` z#{2?9enFz?i(aBkEo{BE8FG{Si$K(soxdq7+fY=T zo~KJMBtaQzMHr?^1_ZcV09sz(B7Ebf+~S-7en*7+R+lIvJ-yJN%a`jd00?skTl(Xe zVG>dX*GcL1U;;5TSy`DF-29BYXcka={Mg zZ2j#jP5qmCs(PkpR(`g+>Zz{MRMk^ke|ox?&3dCOMi>!b4=~aGc{jQ{ccaQbpc581 zhI4C1FRil}?2CiVjrB}xT9~<6`o!{S!x&OHIcj|H(i`sT~L$|4Ez(|`*@CR=vwvk15EyrZ6nDar;IcnpTYrLI*qL7(4t8j2Pj7y2 zB*SO^T!yVh_pii$b^#H?(#95SowvLE!QOe<@R$4Jh@Tc_EMXVcdP8*t537&(mLFC* zm$qPFqTS|T-4wNOif&lcA_*8Wtnr~9n!{#UPeTxq!`2ty z9V#zxN6%aNY;W))2Yd;?{bW@mQF6 zsS<|Wz*_L)79T@LIAC^R;pgdB$LaoH;MX^7IcVV`+#AQrP{D7{zwx(3c{1LBxA3>; zH-fgYvtw!5F)@^22Rm~MD-Z52Fao!>cM{XmjwR!D^>b;X@A(eoNjq%F3l6;0})HZdP(|bFc!A7IJ>>GrM;x>S!wt+Mhip;SClP> zt?#TELduoH7B_8=#L~z_SlggVYs^l=?_sNRJKNhEl2E4g(Zjc{317P=Jn-z$>EvJG zsKO$B6_&~{w>mFnxo-M}Zg?s~H(X?57|6U0y&M)E6`BYSWE0`@+gqz|FUFp=z%y9J z^07taavJ{GRS&iDi?3Q(iS>PU6YiKzIggDExl5SV%lR^ViPe44>0=c>`(UN!w*!~K 
zBh6iaW2PKAjfXF@LC&)3HqOUF>t}~N?|fw&rgAVj+2+=GwWn}iCd0z`XbM+wnCK?w zbzXU?!?nTQn$0vK;}~doYd^v0E9{6WMDsYuKfA3}fjS(b@i(V`@F)9_cD zy}G=eev<&Nty;pB+S^Via~}_Lmd}B!vVRV2)f5K1j>r%iBlpVMyHNYZ;SX;v-q)sm zHTI|}d~^LilXVS25XUhwhjQF|YppOyXQ3O+O;+A*tuKXtF4mhYi~0Q1&qp~jUmRyx zd0V3R2wUR391dm?;Uxc-@3#kCYWD}*D{J>zS66&p2>;++=ED|ZcHw~s9r|_GsWg$(3#Ku)C&Pv`E_Vntx*L~-YH`o8J&>clQW(dC>)RSIMHriW z&fu8Ju@gzKgvUhih!j3pxxcmnP0?mGg)whwV+1C%YXhdl{M(w9MIKfBmPe+vJDY4V z7l-fK0>v6Jx3K{?wXnD}i|h>?;a|~+qRGQA!I2G*Xg&I%igYWA=VV(9%&A-R2V|OoR|6Gu2a3tf3M?z zz2iqRUcrB%_D>2Yy_~)P!TxJub6R8NoD>*3yu=<^z?g-k9@h~h=JHGmKj#rX&9bYV zDnoALYMF3^Pn9D_oXy;{zS}0N-c(f^r`S+cVa29tGF-bK+Bk%LNw{ZU#<29bGdS2-qxrDW_DS0KmhZ=UGUYzqz%`2Z_7wN0>!qYRw5l*A zoaLC?!&QvRcxs5;zr~RoyBns)?u1!mdzk$u)r5dk)}hB>_>({MYw9r9397iRU09Nzo?Jm zOPqqeYZEQt&EJmoRe-B_^B3_up@tImV2;-L%U|Y?zs#SLKq-ppC(lbfRh`0L(WVqu z(xwOlk=I3j^#SN9Fj>7U{N83UH0m(2ZD}I8RO2W7vrgDO|2C{GQx01fVei1LKj5i=PT0;vR757i z9wZ(n<9qn}-3fdTcP7(5+<7aCM*iBxviUx!4`^9#+@i!aDVjuxiX( zSf~CGR@L9aI`#Lky8aPX)jx%m_4lxPPDWTY<_Vh^^A@J+7X7WT?wI#5s@s9PjG~F z(hee%Jg!LVVQHQ4w1inE9XQKmGE8YrENKcAm4b$qNpY@xQ7UF6cT(~nE* z-E?(yZu^VYT&U8}3odvt$H$9Ya zqvN79x~F4`cXs)v@IDtz&Lir;ZZ?1zuzPCK&l+jL$)P9rstZyJVRqy~I#_FQ$-KkAbtY8?a zRBc(xu$9^S8#=x(!t97cx&%eoU=ziOB!eeBgtXvAh{sq|R)SCA>mhzj;RslzXvMV` zsk<>&f%PyHauksL3YT94T{!Kqx;|7b!t%jv5ta{qi?DpKTZH99;36y^=z3Uv7+r_? 
zA$b+XLALwrL+LuKOc!u%x_l__Y0|;FA8yxSC7yr-kBNAbqCh_S(Cfy-&kx^dZi@e# zL-@iQ(*G@Poiz!yE(Y!BEY#Fi!5g{>Mfir4&nClH=hU$%=q$QE&*Iu9#}cg{)m{#l zD?dtnt=|<&Tfd*1w*GYvtrrS-*CJ`-{T!h5OZmO^4@`ft@NY{XQTZh(tir5)?U1#F zbBz)6qq)DOYletmQ8aX1DGdE3O-@aXjI$Hxr?*jtPTYHIt5fkkoFyWaF{SZ4W=vYf ziP}Gfub~Knns^naW<U%oo9mk8b$9m+}0XX zu`LyHOJ&@wV5@Q^1zfqODQjI5o|LkxG@6&8wHvEJ1Ac9gp6bV<{zQ`V$F zmdY9mUQ4w7+r#xa9pEnNLLHNjFSx3JjWuSUrhkKiab!9N?8n8FR zB^|PxaJh=1PwJcDbZ1iPQ8(vl%^Ng=*IPLb9}H*bI7Qsyw3Ev^YUmWcu=s!rlc=S3 z!c8A60u$_jLw^pv6KG?K$gfGO7<^JtzFWBOg?o8hx<*{1kwyS;__sE;liG{<6Q8p@ zyT*vP@5;I&X}~XU+?Oh}loFx!d3j%DEiwhdI)3%;^20&1vL*sCM03r|8!)?T(YxG4${MYt3oxXGcSk=|juN)ctaDV;3dXB4XpEqn@3)x9F2y?X>R;=&~&3$P#hJUcOv0?W?l0O}M>r6isTsPU`aE|WGXoTsWzy>UE zH{1_z{c;(?P9MvR9O&TIJGwnHKm6gQ_1o9*M>&`~3=Fxa~5byF*!p zesX_%u`v9m0^@F`pI;jcSgAdwbbd+P;qrCD#+3SlXplwaNA%_Xm36UK?&jFo(x5OK zU%L}$?$G`>chcNX>00)x+1T8MEqHghG|RGOy6s6bhBr$hZuM>MYtWReZtdkjaHaUN z^My?1+t}g+L+TL&ZtgLT<=0ho42wjFhrK=-OFyTJC zh^@zy1N^MJrKK_CjcC6o(GBCyWEXB>@xJTXcVO>Bs+rv>kBoO+cS}03wvX~Krd5t- zSe0%r6G&s6phyq-U~YN3!7}$`lAF=!{IAFDpdPRlaXV!21t>^Lhm!TmeRx7CN)WcS z@x#NsljNRIygkqfC#z-Sh0`r5QyPz*x48?YbMt3L5j$Q|N)H8ebBD6I)ZMsrx3K^t zv=NITMx<@6Q$6M8K9RWb235i(Y2ZWuCsw}I*)5nLr*${Mp|r*V$W|P>`#d)qC`&I6q}|bmr^{|Rs)*yZE2T>@Z5punS{?n(QdL~B zn+Eruy*!cgl|2LToalwHI6xmFO%ZNtnkn3be-G!`V=_zCmskj0qGaiLv>s@#$@@@+of7bW6F| zr^jrjq)nGd)Es_MauUD41EO2;;1;>)u41pSP_A?_ql~i`R<4P& zmtU@li&t1OPKHE&38tH08Q0CPjO*rC#!cjBFSx)%IE`(~QNI3Cl)du`Oa~r`Vf*q+ zCoXP(mEM(Ku}#yRDThNo~MouaQlK}UoAo@jHOB!6cnBoBDuLoLJZ>*cUp+# zP8_4$T8RAXNV1z1MsBz#^xl2bDZdse`*MLl<-awzWMQFq+Ax$TsC1yWR%YRzbT6R} z(+wAF&Q)+pL`it(Qheo?LM~mA_8w7wLtI(%M4^rQ0Zw^NAiW zt}?-H>5(0KccFZkA-hwg;I%mF4kg>Vf<0;`9-RO-l(X%wubA}kD<>^|$uEZQzWKRZ zD^1<*9q)H@cz(X9$zg@+cMc1TG>^25q0O9PB2rR?t1#Oq`GPhj+;*Z>`2EctsGrfM zDjaoh-J{UsMU&yKM_+|c!$FwtpjL1pM5w~NZX7-=Eoq-JEsnREbYH8E8*FpY%vr4} zO&e}iyDlzqX|g@VqpzibFy4;Ep158;zA)Pt1K!3v9?kLGd28aQwz)P5@#dRc``bOa zm|}&Ew@k^DN36MKv((_!Lw*C@P5Al|F9%z3;F@VWm5nD6^(>^!rQyfg7>^Ej?1>3Z zb|M-*Sv}o2!w9m}U>~+3vUO%;kuwRV;CT 
zPLBc?rYsBFb&JW&o!QWtY5*@4jQm@hcTopRrFyi6^CBjv!jSlzUNw8EdkGs{JQO_U zt=X?x8?^CDN4&py-UUz}WE6TU^NyyyB}sG7 zKQTi<>W?BUtQFZCnLOAz|0Efo5uB0|G}W+7iWdM20bYNzlQ%<0174+!cLRCHl2>AR zNpc_T5nf@;q?pf#^zc_#xb&G6XY^41HNN0o^f-Y8U1GBrgRXhmyEzKne4)@UrA z58I;Fzp=dNLZTCOo`7D+3dw^ulyOD%AIHVzXfPO?+N$`sug8P^ACJ+R z1)h66TrN|1>X$t|IEslYVYvr#J&!s+9vvK9+{5Pj<1^V5urHBYoP%xixAodRzW16f z!Z!J%s&78wvF5#9-jvoGxzBqsop+!wkDBJ_4f(wNW5aEN9RFY3y&hm0~7W-X$}Nz7Zr>U+U56pqx- z>JHw>)^pYBhj0LU$jkJy-}nqSy&!Ns3V$4C4886Y)CNkCPO$#8_zkey-SCmGqe| zX_`G4pN~(+M_Lnj9$&BdT78dB+>n=*iTcZUt-U0#c)c3_G#+x74FVo@284|E<_c3j zyzFaM?m^M;`tf7yGXbmk0uEybypMY+6McevzMUt;aBojH>;&S2gN--2YCL$Gnqk3$ z7m33*v)c;w`tgqNqtUs$85JX|2{ViFb>OM34Kgm3i;>ANp0X;2B~+WTX5+)!$Dw69 zis!HQNAt(KnCUxUUQ7YUCZ8-Htzrc2*v8JWT&<}=(He5D*0XF+5mU%!xW>kg?A~?( zec@;>%%Uo9xd$|32x|i8{`m=Kf2h!2)n=ao!ng*nQ0e3 zeful`qf^NGo!i(8eyoBTJvlmkTsFR7nYH8d4}Qebg}Z-B(UiLACymwPk>25tuV=4u zMxGJISiFyAQ&xIj#-9>a75RKT)>48UnekXfrz7R z4OeR82}BOl?gL8ybgYIi4`$$tHl9FlYg}Q#Jl3;=FtGzoH4zBJKud<&yj{)IukAQ2 z2e$V)b`sn|9&pKB4foQ}d3N{B;`yE11duB$%F*6P)KdBD>jg?nR>Vg%`N{x$zb$^E zx>e~)yXh%A53`jD^Retzl-Kie_hmT4rBYn%y4`PmGu^im_<+-VZgR4;cc2mDcL!$il>&R39uvQz0mDk#bMtKhF)zDUFHErGcos{omeBqcLI&Z zp7tnWeX)uYW?Njw;MQNw4(9Q*zL}?B61X*3955T_GG4buA%bH}mSUYJn1)wUOXXQw zC01R+m|pt!u`G3Qbv}}TrK&z*ZddGxw%F(6XPUJ~(k`9t5O5ZH6K2sa8&A+U++n~!Y8oI-RUR72VG6mR~1&);W%jIr$qs{!=mcg*SScXLxbjz2D*H%{ZY+;-J^!dcu z$O^`R&y)FR&9N%1({0!~!*=1`WfYWZpBypGPanHD_d>)65K0fH%z?tn>Dkp~)grk+ zt;R}r=k}$<5HCO$nK$AHr{`*$gQ;V3OZ8V{Sy`MBv$+KuAyZp zcRGu^E^q2`am+H#7ZJWzGP#Tf4X#eQGfZW>Ivw>bNwpUG9QiY8Qwo&Vqt1+Xx}DAz zU!aa3b`hwJ(ix_oclM8E6Gd!+wmq21J$eF@hrQW7yaz@lU(2q$318v~QW>y!dS&}2 z4D`xXcGI2P5tzrKL+9{BK|}6KZkeLu&f8EEo}~f=!B&0V9z8QzpvKOgq|HMPYxp-t zxU1b5E2Ay{77u*qSTN2PGt;Z%2rvGJqh~JN^>%NMH1fGOJnRY89eHA?eo8^P_w$i# zv__7s;v9FB$qsy6Y>oJCzDBNs)p+q;UC0KltT6A>4qn_K05|1l2L&`p=cu6L@3%(> zFe>@zK!sQ{IrIM7QEb)INkZzM7$E8&X+!91N)Y7f^_$Y(Cl{u(A=pZxqm#xaVy;}Y z-xR$H!!t$sV|w^-(C84va>Mf-!&c8dGvn#RCBLl2Vy+UV`&k?2X1ohO#&(|y 
zqD{BL$qZHvc4ifBTRDeF$>_+6F{S41wu{aIVW{?PBlz=nQ_=_XvqzvEFWXk0+;u zRm*TXesp_)N5nxeJSCi7a>Ra*eg^~n!)VCyKK?MAH{*HKwrDN#_1jAJ1O(Q)$y^6Au*;GQU)4l zPS`dMQs5EH3+o!k98CRrjC=D>_K-h39${G5?RJL0%J1s*2mz*SgZf-eYr|yS+Nub* z+GOIelyIk$<>~5VSf-+8YM6s;ogaxyJYA2Ss8)4bIXyV`|IiV+pw`i@Zum6@W;f@w zyp^Z5@VdoWEyzD!2}tPN!B%k+Y{l|MigKx)DtSY}g)2^NxLm72HQd5`c2Q6-==$D= zl8AHEw>LUIj$uMOW3zsi!#8%K#;0qihJ9*NpBbN`(}6i)ghwY5R)`@cztcy^{`9p* zxM(h(oGhSnXev7&9i0x29-r)5xeggODJC+60Mua>wV^I~*?&yy2%D7|_Zg%O<{y{I z7TNA!r2Z=ip5^N}?rQ;;se-_n{B)?WP$_LkO(?r@;39U*VSBaBZEhu=_T)+#%J7So_IAK(&tTJ&ZC_NFO?g*7Gm*{-RvHhg@bRB0r88&ql3%) z(gmPhi(%I-jrj|`1lEjpA&7pR2V3wX85;awcvTXa5~?ifBRE`tR1Ap%fEIidB@iNs=EqUCk!pD{57oQA*EsdA?nAJ zwCfPS4_t6Cl1HcFPd9AyvZ}qA(HZy4*<793I@#m{xUBulJJ(53H-$|(Cv9OEg>e=$ z5#~kTP3V+yUf(6WPKq!%_s+)`7xN-&v6j4*AvJy>ieGGOzoB%O#=UA}TLqowT(Z!f z+So|s>@7=>k~_QPI6nTCq{c~orksUvX3_t^2>$sqDv7F>76mxHM;X`_cWZ&83&iOV z`dsY{(;lvXFx5hRhaTEMBu7I1M=P=dZgdi+MeQgN{?V=tU7zbIl60r1aW0eEkR8Bl zh5If=(%F*j;$tz>_$7Gtt_Zi*fz-SP=;dMAE|s+UAEa|rUw_}4;7oFLLXOb?;AJHU zfjQD=d?vf;f8adzpM{_*^0j<&uDl~8@sW@Bx6EC{g={He!&k(IEav}%Qb$fj-btmr z$~*}>Gz>~9%H$crP{N^Tl7uQ_pL#BHZAJ(R*^`wyX3SF&+H}SY$a<)^NW*uGsSCWLkp?YEt30`j0rZ;Ry56*5np<9amtEh)KMQbQSb+@AyjE z#2S1E^MMv59>PxMA#ZtDEEYX8G)JT77nTy-#X?>19wW=)F(}rsTFnX zhD})>Q@X@F<4SaYTk!< zADPCh>Yw@-0Nz-+RxYGMre?!Q*KVWVaGV90ggqwl*7zfhBsr1CX{!^BaXsd;9@Ty@ zI-v?nz&`t{sQ%D)ajNyu_l?#9;kNx4F=-cx(>8<@c3ABu6<~;HIJqabk_xQ%sS2^P z!gdF$px9`t4N@yq1-_{+X+?L+H8q*&P8BiKZf5uBw!g+(omSqa= z(ZMdK_0PvW4X%TGeD(NH($CQ2UM~Dea>0GT!!di;{WBAeuz!)9Y0nekvZX`k9Oe)& zg$QK}rs+x}kOAuMw)G>awVU!=XIPm1%I7XW^EK@qOkq<`zV$(btoi1>x^@OIAb z@yYHfM3$AR|H1w3mmDa<7%nbdj?EvY>l!m3UGm)sB!hJEQ7<09V1zVkn9V zvZ2XdE;t%kqY*j||c)rL106GrFkz-?C^qsu6B)2!~E z9q;b3>!fVi?HKm=b=iT;oyr^S$7kX_g^rFdMawFpUm@2yk0?B(&_Ra`#o;b|h60&O zl^fpi$O>qY0CH;(4#I`M20eN*KEC4cL-WLv4t*2ptbrjv**$-3Cs9M~Y@pIt=hnj3 zZ2EE1r;_DnYu0uTrCl+8bac^w!dB|ZU6xMRmR%Q-B+{ifa8#YsH2iT}V`OP-&}PDw zd)~+6H_b)6V1Vac>Eyffy)+_IBjYZPWyLhm48y?aK5uXkwdQEB2y@yVu_MFVK!0qy+ 
zHZS~9R2tUQRS01K+FAUROry`H=TWvrxIaca4A!@-J@{(qGX(g(TQSU&SHt6+Fv!`p z@oo1tH=ciG_EAm0`_#rtjMf{|75KN_SwM<*a(3_OJyk=9E}w4s=N&%pXlM40&Qjj_ znhmp}h7N1L?+8+Wf>XO}jrS2WU75muc1I3Q8-8?mkJ+=ExN!pS+)iP$x z79}#pPR?%2NzhnIz))GRSjuZ{#k(p*fH{T?GBPzscWdqeLEg=Ry@SqiQ?A8+36b_}YZl;wWLg&K3wS&JUmpI)z9RqbvuR=3+F= zNPA(R|A9{GQh6h=1#3zj@gUn}P=a(`w2Y}QaJ*GU-H@oz6Q7#I%j5dmt+bWeK2KLW zYG}pqICH#Nqt&(Ok^V`tqN4qn`q&fWBXTP zLyEK!R^)1#l64nTi)ZlFD_BC&#%|b6C23*(Fis~ZlVKSHhn2@?hvGXs1=-bW4=|dU zbf_HNFh|1}MmhP4@t6SCVl#P}wsT_U;E3&f+2jQC-By=QO1DpBPuKJz#)6A3xn>21|Q_b}Ib+ zy{q#l)O~bzadbR}1Km3wqlZcn9BN+CK5&ZwaGdY?##H!{J0Wu%R`|q(KN6n|Z8QmN z(UQ@2s3Dn8oC(PaFV>uzrRM@N?*aa6=sZy&tbUDP6{w^8cNWy2uDpK?7nrNynZ z*CtAEy4lmZUnsi_|CGcjE$$aetX-GJhrUXHGF0qxy>a064^2CzY^9>I9?aFr*tCxN z2y3`GT{deuC?4q=(-D`FY*$3Sg+K1tm*B)d_1;t&liLfL*s5y^tiw(nZ>yuL!WX3n z=QS^gToW!e23L|Wk^z}X8%0)A-MuEWkB%-67o?uT4*7|(8N|(*xK-V+nC!bef3`J3 zt-*A_?P$@)cF_d3PTNx^yaQ9cSK%@xIF9sg1#%#p4E}ZnV?8mL9UfPB=Cx%;*3XkN zhPUvy3iih2QfAv<^Nk(fUP<{Z&Z9Jrg!?|4io3@f+@Ct-<`FW0>%#lv7RKj0zlU%b zm%QEq**IY=?O&Z6j-p9YKFid$bWdpz%}s9>F{Cj4(ghRer{eH z{l-dW%sQ*IeNE)(RH~?n&=zO8i7WG>6mZs7#p5+gT-O`v$eMHJ(GzGI`yK23p>?FD zm0sMi;iox@;6v0-Wh8JavlcHG92}27@=-5WH($1_tM(SB`mQLCg?KRf*<~rtYy%s~3$^`MJU zpR1KE<#5MHlLpUDPWp%27E>ryT@(Xulk#a~Ggr5>whWZ+I z|Ei_UAK{mGX)AjZZk9ujez+;Ek7I6gRt-kHo&#WG#tGc+8OZn*s*6i`#UZJt?#nLK zFN?o+Vc2htjj;+s|$6$qs_@{`0ep?pfi4(u(RAP ziTskk&4P^5mRuXxqR5`+=`vdSfzRCS16zm0keuD&-iO*y;bwAid)RB0Pzfw>Ayiu! 
z9WWg(QYVNW=z2}UmF~y{m^Xlfw27_#&YrF9emHV&TjeYLzKOUT!(%=gUtWq~D3t!o zJ%hJ1zfUAmX#2}((HN%f942NYLUO?cum`MW%4=G<=UK=c>b*<3O-X~o>LFYZymJn{ zY>(E(;8$1}LKJd35--o1@mp5ZjJF8uAkXzqr7Tgm!!GKIWN(wrM8@lW=gHNBsspS- zR?*-s*zx-?Tf~(*l$pkuDt|0T)}9g=hQGfAS+u9bn0W56sWtk@hn0xSY4SibtTA%{ zX|mhr+12SK`Mytyy!@?SO;S$$Yu~lCLzAl$F4U(JH_DKOP1QnYd6)**wD)gc4w08< znFD{8`ge_mY^H?ubF;UUspvu4*UB^Hv0e4&HE}AbuYcCu{)mDWJ}LhUwS<^M{7lgh zY04)l5%{kjjke+~v*hkG*Q$1Z0^^9R%i1p7K3w(4BRTls=Xbt1*+MTIsV9E|%|LN> zlV{kzo%pb#ArjSTZRQQh?Akaf1OF()kqtD6OZE{j5543lv=L7)APSnBIb3X1_K2Cv zp7-&LuF&9_wKooFE&a2ymYEbOXx)@W5&B-?eptBQuh3J!3m$2CHhX;RgGc<%M3@WU zZDvMXjI<2vevLBSe6Z5`M3%hWuw zl&upN!Sr|K*uMEq=>*hC7zy(9Y3yNL9pye6!>rFcrB@mB;BGfOEeI%4FfzDVBSA)9 z9U}DI-HRm&2HSyDJ>Hve$6LG!B`knF?kjxeG9BROaOU~(UmCX%R*ZRFc_h`>n0r2& zy-w}3y!NJZn>~{w*QG7GwL?~ujIw8cO8sHF2u?Y;TxYUCoT>CdvwSS2cyO7@NBa5* z(h9`&cH>E4k9D0ELB*Zbj5lN2?OX@nA&qgmeVJ{{d#2x4I~nc+YO zADtfaUavY=q-A$+I~{b{C_0U6S9w6EEiQ?3TGcc1**bcBwBIFiI%&~?vlqfzC#~Y6 zr)Me>dDRpT3E?>xT<86{Ebv$UG+>V=(&?G;Stzaev+V98VX2o5i(5Zl)2+aT5zj*F zofVQy4Q~rbc8C0#@PkJ6)hj5rt6sN3xy5YFQCYhA8DlcCvqG;&pg**Tm&vCcPdDUa zmnyHTW0b{UR9_^vOiu(1+hcDZjvCMl^}1u3AeQ}hwfZwEdn)v_di`4$_T%&;@4L_2 zM$)OoLFI=iBk}f)MN4-kFi7n&VG?FEyJY*dSX5Bf?i_mPOshr`(UO4t$8(a;Ue}Cf0YW$CU82 z&K2uE2xH;LRWjv?HbtzBb>-!n-?~UZ`78f5*lmpxNldb3HqP0!Lt5H%ZVs)ssc<7^ zkkyT1U|l{ zPC)$SqO&}ud)g_r#hWNM(G2nV;?ASyln!kR*b9Cssz`!oB$u@-tX2B@oIfB${3YP^ zlG<=qHQrx>;As3LcN&JOEsPk)I@Ura>R?tY({aq=tFv%aOl6t|60_)yp%ZO)?5py` z?jZz$wj;R5{$8&xcmeq>r?3`FZlgG-kogFEpRg0=wFSQDAcLFJ(frD(HeP;;48Crd z?@WUUA$}fXT?pXk**sYOrN0Xs^Ll;s(R^{NWOe490GsojxayRM3bRUFz!T!~3B6ez z_*7%u+Y-E-{=12X3$;(_@4J3ztAX*MBnux(d{XK%d?*P{W=cL}DgThU0}$h( zM91GZ2_C&BO&CdIO#R68_}WfH8%X&%`m!?&w_h(y=pBCU+$8qo8=JhNnupy zxgBiM;iG*-pj#U&R)$TuaUJ+cV=e9W)dthTr%nQ3bu5$xtCc^^_k0;4m65_)So*q* zz?#Uyt!!I*nmEh3?k=?3B1xSXDBAaz%}U<9Xo3#ml}r;X9^1>m-Y1jc{fk46P1}L& zQWz(A?OfevFJL_j?&R+CjI=fxtcZGP!d-SCp-~%0Yel(WNq8$&l-JABPX3O+8eDDM z(m5MDo};jRqRK%QIc!=QHZvbS+CAUY!JyzqX#HnNSW;*`9 
zkm?Bz$+vlgrsOqP-qSEoFdrd#;bBzTJZxiWu9IoKs2@pC%|Crx0KmG7s=**EjqfBZ zX-=mLOpka@Mtq&64R;@wJ~F8~yfB~CBzNEDCap^r#?R%t%DRp14uIvbfEWGS7pA=e zC34K-9HNEjc#=OghCkBejKhY8gnw1yDWr6Tc9Q)_DliQHN2Khu zJ*pNZ3!ojC-kPt&UR33!(DFz_A&mbBC*u&|{*WFzum@%*!)mId_#NF^K#XUr!O;O{ znrBwNDd81|Qm=4Z#tN2o5q$DU*2y)kqm%4lUKHX1>JQ3-8qCPVde9P%Ka6Lc z7JfrPiltVk2`gH8L%U{LNz~Ee=;3;B+TxtKpTx6P#iX2P+*P|wUmpVzxMu8ec>I2F z4F8E?L3-Y{Ork5t!oU&HaHjFy#hzNO%`bb$E73-(oIbzY0DDWSY8bVR92qz3y7QxNc{J@@cvLE zWO$~%9Zr3a_3iVD0)4ADu#1`R5a5^Ug!0ihDox9hw4`>abBRf?-L1nKNsiV$i=#Cw zJ8$IMb~#FYRQr=kK~bot`A~YT5NQ9#okZ+%rE_UU9cL97O=1IQ1tix;)XtA>@Wgh$ zQ`Z(GL+~CKtcovCN!Lf?iGxkhWCjPf;9|&6)g9uE?Y7~(0j!;};@E^HnFHl`8Vh=h z=*_kurMo|S=k^>$*q@!-pN;Qa)Xl;?YEEgm2P;;OcS&1!YGa1Qp7(`Vn7Dpn>xRw* zs~)hj>>X}P84Y=?m#y_DuEOMe2^S@EeX%scbZwS$Jk_7thWz=}fTU%LCTi5((Rkt# zQhKA(iu+iPJ*mt}d%3?b!ro=;PB{@tQ7$f#Q0q_!`5d1M3}26SjUFHo7=%e!Q2OCU*icaKdUFUEkr$a|mNf) z6YA*9&(M;4Vjz)u7m3w_eo=pE547*Qk5pMw6g*)pqLOeJJYUNLD1`+L^x0C-3_l|j zL|vkr(j@$Lkf*Yd+T21xeiWJCwEdT+TNGk=>CA_nd=nOPHwlncRohyx@2+2%dMWI% zx1`k$rgKsdK>pLhRTMJt(iFYHg*Yq+l1AS|+u?LgZ`ZzsX0WxB1v)%iF>DDMVYT$x zU$@;y$XnAzf!MNk(r=AS0WB!2ZE{Ibbc@mC^l-b$LUkzeczn9edYKjm7&_)GL{Aok z*u~b8KYM?27sb5q=lRTQFL9yg$Y_5^*7(gcKx0<>Dc)oY&BHU!mBX`8w^Qs z$+zlK`a9bbDc{0T*zaB;-lT9@>P1P77k7mC(lY>RP#dwPtDc5=0o#I7j0SB3$#2rv zTh;|RSU6+0Pm|_6hv{*Z%vD~P-rzlhqpT*Icy6v#EsQ@pm7QmO)rQ{i{;Q1*uCf{1 z9wE(ITILa>!;G+)c`sZ*>@T7VNw0NBmu_$Zbs~l^wT33W>i+C}N1Gt~zEml&aR8q@ zIJ`PIz`~<R~8bLEp^)n<^)+98Oi9G#S*jZ=Y&k;`#I8;1uugt5@}HGh`iVg&up zZLFZ*xs4fgx%e@%Zd>3L7wWbP=$1CUTVfQq?(x%J*x!`*9ql&xS5dhw3ce-$VBW;N ziF*_GJ0`LtK!Q7s4@|*a>1BNX`XIwDFaT*yu|;P>rwY?*wz;;iCu=vkRK8Rpr;sJp zkf@6wAN#OTrq^brk1@+p6p}oZNnb~Ps+SBzO@V1=iorOQG1h5&d=J?O={jM3dIrwk z-a~i?B*T0A29MJ>gKKU9$4a9EFh9^vt(w5Cy)<>BonjVy4m zUo#w9r|k01p^4(kY0(((GK*1p{w23^rwe1m;F+?AxEMXw38-HYRNB(t?_Odg)}@4$ zUXYpL4Uc+@LsiI-7;mjp7o#&LO$eh3cQi~qDB^7Cdx<}NsD^bOCf(Xid|pKe%l<{l zZH4!kFX_6t_7Ot8AS}c`SH=zX7?&0sNV9V>vLhJ!VxRI?{yyg04-Sf6AikDQCr;5M zKaxd<1GcKP1bZ<)Hp89vjZSNOOQS{iA3HPGhe=~;=C2+d?ZK5`$4i4!c9c6JO$)NI 
zp}aOv?Jm8g#obwQtM##@T=Yu0=4xpsH?7Tl5OOb~6NRklXzxDn_NX9H?oP?uC0*kq z%7e5MU9YbH&#Yvv8f}uDXJWu<(&G7fvShI0+Gq)?RZh!w%M-V)m!`HUv8DRB0-x7a z$~ARY(NZkXE3BE_PWEoQi2Gvt`oyu!NyZ>3CV}En6{St>dlM)&B{<8lH}_7SIEoW? z`;_;qr6j9GZW@t3xhyuLFbQpKQS2{PZnd7p9wYJqFV zpnPSlk+(cTZ5;$+o4-Z=mINI)G;mh^*BPNDQp#53*vYVr(<(}CWJX?5cTqvO3TrF1 zMfg89ACcR|^P(ETDj5)NxMKh1P;bRBUb;6bR`ZTt^V&+2f)-GVwH}A;RL)- zqaHh4f{f1f@le#UjGIucp@r1R=}>xEJ-Mt7yG+WSsd!0v>UaLgKP<=ESPl0ylBuN$ ze8G<-!H42inUrJDeyA<1usx=e_fivK`F;moDRyU#s~!whIXzK?yGMO+ zW*3rPU+_jtKI){E-Og`Hx0k(~G3BK2QuE9Bn=hE)7<^uljMvNXf*7~Jx|xQFA@Ubi zoRyJ&^Z5K(GbOCgYcYr8s}ZtrI`yV!8iIFVX=|AP)xGd{NM2*J1qsDMVkCi|2rJo4 zT@j`j&pR1cL$=RuU7UI6cmbavHRCq&)KT)$?$ISW`b~Uw)d^l|_nD_p(vgPv^D{q{ zDBa!_YZ`h^jA>^uB;w-S*i=5fopFwDvy~lLUWNZpC6kY6^Ye>CGis6DN#4#2ov@fC zYZrIHwnNn+-3?3rtOK#mm`&m#chi}Aj%iPtSt!7sN+UeXrl&nDeBc^aTX}Ne_*m~? zFa*RqR$*cd5jiW9={G1{6Ua!d<92=FHROH=ovZ0O9y1}C0!ALyF=`5280-wVft`Jp_9~TU7_5x1 z?qL@;*oe1%nTthqc94x^DJ_2SI}O(u5JQ#rsIVpJc6hJ_)jn0JG>i>&mh5_zbU+Im zL)Q0_4cVDnGBKnyVZ~{CwRO*J;kF6Rx1G{Hw&I;XEbcIF(rY$?kLPS~Hoibwe=s|U zivbx6JabL#HiifJN1qv^D>vh_-ZBu@Va(e-dSc6kL$(8rzc#j9@FA;7gtcWR$d z_#!-=hvICkbNH&xF)ZkZOOaambK-Sw7Lq%`17@N-cO$6eUy?~C_dp77c)D^&#hYlB zuZeg0CV8>qYQZ!4wo+|7U$CPC!ENs{t~sQ^J*JFpf01WGeI+$^D3v07Sf0A z5N+%7Z0JX++@p zW#x+!YqQU|Vh;IuNQ%(JC5>vFnjgl?N+H|)wvXPbmB)#ot~!UqjG&Pj`K3H9{Pm3* zV0AoQU#amhoxJ!K&~bEc7%6|hD?}Od^(xWM>K>QIPd1*FZmcP--&vzg^X)s)`~6v+ zktm+ab4F7;mjZ=P_a(LURX@j@+_9J6N_Rs85 zARB`mk{i9#dzj1%e^iq!Vys-*JgT1(r9V>*V=ge}88RjZmpM}LL$I&R1u>hFj$3Hc z=z;uH&&dapiMPcrA%_*!sUHQb!}IIN&2aK<DUYa)~Q`?Pvz$(8JcX6fw00#L|UsilMQKWr}1(2V+BNGd6lE%{O&2$ zkspH`)cu_CbqW8B8#s{*ee?PYY<4HpG!&s}jniazVn-<}x0hpZxP2O6Y@pAgE+D&h z_{G4+Iyh;ahWOezP17|^V`WH2Q1MXC+LRArqUU;WL9lW&h0JZV07__IC?UJCWVVRC z8Y|IZZa(CxGLOXg?;3&vXSOnEAw*uG z8fMxs<~a#%$!_RNYfg`~mgJIgWJ`gh(|%L@&*{Fu&!(%yEcwQ4gw?~eMrL~7+EAnp z*v8y@!v9!QFHO6*2p7XiJ{ismfz~KmFF8!}-WfxC=p=`q=(LXdK<9N-EjKjE`lE#5 zsoECT3Bo#RiRiKoLkOtHw=rR929a~x^T3(va54I-@;Dr7H>j_kd_*1-{wLwj>5a-2 
zd8d3-&WtniHn~_1N4>~ovyI6Ec)3HBVr}QdKf_QHIv;U>aIKu#wcUIV=`;qZUom+P zJl-(we`slVUfm{+(l77sp3fieVk_%_+w$|ftbV2tuei;P!OHfA8Nal2{$~2z+Q7Hi z@ONfdvLvIT@qSJhb2j^9O%PtkbALYJS;+&Fy6(yR8k@9vI+wcJzDXP8aZ5-1z4h0e z$K7Rr3{JOM!`$`#hRz}(uM7_+2oGhBu@N_>pA0En%;iXZ2L8v)B0kOhW}Vi?yV4L> zq_;Fd7c+kL4|^O`p~S8il06MQ#KNv(XE5=j%Gb)EGKa+?>iA@+^OE7otFNjo(%v&Y zz0%#c5Q%n>1|jeXvFf}?1#n9cp7t&)a|7Occ!`n`alB>bvN0IQ;+Rq4#=FN?FbHvV zVkkjf1)5+7pv&%zG+qL$aPRFUsTtyN6}vs;P4cbT?px);-3==;fu_NyUYUF=@g76p zAS#1AiZs*|knhgO1H-X<@>)KwXv?rqp5?pdR`;m|2F~lx*NbIBf@>21pWi<(+|ka( zaXLk{3y3B-o6`0Wvf0xYEF&s3)<=9_sEqawI>JXc1>VdSex)Bm`xp~WIjvP}L!Mhy zTXGBL(gVtaLl$MEI}vz3;$`&mtlQBWw3-RWke0TvHoDu_TkDez$|Q9`UG6S?rc3+Ec2GXOxh%* z23Y02ys^4imRtTRX-aE=ZJsOZ3lzEIuM$R{4p`+mSXtXtu4TR+Rw9V>8JN7*X5U`i zez<9MO>V4KVIz(XEXR3@j6Bw6w}e%DDIlOhfO_Hbcwb9=c+XZ`}*;1dvO zw(cYR^lu+^INhF$j!8{v?PnWhXJyU zv|iDu7yH1aqtAp_?vz;IqBZSX@tIN<2ToC@vOvOH_!GVx3)4xnDVdbDagEEu9UsGzLUn5>CsI?tkT1WZ9qcn?a3+$}C z-R0(h`0*+4z@LxARwcKXD~cmwq6x#*csbyJ_n;M^)7)Sj<<)EK`kG{KXu5IoB2Yfx z$8w&QS8sX%eQ%`lOtU*MHTLcUU8sWX0yvMh^33IwD1b8w(x{{Pn$y_=}NyftoOEAnE)&fAEO1;lrk%LF$xVy4Vau- zJ3U5(ZPSOgeN6o=CQxPL8y71J=1-d&#!==3d8sh6n`lFLNFHv|7W0|oYyEvuJLZEv zA~sMGIaOkI_s8YUe(w(+)IbdKV0?}u>Jj?2Dqkn8?;?!07{HYF4-(z0!md};)9_>k zwSzROSg502rvL;`3>gMiYN8kIKyFK9b+P=}bVI1w`rB^tLiL#zj`kekf{YT_Vj*IBBm`BCtaym2^Yeab^{xjqd35 z|BjBVsK@E49#k`U$*j~INi0r>XZPK*%vP&{3JQ#Y)CUWRMx9i^M z$nKrb^DW|ykLGQ*y$@lMtPjGSc5g-&mAPc~;58?79%ouxTO9L#P{@Z@*fj%bL*eU& zrwd-GF9mlb6W??qJHGlRD1M!@DC2z4FOMC5ghay@p;R7gk$EY=Xwen!aQE5!w^#yyhcLA8H4&L07*;;F@8D@#TMtfW)BSJD z(a6JVckJV|g@dTkiiW<*k=#BRECqs4i$=I$@h%q2ZZ6BYK+6PXjA!8hhGr?;HPre- zz~y!!4Q{mywA5uHMFY*BoHhJ~d?(A%=-899jk6XI7?aWH+m+Ew0IS`k|8s6nt(NW% zop(Wr1+#O=?=UmawRUPXXPH2A7?i z%biBN2Cm6kobVLr%X32~sW=>@>e}=bK5`;O#V8+to zld{AidlVi2i=y##W{2W#>CQb}NcUWgz!+*gm$$~5GTGalWGCea2TW=@kmqAo* z6)6`;SLWf!zGK5;dU#%t&1^Y#$||vQVkNq(u;W^W6d$?l%`U{cKVdwy*rDxauScqW z;uWZB?CEmtT4me9^DDwqCeCcyb45};5iJ(R>&xBRg7Cg+{aRS87`9R}<{=2lU{*SF zni4jh;n1A1^g7ij-8mOu+YCl5Ny;I?7Gc+NCkfUUP|r_p6su3fT*U~n&7Lux*d*5b 
zi_zTkPXs*nQ_Fy2ikARSG$ zX`*I1o%?T0onghlq`cu_+74>-vbDU0>9nIQ_L1XtsdN2CCxbXF&f{<`hxOG?c|xWp zYU6EQSUJiQ)P%bT3kn=HC?cSJmcBl5of2k6VdKh)vkEokDZD&XD_lB)sq#ptv1J|+ z)}Mk^i>NE$M&YY7-8@bvpF-5DA}NpV^NEX3SuHe#d^p7sHnhlYZ$4z%(Azqyr;+T~ zcvmOjWxb3cUEq?!)#>gtyP@_oonuKdpt~@Dxt+GNj=$qc{(n0if5%mr z!_N5?VVQox@=O@m&b!v53(lU|L8Jj$AkcV1zYC#3n7yw2S|Qm!W`@REGWaz42`{KN{B>b$0=4SD<%-%g3-(f=)Aqux=K_`P2)(b|^et7d(n;J{;#r%T zIQA#X!>r21VPTeO1dge32)L3B9;k6~O-fTLEbu^}rJC@7L9c>G_Vc~v1Z8i%N zJk$Zbn~sf~i7>y=WTPnJ-nE-5R(?zG{=TlHw4KkBAmHalX>8Zh&S4>J+xRzkor*g0 z$5-WSMW7`^Bt+h{F7 zn}zXZo{6oyNNUZIYR;IMW;aIdI4wN!b)mrQd+R-aR@4qfI5W>`%|*Klm+#|@A2rzEONMf@Gj_(mso;N`n4$KRK;7H&$U%64Nx1A4lw^ z+p|Z2XAIIy3wY>f9vFknI_)Xk>G@G)A}VUlu)*!v+O=5NX#rJJ`t#6lhe5L#c3bS5 z0fhg#)2vyXYAc02r-s|JdUtMfDNs88!(B7vQj~;rPtWw1OHoGZ!VkxIgPpAC^x3Y^o&C`TLj5!NJVyKy`oQRma4c84TLUXez{swmvXfE!y|2$W#?2#W ziukUqhthBQwSIKCDw`REI#caq=4M2sl`@R!5+rwSxbz@~y3v`bk{p@t81l;rdsWgY zn+Q)WT+kPxEkrRvY+NU;q$!54r70%HE@Th7P9s#U#!pQrOi-XP?0B@tg-r_^i`}qP z%uM;98Fy~$*2VB^Nb%^PIM1=p$VE2X8c+8Agx@sYw4D%ANaZm-?xvHDdL7mZ;})M; zdm(R7Nq$vr&eJk-d-!6ak7_#;UYLWURr{8_d0iq1d=msXNK}yMvtIDNDHPh-+l z5n>Q9GFfUN75rL{i1cXQg-s59)#EB%mc$;N>Hc=P3mcD%vq~`d5_J!sb5B2 zE@$WFtGRc7borVHt9>PYYEMs~Nyu-}3M9^muYEHN`y9?$Ub9Bbpn}!2QU2}1S_(IK z-KH?u&SSsR!U~FykQ*L=`QFYc@(m#w?5A!|AYGjd<5<*=2N?_jl^(xO9I4X4P4U9B zYcQ)1x?%lza1;Y&LzA}h^q@PeJPDPh8*^f(^y6c14AxEqy104-8<^rQ0)`hQ)dSEB z-l85OOongcTcxlx3rD+;l}2BsvEho_`!S=E5SNL=Ws44Oqn00TL4xMi<`%YMoNR*XOSUu zVQY$$35CL4z-4t%#WOSvH{aAWQ`O21)}E)8r1OE!TRmMJ)*d*<@hMLmXe(fHnmY<( z8dwNl;gqP(zsjC5&z&%I=&eF|*b6Q_NxI(|ao5MCmcDu9k>1|oDDA0Cgqghc)!fQ$ z{CfQgiPt6Yb>gZwdW^#7_5Vo6nZ7{_YSek%L?hNNfCrrXG--N9N8V}of~$?Gm#ugv zqumKppK0MY6A{Pv&ebj!NOfQzEEu@DVK0JbYqWcuXsf$pJnTR9c29%?{EAOB*4-^^ zL1$A68()G;wAS6-GzGm)DI>2mMwPC6YMwwfhNWd4tq^%8f6)ToPd1cnzpHc|n=^*1 z6E@Wjjh$e2iM#g;w|~40Ji%%c3sI_NTF_EpjNa+%Kt@aP8ZG5(lu}xCxGQRDxHV!` zR(+K|OCnIW~k$W%4gHCRMGI4VVRw9alJ}zjBE&pp(UfQ>Tq#T zB+|xM<-dC53i7Qx+Kdh@8;y9q=bGxB+-92x2{$*;V6)lRSQ=f5vNo3K 
z!yZn2Q+ZrQ;L`saE#i=OCFT~MB|S%>zD(}KHs#iBn&plA{=%^t^mLK-l(1BuVk>;{ zgA9dIJr@;dd3^QwFbrnqY)u!7X0=SoB5X43^XRd% z)4oVqPun9a-e2&y!kI>}-t*6ei1QQ6SG%o?yIux&ry@+`%^hnAPHjJu_4%e1qO2P?n$RH~KV{j#y8Puc`6WOYUXF)c-91cNJ;&f37WbJ|uN^&x zKt6EfwwLXO-M~Jw!(%tgukRLpz*XtByoG(FYr=0pzcumLo@F+fC2i*Rh2rbO$f>Ix z5?1fsBtXw5hSDZ-Gtg=E}wTuI>ccw-fjFnqJGg+r+njv5N^8DeC46{^|Tk z%b>r*q(Y=OjpWXh&6!5g6l<aC49S`7G2?i?MN zku^^r{z$KK=)?zA5YN0#-mLDFXQ#(k&iMHFTiwNC4UZhew`%C5QzZ7tkwzNk1^o>d zm1dOketBKk)v-M%+vuB*%vF(MS%9X~>ytZ?h@5;Kn(dPnMat0GSoQ>1)a%lvG?|63V4` zTRVZ9(BW-!q`i`11(i9ubx!~2M#X;!n<^ zZg`d_Zv5!PZ6Dzyg=9#UkaXVud{x=d37Qy$Ye(5W68UDgC!zD9wiC9JLWGw}JX zncR#LeOcH=qKhhjfm|apADuGbVZ^d>ZK15tj+nJ<3rRS$T_lk$1X{IGBu!rax_xbZ zhg7CK(%t7N) z<4fRLhNT@TO|t=ax^dp^y0`S`U7o1gJFHo0NjKq6FL?J)@1d|5Ib%Id_^5uG7M`12WD`Lje?_y{Yn(+1XvdD7rH((Xnl6H|J@s`QnsB+TrD@j|tv_LL zecf$~kkZ&@jFqPLcQ$bCVK|%)6a0LaC7wI z2dk!rXKbwVEE^ZGAeJgs+3HeJDsT9u|5YIOcA1Y@nc5{z*?)!x%Gn0&4@v~z%oFgsc@A$@fq(1Ejafr+pptzqzk z*gW4m@ODlx5jKItFnJpVrcRnu$t)(qT0GWssSsQ1_f3ae$KgJ5IHy~`-)z0(8ld-{ z-s__x#b*2+HTpZIzVVKU7~c6>8q=k6CV3+X+Mnf`V|@EOXJ#v&PbDpPNv0LwLV%z& z?7=N9U!}G3UUAj<@u@BS7N zm22J57&)vo9*_)9#VDKdICa@UTe@ywcy!vV{0hIXDxLE9>&C!IfnO|`%a-hU0uDHJ z5|su&R3DVWDOh}V3|ILJ1cqmU%Q}`}+&CevF>L?Z}_DuJkhSVdU63uU20@=K@l}?)+rDo~T`+a3{OJ_qaaG1M5bRxpfXsMF;4WJsI@c_WxZEv_EFp>V%B)IU}dT-y1H^Y;P4XBl?%>>wa*NuXcWhXEF zsmZW3;aOW+{izZcn-fcC7aK{hua`h?`Os^Zqq82{6fwNjP@LNJ;R z(itk;i3spCev3ZEi;@@a1S`2$3+!d7?2iznbXC}k+9V;J5qJAWAGjV+*<8MN21ZN0 z@rc=-`|VozG$xexMaP9;)7Wr#&gZ$~L-46i?tXr~kgJ%92|L55ZiWBaGkgYDWnxf$ zP@mg-ztmy%S-uZY*U{?s@%t!B3#Y!SK^=cM{wUMpHIyFLAD8KLQ&`+{I?}Bn~`O< z%|JH)l-6wjnedfF%ng2_wfsUm`Smq3=C2Otlty*|Te|9pa~6KJdp?pxP%z<6U71h+ zrmPQA1NBk9%~Lpb03=O$J$ZL7f-^|PcOoevH*O51*@$3Fns7NFX#^j9alHnRyj!2i&XCnE{_?um?h2YlMCjts1^lf$gg?%UkB|3Ta_TSy73)g|Gur(?%yA)} zll-Pg5Az@%T}*??Oi~wye1!1QHY8LTR0<37ZkrV(^2L-9u2U|`n1Q(L&gEgVw6I- z(YpH|8UGY(n6b=ZHhmY4YHm0wo@pm!5R$({)K4hG=}s9LdFzm^mVb}xpbM+ zHIgK1ZBAfl4UTUOH@SZZrU@G_j~nOrn&z4Dd)Q$pnMDw`jI8V-p%{8EzOS3jPJ7dz 
zg#-&6=4ZCp7btMKti$Yzg)Edjb(hoZkoaY4;PP=1?tF@j+YhG|ZyGb;YxU5G#^z6< zhlbx3^vK^N50qW%d4zMkZYN?&BX=CtH?%``Tar}BfXgbG_$#ec{2g6X%GPS-X2o6L zlKMGL+37+W73``H|D^eoKk5C&8KOS(O27Fm-_P;)hH(GmPk{S}djHAyz|Fil^OL>a z)~A2Eck5WLAMgFerup^W%y0IZ-%e;h0nXq0gv$3z|CP%3^?!={-~T7S)OG*foXRcS zl>7UZm=H7H>GjTE`L@c9>%SJlU(nBQ^k#m$r_$$g|NM7)&CmQ>q~9m~mzDlIe~$eB zM(;C!1>B#Vnd{w|`Qcly^L>+goz8r%`9_=PtEA~G&3C^;n!nup)Th3m2KwI1-X<`I zGhc6>()P6LCxB}*++XeeN1ys^)4X;2r+U3d0Df)eANHDm&~wn{_kO9@{2#Ob=I{Qp z+Mja#$KUq@q@6kLt-o@4Ym4t0Nq)LF^Ygvt7gP0rku*Q|l4&kUbNrHNzC@ZgUNX%m zNi*}3X@2(?>Dw>0?fM^nn)dqQU-(*Yi@N{l%#Zb&Z&+)67r0-{aIf`#;M3MtKh~qM z{(kS)Hjur)Ly{+I;lHQW`q|WE%_V7%Nt@gGGo)RZIqhw}@`RM9y>r0-VQ=Oadd+uI zqA!u=jWW#(^vx$pGgGE{fqZ`Vf1>{Xd72Ba^*;L$Go9p!gWjrL!SIRUmP{!9ubEi!60{Jwg`J*qUer1mR zd*2IjnR%o4{WD*>^@Dt0r(5@DP8+p-(4*g`1O7!$^H+O+Npt7T@BK#aePGUK{$8*7 zu{3p>p9A)%Ul8^Z*y94$=H#2-`^{eO%OJj-`SD)!t%P_P*e?~ZQHHPg-UIY(=3Bky zn+de}46vWdu-DUsKl~qi%^&>j)D}0_>oi)`2W%j^z`g~ny)Zb zpw&OwyY*DAZ$K{pk&<=RfMwr}BIn->aze1SiN;;W}){b z(wx=n>~E6hHAwFncRA#p`88IoSN>7&*1lY)%%OkKgJWxc zwf7gI0e_uzzgnjIKGJQ@{4?eEbG=)S6Q3k?uwF`@Nro|9Ru(+vVp;_cI?O-7)F*KSsLONq6UCr2FIlj`9CD%kj^s z_{_}TVf-K5>hoQnc>>=nwlGbZ<~K?68%pz?|G;^|Pe7~h&b;4ybLP>lRlZ_!7-T;s zJ~EPhn>0V6G{5u;X;@z@&FwGuz5>8gtyw=Fps=dIenqhV`1^qU+r7_!68iMUogcr| zJKJiS-|YS1uiWbWQE$6xe&?0`H*fX6bqf?fcWdVNZ#Dno*3AENtNE9=X8zz-(`#;n z=hMRT%by0%5B2`q_fXE)Kl>A}^uF>HXv@r2dsxW`5_D=C@y&`Q2AY z_pg7JJ;)bSzZd7df0Z=9qBOS8flq!Nv~SWD3(+Q9OoC3Ee3LX!mB#pW=IW;?-&-?Z zBh8QXXvnYi9)RRj?I-1 z67&4lIu2)4zukjTWni;?%(UhBzlHvN%(SnPwy(4{Z(r;EwNFDg=CtctojLaH*Za&u z&85zcwQ2s4E!iL3)%?|dUp(X&5g@Wr>+P%1$8w$f`qwny?all;Brp~G*GT(arTrNB zex0U}c}-?nEA{paYxbArdPFsTRR#UudNco5ulbj$%-Vwf@oVL} z_NvzHU+vL1KPx@~*N;-)pX#xKQ{lAU|Mq_a|3^CKv1q67L*`<<9GTt19oW(7wP@oqyizS!4WOZ{}b1n%}Qre+AfY{??z*eDz;WI27+14B@ukXBkU%o-wS1s>PDDPi3IgPJB2fgK)Q+7`Wz4uwy=}eXm+Xfl; zOS(V$Ps_c)-{}1n3D$maX6yfD?tS2^ywd!>_q^xat1BTGfn|*>YgrAeVOcN35~5`p zFJo9o<1#Gku#U%JUDjnCt;0IRWgW(09oC~!v_#Qri9(2?Qc6{-QVo`cLR6|!m8w*w zf~r(iRYD126`~M@5TX!Sk)AJyd@Nm 
z$`oc#+vDhPoTK6>3bdVG!B|aRf6INNWSexHx)_gP7>qp(f^xeWbIvctj?Qm7&-tNz zUqe3_hyFM^6^FCZ8RV?&Zo&U!I2YJ>mX^O2$<~0rFjo7VXXPpPs`Z%AMUyjYu#2`k z)VA}h*|uBJZ-8u$-7ObdV}H-ejhaGV%m}5eBwTY1ZC9$T(djqciy7A8m9RfXc^ivi ze50{{OQlQbWU;&JGL#$Q%)!{V2+6%TwEF0f)F_+C#NS5$`PyuJobhT6WmC8sLro(w z%!LxmO647wp}aTFFv**9yMoIr#o~I0ZyW~VXfU@qhiHFbPWvoP zux^mpJ?GnO4UamGN|PELjl+pJI+<&~mG-Y&`wII1x?8>xwi?}KBfZ-nhiS+>R{2Pb z-L+3k-r+-!k9ucTKZ;7hQ3Q^U*DbR3`s%8ZVTW;d#`!m1pk!RS-Ko-i_!DsXVFC>pAB8z~wUUyH*nHbEWkplyw{4Yb9-P*Pm$@btg$tCmxcXxYR6n_W1R?Tlt? zXtPpnKGwNojH`-Sq*oDf{02^q(-r5(a{r9!TU#(br#zk+y zU;JAuDA}IkDW-T@m_{6^vr8OhXdl_!i?aJu{Q_s;!*JXATDOFvd6@fLB}KYrwc~Io z>3isJBL9Deb^Ao;0r~&N3w%Xy`^0dUP#(w*ypm$8R<1J+z1haiv~5t^&Ux8>E7o6( z*;3f;dsUPYTB>!FN7VFs9BwE5zE0aswCyn8&U3CP9X7cIuP7$71OJS1u9d(n1IA)S z5ioIB>k2s1uZ6An0}ZASn4mEj-~WnxuBtqW-U@ePS96fAn*1zQIM(cq2d{>SQScww0llV(*7PtY} zT3{6~usF>E;7Y@*ZZS-xChVjS!?_K(EwkYYsV@#2T^VrO9?W|Ua4YBZjjp1;E3n^} z(f6)Un>bBH0#OF|NO(Uk$Fn*%QEJfQz00ZfqaxurxRKNB7$cTsLsD_rAD@xtE14ZVBr@ zD^Ax0EF|(hNTgPKF{hk*MExxI4K^O}Rn}|~`}P8i(7pObgl|n=J!8d8uabVspAdHH z570!us(GffcuT>%Mt0Ln?AzSE7y@=cutvM0vqjJnbhZMTe2C84fNd3Qr~D|KwbBad z5tp6YAR(P)iM3ftw+q-Ez}B8iCp?Y$RbV&1D&OTBTOM}YR|CJ+;7#A=yb;H&U@R|& z-R@=9e4mqvb2uTPs2KRdhw&?}guaM=3jSV`4N$+Gz40a(ccsQ-e)25-a`3MXyWC6Q zZ*&2*3mw3~qaNTp4L*J_e#M|(k=FYv_Nz4H0)^ZY;}ytuWYRKNpp1Ke1rf!neBT7_Za6 zkN@wd9$NQu;Me1JavNSlpODh3)v<{V0Y3nIcZatj_^x%bm!Pd;bkx_8LT(GEK+0fO z9G%G0Rw=MWHs&mBGUn}W#mf)bHgI(;o$)1_pLp}*>AN4;Ucr99JUfO(Wm;#f?zQ+c z=b9@AfNc`&u`J%wLiTSNF%P%mZHog_Z}N|Z+kxFC*cm>`y1fOtxwlz2IB2g^7|T|9 zEwEKK-s~CsvSQP(gpb@tKoQGrF01*7hKhjA06XW5J!Y@R{yO~cD}tX*(~Nty#{Cks z!SR|k?rz{afX}T*X*sZKfN)#kPyKFojzm4Mdkj`SUT8x+r%Gpi3$V;?@Ik`H7GO8e zhAjqm1yggc0ejMv98;H=5nZEMD;^<*ItdZd#4g*3*dHO5B(@Cd-xg$cJ3bT26m^x zN~dBiHkF8%4slN2jG}8U+;Y+FWWP57TP@h*I@@1$WyKt6So9r5;NOBRVSB~0O_MWN z4t%lU&idV-D`M@Ig{jyrA4T86VmRod!&zft@~@$5{-oCKvuS?-*gnA~dcl{oa^QWp zg6TE5fXTtAvV&*^w)sb4cexcT+e3VG2e36i3j3*e*8{sou%F1T(Ymo8JJ1~5`zU>A z;MgbUlR*>%TWEN5XVuQhRp&O!khtT}{G8cmCd0-YbekVGDbfFcj|}%>!~LAb@eY=TdlwF&Az<}j 
zTx$2O{2n~|Cp6|+u-hVDGQK0XfibfTo$@4R ziJ~6ZJ%T-!UAN~-SBQUu_f2%wxp=n#yIHW^9pE+H|0P$z`o12vp)VVeRROacnA&sU zf&4A4lI{=xc@z0Ncg+Iu)8&GnCEul=n4g-nzkiOeEE&!2gMz?M=(g*TBZ?bF`SVG4P}H zv{@tkGwaPht`~29h4r4xn~Q-hH2e#+KKKY3&x_3WZSm1}8U0*7I{sJL`{%*^iTn=C zbsM`p{mnyI3GtBW(E!XRGDaC zwhs1a8W^sF{{#OxI_w0d-EbORkzc`Dl*1EuO7GqM?n7#CvC~`h_oo)}i7>6@)barNn<-ivU-fTVAv@Zef2ewzR#?Qe0 zD$EAl*JIZU_9&W;LxY!1os|R4z&8j!u`^0a_@eUDZPXfIx6Z&~dkMDUM{Vh~z*X6J z=V!;egz;{d?9>yBb7f}{u$d>oIROV6D}6A|(vropD-JlferqJ>2y-+44pZy{wnwnX z5`AOoYU#M_^@p62NhhyQV&X*2z&04{PiN^O+!+1 zEQxtnN8%QP@uAa4-8uT%4EzRzmn?$z-(PR;iduQ+IqEpGkHm7IX2~t80KQc4$7L_$ z>s<~IcHs?Nr#4yagWH}H&`n?bZ}KDbpAz6r{~=dRHhd{DkaCuLd)P&ug24>}*Dtsk zdzm$PgrDOOum=ohPM0Xl+K_7*`$M|VPGGkS_G9rce4nrEyd2A-?~eUh_a^0wn%hJS z#jt#G&qix$Q>8Y!H59Kw|GE<+Gte8OMnBsb9~A;0fUi9_f5z@CU5f0jksZ8)iT2L1 zgRlH9`~R%)XV?7scedFcT_W!yz5@PRKHT%s{oEdP0^crp^B2QE%9!s4ot;;qm$_%B z7TDc_&FPz2-TNK4l3~or2Sd~YyXgsGmjPQL*hDUVIV;yc)h{r;2m7~Ju*w}|ycHGl zIUqw{V-Ry?=rFK@2AkDy9*4Ib*h2;@pEP(Y!Ha#0eFI(uDKv*i!02f=usa2t$hcDF z^SvT_ZzBrBTw7riu+@U?O!&=4@a~fDZa0j5aE|Y;0@%_g9PgdKLj3y~@LHUM@e1#5 z;oZY{=i(g%wqLNHNSEduz6&hs9g1wbTmP^oi#P+mS@6kzlxUwsOQ8MQQv{B zG1z23%EA3gAp3J8z%2w@oWz~2+j?Nv7;M&u_~lCQzD#<-D}X+vzZC;pXs}s*+I&SX zeY&q`;zRiVB8@kJmEUJgc;BrBFK27EZPN#Ak6R(yjoh=#Ywf3p?X4ut&!|4kvV z!C(`*)cU@Lnd{yF_K3qfBN1-vJN7%+{|hy@td4GT+a!H@^V<*W(>=g;8f@a2ex?j7#_n5C;u|??@Qp#@s?K^!)|48?t!-qxo{~7X2JFcfbA3P>@)J> zr~5`*fo&G-Cs~?%ZeykRJ33meviRmGX*+jI7Av+?!<+b!5+ElMB5vzmadH`vdXA=`l6BG{Sp z_h`9L1?=)C976`!=m}x3d`dxzYhE+8%Gi^s#p>c?nGyK6FxgvXITbp#hh^fyZzJko9SJg|3b#m`DK%3 z&o6t+)Rdvvbvg9w=$gf*Z|4Rw1pI*DpI|-OfNgz3*j>Qx5bVdvdTf0^if)j6ulx__ zo`eGYb(eC(h^m2KYw!urf0^~$A^+)4`1oA^X(_NpHjbP;1GX9;@diL2@)6(sOW1$& zG{4V=?FY73u%@pfcOEl_1Hd*3*5bzS@i!R*vUEVc6ZZQuW~0-Dll`s%e(P-9)xf_; zPMLe3L7#Tvypx07pw|Gq(r`;wezuIr03Qkd3HI>H`_O-Z=9ut-tj`4b{tn|nQqGd^ z-N1GTcD63LNiuLVu!nF&AS078*8{spu=vkSW(XviFxcM>q+xWi(n~NDsKm1VXOdm0~^K&w~I)H7PgZI($eGjm^ z1e?<(w}Th88w2`?=whw~jsFAqYQvrQ4Yh8okxXu_JUq9QQzJKp2%`en(uZNI6u*B_ 
zuqX{vGXeZNJJBLC8b{W5RJIkKfev}WxfYFMT&ziM;)Fg2fbRqTlXY1?{PPX+zHmzW zy&H-tUi9IbkFMwJq65G;3IF#K{v|p52z_olu-gP{=L&j$`4v;m8kEh32PvD2f+S4F zvIh8-2A{|FB<7jQKdH7@%0vilAYZI4MGdW0;MQM`{bIzhf=vV(S^q;4(Bxmat z;PGppckY1T$K32ZHmnuk+YMfE9mf8~HY1NZ+32_fNC_bmuVZHHdw}01_?-T%T!}}V z^Uc6+_#9)Y0KU}VpKPv^@1g&sHPCR33ikVgWgTC@e8yMQ@9-4?ebKV5uFue874XYH2Ydj2I;FYh0N1W$Nf06s2 zvtXaA26m71!deYwj$+seY`b8;pWs~#?Ax-3ew1I<0=wJrCcaFqN1ejccqTzSVmV8w zhY6i;27ZI#&e=o&Jz~t2z?RR(y%`l28x_9NF5D0El{-5){~% zL%yp4;QIuh#B!c{nX!MQ*l(S{5lxq8vaMTzZ8q4PE(z=h@^^m-=-D~G@g2a{7;JX^ z9=F!(fn8&;Ih_Hw|1c(;bmh09sF;+yXmsYR6#<_S{4wRrGRI}W_4wLDlRGbta*0Vk^2Z|0kSoY4$a{22EIe^Ir&$~fqz%=xc8XnsX6hu zdSLe$?quw5VmEimm~$tzfUxjco@QJpz;816gjO~IgrA&X$|1o|;PRRYL^1YC;LB|+ zSv{sybodDW%hbE%zr>p7lb!oP;QI~!$10)@CdC*;jb_Vc~z6r)PkV1fScdC9KVx^3lD;N*izeS=!>LJx64lfUg#O&enR&zE%KRYOqhnLnpVx|Dz|vJpgQ6VR`LoZ&_@BrA{oxND?BgbA3b}tpavxrCdrq_((+&7egMTt!R1f?f!9T%1 zZUJ_);m(~mcsaTm4ulrXbB+Unv%d`Z3d5c7Tlu#iA%mvhL94s3aU}cr7@jo@?4V$C zx@rw-GMxTU>_!}{k#NFk{Pi>I-3ENC;ZFEUB?qR_Y@U4@_ro^NPT;pc;aIAHT`Sm5 z(Fx~f6>CyU&R9%f{s2daFvzSt@~9a2!r8bNv!7dKPiXnr?4bIHw)%x8d& z1e@c>@31ep5bikFC!Y3j4$Y++^q*;sC5bz2MpivWXYK&L&EOON_-5Anu-s?ZH0=AW z*+vTVx*PbNg3r<5V|d9XV5<#xVyA3m-;skiBnRJs1Y#~FC}BI%naY7L7JTQ=Wo7t> z$ozYpW8GqThjSc<`*Bp4GgG3Qe-8Wq-)XK1&5GYY=B)Jr-y`^(U9)7B-oaqqKVZY= zu6r}E4T60_S+WiIEwjh6b(aZxd?Gg`OCI)#tOS0U!9UsBM8Hqo)7U?oACA6_{VUj< zAK)eS^btI~1K2jh``J8Y53svt!#-y3Hv_xj31KUNEjQS#z4_c?bg_5B-q`Ip0`;S` zb#TccOD0plf&TMvHMb>M8re#)7Ih8a(>iN&fT7nx;QI|e(Kj}8XdXk~t-v=6KC!Kz z-^@Our(TerTE}R{<1iUVcXC9#9r$g6H`@olQZ|SEQ|+X+z*Y&i>nk?)@{QdfgcJ7$ zcQQtWj4p(5IYd{J`#gof2g9Av$O{}A=$k~a7R?n4zjh`OjJaOfhW;}J{$tN(*Jc%| zM30fNoxryX{*$E!|9q>1P;L;bmm`mL>YHeR)BOCLR>HC-7g8$#u9CP{y`-m>|C?7}% z@NI(6*}yNrLm%Or-2?2d&w+ar@YM$Y*|uZ_u%)xd@fchD&aY$t{y<~?Y+g7BY(KDn zSpR(XOeozFl^q|?pp!U?=lXbBfo~S>PQ4ca{Ux?IzW#R6?8CnP?Z9s{c-`*<{{6RE zZ`}oQ2V!Wd!G{ArYMqf;Yk*%V_=JBw|8@f#ykE&jt|k5u-sA%zl&)NZ4DgZRPke~F zN3>N(Xj>!S%sE0=wsQXOfIFx2ZD;S#n0azm$N3@iBz9}amX=Ok@)ZjH(HlalMqVaVR$fGZ{Vw*bG{@UJj>D6d5>Ssu(0 
z?{3D-g4>P*J{n9$v<&zP!FMUo>X*1qfonh$XiB;Mn7JJB+zE9Z9#yC*gkvdpG(c8| zQ{iGN>Pv+yDcoJ*YAT}Dl{eY{+nQUl-fwO*HbuhJsMe2_o0A9G34FWY?d(IlE8lvo z-)Rrr&YI^$eKM zXk*Hqi8+$o1ihMaom!P)AI_%Ss6PqBp;S1Xih5IFAQfFog}bR}G8OKpqI6O8P|eh+J~bVR!tZCCeooU<8Gx7QGYNKE@YyUnJ}7( z1~cJeMoSpAg0ETl)`_=#)&0aV_L*7wFc#dn*QoD?P?w7KW=F6c_-(*)7=%2gE@sj3mda&J<%w>fw15d?lJmmmYEdqSQ=fX4gXYv32 zORcN;Hg`QYRWi~K-~&-Tv6I*nJM7Z6PnA)|@igJd%Fd;owuh{(@|3s({7U6sPZBJ% z{s?8yvpPp(kK#hvxD`4exuUkF+iAPa+9vlzt1Ig~L4jK_Ugw)!UDuUszmLd6>`|xK z(Kys6ZI{!wa#p`Ty`d7c?`y5Fj#)OEb=b%$+csW@uCt9N-@yNKOS;m$g!g^jeeIQH z%xD)Et>u}%XYH9Y>rS~1dt3Q~4B?h;VJAe@0iHJSbayGH{B^hD-Idq9l%f%m8S(Oa zkM2_E{iv14Tq)lgH|6=Fi3CXvIJOE$r`}S4rdL$nh)L=pSN9#==iiBYlvhY8nQM&I zzsXt5()2RgR;cYK(*2o(=%}XhI+?p#0-3Iz-XF6mFdoX6NzjB%aS1**hw*RY|M?f- zI4(US!SRm9fZ_w8LDAjbcOeL4-(%&ghJ!Za8F1C1>K* zW%#xC1QH?}9rtxTvgVo{GRo~NYK%70!i2~?=5~v%N-=l}g{Mn-!o2sOHF%@O7b}}OYo;$eA#={9LmI3`1WAD#AM@$PhE)P-jFxSlV3h{*~HnD zBuQro(;*tS=OSt)h}0w9|88{YHRgYVaUYjI=UeV2jr*`$PTuPXo)<5F3S~?kREs$D z1V0kT)83PHWN2-}(j!5MsR4#jL=l_0HAF-LIADW_QtvNVxk0DuMhJJa3$p_}HNs;) zqHnsdZ(PwCSd+fMiWgJth4}{s&ANgZ=q>NBSos=^EM(8oR<;hL1A% zE*rk4;BUG3w)c#h@=?QgBxs4Uhh!P}Duk~~ayyyx_7(j>od0ktI^~Kfvv}iPc@5Ag zS`GACYxIgW#<$&u^#Q?-{%QPQ|5WRU4);U%wPMuT{lw@(G~^bUH-X|!rWy1&2@aQ& z@oUsHdRbKb*KBQ9tL-X1A<*MueX^Z;q`gz0u%O9=mN@;oRDi}qKz0QJlCCItSee#5CDNw5HL+K)$A13g1bt%JLQY`CAYea?>84A zxoV^tynonT^`4$6GbgHhW30YLUpVMJW+47JdpOdTV{#Mv-}o$C<$V8I5!u@Bhy(9B z?8dxaIHC-%r}~s|HTlBxs2##}+6PoQ!_^M1L*P2TZW-hH9k-)uyZH^Sr?At*os_?d zl9BQ{F)4q?Ri94zX)+B{{tP-`%EJ|bVO4=22}`30iNu#YV?=dxO4Y3?0k)^ef4Zg> zZ%Bs&>8LeL(WF)sRWOE9&F2%1;oI()-&7qOx}tojkreC&r~XOq-~P@zl{F#kHO#dRm1-+mIVT~2|-ni3UFYtGKm@e>jA=J+EsA`uBbRyace0K;V`Tlx%1sDw7i2{Ezt!C3{ zLy^Z(9>@y^3j9!>z8uZdmsj%i<@LNkqg(dvy*w7Wxqu;FC6`Th)!f%%@T9-8kbQsMA?e~aT*2wxRULtbc~@B8zXQ0~g8ZGWZ< zJaq*sHx&q`t044(N3Snmw~E_^3~2xSFfu>7IGUMbY_GJ%hQM9}X;H$4D#gn-@kGMwjPZWJ~H1 zj*FsZ$;`tG1aU{jBVP))7Di(dnRmVWP9bNOp-)2Lf~CvA?sJUNBY;r(b#&E9kv%=56qmOOvh%I)bueOEd( z1#KAbst~DI(^%1)H+>s 
z)%YKgynS$F>joDGz8U;fy!b@QWA#zSVNU%dZu3-hG89o}uMq9_D&Cb) zNCh(3T#4w=I%_@OMF0PXnoCyC{)X0VAM6M}o!HSSE1p60#Zvt7IiP=1zU@{5fFVplA4Gm zSOoe0hiG zdxEd`OR-AuUsFEeOQ}a^M)vCidk00@rowLkxo9PQ)vvQVt$#Sv-+&wd3USZ}W2W4W z_J;nAJ}46h1ZU$H`Lf=J3&9`oFB}R|z$g#;CDdD2=Jz>cuR><~8+;l2?=oxG^^-<7 zFTVRiN06o6Yz8i6rpVqwJ}94U@q$y{(-~#7AnMWiqo|g^Lo$Kg;M)nl6lr*I!DD6M~zDE!l1p(0Vsx8 z8(aofhNhz8-5KpBiq?Z~4fu|)ldllEx#$)jFuNHq(|vW`;!CLL_MsM^m|xm&mL}y1 zq!nHdFL%^Ep>5V$p)U{fN9-XQUx)wq?+dqbe}CKk`7!s_J_sQ$WRZMTeOf1w6CDDz zn49<0o*2$Z+OyP@52j>G96_N>`<_&2P5V<;hJ>PMG^Iw3TI2)zIGqkdY@I0p>U}$X zZ?nGjW)c32VYj%!yaJpU(H$AdwDi!#?O0J)_Z)oXaztONW3XE8Vn zh0|hM=nk!M@xDA^Yt9qBu;0Fh3Z`;1?ay-whLK-Q=^x{%C4AA87Nm{$U^-Ai+zZK` z7xvHS4&5(7{};8tFJ<@lq$@p)6GuC7I)0HNdwLm<4bnA;uTkz-*Ju4ozBF?SlMmNN zb`Q3JZwvUk7`yH7URQR-wAcGSn^JKGrN9CZShJ&XNqz9RyeYHe)SvPw%TgIg#L@@* z&8}y02XIgS-(TEVd%l7E2jB5^D;eu=xnDmVmk+1KSO;MoKJ=y|nAIf?oA%{y*vywh zFp)feF}3P)+CvSLQC_HLeQ9(gtwzVvYIH(tJ)n=;Jgt3p4(sW2jm<&wo40v4lw$qV zU=#WG3VDwdZyZIup48rxm+Gi?f;Hno6~jRLsZp)LAFX}=TI^riA73XKivQ%EE5WsL z38|ZwB2$}=u4&!x`At-#K~2evvFdC}_7-LAc0Y)S(74GHKlr8kL z`Nz=DvEsg5$NtWVxO6yQ;nvZ7v7eEAo%-5*`D-UL5%%K6{A}2`C0zp` zo70u=8}q6g^8L7#r}NfRe};f?esn$~1S1(CxSSD!YlMhFmSMir`WVO$m-4e)Q^Q!c zGM4V+igPEr6!ILe3zqI)(|N>xOQ~p7Y6y;*(qmpKGv%|f>d|gFq1pl7wmy|rO$xyq zG)L1Z*|RQJj<*sI5y$Q#jS=K((d)~o=GYj|thg3DERu4Yd4b`Sfa7Ze{FDe9=M;8b zN;W{2HrroB{~1v~lIhug?pi-npf%SAgF-L%WmL1a_(;Zm<^AAMDJdTfsy_?O1ZA-8 zOJ%#bt$q;l&`q|Vwe(X(Ki$Wx=m(k7>PiO*H%K_4xKc#rLI3EAm*b^E7*QUNH&Tz? z!iwtyeu=J=#dq`T*gyX#_`2jz)0rx1jH7LO>H6KmTh7yG4>IVqZ|(6ytg5)3{jZ@$9&Y!BP0f#4)^v{O@6{P1Ya# zE%^sAX6ORl`()Oc?z5LzYvUDEqXA&)oQ-3G#e|6|58H^?2biEfX2)E2f`2>sk6WI^ zZ@M>SKd_Ty*nQYLn5#Z?8#l#o<3FJKWy^?ztSic{k|Re$8E@%*^RhnZ2lh#?UBaaVv=Gihav7Ta-DKTFgx z!s+=1_&>O^wmD;$zK`pVLj)P(t!0}rCe2OS?^jVhh4Lv!=i*7?EJ0;ljnvzD8j!zH zER!~7yD?L<-LU1XziRqhYyD*yJN;=d#x4GUs@fy%+bv0f>1czGTGH;LKU@IE7)<)RvcYT{NsYonRq>U9+iBLk#r-@1~K446val! 
z^N8b{$Q>&WX|aqEp~)S}Qgj2jYQa@|u2S@TTzNdN%Kic$dLFi0o#H1{T#Th4-Zpnv zy^o0`A>ZZt@~XEJjP3&TS&Mb`TS&4j{U$>f@g~BjZ2xQ%S zA!wX01o-(ta8Q1}j`@=gkQ+smn-&J#qc*OFpJ)F6yXL%= zY5sftDk=tjfJUzR_;iqlM;VSq{XNZ>@dC}}LfV`L)I=4>jWt_uJmExtMf7((Be}*p zd{>0t5!a08>zeLFzFJ<-kFJGqAYYqapRcoWGe0^dmE&T%!2yXtJCpBwGV!5&e=4JL zejY2-$^zl%Tx0~{`Y48LKEIR?{gDN7OfQGs`@`1Qa!rG%E`peMY z@xt6Xi0F>SHP`YvBN{r*nGwfee}2?~UXriWup9Z@as_adbpY~OUg*#FCo=KDeBYZ< z`E({wk9Y^hR^W;)%y>pS`bPh$W6Z;Wd}Zd}@*WAR%o z)d+fhe$i6AP;TS6lWsP#57a)AX!m2={P{9POmNR00 zI=DnaZyEZF=!+(xH5@lk?CX+E*r0oGl~o0E(;X zP+DsyuFdy{Gb*=d0`=XZHy8wH(0f4x8G;7A7c}U-ph54&S+0-1eCfo=xOT2Y{ts#$ zp4B>#drO{@=nbWdM{nTUh!QK}cEGZ!jpIK3kc-OA_AbsLNgI%Y#A z6)3UyQb9DH3WEi*CnpM`NnvQpJL34ZCle0M^9=>@jRL=~K;;7kgaD(q0$`36sOgCU zH9b|Jre{<=s*mP*;qbiN-u0|t{$~}d)cdfCRk8-!yB561X-Tt-n0H86d~d8GzBjSw zD`5*?;u<+3J2;-S*;BrrT24K7Fv@;fEj!a{*<*9=Q}u{GS_sA^#&8w=FQ@JM^*HutdDQwqt#9rZDg z3L}X$8TcC6wEv4}^(&&)t{EK$)1Vh$l_jg-b2UYriUCpylo1u<(}FfBPsF9vH)ufn zafKv57-TvPf(|+}y4p%VuhY-*b-M4uno6(4qtY{ru9}Um`fw9yO-I8PzQi_VHHTC2 z=`<^=GA>K%iD;T4+@6-8gv#@kN)Vd;f% z8TF;R(7)|!#$9n;M$}N7QIzX`UNn?tpD$C5wl$B|R>uc*I-) zopD(MX~8VBXh7K=^ zuE3cW@#stFTNvHU3l|oNr4A7&M*1z1PiJsZL}I|%MPi!+i=spG!kI-pGDTOlg)K37 z4lM{Ri~P|A7u@7RfA>rJ$MlzK_~LqzI=EG&4(=AIgZoA5VDCb8uy3I{Xj!NZ4lh&( zM;EGtu7&F0q~g#w_0hQ~^x>&6zjh(>3Ct%ObA?YTf3(&7b{xd66se97^|eZ{nS-zXmqrPSjaV-2wW zEZ|NXR*$aqfipGdAS7mv6cu1Ay?Um=x8$pQD1SNi z!?J`f=L;A15V&sS3)k&@;kuVET%`Met5IsuX?;u-gu4Yf9@Ozw_}~Aa^+{;05(u=A z8^lkD2cQLW8z~dD1ySd9RP9XEC0ZLy)c}I8|8&NmNw03oC`XHOUEXr)Pio-(d1^nH zr}of4?V*3#L;v_+Me|ImKcTe>#!<>RvcA#8$AXOQG+NusrC{ysy||Q$7B-7J5x$gI z^MIAl6GX%%ERHy4aq|fk>$IJW{4{^WQ2Hvl|IRR$acSKWh@`+QMNHL5o;`shK*=DpTix%J^kMccM>}sbBp(zNbe#0 zK}{GX6&;Ep5o22p-(wjl=X)f1Fazg6`b> zs;idYYR6E~zQs}B!qB?dpIunkx7d$b`SQZosh?T|NaHhaQk;B-;`B4)8OjI5UAq^D zp~bl~aRA&+;LiGXi3gm4PJ)!W(V+B~8-e5{8V&kVs#)XS;3s_r5yJg)aCrm-^+ymB zq%Q=4o3d-p%Ua@SIi+ej{Z!IVch|a^wTHltS@uJ2?Wthx$F%m#vKbGQ2Ngk;IBH4E zXSSHR;fUprt^M#<@PGezvT;IU<1i0moDdc2?hwtCej{E4t$9^*&`InZ_lt+gby3EK 
zqA1kkX9Aqr0M2aS4*EcN*5&3=Ltk6zE6ZbvCti1DlPTS`8c*p0>L>vbxNIVTdqWjp z-e#-{Krg$8j8IJ9ccS@|!IV;?(_WRgj9tk2Z*XOV<0cTYKAYe~Y6}&_LJoKmIE%Rvhr6EgwAI5M0E9v@ zdh~%~8gF-U{=r`j{%rm~Yex& z$H;0-RZXQm)J6HIneg4H$$9dm(zHLVrkD*hMJ(g#(8u9Cbb@4_HT1vL`q!Hr?A@d* zJCv3#f-mGCa-NPzTE!Sq&u7AFzGVDl>@=j7AAOL$vxNF_bD$7WrqPT`1-`95Q8JS3 z9Sn(|fd2oR+}ax7UQAwAm*q?$z%0d!iv3EN$G5qxUHNq?W6n^|fjo#%)$*gXnPBMQ zkMWKJ;BGS9ir0XfT$KjPRXH}3tHMngF%LXX9ez1E9pIyU#j75U2%w3Bdd(Qaav+C$ z4Y*g%j2oIb;Y#}~){2zAi1>x!Xgob$)o?fz{40L(8S|f0uCu!{Sgn8`Fg|-oUG(aM zLv<(D&u}^XuSe@6-IaX$)27Dcv4%coWZh|3Hes25 zjkX`s?R25GPF{HY^2?foKWXJYyJ!pT0(e0~fSFsyWgCBtUdPKg|0lFQ$@#nE$~Zo7 z7{mueI+0SFMwEAr>1F)EAVIbKgaQzNY`KbYt;1&f|VpcFbarID1d(tkWeG9G@1C$F5|=lOe1! zmJs!*hyWsp@g(bmjl7oQ(H+mB{~2!OHG{j;m0jR>fOzG{QrzsGI2G`GvJvjb<##Po zLiq@}mxu^rd`WZ@CEPY=FMqQ7AGoW*U3>01`6KX=bB}ae$sye#JskTww}4-8ZsCfH z-3O+6!jTNnstNTFo|*BT1@KIFv%i)P27m8~OV8x#l(lDc%8p>ghR~K#s>@+ZbzwzP zn*f2Qw`BaW^rrDVkBf%#{j{1M!Wo;98-%8vd1~5|r>3NrL*n{q%!JmQqo%%;`=9hb zV>cXgWh`hTkyx*#x1cc+ic|#K>(VXfZ{gC6(21FcZKF=_9?Ps z0{t5$fx}=lbcy3c$_I2T+C3Fk^Ch}hSK8O6mQ8b$Oj(g0&ph?`z-V*^4?4;hF*iIR zA|*r$W;QoMajT<3uh~ja)`{AeRSBE>ni7Q;#VD zc#CPd$>3vNYJEyx!aI9h*`Vp!mrT#b zi5+jE;tXyZ?s56$@XLEvhL2L;h%SH#)CRX&#i)w)XaZST6Tm(g*Pz(n7NIu$F$R z=;z$JMGtI}Ypx9Z1GsFs?cjCgm1!tvM%7p#w}=~trTbF4he8=6ihB4b;3(^Wn<~v> z!8B4z49WI2T#Wqh)S4~I&S}Dx9A9)L{Z-#+Z`zkPJBV2pItQl$On8 z04MoEuAhj0raopqOXW)<-|vHCzIX_?4th#~(rzvk=rSh(@#~fifyaBKKyGQwnY<9X7E;d!{I3%3EQqelzci(f{(>-?bsOz^ z#zi|c7P~LWlff-qPb)o~7u6EI%2SrvUTz1)uuphI1B{wnA$Tsia(UUr*;8Q|)z?wq z^0*Wk+VaH4C}Z{kIKZnud17F?fl~%rf&m!yg9S`X2^L+OmdbjB$eoscFs$ZF2=?oPdB zjSf&F9l+vV`0kPZ7fll&B1f4x=%?ub5jdtgh;5wB#Wu#B`}=MrReJhak-*Ti(S027 z&uZseilcpGt3IQ2lAFcR;rwvDScE+AY{YfPGtWkMi4i{=ot_uYJ{#>V3^$6S3x#2# zIBFps>e=Y3(xL9nZ;HX(R1^k^eM`|LcevP360s=ucNgy9i-|?*a&nQn#IZz|wa=)_ zhG*1e^E2wQ^%-^9_Kdphct%}zKcg;tpHY|nöXVm4e{89Uh`M6vhdY?_!t+0sw zXU=o9DRt~|Hl@^ZyQ#oQL~v^~E*|h6`lmd}D`%FU10dD_ z#Gntfr_mfe-C4l>|D#U_cPqG?h1>4=G45Vh24SO=;-nqZ(xO>YQHxvL`$k`0M@FQT 
zyX*w?nl~2>n**Z{91r{XWBSlqa90^_`P#wV;mXdUo}1x=#)qm#8kyyFoKRjzIS*zW z(Ayub(e z=g_3daAUROJ>Umt?RFga?#1Pw19uI$w}Lyn_S}Iv<;r@{q2n+Rax}Ar3KS0FVpsi2 ze3582l#iNcq)W#RwF7FAU1%dN0%yi>CO)mbamh{O6c-{0HDXre>52$(F9pQLGmrN3|EQFSc)1S`@zl1phyx{nI^V=`jU3ybmacd6UUt_kU`fyPfqWydC}f}{PUvo1wuAbAY_*d zgzQ>@kliQ%*#X7#FUnkevB)VyE-A-lmfJnEN?+$u?MjJu7=9`v`3Sr+{o(? zX{8C!5pf4PIxM{vmw{bAfWWvAV_9qcIT23M9r6GFoOQQ1XW(m`IRp@A4p*F=x$AN1 zET7#He0FeO{CBY5M|kVjfO91{XJTt-ZM`}5_!&_Ea32~2KX2mR$oy#w4?`v-mMKwL3_B7|YgMLD}g$G%LWVQEKdQ)~E=!`+_e z2U9C=6ZNukU3v-iSTZ!ivp7f&Q(BGAh#!yYgYf^+96yfebLv0K(J6UKC!IDLz2vHC za_k!Ffns^u=K&ws`3pQ&Ly!S`ihp2F@dZxiGnTkWtLq2~vhhR#Y(CZZ?G~|RGLqpi zK0^n*zZ&hK@29br%fMYB+)u%`ITZN)zeB>UI~B-`6RvF7?(h=Oz9e1)`G}i~aikHu#8(I0b6WrT{yX)&&+_U|rIeh~E=%o2g@5L3L!*5y) z?m}>9?Nwq6ldhB}Bfv_q07DB`pZOG~{3}!s*uA0)VKw)P_S(IoCVLjQ#bOJba=?#j zlL@c_Zu@;`=3-HYhyVSjKltwV&aF*sKhF7pD@WVqOdjT)Fs)OgX|?;Me?EvMQN9ed z;l9`6Oz8u!6MBwTXft>>fH%uKvhtdodU!|c1KvRehvu4W$cn?pJ1FCIq#j)fKr~#X z!195IrYnmXWg|xaKlpTT^V_9SGq}&K(|woY-&6k8&O>p@g^Vr)cV?t{AIoSsH#50w zer0?KXKl>*L#gKYdd7F<=^s6LrF_8~MZ+1rQAFGEJhdh7j<$Cs)u-*tdM;vQ{Lu^* zXSfEDb2~?cj3wAulxwfJ3}cygB{;3GSbpLW%TGj;AdBXNc+G`S#+NwWuHx_J6{Ay@ zipD=0@=E!lLvC;1ou^#IUdvVN=Yojk=vayxUy`Cjqk9@$3m0c*#)8Kw+Wnts&A)hl zS_$rQ!~KoyS^5I=at0&vzgijhQ{XQ5AC!}T zEWl1`A}`wVW8o4cNx&s6Ih1Pn*KNF-TnIdCVMqSaYcE0|7kFY}2xle=gFUXmFf3d^ zH4$jQNk+5K!N`m(*iGL%t?!7wf75Mg!;T{F+~O63;uTDt3REXvsJS2Cao2b=jkr5+ zrjbsPicmy`QaBbAsyL|r+N}cS1mIa!)N;LiroRmRMfBI*mCyz`1O!v|t_=TsJehx6v{|0eCjLXR z$q6#TJ5KNt56`fhMyWL^qqDa$NL>2@XHU2#RJG%Zet0&4Y9ue5{DJennts;O&yU9VSVDyA#fM^iRY+`( z3~{~tK{37#i}7`tBRxB$skWdI|Nzq{tH9w+l8n{%JHJK85=q&p}=o$mz^O_o8TEzt%mu z*FWsa#z`XLmJg|xc=X6=5o5S$IbwG#N9?|@M{1(1wrc0|rLy zDa%J{=w~baWOG;1pYWacnD6{z;yWMV5{#a&(_NbJxa5juPf^z9Czojwt4AIzVJ!k@ z#&9M+i36^zlQ2BDJM=ClBY@FUamk*`ycU;Wb=|Wvo=NJ_*Z>4S>7M#PZ5hw`3VZ%7 z%HZGBx+HOhL039K`lw|#!oG1q9(&R`gTB>qr~F`j^pV@klPAy>|X?j zJ_ua3<TM)*63=Acsb=4=iMKAPSGKy!T|xC6alj!*z&Ybd zy81CL>gVRSgT89$YfjvReZq~%KCzLuPk4=}fa&5_ydIa}S48Kf+(_2t<9(Ke;1A%R 
z(Yfl0TiMG6eU@2_(-4;dzokLI&HItKn3ss~Vp7Jgp?*qsjXc%pa{6FXM{+dK{cj+Q zzX9$)qJM8E?YGnZfj@(FzRqnWL>4*EoMy|o8BxEJzKUv26Ap`>@FLEOJ64_!OQ=Ww z1CCH3*M)g-izAG#RiDw~R=QJP*D^{$iDr=g~~W(=7DvR4Mfri1S*$7>%yl`%Gw#(5F5K zg$?A^ZRiK^zdsOtuHh^salKAgaoO}IEWC^2BWP+=VB8ST*z3#o;+1}UJcFF$?7r*( ze+~F&_{dX=PX_v#ZW4c0juLd%gPacXeP9xDum*#;iRCdd`6@6j^)tgyKyeEti}?Hc;m?EmjS{dKFM zpRMZ0-gV}^msd-9Xl*;cIOE@6z{SWqu9@=8np;fmUIn_5z?QRiIqN*Gq5*#4}hWUv{O_zY;&>yD#($B3+l+sU;^`kqq(E3$Zh8Tj;*QTO+E?bb=r8!>r zs@^LN1y}HLWY=jc517=t5CneFWS2Q1QSx*@?1Aj+{#P7;zpnWty3r-p+;V(xS&lCY zgvy|2D3!m?*%=9bZ>U7aMOaftccdPp7I69m9C?}KwPV(y5ogzb1O2S0p9eWV=J!72 zN=Q=cewh|Bz`7K18PfW^A(Tzl~m*Hx}5S8ehKeiqbKNN(vmocUw7o#W^`Z@e^Z0T zY`LBIfY=9ex>0OU?AiIFFq$YTwB#bK`Mw6Cp+}4;1L0S|cfAn7=$bM_NdMoB}N*n(?@O zL3BeaQJ1$Ja4xw{yxm&=AuE4UfBkhR0n%Lv|WyT6y`pAZk_zwwlaW3 zN-Ryua><)0o4jZRceCM6?qm(RG8P-#s6O$9*ZXoR+SwT_1-}%m7p;u1ntEac0Fon# z3=tJU=ctoe+-t#I^_0dv_RsMy{?%E0QfNq3XM4kJpB!*yHe0?D4Lkt5lzDkN5pE_}}-` zPt18n&pPWC-?#gQs85)TuvWi>3cOutFDL!7*5Jpi3{_JTsGZsu`P2LrpCYV2;_#=7q{4@5X+Gz;oa+HAlLIFj5{^tpb8 zI}@D$TFpcDz+ZHKZlB!;n1q(ehano0r?ZC}XfWG+D_JcA_-pd~h1AQuF0wzZa;rQ_ zVpcRF3QHrRurwkH8|l&)Us@_DSuteEXqJ2EcbE11ARm*Nwc9>nc%yQ$bnt&K{|IZw z-f%B;@)3R$z-@rz8YMnMG==P17Sm55{mi!eyUp&0ZNZ5c9*+T--G9jJe)t=qbIOx( zDfRoz&Nu02<2h@2V_Dq2{}i6|UDmEhJYDaG@)j+wydJD0-Ul`BAer2kGjh~(S$l*a z0Zu=yNl$t!faIj`VrX05#%me<+6Lu2c}u82Wd1*%Rl+;inGphBmH!np^oob64Vp zhlJoB1z|iZW16qyP5N+?_?Lly z#%KNo+I^${1Re8tpAPQb;NB_RPazIi4(?)bACE+v-19j}nz7yUAr6UnB~p0=accK` z`jUG-L#ge6aAtclejr_4m+?(`D(}x*qIxpg@NHX=^L(C~@(K%0FN<*Q>!TwRPUeD~ zhsGg;J>n}5^uxNRe2;7yeOAzC?M(a~zD&+3A#nT~3W4jqThFkm<&FSgd2HWUo3;jF|x(MK&{!nvRm8A=EAum}jaRu7p4t7^ zlU`{3BO~Cjhz|NcZTsSEb=#7z0kBpt zaq!mj`Xd?rauQ{Z0rg}hfPkz75RjDs0s zh;{V1axQ!yv}KN4R9=Z-ac*e<-CK6)=l}eW#b}?EOD7-whRwRrAly?&&Jv9 z$_b0?=l*8mqeQe*!70GQUFXYr*#d_jWuz7$rpb>8@lDfEgmZWdIlNor)EiQav&nKg z(SLEeA$dpHFZ@Wdd&Id}v;fs^d1x{JcgU ztzVF`VE=`5rx3K{mrNB#C-a5oOnxl|vT`3K+qe&+A$cZUjJFc|Y&!dMC)g#mncPL~Vz+5630-#i^hhxrK;Qg&Q}ndNE8uy^bc-bW&CY!Ghr!Oxi<9z^(|z^@dSBVF#Y`8 
zSAMiNQ%RYJ8bQEAjS=sKmQZ1a;;DQ+?P~jfl^y(<@3^lFVes&&(>!OObmV3at%W#&0lXuP+yk|IT4JRbdGMpxX zNv}2fKJ-EFa6}Loi!)S!xWVqs*ky*J#;kuVoi&bws~Yi2f{8>t|UsXZBIxVkkMXZRAG8)t!% z%(F384RQ7`^@cQPM%9d z3}@CdfOGmUY2Kpc@3?mhp^pu`CE_-d)9K&fMw+it=@ze1r`)vL3A`>;x_a^$$wz>L zrC3;%u^9D}^C5Fj=?fVeT@NCXsgSv4`Gf2{k8k4rwfqHJ_xgW;{8RtAHF}9PdY3o0 z-1}iXCN;c?fkPbjvMIHlab_^~r{fGYV_Kv87LLTjmR<8I@GTd`Wp zr?MMsxfPJ|oKqayY$N5ixQH)DJ@V(e&rm((GgLH&%oWRwz=vbuFi`(4C;!rA{JQUJ zt+IDz?RO5}TiQmlQTBmN=mdW)F)!qjy@`l0pga!E=&^uIb7I8xK{Dn-XD#iaxNHZz z!N2oi{I9PdZ>xbHFd`wB3ki6Foqq8L!!r`<&l(J-_EK@9SFgU2EO%?=Sn_d-$uo zLEV6TM>p!Z#jQ)P+#$07{gH>@D*RN!4@&qQ@?Y%yj}~PUPua}HOWAN$Ov;9P-#iLo zNkiGhYgDEjq8fXxy2O&Y+|6~cI0H-8J8TB5&mUS5bic`t>GppJpUYkR*Si(|pcOt> zP6!|VF?R} z=QiP@j0lE{Qz~YAcNY5}PMUf1XP-L-o^j!Rua|Dw;PrAzWGvpxdJ3K!PUt(RUwI|h z*l2JE$3Xv1oUzXEE|+c77Hdlt_3Cwo`jneBP|yWSNL!JWHrYe*-<3ZIWXfQ*O$Mt@ z8LXORu!3W-LOCr-n{elF2a0*5G2Tl_?uzpc$Xw=SQi0#azstsZ$8-eK{Tn_5`W}k;u-9&=iUp#2YyUXw_ z>CyAxopAUjAmOmECmbFF5Dsth5YABi@p2ljZ}r9Ts#S;2+vwHzC3ssb{xL4v`r9kq+`k zI>;O8AaA6DybW4;wFne2smz&$E+PtL+zPMW>GN_dGo9dVwfJ?c(|mE84P(gX9WV@RcN+!)M-8Ko zukuUY#uD^ipVsEnH(B3pKAg7zdDF?8`esar%k_vWkWKT_HyW-IKF{H~j&F8&DutsN zp|sAmc{ssKz00`X==IjKE%bU9}Xx-^*cPSgFVrach4UlksJxejApw zx)HCJ$EzE>ULNKi^m;d0Jgp8rtq!lWI=s^A*x6Zck&MS1licWzH;8Vf*T8F=*Jv=j z-1O#dy;nYS!b6!|hW5_9Y(M68^K64(HS;CrbV zX-h$PY9r$J*0DqJc~|LtcGJtw8@`>a$R%FswDaLFYFHYdbd2p&+1Q> zYd`lV^T;s;o|bxllD92huS@dEMk!HB4W7{%{T?Hi>51<>B)f(a%7&9YoBhJ~`war# z={MFSd)5aG^gRJ%D%rCwVBmE$U_^r6A=zL?6ZM89ttUx;B}v;RTg$D<`i|s}+Iq!y zO)I6M!1Gvqkd$+(M#>rVb_P5nOjZHU7}Hn4Gr_ly0-h;e ztPRLlYMTO{IxaK^q?JVZ2LX=4{2pfF5kJ?R^=*Dn2a6j&ukYx*W!u9mGk(ujZ;-+p z;m^hW@@Hww{hkT@@;~PI<=@Hi>+^72w$0~ZH0t$v_@eVhzR+vvZ9Wf=L{`anSM+)x z|474Mn0R<3!tBl?kzMk?C&B&%4}W>WOIFs6>GcWvrUcb~0^6AXZjySHbzYOpRqs+K z1Cw4(2KIP485r?;ZV--hEtJ!)Ygi4i>+rgG9JSBO0+qMDc`HKr$1We`6lZ1Dxy6QfIk(^~5IT4(e-LhxFq#j_qj-xz1?VgD0nV7t-M48$3P zuq)2s3J48Tj#l{|joi1~>ZGHTbQDWEtbWKoJ;_@7Zk2R!srhQzQk4rwI#5PkmDatd zZSgLaVjSSG9_ODm*r&s@Pe;#TPMhrK#1qp#diu>P_IonZ)=NVqgX_^&eT@8`ec(5x 
z9Aq5mceys%^WB(_`EHkw`EIw5`3{cx4y85Ub-F*j!Y^N*SmR@(o&rw_T6|tkZCCkZ zN3bWsOEJOI_tCG5yMSc#863Zh*OMB3p5=TR(2444i zjR7A6uYsGDhOsKa!;s6tGJhiIlMl|b*zoIH{MtG{UGMPd$sKs|q*-~A|MKGf`5#$% zDq@Ms8t^q)12!bE281I|C^-U>4aI5eSYI%6aB9dbB5ynxI63Gs+7q}!7DU4hwl>4N zmD^f|mxb@B*E??UdpV>vy!BoK{c5lAlHpw@y_5GFdaQpUV)@V< zN#A84+`t2FS%&cJYqys|Rr`nbm)XHyFKPeiykSJn17r`yQHFIMu-rOtfag2_%RV^| z*r6MeP~E_CPB+H6@EvDh$+T=u$Q%$z6j}k{=UL(Nzo?uCWbLKg*13&N`CvOvGc3CW zlpE)HSD&+oM+q)HqH|l63-lh23OUbv#o{@sN55JQf;v0~mRmdqCx|>|r!R6s0}$bG zKgTR(k{`bn=J}ufbgzdgl|#V0<%t2OG1P{|4nF z&losAIj`Q$GDhz8H72-5^db0VET*VHdT!QQP+NH&>bf1QvcEYmE?S`EecD1~1 zVI{QZB_2s!N4QVR__);dm2|Aj^gW#Wxx&#nE#f{N{xR-WSeI}3_W}yCJsCH>l_*2f{fTi~6Vmo1Y0LerZsNB3^<_zMyZt(vCjJC4QEQj=X}<@cl`W%E$tJh5AQ8DpO3=i@g7Ft(2ua z3t?PtaIJRjQi)x2jEl2^&SUsnW~#@1szn~yOjkMRs06B<88 zn~+auHHLK9H6dd|$g^HPqxDfk>MNBrvVJ<`I^nc6s96EGy1BPSK6tFg$n)e{mA-Jw@wH(YKtUH5{Y&oT3f!sW8G1 z>3f!F;~{-=iPm*2Nr@i2Pbc2VFHVQS#^>KYffr%VYB6ANzuT`lA@g_ zONz!$mK5zhSyB`|SyHqtT~gGLE-6}-E-7kFmlUl}mlSoSODcNOB^7n56V{z*;@oI>*uWBwLjil%;0&Dj4B}Jw z7H)Jc-QY^+J!MWFIMja09p1*vLaZBkuUbBM!3Xg(`IF{-79aI2M9*Z-!fc=ZCHOuT zx_l)=UZLSjzHSLerl~HTyE+lJZe?V;M&gsZam3f`O5Z41<7pbti^E;k4Pi9v?J-p$D-zD(l#|EoNEU!YEx zN__G>ocgOsAD4+sr{*H=Fg3B4YkIun%J(Yb*fAtEF>|pVm8VAw(JyBW$~zN!0sJW6 zc(wj;Of+$BG|VN|b^19q2-0MVh_AqkPuk1%uB)X{t#&Pm#IrD!>Ys9jsd#G*z|sfx zCRUn!`QK9k-@zV~ch+@jMLR5>WjVIZac*p#)>3a*-dHb%RAP)+?&>U+slm;mZD5q z){RGrBt#qm;&9%px{^Bp7OUG}Ri@qlDWRsVhUDxe$h1N0abFRl#5_gzcpPdn> z?)pkM`gL-?$Z7XQn^Pi8I@Z zQ_aDJ>s_H1*1E3n8n2vG%LCOZ7n=K-|Kd3~IOCmo`7}2?wZJn#P6qhALfq$Z!hRoq zmt>X%nikDlCMeri4pyBwvucL;&V7c zpyc33;|tMKT)5{78KGH!-1M5a05$&^$)7k`HxIa|-fG`vEF;d6kK*LH$6d{?)7Au- zZo-WLE^&qH0_q&PMc?N_-^2l}pA~w8yUXv{z*j#wa4;5P(Z=O>KL=8U@Ekue2>V3A zq5mX*5yY3g0-k=Kj;BG3AN2)sSmzhpPC1Yo_3LwfRlftzbN+di(~g$93OBk!4e|U- zb(l|>a_}RY@qQPY-CVoiZ;tfL7HgXczd_2HVyB;Pi*w^-%;GsF=KzpXe`na#Du|-u=qj!4D_4?%SVFUu*A8W zcY-XQx`wx2si2l$QqjeFc$8QyamilrT33;bDYs-bF^ljguf*qC=R)8~;YvPTAq8dBV%%74FX?U^a(=Z}SAUsU$Fa~yd1@k_2I5iwW5_;!y=zGun`2ja 
zi}mbexApD=N3nVyYKHk~Zlc6}37#3HhyS&W&)l8h8MpZ9_>C?ss9}6^N;Qbdf}TtVG>+tVFb7p+vN4p+vN0 zp+xl3LP^ZvLP^Xpe5$r@q4=#oPT#ypTXkHLgm9e1`Py+30Cya9{f6_4amN8Z<3qzv?c2sl2sj5KgVA!UBNxLH0Rzn&*s)} zo^{%E3fr&no)jj%@U9e%BjXYNPms|&MeB|W^Y?&l@!>iC`p>(|jnfgIKDR*I?OWkO znDg_;f1bKP>GBlMZZ;|@p1pxFu~?U^_bt#mlP`fkm@EM<4@!V*f}-!@H%03WW@Ed| z`bWX`1rloS0*PUt{g;AUC>>eFvIgqrsa}!Xw>?eXE#q6qe2d~Va#`^mwzI3;SzI#i zb2qsVc>KmQ-Z~mr0)KTOJWB=i?A6*6WVH&A69mTQ#suO?S#QW8D15g(_Lje>izM(_A~_sjQGn+S z@pr@TluYk4^v!mr3yDA9ia*J|FL|e{vSi6lKeq$J`+QofJ3Qj&uLkzIP;H4jgI={K z?tT$?I=;!z^P5zjrgd99s|oZBqkMzB%eV-BCC_R2kKhCj9O;rZ5`<6qz0wcasg2vS zAZ4(L=l_>Woy)fusq;gwP#8y5r)}hxwX4Z(UAba5#Et#kuZWx@f2Y^AWCx?R_555O zl<+=+C(MCmH^(TvzNVkSlkk?fcU%Z{T%YxOR(Mb2gq3fQ!1s7l(DVG85x&F7hv!W< z9>hoZZm0Ej8~P0iRp^<|>AZfPL#kF9BE11CEuXi`jrSrVu5GR*t#nCtBuVmNk8MfX zOP=u7BzdNdz9}xdnKOmB``rl3jZbqEbdxdQLa6uYb;+KUzO2C{4+{wRmwYMcH~962 zWUb4e4N zI)f{#Q|@gpggO0;NFpt?UxYSq=172tarcWb#f^tV&Vb}89akIf=RshCXzHXSR``0| z;zmZNO^JG6B14imw0dx~i*;L})RXhSX2UMmORl0N9KvuviW3G-EmL@{TOUo4o0#yu z+?7gk4as6;qbG-d7hR)gF%%8x+gu2*#b>oH;J%0tzT0cmE%3bJHSooJimyp;w>TP2 z$YKat;p-EDBbC)D-jxCV#MIjx&|6ZxI|BO71>U_De=t#O+mq_V>84}@>;7b8eTsJ| z`7u$o$dI&cf&LibrRbX&-{g#AAVoq`rxw;Y6(paAmCtm$4REgf3s)AWbv!+22(mL} zwXoH5$^AjCSI=G%WUK3gUn5grXIvpJSR+J&JQQ>HB(o$Dp7r{wpsW|LZI!`dgSXd( zr?!MocgbY2M}$7VUXRm&o9B5e0>Tdm^pPZOG;kZX9ChG%I!9<1-=V9WY)C+8V@5T2(BoV@Evu&bq7SSh~dY>=~wE0eem-zC4x z_1)6!HQe!bov|g6edM4EVUB*z;T{iK6XnYwgNZT+#S-PgEPRAH2oJp4#!(*2r5ac6rZ2P{JF0*k>(ute-R3n&FsIrC@& zbvfHHo{6M!?$(Fg22P4Q&J=^*L*@{e12%A$S_bBG9^3%Nz;3XM^YJmTlII6O13j1x zw(%UM94zCRQ6rej^SB;x2pk88d0v^8aL8=rxoQDe$aC{*Q0E!FKMIzD zV_*$94mN=kU?(^U4uDhOI2Z->`u%1Mly4_!3l5pNpbnOR23QRSz(z0#c7P$UA4~I_9z;3V|90EJQNw5!GGaz`ZdXZy{k7)N@*&L@mWLGymI7xzWrljzeQM&Is=lz~tFU}L0b zHFt5qz|Z%a>+jqcDFa20fs+1k3;0xgWOR*^wN~ryP9ziT?A#E^SgZ7n7OszUKc?(c ze}R6zE7BuKIDs}LM+BcVB2ABr?1@zU65Mb~q^2G0?1+q8xIU8hoJwy4DCIc>N;t-5 zup=(g5AqIAB=^_KUQov22@BgJ`8~)_>yiHFmA}p{%H1F+=_>q<=sP0)poAapwbR|F z?7Ib@&?BLjzz$bL|D7t=dQi$S)UW)9UKV%pkugEyD;N-rk5s)X_8TI(+f+S={y@8> 
zUZWsul}Oqk?GZV&o$&z-yk_SUoJ5ZNk$y`OzNS7@Sfnn?$U#H(m{qB(X zIwHm!N*@I)&`%E2zQK&0`^^Ec?N2IQc_ZMH@sWzRL~f6y{#kH+L>pCd5jcta(zh80 zLiF!<(38LBzkut#k(@E*F6~|VJN7-h>1SZ+_M$~@_#Jj z!3T^V;Mm`lKI22>t`-z`p@S;lY1*`zi@OHbv{?;~yQeMtY|pe+|6QP@yHHp9{5Zjm zNLM`gv=J%vD}76%;QB~kl9Jnw1F6^IG?6Bpa$HuJzSuoBEZciLK^TG2Vrz%>uCVylJx* z41R%lz>eH$a|}#Bf7;Yi8OK2BpJRgS<04fTfKLS?V}j_1{vCY69m&X3a+M%*pDj;Z zG;NL$UVpv{Z|LG_GlOtTFCl(#7_0z`zBp~xgCk%k*k3Sh4uK_?PMc9M^rdMtZ4vVf zm0QIHR2RIHk zflb#;n>}Fl*C}r>t#sN9oIrmBbHL_nr_EBZ;5zaLW_)AXYy&630kENL+MEQduBYD9 zm|t(8U4jkYq+GzP@@caU>;c=st{d?OR^5a@F!w+4cj6&)0t|!Q75D>dZ>Ao=(p#p@ z4lw*J$^#5mPMcHU7?^qzjd>emYP8I&Z%G)VFF#KKW59|liPA2|p z{DEnA;1BEr>%rFV;ScQkKIQtPI}*Bc+Uys5a00CQ0eZ&Ctp5V(CsTsRb$1ay_HB0) zemdiiN%-JcgmwjnmQ9-tV8#zsxLI|S2l5m+0H!XdJivM|u$cPz5#Hn1_mMs@ z|9;X3*8Z4&Byz*F83WTFpq`hIz7^AEAz1d4X|oC}Y^0t={~+xH99v2I1Dk(3Z5pT0 zPn+-uW;#L!zBSWk4Osl> zwAl=1wbHJ@!nMQ)27f_(ryeo~!7Q+>jrhRs$B7TDU57ug?w9l%u>1+)1H0A}-)Z>k zpnZTPPfeR8VA=-83$Pz-1tnR}fSn?wNz!*3N4!uMsuoFKa=!hO%&FZSnA?qKNiqsBi@eSeu>&u7-9KU|X4CGP&HxQ5DH%T8j zQcn87*p0MXF!-O89~cEEz`)J4+Y4z&x6m%YF|ZWu`WEQ}W0jO2n0FiH2L`JsKT!V; z?cyTZ-FHbJSW!*-z|K2JADH(28M6nhuOWTlz@4P;KPdPAA|7z0mi7l$-c36P>msys zvHv0Y2eo_2e?I+lIpKq2KO%fE{l}yq?EDG!elg?UO2PxHng|c9erU$pC#9~MF&mKE zenvk9%O9p5E}{RgrX7QIEyN3EuOXe_(4#ZvBv`$ccKb!r{|o8~9B8Axf+Oqb7hrii z;S^9$>q!?F?4X>$@uvs}Y=4@50~T&1o=d5RF3JP!+=PFywtL1*{SxEeX2J)x9_+#H zE%ci&Gta+3e*uTLl76tekNkqoFU^>GA?baYc)@|+V-NOiqu+rI+o_Ms=;uQ-W(An} z2K^H3eUttVhDWf!obuR1{=mY0^ha<2lzmLv-|1(_nNzC#1nZG2r)mFSc$RYh3gd2! 
z@&IE8X+PkE7B#brC?9v!tOnELqvj-7kq|Wtu3&x%M9o%kYC+V@yOQ>MOw_Cg3qw(J z5bQf4YL;!Yuqvi-W4o-qOONghKap07wSq1iIMNRE0{AWkaBCsbX zYIcBQpNpEot4ZeiVb|0n^H(WjH|zln(bi26O`MH)WgQ8 z*$kFFOMTo#d2S{=F#UPb3x>CnkN?E}cf?!4cN1A;J}}vX5d@o^UstMnD#E|0msK_FO}f`P@dq_`;^12g#Q8lz^Q#va|*1O zqF!#J{`XTpU_+Gl1ZEx}|KFy34pNR_&@*dhRpC#cHLJm_o&YFSmQa&fmnk8WKl38;AEXkZ@EF@o9v*tLMd)BO(e+T-rXU%4?_cOER z1epHWSu^i@J%2esN+Q@fjXcQ^5ajrR~gSW`!Nn8dey)~o^R>d8MibRXps zVH{gA%a~3$jkBCF(|>+Sy6&OeRuL{({P3(9T*kVlW!9_(OCFsyhro=-sD~d?uWhqt z1K9gZ@(;GJpEZl>NZ(VlW+#~S-?L`Uy|nvZksq*U^Q>90obkDbctPVgv*s8Wc!Bc# z5&Es96U=&b)-10lJ=;hJ7~4)d?xURkh#nkxeb(#-C*L3+_mlsfvt}9CFhah;mR+-) z@zQ=rsb{e7omtLsX=m@wnnnZpdXM@5BYOx3G$ts&2S~?W$`8~gXU%pnG);M|U>uym zJyr}_19N68IFLMNhE_6f zFPJkc!1iP2%pS1s*f}%!Q^u|1=FD<1>x4P8AFMrb&P;8>-^p`kC78Bk&g=!dPMb6H zAENzy3U^>MYmPHq(tFmNSp^zr&zXH-Z4UAOjQ;bvIkO!!zCb+9)W`Y61GeYQnZsb^ zf6STL4>PV@GG{h{6$NwVI9U0mIWu=P`M+$=Yyq2#<`{zr_o_Lw0jyj)XU4$v(mAuV zh5mXS@qp=N=)sX2&_6=EzKL{#SvSv_S!;;@TXSX|n0YJlgT=Rzu15*?_Bpc}EdMU< z!O1)3%*(=b8g%DOlZl zz-$LIA0u8c)JDABr016h%r0=Mop8X^jss@VbIg+)a1Z8hJYZ^@DX(Wq57_h^@q*2p zi3cp~Iba&Urhc{%KREK617;Ig|67&LL2wK?w_nM1U{w$8aX`rx;1u%Yb|r^jJ7D%c zPyc>H$${YmX2TZhVMNKDV9swSr?-{d42~lg?^bdP*wRb6Pbhf+%z1%+v>$h1GdKmd z#VF^kez5Yam>GJ7 z{&0?pFZDAqvl_Ymb1~~Vh!MvA_xR6^nWbR+1*8`YUKF$1ODm`ikiNp0ISLkCP57@e zE_{u2gRz@qX7@JYy)|a0|ABI;ikbDGQ5`cU!R+tH%#uOs`3Etx7p%XF{BNhxG% z$M5+o;UU+(NB+Rlzs1blKT&Tp_y|2d%EkDQ{^gFeBz$P5zeW5hBF9J4LLZug z$RnvAnrXX;=Y$WDVLdnIBr^%p)xA%-3Do^+PW014xWqdlSQz5sMt& zLhfJupBg7T@#pXo)0Gi!O@KGqSZm`?ZS1h|w>G|Ee^0 zs~%bjgzq+J@&eg$P<4aL;`{!hX>u{Mvt84U0VUpphYo#PM}G*EI3~gTKk*V6_@D&x zzchM=y${MsNi0*R?!!2hi6lNa?k3)r5(*N0>W&+CIr{8|&# z0kuN&b&i~O5`Vzs_G?A;yvdjD=R+cXtsfeMEc0#Tll)q0s**?E;4guWJFX>p14yuGKH^saG&s%eq`xCTks21vk>W}klLEG;b@)RU*SBIc< zC>zR!3ZW7x6ZegIJ}oE0x5qx`(_;K9P|wpUj{Y>lGWctuHBl$V$(+=$;=+|)rWwqD9W^iI8uQGNr&Jp_$F z^~gNyMhbv)784k_zYAoc0QR7kNHRST~imT#>ZzUe; zGSl{3%&Tmr(BKN6=D2T1?u48&lJMI9L|wX6xrr=cIOS+8rp;0YS)@6G`pbeGH^YSE z)NhdVq(S8;k_Jf3k@e9|x_a!iIdP;CwxlJ`&X1#a@*s8Q#36Q08|t8pd+c%+eTMGS 
zvY`UfnMIhnkmR{3$)^>g>$dw`6>e&v2qf`3d1^uy(@9wj(Qfqzd|KYTv>NndDHDUR zQXxrG(C*u(kLWac9BjK=bX3ao=jUNhhrQBjhUN zGV&m1_^afXc2I7I)Au8v)gODlO}a>16pBG&obak_w^Dv%l=G2kbY!s)#jEnlrag?| z&q@0ve9R8V3AdT@5mSbrO7REPLroB0!qhq-F^+pF%LzLl5?8OdrQQnWmH*H@ol}?M zc66Tp$S_3~H$yvVL)3pi(O9iUJ56Hg{6aZ0|#NO?MAw}cV&(!QY#C<~G_JAF#} zZZ0}$tEKbGLi|g=a@>v6UnZgXIwu`cPBnxfV`$Em|5!eC5;i18($WI&gz5P2L+A99 zhA;cH-Wz;c_)=A8;!nmANsr@C+|+-O@qux=JMpkz34e^RWG-;rI&oy^s$R#jpOW}! zTg7&p3{Y-P-weS!W2)1J#lCn?f>uZQ9L;{@0O@$0K4;H?S$2DH;x1qgEI6Asf!;}1 z6=Bvul3qvWxNoKZ)JvRpznfq7j_xD3xEq2RNn5!cuj5|IVFEju7p2}B?Y8as&7$6< zuQ_Ee<(HMf>+7T`8x$jR=NMyAF|y2)Qr9KqRgBb2#169>`##c83)VxuP@8QRgExFW zE&pOwr&Ek?&Y0ztt(2?GWAn>A4L?WfowQ4yo%~7|l9wFZOFeWGZyzLnkE~1S8~L~^ zgbdO#h`SLxole-}$Wch@rKs^A+p|+wPMlL8(Fj`fOnkQw}}UM=w-E9tZ8d;@Cw9rydd`Wg+XG7UT{{+&ks&%#F?%VYMOJ zR3Gk!pfRY9dJyA;okLwpexumU*H=(RRnXDwB%TuTifMWvwY|&Lh{G9m%X| zpf0Ezl5t7$Cw*7SM~u@4W$o(pK^brRaX%=2a4)7YNIethq#duw+0u{b*#8?+MDBE)^=TxtoeID$G;Po#GA!>xSY9D`mwa(M#7PF9hC8S$NQ|V@8FSibYn_mc+hQc#z;Qk;6`C)X zBg?qt)PvPdDTh%=;*+>1;A3{#NH|Wvl)hg?8E24>=)7>n-ipUA+gAF#lz|xGWj#7+ z+vP4|y#hbTI8?q$*@-*R%UCvg6g#O?88^j9dZm5jf=(NmKhDPNx|hBzV}#5*Qg1Sr z$=Z|p ztl6L=*S`hmIv!PJ;>`0s_MSz?9~mF!?_>IKFZQw@7UPs%+LI~_$!~2M{qiaDjGOc$ z+{t=rETsI_o=6{{T~zR!@}WYg1S*HbNZ68R3BMv;^*hl!;Yq%$a2tW@Z9k5kQ!Y+fNIPt@-ALU@o9mxS z(8iCUtv~_Vr|e6`NPCcZ!kKexX^YNW`>)PG{x@@y^pPGr9m31_>d3`QI19RjaT5Cy z_%z1SamE&BEme=M33BWlec@HI*HvLj`lZ~)ZP{t_q6?ftTZPh~quJ*kMK9&niN3{7 zv&hcAO7>W?h8m)d#7O^@IVKHR`ez2{oLh-M*?Y@gG6#whrn8^SMJ|BEh`agc4+Ed4 zonV*sFYE)@rP=nQA<~nf+M}c=o$*A{BIAh|D~ zjT&J(am$%rlro=$+D}vEDLRqGIPr*oY5SwNk+Ur67ZR`Z=P}!@mH!dGd5$~`NgPf+ zT6WUj(8)ad4|?if%E_@C(GL5Qej;^H|8J~OsP{TMF4;Fo{LWgU)Xu-i<+zdkgxHHa z=@VjPe=Au&$f^ddX;>FfcdZ;O$&N2l#x zJDoig6ogWtbSM+bhQd%DQ~(u0B~Teu0f~`)QJWo(vnHrVZo|Fb#d#|hqT=Z z(k**4$+wdhS(EhP-dTq@^MbTJ?bGzRzp=ky9Badk7|Bm1ysYcP*w@U{r;8qv^Kz+E zNxQW30%T{6DP`rvZS@brXn`b6Vw^QnCT&E{BpPW4!=%OOtMlE+I;tPL`Szp8l27F^l#z4p>GWf1Pcm;h;XCbXzRoFMXS}q|S;@cS zN8%l}pFfFPSz9@EGOkwsjCpHb`cm!mNuDGOCy&l}`Eh4Pa_;P$mpbX} 
zqOUvtMAw5X=WLQDnIq+#LX61m^LVGM9lH_c=r-13VkDd@+*U*4&nZ9ASD>r3_2O3c zbk?&Kuo%6JJz~VIvnS0H9cyoAy(#4`8j(hQ&{u$;&NaB?>%KWj*sV}i}`l0j8 zNy2c>ww!)A|Ln|h|F7J1Nxy}LAsMS3KTaG?=$*8RouosIGbYM>7Qu}(|I4#NF;=;u zuY%^!Yq>d$FHkX52310idnXSPhm0jh)5|#HJR@m{`{#9-)XfNCI?uWDa4$ysUoGy% z$b2Auahy4DO6EbRyH-LGs2S>n#AG}}exNXv3Kc_Q`mh(> zAUFoe`JAI~WY0X>!90pSmA;h$WkE7diwVQ$L4{BWQ~}jM^-wd^28offE@C|*M$YJ} zpjulV1;?Q&NauV`jP&_To6jfTPF=~lmFzKd(B;n4J9}tnf8Aw=?dYAowS--OowN_< ze6n~kb2n58)k5`9Bh(6YK)ui)GzN*0IkJUu*x3V*kjHT-3Q7ABBhTQAAm{Fa)N$CB zo%7#XbluPdGz|4a4UialE)eLX+`$n^E95*M=!Wlu#-UEg=?C?k?M#wJo%{vu{EJS; z88QE~Ubs)im4(}|?OxoJqL(>V%#mjYjo7t99Z(k}^JEX$3-v+$kc=I2{vhdc&KCyI zJNX`jmoZ69;C|M+vd)2|FCA^obTl{f*HlG>;q-Os%gz~t^c`m}(ubP{)`=}pJJbb< zk^GKfH%|N^+&j-QCecZ`InOjCEIF?bH_ln1V<%xa_X8v>@$1+*@tx&IA{2!xD_M1LF^rUovrTxqtvrJe-zW)=+hp$!EL=4)d@cYUHXB` zl1JgA(ApbxOV%D_odP{{y>cIhFM%$6Te&ZTuZC_uOS!LyZ-*W!v%`l!(u|^eZc_Ol zhmS!g+Oocqc>%iTZ^}L$J_q`MFEqOeC>Op6T58KB@RiW>?<@Oi_Ir{p2{Dd?FC zlpKRkeTe%1vy#){bD{0|36}pt_$p}ajmo|j{z#L}nDy%0%Dw}8DX+JVRpmX5JO!O= z%NqODROo@fDEArgljLvdTS~4(F5#@?{dbkzDDs0o&Gn9|A8C)1=(ZOr`zZX8CY`nR zvx`*xneYYB(rc7lBr@r}=YAzuA!iRKXm6z|xf!_^y7LJY{s8xUA6o6x_WwlLr@?oh?9(1zspMSbBTW^$`=(WSSHm|z z_x@eUP4F_Fytz-w&Uo_3m8yK&vDX(<9t|q|QDmvV7v|NU{&Syp;|gUTgzx1U&VGA* z%0oWVNPK5c{bPLBUZvuz!o7rl&tH_iw2uyScl=m|-*uGmq`h7HmU2IWy|kyBN0b~p ziu*9nSuPw?_IdD9elOmu!Y@MZ;d#rOKT>iv@{y*5XEg8Kt?UPE_s=g=a_%Gl>|Wx# zbob%-T5AUX^C08JKLW}QIau58G zM&f@Vb~yg>N9j+;tM*p~FY%qXMES2rmhg{#PRU)!`YAr`!yYAvS{ZMl7q=?88U9Ek z@g@HCaD0KaKJAKCs(#bqCB8duQU3FgCBE&IO0Gjb(nxr(9ueN9&HotQyJgCM4F4IA zk^XNgIR{?Czxc+F4`1w`{rbn-OZ<<#cQ`+i{&&Bs+}99(Chh&fab@3Y$M?|H%6=I6 zNF(80yXPOnJGD@`Fa3o|?~9lJYyXS?=dk~d<&4i4DEBS+m-Ma8Q}U=C-rE-`ImkZb zNF!$(_k7*0J!`tHdlh0|g}WW3?LlJC9;7>D&GX)Z#P~of9DDx;%96qg#qjcriub+vr~Z zq1)m`SBCC&bXTAgUQ7k}GP+lnxh-CFHRzs4cPTpI#ngk(pnLHix5bOD1>NK5E<`81 zm=5qf%J1z@xh-CFz33js-SZL3&vrKmK7j5#bi#j}$!=$^rVMUgPJZFVEYw01?=7*7wG}1@*e?j&C^!5L2FMI5_ z-=Iydp-sYzk-KJZqI>2M#spg@=Mvk|orlgjqmc86m(jh@!kA!(E$0z$F~7flv-_j7 
z2|3Gn0e6?;PIxi$p2l74s!eduP%zZl`g$bGaAY@P68@ zT#POOo$zAh{B9At2Onjew{>!Ew*cKDbi#{?f(hu>(r<;gOeblk&X)d!wgxXo&IE5q z_vQ+>#aqgw{74;Lb{)z_jskGu*mM$W0v zME7>H+u}thXWAbgU_3%6ycjv#eihxj74DDDxh;z$ek7d_=>M3`XFN(iS~^P!_rgon zcwPnH1SMvva%zX4V9b8ti^_fgxtVpx!)sMJ85`NJvIo2(U)i@Hcc0+XPCZkVd+@*6 zZ?Qgq?qwwxAZOBd?kHF8TalYs1D2a{|HSVq_oK+Y%vm4QD7o}m_PfZB-=*Ykpbvhh zxkjT)Ub5>94VWPSxiSywO8>bSpULlV}Ew8vhPFAe}VPPgt9M&Z-Vaqo041M zC!ve?DOuY}`+*YoE4d851-dq>~7ogPp(MQl7N;Gf!0R%aMDa zrTj&=7 z^gNZL{5K*GLC?f1`w{ps_Ai{H?6Y6ue4jPMd%sk2KXMuJr5#ER{f@bZv3XIal82B( zKV!|=rR36n+*5!1f2HI)?r71Zdz8-q@WFHzsaMadcPgQwB{PE~RN@+9&Drz^P|S$~!C{gje}@OhByEF~AhS3?h-t>g%NC-h#9 zl6&AMp!35@o`TQ)Bj;;=)qb+JaTf-9>r<+}E8!cU4?nNmH^War@13f~SN#u+uMM=9 z1|>Hmk3!qeSMDd_a|ao}@|0W*-v-^cQn~Mh@A^09-2YJa`gYn2@~xLBxdM39TXzZ`HVV$9E7jm!Fb|R z`EP~qht^)L+z-JA-=O~I)OeH%pUM4&?Y~m)E0CL^D@v66R`|Y5%Ig{>4!<-Qkw3VPr=Wgpzd zd<(r*s>-_-z6rXwUb%0DAAlabU&({;lh6bFf4Yosfj`q;p_}dTQ5)sF8d`k5%6}Go zA@syb<-P>I8hY=iO0I!#hfd$C+;_qoZgjt>~|QC z)+o6Ez6n}60h0n!)>Ep`23AqgU>?f2QL#{=BrbEd&yD1-N?FJ>6 z!q-8k+x5{1-vK?+sqFjUyYc_RMkS~H75C8X$tu6?@Ok63*Ayj}!jC~OyH$Ukg4cgc zd3;*g=e);W2YS0pg;xhZ1fAEd7<=F8H^X-?W;|Fq_%|A6+f(@r0JJ#^wOB{##j9z%b6K!rbw9NNeD-l*g(_;M)W3renn?}nb4wetf% z2AyilQTWuq6aP1q`%L%}=)!NQ@t_KR5W3<#Wj_WVnxei(ReCbvJE8L*Qt9u8*Y?wX z`&4}g;dAe&zbsU8334?w`xoWE7QPkQ|E`kT;fEpDxRS@<^=ZPt+YS$24>A5cqSBL# zTn61buH*{%dg#47mHS5c9+CH`@cQ5gY0G+>Axo?9{jgp_g zD>)m!7g{`{>fx!W3-pz5Ra0P{Qk?@3nnZOC$W^7aKv z?tveN;zCNEg3pX$f1HwY;H#mviki#Ex zR(FzepAR2_E?%PK2KWNn$3u%$eRLrYKo@2x`$70g==E$nJovPO%!j8b`)v5`Y{JV_ zavAba1LgTc6lO2=CflCFgKgqzHQcd?i=F zH$hkADY*rH06ING<$nae;snOa)07-=6F%X&{zJJhK#oA~<}0}Yeh_;5d{rJJ@L3+e zc71`e&xS98ZvT>!%ix=#y9$-u4xh~(&=>Zo{DtA$pck)D_8oFR^c?d4RV9xiXT|xo ztFBgZ4txar`%08tf!v7v{!%43BDWwvT&m1`%I_Qd^a^C=yT@&xpFg|d&r zXC(Nw#eY=xIq-GRjo(uCjqrnuXx~v~KY?78Lj7K=n~L8AuW-+l*IRHowCnCE`;vmJWl4nGWa&!FTPy4k0943Q67)l}qGl>2mex%c?c3S}Qgu7++sR@v9WH$iVVD*IOWP!9W# zpDMW*c@(7e1`yQurp^pZbWB+mR=s7uG6SPocj;3HJS+68LK9!(S-- 
zdide{7#{*EzvIaIF|?mQtMUlIXF;zoROt=F*F&!3mD~(J0iC%{g%^cS#%sONB;l+eOCp_WB z^h_pb4_&6xBK*HH^Yi!YSs$N231|D6{}`sEYwZzfJX%~Y+GQ`?Q#^Lk?kly%Zc1|4?VL53Z6*lB?dGwHC?4m!*VS!FM?WEHVGi;&LPBZL$chG6k&+G1;a9;QIEzS4e?mQm% zdcDr|x}Nua&VA0w$&V#+otlG#aUajef%v+_d^p~iWm_I!r((6#gS!vwR{l9?=h^~g>@=ci~5eQBUrZ#>s0(!OXk80 zY)k)y_db^3b#%mP$;IErShpSPRIC>DKHMU#Ta0xoR*QPiZ2{IDbz1MasrT*XVcjOI zOT34t-s_u#b@Q=K#cEOS1J1&_gCDTo6D&YqDOgvDbt+bidav;;wqq97saP%QJ-^df z_c-=T;{Csgg!o7GRSn*brDnhDux<&~CEklv{|{gd)*Z{oy7*^NE6{%l)@{Z*6|1EN ze^+AN0<2T9TGaoiIf?h;4ql7*dJwC{hum`bDzQ$*YH7sZWAM$yIu)y>34ae^-J>5o zZz5Lfx}L!6KUlW{>s0(!OB-sJ;(XcW#<3CSMeuhq)-A+36|1Eee-~lh!H?s;@c6nY z{w~0}wOFTOwG83!7L57P>+rr)d|e~<-zKbEiFGPg%P?~DP_Y~$x#cFXO zcN)HG_*DE>OHDSe@2}xlgIC3B3E=N0tUFkU_oUngEM#cB!SZ#CA7e!@9>mg5x0GuM4>~SXY8|DppGw{+3|fnGc^g5vz5jSho`Ec43{0)#AqAr||kLi}Bdb%V>!n~2rAa;#g7bz8Ac#cC-<=kf^}=LPQ_~R;qO@-lPOrI;;<9S! z|IVu=lzJ=(2kwe zI*d4qR=H5?7~;}JxcBeU&)q8!x1m)Z)Vc$43~f=m_KzS=e;_?mZD_tK%96?)~s`EXFhtU@QPCw3#AWp@(z}gNycv$B%5a$S;2fZVn&qwSQx)geO zJYSBuR_HqD(>z#;QLKj1y8qjgNNY|GmuKy5T3t)|PCGq+O#LYs7ppU<<=cfbl2-@D$`gk8loPYHNw!^RK zybG}pt@4c4LBs=STSm1WL7bh374{Iybt1ddk?P3U(@*p z=w7t-{qf@saq6{rF8(L2GY}V`E%>YUyATJ^QjY0-5OEt?$%|Tt5qG0)nxXxDi0%2f z&v{Dcvk_;09AV}d8Ex@b_4ry5N6_S`p ziq@rw{b+~I)4CCHJKEg1?nK;&R+*yn1Bl1bPHfjY?K)h~&^ErTbuQv!wAu0g$`IG1 z?Y~^-1Bg4&b`EOYhdARy*nbgyea%AbM%#9a&X*#tMO*k~z5Vrw+tBt5=zIs_7+U3K zt<&&Wha0r;qCGiy*cy$9y@pKNE2w zTFF&f7bEteZCR#u1L8Kc{RLV_5XaDtW@(=7dhB1c18?a5T!_7BSH}BmK-`bEvrp$^ zh@CfJf4-pWixIb@tZG8`_ckbiN&NFWTU}I^Tyl^+xdjqx(xoT!41!-&(s7d(l>J z)c!`q=^w@8LHu~mMqG%tFuuQv5!axtM7_G+)FSqyZ9A^(1BfGNZ~R&7Uc}ih%+HHj z=O8Xd+p<89w;XXj+RTgb!bQL(b|VNh?a7l)-8yK&}L)4 zC$6`M?QR^;FKC_vaXH$KKWJTrIE1$Jy!iQrcmQo?md=kM&R>PcuPd~6A#Oyw@^0N< z2yqOpWS*{1`vmqk+TMKaZ$g}U6XxTH))|PsXvZ$o^$mzS(H8e=-H+H&jN{>V+MkcO z8EySpy*;gnyH);4t$PutmEittgZ8H*&PS`whcaPPAv9()ljLwwv+z zIHq}Nh_li5zOHpH;$pOeuW4O|*pGJlupVy{;%>AvXLLS_IQ^4&{E6#q#N}wa&ewhq z;(D~|QSEO)97fyufBJauL>xss@-LkqLY!KP{$9~K9dQoYk<(h|BQ8Z-bH08&uR!cW 
zTN=+dAP%AJ7}5TA#Jy;z4(aXhM?8+U^q+D5YHTmsiTQfGWr*w0Dqq$4AmTo>?SIgE z7;)OCa6dj%&wn=J2->{({^>`Y{%OolkLG0~E<=0jq^|cM_M=e5p7T#8mwt92FPRr2q;wrS$&uQ&L+=MnOzQ0-# z_oHn(qVqAtnV-e+^+DZVF5(E<@`v<%b|X$N$NvAj_B#;gqYXZ$_qQ9db1lxV8+5%3 zvHC3C+`F{yf-blP$JeFtJmL`A*>1f(ZHW8OPCOUiKE(E0(O<9LUzv!j(5j!+_4SBj zX#19EZL7e1qHTOe*ViC!L)(9e9)B0&taX^rE}hRoT!FU!H@dzCaR}|1<9hqr5O<;N zyF}-E5RalAi|^lY#Ob$Tf5q?LG7;yZ?R!M;4=3VAw8440zZS&3XnX9MH-K1u)^F$2 zI-hd_Y6sq;?6?xnc?9n`u4x)p5;o?oix)9r`{&`NI6I)*sIgZs5xwa!B9Myp=0 z`NfE<(2o3C^Sp>dXlH+=^X-WH(FQxU9zblXgnm+MJK}t_J(;?`2(ce6C7ur=?nYbm zOYQGL97EguF0DrqXM7IzHCj6m7octZfYvU=b!c-BXkCxE4Q*M6*6oOU(N6zLZ{Gl7 z`{!}}{iU9-EX2iVn=a7fD@PneJN5-#-;B5u?ex#J?nYd81G#s;&HSE?OLbZf%|i`Gk0iy8DbyWlDMu%+=Mpsi8v2&7uxa1<2=M8XlwFx{TSl1 zFTjud#Qg~3X0%;T>;2b`co=Q)Fmp z=hLeHa>VMhtV=6(e_rTzv_(JC{tm=3v`sU$9z~qK0rMBX-a8OG(dKz{eIeq&Rd}!Q zcI~f)ZbaJ_)#GVK+=;g9QSI+WoOw5%U)-j37Gf9L{0sE{DMlPX+p|vdf{5GEc80XS z3vv2AIKL}&J_~Un+O{FReJ;dawEelde?Q`OwAt{h?e9Q5gm&P{`1T>r^5T5|y4HD! z%g}Z{qWvDkjc7aK_cu+5+t9ZEAkITPjJ9Eqt{*|1xev}ihI<(!nS~no>Mce)v zt)qxX(VkkPwe3D^FWOUOTIV4yMO*Sc%`ZdjLp%9to%bUSq0NcwHpG2si$4|TBTlQs z{c8OF$$_{OZQjj#|Cb}KM_bjP`2oc3Xj@n7`cA}Av|XiI4wE=bKiY{;YTbx9jJ7DQBZy;Y)i>+>7~-5S;&>|2+KIRvZDxFb)gTU`Rli^7I}nec zRl4H!h||A>>s7JV4#Wj$hi}r`Q-rt)ZAYD+zYt>eS?wdA(0Q8=ezcX%aX;cJv^mJD z>y;OA6WZKWIv+wDMO*Q()w7(W{Guo>7`QMJX z2W`Wb^!TEPo4$;0R7I1lacEWLjU5c|+Jr)eEP z+>16lULQql+luYESo7?N^UxMG>i(RF%h6K4t+fYn#z%1eepLGd&@E_-aQ#rnQ!C=NHO)P-xz4!qZ;78R>SeV5fXtlP8;@7cxIMMC%-<^8%= z#c#E!@9#L&sy{CgMXmbXpHmm;edPMK{=SU;kLi3lbU)g%cCCjIx1oM>SnD#3S1vn` z;{TkX?M35-RnNotoF*ryNzB{OyJPxqLj&e-y3a%N&gs7mKpZ?p>i01~={hqJ;9N32P z*6BYFt?ko)8|?e0|271Fi2uhrO`b#ZF!}j(HqM7kPEM1Ux1j>prR-@kZXPCYLkC_H zn(mJe>EE6H+fdau{kOrBjn_o^K$|8ppAQY#hOB8aZXPCYL+5+&S~w@CNzB{Oi*1kDIfpfYI^DynNOXt8g3=Mxo&(#^at>@m&pB{Dl;gUDk55j!g|AEM za{|--@nQ5FIF@R0EagnQBgr=LS-Qlq3m=&NuaRY*1Mi`v;r)l{{yffk4s1jAqtkyL zTArBx+fa`0!kO;pL;5+e4V~w}=lN{sz-L1PyQcs7Fmw)VL+~7Ue6XDZkJFxWU>khr zz%~s0X!>85@CCNh{$68v@ATh>v2$P>oOn%OI=(jZ(Hm~KE-R;~^0w-AcV;=S`ruU` 
zxN@;`k&bN3op`xmUFEvls%?$PS5|v$6RK+6 zJyo`=Je%%BrT*)^TW9XAy!)Q&yY4i^t;pS7S)=?qsIBqZuByJX8vlAL>+tXAAdtGt zv(CHDcGdEmS6zkGYD7A$_SD&~s;s*8b9b*>U#Tl^U3d50>o)1iM105X=tv>;>DK#AAc`}=X^Bv z66I*T@#o?6|4y{ou0d{Q+?R(rwDs&*CHYybNner*ijjf@WmsuqP; z`tp%i|C>_znlI69WSQ<`2G%LRdOlW&rnK?}@CJ`+zxXOdd`Y}c>4N@Wyx*cf<7=YO zeuWK7^sDMsTaEVLxG(h;4HbK_Esyed}(Ow(GosyAjPIy zi$nX=_9cAxB35I48(PNRl!?aw6Eo582J&UWr&_`Lu`<3WpK3m|1h1km#im-$>uM(T zqQ<5cwW|`ohUaJDx1VNBjA0^)e-iyR#C>Xe8{tvS^;BZ-TE0f|^*7-A63|lrjg|2w z!Fw1Q)wQbs=vVC%tvw6BeqcH}o$@uq*ZdZ~!{lp$uLV92)+Lt27+%B*HMfeFnmyZ=8nWA3 z(SbUq)S|XSd0#{kBn18{k6PxSsXi0S zL-5%XNvuqS>aRMc5=;Hs*|xmHDYnb7;&ogb)cDfT)H%ew_V-<6Ti{PnEFNtCpG94_ z)S~~Fw!SuLt$SA2Agy%|>UyKK-aG1=qP3nab?wkv%_zqB%ETY=_4%c)1!|cQKLXV` zueJUzaCLrbt(zOn^*bj1s^eX2{fR1dTx&gd;;%Y>wSMQsUv->njSD^& zbv&wNM*P#?>bTQ--o)R;KQnCc9UWgW!*=P!anT>&eswiaGjJ@f@oT0NpYa-|yxLQV z{b|E@`zxK;UpCw93GImQ-$XtT-+wBvN2~{sYCDZzRmOKNzxn^%b$=B?58-;Hm-GpK zmB`l#E&cVBe>Xttw+R$6v3=4XAioQ~?a=C+ek*^F{CB{o{{Ox@(%#D7LjDKfQ~%e0 zDt;^ZpMqa~FMyh-x8k>v|7G|dhgL_`aOZE#?wRoMerYno@#&glV2U- zBSOplKPK|&e^f__TJ-TD{h5@%9@cr#S#T$|H;?kF|HS#L9+{MuynOQC4&G8|b>&I$ zV&Zr#B>y4!)o&5-{u>p37x_=Yzf$m}znJ{*#U->PiNB2ex5K|CiNAvU55vDMiNA{c z_^QZ>-*V^uBgbD$eth+bZ9@`%9r>%^-vq6$yovoU`3>a9SMf~zhR0OZOU_ z_n6AxME(f;yP&7?w~#*y{~qY6#@|kUbz<*J!tWqIzUszyAc;Rh{*S?bFp0mL{CC2y zewSma?d>K1gYX|o;_oN_A^49a@ehz6KeIXU+Z8Rv=L_n68*LjGIeKb>Se zW8{Ar{pn;7sIc97iB7b1^N9ju1n&tB7Z0R)k*xdzr`ODx}-_tY|zmfcn@b5_CZz6v;{JWC)TgZPAZtV6X z@wbw{4F0`I{O#n&+pV^JN&Fq;AAtWr5`TpJ@5fV@gGv0|}{|Nbi3jgUO{xR~Of&Xk0zYX8_rx&}e2v5yY zpr<;X)5!mA_-7^Yr<4DA_~#_?XOe$5ZcOGS@n@0$M)(&b@#m2LYw#~h;?E<01pdWI z{Q2Y`hkt1jeY&?lEgVA4a|ex}NS&hvKgfB%WW$ z`_*EqSI<9sgdP{q7kVlGIQlyRT}=Hs@csLG(a(=kzlMWyV$si+vqZjB=#@8r#PQj}p ze<^s?(4(kJ?C*l(=J=WguR!q1$^TXG)mr$>Mo3fF%ZuKs9-pOxXB)8kn}HWXQ=pmWr@Z173tqY4HIn}- z@OD7A)A*c&R)30DCwLygD;0;)MjSM(}JeTm64iZ7($Q+9c-x%d5<|AOvM@aDOf3y1kCwQ}=BgE^Vyy6uKUYp?ckpB|AgnbyA$LAZi`cpiw z;DrURk^DD;w*y+ecafM6-$|3#DtMiO*G&Em;O&Oy?REX#(7e6& 
zf0(???dE*+2woQX4}rG;n#b2edA0xCg4ZW_1LS`Vyg_Kbzl;k1F!?`=4o(XHknoR? z|7rM7L%$8b{OY%m)%&lB@zhgZ?Z0}l z|Kxc6%E2vsYJB;Em+Cj~FGtA#d+<&}^W(4gpXT^Hf@c@J zdh)*k-WF)yf9WG8FCcgsg6AOrBD?`LADVe>lvmr^A$ShK>mdJX@D4!p_7g@@jncZ=3U*D|n;i{~35^pofUp^)GXLF2QpOUO)KSgMAMXWkg=Sv%sL6{6UXkGCk^ctp7DF?ym-1@=jSHSz@S^132;OmM zyv*|Ee3T0RF!_H5|4C?m{12QlwRe8noi?w zr@Y$!8d2Xt`I}IG0GgkVNq;x_x4?fWiNBxxVfc?F@eh;#Z}6W?;vXY_2HxU63(b#z za{RW}togVW{uJn`{AuL>4E(c__|wV18U8s*{F&tcKK%2N__N4=5dH;8{5j-*1^z`z z{CVV`c`44PB>sHzzZd?cN&JQ6UkU&6Bz_n9zX1QrB>rOZZ->7miNB2ePr$z>iNAvU zN8n$V#9u}Jarmo~_-o1kZU>$pC-K*j|9bd0CGppjzY_i}N&F4uZ-9RrbT3_Bie9(Q zCv|=CitC#^|H{b!bMV$c^YzUuu1_BFkHcRn{668YCBF+Fs@jmm?<4=$;oqFZ-$4Fn z;ok=Bqy1qUGxtYW><>BKRPvtzZzgnzctOgm{W~mpezEGyN{O3^rhN#aPH@B}#)aO$EA=F<9&HWcsUiB~Q%PIc?>eoU$ zY5ac5PgdVZ`HQmfdI2=|AEx|d^%2V7fck^be1F{VhPnNvVm{>dgvehF-lNb_+McTa zn!JGE^@#mjPyS}`wm|n2uke2+uS@Xy1h0(zFMziOnzuI#ceZ-b&u_;CFDiHi@r_en?SJP3=J7lvco`{H|I5Ie3(ez;QeN?D1TQ9dW8}XZytB|eAD!n}{V85Z z@J0o%kNl5-cNCg=O*2ehRPe?HubupT;O&EEo;TIxWdzOrm%7!irwe!aDTo5(pLsbx zuL#Y&;+ZDTEqHdpt0Mmg!K;Sm>-{k0)&BDfUWVX}lD`tXGtm6_*7!Dad>w-45WFVx zKLXw^=pe?Cc)px{zR8OTUbf)nlK&6jT?w55M1tp-W%4q2n)8t>c!lJD1H9$XUQASi z*Zg*qS0Q*#!3&c=_dPb-erSGt%g1j|=|!KvO@db-ctzyD1-uo|t+f9JD6i(XNAQXS zFGl|D;HlrjVqSyY9G~q$bN{&oFG&8MgSQhpo5mNWyc%D=;FStq2l$nhQSzr?V8;b-obrlSEO;Klv(2{l&lTXMK=bxyQC{)N1+PZ%vdQlTXrbU0QC{(? 
z1kWpYF7kW9TPb)|lvg~j;Q0ivhWxw0TQ7J4$}3*I;Q0lwk^H{}Z-?NuQ(o}`f)^0H zF!}!r-hRRBqrBoZ30_d}`pN&^g?Rl#@Wv>wcp<@S7QAut7lZeP;5ja|jwi)y7rc<* zWs$!QyamwM>^HAhg_KvkPQhyvydv_qfVV>MJd{_wZovx+UKRQKz^fL#2Ffd5pWt;0 zUV!}n0dKqDwNYO21_ZB5@Y=~g{|Y>w3tlhf6)z@uJ%ZOq{sQoh3f?H?6>m)N`UG!` z{1xDx6}-%g%;P2XyXN&dDtHd^Zv}5YG#@Volvm?R7rY_CD^!K)NJKjjrKNAN}kuYvq$z}qHxt&~?hr{IkXUK{x@eJ}ohhT!#3UhxVAFZJu@ z{a-Kn3&A@gcq5coJh$N41#gu6HQ=2Qyo_}7cqtRS48hAJe=~UVp!s;or@R`UNAMhi zS3v%!!CNMH<&;;vTEWW}ybAIUfwxZZ>M5^yKEcZsJU{tUbMXF#;Dso!cnyN*6uegQ zF9dI|;B`}8@q&U^Ab36G{{(o41usT<#cL6~BEcIW|Chl#EqLj3%;Tj^@Z5rzLH@_V zn+wgyi<9zdd>w*UDtP(ie+9gyf>%a)#p@Eha=|Mn|0Rp?`i0>6D6e?Eg69#udh(Zo zw?*(;D6e=?!K)Fx5c%%|?@__)qP*e_3!Yc-y2<}2c!vaUnDUA@DtJD@i;@3%@J=qwt)98ZY+mtKj- zOK809^XB$ui}81n|62H;N#Y+O|0m%;0bM)=-#OPhKEDkA66jX)_flTXXTO+FIo>Gw ze+J%h!K-_x)t};J?79H|szv{QfLyWte)69LZ!2^J?!@tt_b!uHEO<`ADpyn7pvyc?8cz ze&^MA{Scau=ON0g`4|zr8o?VO{|4|*3tnKpIlkP7&H3;OUK9C$0^Tm@0@~hg%B%7D zM13FSpGEyqXgtk$^L!YiysGz$`fm8KOT#V}zw1>|B zTPTm!w$G7P<8fVX@;k-&>M6e%{4LOBG`=3nD_)nV@29*Q^~a#gsXpgDX8%2+-bwj8 zP`?B^MD-EMtN!~$eLv;DuKI_@ZNDw?_!1TV)NE@!KY)KGG(I-`rhiEI9prxo{`t`D zKqUCrQ(og}R_nDXy(;`z9!&v~CY z{tQv?r2G=pFM;O%y_8q|%ldlCe-iauM16$vlhyZ7ego*`+bBPd`o~56 z809CcPy2v1zU!~W>nYIOe<|fBtM^dekNQec-%k0->LZkY7WD^3z1?YzZ?bv^<a&(iJ^nn(_o03_j$`N{eZP=4-G zJUczm)Qm)q5y^6Y49WdH=UlezN)q<#(d~AT;-%o^OqBvidB_Kacta(A<9w ze~$XKqP~^#lht=n{$^Z6@{`rqQhq7w zH;DR9%1>6`LwOJC4~zQDWmAtohw|S>{UT@{znAip_3x+rFHyf$)b~<;vibqapFsVf zsL%cI)Z;IpeENrR{{zkAZ=n2S{WnqmL#W>+>W3&lS^X&GKZp7=qQ3AWQ;)xx@;gvp z0?p%Zru=06w^67Bp}gZGc>ECc?UbLaK0JN%~`*Lf1lhr#Y??wH5XdZtR4|Ewhbbn<@z{y9nfndILI|GXsrEb>1G z|AHj`9P$stzbJ`6kNg)d$Nggxe?Iw_!oM_$zmWWD{^kDmQN9Yi&CuQOC7ypeZ?NW1 z)qBPK7E%6w)USa4BGvnZKSKT=!+#K(j|ac-ca#5R_zyvM5Z_j0jV}f3(@3lF`h*S$ zem&(cy&kWBK&KNwDEy7&zYhK#(4QcGv+xg){~q`Up#vCbV*eK3XpLWuSJu~1{x_&! 
z56%6BM1P&+Pq_i_M+kqL@Y_FX^}h`M+0e|F{&Moy!oL)_uHt+Y}__-y`a~ zDgQ&%9}@Nbl%K3VO8FS-kBj;kqcBpp?UoEl%K5sAmu-S`kkVF zi1L%wk5PUL>d%UL*T<(Ge>vqlQNI?N$KOWz$@-5_{;#M%DC*PQQ}>@u`MDp(>*>(k zznAip_1{4Gm8jn)>ia1_S$&N1TTp)rx|NQX zJf1@bsoqv%){lz%bjmMA{T%29s&Ayc>VI6+H&Ol$)bE0>r~1@wb||&z_fIl!w#M@l z_~$|QBb&J1_ETQn(0TOu?EREkpCRf4 zly{+iJ9Hn7-y!@JpSE}pz`qW9fcynzR$h(QE8f4)7Q908zXINJ=os;G)|k8&!OIoA zT=F|VhR@SM=Mt}!@@jleQC~*+J5awyY1*F!pE1YNBgRu8dE|dc@swuXXHA~H+5CJ; zk>ruTTk)VTr0sJHe--&tKaR&UXmtc9j;~VT_maN={*6ie_2jREe@ha7fc*R5-=4(Z zME(pn&c`JF5c%(g|IsA=cJluj{(VXOo#ZcCh5OSa{%-RB5dK3+{C(tqJ8oo;Ch-rD z|MTz5xmXNd_H@G zKS2HqOK|=`KScf42!GBk)_C&ZUj&^)ey{KslfN4N66h}S+itaZYWq6G_W1-agZ%e{ zHy65_c-@p&ye`4>3tp7`KLPJJ^sk5)5dP*0YrMzc-wn<8JHB;RUiIHA`VR_Tko@Q0 zjQcaC>3Uvwo5@SteF3Oy(Vw4fmOS$3D;{(K@gldIyh6bX30^Px?*;D&bOZ5nD@~qH z@Y)2gjQk<+)0(Blhp?(--G(?qP~yvlhqGV{@T^}JUVn8?Z2YxsmEVV`Fl{m7MkljDX;qP6Z@}^ z^6jWU3eDT^xWnvU*5^|Gb<|%8ok7QY))&nBUU9rf#dr$I|GrP*eiXWvc#Z1jd}7i6 zU&Q`B^ZXtXyms=JgSQVliu%O)<6Cd?Vxm4k`LCgVJ2anvnRlA?qoO{K@=v3FF*F`# z-t2!|`0L4k68f%Pc|4L|LH@(=uS?=@C;w~k?@Qvh-Dizw!5TbtLr*oHV)B0s{*omA7V_T%|DGiN z5%TYb|8x?6VVyPpF8G&2Pc{BV^8W|^9neuA633t8OBPQZfBD}xuV-?8YRP}uXK=hg zhl$s{+2nZzuRt76IbXK?H-fha+E2%K6Xn(TibQ=I<-dXY$3?y4D;8hXyG4B-<-1Y8 z7@GTUq5Nd^Vam_`EM9LH_1X1P_n%Msn^3v!;1nSp7bN}6xpRE5V<>%ju`%h8t-a2*v z6_js4{W@swznAip^*==UGpIiy>Px>qb^leA|7-VFCR3!vFwF8sCRzYYEk&=&%c z7?1SVf79Z3!M_E%F2(Zq+;8R8{tJogu}AbjM*i2pI}6S4KRCCWydlA>5xjEpzvnhQ zKY-@#_X>X-`9A^wW@M5{N?X3`Z>SU=)w65&Go&MSKHqv>IW#l74?In-t}Fxe_3Bf`DanT z2AcbiP=2!hdntc@CC+D2pWiff|1Qeki29Y#O|<{IDL+~N1C;+7>IX%A#V)h|u-Jcf zlz#^Go1l672Pm)lm-Qo*f7|CU|DwL^A+!HZ(SH@?KZW{gXzst4@~VGXKS23b)DMb! 
z*TYlyUq<;es9yuk{YNN2S^vG1zy9->e^H;`Jazvr%0GzumC)ROJLM>{Wiu#H@*7zo?ucdq|>Ni01`1>h8S^Y5OW2iqV>WhCcb^jHV z&-()IKcPG5@oVUZQ`cwy$jaZ2`gzdlR6qV>E3d}iC9YpNk6HO9)GvZA#j3>pR>k91 zUe))AdLQL~kNVBf6;vO2V(R)Rvyq{X*{Vn{9p?N%#Ur+uR{9BUvd&z(4dR(uQ__Kayjpq~aFMytE zJhkM%5B?2F{1Ngu!+$V|KV7|8GO^fgzlMJf^i<=iApa}yuS?=@C;vrv;(DIMZ`*H; z=X&^4pr;y7G5Njlmn8AGkpCy}?@8hxA^$(%Kb^#1_;YLgj=OODLr*pSM)I$Ke@7Dk z0Qv8Qe=vzZ?-$m1Lhvt!o@zYx!C`D5@OfgYykdw~O1|LS^`-Eu+vc+>Ac z%JZ*-{1?~a|KC9K|EF#HrOB%jydiPD=qCR);2lz$#@GI&$qNZyO!CP88O4KcBwoQ& zCa+)cMg^~o{P%&k20DyAl(^oHQ(o=Aj6LT58y7tLudM!m0N!lqA>#E@Uh!OlmwLY) zYt^FPPZ=TqbKsqZE~V|widg+Ao=@=Xf>%WT7(G{%gS7r8JGt_ZyR!{{wSAvL%oFWr_#wCSGTk$*U8*T)`V6zYn|< z&{f3Ce%9oL1l@H`n(!ts|ByMpQc+(>`S+rJIdqWf zyC|>5Q!eWJD1SZbk3#eQDf+G1zem&;Q~p-emq3?M|1FeP{nv>4R?2Ti{a)x|svn`e zs`rZeG0L}~{;a4keBKia1_S^WUz_o04J)aU$e>hb4M{`aU~ z49(-Oqx@w3*Hiu!>bF1-(e*B?*X-Xbt|xMTddNS^i~sKk&7b%195Hze+hWAp#8+Fr@Z2g30|Av1<1b&=j$`aS0{K~f)^zJKf&7x-9$Xc zQIi)EydJ^JC4c5dd_D-ei+EAWtL^O-ygtDjA%78gr=cUnYyP7-zHz~e3SK+;tHIj` z9VDLfPbM$tQFDHW1h0tvjo__-W?nbt)%F$(UQF=%$-f`GV}e)SZ;r1{@J0o%hWsyq zw;r0uH$r(ez81k77d+cBtN-)v#jPVWpTB;}D_*zYr3TFV??&=p3f>OrQkst_W3)51obDN`ToiEqBUMsKPtwPA$Y~)zX`k&=vo?22jvyd_Cxdda0p(6{8iu`gy#FP ztUsIM%NF%Hl>aK~7eTkuc)BP*v46yPas{uK{11b71iFKGby0IXPEp@L`Cp=b8#Hgv z5arc)3Pk+~cVwUnJ^FDSsCAtD$**Iw`OE&lLMZ^18@>@qPF_1#}&a z$N7>uo@~K$i}4kZ|7!4-LDv(nmGWwQrJ}x*@}ENeQ_w-GFZhc&o^nxNOnD#bOQ0L5 zzKimze~+l|r~Dq&AA{!p%LdH;Wql3hpF{n6=z8kEpYoISAEW%Ms6PeGzq> z_fmc?CUPTml=?4t*{shK^XV15V)9=NUI{ewGG8%y<$~uEyj=2s6uc{;`SG!z@@oGy z2%cZ?M#z5`c&DM8XnX61%<+WiyaB-r3SJBO9|vy_bd1JV z`&W}^|B-n;Z5F%$`TM}z4jm?5-rr1Kf#8J%uaNxz2i|h%VdB|`Oos&c@wzFm=C@h!!h+XN{-xj@gB~JY%Sm&5J%ZOMc%9_00PiX2R^pZZ-Qi^f^r9k)5_&O=C zcz(f)3SJ-iPk?t6Iz&9rDXTxl3k%+m;MI};yf5PU5Hz13#jl#Y0l|w2o`?LGgI5X7 z+ne=IlV{&+&c~?W<&*zr@RmZC(ef4*G*op;1vm8EBRZ%+Y8Ot ztGY3hH!gT?!3&W8m*8!O9-!?lJ8SZC9y8~+RPbuZ-v{1$Xuf}O{@di03SPP3xyXM8 zyp_;={fkmw&2PQnc?2&;{@FgT)mU7|jV@*&hOfbOLIAECV3 z|2?99fbvhHeh``;ZyL@w`)?B4BY92ae+j%@(0n`;q?x=f!Rr&_bCdty;H`r0qU{@@ 
zyxP8~s863|^_TT!eBK1Qm+B*wSM@`pet_~W)DJ@Q<6YqG7GKrJM133O*Q5S%Xs$22 zz^oq?^>vj04(c~S=g{?}-fq^9i2W&fE#wb_w+DKZ_J?=2$#eX~Jio@p_=4nr5xkwy z|VmfX=1;<;XC3@H~RoPW}hM+Xu~$uf9u7UX9?@2wsr< z`@!1@&ChSU9VV|?@VtT-BmXh*PC<{^toxJh`6jPN@O*+dME;MJ4A8oVvg ze7@yeZu05{uUYU4$iEG|Wzc+m2~l2+uU+s$f)^%#J9zt{`SVyE?=i>MCwOgwH$?s; z;GKZ3PqB`df`uk8^{3{1gayw-{*+=~ zUYFq2kpJW0t%v6CU#VYY@`8fbBY3UkzYn~<&=K0+{P&x@h~V`JUKROUz^jICrt^1% z^6GdQ5xl72rRG}wKL_4S=qBPdP+svIKQrfdNbs7;{}1qXL-X^2;FVT?idQ6fF~RE~ z|Ak+};~O-Ouiz?^S1Wj%ubJn%}O2WWfKuQqv2f;TRBdE{RW-ePFJA9d!Lye`2@ z{kD0(S4RFXg0}{`hmM!A#U^h=@a%$@eU0T0g0~R5o3^))@@hV^!sh&D2wsf*9pIgU z=J}}ofYqPkxdqQ5cz*KtgSQpBg0{EAY4W^+mo0ch1$ zPCp*cq51wc^g)x?D|k-9i;({#;2ne>rtPiEH+i=G=6n$}3)@;FStqnEWq- zw;!74*R#y(Pw_eguUzoz$)B#Bz0^LIM+x`)gm;Q5e zerp7;kNhjZI||LOpLv&?yduH#3SKMuZwGHL^bl>Y_j;4(6+EBd1<3y`@U}yD5HIfr zljj%pF3SH1^(&#>bi5U>FzZ8NJOROLBL5Nac0osJJY_eUygtDT3Z9qz{{n9#w2S65 z_)(MBEb2QcfAQDxekL@(pX7F#^|oJ_$3sZ)Jmh~rc$Lu18>PHD{&NMdP4Mh1t^Qr$ z&4!N9{%WVZ;*|+rSnztt|9S8ZL-X^g`j1)tDV|^OIt8zp{Egu4hUW1Ve%$1>3tpGt zm6QJo@YX{2(e`GzO01;AIHj5c!9}I{_UgUVW)K zz9zwQ2woHU&u_r>7&=D0iq$5sOYpJ<&rkjw@U}wp{APa2d11lx30?*HH-Wbfn$NdU%B$mVK=Ay6XRomOZvt;NG+z(; zDX)0;PIEp2f;UF~Ux0TOx|NQXhILkdikB~VLBVSw|BK-5fsPQb=r)t*5xi!>t04bt zYX2!s=Ud_JCa+QOLXt=Rx!(j2n(w!59+TH8cx{50N&c(Bn+Kgm<4dnJdBcJi7QB4& ze-gZ<(6z+#ea_@%JY~*rr{J}c|32{cLi6$B{k+L561*nH!!;2neJ?e*7~Jh$MD3SJBOUEuA3W}bV! 
z$*U8*alxx1|2puhp?Uw=?=*QK!Asp?$6i&7{{P%L8p>Rzvgs zWYK1m=Mp@h;CaX&0Iw37kLR>6o4h)~^9x=!`F{!CLg-=I-WcW8^(rKI0l~9>#p?er z;LV2aAzmltC$6W07ZkjH@{fUcOz`~mR(})MQ^9K%yb$>>{}z6K2AaoLyv5|@JZ;Wz zNbqXOUkKg?Xr7OZubRA4!D|z|Jo2vvZ!z=$?Y|i1)%Mm4URdzbzh?FC18>wE#5B0gXaC$NqNQV7rZXPi<19o@Qy?C=TQc>TKy?r>Tk^X=n*{I*DZex zycB4D{VMevR$lR(g4ZW_Ipn|S+qge~=JDk;n7j(Xiwd5b{8xjw3YxE1McYhXK=6hH z&qMxCf>#Nh!>`xhZ}K_?FD7`6MU``LZpGI=h+OZ|==TcZ|z|7ZKQ<$n&m6lgwPY>if49WP$N zvkP7h`CkEV5j2m_vBTtr1TRDIipc+8@K!*V)Bdadj>+p2Jcr=5kpGHC+>b+#VY?FZ z>wUoFr9NZMN4DTKk^e^Uc0u#@27)FpPw;XDFHHU_@b*LVe1vzJyb8f{3SN}_e(;V% zGcWp}$!iq60>MlDuI1kY-b`rT-Z9Fn<0T?^MS_>rWbrz|TL8_k-;YsV@kRvCEqIPy z7VjnS=0o%MZ=^kBf|p1BG4K{cGtcp`$#V-{x!@I%|I!^eo}p9ec+PG% zc|O7O2%d}l*Mheant3_9OOD@CwPl4ZP*hyuDd1CNEF${DN0R{vUz20-8VH=h|cP$^|bVc(vqz8oUkA ze13R-VDbWj7Zkhz`CkHWJ9HZDzuHGkUZ>zS3to`?Dc`~K8Ks#QGI_&-7m_^kXDc2w z&xiL>lb6wL&TpIGHIe@&@OD8r()dPxXz~gLFD!UDKeGHAz*_`eK)n1`E3d8xwSw0v zcpmb93%p9`QsVjdn!Fal>k_;U@;?UN0q9YTBk_3D@MDwLD|kJE*GB&5!FwFqN#lz= zX7a`buTSs>$R7i55IUQ9(KeHpd&r#MsNkhOZu!$6z~curzaP~}c{Ly9f;S|1QSx60 z-f?K-@$Dy8e~Q;2crn3CeZumuR^x-_^LLE$iq|1{qk@;!Zt?B{ZvnJ%{Oz;yiZ?8H z@KT@$P?tDfB9vFWPQh~v-T?WZ2X9bm znvc#-t3SmX7Q9l)BmdtO51QX!2t8%;GJ4GUEf>5l^1p2-{=W@0U!Qw_W%3FI&m(wa z03 z;Q0hENd9{8c0%*^x_@Kx#s$wWcs1m20&hJuzu)NTGI=?N&G`)oUV!`uz}pVZ_s^xz zn7lH<3ksf>{C@;*BlIYpA4SiaJip*I3tkoZ$HA+H_R;y#{+!8c7rc<*4UzwS590L* zXuiJmb(_2a!D|z|G4i{>I}6R*+kMF7rT@;HkFemy$X^NGDQNyYa?bN6uR!oR1+R$w ze(+X6^Yhir9+OuicwK^5Nd6yzw;Y;}=P}Bw^P@@ddIT@?u+@Jrc=Mo{*GqZD>k_;^ z!5bm}8{nN5yw=}Y{VCpv;6(+moBRvEi~m0kT}8*=`0q`go|dHsTy8Z__!`pN$~c*mdzu%8l-zfDI?URtj?A9lg(Apd)s@O%ckkoI5C zA5ET9@G=B%jQoY*orR7NufE^pc?8cPcpc=gP~(H<`{(v!CNC&>*@D+c{(A6^Li77u zkrz!~m*C|J-Vpg81MdVhAJ6@NHhCk0=M=ng@;?jS8_@iIZP{^?mvzLP-vYsFAb$+J zZO}Zw%`cg}V!>i=T$>IKg&crE0=61+W1)A8aPFnMi)S1Nhr zU#WP|qcpzSL6g@nc;$i@B>x@Y?S$s@t@LG+m-+{DK0JcwCI2q)HbOJc`HIQQ6TBM1 zD<%Iv@K!_D(*E-gnY?nr^9o)U`F{`IGtm5eYxIQ4YY;r2;AQ>Q^1lk+0%(4{F8gm* zUS01y1kW#c#pJ*EA$-0Bx|_DQd)VX+3tm9*GEZ9m>%f}_&G!c*lvm@+e8HUGpx|Zv 
z-Qs;3yt&X#^!`>|%*rcXk>E88UK{zhfcH3bK5cLKl*#i7UP$nU$lnUy321)4F!HL& z3khDE;AQ^P^8a4#KWHzFuWH1~tL^O*ys+Q}$ZvZX_v6rfz4xCsd8vKod~^z4EBW6K z-d<>9|NYD4IR&pv@CL|#6L^ErEwsJGqbAQIcs+twPySl)wm|dq)uJ;duSxLw1kXeM zo#0hM^Zi=zHIvsRcu~RYA^)$zI}FW_m+h~cyivg$61*t+hrl}yokshwbIjyr9X01S zCU`^Sw>RT>h91Rnmbf1ZpEY@I!5bC4e)8vmcMO{E*ZTi$^6CU{T<~oFvHWiEQlR;G z9;LiGKUxJZb*Fj%=NPwmp9gO~^ayQljPi=tFL-vrv%g{S8o-+k&EvEE*UBqi>L1Pd z$Pm07@;?IJBBg15Q~zi3oPy_&Jo5ia@u2zn`3U9J_$mZ1Tkz6tsc@;qZi|992b%9+ zx+$-C0l~`^ykYX61@9zuF>PR4ns3<^zA0EPw+f~=eWS~r+p9SFEl@1dhAwy z;(Y5j=c7jO8p*#9ydBWY>zi%zoPy^SywnRVzZ<-n(0u&mTx8|d_&kE=6TD*bSAkao z9i`)CO#Qgx#G=2yFDQ6^!ONRt`R@mBF*HA)DZJRqtMPRUUO@0_$=?Rv2IwXlU++6i z-k9J81ureb^1lS$ENBPa|Mkzc@@jlJ$ISU{7QCEyTK<{e$Mp|72ba*q<6GK1E3bHE zf)^6JV)DNiyb@>+ZLj|llNS=aHo*&%|1;q2ht4Bj$E7B(Pw>KmH$?t#fOi7Ai+C0D zOYM|7`BRsNj{7|HI&|hPD&W{eF|@ z61*Y7t04bf;H`sh!*QF~UR$ooi-~$W<(p7H8#+ki30!H``^9)h1+Rtt&w;lGx|?|J zt4&^);EfAj75T@&tA^(5$xxojOOKlSKlMTLe$Tep@-NwgTPbMf`L40@6X(C+*#)nO z{1xEsf)3O6<~dDXi{NDlo}2vN25%K~8}Xt`OkPy*9D+AS{sZ8hg>EID|5}rmdEDH8 z*@D+X{+Geq1KmzM=TehbDtNhqS4{r5{{Yud=nmq=t}}U!g69;xv=3SSrQpqi&ZP5y zyuiw<`Ry0H0>R5#X8CUeZvk{8jW76NE3bHtm(2ZFBzW!Q-wxhBXny>zDl~azg69^z z0QsK;Z#(n=jn8$x$!iw8Qo*Yse+<0!(9FvzGI;}nS1x#N@?ZK0j%R4T9*nFodD(w4 z_n$}bGH$f|Zt&(px1DEQ@0wgzUd>01;ME9Tg#4SqI|$9ZqK})rh~RkzuZH|T0dGAt zZ*Tf4lb11IZm&=9oaB#!w*X2HqRcA>#ROGkF=WnA_VUcrE1L z58fW7XIR$*U!}?O2wtD$k^dFNgT_x7zIi|8t}=NYf)^FMTK4}C&xfG#lX-9Qvc6#Q z(uU0K9TL1E@)v=(0=l1gW9v;`nc&3)&vB>a_kuScn&-Ev*2=5vL7U)>3SOu1JMJ;- zheiFks4pP@&oG{4(B2EJ{grdV+&`(`HSh1*w^;pM^CLXILbso9)fa5D@@jjWf@c@J z3i6kNw+^}ib&31yfT+(9^}|%J`g4eU;Ww@EYz2QgbOxROh4))|RUZ=lWeZ*@`5ysq zHFO${H&^(}$$t#~wb1;1U(MUi{v)D)r{J}cKdlwd525*ZuV^%RS${R>vq12C z;ze3aUd6Dvy)}XtCI5}!9f#)QCGd#JiwK@q@Y>1m1#cg8_e|?}Dco!FvQC=g^9f!Z z`GeqXg3h7&4gT2Vbqk(f@LI_KEO>jM`SrHG$4p-4-_7v_1aE-+-e~%b)XOT<@U6bp5dX!yHeusLu#n^|zpYE;RptM90sq{KWN8@Ir!DK>h~s zmO=CL>)0<$UWMSb3El|#9|P|+G;d!})Q3fVsKe@S6!ninm(uYtdcex7?Q0jjPQe=^ 
zfBs{5eh$q%*OMl1Oz^q{&rN}HgUPeKYK|`^cv&x4{%?Y}0D6cXzhXzNygFVg1aDOEQvYcA_k%YRdW4R@?mt<1 z#p@BgalsoU|6jp71I>@G;TKI_)<4bdO>MHH7q#gBPa7luWsl?aT<99w-kjqmuU_!% zf>%cVo55QHT|m5=znHvU!OIZ5Ao=UT+XJH54?^!dV5&F$Rj#*R=(PyFYGVHj=q&_qA@SRJ zoaHOuuxm<6L&>InY1^;C1k3nw|nCcatZ`@USXCLS3-6!;liT(Kx z;C(Of6i9gUzbWtIdYgpasL-n<_KTr6A57;j$KTKOX1>njcTnifo@MNNp?4{GH>uAf z7aMn#-mK6&Ec6Z$`>#Onc`)4{*m5b?J1F#y3ccOL{&DC%44y^exBG)!FZ2da@0id# zNbLU(z30Gnq&`o(jO%R{dMAY5EMh763g|C)*)_yCFDlnSo5OX$rMdd0;4 zY3N-B&XWErF^}s_JHgXCOXzJQ_NPCH^$eJ%w_rZkTPO62gG_J`4|Bb$H+g!igx*17e;4$g1Jm;<>#DilPN5ePdfSNoPgVYb zt4V#{vxw^*7kUjs?+~&7JM^9hhsb(*$5O7h`tLlwEkbWrgR%eM5ZW7f8;Rf6WyW3g zSI323tI!)E_SZn~N8lnzc>VM4X0F%$4<5g$&^t`*Z-L&6;9VqsiIrSWo%?kDIPXm+ zgx+RiKLowo!8CpwKFak{LN6=yhKT*|p!XfH>LPOKUCs3-PvY?#6nck={gm$@yhTb#aLP&V!vEv4=Hz4%33%yZd{|fY;1+(=` zhU-l$o&cju89*fZj{sQ8FHF{tVaKC-n9Sy&<#QkKbXT zmnHU}fL1J3{PtLhmK;AknM4jq4p1dSgN_O6-3Fy%?CDM>_BY zu2(&or}u==8zc7b*p2ZVOuxsk`x4jNEc6P$;^{m4e_M!sHT3QPQ@vTYbG?GoJ^#-5 zF-7QA5&Jas7J&DV@*Um6^%6pFy3jj8?7s}XH^G(IQt~DaNlJ>Ul zn_Mq@CXZi*&>JH5AA#O?z#B;ZP20uw4hy|1p;txhZ-m|ga5d2j4{^Q36du2j&>JN7 z_d)Ly;6o(6l@D>fF`?HW^jeAiZ=iQAn4M4D&Gn*FdHh<0-t>o!{X4&l`2qL{sn2_d zjk_w}-9oQb=$#<;bD;Mon2sOof5`PJ-ofJ+6?!AYz7={u0@L!H{u8cuKyk^z0hkQ_LIMd^n&Smvu*pi-k{LiDD;MieHrw=0}he+9sU{DJ0SEn3%%pS{u=1L z4yJmupXGWLXYurI6?#>~{wC-x07u_x`qKl4j2^nhf}PJBR`q++VZkE2?IOHw#Qqxy z?=J98k{=bn=Xytl-cF&{K$RNC^J7TpZ6@}AhTiSqZA5S8QLeX3 z=nV_K3SxiWFvb@!{T|uUjn_G!Fx#jX8)P%Z4!F>gkFf)C!tph z-b?h3jd8u>LT^;)O?lPWe+7C|!8E)(+RT(9CB9>1ePukcM{f950Dp9UW!2n3V6<)41Lyp;!1-Pv4mz?jrV|gx&+- z)kN>aWUhBq=uHuN(@r<`cR}wg@Mfa7@l4~c>Py{wy!bft&FMmKJF)*U^zH%E@-2A> z*V`rZW(vJ9v40tQSAyyH&CO?Vy=gOe{ALNg-NgRM})A)s^bG<>KS1k0R#C|FC zVqm)8J@Y+W@1W4D5PDU_{^QVF0H)vTqvvwH>T`K|tAySFvHu42)`Fv?KJS^y^|lJV zkkC6w?4N|*bKqm7z6@Q!^``w3k6(k(+ehrjp!XD*_Af&ha=k&J*CO=x68j5&fcZMO zh{SL7{ao*W&}$WXM~Hnr^j-q5BK=>`CL9TZ~=w*f8PGY|odiQ}_NPRhcIoDe?lc#r3=oMUH>|cT2Brxq? 
z21|{*>RCy4#6(0db1zd!6)!1V@&-gcq4pV$vU?-}qu(mvKz zbG?H?Z>P}PO6;G9-ksn>L@!#y^+Ff$^bQHVL2Caf)}LV7zl0Zay~DnL)nK(n;O&3xUdHv-3B7$nuVA^c-wC}*;3Co=9BDG{ zs(nnkkjHOS=oK{^``J1F$F5&Oa)V!Z&~HOZv6auwGrc^{A8 zVWAf$_QlY<5=_V6!`E=Vtk63u^iB}_E1~x$cnT@su~x2kQ0R>by~)=a`x~KmI(Rin zZ}?-zUFBcZ`+0g#2)%A%za4sU@Y83Q{5#NX^bkxzQQWZ7Unlenw|e0@^TA4D|0MM1 zgX#FR?mDiQo#pv=_X~yICSpGZy)S|{kpBO857!$Odeep8j9z1Z{$qHL2~5u`j3te` zN^i+UJbp8U-sF_AzY2P%gQ?!(8;rZs+bHyA3BBXQ{tM819bEi2Q$EwOTLjdjyz%-{{`J^-3<~>CFng^~635y_>jbtl(bC-e>qysy?GHBW;(DvD;ORXo^tKZFpF{6XF#W!<^V?i+Na&3Ty*69H+c?^U#rkd5c>w` z^?>R3zb(J!dILf)D)e>{`?b(}08G<6`L|qeROlsy-b`ZuE$E#O-bea_J%_kncrH(G zR_Gle_P>JOOWys^>zupL7{h?*q`wv)>~j&{|>##^=8iF@mnwS#)y3}^j-zC z`OxpV-hj~CDD(=B82e?=n*=^W^6$X^7pt|hJ@ab&^t)%FM{54;Auo}|KGUY%n$MS4GX=a#6AqYKPo1A2j1j* zDWNwa^@x4H(gTkWz0C!0M=I4H=lkP6p|?-y?IQMfLhk`E-G7-;$n`4z4^Qu?(5oc& zBhZ@sefHQ0Nu@yQlBW|5C($5A<#T)BT~N@8Ws~gx(aPS2*3+{~mg$g6Vh^ zevffiFFx-4Pw1tH{j{eMKX3`De;d!`dP72QrqJ6)?B_%8F7QsGS22_8P5);e zzga@Bf!If(*9hK7^!B`$>kSIMVxf0{*xwAjUx8`<&3Yf#J1q1ngkFf)-v_-~FdZ+8 zXK}q%)jYjbLa&AX=sg737D&cws zi+FmYLN7(^w?gj*Fx`(RF6VmPLN6ioLd5+KhMS)n(1uCf0k^iBu2 zlKiWjZ`@UXP+7y%J1F#`#QvT8v0nl{PRch_#r2K~z4bz`mDtaP-nC%*y=MCYuD2@0 zIJ68o#5_X%*_sb>FR%avSjOz4dWy%A#n8R-28 zTtVV@pqcA!T*A}4Pv{*d_FsqI>)=+RSGAJs9T9q?La&9`{~CHLz%+i{tGHgMj>qqy z(Az-l--O=h!1TTD(W|-MHlcS|=p7~Y7yk_NXYfvv-ho!GcUb5h6?$8UeLeK<08_nL zZCo!@&(k|5^g_fw0liu<{T|g2;dL$L$4H^C3@?-x!#D-n=bUW6Z^?O$9X_7jbFp{T(4-U zmp*4cIaBCGiT&l!i-Av&@u(un^)?E5ilJuC#ShyVFQm}vCx}E>~DnL zMc~OK|E6cTUP9J_YV(ny}d%OO6W~lYwY(zZz^~j>HpS!(zvVf zqot9jHzf3S68kae-3Ojc(mTA4>m3t%4MJ~}*q=9w??+%d-yQx8*9$M>@oN!!`-%Mm z=sg3bdb`(iy`4g@Rp{*__U+Jn3QX5;J3hn|OM+3%xzW{#Vd@6kJ8x@1eW7 z-sEPkw^QhyAogec0_U;7biF_AKe=A3&>IqZ#l(I-^ezJzlK4&E!S!|ty_r=En^y z_g(u8EBEfF3@i7ozc#GgL(dsj?%n@mSh-L6qhaM<@rGgLe&B>*lLo#^`&r|$Ikq72eH2ydiR1;q<<^;5Z7xJ zdQ*hn3}U|%dgp>?koG>Uit8N|deeno39h2AWow~g3eZ~*(6;9*oWum5OR#Ptpcy<(x)O6;T1yB0h~^r}K!Z}uvl z-U^`?Cib^L?@DkLDc|f8t~V(3s)XJ~Vm}JKTfsvlekJu>?}X3`3B77!f9kI=zJiNM 
z{OTIGUc=Qqy$wPyLF_A_*8@IC%6GvapgsL*RA_M_0d7QB<_9lo0D?G<_np*Q&&V}Hi4v0n(DLdtjA zM~%DcKWBfGr#CC~rW5-H=$!+m?PH{s>xG2gpwQb->~DhJGhnK>?qghUlh9i)^fnUv z??LZYFx`)fuI73Nh2BP?mm>DBK<@@HE#HDD*DG%2`L|i<%@X!og?pB`U+^2upTTtf zG2G3=+bF`@F7%EF`<=qQGH&!%BfRG@B0XEDtI#qFEL=;Rd`!I#?v<{^tK86gTnn7 zasLy-dj(9-FBN}+hnH>R;T;xwEyDh&aNkbc-**uEIp7^+y;}4c9$xWk9^RPHYY_G) zg!@+Fo@JpPTk7G8;W@P z?)Oijw^i8B6z<1}`%?(-6|ibTUU&un#=~oE=i$u~daH!}j1F#JEbR9Y`_rDoeg+ty zd~@M%zmtbwEc7ab-XURM8Rg+u3Hy?-8U0d(UkWZE>96~`aaZkStI!Jxz170LLAW0w z?%fFQCGa#-pALPKhnMZ-@oy1&$Ax{XaIf2G^uCPn>cRB ztMm=TdHS{sy((e9Q@C#=?*E4HZUs*#;SKzRhd1*&9^R19+a~OXh5Iq${v(9<3Yg9} z);-C?J0QXv5qjH+{p--X2i!vDe~0#Qy~^u({PzjH<>Wi5is4an0%P)jR?KzLa$iZ z?-o2$*pCqRDbM43F?ikSrvIApG7o=tua_Ur{9=~S3kmym$GQJ$N$$T`_+L-#7a%@2 zgXwv>U9WS!A)!|x^mY^buR`x(@P@NZ`qmdBvHFj*AGevXD!+qg8h6biyebi1^%P_O z1j1Vgrswm*?=!pNVgU~xp>@WMj=)b`9JXq-6 zT%ScLT@v%{~Yvg2hSq?*Or-FZ;#N63ccOLekb%E z28W2=z6-csNruNSA@oiV``<$EO>iaAD||oK+bZ<3La&0@pY;OnQ-FuwX6ongMO<%k zmd9^U=p7;UA?UpXrtcvhzm)4Ggx-3gSM&j6zZQBIg13|Ya`WZJUDfANp|?@!jS&00 zq4y*3p;JtH2gHW}&y8*gp=vd%#CX{95O6y`4gDtI*p>?EeD2Tfr@4KDT=T z*DLDh@!Kx+4iWqJAIA4ZFr9B#E#!JbLT{(gTTSdg0=)>Bju*SsW7hhQbH01V8XmtP zp|_vd_dxF%@K#bD)pcBNz0eyLdI@5G7xa3-yU2R=NF&!fD)dH#UePjR{}l8t1Rp?> zy!rL9RmOc1(%o>iVO1Z;uHl>*;Q6sngm;|Szk%>x2Pa7RPl<57?zLQRROm&C{S_~w ze*{m%5az{yW}NF)-N^M03cW0`?}A<*cpoG@y%TA!H|678@37FT&T#v~S==*dHM7ixB>= z!240rz4UC}$o&tC@Mj9W9mIY!^zH>8BIVccC9Zc!=*<#(8;Jd*(EA+tDA}(m{tDMi ze3Iu+vCzvB``<&a4?K(L)!oJQCVz_SRS3N;#Qxmh(WczPOy-hi;rKE(aENPFV`8HB$UO!q5i40Hb_ zpXTAW3cYG#|KHGC2#%8Z%E0%z-g==I6?)r={kcaleuL?Hb;F}v@37EI2)*6HewHe1 z{l^&(v%-EhabJq?F9olDyQv@5Pjdg^&+z;i6nd*i)hML?Oeq+EUIaV<^6J}e;;z>d zIlXBGiZ{Q1s`89KXKUV;<8w4`%Ejj$n)e9z1A>nU-X{E?5bi_5eJ|lJAU%pzj!Y_e zN5Nk3DEJJQPk;}B-|ezWPv{yiJu?cLUUGPycArs@1&_cT$Go98qu^h`n?E|Wpa@y0 z_(AYc>(qixuKTmf|Fu&Kj=4MruJ{=2UB2Lda2^6&;_@Q!da&m|2Awe+O1qD0e#=Qo-^E#u~0p15*?b>eu7hiAk=WcK-ShYu0 zzxIGfiTiKBg$bnhU5@+j!J9udwP3bu|F%EjdmMO*%hSPo!9^}#0zLvBa=8k;b)B&f zgLi;E`!%jXaLc0;oV|If{=Ea0f1dy!`z*pc*YSTFc-l6j 
zf1k1kuXgoSe=uAz75`O#X6XK_aNp?;qCZjY4Zrl<&nJT3EB3F6{X+0M!X4nv#QrAmcH({?csFr>7QC0ZzXm=?xac_AC*dXF!rz$mM8Pu% ze;iy)cpJEy@b|zignt3<2BUk>?Mvm;QM7mG&y0c>;jVZ@yU!>nd>!M@U51Om+5a&7 z&)~|t4Sx)Lg77E7d$t?*JHhMkF}xRC_n(HJ2T%FB;S=EfglD{g{N7>Q%fYMeH5>-d z{D$Ek@FBuq0&oAOao+*%-f8$raPfVH{|r7x_&t9^|NJfEeg$~oe#1+^Rl5vl!37T( zz70G=_#5C2-!|?e;D!ec9|BJsGCb)7+B@Ot;4R-V?w5dD|I6@)!9@=lz6yMpa2I&T zZsWcNoOsyq=fEZ3HGBtnjPSR>yT51L_kst94ZjGke#G!!!G+&9{Ps7|9td9o-uMIK zUI`BGF&qX@f7Ea{_yFN`;H^J2?q3A2e$4RQ;8`Pv9|j*G{8RAG9~t)-!Koh`{yVth zCx*}aJH`vb3&6vBjr$7ly2lOofkRIi{t|fdlZNjH?<4#ac+)=P{t~$5DZ_972l|7j z4POF2NO(DT+fR-Ajo|2h!~Y7N{fyynfR7S>2E6NM#{IA0?9UBfgkGd_)bL913Bnoh zo@b5w7V!FC7*_qztWRPws{c6Oa}J({%}m0ZYdN>JaUR^rx%ezK`%r%zy;<+(ypR08 zybAvy-fMFHIQBcqd3@zwMb^)%zl>dI;;Z

%PB#QB*<&KE5=ybJl^{V6PXhUD7vMVP3r%#$xaGA@Veu%omep62GY^(z(<#>|29#-Kyf6m3az>6FD0i;*O zXJc+XepbOh!@tLq3eM8;_3{HPzn~HB9;^Jh8tmndn%{PU3uk%x_sTz#;GGv4z8PF} zuNQ|iwSEa<2rHM}4|4Zm!pddlL)^WHuy%QvyB88xE<4rXQvK)5 z0u}x)!pf!OQSM$rSh>s|;qJwRmCHK9N`F0J<$m->-2XAc%4PE7+0%cP}BVT#gY|@f#zo+-K+7gOfkSgg3z6`*Sw_vPtkWf?pJT{@b|!%LUI9oDqDt z;KPDXKZWaGD7aK`i{L)Nw+X&q@Sg;qQ^>=w61-gSYQbHE&qDrwim=M>uL?fpR36@k z1vd!3MsP;(dcoTS-zWGH!Lv`}@reljp5Xr${HEY(Z|8a!30^GtYQfhLR{7aaSmo!} z1RoasCOD4%c?N9Xhw<;6$vi$4f|m&H5c~!B-0I5u6cxi{Sqh{D9y`1dj=xrY>%{e^h$U7rajJ zZG!Iud-I2Bm@fG5p{EXmV3x40(JpB2B zuN52>(3JW0l{;?3voVU z2fV5Fb+d54L-3H`9|?XQd>PJnZgJbo8^ZnF@8R*Q5WJSKs$UxjtNOJ?@V$Z`6Fe&T za`mCX{iF022o4F(3VufLp9EiVF4vC>UMqNy;B)?o`~QgGTLnKQ_+`OooX7PR3BFeF zTERaPJas15J4f&(g69g(2>!a@2Lyjt@Y8~SE%^U}7vTPY*B(wkpU3xIg6jo;Oz^h^ z?-M-fy-I!gx!*@vxep6I zA^5T)9{wC~3j6iykDC815bjOjmvGt3E1!&Tzf15w!M_#!XTc2@^7upq_Y+p>T~Ao0 z_n_d{1c%-zEAKU z1ivQulvzByGX-BFI3f76f;WTJ`46>^b|JpU?iB9d7Q9FB(}G6@zbN>F7xDNn6?`>e zRX$O|s(c0n-z@kc!T%$;_F^7>v)~^J{;S~2FX8@I3ceOh*Ux>z{TqV+OYjqdeY!mi{Nhy{)^zh37&r`kIyZF?+|>C;0FYMAFR$Rsj^q~@#n(*1;Wba zoDcBuFA=;>@a=+k2>y=X*9BL8kcanS!Oeo(1>XSv9MY%$oLw-rV2yD9yx=W@za{tw zf)5COe=(15m*ATPZzrtscZ9HNe^V~wde;g5tl*7;eN2<{Yoqu?(J{+ZyS5+2?H!HtAf_^SnP z6}(sQ&jmj(_+`QKN=V?iV!!o zKB^Gz3j}uv?iIXV@aF~pT=07;czhy)Q-Z%O_#c9&%;9?H3BFYDV!_u4zDw|51fMpS zhu0+dHo@Bk|5os+^SItwV09i8E$pWLN{D%au z0^fu80KD<%)585x!N&!E=)+upx!~&rZxFm$@P7#Yj^IBDp8L-{{2IZx3I4L+d%^eM zK6W?qM%B;9h5NIDUlaU+Y99Vd!99W>75tRo(-(5RcEO(#e2?ILf}a=su0>q`qk;zo z-zoS7!7mA(RKxYp6kI2Gh2SlMe<=7Df+vM|cyAY6FL;IER>9W^{-)rE1^+_ue+xdV zmdEd-f*%q5hTs{Cx&I=;3kBaK_)fw13;qfC%4f~@(?i1jWx?~8@c3LKxKnUi@V^Ou zR`4lxTz?w)xrFJTt|0EJe_Tvh^>1qhe+sPbm#FjUYJA@;-0v2ApWufDzXtvl@~a4X zs=s(gJ&*6j;B)SE{_SksIw=^?;pbbgCF7XTP*k* z!8Z#2lHex+;Fkr@Tgv0J8Jxg=pE|L27S3zkBix@6{DR=OHE_L)1$PO4Nbr;3 zhjAY(<<7@0Z!~%;PZtqZ_3LKA-xvJ4;Hk@up7MX0;M)ZMMDP*8Zwh|La;|^A;FRDy z!H?iQHgCNDzHmP!%=M-VUMP5#;F#cV2;L?5QNc5=v$gnd8yjp&xHGP zf?p7PeiPT9E4V>$i{L@QpAr0Tf?pMUW-|}}-GY}0-a%OPch3=4_2~t{Z)@TDX9!*- 
z_*TJp2>!Ol;Ahn%*G>%CWSyWoW2wSqSi zR{6V)u*%PZE6;C<}9A6NT_Qwq)mdw-PPHGK;#`WGKxJ>YJ!L5Shg6|gmfZ*MNM+CpEjmK}g;3C0M z!8-*1MDT^Hx&DU)hXuz3ZxB2rc(367f`2FYb-@cFeEz=@?EO*oXHf74!4C?4SnzSd z*><5X_%^|t1wSD8F~QFZens$Wf(tu%eBLSeLcyir*YSR(H-B11+*SL_5LWqjzu+H$ zY5)DCaDQ3wq$rQyy9Iwh@It|jg7*r3Mer#x9^QKdUoLo&;1z;z5d3k$+rST#@%pF2 z{da<26?}RpkMDVcZxein;Clr>BKRMIFX-aoEfU--_#wg12_6$%(9QL)790`WFZgqU ze=7Kp;MW8f#CiCq3I2fK)q*z*{)XUZ1RoZB@pU}>Qo)sis|8;Lz6jAo$aQzbtsW;0FaiD)@21lM_6Cje_HX2L<0M_^X1yCHN15 zKhVR&Un=;cf^QOhtKj$!G9J!DaFH!3BFPAM#0+%tMTi8!fO2bmEacy{|S6i)U>zrZ{YEnB{(DalY*ZX z{7b=a3O+T>!+Ve5GQkakcL?4i_;-Teo#Ej{1>Y$6^Mbbteu1#c|6_zz{-2WN;himb zuHbsYO7ByImEN<04+(xr@Ogba{CR@w1UG@{{gjAs?-zVC_;tJ|?e*993HRNCeHUb{cEL9Z-Y$4V@auvvS!2Ri;ayEw#blM%Zfq`Fq0s8Nq)QJnKdtpAx~nf(HfP zB>0Ph|5Na8!G9L~Pao&;85I0^!A}T2D0t=|*DDqr0U68s0juL>?+$HS`?yj*ab;71@HQ zXhBDR+2tLnR6*M{4W;ut`n%(qY_cmI>1peVW!vMunQ&=2{nxB#n27hr;@Ma_l1--D z)L+8$+LGy}y zDI7u0W>l86^~8IdVyRdp8)=Ux;@P#~^0^c6nRRllbY47_i1$R0Kxjo{Da60DB=>Jl z5me^%md4hGlkwiH4mjD7jr7ODrStvHu6lWi-$_NeBN54D+TziCe#=a#p~gtIqg#iG znh@`;j}C-O=jEhbXE#i{zZ?2u>9zU&nOq~`#e2H)`l>Lo%wJ0FD7~6wBAGtP;_2w` ziRkR^=#KRE#uAzEJm;UB_JbznxhI3&(#bV}{xaEEiff|rLbErjGCUV*?jN0SD|1x@ zM#*%%E8gp?H>J#|I~MQi&iaj51zZ!4+A3fv4XwAv%L!jaol~Dm)FgZRm&Ve_DqUeB zy&c`j^wr@~;pWwra=({eloNWZ01x7yRdsg4SQY5S7DkgQl7KME=QN`mj3pN}HfJMQ z-7?!UsaVJLiP%-~-e_`7bNoiMJ7MLuxl+I9=7piR<}sCyEsiG;nU=Mw;1EmvA)-?X z#WUzIRfiN5(|LY>y?s6HvGnpz&vQJca*5k{3AF^9i*HRN73i;=%c&CV>y4v_=#6DE zO_6Lo8R*p#q+2jek>0N09I}*2E~D!WCERNK>oP@yrjm| zB6QstXgiR^-lj-2-e)q`v+^?6jj{L7DwC2O9f`gSyw)T$Xvjj|YfzO=jH$=^tS_#f zv*&BB-&ap$pdPUej-p!_o|KBBwc2?GUeerZ=l!$tm#^A+cCPo*@}-ePPo!7(E{)-E zrHeM_P=4ncpEDV}mWEo=F{kbKCeuAm12lu4>+I#98HR{IQ(pgbxpSV^U^jNg^iUvi!m4@mZSRXL&`g zFzS{s3B@uU>3B*_f^~T$V*P4XR2F39R?+gZd?I>%GM+?5P!K8}x#i9>ud$9RXC}w~ zWJP1?c>lswEUmiwaT8Z2oAJtslRK$oVr^S*xXc9Ql$vt)GKQ=(bd$^ZjrB$n@f+26 zm|wwHvYtY3eKvp4wrHu$bX&$-^sE{~1`XV%?)8^J8Rpfde11(|Ycf;Cx-${!LPdNl zUOU!yBv8!xyuz)czd4p|O!j4BHCTSPV@;e}YnpZAyyd;xB#uAM8n1G>o-%jETrNcd 
zI{Is{md?gn)XKU&V%FT=vewMqv3#{m@N*$Mi)HjfSxkdtZJm9+dV0~;mFSCwmv%zW ziN(CmNIcP#=?X9HxKa73bUEeyLz$_FHT`PJ(wK~55}#3POm9cPBs0J|k&Hww)}>~E z$|iH}P0O&zpVy4N3^a%OXl*T`;;iX&eZ`tcJgXMHmI#;*1ykhMGDNm5t7gd)GtHhjF&R3sw4D6r%^cGVCKyCK-O-1>=%mwPdbf!Mv=Dv&Nfip26JqpP zp3261(0|!_S~3~Pb&K&#b2c6C?XvZErnyk_*bO#YQ~ii-Q!J90u<^2_y>*$PhbS+L z^w^VVjdLT`6qf$nYFkgXn@v|%X0eU+q_BZv(KWreim^9N#52pWWo(bNNh9m`F}z#! z$<8ga&KH1bxXcT_0km5}Aahnas^kfP#7V3@I{Oj{thGrkB?jI+*EIN=NTwqajgjFf zD4Q#cC;uG1E7H>wsY#~O7-jSv+0;*N?8abz7QwV-W7s?)1*+#y?lzCN)u3jRiAYa- zG!nSYB6eSlkFt~ZQX1r?biR|1G+s*sjpsHddt$v=_p3x-c}aM__b+Y^lnRrw((z3! zDswzpy;dl(hzN`EHOl=F5gKLVS(J`%F>X9c$4!eZS9wfrnJO&&@hGdHc^dESZ_C_( z2~bVRs+av4CpG!jl%~hV>Mfmhz7eR{ zTNhCzF&H#fj57}~yG`l?C)RR(iC9~2vbPgcV%q>Dl_R81OTsb?;cL}+n~-gPJ4uon%}hr@iC`!%8_yaM0P+^^C^n;FAMMLRGe zCV|Z>nie~otSNjsQy;~Io3~40;-2%;yIg;DRlDN3MXGl6hB=cn(oX}{V0t^0M1D70R%wLTC3M^D}pTL90Y_eTw z_6lnU)ENS`X&C6u4E4sF{_u?Ds{ZV3sR3kH(eLRkg%r4|Ba4ap#Vu{o+%~BGGGM8k zfgXaYKJlO`=t+gZJp_ed>NCW%>EzmliGjs=)}*ole3$^*lWA-PW4#bdqf^I* zoz2{I@$Sc;XuQXkp=7_qHB84+X6sMSL``F*>k7Y}$u2Kbu^}YI%=ol0(S=RKYRs

g>Ve;}l4IUoO!1R zYV_3DQu6#L|{=(uqs zUyj3TaYk?AK1uKF9zgTOi7XdPRq*C0K5J4D7A+36BAScU2ZTlV&JYc>E+bj57c4j~ z;18HgCAE?MitjTcA^Ue^eAZ=VLBW?@eoNB;%(BRDWvcJuNJp$G){*S$#ipkEupxR~ z^})QOjcTE^U5zd8!eafNzy zQoRn_Q{)1GTYlocQEt;0-H%GPLg|97f^NaQNJgb9(@O*)V;i*T4c_}IUtdXlufv-l#g~co69A+Fpd+8nqD&mn&1d6{e>R)a3U>&!I#&7{K`vXv^W3WMok5w; zjGaR&#<~(?gg&?8nosB+0(NcNPWm2#*O9sRMbxzzjBfonsSxYW#mB4uIagL?gll(R zzuj$Czw;5UdmTq@okp_QK9a$gRT!g`cTL4=IibrnD!XtJt?8{?t`WJ$OCBB2vQRGa zVO8|bKbhl49sNx>-5p8AxUHL$x^e5?B$7@?*5VFUy00S}MjknRjB`h$LOWs{>$pCI zZon--+*fjqaNb0ImDi1X9;tbqI8a49^||gpEbX`)hx@aamzI<}voqe}Rn`UHyx^Hr zCay;)zd=_pgimo7!-Q(Piy{2lLJhc-!F|fx87fr&F=y|1yf>XT9jH?d<)X(?D*PPo zTT#fJaCVN1`z{8El!)sLDh}*W^0-kDH#n3)zN7SknPG`YE-C7;_P7xdcREx=mIyR%fw*E?hu23efyr^*VUU?DT#oZU>JfL)8>5oUJmeoP)ac2P#yEE0 z`EB`0iE(V%K?q@+J6@sVzz?M4H$4eecFZEbDm!kG*OVW-$ZtCFxbkBxc~#3Nu9~;F zcoCH4Q8hR{tq=qfgvqEAv>H>5YQ8+7>|HglXeIUh7mk*@2@2IPMY8sGIZs zKI=K|I!S3`x{1krB1qUNXfDi zBI#z8Kj1PdL!xNQ)cHh^#?hXu-V(hhl2WT4wnX<wG~=C;xbzVTjLJI}r6z&(~$=vqtH zP$`@;>{b6!btl<}Q!TmrkIz;Y;*II?(TY z;dg%S3tff!nf`C`p?RK2rMX1kcX>s}|8Na#uVxGH8-+pmJ{l#c70(IG}B5FNAG9VDbK1&tS< zTyLX>FIE4oeP^z4oySqUE?@bvcKO2I>-&{`zP>+?w|s+u@aA>#Dm2o6vYwWE$@TTh z3++d3Lk{-3dZ(dl!`T<@ujqMb%H$<>&rVO%(r)@uIKK7w-><;hCm>~MyFm&g_qp~nmsOMx-~H>;8l2N}O)LjPyi$XhEH4&-Ij+(tlf*?xxu14|eJ*T%l2=-`y#jGlz7Pb1Ia7^y_n?>Xd7JDe^$Y4h4zHRShl^ zs-C?B{bT7o4AeN0tG8gLs(zn1%gt2*Ko&cB)y8aXHL46OO=Pe1vs#3!fNK(3gO zQ*LqEK3J+RE6dZ?>qjPFb5uY@%H1#_XEKbt7chDhCq?8)EAxlrRw+!T z^7Y?+T%Si=dhWl}op+2`Sie=ZtV!i5huFU^he$~9SJC5WaOkm!+B~tSi?;ci| zRxoaMo%FTCiC3w}!2foY!fQ0nZPvF?r6$}mBIx?x(rwlWmo!57UoYv%z)6=hx%cg! 
zzESnkdsQI#wKsA!s}n~D7vl6wnPpTUXBj*C+G9=0q?s?-#}0jN&3nkAGoJTt26ZTi zy<*6|U`}wHmn*kYJ!qDqU_1I7@Tvh{vzbVj;;mtx&WNBwd& z>Nk+#lb!z#$S~7aQfm~Qnql~3(m;;%3lEFq=}fjInQBnaxv0k!EWSnL^pgw0Vc~1A zS@hC#C1mDVTFxEB>``rKG&s*?c$1F?Jqx3eQ#KR2w63Q3o$k%}lWyjG`ahwOkcM}X zH}OSiNHVeTS=)y|ZTxr<;mH1*pU=Kf~M4ogJc2-}QlG@6{JKfHKb%-O!6+Y^%Bu`>5h zqF;126>)A2`vy4DW=*H*Gn{K8RwI)cbU3h?k_|!sVR4%WvQ#2f-fBrR&Aa_!H8jy3 z_duD^Gz&+orZL1Wi2DGG)zB=2$@LtorD+-7jTf64IZ_&QN66;QnQt`rMy`(qPD#+_ zJMe^)dczbiRmI!!w(~gSxydBmDFx2AL?bqt$NGRgOK>hg`b8!%&RU#{kebFs@-51_ z!h}}mDr4T#>Rg01(c$i`EYn3$rXuDa%irR9U4&*@=eVK0)rGqV@g%bH|98SSkp-Tr zA;`+Nu)yQ#n$Viw4eYH?h9l6BxVUYa2PqV8g%v zS&W;rSiPp!TD}X^;*0*AwvhU2pnp|B{cJ*gOIuTOSz8$YtHrXu9{-Q^z50i__=ovz z&2#ut-`S5hQO_l@6VHpgH-9bDMKTr5`Eb5U*o;kgOz#_6(n=@UPKn`Cq0NrW0dvcz z2|Q83OP1U^~`lX2sKAY4oo8Q*f)!WzBF))CSOE?{)9%wXm zDU2G_tj-c#=i4KitIj?&kQV9%rdOkRe-DBunluF_G0qP|a1jy@ch6AeKfYE6|3bt> z1kkX8e=0%}sQ@gZ8Z8%CF`m_wG zpt(Ee_KBY9Eo_hSSpAUgESn$okdJLnNzP2*+PBSu+_smme^|}QoFMl`SMF4)H3X6` zx$~fQL$j&sJlbN506DXw-fgj(n4;59w^(gx??zs4vDuP=$M%4W)eGtO{NI#pKFElf z=Rp>$Pg5lM-(<0QCW{W=IbVC`n$XO%E8{3H_PxAf^U%qB zRFn(4aa?3xwuJt>mt(q|+iF&}H7!|0-F29{vM9&q`X$SJu3A>PHZE*w3)O~O>U<_T zk35qazl2VdxH#XZjy=~p zXJTuuhe~zxcAg+{=8^6-NU5zJ5i;83H~grfmvy=hI-<^{Ad8WceU6d(MCv}8K`nKf zbsKaY7pIdw%2pcD$kpI&`*b8oO&11Ht69j76{!#%llC+#bLF68(xm!b8NPN|oJis+ z0?}d|u~?ivUasrfp?);LZ=!Qjm!RXeFyV`k z4z?LjWBY>Dm9xIL-{$C)qUvsNf2X~E` z>8ijCYWNW&ZQxC4>iR6xRa2V~mymczS2#v&R|;|&qklz~WQ_8h%lL`q#W>AEH#kJo zUs*jDe+BehER{-U9A@6N2~6`dF*f&9k=LylPmTGCL}zFS4=6^RBOz?C@cLb?PK>CE zYvVKyXRsp&Y{s!9uRl>sT~@lYcLc3>f1Dv$-6z5?!(0>dKPlv#cBx6=cVlohhrHV3 zB}?~Uo_j#}%D3;VjEGmSM(Rx1lGUE7M+NkQ# zd{NWGBq|TuZ=f5huyk{3u=n|I89EsG`ko5g?bzKjf=$*Z0CgO7b@bGflDMa1Hxe)S zX(OFePUnF$x-{BeX6gFt&a~38XxzRe)b$jLRIf7Ilyu%L?C)xhvL~?IfOI%;wfSV} zMef@&Pz321BguC|LXYU<4OPF|LU>h;Gx8m2A?T|=7l{knrO6GLQg{$a`A81&>w z_4hOuUheCNpl6p$ab0*i)6u{>x|`LbS8VF<36SD*e{_ciqh)ltU=D^A26gikbSQPn zzOHVyxsYKUoM)qZ3=|Bq*A`Q+`}8N?sq%}-2XxhB{T;A61KiQC?68lK$+Rc&96Zv4 
zQzzC`>-dClT_KHM(BL)J6dX3Df?v}&8C;X>Nk!5zHMY|1(KoX>N_0sw>Ak0&X-VoLu-WP{QdOeGSW{xQ+ndE4lH7NOrk6UW z?{LDAP7hPxckMvmc+UY0I zB0HAgYGEX?JRMg_x79yqEsSa`BjHS_)Ki+y3$f1A(6$ESZaiWeG#fBg(dkjE&c(}D zG}ZVvr*s^wGbpbqS{-$Ly9$A(Gu`~LU0oR3c||G;1E=Y!tW2i;+eg|ERj3ztX!-A? z>T-9SbH73_+LSZkIu{6KSsCxm$pn6h*1qBD9*R~LHGS=IHvLcvy45x(+hc4#<<$e- zt2OuFj#W1;LC;x7zPUTO#y*F1oSkK>e^_#wEb4}RRm+_HgB5cQS~5U9|6<0fd!W1N4US$hnucVv? z-Jez4M^*!Oy`g6)fksXbeY*&?*HJNr^H-^6nX-xp!3Fc z!$icEI$bVUZ}6wVwMX4ss-;MCcVB0xJ#QSFE7cOQCsjUBPD6L&uS389xx)5So6f=; zuA$@_w(097-tnD|IkPRNN3;Z@^C;|J73qIp#xIWQ#JLXY6dfi2whlyBORPiJ~ z;}99}IcUv>BwIu1z;xwkh@=u?o6Sq4u3>tP>wI6cb@uM?tJZ$bfZ|N{{4GsKZ!vZQ zXuRE!oWb6i+^N|s=5Ap;3nsc&osQZ8CZN-y<5`P$uY>YTo&V_QcWQ&`($mT7@DVFF zGo=B!G16h-r`-abYM7IbhSy@98;c_H?tX-xoTKz=<6W^{%!n=e&J570SonRXbljTl zZtGXM`66C-|7S~;-r4O+nKAcOyaqZWZYkMN}Zh5mi*UzO*Pg!K|_{yv^6Ft zTkuOIY!={Uh8_*otZAJ1Ic2MV)g|5^Gmi)O^VeAbG{x{R3+^UcSLDv{ub0nucip$C zVK9n0oceiZOE5a->hnkxA8%}xOxJHw2UKS2(rU(#U{iIA0XtC5{$kKRS0&S|oM~%< zn@v;~!3um?SCg@R`OEQd88!XdJX{c=R2ki*!d1R0yHq3q7 zE>2?j^xn&2UA|855RK9xRa_j8E196@(V29vTy%Xi%OqVcx|KKe_4eYsXuw96W0H*G z2Q2&STPe5&Fkff6EAYQ@xmaIZozd%Yr#7GVi>EKlp3Vap`@`Ck(D}EB+IQW zP7e>utqo4+Pwq(_K=t=WiYuDa2S#}XMTWCFlzoTMJ8f7i{YF_!jEFm*-8=A!iB zCRuGPw&3SD_1WTA?X{I| z4{DbO;o#&%2FqP_Q6^SBQr*_Mg)Rg=tq9n^aCH(1=g?TdHixrUTbs$o zd$1$Z6z_^h15>XXYb|!I*$C;?3SI8*helfN++9z&S6bYi&9P8AvL^2)y2>J5vZ1)` zLyglr;9`W@|Mhp*UR-oF48>*V;W<0A`gAgr!60MXqt|&`gWG(Tgt+@|&Lkg=&DR$? 
z%OJIL-JORCx zyIR4-8@bRPilJMuPjhw2s|mVgEOwU{a8yUUu!(}{@EWY>8EI!P%Bvl8!{x_N>L!En%2q<|5ZKUIsg*1Do2$5Su1@ z2D&w=@M@BHUKt&tdV*UFk0$rj0eEl5qsBgIX$p&4O1WI55b->A&ihEzz(2{-`HQ74 z%Xv3#x`D!tM$${Ru6P)guZpSPN%wCMJAV17&19|f`%;}UtfWHm&Q2C3-57KYbzNzs zl%vz}ph`A+-$>A_V)q&oWoqihUkBt^*hfd*dFXyD&lfUX_!#KibC1hnYnbQIiiLLGy!gp#YK0gcP)L7gF3O%>vuYghE5Z*iU_u~s`BC9eVLlNZ)Tsb}SQ z@3aRyKGw&N%y6zQfas}`Q*!Qv$&wuPmnx!8KG6jnz8+X zA0)){yx8Z;#L$T$197I<-W#Yv!;PHste>yz7036yh?#U?uipJqX*nF}nWG-K%9tNm zze0PKC?nTW1s(N&1a%F`y(sl5CD4xCT|z#OA5#-P%x&iry@;C3iaMtof}?;@!dK|( z6Nhy?ww5`pL&?5&>xT)FLQ_fL0XO4pD=e!%j$K522Uoq6qPOuyUwf#FC2akwH3Z~l zaK*D_5Imej2EqE|G6)`mGRVY+J`9nuZ(x$#XAf+s9JBR7Mx2}=Yzwd^cj*0(is6^< zY=Z;IeQ^mZCRJFs`^20@VyllaA}#u|vQwM0l0Q*doWru{C)^JFkGeA?ak* zKDd@)kZ_}2w|of>S#+f1DRhu+LHQD7ACN6-3((c4_f6HK7jtw)DlM(Y(t5z@ylZpU zY0Jvh9oGpxwXWVr$0&{CV{MVXfqL6ZE68^|i&287Rp#J{jN1O@3Y_W3zv+L%d+jZ4 z%{6W8IrX{D6Wl>3lSbr zm&ElOTaz}))KO|r;^eqJ0^XBYH zW9ipk)5lhRddg%PTf7IyYOGy}u`x{y0i)QVA0g9E8bN2~A7KkF+>v z(F69a=3%=Vn^Bm91^s+Qx#@m+w}h0oW%_%(-_9QQcULNc`YQIKr2YyI;Urn}U9y4`s*sB~^6do)PD;X`h0o1WU-QddVX zu-{~inozOt?H9 zpiSQ9p!f2XHKzLRo}494WLn{WF(4o;wXxuKjC58MFcuPP({o_5VDC9dE2P@(YT;#z zzR7!PetmCmGrs%t-6XGDH+?kqqISjlGJ(F-)|i`ST(rRfH%nyH$qLs7HBv1YY(_>X z8n{o3tpryH_dKk-(rQ!DHR;L!2}^w`aV?SS`M&C#n05H>yLMNNmsrH>Mff5V+9+PcuPJ&9K2?c)YH3fy=@3ZeL znQk=G*P|{4W^6GsLRon$(an0Y%3YIhpG$46y2)2|`S;C8@(=k00EyPNL%N&IQ3`ABl5V3wS(-#KT0DRkzp zpIIVHGPcLO*6RUW2<{+D6Q#N=7CY5`J6MvLY?deAjjNHBOM!gE?up_ z@e61;`IWKFBS;$~o`NDoHiLs6P|s5j%(5iNb5OXXi>G^BYtnK1jNOz6+d&Ihl#&W6 z%&}>K1CX{N#AhIPbabv)VtWbS#LzwAbG3ce9^fN5zsSS~M_c5I+beWa`>`mS_~$ZH z7HaAeG)$4DPX@`0CEX7*wj41tNq5~FG})$)$2(0>)zRQLcuDcN%F2FuI@_J}9is92 z57ThWB0TWWCSAPXj5*Sn?CQlQg>;@KuMSeU6#x^Npn6N@f1XvXCGRX|?A(i2)+SZz z+jCc1%|x(`fzUhDu45#HgK#!^Q%7oo4iTHu^LD@%CHib`>RY#4G~&FNXJ9J7bFBcIKbGJ~y!4^Ai0@FFSKG{Jg%ck1Sb(=v z0Yfir(+$A3CUAmATAs1PoX!$YlF#aZiF5YUW=EpgysXJ)VKS!4JLhUKBN@}YQY9C4 z&^ow~M$){}xz}KkBWvR34*c+{HI1eU2i_?&oqnV}6U8|PTUaLhBOM)mJ$>V^DTtl_ zu#;BVL#~PK1;n4Dtm`lvLlMuZ)O~ 
z@?IQG2ZC1V=?UISE2k(+EcXNA+QJzSPjX(Lc6^aCVAMN90*RUX*F2t1|6hwlR<`A zWt{g4u2<-05L8~qJs3uI-<3T%!IAgmnQ61am+zdM7cbLbgpX!+LE1An^YM;xPyD$f z3x4yyG|2sNR@Ba!)8-ydo7uD?9@#mES$jrBv5 zw)Bd~4B$Yga<)Yc*Bj%p0HHFt+>2b=e)W$f$MlaVT{S*c(_;F%{0JWtzZis z)!A0AOx6Bmkf%zjp5Ak*!%Q>Y+ZA|(h0F~)`m?^vh^{P|%SElrD)J#Ss;|*b?BXQp zi`BP~G&a{F*0VD-0{nFdFX5)IQJ2Hi5Y>|BXf^o?*`Sgf~Ky}U4S#BPH=G4GtCv|fQC%P!A4_O0$#dDojTq^D%qQ{j z4=a%o|OhsoE?kmVyhsa>1k@xb)lT@!kqhGb*h-YGK+Yp+Rq z|9Fq;xUKyenMcK)>r}QjX}W~?_-BZ{Zo)Ku<>@N)rwi2q5Vd-?FK}2~e0Rw_SsE90 zs4BSgp*fJ}@|r0eoMO^f+{!F@K-L4ES-HiG^cl)5ok6{DK9xHEwRjAs>P4(qz7qsFx6LwEy*Sw znEG)jn}tcJve2`l{APG}wxb)*I$M)!$~864jcPBW&Ad&2^tOaeKJDl;km`n``_ z56vPtmG|N|sWU0-`4!T(@o@uR?y<~qz6#qSX{tPSjnsQ5f!p+?2Tlb(c|-al7^w-J z{bktU(t;Q3Lp!9gV{V`CNgt!73~fz{1gA{;ESW+&U7V1xKRv{3>E zBxYKw*S7sgCa6w;z2PPdDG}SN7-U#=Yy*yDnl(qp{t5=Ey2_$HYVF*KjCgL{O(B_X zvd?kjUG5?o<9;o&6|Cvea=*Dejv4B&@5KkNFo>EmlW){{^~4MW_|}T^QO<_Eb7b4U z8X*OOPhVJW+q0YWK*lUclbn(4%m3`2KhFK=_ti^%*qp{LU%>TWlM`!ful{+ersu#^ z-8q@;9L%>A0;$CA_f6Z6=aA#k>?5_+)>J-xgTXH)=uIJehe@g-J5@@-66yRurtAL! 
zYl{RafBo~3^2JMk<3{_g9+Un!fgvYWJNX{j5&^p<-ihz)eqLN zdHtAi%X+js=nxqB9%1}ByP-JB5#)>|G>f2y)$;^7Xtkr$UKsqi!y(;M2Ixapz}=u|85Z@GfwA> znEtQDkZccOnw7hsOa5;2WZH2ZyST>4;*l(7-GRsXKZ_};o`^VQt&) zAlE(p(m$4H_2ab2H!2!-zbb6OaiYAi2TvcDb4hWy2#L0XLvr)x+d{{Vi)^XO3ymHA z`?OctaPP@}2XDnfPQh|~L49(!orAu_G&z=PpMZgALKT8C$T2RpqW=o&XrKVHQ6t#Q3uC&##}RNtM2*^&LALf~nL z@$$P+SFN~DI$t8&MGux325pr6rP6hN0f5emp@*(8_Zya;ORA6x8{|+x>Fch~oS1mu zIWxy3f=$m8!v$F)sgrUt-|496&}i!mm5EAgOd@`Qu)sW(9giHX66a9o0AiQ3Fpg6L ziczyWYmC$F-7%kNSz34f$+hn8&nZ7?B=8w%%DRk%iJ(Gz-OE-Lq?5$BY}*+(tx54x zYOq`PrA%BN*Oe}1X;0l0a1j?s@@Oec3XR>V?H}|bwVI`m60f@d5#s6uPzv8Uqzmnm zPpeXB>_SljJ(ehD&Cp9g?n;B=Zrbv(%Z=Q@(-KPR4kJ~2EBUT)w4bH-o_=-`%1 zlH2^7x|+Sn;@#xNUxMX&@GTCA(Q!+3G%g9ZzB?!JsUYe;cA9zUtK16*;UOu7f17qaMtAww z=~&NRLMWPlpNLn8YV&XX*Fn|fb@D}y{eO)5irn%3@gLgzxk z^Vg2;O^XgJp`Tj<0F}ED*y&&UsgpYqnLeLLI;AA*`##)M_KA%f%bjM9y|dvu$%5;t zs8+~&al(33H)L~8r`ck{{V=JK=(hcbwdSa_I4i`@S0JqipP|FAouj=)Tp4c;>$zhK76JZx??y`2U%Sgu~c; zhN(O)S3j~xiGoyvm@UN(eHw@)H)sdKh>`|gCgj77-0bd545A#aq%^=v{{8_y_YZQOc0 zG}HNdok7tt3t5*L>Yg@eo*UnL9~X-|BYNLr9a zvUdkvsd8*2Um&Z2`?QL*ymzGqv~WvD>egYcAJGEmf2>v-J$I4FS8F# z^WSGz&DV?T8|wd?`MUWb#kHF`;CsuY&mTfm`TUo`ac@{|)WD{=Y^3T#gLsY2>xVli zh!)NK`SI};pSa(UPkIS0k!TFGY14eVI=vh><8S9O3C<||Jm0Px6h)gqfWtWdzi+3P z7iS+DGkd5qqPaQ!_;uWT_^;`Q*$X@xME^spPj!^?oxnqa-gi}lLSN52QCEzwVezSv zyP7ZR=KJ$5z5%cEtvVttSLXe!8o%%vIvtvWcB#WsS4&JP5EBHuKna8gC;^CK!q#I| z(H?8jjfZw?-=2i7trN}THO7j9$qNR;304`#I!bXthYkfJQR?7WNhUU=u%WkD+9#}K zFWB_M*H=O%kgp^Mu~Af zw$L2X*lhLJ?|=K-=dTwZ&;Iv+o8B*d*+rkE6c1RE6isi$C#I@BH5lXdnI;NCFbx7T zSWhif!{RKs8C5UiTB>8UJ|6D%}$1so1`II(1efqJP&*d~Y z%ux!#C>?C+h5$IeuE9c_Zut*>(|r03N5Ik)!p|2E)92OnwLZU)Z1h~S_^O5rVB1~Q zp6nRJurX>5YJqV40(Nz`S-)!Tf$-sQkBL1AehB5gVV-+lG-W*{Gd&aFbC-trcL`8q zCj8`~6gFufS+)+{9vDi)hsT=A_s}nKi#Wez`-a(<$j*avSbfb{BP&JT*Z^%voxPNz zi?N?Ff(swRqt6TY!#wStk=n$Hw15aq7t>o1iQNK;Gye8BK>O*(i{|v|vZ;RQ{nE3k zoX{ic6G+u~A~^RXh*W}vC$Frb5ooz!`Pye(E$xkU3=t90TcrjisFY_3%EP6vYA=7v zl!%8~#&%XG2s>LjaTo20fR@B>fNW%^Mm_2UoNFF#Atd073M?uysPoPNiUVdsb2MYY 
zG_pesL&mL~(78r8`+Pa2jpN(UWK)^h?8WLt>0s44F4y;4e%WwL_IbOJuJtPI)gBmw z_`h6BuoN7YY(>YKyB5@hd zhN}j61OJBG#pe6n+2rqA-R|TUIL9pnvab3lUvURtFRNiO{x)iwAFJnP_VZ^oY#I&C zYyvxfF`0`i;qO;pfR`t)&CTW+9@DJIfGM9!-YP_FBkacJ5v(x*4uAoNmXkZt6rOnN z9ukB4Tey@fZpghIAz)*TVpwtsgaJ_{DCW1L=p}DJfs;vL#$iWK_TzU;MotH6PVa(iRuiIM`rd%V4{iO1weo-EHfo))eG9U?<0FN#Cfx91IRB1M2T4Zt@s~{ z`F8#*s!1$ps>&CO9l?nROnf@uzG+Qi@J}n;eIn{mhKe5r|4XyvfbHtn&7_GRXlV6G1CxBJzBYt z%>p55AXhcpR)z>$gNjnzP;Y|8*r>Ok`y*58N7Q%b+4z!R2bu& zb>Z0Xx6;h+=?>)cEv;&qaMOiq8`)9a2j^8x1Xenzj!Q5n(>MrAK-`FUSB@RWRrThc z9mfZd&}7yWa$+?VeMC-3QA}M^YSse1K1We$a{zFOlPm3%Tv|;pCnN%cY0qJ8!<2{< zycbIvm&WNn--O(-X{Wp0Mr9m!Pdcg9x&{{v%V}{vWT5a(?;QUkT`f8~GZs!XJo1uv zw+?r=$NCS2dl!~+Ec#x@t(O|RD+($f>n*<} zK7=X;KV62TA7M7Rcpq*c6nC5R^?D#BEc#Ri&$y4I zNr3&a6U~G8nZ&ywpYz`nN}|1>_6gU16cxcGba?Uuu$dQd?^u&V^XK<-AQA5lZy$;&AyX=Q3?DK z?z{@))Ls%bMm!@lnq2KiR~*Xr>I6K59?#nN}BkA#j*e-fu)CKr;e z4?mF0wkp*(_s|2*Ng2dggRSKIHA++3Tm!-P{A!EpnS+3RfDpN?JQui}(sUJ1O*r%z zIdh%iT&*s1G>> z14+us5I4;DoGE}Uy4B`U4?2@1mnK)*;eC=4Hl>DXDzdn;C8{i;Wk%#WC|qF$4GLG? znW|4QFOlWaG_auMyfhbr)6ZE$d24x#;+?{J;USE`fgK7bkf5QeL5DCO4WyNM0xqI@ z!f#|>SrFO#eh%mz^TbrK^Ouav279!C+fIY|kTnUW(IglG=gECb`rDSSBkBvVb~dJf z02-oyDrf*fO;zuQBe>o{e`iw7!(#Pgci)J{4`~>3(FW$6*UhyS4xLg5hzZlDN9nT4 zor%|N>c`IDCm&jDhLF_A z2F7IjHfn;(5t$=^F(@su6yZo0e5^H5bg0V)m=R=1Fb|A9EG5*i<6eq=;y2@E|{bGdz6eo4Q+qVeez+lYx z@NQ8FUy0BFVTrg{7eUaV=+Btg$;;%+@0f>iH$_J_yr$}eIi~SrB=9w#?>5l0F&5?r zgX6Xfp`zGZK-ypHjfAWUsu)mMJzKv_nM4g)G2XLzCgw@td#fxZrT{TjSc%ga9DDwG zb1lQw809pC0Tc0$e?H;wYg{FkiN1oxKu&}#oB;&PFdrUBtM*A%H7aDmJ9Gt6?2b|& z_MSK(3ObygMAFOLu$vdbS8$I*{Pmh(tZpTsD|VnH7)+oZQls0{GSWDnFR8}0F9zZ@ zmXk{ht-k`G1cmh79h@KR6Z!%dl7}9?4P+pz`o@J-^I%+U)UcXCOUqdrB159ZOD_)t z^Hxg66NdxiA{mV>-*|ENNN<6u10yxGTqLEbm1YyRUHrVON|c^TruGzvm(!ouY`$k- zPySfF`Q70hurO+&=1^WK^gEU&`lahsu_-2 zWR!@n-vR}xYJBMU06&31>Cn^a4|J?vWH?LE9M$8ZKmQcUIRkNkX=pw%Gc#q@o9i}& zVSV31-e%FzqS0^g38YTzC%jl7jzUR1W6VWE?HL>l?$)q{np2A@xWKwuoEXqcP;PtS ziC9N;M48o>kB;E1=&r{28yv^=#vE&aL?{o^?1(z3yb2`}naLCoGkL{&;RwzmJLl`? 
zTX;|swv^kB7NMH5v(O%Rh$LiM(;#kBn^RY)MLgJR)t(4w}t5MH^05-EqlApe6)TG^S0)q0M&dSmzwDqj84FV&?TRF;D%5{Np}5k-O0&Xb3Db)|72gMt z50qsK{t}WZ1hVE9s7NJB9WKIS?)ur%*AR*j%P$(9guH_hKKZd(IL61zV2{dW4RF?_ z*$deHrfg{LVV&%lr`zAw%Q?7b!^%JgL|H)4t2Ij=(SL3QUvl0?I%R(`JQq+Zuhj_~ zG5-s=X-Kqoah0>9iO1(C@@kQP!{;HDt4}&#Iy0q4`gdsufa)k*nQzy=lPNErzSPimOo5pwwAug}fiSJ)@XD`_OOsVH6nLeGx`TdT=C?!%t{b=WTQ zvVh00FqAjcNkb?^@xl$9(&ZmD@e%vOgE{mDQ;t;H#;(I1=?$fGajOOarm70DGoi3y z$GLR$5S_B>TIGQ@$}V+R>Wf5m@~Ymh4MHo#QCLuFq$g82{OhL~9|@2sb%@7dn`&Rp zxzt7J`wZ;VK7PHL{t?~l&tT-(Sv(7lC5af+l8{klEGe95mow$dRQRiHPLa@f@Ht8T zH=!Qwp|f?8zUT%dSIgojwK&#At{W)qqdfAG22KRB2Boll1<8r(AL{c%CrA%IY^gIL zMf1)mmRUZuGa%OJXg{wX{)PFSogt`%`2YzsLrZ6f{;s#hrS35S_GRJr=fb9NDP~rp zw6rHu2!7F_tEQ|Ad!Pa5IvUlwqi&f9uW6Ulj_X0N5V9*&_{lwBRB{+vBD6(;tde9%> zL00Ms;PgprH{vI3RrF`C$7r{cNAPDX`#h^2u`%f-fVKDS9W5NE%*nC$9shM*978*H^S zBKY*TrGb^OBu9nPL?T02Qb~iz?Ie8xlB3-sURYx+u=WuiYi*y>xW#e6bRc+3IuZFA zTDiI8ut!|b8u7x-Kj#mVqMKzDWV6_->~`6vs86ujv~6bDPFl?GvK>I7-`8~zX>vVK zX^q(1gmi;kk9zGLZM(K4TAKuoo8haVhahk3j2zVMh_7gLwiW%JFR2y750kvW97f6f7$cq(cSbS7My+p{k>{udRkUtm ze~V&VrAZd&20$Y^h(2(fCLO{{9_{<^Ic!VGK75U z63m73Sl-2D6{9pna#q|1mpppk96E;#WK_mO2s$>jj_(qZo3wC`4tZ=N@&h?7qY zSb!s8uoO7Kc?kMMNAM(tj>bd@?NU8H@< zx&yU*QCBJ-v_vGGAxRDZqg;OST&R9q9pfONUA>#a2c!9cqUj1%)746RyK@XfY*rPQM0ns3TvtaEfQShKH1rnHp@s{TP=~keBV&#d2`WLXW*3{yLrl!(cIoo zk*E(iMwBc_8Eiv>jz!FtO$qQuF{S8SDQsYSRMYIzzY>NeRq~m7@BD)#{;?rVL>`F$ zU}?F;sh=}1T8k&4iE9QAiPiH{W^M=V06emLp&FyaPQ}!9VicqwsO;rIB!nJ7m`U`y zki90BF9@I~`x)zuVMNsf-37>Z&HNSCtmO1Wpbf6ODjnewM&f{6xBggcBxJKqk*jq7@XK?HmrJ zg9QBGeiWfs5lP((qHN2dt<+l!t6a^MQ}XdK$os&R{AgBL0gOWd59CXz4!9Uge}_X1 z{2cP;Q33xe7;f0N;kx)lLqrTjFg9(d*)W{@26;7`Ky?(P#diyf0k-$8hyxIg*}a2n znxv8%jF`EdrY2HpiJjsEgEGf`2SDE!y>B>FV`_@Vjl7kK#p0KR(O@Xwp+e94d>O4+V5JB>U<+<^lLVBeR6nS^rMrAZM^I>mb2 z>06|?#XsSwp@2hd-WrMZBjSM7h1+8>v0z7|dQymSl1y@PENpv)d>G)6pdIPdA9l~~ z1GNB!$PQuYX7|^myg@i`lh_o1#)LO<HF z@C|M|DO=y8+x1V#^0=0z(_Li#rs~EmK|!JxDGv^mhqPFt|>y z$WvvXy_s)MZaYr&g{hzB)3WyT1k_ckd6hRjA$#-r=|GdfFF*pl=E6)WBJ+w*h{lv# 
zK343OLXhh3_oW`=gB)=}Gy~ArR-$=%5$gd7!(vUwycJShT7urm3=T`@fccBAJ$K?{ zOw4xqwjU?hvfseGG8*ETDYk!5R&Iwx;M8`TXtM~$J)=|y0|`ldC`P|56`1F3@@ zPJPG@oucw=qIAiP7*RAGEVpn%VbW6k17DO<&%k1zFTQdn3$PQCVhn3PLp0;fT5qm~ ze_g>F0rmMbO6X}PF^*uqk-Op{x8AE8R*F0d#+Z6A6F?USjy!{J%JUe|Nx@9sLM-CK zk*y9EQ_MUD)e`vAb)s0QXPWliSwyKYizo(WQf%nI7F{N=BD@$~q5guj2f?diY(*rd z^&2d3;dj*V0o1rWbfwe56^SObw=qkGR)R>{QjjO99@m|F^}Jd^@2HrMNTc_({;M>N zBCijYuS%DRYd<7VAE*l7dPAbk^;2JuBjAfI6FsIE{nNad()@ZeUg67aQrCyiD0 zD-zctkJk=R56LQceEhr6oR zPc!LwMA!0*04PI`5LQhmFXEb0T=NnTk=BW+@)L6!hpz_simqXjF$nux`m%K4B1>Aq zoZ&L@mA>yF#({^adRqyjt&}u4I&|xNy35Xu7Fy8^>e4q?n@C>Bk z^YIkO{h@0QZX>|4LMyjo`elcxgvRdC!#y;cLV$wG zi4l3&e8w>I0F;91*7xc+qTwY*;^6au)kXw^>*))XgVf!9!hHy5+@IY8=r_=U&hCND zGXB7CaIKQ5iC#=ng8in{&wrUV_zI^UxJPgkI3H%$(Z!Noi6JQiDSNE*Z!77bf@gn6 z=nr2;_qf0~4B^Utd-i;=J(h=&z+dcc*bCj_;q-Pp$L5qavg2N2O0rKX?y(=@!Yp%z zIjEOiA@l}>`-V-KlQH<8=f&pLqn?x5%T5v>l%`e$DVJSQ+z24vXauYH3OIK=$g8PH zajAP*dAbuu+6kqQOK~Q|r zX*9)e90M+0%QYMnr`tuJy|6=@niW(~G!?+jMD)y=CIh>;mJ9f%OZ3y{^?Gg|r`wyg zSkf^E0DP_|$1}yb@ze~SU^mJYEzt3O{ZiW6H*kU+UUr@ZpH0)!D?X0%F;_>E#>{OS1-=aQtVx84s-`>{ ziazAQ_SP1KZLp=eE#lBQ<6aOCC|Dq${T4X^WUjivj9+J~>n*d;29q9bWYB3jFCi)JhX(gCFKic5^RY%M)1;5Z{`M#_TdQhWZpDnwX zt}A5UFK##HgiAPs2%Du*(4IQyh$+f6w_TaKM$bq;#N=cLix_dgIU~vJu=tZ3hAw2^ zZm+16;apwzJ3$Y&S<242z}J@TiXuk3I7b^A#1<)8z7eJbrquwH`0qYXIQ1LQ8I#rg zxLm>P|07vsYYE@7Jj*3`D&ho6c0>%u-9E4Wg3_s(Jv}u~n+o?R^0{31Ufxz)cvbS@ z$btgl$^yCgdkoSrty6!FD=j?UB|@|mB^lNJKASzmn~h>8g2jWWS=S#0(5i-$<7O=} zV&{`X7d?;8YJE5Wx*JsLK%t_vcgAgtn+s*`jNcZTbB&Th%gD+_)*~_Tmq~=f%0Q(u zy{_%<&`$cjvr`|((M*uvuV9#=6wocUo^rI8(|oR zSPd@-ppZ#U@Z%AWL>*961bTx1)RN?4&Sg4LA=J0i&16mO91Km!(;_|gx_!=YCX}Jb z`oi3$f6zOEQ_O2P>Nw-28*X18^}>be4w(JGRm&-Da_R5^7Wsyi^NMCy{$zzrpJ{l$ z%ei>yi)HnM1*k{7CP4(qkY`>RK@df<2-OlBFB6MOG2b_e830*8%nz)-W$4FK@?MH> z1K}1?aQ>d8o|u~yKM(nGtI9% zuLysoiSaWFM+VlU7qs;f0YW?zEO!qLMqq5kr(jQSA_=AdmEzQqycVe9qCW@!9f|5} zgqa#LeW>RNgMtyCJ;D)A%7;lt$-tN7sP?j!QM-;iWT&l5{wp>c5pD+nj@}v|2l{NpvK4SA7n#Tj^&5%Ow 
zVZ8-m%BwgaOMEmAzhfmBo%vQZn-UogiodIt{zPs{WKhE?zEN$!ysC4(3=|$M2ZgF> zULOCAhl1flNtK!^hm}_7^p?@>qI7t`I)q;OI?@9ayZnfxkoUH%)`ieG81;Pbih~)> z+kSEYJ2=2VSujOrDhuqT8bk~};j}@8rIs46BselE0fg?*RLcoUJeyi8!zUJln?*vm z@X?v6&(VVx^`bG!z8_J15hHqW{|X7+$>}O3F|Y1CN=VQxut>o%pq0i#nwoR-4>p3G0g-}c!OU8vnQ zR8pOTbNToVsuT80+_D4D)1D#mUp5&cKZx2XAv}PDCPB@9;*ElR)}~Gdx}2_ola-a` z`i3y2{_&-RUA?2Ja~}FQi()Z)cBBL4^QhEyg^dMy*JEv}C-kV>;~6BHM~GAvA+vF6o*$ zAOb^3hSUmgh>!(q;fh`p)GwBnU?bELY812Onl`eVp#eI@9}Jy*ON+b$xEcTr{b7?Jhwu`jm}VTlj1Tz&=W8Y)T6)^#;NK`*u_&Yuz{NPon(^ zYa-DkWu!3GHK4s166G~NmRQF$GuEwqt^sMjt)i^aUC4iFE67og?zq!FV*fp|$V()s ziw7sNH4j|=-CZevKC92e` z0+h{f58iVar;g%ry+VOQaEq)*E)a!EgfjTokCR{9K7c$#oHMOL6U01GuLmYH+zUFy zL)Uc8j@t23w8_T2Gn3`y9W^l^!gsDmH*f0hl8!Lk{qNZ#%SSh z@tsl9En9}TC1LPdQnMA_vGXo$oN=Pqf3bn;1`l5_Ra7@BL0t(cSohrQ?C>cLK$MXL zdm-=^*$a^CKA0E8z7q0vX?|w7p9TWHXCiGWO2HP#u*n*?Q(V`S91(Xq+4SI*kpHH0 zE>DR-?rx{8vZ^Rx8VN?f$rv;O;eeI;>v~oi4t02YQj*0(7_#Ys_`Pgb`?`6I4bdCK z&uLvTAu6OI(E%C29SyPlBF5a9kYl3xy@~NEY z50=GID;0h}U+*BwvwmRMq2VVO&{rQ}yPN|g5M4&wjsQ;3uM{8=0o`mOdZQZRvQGSHh{mydQtYJe(>Vx>jV*gkGZzg d%lcfjVCT@&?P{e_p3tLp|?(S|(><;W!Y;5fI{mxMG1$0@^)4uIs|1V?op9ZNDs8*+ucHwum+1t)`j%mEH->H`= zD!vMMKG-i2f#t-*IBTLcZM>wFphuB8N?t^=(~#j8!U5Bqz#jl#GLs+LQbdLc2WQaF zAu@@%Mp!A3EhN0oGMt6TT(J*LP|>C%e~8{Aa0z=!+`B<*pmz?pb=ZYjid~q$l6H~d z^@qCP_D1qTrwe|o5$8X+=Z5bDjv=oCpA|YwhWT64GPvKx&wAk*b6AuE$cOGVsUzIG zp)(eGKronk_{#{gh;Tu65P_e)_}LGlK}Apn7|jyGSqfgEf5ag3C(g3C=R7IV_+N|r2Cxr*T|p@Vyo$Ukv@iS%?6<%=8IFiA7xrHGcQ(ZT2Az+f zD=01G&>Hv`W&^tK;6DLj%7J9K)x&>&=py)o$bO?g7ke7q!jSENj*{_yL+*=gB(y2+ z(K7B#ltpG>HORW6UmMhu{tDxk7Wq5)T-bM`cprOl;#w)gcnj@Am|3AO(a}u+{1B-# z7P&CZ;dkJ-IAOfTtpNHxpogF*q)vbAozd|m-o)sI$}sLrd0X60iLlWLM0XkCdO(w- ze+K(*FUj+uDT8b1+>mJ>A;a@QE=()*^B_Bm--_^K;d5ip z56)wsPq;0>KOiOYcyxygj|P(ry?p2w!tEvDw*x=XFA5C@x$%=997E?9?tj2gPzIS3 zZWgeWFy7(+6ne4HB*=Q8qnpRrQ;-*l0vtU0Bj$m&L6=o>*2gqYVZ|n!a7GyiXF0db7m;=a#SqZ&j6J{|;k9{g>sE1y2kb`#FPWo+%d^z^1xMu{yxS=}~Kf`1g!fmkB7xrZ2LpKMd z<&N$k>1*^8;osE|#ys45;dikf7V)(qu0_y_xMfCnI{Xj#0fdtZJ_h?`;Dr66^!J=F 
zUdS|DLB9ZUUr-vi`smcaz7_~`OX|hp|0r%Jp_$OR2o%m*o{tLM^v33|`4pTd1Q{w9LiQYO;50C{yF%tvIxxS=ad8`9GVw^EW% zaMiS8@Rtc=Ahap+wD^yOe*`MYurK0wr_9?R^zNZE3;Hj#p9m8@C(`*&>gZu7!F?rq zv(O8Meg-*ly9td1=fO^7#qhTWS_1k7w*Z;;Lh#Svw}hAK&CBT%7lAAT?h@4d74BRXYscPelh4I!xXAR31fx9zwSLf`rnZq zNB0!)M^FL&8~!cG#(RiF?vDB-JUVTs;{t5KA zbiWO)OZxBP)&jdD_PNBj4!^GmXB2#D>^E_{4JM%%4~}D>fxH>AdC(K^^U$3SBH%sH z(M>~i?&3BLoh{HD@L8~Xg9GS4!0jyfj{Ox7rYq=xTVMDO=nGQ<{UY$r@S~9lvmU-T zbi3q*n>TI)&>4aKo%H({zt2EQ^bV3X7u*}mG^T>yL^l;~rO*-PN&?aLBRd36!|z9S z3j03vMiR~_a6^VK)Rz)(y!6)q_hQn%5&JFty^^}k@K*u*L*#?73v(L&4*u_ff3Y9J zZ9INb%k(V4tuJ^Bx`9r@j_zl$Plh4F*#^9j36lc%F$P(J>;M1%k7auPr^Bn_op|0k2wPS{xT-TzBGaWY&xZzlGh!Ebew zgtenVri&Oxt>%|3-!f8Xwm~cMbuz8<5}d97 z&k}Z3+!M)y2$x~%DtDxxB+`AT{5B$&3m7z1G-X+p;0zMmjH1x8}!vgHyKsT*) z%l|?P+Lgh4$wxq&;MawC-jlY|Ae%vVwoH#6d}7Jhfo9PwUl&62QC3%>Z=~K=!uwa+ z!wh|(4uQzCq`~_5) z&ET-Cznzj=1^kPXadtpI7W))kAZaSXxQA_&aZ;SgU?EW(T$?$my*Jw&3YYir#w-G^Vb4DT9}=9c|Cb?4 zC*u2y>Vr?_LJ?9tQhmLm0JW`b*;Pt&BGU z-FDdTf^&xad7=|3{klrss#5ML>!pN@X9+Z|Op751Z5B*+>363gt(EY9PP#{-6Deh( zxRsXff_yZjUGQVXKZD7P-;%`L7P<{OM9TKbbp4h5C1?q0-%6MjL1HjWx(U+FV6Tn6 z5~xl(8_BSQcmpU&T*7q0T~FR#=_cGa<5*J{2nS3nSzf`|Un0u`bqAriw>IdOA)IpH zHet7ec0s2X^4HjFU>C+KfoM%}pN_lHbj05`=~fVXLv((jkO8+Ql<`bZz~Cn_I@1h% zQTPVnznNo(Iutq$@EZwo5zkKe2Vekp-ISB|r1Y=LGM=@D@UuyIVcmm)?!}&o@<|2# z1{xaTE-k}wFr;1gngzFDbn8g{8#0a_@SP1|2>n_z9DnFrgRC%N>^1Ou*gAEWva;y8 z$+1JoO3Czi;3k6pVF=@zYy(0swTxHTg_#JP4dHAeoPMM`7`@k44fali)!q=sRT-BQ zYyIIT+J!4*p1?xbw{*q-fu11F$;h2$I7_g5%kp!Sar+ta^iIn3An!??YtkW&E`a7X zq%9R`O(JD8B)wz^*WgWCMjZKNUdPGsmr31CGR&N~A4hKm9vex|!|lSGh5G>XeGKle zr0#aybK*a_3|~0+L~o#>j9233hmJ;B&WHTLPclQDbufh65PxH(-`m2EH20+2A$0rU zKMEL4L3FR7lN0$71OW#5Y8n1&gNLH%uEkGLDO-ih%!D(xbu zw&>)O>G~<%>l@O^~vM z=vR|zDJtb|$co{Aoa7Hk-8?ev%(xc;ZNXRE+9S_rOe5|NhB9~}{o!mi&xvmk`q!oV zNQ2+XX!zou0so5#HxuvzOF%l=Uq52k&0iVMed#tshBZWnJ&L$af?^;}_XPb%+H0Zj z1m++&n%}r>lWx1P?*YP8Ccd9C&M(AqUg{)I5S#wzkTNH!FN94<=Sg9a;q8&KGC1Wl z zkCt(CBbm8q{8c7TnWW6uAiII=q~r$* 
zMNDynT=0zzb+DW~-UL~MK$<_ukKwkP@N43h%g+BQnO9w3r$P7$M79?FmQX);xau#rzd{nagQ^^RZEtqkk^o5 z5BU#i6mnrwpr6{{N4U))JWo5f@-nRYgmoXCtwK?n;po;x{v3a!aVr9U20WDMN=lk9 z!dFITE>t&fu%{uvak$sRv4=sX>lkTfnUCIt*B!wJs5|~!LqkAzFdp}M(7gXkSgVky zl4ZTwP<|r5VWdYlX|TH+%5RV%+-1@|gH#?a`A+DpA`QYcNFdh#I|yq6HW#}(K4=%e zP|l9qdGx=__y!YRU-$tcJ=nJx(qRppe~S#^%(IifM3z=6?m{+)a0(mr^)QU|jbz^2 z>9vLrGAO2luS{7ZFpseB$|W!B)4$^-l>v>yGB~pq;>VyB9|Z0@^8@! zhjY``i(_Ytou(6cXDAc)@rV)2KqZ#ydA==_e=gEE)E}3;BQP)&3mN7*lez119}P#i;tE#*_4S6 zH&jmOsFyOoqf&U8jt8?%MRSlWQ`EBdJ++yK9yu zmLyuJ=IBO{;rr;o+K(tMxHcPuFo-hVu~9B&{%yk0!}$DcjC5je{1R)umnA zDKpb#%91@?^A4Nt-b?FMQS;2uZc)m@S~B**o9j)qWOc3?+}+>byPoFJ-XTCFo?+ZA z+_g}`6G{1}2#0VdEoq20hwXKK|3snH(`cD1Sbk-RK9$VLx1YaDhjHE`ABuGrJ=W2E z#PHhQ(X_v_IZ_624pTTo+>1n7Mb2u+H7(`)I=O%4?BV{Jdzx@>&3#<%;PCJfmcpq# zM>(85;qT}loz&v*9PXewcxy~Xnt$5E*tMGCno%n&PCC5#)4aXKLkkX+O5#z@Q?gWc zuj-}+(0H}_o`;>PJ6H!A$HLy)=G=XgZ^`W%n}pc8s|?ToKFi2CE!{Q$;itM?CHip9 z)!W-Y+_k(aN0EV=_qK`@nwEp~it*9W zKRm#n;&gP(7VY3@@eb88rLMbrNd$bjuVXKFZFn7P8|2b8lV>;?_Rn9pm*(m2@0>iC zU}%lu?&2cWQcrVq%tW~1qG7sLnz9w;f)0V+qkMZM(U!P4JCAViN3I(&CR_qB;4%EnV@1&cUqd!Qy&=LDAaStKP7w9)2*1O`4>kqIRN6H^COjy^>&QM_zK@)={l234D zKLno)xPjz$GNF@Fx{3P|-K4=T?zV&xXPM%RUEI(7C*vjEGeffgZ;(y8>H1>M$^mkN zJV30Sg()bhuouE!*ueW@7kg!~7b+p$#o4PkLn$Nqa?tXig5*Uwx}o`5O@KjGN!rEw zvZ}#d%rj!`UK0cY!(;eL&pr^r4FYKY9x4}(>C+FaqdiB=*rj>@UU{ z+#|4$H}FDVJns}K-NbpsWH1fr`$0X-*|>={ecl4BR^L}5}w#d)fS)vVv?ttg(g3A*rzE!Y+m4G{Oho4^*Z z4eSKFK@`{v_Jaf9FgOB^f)n60I16HcFqb900u^UUH^5DB8{7lJJcP!AN8kx~3Z4OB zo=Yn1FAVlL>@N+xkPGt~w>RJe5NAN&fH2>oKfo`^|CTgCn(+L*Zr3dI6(E+qETA

Yyet?(1vAi*sDv)G^qFTV4421~+lO*-Y{+ zBy9;5_iDi)1hfI|KzpE@4%o$B)@ZuGceQg9XH?y#o7lsP`#9Z%Vi)%`1Aus5RT!~{ z9t^~DHsR7uxDUrZ5{!~=W1wTfI51wiO^{UR+nY#alfhIl4NM0!fG{(mv!z{}LmJN^ z5;T~nq+I70!Y=}g4em>@FO&Ro=qhPn16>C;NM3}oQQ9{_jr;X&@H?bzCsgdm_kbuM z&f4~Y{Xm>MMuQ{Z7!c+-^aK#k0G$Enz*wskZo%DI%3Pp{40?&Ni|4qKg5*FvzachR;yx`k5JsHoqy_1Kr*sp~ z5oQG999-OwW(C=SFydJuVZ_<7I7bxE^X38KPRMvyTnJv=ixvUm-bpvbu#5A{5(c+Y z*u`^E;yzp4k(L7$fIkqXqNJ516=!GS8DVjzS4+AHy+G`BfH;4t4;lcYX$apKh%@nK zpcSw;t#Jzm;*O-Ply`u31mZcSF49e$>GlAKBolN&5=vrj!!A7tNYzAAvRb)HTzOM$Eg7yyc)F5mGaS@W1SadFL|=Xyz&u8E0!M*CM;^u zs>s!affKM#9p7l<_+%OS7WMm3@LB4J$C214g6G&P6|dB^3aGK&zvIq&2LrnYb&p#= z`}3C8?RYcyiOB%c<`N{YeFim0OzC zw?W9aqTu()y^H>U{t?lQ+m9UI{QIR#L3KW6eSP6@`8~@%9o!@MpP*8eYp)O8^)H=c zkxT0B-l;M#+3u7%rf&CKU*5(|OZs~Lh^WZ;q7zY(q>_l6|e(aZH}rg;sM zV&6HgK-z#jfe$NzS{-(dN_8y1%e}pq-(JjpEpXzcM}aYAPmP}p%@=j?N2+_l`}b8C z@FDyD@OI_L`k(#T_Ir&aKO*MTj_6&=AO1JIN7$CY^;h4Qtr^gx|CC1?798IkQ7dk3 zy#ud61}Dq*RadUh+1`C)r!~-ad-9IliG5O<+I82Su6{P-$oJ=)b?@?}Iq*u6rJ+}o zd3~-_TNpUlYeT6;0}7o_{G*B6Nz3#{b)5VfoQB?N?ViZFTpvv!5pb)w@n8y2$>rvN z6Q?S>W%>KL_}+pkemw2fHm*u^6T1d+$}%LT&JwCps_eH2*y@;R9 z+e}H3^myj3EurOBznhbE@5E;UwI5}4c7u)>IKH?;H^S`E`D}%heakJ&?z#2#swd}f zcRko{dZH6AW`K;ti#c0-vz!?Cvd;LK@JB*DQ_mdn%WrJdt4-rT&$^YyhJw-=+c(zf z`3!k~&RUeQte_Xbm2dTKW_am2EM`#Q-zQVwYpiW9_CnAyiE8e=Iwodh>-N8o1%n&d zcfQ^nSmDOl_ZJsMESx{7^3sAemrvMtwA2Sd4?v3-^=VoX99sS6m%wp{6xn+`tx~7; z&l+6>l5SocdbWW7yY9&<1PSy6lbX8(Tuge(X@4ruPJ(9H$*F#)#);QOeo)poT`VY@wrMTD*_RgOulU)oSn+!EXSd0FYSh`tArnep&kzZ2 zhBnQV<93wKYdMs(jOO3^MVRA2+2CW?S$>`+XJU79|z3} ztLpNm=$^)Np#t+ksjd5hBGWH9;!rOzZ==-3A58kSH-8i8)=8g7UA=X7<>AoDhc*Vb zwAAUDzuxI-qx02+?ijY@lIyISEziPVj&F6~3^WM$2B1FZ)Am|{Nm?cEQr9n!yHsW6 zfrhw!KV0jhunYGjPU9y0gr9=l4g17=lLoq9_wbJ2t@YmJ+BZq@j2pgZowzo-@7h0O zLn}Ys;oW3q!1=jlGMDZ3GSA`Md-lb}O{(?+blmu`$IcX5l_43L)|%1Q_vL}8?9VD@ z${u|DV&ssE?NdDdl{P%n?55S1EYJ1qVEgR(YUN+`t$RmkSkg0fGFk357PNY;2|k@` zXIZfx zl&aF{*tvE49#0#5eoEqPonv1ddYo;*ge8k*g`}Xcww<$a@X7ApSZ-xER^&qF{c_Dv(&aYc! 
zZ1uCh)_kA4N6>MgLGb3l_j?l!`M7^rzR`VO=BY4nZ{sR|pRJx6_2s6Im)CKDru|N4 zc%1Lq!Di5J+hZLc)yT8>+NW$Io^p3u)i6bolrF4a?4`a{lY_ zVaR$9_XrR)9CTe;;a$ruEB1_Qlw{*@_-DPh=Nkzf0phR=yaaioH+_Evts2|dZJyVV zah5Do61{<6h`sK}J;zTud#2vI`Q>}BG_S*Ye0yI047{*=U*Fc~EcW1AGd7HmEx)U; zQFycl`20>SVi9M$$ z9}O?)lje)7l}VYsUR1BX-S>gLVE3skrQ>rvT=)@eE`NI9u%2h5-z@7f`^MT`jn^&R z*87lWt(XeuZ!9leNITpx-@TA^g`+lP7+NXQ1P`xwqq;u%Sr+#VxHUm0_(EElPE#{( z_+9kD>{PG6{rhTrwcv{abCy&|vb<7*48uy^ace)j=;ot!ul#;p{nGR|J{2au1|3g- z&)25F`cE-WdRJ|?t!w2Dfo?DRrd-(Al5>{lW!%^jsseX$oIt`xRV{$Is{3E@$UB+GT^OJ9Hb_M&i z%UM=jg9>aYxHlwaT)h{9rh?u|+NjN%BnwM#g1>|P!1r!_>Q%epxunqNOKzXfj!6MR zW3C_RKlfHzAD{6~1E6k!C$9WjpLs_z=>4OEYqxN^FzB)SzWaH84%@%0dGQ<(GpCkbonh|s>YgN#P>=lu4hrmk|G*(jt(0Bw z``=g|mNTGZzKBfkCLcOkY|50+`=&xW{0LZ4ck$>QLp-ym5f}y*WqQ{*td_Iut9Hef z%{Wq`eejHbOYU@CRDKr-!QT5r@6~=U#_#VdX&LM{P9=FdZA7*!DMG3ibO+P1H_qGc zOh`qS+IkL1OG)0|VOt8t3Th)+|}nv z#GEAVFL!<2T(ZKS?&Wg6D!=3IU+99BD>^&$D3`Nfl5y)w4^RGT%ifXe1b;tI(JE!& zbJVQmmP_JQRM5L-t{d;C9FRNX>F=G~E(~9iKFm48*f@`XBfm~o{pS_*H*|QNwgZC> zjOZISddz*NuJ;!>0>63>{|J0~y(q9W^lYa?E6PCoO&NCYaBHsmXNmIZR=sk$&!75u&#y5y!^AHmYJELl$uCt%T+5F~FB~Wq zG3V3bl0nZssvcVVDz1A~w;IqwYZoULD3S5%xsFrT&K;Ao-OsOix>x=Na$WHMy)J8) zNMRqDBj3|ZUJcH)FBe(+=$r1Yw+K-mV0A;eh(QakQF?wUUlBSC5;EV4qUx4-+|HDl0qYH7F||w z#HmqLq9Z#NEm5NVs!6TK{?4CwN1r-*a(Jblf3Z$Xnj?GTojUznG*OAEX`;Iwz`h?; zIr{!gRZwPPx=E=&+~_9o_IwBJ(VLIqU!UG?zOiEfrKWnTSrOqPNJOLY`Buap{cy~*IA1_4fJT;(@*}^5BR4~--wBOrwn-9d~}_%=_@*S2`iM&D|+uH*IG#z z<@>Xm~`WxrbQO_`4yRDc_?l^ z5$*G318rtquQ9r7&Sq_){}k`pxgE5B>D+1GZS?iHYpJ@qJ^Z#S^9uB-R$%z;}&E`f#l zW9JAg2CbXLEnmO1;JcR9E++FhdFXXW+^G>eGERKdYg5y&DW?qE6cJm+?Ug0W_YDYW zyrF7Py6lHvE{c6K{a*9n?Zy4un6h5O&AP<}|I zZliZ>{}`XA@sd90%FQhF(7CDS41wfdv(hlFTKlx&8Xx>g2FaZAK0U8P*io@O!}7#+jgJbImRFDDq&P{x#EjdBMLM-F!=> ze}iTVOgOY=>5^t8_N`s=w2Ni!pp}9)t=@lTvA)GGZ9Z{YD+!&pZC9ZZ(DeR0`S~ zhj>qA`NIUe_%HZP;>A{J1fQpZ&Lzl?cQNx9s+sxTea!rI2mJwx1pk8{ndL`YnR$Q1 z19~F2|RWi#{9;@LK7MEDzD>Rdwj z`waRi5ycpu)e-t(x6S%B51Dx_z|40KHuIJ2%BP2+Kt%Y5|C;qrFE;ZrYt4L6S2O?D 
z&VNW{v;56SGr#SFnRn422E|#V$1AmxFU7j1nBs-}=M^)5g{RYWBjk0(i!{;*KAWBY zn1;GEA-?TR&3w17W`0gqGyiF_nLm5l%%{z+cu}9lRw-Vj=aXIguO}V`lSas|_0ze8 z^nI;v=C6y72S_9I%Ng2_;G5XhUkm-E0%StI(onu4eUt3^|NU7wN+b05-qg7S|Khtj zx)Jh&HFPdPK5L1YuT{#-e`sLl?_M_ZiN>4x#CGFXrEO+;L%aT7!>&I#Bs1%$>SpG{ z?wR??;(=pnME-(`>s&(q9vEJvvC31K>+jhBv;Nf~X1;eev;MrgW_gKnX1;$bGvCLq z{npQDmd9>4^O;hbM$z{xv7e@&$JN*~hLuq`hp`&z;81|2kskqmP>T?{?`q z)W(909#=Dnx?d{z#v;P!!<@?@leA*Fe)^Bb%{`t-|%R8Ji^U400dCji8qsE!# zc?+2Nt6^q-wB2}7!frlmw7{&t;JBGzWjB6AT{p{%`S-JS^vN* zGvC_3T*nQvv+zMSmZ$2z<9T&YFo@PqBvcimH)!yoz7tiNifncwPW)-Pl?KF%C! z)?X|hnwCb42XlJrT*CM=z{kvY6(0hTM(7tCpmPcO_w2@3ao?&Nq3<-u@;_Y{U>+K{E9theo1LFf1|9KPwr>t+cz@v8F!fZl6K=yI=lHWhu!=!bGq68 zgj;5Q^F1>^ajcntHPy@q4mR^??8Xo06=wPDgJwQSdNUudSnK>zJ&CzKgPSV_Q6Ci) zYt@gptH0ctm4eW3X4n5(6fw(t_?!7+1PhVx0U!H5`tN$_cg-e+ES9bH)!meg{ zl->BfzPnjo$gVx5nPHYc{$}QPd^hveTAKMUcI&Yyf3v)&UH|xF*B{e;H0yh?0yYVa2U7OgN-(q(45x>hU z&z`}|JBkl@NF%}zny7OL`8kx|%m+_1^OFP2e0jU|<#2DaJj=gk{=VIMBC*|iw?I9! zeoSLC-~No5KakVR56ood$7eM2(+8RPS82`Vk-=^}+UaN3FJ?FYzP@IbkDp@Zmn}E* zcXB8`E7o#`^_*z$4@W7vsGqBL+hlS%<*+CYv%9XGV{B{M+Br1>7S6r%)hW( zuO+sdFY?>fZ{tWkw1oWcuv^di4>!y28^$LQf8!cv`I!D@e)CE*|JZK)Xnfu*zh*aH zY_wa?rcP$o?-XO^|5P&b4tDMPyGXP&!g{l%&L!l3U|%yIVt0NpFuRhA_L^0EgiRU| zUagWkmk?g^`(}QmUHSjsZk8ABZ05JxwZG13&GHdf&HO^U^=bK8W_jB?X5Oj0nXi7u z%+I$QuZEX5%PrZ>`QL6gzTUPg?;^F${?pslS5R-We7~Lkp7~~ZogQYsUoSKNIk%Y? 
zzcHg5k+g^VbS|O33N179uk)Ju3U=);)=;0KJeOFzc7`G4tE(%74Wovpm-XGoN(1nRl_9kHXKIo1#R=6~7cFQr}mJ*;fjkKShHCl@vIfj7MJhZvFS$PCm(QeNe2Q+5hHKW`3;Qcr(|oKbBfz)_1k5{{eRMU%F7U z{wKTrXld2ltmWINrdfZQUHfTnH(naNYu0eg|qehGQz)WEbln*a)Taz=oj8!;iUkb zF9q)gulql>g!gG9E!N*q5&rAG=DoR{&em^73cft$6~sgNVp(2IoaYng1$2k^xu{K`)}cW4AQTsryA?Oja6K%?>&p~J01}4{W)8I zyTr;fK6Vx_GdU*2pA!8Vm6ZN8_IGjqF4lLdh5l@zf6!umM_=$;Zt>0wFP-dnNbsNS zHt!4cQv8O@l>Y_3T_MvqIw$WxxHu(zryc*#nV;tJ(lW(j&3`~y%CD}I^_vAE{6D`a ze_ohKF6F=165nQv_1ix}{-y)v!Ap*MehaVT&WV?~#5I?Yr=ffTC|})wiF^*4=R8+J zJ&yIt5eX ze~`u2ewV?=)>r%l`d<_;h>GtU2>sGOX)lzQULHA4uqV&zZ2e6kA#ZjZ-rLz$-=o{( z-%x&Ok$2AQYzwc_G1^lm@*~6F6>aWsrIS*h75T;l`4|1KEd>!0^?%x+f4LiX?9ZHR`i&T``|;v^by=Uc*pEErMVrd9{Bn2VJ?lHFz1HJ=EYA;T z>vxVtetNOKJIzaF{?h*#GCG70EcBJ}C;x_=7moHwzS+h9`@I2_h$qfBBWE17V%a?loUy~jW%4dfRPaN1TY@+Jt$bS0IUyH5% zpGZRgqWy~RjfwW#qX*9@@WQiRzNcczzhS%=?L0(2Pm_x0X>xQW|GZGG>rbOUY~-UB z17-ZzXV9NI(Enxr7Z9JZza&mXdP+H3e>Y0R?>Ce7(~K}W~_{R%zdi%Y^{xQC= zDxV?jR|AW>Siegx%4e(_{j-sy_1oQoU!TiCTWVSgnqBfyaU+S*%n`3#{WFsRDEt8Kz>~;w*FO>_0{M$F4`$s z9tU%ACZ5#ERzFF78UJ!Q*~ZV0E6G1E_3Gni8Xx*I{XLD0KY)hv*-##R@qawGYOnQI z(VlrBc(6==x3@k1y=(V*R~7!JlwseCI_uT8K5jH5m^Q z^8%i&Jq_U*8Bf*T`oOnOukzdOGwEae*ZpsOKzo^~#*>CeIYUhEY-{A@Z2iqX z5&v7t$1RzYMl-d>fBXpJwWqW78^}U_js|$EucP%_^`gBDadObklU}`j6eXc=c!~Iv z%)k39=95ioJSYYq#tV~rc_w>Le~DK9XR=-^ZW!M-1QVZOK0L{KGyam3_4kTIdhXJ` z)*9NEKjj@{C~w~)d|xr2lWl!6;RfxelUlDVMD9c5(#IEH)+g6kpXlSm6ZR(s+FGo? 
zc_`xRPk#z%XVJvAPw=bYLwG5kVb;pW^&);gN077DKkIzjH!sTT;kUZRyU=Z&wP+dNKKR4%dV3F8!~Dm~=X!otU84M% zAMZ;2+wjfmJKOT_!Fp#W^OvrFErRhYfc_x!clRgnxfD=(t!?a`~9c*aZ3KhEU$upz(0X^&f{tN#77DB&086J|2KEsIgUzN)@zpJqJI z?`-`>lBka`GX5qn^NCDPj5q%2zxw>Sp84~zp}ovWNqhqxt>3Z~?RiN*@vU)-t$e)t zvp#TSy(#0b+n4g@qZ0afU+5|QnHPuk@># ziw_|)tXSJiN7k!L&p6t~zXi;n9?YM|r2PC2-s$G0e27)Q00VtsBDH^50bjsX@zEXV zFNX4-*@yD`q2ha&i2XxZHU9oY?)Ov4YcLRm{;}BRm(}}Nzw=W5Ri}jbbC7-?KDO~t zrl&UKJgew}twU-vB5-!T<(Kg#2lL4GPb>yh6H-zc-n z(Jye{qA?6v%XcW_%UnZy*`I^>EKauhvHl9ix7lia@uemCKjCEkjZ+ccF!bXcmHvh0 z%n!3H*59WP@?FeNfo)xE>+QN(9kk)?Tx{*5BK{rtppQkSX9eY#vXP7SUh>1Z--t@? zVry?XkmqT@`cleMdD5R3JKDsU_|c@Vh#H?e zkD)ylRlL_y;^(6v`g}bgJ?(jgqiuchq!sP)oRe+;JQM#Re58eJTH~*uiTvz#)L153 z`P{6pK2uoTWd7!SVm)y}^@nrt0VkcU-=q`ey>}Splf`0d-^Ie|f8L63_K@}UTs8i8 zM()gc!8sZKP~Vp+66$ zy@lqo*xGwO#&?hYj@mpK{w(%?k%swe8SAGS8PxjUrJs04Sgi;13}tI;JDN3mk3Z;e)ia z=UI-n`K(ZP<{(!^NI1dz&wj> zJs1IhZ;r)Q{$k&k-osIAiht35N9SgLlHAGG-nQ}lUKAgQC?@OcGeCa z3;pdOomr+QQ!xF9k7?=ai_b#NN3?YPZWr)hL7mq1^q2LFhcdiK8dm65wf^zo`K@?9Xd=F0Ci>$C~Kln@Re^mOPBR|bYQS|UVn=zknSNogt@F{nw^UHSdK77>8UFO$6 zhWs>8_1}Q@`gxQ(FPy!X^AbML^jhkF=YF(Nkc(~oKc4V>cRAVik2R>D7}onwWOOdbdA z8ugJ}<|iPVgXY#%?av+9pQPu*L%KYKj65~;F9*i+r*qW$Jty*5y61%ny41*jkSDSbZn!d4P})XZ?Ds zJ?Hf@{zBy$4;m=BZ$A24YA4(I%KNtTAH#ew2YDEW11wXl?Iqh{)&pbJ{2$Yf^@Bmb z%Ou9@MrytO2i~`_TJMieL4WS*Xj>1}WPcO(UF{!S>aiZ2?qo}^n5VqF9BupAN{?9| z8OnDM`cX5SZ0G-tXxLK&Xe(SE*S2gX{R~;CrLrbtero6;>_Ndgf*Kun7+=l+S(9l0$vCx|8r}p!{@bM*`wIwpV zM$D&M*?;TprOR~Yj}UbpR_qDk8|qI(zA{+NUs3QuZPb2$4@q0gd1N%{6XkiH@o?r> zb$;SglJT^RTF-j1zwjxf_Ct?ZFArh;KhT=LWZGinzJ=BP=oEaQui6h;Lg?>?^Ph=~ z`&)B!UnTpai;IKi!TpgaF;RYlnScD&sr#N}9TViUg+!n*NOJKll<#^t10xK z=IVUlG<;YKRbC(JFdj`-^(($VX6)~E>T(`EMfJ}hH^z59jNDt+hkI?ti&3gQe0o9q zU90M+G4i>i)p$D)zVjHx=X7?^ejCQqdC2|7s`H4KrD(7IYCe68+-IB`Pn`o9Un3M> z!;kSPK-+=<(b=VeReY`^E)4c7V861|K0IlqoT@B^47GM%W6M36}e9(C4U3| z-M~+$e&P)EQ_i39yM-EW`lrGF1l9jjmZm-{tNHpJ9ryE4mHy($W2^AytSs+l@Sm$X 
z+18)+Yti2Mz%0$)+Fl029|%|JTLeFIxC*~+L;BZBwVphNeB}rg{>@8XhR@yqIbu3b|0uT>niHL`q@A45N*THkf=N_^aB=Rt%mD`it-gPePpFV2;cOCyvhpYA3Fy!U> zs{MAGm-L7AsyqsNaNjq{(N_-+xHYMRnh}1;w?s9Bpz7=Y{8o zI$FPvFY13g_pf1w_Hm&j{n4<0e#u76`L(()xybu^*HWtW%&bIS{ln%_Vd^}O#k6kCczyVAGvVLmwzETVyHq_hWMEnO`ceb4` z_IyQsZd2>gg>NX|18Tf2A=cBZ?~}{&xw47%#R+wuv8o#Wdpp_A%Tf^k(>_kN{o9PS zj2C>Mc8BzzmGL)Ec{N^q$;9})-_dsdRIMuUvtGF*^%vz}d~!-SFBkQDg{16b=1fpU*-mr~}-O>GfYag7bt5%Ky6n_Cvv{ zJ?v+{Fx;^J8a$5jF3bEU%X=>2g}qewCu3>vL9};${8>i$r~TFa;BWlQL*_GQ!+hM2ejn$p*3;*iPYzsD>-C`(SwDoR^6kudlCPVK?fm>;8qzb7`+b?- zYv@;Lqx3B|nNK^b^rkMz_?}VChoQ*Jby4RNJ&=cHQvEaOEb6C=S|7!t@7q<~XS{lb z+>pP?_;2K;#_MqW59zA>k9KAJ<@tm9vOR1;-#4?Ge|B;IbIq_nia{QbMae(F2YIXg zX3ekor~dW&sC%3BMkwRI98dp6?tE6A$3!Cc>dXD6l%ESC{R>rjonZX+GxYy6_;)*p zf2r?H!iOwU>%BU^n9mIR@!3A~m#2aj$C%x)F+tZR?&I9+$@ZKVynn8^ZQTJJIa6Y1s7i)hq-jP0izPcym zlbo&J=(o;yt4RM6b)T7Q0psTowSN7C{MJ%>muz3#kyjb2^mB8bbpC_d?-yhKytT|? zJ1>t#zu7R0?L0h9TgE@beAXBJh!qxXpbS6l6~>Qd>iLnW@Hv_*ez`a2S$ElQl4X(p z#GX7)!;jXSljSiI-h<~ebiR2a`WNSQ(~yhtMw~;G%c%Bq>l!m2Y*6c=q)pH_@PC(5 zep4;B^N78_Igd1y=Ugx5qvMXY@o+{B+Q+}Df0e7u_|idzAF`M6?~@uo#YU{vASIuJ zezT5>ADfx=Di6aPljV~kfcDi&%`bT~kzT|3yYD}=?+Z@0^!9aU{_CywyM=PmehlXc zFBy;HoOzy4mfwpu^fyEO6v#;W*Q@iDA5Up-^mkf>wftAKgE#2c?@xI;ah@jSXW(Pu z_4;s#W;{=&=EDq}mv80A4)yiN$;r$YP1Sf>cna;anc|;kXT8i1Tf8QGQJ+&g6W={` zpYBUV$Ni($3ui3sKMnhb=jc1!SNrK@oImv&q|#H0_j#SOxoAAXWzAp3VCws(+TXN% z06$IXk9be}G@L&^`@(vG@ksA)rI^p>zE$&W)kmC?!x++{E;lHe%Ms{Yb!OMdb?Bqy;OVX zjQ-jOsywE$eptx*K~LXb@;hX|Za*@~DUWH*%f-zMuB#!2Bn}d&PNIh++KKSYM<(t?F+G`>Ws8 zRR0cT|2uW6+K(2`$$aCY+RHuUVbhfU^e42>#cKVs7e02Wvu*!yjfT^?lsYf)q&*xk zw1*BI&^PqA16ioA8*05>ts~`YSYIykC4G6+d>l!8iRbw}nt`?dxEA4jB(EB8vNIld zXli}dEH~qan`$5F8_?dCI@#{q!r^}#_|@>=mnr$5cC0TA>3zfgpvH7Z+y3l9d)7|| z`8WFOcfu5MtNCs&@*G2y{208)P*s0(XeWW`)%jJ2qV%7Fs{S|e{M+whj32T+)#ki8PYD;> z^OTp-pIS(Tm%TUbiythZ8Cv^m#+KCIVJF-EtpNR{Kw4FQ^{X%*bW!D7vMKB3E$TdQ zGxFkHl{^OCx0~Yge_}m)O!enB@LM_5IVR(;_MZM|7|)t*WxVBi9dDVRPO-#ixE~qO 
ziT-YAZ#U5QJFE1YbYnmCL_IHllJ@pJziMxOlt)Yf#jikr=TkL4IPOCJnDwI^56a<`dcnU_>!*RlH}{zeKbC>%)&}+b zQNH4g&&8Z<_s{DaGCr+R_1T5>{%`v0FPT2eA=-}{`v;l+9U;`Op}lw9A^NjAPYh>& zcbFgUeklE)Jji&TOzmerxSIR#6ZDtzaeJ2K*7nnudJ0LW#@j*Bv>y*Oew};Geajxj z&tiNXvc%E$yiq+L`qLxzd}X%ltk2r1^VU|B_h>_T`<Wgc?gfit)4&6 zdC2=ynlgRW^U|JDD*32*@?+?)132&XVSmXqVl8jSx2&HIs{LBYBcvyt;vM!e{&Ako z5w%r6S0L??^HPQZD?bT7X1p5Tj)-9^51qUs-Cv-3Q= zVSWvGM}OhGP*3lGM6^eq2kI;1FYt!_Pn7CU4d4&#RnNE7O~!hmAG#i&SiZmS)R@4U!z@l-cjx+lE*M!Hc*Npx;x2&V}{8gmyFzd}c+;`&5TD~($?^@OadVhJ! z`O|Pi`8ctD86K(jr*kO(5JUL~A+Iq>$%ivOd6ZD?BNjdmUT;4~uQ1=VP~~}n^N5s& z^N4?1GhT8i<0{Lm`W(u;ms(Hlf}b0z__?3yudI*s`b(RZ_B4a@G?~8#==bZb`rkx< z`e#ekKi9CoIJ{Nu=T^}EBU3nOL~kvhFUcIV&xZ4kDAs4KC?CDN`jn#l%d7mKhL5VC z_`YkX&o!Ly%K9!t`nHljy+58n-gzxQPAlbq;p5k-=Z&g}`Qg2KKBUxI>SF`@Us*m; z$eYEh`D(&^_(AIY^l}TzPzEu?C;Q)1>aQHF#qyOmbFU4ls&q?L~82Uk6s(Q-${RrPVj6_TR zT_Wz|zo_zU#`#p#SH-s^zs}@0xlDhH^7QY9>iqCFe2vCVw)aU!lAgnc^c;A}dXn{n z-ru>Z(KhB(_j$v3-zMm%dLHZrybsTdoRjfwZpnJ>sFU?~Fh%*cOvQW`&hvJpUyLVf z3Nij?Q};7j3e*0wtM=L`J@eNJ^?Y7x^jrBTc?RY;2gCgKei7s4Xp8OrteS)um_z9g z!+%g##fPFFI?7^uUttGDySAXCwnC=gbwBOnw^~nCw7ajKM8Ots>!_`xTtxng!!K;* zsPPJdl@I;PdCfCb{;#u8zc(Fi_ekIDExgt0zLrtVj(vYzO^K)vtR|6lsI zo02bwcT1t#!x+L3x~BS14&wKEL}Fxn*&en#%|S;^atC%skFdD|%TM~ABP z?MCiUP04qzr#|+m_y(b!X`@*2+^wQ~#*BE9pG=iz+mluU2gtc>UUu&Ta( zTuOb!?N!gORpvbUv|&7(%6ysFkiX~*tmio&gIL4&BD|>gD!h!mK>db?C|P1#+n9wkr0`m06_wf-8y`@JcvD?W_%T`c!i`gpsA z{eoXd=6jjGc5NsxLw|~#Mtf|n&IcdS{(Pw)cUgY3ThU(5&|jqg3ar0Jvn-K5qt%MQ?9oz;A@_Z#C6_wRapE=BqH7|Lhp zMCOM&v6^~X#DpOoj{&KuUF&qzv{$`6E5jGKtJ?eE-|V*x`~O{A=s%~_d0cc} z`dcbBeqR#vt2_N)rYH6Z?Qyv}|7@Lw_Hmi#vE_I&hKfH;_P zYtGqw?X}l$?f2shCd~GG8_Lr(R-QJXKQZTCtRLy|!rvHC*9Qr%>ID4gT|L@^_^AqNLcM|3Q8CKpucLe!i{`ZN`qI_1E>o-l4uz#GD zhyTX={p|gfTVZd^7aI6IT!G&|M*1)*`Qtj+Gr;Wm*HX0C7_4-T(fYH$YlVN^Z=Ro> zya@O_V9wv4buY^E8gst(>ImvXChwbgf8&kj`s9bq7<}zlWm**m~XFkE48_ zZ|=w0^BeTX1Ll1Wf7(v|W0vpG^XM;joBQFP{{ZTX+2(rC60E;&W&M*mAB6qpnD3(> zfqqL({d)2KjJf9fmtddb7fgNEvz!}qP{Wun<5nI>Phs5`Uk?vflx?<;=S>!Tuc^@`BK 
zXji;{AewCK4)^y*`$g#5NO#K$kZYPu3nK;4RH$3P+p#RozIi4Y@ zj3frzyQ8^kMxbUSOU=MEJyK_PBCN<-C*pM4#To2~g_F7I#1JQz9j9JTM;)aDQ9A6R zEQINEQz2v5%2#B2#BZ$%kp$SrFmWbxEeR!VBx0)e&Q?ZMKv!IbtLjuMVn~6hl$vBo zi8xCENFJnA$JJ8mq^8v&k6Eg=$x=p)w>(wb2)I zC$R$c`n9bCfx1v=AeKz547G7vQAR^l}%*;XxVYUy0pvkA!6$^uN* zH+Fg%b-LcVcvnnmE<+ltO`UVV0F|A8R`=iMf(PlKJvOC_6etmBUJ@k&4{v;Wd?EwmW>p)ed(K>WTd%Ff=p=dIh zNVd%F;GKCbZZ;6Por6GiXg)gYNPi#{9$eLY%{27?dk3M$Dr$v#6Um-%cPNfQMi|}t zIW1_aTC1s3Asa0;7FE`AX7oPnz_V&Vl5#6Ci*eqpg4vBG)K%(+?l5(3Dm9I&rdqS5 zx48E8AUf4^P)8djpVfsVb*e5ktG4G-tJMCUC6~FpN4eFGax>m)Be+&+2HVz=wAEWk zQcw5&Dq~E;^Q-Dkx59=t)rBF>GHy`{Rt-IL!3H|48dA&E2dp8kaBjCYODd~+)lzG9 zaXL|t@F0{~Q0u?LCbCSRs8ksPY+Z<5m)p=StJh76$>Ph0e^tiVR+nj%VcQ&A3o+@a zl$+HN${qo{*^-^zPt~%jEZkHwnI+WKPHwKWHmX|AA(%a8-L$#E4I|{8vFcE3EsTgd zRi;&y)%(hVMVUjrp0FxK`6Q)Ccy2YzEkYQw+*W0cG%U{Mls=c4lX`y*W~}1X{q>P_(BHb5qG^A7;xE$#7S+ zSzH}n5e_AiU7-jcRoB`P?#A2}&6l~$L1I#l zD^6D8IMFf%XJKhMx?hl+THIGDB7`ni8e7Cf=R9=(;Auv z0v)l;Yvi~+FH!jfb`IN}=#4suNOZfsm!9Dk$i-F$ZW2;By_2(MK-L215!NLrO>~n? zkHN0i%Ss>+n$yeY(P^sO=sIK&cbZ7oCfB(b=W?ypFP02CYZgH*H8T=PI%*X`GZ1qQ5f67!_IQ!0iQr+gbIm|O5RanCh6Up@ zqp?%e_i<`>(1zo`xQ=D59M);s=2f~LdmtI^?e9zUM^%QgTO!fh&p;&xeUWJYKr*qK zT0GeVJAoHTU=_-&GOF=gm8_#7qutRST9)w?DBv85nttaHtD~WiwGF9`uEun?OMQ+o zp%8hfW@RcmG9Fy7mXw!^1?nL40gi?SIZuQA-Eb3 zRZipF5bBZ~Q$xeipN)16J#>Xzi)qDMi&Z&4M zQ8vQ;&LsnLM6qxmypy^#tSr*x5xqcOUDC+JcMg|W>Gs~emBrG9vy1je(KA9`T)Trj zpwY@vsjQVRRQ{xmq4bpx?+uxekV~7J38rIR*PD=w*65TwA>1#Q>_hshc{~z#2|f_* z3&>i;QK%luIb~TDgaS$DhOl1-V-B3v<))=&S}1i)%hE^$XrSVx{y<_N?A)UpSl#Db zD6Ws9vyff%YvohdvaiRr7JfZ$9;H8mvOD0UWy|0|lju%jvChF72-M*Gy4Cr$Q|Al9 zgATE0MV&Nj8tlQI5E>BI$6@Ps)KeHV%ONPn$oPpNI$mJXYJf%(w9g?Kjt}5Lb8`)C zUukZ^n_4X(dK}SkPy1jef@8`-lRK4KobD(bE;FzV(zNpcCY`G>T5gt8+tiJkift|! 
zZc&Y@57PCx=EQzR$;_ZVlHuIcH3J6fGy2ARyELs;Ig~x8W@Gfd#ML0k@S;U9M_%G7 z5=4(KlEU!0I2kFJfstzsrJTT8mJy>42!#L-fR4A&1cn@Epo7)Y;p}V;JTltoydt;q z$niWQQ^T22yED(oK^{4hXXGYan z$}GQ)sjxs5e=N&jcAf1_p3!B_5t!R8u@eYq@|>BjWH@)C!cK6!e*q>0qq#jzy9hLY zsBC`iA`rBmb+_y8*$YQkKDvVXei(;(g$GN9nOL49!5uw^1zfFzeSL}KKs3^ljIM}B zSJDtEx8rA*FTLpu8QmhjnGgfOYvzO&^v+AbuzF#sr{-jSA~J|gd%cO?PTJdOG#OO^ z_OcUOo8a{QRguj5z*P2r1(gi*6^3USxjo(HbDkb%dSK3Wywvns+>3pHQZv>t*35w* ztsT?zge_L9Xa}dI{glCEqmhlJoUm@BS?hU=7wH2ts#)<%+10HIsYd#_B`!;@&FESt zJU<-6jBanDJ>G9B5Ww_#=CO??s;bCLx217r0|rAa$;2}1N@`3- zmINCUJ(vrNvcxKpnEq6ikSYmGGdtPS6o|PgPwFf|7sktQCUyD~l_L4)G$Y>0rNWVjnSM2X<&id2a`I{^+BNMe2hWfas@($AxCN+n z4)C!k`Z{QSOgjVuNZEL-GDrkjk3>|y;bIU&sIz0V)5e=7?8>KZ9#gB@@vK6R%xYz+@ne|Ax|pdgQuEeTrEHgOEF$11h#Y*rg` zDzZ$di&C^aE9YF9PL*=eRz6j!)ZT(AEH+zjx7^%FeN9u8iz4wP4hfk`Ozn^l%+os7 zG_j_I2YiQs?dprBhA!UGfAla;wN^)Wyl-B-mnm9rJZgqQ<~HiA6%A}xbMhl}Ji;(m z#x3foD}pB~^V@iYqn{e`9kY)>G~L*`x~Ip~Bv3`F7|(TmEJvy^sbD|U;E`P%KwBH9 zJs;7T?RzHX@J6P- zLx0rV=@2i%2RPOErAk+24JZGQLY)i{fnOwha6Awv?=nsr2(ZU0HJq~1$plqLI=yGZ zF&`w=F&}Xno|{>iip3H*Bqr;8e?KT7?4D`s{?Errx!*EIj3nwXUO6 z_t;3^sfTQYnd*eHo>rYh81%9+AC_^dTKQWANV_Vh=)gM3?nZS@eN5w!X^@0r9w{OIt%fy-IcRTurQ#=f=_PG>w3S z-=bcqCf70L-mD>aU0i#j$~eq!(-(Czsyua<+m2y5ijL#yFI35?&~vo2+=_165o=+z zH~p;??PSX3J-AtfmG7zA16A_coI%!sb@+FnY7quEjY&F)7!PA_k8`KeNYP2FZp#!w z4gIw1a$-xFG`Y3g=45eAxO?(W{YxBwF(IqQFO|VmuHfKvJ>f1aQ4aF))wIkrmHhnO zx+l!0^{Vkp6RXuwSYfi=3@NK%89SiYU}kLVzVA^e1rIchSwjwKlUpa_Ng+<4lHAsT zNK7PKw~5a;Ckl z#`J?Gz$@Am+eF)l{ie~5uhMHzR_g=;J?3K-5Ynqm!{`@uIOTn?yR{Evy5wLFPp1ld zmNU<76Oxr0zxJrvae;@+bneb0U<9v!mYF?T-)qRjU z5bxt7)q3NRN^_eE!A)^J3C9AIQN)|NPD`Ff4nhH^(YzVTX*f>@^UZ=+LqAo9p@l;S z57wkOokJX9+6B;=Q2z8z)>9+UlEm+2Mr)i+g4So z-g~vPvv2mru+`X1s#gA1iMA>HPD>uTY$xpcXc2G`_D|wsj7$R;2(ZU_=z{j#Q5~L^ zi&CqpYiV%-Oz6pHN*B=ln-3nFwMkmLqjJJ|aBj+Jv}syC zGvT*FpZ|a^BhtCLWWgLgAQ~Cu+k>WJt2Z8~ih`x5_LwR&kni&}F+O-#FNgLpvEA0+lkNe5%qUUvMag+JwHb zq&H0_K+n9&cg?1jfOA%P%U#oted=#b>Pff_o8qV=$$0XX2=Bv%yCyQV{q$QCEJxIQ 
z0gl~Q<>30Cwi*FHz3S=Y?St)drOni|9_I(;Bb9U;SH@#Um@|nuqMQlkRNorf(Q>_m zu`;rp1|g;_r-4ZSU{5m+yt9s4H9gYGlBGN5pb%M5T(BGuhCOw@1e}<%5`*Fy(#m2v9c6TK(l^yG$^K%V}M7Lg$=Hfua zw9HFdyrP#q4|j(^nm*_G}HU5)88nMerCr;xj;o*S*fb%v%?|>p2KQ#aV?Fa0a45K z7FO3c;9YK|r?8q_TtnwsUvFV`J9yGTvoFT)9(2&{Ah$7PyQSK`m7c}KdvhRN zvbVsxWN$8{OZFDnu(Ky8GVJUruwiFUZe-ZmQ((i+o*c=rv!}pPa<4~6l9Ig!)+Kv$ zBwez%z`A5_j-*TW7Fd_;&5?A;-U1tT_T)&0ojnCM?Ci;r3_E)YY}nb8BN=w~6xgK$ zgG0vH&R*3N$}pKOOug~Qu@7SeVgihv2V4Lqip>*3IbBAhvCIgIM!KR+nbXhI^UnyY z)8ElmA?CsZ?w-y=YVOpBQiBD!^HwuQt_FBFN)4n`4;si#xKSL1%GiOMA)sy2)!y?R z=CFkF{*s&=TI74s2R_;iSdGg(%Sl%cI}fLMRkGxepAw<~&1`B8XlH+CM&3AW z*YnCa6B1m6?QOK0VVfvzvE_km5;nd~<2hkmK!dv~RQaUQu=W;ylshU@navR90W7+E zqbb@Q9_$UX+4)Q%3RLj`*7JMrQ>@jE-#|O_mr$9 zePNt#qjSZ*7jsE=9$Rm`RP0>NF?~`VG_eyEs5IYuDggbfd+cP@>F>N~h&!;ogSs!K4Xxpo%~D zsu|Sh=auXWkUH^vmCc<7&$hzs#9&vfF-~WB`+4U0bFZ1hJ=U~a#@5A+JaO5ol}V+J z^Q=1^0+mvVeCky{D5p4L>Xe$hy-h7znE3+i@!O>x((|C+pfT5zsrIJLHlAsk8FN0m zp4O7&isig;Uw60zrNhkqw9GTFS&su3celxi^^$QG*u4<6v|h8&)W3#)s#I4^yk&b& zYX@#UWYxq}?SV?1YHbCJ`Cd$6q~h)XMo4!fv?ki4#@YiKvs(E(552u9t|j6@Z!Ms^ z+To6lLF|&mkxCQ6K&AQAYYx1CYfAOvf@Wykw1gcUM!ylp4by_7ZJ(c$?*J`<*7~U~=cV$7;_SwyJ+Ueq zzG<0fUX2uYuu{7RGnqPgu$39u-C%S%kNR3bc^wdYrOs;3X$_m|6YF%kg zn4F1wPFXzy(V538(H^pA3h{~rQ zygb?S1J}scIzXUM*_j=i`K%ZeivcxJimE z%U*g{)l1RM67JoeOe37Yb%zhN!$5R4rg5;b=3}_9fp7H>Ow&FUWO=RU(>egPG*%w+ zw_>0%(akTptMAYsc{ihb-ZH7UV;zwlFB4!~4Md#*uiBux%Ie7E-lXQLuc3o?ZQb#W zpar38Ef*Qv2hA)7>T+Is&8~;pqa4{E!{)hpVGLeb1@$IWpqAjxs^P)V$?(cXt-a%F zS4G%!Wr5Jz1RcLhD5dy&IMJQz%1)iX1fM|bncM_zMAr!K8-;h}jm67k{` zW>_!z>5Zt$04?zu&y{5WnZSSXd>x$2^_G_bnqM}}UDp|xPdr;2^Wm z$xLTj=9yjgtr+4L?-0@ov$|{r7&i%ovB%PK ziE9-tPO1Z`>_<9hZgO)UE7;2RtbH=RVr~?@AxpEN>1@8Tvp-r04`XGiq$$j!YO}(a z9ne%WT_R8eVeE}u{LI*k7G69QWUC0z>S)VfIU3QHbYl%nd)bU_?ymlx6U(C>EzXCJ z>El4EASz!&<>5t5x}p!u@hz6Ci76fPg-W``lkvl7gR#XyIjGB5755f}RHVgUT_9SCgdysa{)A1H;DK`DJijF|!255Ggeez<6T#=sK2Oo%O z9AxU%(I;c@M1`LUBx{YTeK){^s~gF>Lq;1L7WY!$76-`4zv7*kAn0QTl^-Ti1W{!{ zYasEj#TS)lH!%z4YIjoZW4Ly8CT&K+dcrcRW|gYBge<^k 
ztu4OnqCHZnta1i-;0=lSC{$%)owr6bX7PZcmQ5t&#UHi6)o)I#yc9!P-V{srqky1? zi2F3Pe@!Ln(?94DX9rM0i{ha|*S+tdOu?J(G&4D~6CF|2xS4cU1W@U&Qry)6dg#uO zR{B0DA94oRV^vZxlZWCcg)EBV=5gyv2X5^`qQ(mw`KsKw`*RPzh?cV}2ls0kPgRMH zG9~@IOlI6w*P}gACE@NrdSovi)00T{VWlw9wYu5Zjz`PKRs6Y131vRD_Q2?l(4tuG z%z?WTSu}DluUu0vvjp-smc~R9-$k=}QjPVED%Z#${&UTORgf1LtT7f^%1>R+OMBH? z+=qiX(md&xjKXA?Hg3Yb@0N`QwSCrCD&3UM;$Hr;Bzw|xb+Re3lI<0$>(iciRZFUB zjuSm7X{6}xsk$njoOE?^KHcsYi+2uS=#B=*JUxo?Srve++yGuPiJ=_2Nm;jCx_h-M zNspi`W*yC1=)TzVRmYPCd^L9mn`E;IAePzMtXJ=JjxAcbm&z<{y;OlK(N#+J$N0DJre&Ua&8~O1 zYe`@TgfWD1WGYab@ks5J*CaD8pO@Dg$>-iLlD+exj})cbfw~gCi!3LS0#mc!sdQ%b z8D{T1^z5;u-`mlRZ#VaLX`AYq8`k6Yk*v3>H2H_9Xv#46yAs*Cgk0K-L$gLKIl2SI zc&tjt6>d!6*$!0$^|^0ViFm;kiz?08-+c*Gnom`7U6t2%QRK|t>U%)6=PJRfzObd* z!+aTj=i508Cp+dvd%Ff=CP33N&s2K2`&ziKFPV_v6KQHOr3R|_bCqOO4kPx-WzM4+ z$ES=!pE;Ff9G}X%|H9}9z=!Eu1m4e{C6bBWL_CPwjQ&Kp zdoNTa<*ryezDN>5Rh(5bR`KU5rCq7Tip+CCSF8VUiQ%Nq>(WTXRr zvlq;2iuQLT<9%463R!^hB zs=ka}Zsb=~waK_dQZCbLg_BO?XC)A*%L%Ob8jx*5*w3wh*52NYtof!b;4+GL8|zVK zCBu=pzRa6vYGzdLsp?gqq6MFHL4D`!XQ=Tf%8iUJB?f8T|LIZ1XX#Xoe4&; zJcJuHuUE&{!h83`{;+lzuvht7_&nreKgN>z+|RtG1bc-7E6K3hG-h7$+Hp(5@;4C^ zo659}oOz|%fiGE(D$(9{jB}iOA~^*)gzxHNm)FcbEbb0!Uxm&~S!r6`XY+~}qwONt z%+PZUKEz;L6PcHUc~!Gl_gl^*<`umw+|v`5i*?wrqJ2j&ulW5ddPeP#z1`6j`sO{a zY1-U9VYVgFbO3Mlw&RQ&O8HWvV7c-IH`k zJfg2aR#8&2`puE71tH0{UoGSV_xicf)pE*Dj@S9P9s62ydaNE)%k8E+M6&IrJH)|c z9PgwDu*ec^MYlQJ6~#D=`sVWMH->@u`ar+pt7riQh<|FeszV;o#Y5|&4l^Nf|yHfxV2mSX|S z&6TByQSqtE3FEZ@sEw-iMlci}Oy$WNd@-ZQuGwm>(I)wtq%>Jm)ao;Q zRAbQ9OS!X4j!tMKsamP>7*W=NYVT0xD39!*4nc<5ceh8KImgN{7jGH?4gzym$N~Wt zbhNN_<_{Z7HE-lBaUH<3%bRxqJKrpT7FF}bP;Z{{DyRcgx45(yx$fenmC22jPdn4B zedir>QfywsXj!M3vNPduz*f$GyaoM_g(!qh9S}w8p`4aOrDOhWM<#orNM@GKZUCgL zy;K71h7>LW@@v1$gpF>KC%-hK<>zB&Ja;df6~&gic!vq2(dxN(YHXu=>DxF&cJJc2 zMRx4z*hiMfx%DvxnzX7KyP({6Ra2(SdF(`Z>H*nCHabFc@GaxaPEG1q*y}7$SIMrB z7Rk96WSeq+ogrH>9?F4jTrbtMZDcPc$u_cHoKW?$VI8b)9lp_5g`28IY2OZQ^$Yhx z8cR|3kk%W4S>J=T&*(*36{Fsw937iuQnm6f<1AbI82adC$k`!Bqdodct@Zi0N5*P^ zygWWY_SF 
zFRFSm9YyiXdm>6VlHu@*`Fa;LdC{Y>Lasl1nzop)FN^5n?js;&gW(y=T>>7e#Q8G7`7!-M32B-;Y+R+EB_txFXBuus+Au$|Xll z1YOoj>*V;hz5X3tOweN0xm$kIA-klw$H|?va)@Iyv8TmYB7GEB7Rke#=7d(g@klds z)<-5#wV4b1_z|dD6ixOt(i$qh(UQB%k*C!@81IfOz|GINzBSmLSlO}~J2v8cf!K28 zA*~Qw+E{J1OW#bOFa1_k+elSvulQs;Wk}8s&!KbR?YPd!##1$W*`%ggVc}V)3u|X} zC)&f^3zD>PP+m!6+Po^D44}#bbq}jTihF6YlbfHQohasZL-S5>O3V6Im5^#*Hg2&{ zodR@A+RkY;lae~jD}rI1-)@(?{tcC@YV5+O6o+|Ky5oZ!xF6iELR6{(2c0*Up;Vcr zhJ0_Mf{hliApWyDU2ZW_?2gH{F|(MiW?oj-+JjMMO@ zsitL~sU(uoMqQ)!N4w~@DXlP@DF&)7FOff;e=+Y+~3aW_YtxV)U648}H3KJ-fne zC&;FI5Q}%kx(4H+**cB)M4}y*OGN99M=AhwYuH?Y(cT<^+9L>T?bm8=%gwILHDhqc zm>hQ0W_Atg-H-&E!mt_s|2WnwQLJvAv{|S!U7_s*Cr?C$>glbN%`@ zN)H*=6Uo)5r#sY)Nul{>y;0RXAO5eOXLsw_nbGY;+ph3xQRDq`o+oowTP_4O&!3ig zu2>w?Vp_)udb~Qz=pd`xs;KR9OqV6wE?2e-i&}jBwPWmNGI@62pGC)e$M_O5hxmAJ z7vHALBk{%+4T%I!nHp59E9>;1%6d^1OICU;778=QOR`%)ld}B3K(#KsC5(YUzse$p zG6uyhC@3pg5#4li@xF{LLx_{#e zozW1jH-6ijO%LyEH$4!hYnOzRb9!joBVB`T`1|qvp!4rgef%ikOAd9zf3I)inkW0VG0n*Fh+-Lc7&b>By!;`5ods z!fW!wxPC1k`QF1z`TYY?c?FXf-93hNvh}P6Sw=L}U~lVht-u6ov!2_dYmF{5v@#mT z`SlLW0LeQkl=-qolepv`O{Rt$H?VD>bFf!GhBC0a&%Q(|l6@2LHTVYG`@^{V5Q$QS zU~?2k)`N=F`$rpFOFM?k$ChKOlWjeqN;1z#PR`VdX`T7#D0heKj>yT3X9u&Xhs@~2I+~`%R zmw-}Y@1R$aoil@=NkX<^na4OrBOjxNj zP1aGnjl)E7FI_cVxGXHAjH^K!Mngo$d6Bd{h2yNNOs_Y;I9@@?7(^!G0h3GwCrFy8 z{{TERs@0Sj{m7s8m82Hhxqq#NLGHYDueIRLy;e&YIZ&gKlzgfI?xwMmCFWj(vubp; zSmTNetG68J85@u6Q!qkU;%iD;` zXlH|G2eWDFGr^qf+pLc{8dn&cX5DF-2llo#VmND?70O0gBb1Y_Gq1TO6qrA+B@|dh zr)Bioq;ZA%xt5PA^e^Z=MPsq-Go=>hwK`D8_F6ZnV{2(^=Yx8p1F?i$+f+J`T7Z`P zn-Cy1Xh&NXSs4o8q9p2w+qkfU25&|{Mg#3~?>DK%n-L;4NlIH(j>Zb3i^@{P7^r*D zzSVO8Kuda5@h$0NbpPfQ7>$5z4MuN!eL5pA2ezh8vk!SimnG0Je^1`g^-7WE^VQXv za;u1Jf#~XraCbv^b+n&fZni@GT(2|Hd@JAE*P6=UXmi!i^%DpV$>g-gs$Ep0$)HU` zt=7}IE2WN;nPRkY5SLgs&7(S&bHnQxC$Dsz8f7`XpJ|HuFS{bb!^m>gE>{Sg$MZar zEKT`Fk|}wa|H?@z-$*j0{=RM+%F%$1wlLY&7dfeoCZe7L1xBkV|Mag`sbIPjKkRD|7G8GGGvQTLL4eO1sr%AQo=usj5Y8o>LTG4e z7cgMJiS38<`CI3i*GxP)G=uu4kT=g29uq#wvkSv_=!rOc5V|K>a-EFo?>r 
zrA#otcrrbFL9mw`6McSgv?pS~X|_2h-8T_mrk|wQZCsBJ!s09WD}mH6sWby95p|IxVa=68d$hIxcgoBsZVbP7Y&skKXTbWvrhErEOz@*G-|jbq22 z{8{>Ci`URnc7I>AV|jOUbUrcK$UNnRxOY@JiERgxo@hStWP14Ia9^Nmet1<=yt6YO zgN`PmOtBLmiqJk;;??K#is7k(&s6VVGL?J1R<}2(CjqR6w+Jw5buIwac~0laL4;d1 zj`Ub1u5FyoB-hcc+L4YKN%R?7v&YRI)XJok+nDBLK{vOiQFY5eat>`AGb~BFd7{mM zN-b0fcik#>Hybh=^)`6nN1L$ozNfExRkaCDAiy50va8CgcI-&|*A@+Bvi3R81W!bg z@vgzl4O3{VMOyM z{++G}?wY90J-wfkdpLEXKe}*{>DJT8(d9E$(!fLePYHxPQ^#FA!@FC^NY#^1(f56l z_>LNV^(~l8^vq85B)ZU-NNB5UTG_9vmtVfI38%`PDpMXbXUZ%}s8!KE!egjn3H25# zBF}om!En#9oSV_?5U9~lf$MKn2|9x6Y63c!B%P)unOKJT-i(`@ZdFxl8%U*(3WYMB4uJY=j!Ul1im^+JKk8bxMM%-rA^{m4bK0yxE6n*)YIrh zXD;dS99%=;S#iff9mU^>3_pK^BN(HMziv5MzewHiW3*xF+?wz`R zIWs5Ap|N(XH@LM~uT*)|08!@D=tfxqG@inv@FMF}s5>rMDtR%B7KQ2}cqjA8YRe1z z%pr@`EXdBIGRw^S&<4Uh}rTS;a{x{Plo$qaP-VY`!&`#Hin8vMr9aZ@0Ll`i$cCjjxpyu56WuGK zExj$6!tD>^*otAxK#lc{%7~f%hN^6!32VnUV?o?aX-&$D4UHJ4!u7_Z(P`@Hdxm%~ z+UdO`qh8L~wA%8*QBj*}8Rm3vS1hTh^zDl!u*0ZD{@WSuAGoG_KCaT~Z;Ij**z^^7 z=8To9_bSa*KG3O{j&@HwxAt(0gHCk<*V!?(>r`s%!8~g}8uc@(j5-(cv!G^Hs;eBb zBEx#kPaJD4qX0zDk*-$$*~-dKRjn#N`DSDk`S6s1cyAxh(4Yl1u0yNSpQwy%Q$`i{ zo?(&_U9$F2qfX_egwn9{wS`!HP*H7pp-NL#%oG)DR(c&VnwUq|pd)4}-k3JDys=y> z#_j<^PWjjDF=;{fRMD09M5oftq!gqRzdCVu6|R1xyOvZ_U@nT~x|dL|3z z(JjtA>f26;%Xt2@%v<%I6DJu)i)WD`y&@i=O32=0Gl}w*$DDp)PTH8n3I*)H&1sm& zlrER&^NAo+=v$q%{c5~T8u>+$sr1f~6_9(bWhOj_ma_F7HUzUr`FX|A(x7$yejEn# zo<=?~G$ayqt1C88dQPJ+pBO9+e8yD!va?5fY3bw;hy%0@~DN4 z$L143vs1JelL6Of?pgI}sJx@e)H+sU?_eIr$|JmnM0dDLF06RWMyL1idUcFRs02QR zi@n)cw2NNVpSY^(sv2QFjrVk+3*V9ZLccvP_eJJ&zPYcUUT?4BappTXx$orlr@4=N zx^tiDQ(Cz%WS2enad)%}o1Jr?cl8bCKI~2Oc4A|8?hD;<*^7v#8yOMX|LImhdRa-o zlLN#Ch`m0;^$<6ep7jqzIwnuf{Os!J2=!-zNerfnP!uy=VG#`+;O@H01rDyB7*^_L-| zC|7u%Pt?QhhbXZ|@cheNvS(42fMnAG4>_|$5@W1C%uK-F;>%Z`F2q=I%VeMI-jMD; zXYa?w<7Wur7q_zar-L`*Ywt@e?_bG@U(DVYiNpGPO7PP=zl-$8h`V)}zyI!Xbn=VD zX8rg5-<^X_YN7bP{(Inx+wi+UJcZx#f9w3>0x(57_3N}$r_bG~z4zax(WmhHyi0_5 znrO8UJBWTnh#{hLgxE#&D?+?T^Z_CE5Pd_4SBReJ6Z?q1&nFHNy~-z!68*SOyhikT 
zpBN^3k57yc{RwnLy60{8i6Wvqd}8~pesTVph2klq7^*!@6y4k%M3M6$q8}^}&k;q( za~Dx`<6k6-obMru4(%&MPZMGvQCRRG(cJ~&DA9`x#A`%Lg%~CZ1V@OT2}8kr=br_8 z5&gFU;U@|(ONpK>L>bX@gcwKkTp`MdjuWDS=y~uNqUQ^7DNz`H647!YrVzbQh)Sa4 zg{UEVkr4GnD}rO+?=<#B8DygqTP4Jwmh)ohZa2qL&J>l;~wbTt{?l zp=cxejY1J2dbtoWqGJj~H_`VB(MNQW5CcT75MmY4D}}g$=wu<*5S=2#O+?=(#LYyn z5@IdU_Y1L(Xr&Mvhz5k%NVH0bdx&BvzKLk95DyTYD#T`@bwWH$v|flUM5hVyDA5lH z@fgu~q1ZgO(H7uO z^rOI^=>9@6f#^cuPqY>I6a5dLm_l?B@F%($_!C_M{E2=H_!C_U{E2=X_!GSr_!Iqb zv6x5n6TqM7+Cs62=>7t+l<0N9pXjH6KhaML5s|-vKhcm7-9+1fKhZGoC)y7DiFN>g zq7mRvGz$EQb^?E*UBI7cS+Q70GzR>M#(_W4Wx${4a^O$28~79L0scgLfj`j%@F&^_ z{E0qMARZ(7X(6@|O#*+S{lK5-0PrU|2>gkz0RBW*0)L{bfIrdIDE~yS2mVAq1N@2J z0Q`x5xj-Bw`cJ^0Xn&!2jp%2AKhggO{E4ms{u@AV1pY)n2mFctsz8(y{XEJ)(VKuj z(JughqF)65ME@D(pXklNpXe>XpXirR{)v7W_!C_V{E7Yx@FzO9L`*08Rp3u_9q=c5 zEAS_J8_GY?^}wHKNwHW&bOXvi(c6JP(L01_Bl_zX5-u zn}I*kZv%g#4+4Lp525@MeHi!?eFXRu{SNRa`d#2pbPMn&x)t~nEhrJs5&a(UC;ENh zPxMjXPxJ@CpXd*PKhYnd{1bf)_!IrH5U&ya3GgTS@4%nvHsF6d=ud$^(Vqc-qCZFZ zC%PT@6YVG#E{E0pR{E0pZ{E0pV{E7Yo_!Ipl@F)5!;7{~v;7|0|z@O+)ftXJ8 zH^86hZ-GD29l)RHGr*td?|?ti|3LjubO`tp-3k1OJ`4Pb{vP-feGd2&{ZHUe^bf$F z=zpR76Ws;;i9QefiM{~*iT)Az6MYf*6WtB`iT(-r6a8-#6QX;7KhggI{zP8_{zP8} z{zP8^{zU%_{E7Yr_!Hd={E6-Z{zUf!f1(F~KhYHMCwdV06Fmg{i5>?2M312S6Fmz2 zi5>&~M2`c1qOSsfqOSpeqJIVcL|+H~MAN{Z=rHgn`ZwTD^zXp`4$u?8pXdniCwdb2 z6Fmj|iAIaWIHJNQ%8B}XqJn6FPfQ?M=o6O`E%J#;L_b|DrVzccP*f5v_K6yzB|cG4 zbc|0-C+hc!8AQkWL=(}|d}21y(|uwd(NdpiA$m@USVZ&;pIA!tET6cJXqiv65k1={ zB1A_DM2zS;KG99|T%YJ8I?g8sh^{XZtB9WG6E_e&-zU}(y}&1KB3kYfHxs?kC)N@j z?-T2Y9xE0bh+gCq8;Mr<#63hW_K8hI-{lhz5G^bgn~A>LCmtqxs7P!fI>9F%CHfwp zc#P;JKCzAHMD+iNUg{H15xvYOo+f&^PwXK2UY{5uI>{%VBYK5T>>`T(@{2^#=iNhe zich>k^!@0M60JmklxP6`QKD7oj}om$f0Sqq`lCc^eL{Q=|pD& zf1(Y*pJ*fSC)x!3i8cd&qCuZnM06JLC;CW{xQ=M3ShNwH4g87D@rfAGtARh!xxk<3 zJm63CbHJbIeBe*?8sJZK0q`f<0{n@76!;Te2>gk*`ospJi-14Th5~U9(Z#@@=n~*h z^kY7;ndnmBPxLdzVhho0fj`kt0Dq#_0e_;O1pY)n1^kJIfIrctMPdih1BGIUXdCb+ 
z8V3GEs|&=7MAsCFJw)4qKhX~0Pc#DjiAI4x(N5q`v7{D~%jKhZwmPxRBkpJ)>J6YU57Lu8Tb>u1^5&F67VPbW#CVAs8DPmx)%5o{TGyfqF(|2 zM869BiMFHs6I}=ViQeiHTZrBU{E4my{zNwbf1~bDM866AiGB~5&a?XC;B7cPxLY1PxQyA|B3zt_!Iqi z;7@cL@F)6H;7{~tz@O;dMPh*H&w)SD?ZBVt>>{y-=$s;P6VbA5}#&2i*V~pRz_|1&p#Q2SjU&r{H z8NY_{tH7_CwLNIqsGGfuF#bBmFJk;W#y2s3I^$~?KZWs^GQNWG;}~Dc_#(!Syk^?{ zHO3!g{40!qk@3$leh1^9V*ED7Kg#%r8UFy|?_vA~#;;}kO^m;R@dJ$SW_*P4*D-z( zJB0#=pq;=NP|(@lP>+8{;2k{KJfYfbsV*egoszGX5sU-@y0*#&5Q*o{1nDt%J>S#k7Il(YZ-qN<8NU60OPwEA7T7;j9jNiuiM;ZSx;~!xBJ&fPL__d6`iSaiu zet_}ajE^w>I>s+z{5-}tF@8GZYZyO;@s~2bg7M=RU&{C*osXekHS&gLf8t+b{6WUQ z!uS^%{~Y6YF#ajVZ)5zUjDMK%4>0~7#&2N!TE^eR_!}5M!1!*)M;Lz{;}dV*C)}pJx1a#y`gREsWpH_)U!8$oO@PznSrC7{7|~eT7= z<2N&Y6XQ2BejVd)X8an)uVQ>3<713(WBgLaw=jM-<7Y6wp7E88pTzhHj4x+=8RPwo z7mOc1!R*iYeT?73_+5-2V*JyL-_H2Q7{7(_n;E}}@f#Vxj`24$ehuSSF}{!SF~+ws zektQy7(biwGZ`XujBjK7QpUG1em3K0FutDgm5iUn_z8?JXM7ps z{frlkAO0J&KjZf?eh=e!F@A{gPcwcy;~!)E7RGO8{3gb4Wc)hD-^}5Q*o{1nDt%J>S# zk7Il(lnX?@$(qp#Q5oquVMTY#$U?#3dWCPd@19L7(asfXYO-H_4#P((ut{1D@xX8d->KgReijNi=oO^n~j_;rlGnel5Fzl!mFjE^zCjqytv-@^FWjGw{y zdd62WeiHHcU;PjK^ZoPwsp*EMNx?VLMTbh}P740d(vrC^E&b`f`WdIvgWJTW z{GTlmEq);;;LjT8z`^V|pFo_ci1XY(nQ;o|7G3?Brn{G(zV(gtWk2tm^#I<@hODty z`+snK)7ZJ^tolsTwLkV>efQFdTgS}(OjCL6jr50)l+5*A@|mXbeg2=m@`V!dMZ}qh zIKKn`OZ*kj_Wxj2Q}Nt_7{Y8VLfpcw8)x}{`tWB;#MKB}iLid8=lgNrETShL>znny z{YPi639j4MA|7qHYG}dHH|oS?Wqa;hR&=bg@Ur$lhrb!VEPP(uo@LSS#s=Tesr0E- zea*KTE*R>Fi^-{>pT8RVYU@7n{({sPU*kS;&TU7*VLE-dgHtDg`E>%#gZd` z*t=_3%ow+oem%8sM)0Tmj^2nTQk!l2-bg>OzcF6+gQDR2tw zQ>wrxrCw1sw1@O7_}67(;<|>3?JtM#4^IrA-qsN&4IfTT2$ubnQu@u|1ACFbMlmrM zyMIeTY{Aiju}#N|N*5gU`L~R}qv5IJZ%jGkkr{s|a;B{+Z#j zHHGU+){j~5Uw_*A({C-k{mk`etY17-kXqwgaJ1;mv)5g``GQ+7`tJF+o{>7!H*Wn| z-zi^z_CwDM&sg`&@a#3`-ge$?7p@=wwP%K>Z+~XEd(Sh&eS6+WPu+LccP{>RCA6A; zc--B0d&uX^W^we!HRCsmi5K4WSi=SD&tHGu`g7Nx+cs`J4IC*S0;mmu&XRq3iehHouYHbLfwI3E3A9-H+d= z(nYB^(huyb44z60aGUmtpL4vMj+X%Ig0Y12mhpu88IP0(x0l_~Ku8w>9ecDF0qyO+ zv)5g4>&2U~{`~C)NBw7B^j$AlpGwa-VqhIS^j2Wqbl{!E`j-daF0B8w-yQ422Q{n@ 
z9MrJ>%Rvq6GY_0ff9D8ceHyq2k65wR%69SpLfJl*o_*NB`W=azq5G%F7@IIW<@IHV4sq_yI6W-^5``%$I-o6P7URyJdP}l15CsHM{ber|~ zGsC58icp(p*W)cH*?!+S>n>V%(RV9kO@7vQsDzJOZ`R^XmRkH&ddVRJ;ZLQ!A?&-t zsrJ5h-#d%&iwE8wgioelKjMyXiH>lAj_|1i8p4CHR~JS_wW=;(IbZ^~kC89}ZRq zPo^uu4ISk8HXW}}w9VpduWhq9o43tnp4;YR%0PP6es4%S^}|l4R~&w4ksjXnb|GDM z*u5lA*smd7v0p>Fd_R@slj#|UPo?*y2ZaqR3asD$e%K8M46Hx5@2$Xk z&7pS|>w>op>+y%&vA%MjhV|w9G^{V#r(u2dp;PGt2MFsRxW638#`=u^6|BFwU&H!T zdhL<70_!gwd?&F!l`cKDTrX)-B+U z?$5^hOa<0&e;vL~A4{J~-|-fVrB9|eyrX03Q)%qme_O`VC(~D@+_C=fVGZjK9M-U& zdRW8yQz^{uU@UzyeJ!}Z>tku^VQaG|Vq)RLO~=QT8Z##}o~F5zGafm8LupW?){HU6 z)MHX(F3@ICz`d9H-=MQ-E=8nf;Y1Y8rWCGQG9*$nFv7lg^Ma$LXBMPx^j&Z(^}}hD zU5+^w=NXlgDCb6r-|!ZUvQMVhy`!USjB4Jd8I_aiDXudrr_$5qjLNBWy_``&4IpP! zPNuIra56oDQTEC75^%2{p*~x-51jXZ#s|KnkGxN%Z+ifh9dN#^^|y?tc{OcT zjrwwLz7{A)E%11rwZND3k@>0gI*%iB=l=Xx-qDdcJm77bdpVh&^d zVX@}S;Mi12fv@tkl&`R2>f-pOhG&Pxc6|0XCT_nyzWb0^_SoUST>tEF=@#GZyAS!6 z-5xt9{v<+#+J^D`-nJ+4=JSV&uOA=VG2i5$?wlOEsQ98YY9~Lo zb28Q`ezx;bS_fG0g)L_WrT*(kf8Sj*VrR#av6q{q|On)jRHi}7~+AS`7e{gf~+&#ZpX3kQ-x@^w{Rq^`Ti(-W*#=PpQ z+gU!;BJP^k_O*uKkngVg3CP*Hk@O`;#-@sWA0IB>bXt7paM8M*!~X3%k&B`rt+ree z7RUVJOYxW6=f>{~|0e#_u-NnzLcJ6|ZP`-@`4mE4(th=_j_{`T_V%ZSYt}q9T)AdB zRuX)r`YOQt+i4YGg1HJXH&!tD!QGIuhI(}`?0;~tW~EnQrGMF3Ev-~K`R1L~u+mp| z-h{p%&`zQB%dvV1)`iB2&u^I1# zdORIZVLpGzhu;O)|7mBTTp9Q+wEe}-)1|f-Pu{-sbZGm_o!cyHJO4ZL2`AH6>+=bx z(k*Yne8Ncjns;b@HUMxN7BbVj4=x&)+f`a3@0`mzY>34zqS=M|4^*7%4f2hB)P@iW&8~GN@GmNB{yrc6BC)2y$rm^Qp z`l##J^JH4cc?OIy~}>yWGzdT z1po8L6Qq|mLnVX-6#Tx+aJ5+Xko!E<)0XTcUj?1k@|vY zYumZ^E}8DvA%&j&b?^IwR|S8zcO?Dr!NOSI4@!d<6_1Twc=gR&E;^%V^7T#OHs6qc zOW#kQMEmylonj)bRTW*1T?gMTTwnZ1(fX&47nYnkx8Qc)9lu}dubw$P;X`q>&lQI! z1g`-7!~SCQNB%T~C#XG(H6HHS32#ZsExM$&6g6Or9U($^n3nO?Jx?ACj&yWQ;kt%})gp~7yt z{&w?WtG}%|Z1uNQhrRe)C!|>YZ6KBHZ^brtD|WTpD+kGLBk64!yS;+atJ&>Ohey({ z93i_+{kUPbVU*l6g5@ZsXT_+D{^_D~u-E3C=c#mW+Fu-e*VbvWtepm}$@be0Z@%y{ zy<`sm^f~Oy*q$o&eWhXSE^*asLt5C^4rz8ew0Cv+Z!S7*=e6Stc2YV0^WHW{{fm5? 
z-a8Hao@5mL4VKq#0!{5nnYf5U?lzG;lJ&@4F1O zD!Q60c8T`~cbCgBU;4o-aj z=HOYuIa{|L6QPd-smJ{@f+x~HIievo7D!!`LCRn22STq)gnoB03!#5*2SNov=wNBM z8Ex-GamD*~A6a_1{E6~kwfL)7#C+41w7m{HJ&ktZ&%j~F7{Z}!x0twixNuW(s&K4G z75Zm@_g6oU(J@*5Y0x7(f7IX~8o%{}v2)u#6Kier*ZHe{1}#MDaY)(@YaiHW+FGjz zN^NXinq});|G)NtRwu62+u)J(wU|Sjgc5NH{QtT`@c%{GcAjn88T}a3&Pmw$*|P7! z&g)?3b-@dQS8x3u?EFdC`3Y&~5?4Eq&$jc8MoBnW+TJ{Fm$BAYF{5e=2%(5Y6ic@HjIIkKDTE1@y_UfaUHwWZ>}v_I_w}&2uOLFQllhx#XiJCe(MI zm8o0E=Wi>!yQ~0x>%!FM$M{mdvuV``^gXm@1Ui$}j0#dS@qSj}&A~exCd)c#_15O& zi(4PZ-<=KO_!rms5MsfB8)Z339@20;9re-VOnpQxM`>*->ZI2sVmBSIB6bvIWjwTj zE@MasN?%KRR>7?e<9CTG=It&;*(pWY$!$GKW$RH|?VEOFr$2=q+^L)4|Ih9%NHr8R zyn6AN;?(B~#-#qav@}&(fxjvEn>QvkuawHzxtd&*u~L*VtyM9~TB$5+rLwFYMyqn# z(B!S>p{y-NS-U-UZ(C_qDO!}*c2fRD>PDow5g4yJoQ?4~8;r-fV2t^jBU+ieCsW7m z!>$hv`N3_SzrEwiRlY0bt z;+@hXuJz~hg?l^zO^@jejBEz}VH+H$Jhu9JNJx;o|)zU5)rd-TzP{{!p8D zmDe`XzpS>Pe$KOdHQ%U!Z#-%18yTsPRsv}msSio14?(K*jf>zLJsWN-2(4em^1c1-^Mr2&+?7z_F^RcgdBU1r2nqB7bntB z=wt5_>1{{wUXHzwU#yZ!iWjkIQSXT#^W+!z$MisQe;j1R5eJU{H8K zjQ6d3wyf|(!K-&R%sBpUYtS29BffFP*2mG~+b+=F9T*ez-PO>1^^myyyx{a8)=5tP zO;Akv(9q=A|6DZY*pz~zWB##Nc@Y!amd85U9*W)HmWsXHHaR{qygc3!{?FlZthtnL z|IgtH&z=2JhTgREM%lpV!adoJbcP**z?_`|kYpu;_a{bvM@d);$R_Y=i`ZHYgN82=R$@58;|>z*Ah-*VaR`;XNZ-G6LC zLD2_Wg1-sg5`0h0*A|XdOrP0SQNM7wtitypj0!6jqJ2Pp3##>v7ovTjp7FxriVGX@ z_wvp;!JWavyBi%>Q&0QPOWj;rKJ+oC@`tJ!=Ty@b9@;^O@zlFo)pyi7H1FOy_Ly&MN3if~kUF+FbxTRb)GyQeu%^ko8B(VV*V3R*MB3nD|W%*vY(8P|H~o&^$*30zxL&Zl67Nlqq7Rn zVAT^R&$d5vXzcalhCaF#-nQmO$brXge;l&D+V*tYY2jZ0S08GFpX`2hSv)pA_7qy! 
z!Us>JdsBaaHpk>T_c)ZbuWk8UY)r7|*7XhI=$0GBu^m) z;W2;7?l8(9%3|f(7us8gr=S#0dboA?(kZRO6SiH9C*!W8^7wJAw);^cOSeCXHT-MC zPr~m%+5YwRr5)uR$*sQkP|dS&pE%3k)UZ#K+>xpOD3yXM#H)*Mq<+{7%RUqH$I9B~ z#6H^gmssJk`mxU;kK-O`kBRp@hceX>e{Q&9({sZUwzMr1x1WB;W$|yt&t7-h=F;28 zJ@PB$`leV9+{>(dYzHSrF+-*5$-MP1)ck8%YmuH`bJKxl!Jyvj~c=e(i zUs~pW)}mEhrVOMPH{D<)LGJ#l^Qjc#+nsh{?Y7H|%~e_|UQX;*#4R zI<~o_aPFDw~^cXBa?e3Mwj<=L?7z8Kbq=(Ig;w}-Rt|%%TeF$vxc4s?uHG1n;KL6 zAWCMmDbkka^z!JpBJXN@dHG)=4=yj5d|yYwiz(2JyIX>^zVY<_&3juwrySV3x9IC*?mgq~ z?=&dG1NW66hM%PHlcGb80g-RCKSH_~5e1?WyoX;mP51F|W5g?8jNd z$vA5`7H17>cNc9ObLU9zZ2wnmbUh>soJbTfH@!+nSd`P~)O_%@EDdvRt#Y)^*Kwdj~ULx78vw=6(mx z0B!!izjJ1W87^k6`>*(X4rgZ0InVh$&+qd5ZqM(j>{T_YAg1!Mf`bLUD?J73%29=} zmCL%WHdNtVABgYT7kt-9Lm#X*WVXa%y`j3ScUerC3hNDfVZC7+zuD1W;NeHKQB{2^ z_iCf7tXgYuz2OH&%o%I~$E3{He$D5YpVsYZRPImanAC_(nOtPq*ZdgsmBxtjulY*z z8KVs6P3BUggrxs$&`?1YbVIgX>SnP1=i%k|FcrBgjzOR43w{z@6yfTWYvdq?=if~$+j|S(4-=n5tE=towmx@ zC{dGAes4`m-<>8kwHr-}@xG?-tV_L-`^Wl+0G$k63T`fuZak2$JT{V>Z@2?lHH)ND zT+v3$U%A)CC~v_kU-dR-6(nWLe zeZK4oQSfR{hgTcg;Fvx6hw9~9F99-we}ZmC{y5SJv#+NUw(;Za*;(832Y1m4mBbGp ziaOywt1?U{%r%`iflZ0FB7=NTvHXnl4&^1UTn{sIl+SuLng8fE>H z^j+=4jcl4^)AmLd9GuKOq9twcpwrU`%8mtP-@PU`{a7?7DJubI5Nz%IXBLu^ZTw1y zw3-G57K5s!V+;9$cM8T9^w+wI|GOZ246UJW;}_Ttz?1h_QTPga zWtiOVz+5KYx(F-ihwG5XwT<%HRKq0(Q9pO;*xOpM#(s~kov(4!=$Ld5WnrZa&_!`E z4IdSMR=hAc_IsFSoSJj03>VpTk&q}li7goXLDeUg%=bW<&o zsK0Wvxrqj5_*KIx%+#5Z&tNk^Ux@e&FkH|ok-w&_`>ZgbU{%p{AuF%E;1tfXZO{|y zE)*z>=NBXtPA%SA_$P42Jp9kp7am}T2K0n%(i1{>V>;&N*Df)ddD;Z@_65g6U5)NF zU~B+)A8>HoS)<#5F@Ijiq|mspLB?EfPF6F3lappV7UGjJD?t%!sE9N>De}w`*TdOx5nx z+SRgSyFepRhTpifcdvtYVc&8)Lo_qd3Qj$)(N0DLGGdFedZ+eKZmc1LlOhTdi2Nu= zDdC@Msh7j&-jXO-;bSObse<3;X>H#F#6dYG0ovIzg0>*a%|=B+OBg;-m_fXi`LBKQ zYOaQ3TqBo*s>U*DvHrRYX@!jiy9?3^`k^kvVxcZW8$Swl8D531*MB)xX0Uu~Wrn|l zYT6NV{L~n+Wf%9RalGz|=<$-vt@6G&hTzEWW6hT{E~~znA+PJkT=Ki-cp{dQzoLqf zmL*^`-nPiC`3j~)ZGAx^Da|r4smhA`wGov$zO>BMiIR#>)oZlMW9gid#xZvz#_^0^ 
z9LGwFq;m2Arec=%ZeJQhBU$z>Bk4^fOI11gc!H#Ap7#GJ3#2+PelXUCJnrOsTTe>rD{xm{)ze#9BGlNG3=%V`ANr!n}%@X49v(;hGN}t z9bnOaLa<A-FJZmzU?u4^H0_j6ccni^CJutPj2JY zPF2>J=@CQ>a^RD)2Bb%BC4TkP*bBK0L8$Zv=RI;UQe2tZ@ zYCphKtjSF{rs5O^4X4V2%(tVv1MNxNlNyE zN1%;=!73?l=gVzIc#~qR5jBDg#s5G5vyDIPlp&JS25ETI!9yk|Ssg|f&W~EziUW|o zDo9^?<5RY`1nGOu+{Q=Y9%|i$=-cbKcP?N(hC3%(Q(Ud68zm8S zJ^atmFBsR0deShax9lBqn)|S&b_lz1YF;!njc8b&y`X6~aqZDFBN*3Cy{dvq ziLNTt{xva0yF)8Po4U-sq=Ag&dK&^U&n=iU7OVjO$X|yh(u({^k}L`?P5WO17Hgx+ zRh6%5OL8;d?NdqFEq}5~l!!#I3b#sfuccfmhmVmwY=~3JS?llRHxwU(_l)A2=f|1Z(OvOtC z?FHlNAT6A6ty})c4Qel>KYeEY)EHg>g7ZWWLuFgc7&5ulkUTA z9Rk}nzN7*=(ESGJG8*VKnWP5^x`&_#2^1!iS!MA!2II)E--j zFE{=hdnxK?uL*rjKp*4D&WEP9AgHNLu&A;YglKA67D+|Kmgw?(28fy(2Tg5Pqf*e+ z(k#6N?^TCHc=$qxJaF0GZj@C2$|%_)-Pp>XgeA+|I>I2SSQnzjweru{rCVNw7AM(4 zc1RndHEsN0dn-TRAxh>YjjgaIB|$4#mDP$^u_!Cmb{U>XgT2*?)z`A}R(_8CGsxCK zj_8_Jex_XsdHt!c4fem%uiu?9^Tt+wgyS6kr#o8tr$X#&YB|f_@@3m4qZ9Y^b6hl5 zL(hW_2V1);#O_-Mxg5hOAeR-T5v59KsUwl2pnzOf*dw-*TviBn-?*UNmrIsG6vk@y zC)_VMaLjD!dYLtfiQE&pF=}J<#y0*w>*G4C_{v=Z3-DKlsLIHyhqVWhVKzkjDy)_% z+^_0~Y7qgJ{8}4X9#yqN`=*sq&ZvJ(Hzn`aTGi;sv?L1`n1#3BC%nBd+Ra8sxwE6j z!;X`*OTAV3fBxh(!H!dqBt)z)V8?a!ZD0i9Z+H!o(5dfi$0@=p>;?QJWXHAfbDWid(16aaHNDFS9jL%2wq1 zh9XWT(?@NML=@|}#4=o6uV<^Koc+~WEfdGJG(kmvuXw%ioQEK2+j{Pv7!dyVDtuhwUAbGgsRZ(jG9PBm8Y%I4fu zv>9Jl&QYtc9oS%+iZ)yPZ64r8)z1vJ`M!1o?4TzSQ}NrgT2+>6?6&-`>Z!fQbl}pu zEKo(lv2re8Pw7mmtUPdOmf$CAeJa~ZmgbvZgFm0iCgF}5bQtW+I`+J%(uMVVfdaNcBcjBr{O-K1d{1NgR$89Jx|!u9O+hxZcz=X}#R>QN7#~loZ83z>Vdw_IOwp zH@+dJ@~_2l6>43AP%SX~_PF)RsvE0F_^ctOyjN9De)NW@4UronHY9148(2dtzt)nj zW5+(CQ;q2frmeX}XY~W9hF}&VP}zF*hTa=6BbO*P$T!F~1lm(>P;F>MTrRGnPgMnc z=ghD-44B~>Sgi6?R6lZ0C)!o5|PQ z%D?aEE6{%|U^((ne0RnQvqmfGz43ssm5;Z$gqY@cqp1|zkBg=X%{Pyxh|_<3G_?;f zi8K{s3((Y6TY#osvfemNoiTlPnmT9gps5Y^4w@?Y8nV|Bps7`Mk)|xB??Y2Q%a4nu zvQ0OSrds)>&L1C5t*``W>Z&C`Q%9@;np$MJQJV7cJDlI0rfMx6G*xWtpsB^}t$eXP zKvNIG8zHUnAx80iXzFA0kBO#ye2nwv(G+~S<{uwT%>zuLoH}9-&{T;fKvUz)H%?Pe zI=(wi>CGK9m1FIosR?bZe2y(ZQ(0D#rZzdg4^8bd{kUlAs{Q8CR4f0I{l`aBBTWIC 
zDlr9UYOy&$Q)<(V)6`5v^}nk=H5XC+fIijF(m_*kt+1u70h)@kh&1)6{kzi?Y8j)ZXQkfkR$M8qp4P8>54Sf$}e^XXljBj#J4JMJMa&00_>A5MoKh!<1dy(tMZH3S9H?H3D+YY?uR?9b!bu{wpqKCVBM}LwR?bbx!n;&cV zIk%wGy>B2k9Emk!?TAbFma0cr<JJBUO^2Adfx0`3WNx+U zASw>Y`+S0GNoB|%{RCBW{^@8e-jVwc9p#=%%l?KdlzF<2vP*Hbmm%pug}=9yEymta zlYjbBd-3DCfx1tP$y_E^%=u6e?Eet0cEO&f`T@SlGUQXu6teGo3ALYSUH5OXuI@#3 zDLT)(DGIVa_eE~{-zo3y3q3NPu_j}1A6k>~4?QyS{cE&*{5E9e)9T&~tk+4BW(#XF zlKks+k}?KTmTFQi4rHa`oPcvY&ha>_aaQBp{d%6H(MeddaS7{reDJn}ui5ZD?_X7p zeCHaa6j6}7^M_+ySB`h3J_&0zlCZKX3G0EAM*PB;fO7)Q@i@oh-1EBOBw^)O64r8j zQBN7pjP*sVoH%KuOyjA-%CA*;R|L=8mU8JPyk*iCSobp=Ydey#wj-O?{UDE8T>0Z4 zw*?rt1(eN;al<(t=XjjeIID41;oNm)5RKbp|N@QsRjP7z$b zZjHd#@f;#hu2wzewm0c%Eh)UNvuIr$;$d1Qb-4DW#7DGywQs8#L_C<0jBDkJ04-F5 z7LKn#mZRcrCKEJPX{P6>4fw+wR_7}6(zQD6e(iXJ1bsxi3qGO@@kb@@Yz1Ynzo=BT zvqHoHD;8LVb-ChSrI$BBFBFkLd%`L$9o7cmy>nBD#uqcGF>V#JF7ZiiMA_?V$`?`~ zPm_AyC)g*7$IV?q*_srO^YNMH6kUN%!9A&4oqI=K#3tpY$c^Iy{rQE^pNG*O8S>rH zA9*|5Jj9igkLOvLb-CD)HA zNY#<4z}HXZBH&HmXNiD!(#N~(Vw873w+tDsdqodjmKSdU^RTnKh8b z@E^~GkA1ETD;(A@{}6uQu2D@XWfXxf9$Sv+7Tmx-ev?VTNlGY&v6pE{kZyX|@UW0g zt062DNUD+Zg$gg8S<-42cci>=&Nm`vhAXB?kOSm|sU7Z`G zs1OW@6!q`HfV7&Rp^6+!wG(wS5w%k+2VKpRZ2l7ms58(n9Vq(|NXSHH#D5rm3s31N zU8;6n;zHz|f!6$Vc1iyW#N{mdj=nJ_jW!=yWUp%PK;^oPx!HaxD#qoKgt%O^KQ2dH zBDXG=|Ep9*QZ4rx2+_|K_7_X`gWDU9rD+{n$-2vEr(u zhpUAima3!LCE%oD1B=KM$S+`Sf4bL&d|2DNxG8%*MmS7s47Tc4NZ; zIr1jNeLH-7mPw4yF|JyF-h={Kn(|z2R|Z*9eVfP6m6+4J$2 zVNLR&I60{Ge5=DB_gRj$nrulHR(z`^(A1$ZoUlf!{%Vy}=&(JfYyW=SPAne}>3iZ|3);k!Pvz*ba^ST|uv&2fE&6VXOE7 zgS1<<4{6zQF%O!p{DtnXR(Zsv*rWMhqiHsU;O!de``CZZWqVBXZag$RVOw_3DsVwU56b#Hx9|&K@D;5Ggw45G@w6SpSAm zn3op`4v3_^$I5u@o`#(3oX=oEAY;_ETHu1`^-ce2g>MLc%zYQQz^7km;(HUP~P6>K&mT zdsujeX!f*Emf1nISItZncz1S$khiQTXGb$-(eCW%)I_GzpSzs9`IBuz_Rs8Z&Hmvj z#~$lS8P~Y)kp1IBHN#Iq#e+e`uN#q_p4n6Wa+D!7|3`K~w7i%G)O8iInB|CZ^ZXne zNzwS`WRHVBoliyTNlf z4D=N69I47Wjg01Pu!+T}I_PH>WjZ5HuiuwD#W0yx(9~^1bUlfy6`(8c1l|3x`o$4? 
z)jvYJJHZBvyaX9aOaF*$>zUoc28%lKBUt&OO}$y@laOzn^&y}m$kcTN*)|TNmh&p= z&``#+k1t1rp7L7gSBfwCP)iCcXe)?H?oU{4lCh`3MU{=)QNQ=`#(l_o-^VQ!BJn={ zDSsq>AJ-pze9lI(bRWM2bK&FWZJ-YgBJPv1Y9xuH8Q8}^bS*ERX3SN{la_!>)pkk6 z2M5?-oL;0@zkql-&o|kmRS7yDpMA}a7(JlCuNR;j zJYkE3_u|%TiW(n3^4d`&L9p7!^Q&xsG`fvm;rn?T8wOE7KSUF)bob-7gGQSmmwl7h z=MI{w;^f@3d4mkeZ+<{ofys$n4&LXm9(J?6vA*7|ibyhKV=l%lW4a7Y?@-NVmQjlE5qjv9)-=f@q0!YXhcHEUC!e9@=Mykb;xiFp15AQ0R9 z)R+~mVB%4SFw-n$48FCQH@?f`Lxyv{U5w0KBRS{Wrly@9Px#6+Pxv}8mMdLXGt#;oR$O7& zu?J}o_E-Xwf+=)FXuM|)_#mG=szbA7)3H#;+d!1kb;^`a>ku0`=I~t3r`eIUMO8C z(f&#MymqoyTKK1e=L@6-lM5ynX218ddj?@YQLK)+TftS$>&2m_G%7N2KH2z8P(!0_ zuTBO&MvW9Uhuf*;Q4;{(qX^VHVrl+8Z%eP(qPlOp4CwSYfnhjt+K zfr;fB3el(J!iCA&fxS`|YPicSg5Q;WrYUi#>h=PzuW$0e+?I1 z=Xy`ZvBNanEg07Y*73Tb)3Uj$qTl6}PZ_Ucxxv%%?UASO&BtG~Fd8QLE29+k2Q`pT zz^7vv*8qO{*g}?tkDqB`ITk!MM&Q0NXsvIsH5qsK09(WJZ`;P}lF_CFc~s?7=$8a& z*ZYOG-@z^^;B|c8jTUvz@w#kIF-?R<#(e>PV~kQpfn!snlv#$ub?8>RY>h$~n>t5M zRbQ^oarYV?na%%noO+r`87}va61xLqmC0-`9N4R4ocg0`_CFmTJ)Mk74k-&I$(OZ( z(V|u2;)p3 zaEjwqjrANnUyauK23re^*C5+y|9EBk$4hVZ@%@Chk&lM)dJ^Ab(4xUHT9?^rybNDS znJA3YVw`hv5FM5I>1dsuiC$-y3|wcI4PR$hjOS6KGvRyDm0F-H^~PAx70K8lP;ir( z#;i{meT}eOkG}SqJJZ*L{_(0d^QZ>N^Mf#6k)SVyFkT9@7EWK07>yS|bsh9Yzo3#5 z|E$pVJ4X86fbUPB#cW4c`r39?%EVxt#^4->BS2p?W+HuI)G%6-@ffvxFlrAFy|ti9 z=f6sCu>!rRYt^8)3+Nw>;eGxw>`Zs7%-5qkrO?+8{PhW2af*$RUnnA>=3z zYqB%l(I{O8HHza%bVtABVcb4L<`a{ANEpWn_+?9!r~Jj{Je%H*Qnum7RwWRa{mrx+Q1 zb+lewRU)sU+Msim7w*<7s#6v=Y9p%C3L>j_ z7erMz7DQL46{@Ni7lyp=md?9AQ>Nb?7h@_rcBW8PrE$joU34QE;JjPZX?6vxCTNfm z$)xr|&4g&!o{`wMsgG8Pnh7nu1iQi9$qf*^0n+_Us;rE3fPni6O67DSV2bwc{I9RIvT-&k(QbbJ?PCZ2C`Flo%t z6KI`k34adVxB*(Z4H9<06}(_$xmfVa9QdJH`1^#``UP8)amNqDf?m#=s9lkUav&jM zYp6}0&jb~obZkPlBhP2x^KrDgg5rdkSf3H(noF3=v%iosdeG0upG%qDIOxxHXkA?d z;+yeX;x;q4#{46qCK-L?(8q{98QeP#CVH#={(D`>e;>v%9y13jmxPQmJ+&X> zpNFdo?6;70QOZ~_rj7rQGG}n8b;rG@7&de16y{ha##zRxxh=lzOstI>-K#R;7@NwY z@>gVC=6mcgITr_tDY2*OQl??GhMk} z4t5-}eh_aa*hl$cPZVHJ=oa?MF0i*@giHLe zuLX27yM^6^yPm~Wtff4*5AdD`J{I7d(+PIuV~>KovWAZQIa?~IsHc&?&ky}svq(or 
z>@@O6@JzqpGl7vG*9H2`fNPW=`jLRoW$JF^kyVTAe`u{H$KR#Q9Kc?U^9mefjaJq% zse0&&eQQ*&$8*seCAGcEqDm7vRT){g(lVwLJ1n6ZLj>)8P$tK$)533qOzFFa=WFaV zTfSgnFk8yQW=qyTTT1-1rBs+LWtc4$m@OkPTPiVIMt;j|3D3>&kr$&wW=nbZvt_p) zfCNJ{X3HqdmI3JdXyw9e`6yz=RTd5znG6A=8;5GudRRXt>z-`pG;8r5wRuzI<#dpJPC6lTuj}$0V4n0Uw z)@v_SZ)xG5F%xzNLeh)4WkIPUZ2v1*x+OLdw=vLsTKHKGl1vB9w+ot25}y6qGF&%g zTDo6O4u_mfnT~IMIr)r*@cO^NK@3NA6s`cf%PcK?ma_#O7?$e|>{8|Z7XAX-=^Jc^ zEb-oW(|p@-zs{Tv+1cBV*~6fZnU-Bfy5k@|PeqGS&f&WBFr7K)JnTvE?8Q%kM;wGh zvI^u}f?yX6D2wGZRcuW{ZNIYQs{UpD_F`7D5)R)s{q%5HiG5Bx<8ZoF#NmExC=Qp{ z35Nz#2M!aji^EEDR~(j@p`|)nP?1B#q0d4%Y%zC-!zZoX;qW}XCy$y%92)R>F{C2Y!sGiLZ!<_bF!$JOWiMybW?9Vo$%yI>*%L zI>)quE!eROtI^;=qIq4^1a|?(w>tA`XmFql_u)bCO|z;=u>Cy)&r|`6|PQ4pF;K9qi3beNc3+B&U0{N!pa(ypzC2te(Cgh}V3Lv%^%_bS{o2v-|y@PGRMO!e@ALXK48tj7`FpZ$rZRd$<1syVgnK7WTGtN_Z{xoFi zTA>%6sckUsIfARt*irzm3b3CuU|z$q^iU~m4P{L~;38oQYsMoZ{Nl;8s)`qpsGybC&fSIvG|Cm%=)Y}yuhov9y- z0cpD6`3UU?$*SYHXD+TL*yaOvk_jrDJ^xEMSW`D!2W`G8ro8tSsw5^ECb_s?w+az7 zWr-wN4X;n+Mra`BXwX)@Ze6aoK2{egkLM)TVTJ)VI?;6o1C3B&q31z zIzK4Zc@a=zkeY8>NS_L2a{C_$=cv|c)zD0G{=cPB)=cMcQ zc{5h7w_pb|vClH0&$4d&ys}fDoB2kfJfY7|qF3X(>2ouG4)+w}>In2%|1suk^m!D{ z2{=TZt8a})kQXy8I>hq&f-;-T*_vjoBL1=J&{5E{qRT@yXqBL6HA9aLpQDSwaZfr) zhi>67Iz#8^&1M~_>l<5Oj&A0sVs7k7&w_4j?>tAh@bBAOcs(?uiSTQXcAEwpq?sR# zcEa_nn3`sOH?*e?`66o2&B)kq!G0LKpc?}gU}*zdoC|pxu4i31BV~3#zC4ceEF9{h z5Bigxt=2KF2xjRN*P&M+_hZ2|NtN)ISO|Lx+fPlyYK9d<*y|snC1Z3Pq+zvSbV9Lr z)Wl-MG$Gh4#>^Rxz0}}r1}(+b6?>oCU_(KEKL{(G^jr@9l4khUJ7cdm-gFW;?ZBR> zB{!p@1*Eu*G%Ub^&u^o}Cozwthxi03zL)q{DH9KQ`u1rlQ;LJ?5yo?8b@g>$_%i3P z8Pbyf$=j|&mFr;>3}5&(@zZK%)c!87gR?6w*fy7qZ?TJ6-m%%&E8hy6A{W3$r#^LF3#DOLOD?Lb#YUJygG}U@$X!LsZ z>rv~K>m%3qS}#e?FXc+}YyW1}{Q7U^6dZ$br2V@0#%8|3A*<2sAsZvEMz)7ZY2gQ( zub>JxfD=o^cGA;_)z*MD^{N~74DH1fOtF0ZOcjG-@@E-e>rP?~v&yY?DU znSaFWK~x3*0_L=lR?<=6H8inYI^fv^eH0o1vKQx0BOVdkl5wU3>7dnqc4>7oH(EzA zbc)TT3zldza#CPw&vp*m!%oi zW~mnxNqEB2h^OLedI18NMTUDMmY0Qge8PYF#b7oQMQZ5G>qhq zH@l1@(dII79A66=$9wkw9OF3E&NU|BInRL`eeRC^Gx6LAduC(DSe_3VSDW!;tDGr; 
zzS8oMlz9nf%9F{$@%~?=%=Ev(hl%rG9P}5eKZA}S-y+bu$`;U{`vA8eW47s8ZqBf5 zwI@Y48k?aBlMYRq6=Crpo=OaA-7;<)eD}?KR+uk=e%SzDPX^kEjy?g3NCQqxkfiA$ z{`tg~-{(usOyDpJBDN>a zg}I*C)DtavQYM*ZMLm<+#(1T!-k2Zz6tNohThAYut&c}s7cUex^q_VrpCYI%vg+4!)2+7(6`9P9&W{A)A zPRLQM<$sM5NS70q-y845`MBkC;~<=mTE-aFI3Kpm4UrR1+U~4rLSCupi%kMY4K|bS zbM!R$UInQn`eHMKzF5rBCX$!P>wNisd0!#h~dxb#32Tg z$RSx~kwY@gB8QAHiyV?}o*SgJnGT|~q;}FJhJfcb36b})L8=zHBP+-q&3v4bxZ^{C zimwR&iIy`Q#Aj)a&oCEBoXyA=ZHDJg{@pte$iEfc?_3;bg06x9##3kTZ1 z=vWuRBQ5q*!W-#bF4$$nJ0FH{yWZmN!Yh9hctwM@-gY!2Z;d&&^q7>{fcdKJeJN9i zBXs_l0}E}I>4)WwG4`9o8*%oV!5a=lb(&B|?5D;XrGl>^eD-L@`WE61viFHMno&iD zcmo=PlX#<TouZh1b#O%7x9!L*$1zM+ZMN@dhXHLl$!X0{rl-?c4aFO|U*Y_`!pj;Vn}$ zzsxN1!+cZ(ZGufm?R3Wv7dl0^#o3{W9|PJwW9{GvlO>cNE?YYI;i5%{{f}y|U7sIf z(B3Y+DgFth1}0u9@DBWCm_JwG ztiwTagCclmp>zJcX1+gy*94D$GWYXZib?7KjYP1`#?|J%o*T5y$TFK)HUZYz-DQ(E z=9lG_WtV2xl4W-1_p;6Y(xYwGgq^5vT$;SrqixoN4CWh^CjZ@|ZPtYRAd)8En6hD; z-4$k=-RZZ@#`$ft@q%qO0k+vh*k+Spo8`bZyZc*gvpZp%WnP>dVw>Id9c;5fhRLwa za$%bVFw{qTmtdP+6{79;A+{`8(~%RC4gW=n^>2biX|g_V%mxj*tQUnD)@IETW>}MT z4)nHc!On?+m;Y=2(mw;*=Ul;Zx+AC~l1yN^Q6bvrIUyD=N|O%l(_+d7ZJJD^eSTlTSu| zZ$3%tfls{rEWw8XJ_+WH5s!Rh7H8c;_%Mh^9u#~S@QeyRjAq1`$U-9?@uF@p@yI=2 z2YBR!U@cvbM{))KMYx6L<>xuGYrKde6OUw?AeC)RShpv%(?S$j>8eEi;`~- z+~@ouwNWp>_@;O+ynOCW@LYHi*CM|7-2T(jM!ozAdsp8cbV9*%fykIi)JD&mL~ZnA zlcO~Bh_<3R5^|Vnhe+unA2-r06R~+1r z^StNr-YWf`u4oog^b?Wp6{^hxd`~muu_H$V8Q3XRxk80!e8jxOW$jV zE7iegj`LC+q4;7_y|mL4B<$v&T;9Kz*9&?7;rPlF^8O*=Lh}B-{2b){dl7N#z!!5} zeCfO7{Wl@Y0aDcITW4RI>XghrOa_0Vjdw|qp8B0jr9r0`lnR@l=Y8w9Fr;UAM*;lnvjq`xx9Zb zU)?3|pEyJ0xq~5j|0Jgfhv$NM|6YDjm+XpAp2NDcE(Vf<>UQ0{Mte|Y(rLA zR~$YQmLVU&;b)d6WR8hEcMhM6(c%ap>mrorR_;RX>+6^gaQ=E{0EdC~8VOjhF#w)1 zW^yPF8Q{=^np8hEx#_{qeq;@VuD^l2M4X@GroqyYSx9pdTjPOmqJs}7VC;LAn@0s( zICT9D@(N5|{&73$4Wisk6mXc(Ee%D^7q#L42qBr&`Ro&1E_UMom?7)HJ-`YV!>1ZxD2G-{_@zc#8A%6+cOY_B`ug(j5Pse#sxkR)Z2SFE1 z6s)?TLC=TicN`q_J9_zYA?kfn@cW_857u&F#)K@NWrGBU7o|TawFS1Lhi?*W$MCi5 z{ULWA7xG0qG~xb&rxCL!Xh-L)Fw#9|@VOAJ4i=&zo!72EwGBBQ(3j$HwrrI$757w5 
z6Y2xqyGNj?P+I!sJUypKRo2O9>!bBVrpfnn$naqHYK^#ix?L22lu4b zdZ6F_aCC=_qyLR-LH`|ew^;C93=5;Xex^{mi!q6Gx6BkuccT!8^1%KN(47Zsz=_g^ zb)vhp5V~6+_$$Kc?mn7-psAfjUOQ3E5oDNpP^XaEN(i<^H52G+b|;U8NOvA&deFQ> zbmu`P5~u;KngmZ@4yO#R2BMk?Ss%!m5y)YbR7;tO(Elp8Am0xM$+GQ;_4KRB0CkPn z;>E6m1F?$6i(E|&*Kf~2P}W6T_mtHuM54(c(UjYwN~DlzDo8XnBwBofqAcPS)LPyU zU833;Q`+l>B$_HrqRsiY=GfnH@dj3+b}P{|1{I@H9@yA44<-wg#i9y_ z1T{H;6~`+^`X<)o@FH*fFMq`-amoX zDU(L5$pPOj=EkegN_GgD_g;QAY`OR#>N;5caQo}&JL%#j(^ zpPH;v3sL`pA+pNLAGdeND&WLUvdW9 zp4iT`G`B*^NT4q}%CWW-M_I8XS)y|tdb#dFtdhG$ch+}dRQVLSqokPP^f*72s*`hK zq<$EONc}=4MUomCsjr)o&h^Hc7qKpe2R@FT>tcBMl~!g*BXk29`YT0$ZHT~ouyUqz zT?{FD@@S_BJTt_@p8=N?TK|j|9z-8CU8*zCb4PH`EL@F)FJ%_`I10KYgY(r_q)e<% zd5CpIuz_sr7n!rgsco!VQpLJ@v&hP3y!>S|ldOD?ITpD0Y;nikxYw)JAupRn0_z(*{3z=W8UJ*{KX@~uJ8u5Nn~)daMqQa~jT`lnep=BT zH=ozd+6LrY2zdeUFbQiLh}S8)gXooz7XTka$l3-EABDJ#n}0O8wgECh(Di%HPW7TD z7*Td0{s|4xDg2fz`M*!;x zGsAQ%tRW$fz=M1M4d><^f=%8XA1&!*m5Y4j#wsmn$H6=TH)@@P^3fB)JOVer6nO+~ z{G{vV5xDtXXb+iZs8YlnD}D*~74%~T&WG21TONUjSDAmTiC*%=84-)bSJg9O^9O8kbraS`Dasqh-Zp8nI(xUu%1a5xi zx8xB(H|{2nz>Vr9#AoryBLIJ3&N&Dfzf{O0AkB*SWo{371aAI(w|N9^J`1#cIwX(4 z&0j?ZGkG)UU7sVi8NNoygS^V_^9V3fXlobXT8x^1&Hp`*BgIjlyc9{RqcQP>lG zU0poKmh{`we=C#0xb`hq7cuFR44bgK-uO#reJcIo{IQqV3hb*=!lXp&l-z)dPkc{~ zE}!z)z>mDAA`>dH6Q1`=owVd110&m|qsww@j`iUrWz6t%SUt;XUe4Wjj7eo&tdzkj z|L2yc=#FsGs(V|m{Z3LX!Kz$E`tS>1nAEOMmyxush;KW~95G-8!H` zT1hM5Kk+>?nMvI?MN+=Yc)6GvnOr14?WDF`RouHaNwlOrO2Q09)dHm(zKE&$aTii_ z>{xc9{vi6rru!t(FXukM^+l`a40OMnpM|?@f2X^c^fSk%au~;nq4%0EeW@!6zUz-Y zylbSLaqFemhR0txeo*OSqs6yAz)?#t=U&45Hv8X~;J?%IcP9PZF}>zFP1pO4c<0>U zJD=m9d!zdvdTiIy`*i!pU3lZ9riE{aef%8vS@FIz$F6CZZr*3Y`v!%)Z;J4~axOTU z(UJygZ~PmFF=p&oH*yi!iIcf|d@}S@?4tv1eBJmZaFS9a{pdMpf@S}kgxFHZ32d-v|RfwlYZgYy-m#BgwqE&rGI=0uUGK? 
zqm9hS=bHBKZ){X^8yclmlIk5g_KuCj6&g?@172(T`+njF;9uYvW*E+XO1;aFq64i* ziJX!yaEgq5hk1$7G836EnFyvU9Y4n(*neX@5^)_K`Ez||IQQ4DYz)vh@K5h>b;ZYH z`vdrJSi8f=Nn-~-ekHU~#dY99#Odt)B8{A}ig?NK)agS&9_cF%1 zJcM3u^S2$O7p2&Cs=w`O>(u-l&su$e!@9Oe6#tu$78O0T*!H>z?Gx}46Tdwa#zpK{ zv<$isDY6l5<{P`i+5f%X8E4%~pGkZgsL@sb{#PBi_#-_FY16r{ulpfOw_Zz62;;SI zs7#ke8TAL}7>QD?``3SU{NQ$c|1ab5gC-%Ds}JUn56$e>)J_#YG}Bps#Si#PQH_7_ zS8h}dW;r=#^BVXn+?3t5wSUM{R2M>ySu)+qZcXOK3$;CHe+>F<6+BCW&>nu`-_!jqubf`VLESq%`q<*+oYd5 zsh-J@Ut-4{A9%knMT2S*KH7CGqbTaH>6aAP<@N&Zyud}YOZ%{69^&dbL&7xdmqU7U zvF?;LBI1KQCc~_=7DK9FKQS3v65C;Y3A)30_AocRh-p`3vSXOrzP$J=PK5tHnuiuL zsdZCcHYVt1a}Vg|bDv_){1u0LUD!cZ8e^3*(BYkpMmZ|JioRuNIq>)PRCC#bUHq6K zzc%77q0St1ICDB6J&I>K*Iah<6D^^uyVx3Lh#Tu!Jh1;cXkVEoH}e0(*LP9PWjDVT z_oUz|s*4?KT8?-h?1Wu7ufoxR(-pw!CEpBv1h5*j5oND0YJ|Vo;J&0VS>J~s7Q(LLRDD8J2Rvh zm~_Ef6g`s~&V|e!x-%j89PuaNWS*IR7o#Gxz@H*E-v*XuIH#bM=}tGQ#?U>pLh5aP zDd2M#?iz>dUn7rvgdVtrUVGs=*nc>>;!_^Prz{MYWdbfg5pXHhF)4lhT4xZCTM3US z6q(TgJXQdYzZk@TyYBZ5nMJswJz0}=?_rehBCZURc$@cHX z;|W^_9!CiHxE?>&*hM^ICImmaQPr35sO}bzE@Vq|herpnv(ip@Tw#ZWXXze~7jf4z zT+f0(X7e+s8Mq4jxZs?Q1N=Do^=AFAsK#Zl<`uJX%u;5mZpS;mzTQ-$h6i8PlPm#<_mZ{3o-Hd7b$ijPfB2 z%`}WlvfKsFS*mW!w9qQ^mD2;YKHbQUXVNmUQ&%s*1KaO;CXF|mivQ$2C8c>xvS+(n zDPgUDl4G}(<2gDN?TOy5d@{zWWR%%1v*g9gIBK!~EcC6>%zE0#;w)A{GbuGwJ~2`& zl09yJd$$$^TZ;*A4SRPoY_{6RKBPds&%=(zPyf=^e+{*~EZb! 
zrX{?nLc}`CpFbo={iMM24}$8OtgsACtDZ()SlvAD z4c({kk^F~GHDRbW&nMLvXz^L4Nq~lG&M4~TPLC=ox(`=(XrrKiP4`LFOtKq#RIx6u zW=|25ifbdb&q7tl!E=2QJ@%)Rq-d3Hjbw-R3!ieZ(w)T?EIc?bwP0a^yi@`X*jaEH zy91_Tr$9IIDBAAnTNAyv;ukn2 z*dcs2b}o8F7g2e5N@_9gkW4Hkzszd=j-oG%%5r~sVNy-*ULPNEZQ>h~_GCkg(U>O* zRUu(5x&EmC2WI?v+(gX9n~~Enu_}V2>XB3Pe^a0O2w+DQMRfyUme8-s+Nt@43(*%g z25@iJUe-QUFRzZPQ3x=)`L{4Wdv%KHPkpim{Dw>c)uZ=g>>l)~&g>rWDe5tPikvLF zE)V0oLwnMvY)I8IsY7Pk3970nH{GKqiU z4G!(hwExz)!T)Sb*Jl-gpiqQhcMyU}{3CBn!Lyn6#m3*`*>0)~b_@9={+ z)Nv&0nm%owUx%e{h!9T4cR`00X)kXY(H`yF7=h;%mJb`ZVow~*Ze|z%A!fR-&Aict z8qq8_7PBfUr=W+6gqFtyTaMtEH^B;Xi~V*(DU?U-bg3iTaz|0UW;naoR8pqtJybs19sl% z#VNNk>5B}^ZHo-Ky5+EEkHen*GjwCvvv)R}XownI<7qX5=QmZ?y8g?w46*@KvjKOMVVe4j5_b3Z(%%*cxubA+8>uM}hN z*rHr6N%w$G20KjJu3A9P_A`msX}8#H;ZE^8a}09)ZnF7TLw`}jKFrg1XZ26c7p?v% zk`~L^pw-_eX!S=xTgc_&s?s^;6&LbTQgwYQ#`!I3IVvvHP5HuaF}sA-b>XY{I(8`{ z+n5>h5h!p1{2)?L;9v{Qaf7iUFLWJW`2i_tWmlNREQQ7FLM>7^s{hqsrYtwP`1#P7 zLMx(Rw?jPVf&XI_u1>dqxD4yrU{@`}c`lA{+&>K5KMdSI!2Gb-cVU(E_r<=8PjGgR z{jRtt-$36kz5x;I^v?U(!qKG-a=m1Vh2 zm&k+A$L~QPUXQIz2e&h?xUt({s%{*pLG+%9yne}#Zzn$$lAvdNpM)*`7hsEnEl&6z zX-emu{75|072o^up3eA=0WNnrcB6$I4$M->*zAmNCvrwGGvewT>-i;+_t0Zsz_}R5 zj}_lJ_V17H6#I9<)ib_l!Q$u{-yg%AC4A3>%}#Q7jFa#^2G4ZHw-b9F zcgD995pEZm?y1Dp1(wD~;opX>vI*y>aCDFFfBfM5J=OO8@ttA& zF8Ee_4}8aA2JIQ&^Ki9ie4n?8_@0Z50mAo0tjTvmx_5`~_dx~W{Ov@AI~V_!jqrWY z=0YB3clcJ}o*lTl$lOwlIs#8(k5HUf<6tyPr?8ynkFu#z9NBN}M-dz0B$!cJj=Ha1 zySDU@oLg0=(m3IvlL4+}R@zfo8M3Fa6T2J?#~!<(JNgU`?t$RK$_=WOO?wK*K*vc! 
z%ZbQWZ2-LU0Z|fWqt9Tiv*`JU$f0*)x3$ihr)-TAS!JQ?73r59px|LdGKX%K;mw4pMiCzh85Oe$7E^op&U6BIXgkBPKW<{emuxw#9yp#P<_u z@dfPE6TWxwMD)e<1afM1sP~Ga5B8CvxYR$2Y4=F%GwrXg*T(><0Esjt=>MQyFH?hF zF&FBki}luF)%jum8u|TB^f49n)&ssYx!|{V^3Q-aJE)Mx{vyVG8QP$F>*zP2&BRQE zy{CizlFI^)PT{@>aaCgqt+&1muD;{@-Ax>2;#&peRvA6snG)IRw}C#^}RZC5Tjb{pShpxqi4O3UE*87Vh_f$MCTq1c=^33j}u^AY0Zg z+$!9)16LOT?xw|<+sUrOc@>Tz-RQ#UhDN*%^GDCJ>q!A}C+~wLOe5Z4C8}w_GvRX; zjhwa%(IBtP7n z{cx`o;9l7++_Wwxe8d+6qOTD{rV;m$!+Z)5R|6((Cy1$b z3*j0u09?J#!5llYNXkS3=4WwUgd;H4{{YOYikQl9zHG#-(=%Tp?+oN~0uJpsc$`AWe0rn-`!tMet!bg1;plZY(^F%3^O-_Cnq)t~#nTWg2;p!&9 zJ`eDw0d^(Mw!#kBtGc1_5rCp+-mJxYY1A|PkjDv-$Kjc-yjj`>@+Qp7tNoC_0NAE> z3wZo~pIS0EO_dJSwB)ED4`2&Um;0S#^Y=BP$pO@e~6=&t4 z!~=aGLm$wUPoZ^aQk^12eP~4;yXUxvmCz10hyRGN67;AzyDSG_n-1=&+uRco`e}HtS zN3EIM()Y+`p~s!Gn749A8qmU6W{A3B2jy)_hZKH=eb$E*^wl;LKU_e%A~TXxp& z-2R4zMa*!kp<}n62o`?hIMHvs-5Me6*289eR!{!iW#;o#kv>O!Z>E(rY}a!9!VpyH zm_J9_mLA%Ky$EBmGi2hHq|Hu#zExE-3{cLs#?~a`EU`w^sP@Fx4yuAj%G58oE1?5E zj-K}$$EaafLMQ(|>|q@s*=^EbL+#yNb|uu|4QXM!64EcLfWtx9l@J<$6MBnvv@U$V zak_^nT7|10BPM;~A>bE&k0)?mfTMGtTo$oUHjCJ&a*Nm}CKVc~Gqg{0uG^<`4(bzD zb_;!)AoOWMw|zPaJy6VKE2@Y(+Yo|p7r;MH1|M{`hae1j?vhi6#Kvnxl^=vWXklwuJBG!)mHmJ?BV0Y zDpBecI1ABd2kKLdMkFrqluX#e2XjuRUOBK!3ZxF&h=NY(kZHW^oQDy00 zb2}hBY*yCv+7nmXf3K65IesRrj-}mY9sCoH@z^Ib+uzsm=2a1hz+(x*^h|13B%GL_&{rfH}j`l^z|~Ck65VSD}d=)1n`r|0mXP z;Ou*#Yo8|i`(!bReQGoXc#Pf^ph&{f4zi|t>{E)+CtCea)JUuU9sKHU`xMosPmcm# ziO{ESY|#E8S31iL2R|S8xN!9#^6b~5AGzq$M4Sf;eWD8D0eP6TWzc5Y-PnoUnnip) zYTu@FKCA%rZ1kQ7rhml=@MX@S&LSs6u!=)#dqdG38Ri^>u+ztRX}oAyZ1-K@f_$K)4Pj zE}@hXOl9i&5sYo2fAfEYeJMvF1mEx4Xa1Zse`LGO^XGHCnc4e{_HXUA_g-u5wbq`? zlRU>(c#S!>%xlcCC0=8WWqG4>EDX1s=h%hdP;+c9nG4x}eIi2+VBwO{>)?&#IW~ov zXJCuYu^sfgoXmv-+}${UB$jH9ZJ6tRYci6fnG^ef0VVZC|aA$RWKF&#xmwA};xY+t!? 
zGK4pste*2#u7G%MNy&1O;hb2{{Y{Zo<#M2p9@m4|2I6T7@ig_;xTk3q@mOtn&BiKn zLPeMusO4@Z`7R$s?!eFajYI6mt5jrGt!ZuPCgWz9yGU7es9)^;paFgb_YTUnJhxB1 zqSno9cjf-7w(9mUcbbk;m@5Yg6y~yl@d|Uvz~t6!_o&0%8{is$gxxqa(X@764-7S~ zi^=RiKn$~3zr^|@lsm^8)^W|Gr?vR%#>Z8@IY8fM5EsJ^5C{{0o;a@3M!30>T5hfV zLvmGN3vyHLXHN_?fXIZ5sF1BGDf#U^xVG5a2mb#DlBr>U$pVQxr~bu=uQ znw9)Dpig0)iLWsl`)v9(W|P9Bfk#$RFA+b`mNnjbC$#Jgjaa-4Jkg!u5`4By;aN(L zOIc-;SJBh7nNMx*fD;V!z%%%Nus`rLooXi^ls4ka^F^v0<=$9VzW)ay8+pd;U2yS>ndo@s2Jz6R}{Iz`o+*FcfsND9jw#dQWNT} zV5qR{9c3nI#t!PRz7^fu0hg{(sMqjDQS1%b;T`Z>5bDKI%Qp)^)Qj$oWyHrG#mb8Z zS}YS$uSeRv6Eih#n0Y{MryQ`1Mf8Iv=-M z_%#n1BfTuM6Dva)PeEfo4*F$$@A^{B$MtZrm0z!8r&+6zt^vzb)?uqGBlGbhZI)77 z8eRpaPV8nyln?$O6~5XCzS$9c-k^bRdPr_IG|$Ka3&M8#J1Hi6B}!|h3SqlCJ0H+r zR0v;8i7+b(VI#eNz#a2gOT1z9&r}E((kD`$w4Qy&@B**049HpP$FPGJZ9nlU<`op)2GK>-?0J zzXkq{bEj;GxT9*=-t31wX4hx*0kok z#~m(VSDD1BTONGGw5uFt7q764ahXXXpQ5NB(f=c$f<*r_w|Ju{_J+*Y5#T9cHpIDD zzIh&mIf@Dna9(Cro@tq{(q<(bd~W~_z#h=_XU2Rz@cZNt!@jqOawgAbp+{%n)9JA` zLXX!2!u9#=LqZdwNK3#>kxKBD$M6Z)$0@?PgVr1-6ShH-97U1rloZ(^vq#B>B0mXQ z;pQ!9&0&2rnCGUX$PL;wQ9CWT{5gXjI~QUj!4_9bS;fVV>pS!(?r4S!U!ew(>t zUSPW7j)x=MQKYzoOs;Upc(|h&?&xQ4sP{%^$Y{6ZV`CHpWw99&LMKeWmdm~)lxRoS z&W1N$MAr_nwO6P>6<*wLQ9fhx9VhU1GbERj)&0BAz3R6m0C?4u##p8`jl9cJG%GH1ZG< zF5@4*$om^XH%DPNYpRXV@j7kRQd+b%8b0x+V(hrL_4t6h;nf z{=H1?8SI~Rl#_WxcX(Cl4kO*HhHcZ@XP`U0lJW*sE;u$g&J9KYnGyhasQuHugG%Cyb!&_m;<6$$anqFVPoj${Kk4P&x+($ zjB`S_M_OH@RU`G2{V&ev90B|2bjm`W+o%3U=@q$3o0^JVG2b^v(PN&kNYUetcS3|7 zx4j{9RMl_?aOSQthwZLhcSiFQ`-ZuuK!c(VZFdNZY(;EW2w?$8FWbQPSO8cF(T|6N zTr2<~^8Y2uHpA(Cw#7~(->e3Yh6O-0Cgvq;q*b<&HVw21Q+wRE?VIGNTAr!RrYz@4 z#(d3dV7}@#FmH%qejdy>LRy8r+~yi{I0KxSyK>!jm!KBJ^DT(0MJuwb`fOp)2|!O}6JY#fHi3{{GZ424gvbdu)OgNR=wA1UO+c(aGM)!`!>T)GfTi4mPQKYf z&x;hgLi&H5ozOa-(x!Q*5=I3T)JNcRrNdL7bt4}|o= zV5T$Y@NoBNX6Z$fohrnw9Yd|rSJ8-(zCb0b)eMDd25%&;(INdb5?XwJl9i~49v?#^ z#x@cN>uXrel2@xN+O$!-j68e2^ROdgFWp1AktY$?<_NAS5nSsB44pB=J+*P2ku`zc zApQEgQX%SBI%8PBiVhsY#~~FWkvXl1jzRxAg=j8)wx>kYMw@AY5OHexV5kAlg{LyL 
z4xXA*F7BHU+izhT)EOA5M(08}#LW;k=g7kjMtMhQjPjHaOK*rf;qf%Fc(atoyjkE_ zg=8~GgyR|&l1APrjJ;u4df^01rhDP0dV1M~_KSq?7a2wJYR6YiIfD)1(E@#_-NiZm zJo?{`z4x9oQ|sbMoKGR_{bFMaaodmB*kt_?8(Ro}zo?CkI?H^@AHr4<)^{l_H$Q^z z9$GG3jC`CtpEfYdjBzq-Y^+#}PLo2nmj2fy*(wCxCE+@1%LZ11XApaD38jrEk?-o1 ztsBGkm&^ zGhS@jjg$*`631+H$XEkcg^V>YpD}yhA0M+BmKEcQ$ku7cY`#Ku1ua&_>?B%jh&YZ^ zV>X?2#JXbap~q7cs%rY}Ptsy#%(l{|nA)4@=v_}Dmw>C8ay^e#M-Q>zi;f<`+AKOc zoD`hYYG~$~tE^SHFRq=3^yNe1>OwE2jndA;`aFeeUJ@>w%#{S~JcJ~L+<{(--r)_A zK`U`>m$!;&bBx+1a5=#8Jhu7Q=VodLcoK0nM{v!F;F=Z8s(1+PQf$_OYnBDq9g&RF zj@LSc>kitb*a&W-T|!&lu+A~*;nE~r=g_5}QMeY;^Zb;!j?!i>wRUjTfaL~u`R;FE z>*Pta41|CSbK!ud?9F8xr46g)c9HZx9VJs4VNr_|_-kyUqx$RruC` zU}{Qy7iqJU+S1@EFm2_Wp@{Oq*JZDMJY?Vg13VbTXFzmf(T785n)?SA<{7%6_5`Cg zNh=p!Fo=#p2B)BXdrc&ZaZp9or1Oc9eMc+ngZc-Q{wq3eHRxW#@675GBP)ahPnozk zk9@P5o@@NV1%BZ^62v)tm8{-8(rPiSg47-hYy(plHu+hUr3zmZTS$KgeO_?95~?2Y zO6XCrtN}|L#~k{U&R;VmcUs`v6C#6|u|8HR9F@Fd_SS59P5L8VF;*N|^stRzxX3TG z(9fvcodd%56nhHFCWTcNQVY%cqeAUB4^Z-EXH zT?b8{j(VJm7f{>-PNV^ud*{(yN4i;$@r)1ZZr(7D-mcfAZn6dSNOzEU7SWAm?v;W{ zd=$-N*Xxls>uHlqZKwb8*ElQU{9z;I3ZBo3QScSG9NgIy-y3{7W8rI7IZQWG?(Gc)laS!_VIMp2YqI^;hAvIETnL4k**kFUZ{A7i1+)pr6G41@ZI<>D#EC@4Gk)UmDJ5o~K;G zlPL2wDgT8Knx-N16-E3PN?U{aRg=ye)ZZCG=f%c@CeNzqll6@C4V^cLZ9P$bkacM` zGPqgZlaUGO)6wM7I_UGqDKx|&IvzFvYJ1UnZ-0ezHO^uVQoh9VY3GVT;-fz8Trr5Z zmvOFG3G#*E_{n0BGmkG~mydcpiA6SkvKYdGE65u_F1l!~LOzE#5>FO`+;3n;?qHnC zKy@137}*%@_j!ffda@Yg79aKiYFByBEHP}+cV}Xo;=IO9xs#{nRCHeoqDSl>tYxn? 
z_F2YoAi67!hBH5Ar?Bp>=GFN*h2&$jM;XH#c(Di33KM6wjG;uF#`#jdIm_5AQb-oi z*F08l>#UYG9ki*X*2Wl4q2H6i@WGeS^?A$~-sN0dn-t_U!rb@WL-xHu^?i4w?;BO$ zH>T|SeTzT1^lKUYx`U30O~fC>Z!J~d=hLQ}+C%8|P4xFw&WFk=9jfml_eAX!#mX~C zBp*f`{eBXEVU5z~#TG7d4^oJ4Oib<(t3{AIj~-*aHOJt@trm>Gzn|V!flf5KT&xyB zVjPmK7D1#=Yn)AxXcp0yWWCrz|Lam%ErR$_hV%C$dFrEaDt5_P#=Kfbn`~;Ygw1g(=f8ygk8`YbluLNbJQoa!#k9y5 z&h#h6~Kq& zA~HqHnl#L5$Z&bIdX?J7@Pq02hH&n5jIx=>h!hHPOJ>vudkj6c(c?Tjy^d#9 zktm^{ewudksm;ZnBK$L-{;sC)_r92^-Q=FR0J(n>ne4n+(#~ z^JK(?lUmTnWoB$f9}jYSMyi=1_hK0PPPOvf4C*WB=OB73v)@av)jfG;ZBDQ(q)k7y zWMcZ+dis8ubFj&jc}X}f2Mrw0D;!!m+6}W@Gy-FWPahJ;N%U{#Uj#>q!cmeEM;r6R zisJzq=Wc~#7d>5+5=W3WTd1uA$MI?S_=95w^Y!jf;Io2%#C(>Hl5$d#!natfIb=-0HgchFsJwl zBmdK`jN0y?@Ra;__X+ku&gc$NzQ|+cD_dkdjs*<98qa#%ZyAv_L>i`BbAz0g%80B% zj~32)#wVP$h&K`~#Yjdnvg*7&%30Il{>$jmLGDYZ=d59gmg1mZPMb5-ZU~mrcLz8+ zYcjRdJXRd-0fVpV16sKt8ACs|Sa3Lo#POoS;fUb4?JaW#@OeyyqbUhTC-h#WaIB=K zvKA-fI7gdjs4WaGrteLh4>~Brzaw&4DxX>Aw~8YOV(}N5GsyiT#+iidk7bNflV8rn zZLu@)0C&Qrw_Wj<$yOO8LzE%6Y*Gj}r8pB0;`w8iz=W%o)BC%e4#RcfAW~SOO-|Y@ zpiM8euW}~74qWFr53Hih83Lnm{*MHp&cfj%6Gk};Ms3fBy50?Y(;wjbo)6yeWy-blt(mV|4w@EtaRAX&gz zf3Qp@o|Xl$X9YR`58}6W`f+UEoNG2xF5nS+l`ST>iM^_*P1XowzBT)VA7#cBvnQPn zh{R@7mMU4#1NwZ4XpYG%)_cp5JjmA=)aQdrc9H1{)pXuSTs;E%N#>$?^$5VvLG*v& zNp~oyuV%eZoOJ=+PMdwyF2(=8YYKOYai-Z!xt=FAt_=}fRWV%RgA>%}B_Zo#2T8v! 
zE>p<5&|U@EB~GLR_++L+HZKX8n{k+i3A0+W1t=xY+T({lqN6L8nzW6DAq_W42a*ywjF)lb<#i}zMZ&K^3vlM+To!%$% zij445=pdS0oub2B-bmE@0_xPs7-8|Fb-0=7V0;~4efU1e4#4?AqDN^!&!>%>+SR@* z^9{Z4!Q@PBKIfyyDfjT0>G9Q=Pi-oCZ1hh5RC*Mf=pnN>z{z7GMXsUOrt`67Jx~<6 z<})brI+}0*l&L5(%VGl%inKA}(-cLfg86m|O*lZT2O2ZA-RO^JziH4U_$a>aa?VP* zl_zQo_)3f(Rv+U4w(w8mjR2?r!W*)l6&yBbQ3WlcK3V~F{-2%>a{kY*5ut;e{|9tA z|4-%&>-oRXp$Lp}{*SgCVAn`Vhb-E(Q7h;FN9zqb^j2dZ;~aJmnvMjtV31pzFvQICLp);Ec*vvU8$m_IdquVVGlzm&#}^$(pi zfS&V3ay~~bC3dH1KLKP;>#iHXA2z7ZQnC6X4Frfkx31ix{RH%8T2)i)N9WeSIxZl2p}?S3Qt=Jvf}w|+tF*6;hu{-mn)k-cyEvi?LYg4|ub?R{VQpH%PD#FFcC z{K@;i%0J1#Ci+JB{B~u*ZZ7P2%a={-pTAHP&Rg<>P~kRKlM!M)JRbgCeDs`;1@sLA zq_66{?OT{l?EFoK|bwZbum7HgBuDptJK96x) zkoAJje6c$xw6c$W`+Z+=26O6rnu|G;k>?zJ*v49T9$R|;hLTo2{N{)sgvM;pm34h| z_D{kWs43X+15Qdw?to~RQ($AAw= zJ_5*pAiIt|(ri5uKi+_{XgR38jLv(u0vkBz&TA>3;j!Wt#177q1-&4T;vN~uw3W0* zu@8-Wy#@IkvCGG7D%Nu_(TxT9oCxwNg*-9)->%mL!>jN?+;Sw}WP!X5Z6Ep18_<`g zM1BX|avQbtL+8s`GdYJ|N4bROv*XUF_-wfEo6e1x-D3Goi@R3gK94RcBi@0XfL(_- z%oZ~Px8%jJ;@$?PGKISoWL+xCKWeR!y9$Ih>uHlqZD;WEB=RqCF1?X*1?p{Ud&q^CQkRi)Fs&zFf3-32?eM2q7!b}8iN+i#3$m0X%*UMr$zTOi}{{7%^qXNkrV+Yhb2 z+;INDEwYl*3VPU~M>#fz02mW>A3MknidTxVF^CK)-`L>nmVr0nqdfxwbw**;eWcB8 zXz?Po^MYr~@a2XcYbY1-m~9N~Gs?!m?xSoajETtt=f@{Jy*?1o7h_*w2BMdWKJX@b zsUOR3Dt(|f!M+gC5756E*cSq5NUR-L=#uq;owTW>wm)!pA~GK5@NUYTJXQ?Z5e%pO zhP}tHzdI1Ozxj!CPOq;8$c-;3)qi&bBnLl56U3x)5(!VjrXOrgzr zYPpB`XEpRa%(;7aDgG}!hAo#>E*!Vz`t=g_eyi0jZp%epCbC{^xdFX6(r25u8p_*v zt0*@A3|nph%b$5RWw`x%FBILtZ-^}y-1JN&lrpE)mh0C$X}5~nYWgiUx6Sl-1AU)L zna3mhs$=^r#~(iGTrk`@rDc*+tJqXG)+yTb!>$t71XnaB;&}MUpTZfz(e6i`tg~or zc6OVrBb-eC6lv4<<)^iZobu7`OCM+2{s zky%Kc5z5KwzfkTX>!BYDKq|^PEnfIS6*s&zM^SDTw7IW*(xaAMIlYlKduTI-+Au!p z_rGS)E(?0C!x!N6!`(ML%i{QIyX1+TTB`^kv-uhZ_aed>+*1$1j|5j?pqrO;)gSR}gOG*TQP&*HA0Y7KLsi@O=Wc#n#%l-()3#~nr^~#Ej>*i z1Pq#PLPHmtuHuFPKUsWI(X<~*8>_weNefM#(CcPEXnF(Pv`xhwBy#X{+FYh~hxeJU z8Z@mN&v`Na7k7&AedICkU4ASrBi)neL&JdVU4C7&?7v0W-P76Q*!{6KuycbY11uWW z2EYCw#o4eQpGWJ?m(yGm#4^=kW-C+u6e;>NT5~CfU0rON8uP@LgDZH2wMwl~Vw|Q#l3v 
z*wd$~oC(T;sPZPr*}h-jF(CVr9lkV9{`{;t>HSf$0Vu6_Cde}xzcPiqls6LnHT^0w zCeDkQ@S+B)J!r*Pto(i?vBVe{X%lAbwo>aHI9&+efxD4%0ngC5S440xi{M@o!JQS$ zrC=Z*f5gC@mxTN55Pt7Sz2NriXVH2E_aS@${OEwGa91)jt+=;>saWAI0@;luzjwj? z9BmxbUJjod3ugQ@nkX0ZG@aUZ{P%v(?)>vQxCPnULF*k#`f)_=taz&4?sT+vG|0^e zyXM-+n3aw7Z*F}DcSOwYOuFY_7+qkDaNC95&gM;|%Kf-rzpAF4L_5R~hqtK0jHC{A9v1=DrQf zfvg=BjU};P9_I9tw+L>vXwdQ(3QT`NTHL^L(&YO~PVoJBw+65~`}HXa=lj3(?3yu5 z-7hemI|dFGpkLu5vhDl$IrHyo%KKxY@~qI-EYF@qBd&<`OAwdE5HtVrh0rDmN1RY+ z;38ECnti-wzC})M9s{ujjeN6_9_Opy5P3F-zF$qyjKw#>uQ$@}f!B{O7V7|gy^J5o zGRj(0ze~K)e&;h%4fG&gZbalim8FkXkzUuTUW@i`?sbdm^_v!*R(exLZ%(T$eF3sr zpra)8S#l$?+tcb0wX5j!U_Nt~KD#M*@E95T@}SfMORpaw@mnn~ao-iJ!I56ySEs@O zvReqPdlNKqqt`2wdVLYib7`d44fIQLv6`(fMz2rMY96)Op}F+)9ehAO9*x|~W9U1q zXBPetKj-xPQOVyk_~T%t&)Za=xAB&l(guHIC-r#^y}|E{-?#ve-5(H5#M*0vKN{ei zFtx|Y3$Tm6uAtv@D9cR!e#g@9dGx^Q=^giu@N@oeWabsQ!I-5}Rlle5meo5#M*0|h z6SsQEH+l5uRenP<^FlFtwnFtgDshN+gtWU4rJJdJhJK5lCjVW}L&y9gz7dp-JQB;i zTVk0H=b@z)w~cn?yT-VDoVF6e~)w*2;!b07cI9E3hHw|w3l zgaN&V9E78lgp8!>FHlj?oUOV?a;x{Bf0FTdnX|0ZhmAubm>+pMnlZ`9vs78nXXV#K ze+Rleqm>S*J=W7%1cFQc5202z^ti&FzfGN)V-NI}IemB)$}fzK{hr->JM1h z#xCbK@MejOW9`kzU60ldni{i1)v6 zYzG>*kLOX zaX{q@EZR37Iq$eBiqyxw=jml`KT&6bWLX4>Qz5BKfyAd@l#w<02FX~%5BR_P_18nP zZ>1WM0@`#?yT)&$?>F(MIhn)x9*-5rt_Y6VzNl}IADgdvMD7i7mhUJ2UvS)0XZe1; z8%^G)%jtijmx51Uk%Z$slx|Qs7Sq$BlsJyl#!0QiKa0NCfg?B~Q@h5~bm}PQcG`$J z>YTQSbNm`hE-19}Z%XXR4?;QL`#~u81$G(V^lxTNvX^%?WL1>Ce1-f_j+!sLJYm

!M= z*R%00;<1i#yVu~=dKF_Ydets0&QZTYpY9NS=;_p+!+zrb1&t#z!ne^5SrgDFlSjBo z=B|ZPuQ76s3e8ITTb06F!-pJ&-a%~B@IDF)BGwicIWd10Q zF|6~4=+zf2ILhlm6LRv|V7C1+F=iWeK=^;$hFODXWj*(#qii3$B?%;yM$_ z+|TM8!Bwhom8Qhio}l~s(D$*IMKbpbXZV$mVlu8EW3+|ZIuoul4$h4q=PZzN9#7M$ zgUrdY5t5y4a_cB}b=%barLMb=}~;z4mej9uitb!C@OTh+?-N`E=~dldzvazezvt(-d~m{{_3~ z|30PvCy%>T|Ce#!2(EmEJr{JH2^y*RhcN$XqfvVvJ^E-SdeNik{*-k*<~`TPE%U~j zzDxP?Gvh2W)k1c{boN;Gf6lm>f65+!?awbO*$m!D-f_<$GsWkz>^@@6I>s3{7GbYX zU(0HrEUjnJrj1(sFm3F!_(io)ZsLjid`Z4`pS~q1`5a_@6hCVpUNwfFwU-qz>ggr3 z(aVW*I=^GA4hoMK?Nzj(PM_epnsi3p$hQkEnE~XR<7lr=g{O`lXR&fy�wMJ8h;@ zdpB4JhK1m{ltG*ek6|Y;?yF!|4Mwdp(&wG%&*}UO{AB-=G1#dzZa?_|g_G9thLw|q z;&&4?Ze%?;B*>YxkMjzjel)>KAn`>$G-DtB(y|h-pr7wNlBpF?-qWIchn6O>x%P^;eB2c5%gT7mmrLoO%*znZ z3yHR^RI-Ic+j>>>e>(e|$Sc|xWJP@m?RgATY4TP>m9?Cbo2^DIuZ@8Qx8acZiaLgy zHsNV7_lN(HU+8AfKcuY0aj%mekGwaBHt$fo2mQI>6Pq@mS<&*E z)UCEqf{c}4@Dk6$=qpMr18Z3lCGy=~;#p+h@o^8+0t zMc%7Z?~RYWC*H3a(B#5^{DQI0&%>@H-&I1V*F!&HG|q(Rr(askOVCBUdOoe()UU># zQ800sR*lU)3tG;=Z|}r?o3@)r=(_*-`~^bS;m~zcT_tpF*<9!v=PGiE7lp{J8+~;# zsuuouWOMEh#~&T#%5(i`Lw@s^-Qp`y;L3rfqno|>O++*Pc*(aS6!q%wfbbiXode?) zC5wE8sx0tLP!t_+qG)!cE-OaSGoeqT==2ChPok&Fp3sKY?Pb@M_pB6cz52eu=e3Rci1vBcGg8?`vAuZnBDT?y*hYV) z58O9wqh9P9L)%7W-Rtsk3h zJS?_R`R+V)ZV0}{e(1%X<-_~m$9)H>Y@nje(? 
z)^bV|*9=&7ux3D;NT0WHZqNG98|D>E?&*LstZVVLT)tUNA2qBQZ~!{sn<77$;%#ML z_oDs#(EgEIyxZvKa{4@*vYaOxSCB4uxvY<$wy(o~1IE7YWhaShhl09lh`0(Lac6?M z3e>Vw=c!_W9ZQz+=V_)Ar zXw&wUVF#w1!XxuyU+nBaa@`uI2VV5aYG=V=^aGcy$zC#sl{v@p*;s#kWHpe}1utIN zIpl3;2UJ<>#~};c_Kjl|k-1dRHuf{lr8~aQKbP(Y-}Mxzxdf+%KkJ!>mpInX|Fg&Jd=>&LCvgWH^5(cBX;c>9?L~c(F)-=qZTI z)s`g_@WLn<}V>})U%5?uqw#9dYe;rctT}-O*{ed z>P>3)$7i{$n4R#&D*rxCK;Urxy_{bxS0^a7PV*g|2k+Xncku^wQ1<@2%olT>bT3&y zBnO)p3#sH_^RnLTAKY3K$-)-Ns3Kg!xl(_a_A+zEd#OkqA;o3&e z=cmNgPn(U@&Jp{^9h+7UuEmtId76~%Cpvbe$`_`*&5NJ8fzJ`a_YC8=x$GfqW4VX5 zX=CsKnKy#WUgg?uBxT0X97*RT!@*GFI0L<(HfYQ;j_s@;UbMng<5-%6cQaTD(fir` zLG(_NhAHE?mNs^3FQWIK{9iV$8DGLxl=FGg;q6tq^}O7uV~pp`pn;d%dPlTrf|j_j z>Dq3=?8B=d5%C=QpH8Rt;r}m)#s6OrS1H7mDG{s8n93?2<=iaLw0Q;bKCg^RD$c#b z&c2OW{Qp0;X*cngTt~Ttr{>fTRX){hcM+r7M4mmFjU_=ZSrdo|E6a3_I?TNc(;~6F zF@%G@v9U7;)^ItWi`gZuv9D+VXy3F!56uMKX)FeeE^k;ZAeoHJ42uOszGFg;Z=IUVTu-oXh@ejyd=0 zcexqO%Ck$^E7Pq#Uh@CR2rgAt5wQmdM%i7hYmcm&7lij?BPicg!HZ`HF7p1F0Wb0Y zi6ba&3TV?o?HW1H2UjzGm?v)Av=*Lp7`H_*z8Jwc&4f{+7`=D^TTyz@0@L%U$SZ=< zug=eX+^kgQ^eEeUGRj3sC{Hj#^At)aI2m;PWpWCL9OA`NEp&)_ zrHCiDv^h>2C$)}%BhQd4XF-ETd`E*Jn|6)IOpl>rn7!=wpBBT+eDcNVAsp|)2PBaq zP2fvsLqj$qx}9|~LXieVk%kl$@embb<_tkQ3$&d?w|j~HM;2$tPNqmTZO&1<8J&JG z*Pux?bl8R;>W!dHdy^-sYov=|_LA-Q(|Kc>qD4H0*`seYWi|7Vk$EUOtidyY^%s3y zIO84kagW+v6Lk&`xjz!=aGDXHk7oe$AEenS>2QcPv#7n#xv_VoL5DK1i;wEHfK5BY z6Sv9BURA7o9lY%JPm~ejVL6Q>f*f?Qh{EJnZ90%hXo+qIfoeVLg7dQz&y2@XQ0xQt;R( z2_DUY=OmiCHKNIkXIc!;37>&yANsvVZ{aPgr=0YvI0?@N`k5EQlS7ZMDjh$%CK-8z zJa|!xG{7wMM)4f;*|h8UuP&vW!(-liO4~dVd4TNT!QwCHC>|mYM0P0IHyWGcL2fc> zQl8$zgLB#C(w%c-F^QaWvu;NaR-?;%$Yh^*&MgSt7F}Mx$)@KABIn#X?XN& zL!4uK$$A={Wu0i8Vo^E6Ikty0kHmBA4A#q;%spdoHS#9OSc`5Pon?~)tWrtm*z)F9 z+7(lK({hgO_t>;``0=(-Zs4)vyBC%-J*X3gk`a9NaxWQr%=lIeiLWbiPLGv1f^VL} z=S+$3lI5IU`dv-G&qdDZy_^Sl$jgv;PA}~WXxBmQ8VkN`FkQfx_Dnyv86Md??c1Ze zTA9i_DA_;0(JE_wu0mTA9;5s<7}Kz5+oHLY@2lV*67kw+2M>tmVvproV*ejPHw8Cm z^R!XAsn~~QbS2ZdRdTX{VIEu^mz?CA4tTPbvw5rmV0*x>YL%R%O$%+NQhOWS)B#5E 
z8GgObrXAp^QFx>JOGzxxqK8;q(PKPV(^)kG+Ah&y1ZnBMiLD0CUjJByGwL@5ZAJG; zqrRIK8;Lp$yCP#B=i*E-%mBj`bW-T!^KeT|vJT_nMoY$@+Ku2W z1KV71Uh8Gw<4KRRh5PgbXB{}*elt#yb_}FzlaLOwQ>8nv#$Kn84x)<+(k^sSRy^KF zM!Gx+>6=)C7AvIBfTb`c(qpu#qc$r%18lqSL-zLAw5vR3UDQLqUjy5_K_saNHj!S) zu>G*T+|XY%u*FAk2fZw}j9^)P)ME^}11pM$TcQkH>lCi%c%vxxhUA!(@w$_sg(B0? zXFpn~m$PyBR-MB~_Ysk8<*m82xlZlAP;?(z2BzEi|K?HN?KZ}dI9F3XbPqeckq5mW zyPV`cH!@0zR}4HM`)?ZCH#-eB1KNN`_WB&GuB>CcVYPkB`o9Bzk+|(!z6qjDZoy`N z+)0n;CV59ln;W!gqIOznCw*@KM=fQQ!V$&bL5~~mCg10Xeq^MmtRj8C$X;vp-j7Es zi)JiiaZzc@UcB6;??-vVysz)V3)`dOVyu1NNxv%9Sd`PdUO69#`87%0zq}>ZtbA(U z37Yr2YqxCLJNPhfrCiNp-q%am@jg~MrAKd9IwewHL|;aaA_JYVs|&UUcG+~gq(}pT zts#P~7E6GK6XC=aQFXJOv&% zb7;!ODpZdnV+=)hPh(#di9nDU+CS1ZT5>N@J60aERXw}o-E;N7S4rGY-6q-MHXmgd?U0|9Aj=T6h-}?Z$ zpC|GD2lQj~cabcQ`sMxyxX3T}T*&&9>Yj^!WV84^7yWt;@qX#MqeRq1j}Ti5v_M?WxD&(|LHHa#erpDs|9Bs<=yvF;hY`8 zIWvN@*Jt4D@|o|s=tnj)<7=b0>G2gSd~Imzf^VzBw*z*ZTzm*gZZTK>na~>e+1mvbT{N=Xe zJK2iwB))UmKlSJo~K*gxtX z*tIG6G)%sU?G~T$8}DKN;JJ;QXV7=SXQJ{8UFcQljrS3iz%O8(B*YzdsA~ZakJB ziiT}6WYAbZ=fRl~YN8--qn`|g|fLMN39wemYNwNIWM!0z=wF`Z{ z_YK-!xna{5 zw7n0-t+ZVV-99F!AI?XsJdqUBFSMkd}{~pgK+OKl!rjyax{{tfXi3fmahvEUi>W_A5mC-Zk<<111 zx}SI!csg*A_qWjdI?iFNCo1yRP1-z1?R0PRb(^+B{NO1k^T@gX~X)RAl#*G=T>>un}SnHg?Ex;K+(!TWNS6C;He5sZ7@C&S#KV`N2 zg^`o#pUKIzri+#Hy^$Z)zL9NU7qs$*34=*5lU&#i>%U@eR zq4h*0hTHyj8Pay{cKpBo;;Z#1h~u7~9LH@Zj(e^V$9*Uk$K8E%;^BU7>-s>&aZiB$ zYy3tW_k>RV3Vv|a7MZ=OKALe|G~=HWx9v5>ZNJMnob&f{53@*#8O{QJuL{4lel$XB z<{tNJ1&qwA$h&cQSiZ?(tlIoP^2on6{=J7^Yryl*?9Fn^Gyhk1?UEC}RMyw&JU8C8 zX?A@3*HJ#hlRU1gKJ&OPwTx>&GMxN%>$v9pSo>kEbH;ud*DtZ>iNAQ(PcwcfxuNBsU$F6A&Jpbk5C6zKp5~EU<@-jOk(~V>7|E7kd?a6tMC8jzsyOGgBiZXq zHImnT{rbMJ(BBxzTKwhuv2mmt$u}8a>qwR}*fq^rm~IhUx=y!i zx9j3#nfGrtZ7u%#9q({%!1LQ2%SGz%TgTFvonLCr`LT?n#&|K3jG^NfGM?G*Ill1O z$8xjxw>g$40`ajt;Em79P2$y^b}S!wQ;p@v-hTaLs9%2$4O?dA5;W|7ZaGLbmNzUK zw#-DaXkGIfV>yF=U*XqwDm^=%QA@_s{qJ_I^2zvE*8MAfGx)uKaLuNj<1zM-sJ<<0 zY(Lp%4Vyt9^1sz)fK)Jq&A@}VV!uZu2eBEj!=ZQgD=+6nn*pdTh#F~k4ZX7wn*o|9 
zSgKRl4EoV%J=~YpPuAKc^t}umIg}s2EjXeW*n!Zi#gfyW~ z3n#|?8T&c^m+_JOe=`*yw)?=j;9C|6>J8iSah*U!-Qm*4k#QvX)W9vhTOvEm+b=GJDGxqrk`xUfv>FG&g4f^zxykVWaQa3#T zd!ORxDGGZv=={v?MC^U&`~92&BDtbRcD=>9@dJEhDA)5OV*k&@vGw64m55zp>(XPt z8j^8e5Wzk}VNZ1fU>~+W>$vxE17JTl0Lr*`uotaKi9L%pZPYFUd+!z2+uvY2quj`2 zwy7g!;sYSM#}r?Q%2icNChUh>`#2|w+tK^5a-=^Wz~+P}K)>!qXBDeYC#U9poCzoD z9+~bw6?1CnRdSvu-yCOzoY-0VIk7-;#m;WkJ)})LZKhLu*H=jYmxATeFKpT|9xImO z2$qXpISar?%{7Xf83fHD3mTQ>>}k#czWZP5TAs4{rXevaG@B!68WozxlxV!1W?0eG z)6*_I$VFf3kDUKUcNN2G)W`XMKj;7b>imBm7|s);aqlwrKc4t`WKo;Uc|$K;MqGd4 z-f{az9_iCxGQ}D8sZ3|-bVB4#eBt}K2}^Xser~vsaj_*K$$-LJElA{>Ug*6+Az4pf zXK^ZG9T#bHjW&(cPWEl0?^D20Ls`hvr1adVkDbUUl4G8g&MW8SjJUc>u#X!^4Y?)f z@aT>{y_=JAvxi+Dv32R~>ipfH%%Y>{yt2ygM(6D#BYd*;xsSU)td`n7@+cwo1Z5UE zKyX=|lAA}Wk>6TDo52A4zqkFLZQ5pHKNeEX;K3euO6+lV=U*Pr<8ODy^8JkNgv;4! z)6e}*eD(XWzZj!36DtHf!5h|fPw0CFi7LLM%QwaJ=y*W*>nQzNE0RM(ghC(L9^gD` zmvgr7y=2oK5L>a2ax;(27s;t1@n)mkbQ276S*XO_=xsV#40?*?8;~quk_buh1&A@}J+m{b$em3MvHI5(~0TvSX%| zyYBmClr}|>ty0KVrbO1y*u_UldhMjwH~lh7H~f8i8!NbVl%&mc;W}z}49o>fC-Eb9 zF5>^kV?Ec96;kF5b0I9}8hu#Qjq{pP*0rm#DBXOE1J0c@gm*i0} zF?Jr!d5O9&uuq>05?Qe)Ddb^kPdHRS_w658bbFEJw z#1^!Z+J)h(7i`*n-KK4#T)`v%S+P79!BVQQ6zzk>7Ahf$iAO z@h9N1;>wEPI-z3szjbHNbA8Cr(Qy)Fa?e?%MM8*s3Zkt>?z8LD*G7;nR>+=7iR@jA z?|_WK40?Pea-Utle#zU%y)~(jmDBDFwHspo165$TK>W-*=aFZ5thn|C4c>DqTt)lP zhE2D?^%3C!&sb2!ZxHR*x1vqcCW0vFld%dW`Q%Igm0yhVcs9JUSclLwSE5bJuEXqC zXw18*ymgi~i>NJNoEFmeDsb2+z5kGiV@3o=cjR8aUVT$+bYxFDK(Er{C{s8NpfSrF zdP(7UDJ2d^(#Wi)j~Y1o$p6U!w0jiS?O9S;ixFwr#|~ z4L>aTF!I`lyBw~ETz%x=9IjS{MZV!}j?Waw)~oOO-1+j*^~jeuT%GaJ&p>u0wrse2 zr1RhPzDT~jJ`fHa6WWJwe3xgqDJGQlKJXz_c@EsugUD@4S7(3omN|Rzb54#4mEE90 z#d;bNvXL(jnFbD6Fd+Hz`jBt?@c-{a4oeji>U@*6iFl(MKZDnKth`+QB zF(1UyozM=SxpZ=IpPtJJKkEhs(;Mh>m~(IDMi?svt5&jH z?4-?2YTE;===VFs5EW5AIG23x-G^_XobNL`)VX&baYAzLjdbLntn{%0l{dH-*(HAN z-K&=kajq}=lHe-?pXg%_g)fUY67vP4O(dLq_v&88W0S(S5d?Emoa^`E&%zo&?bN`F zV5$V)Ov=)f_!dU+&5z(4ROkA#6UKG4f{yV=dUb!ujPRJ8ZazITqN9DaF~eCJ%P`Mh 
zRtS%&lYOxU2p8^2akAg17bYQGPw#UT!fY^HNpZ5@ixrg>AjArQ{(R^SoA&(2oRLz_ z=CQ^t_UTNsyA+cY5 z+{Y#8s(2&OgR&Qy$1JDW+`VM8VEoaikvr-CGNd50l-BE!Hh00jgW3iDi)U@xm5;EA zP%h)Sed_Pf4{Rg8I$ttf=aX}9%KoYC=m_WuS?CE4^n^y=m+EbYvnop5l40*)!}ymU zbvmXMxwwg;qS$4}LjfzUZ<6BTMZTKWeD}D+C2;X1#&|IJ2sfb^S+9GE2~VGcy-%O5 zsBjkTbp%v6g!bC2A4)n29|4|9i=Tmf^CAdK6cviVc_Zm0T-vOoO*XYx@WbErzHwgr z=1Gas-HRy3!StiJq;S!n%Mb=spVEzZn zVjgw9`Y|N+bTK&0e?^I7KFQ9{{73U={_}=4_9PqJg%-_UzF7~-Tt$g&uwGG`zu9wB z+N`3@ee_Nu0Dji=ohSUaYa`e?PHl2Z_g;M>&PZs;?srDZREA zo0{nB@I*vkmsKh5S=r0pI@BI;7W)JHuhP}~IR6(8ujCEu9?;0QuO{fUy`2BE`eT3S zBRam9yo`x^fV_2w5#2`Z{DJeQ(fz=)j&cc)SBPA#s2_qhn8*6qN%(|#wYij~JeiGIlOFdqJ$dxFWb7K|EFfdo zw7+b5ihIC+bn z-VaqTITP;t-ut0Pmv9qjMYZz@r`Yt~_f=K!{R>DT)JXa4jD^8)*A$+P;&bJ6p7}@{rFG^X8-2JQE3rh$kiT<}^ydB0sL1%_Es>mTWk|S>@?OKAY2~)a z2pM!g^OXEbPUjPig-6Qy`%ez$O!x*jX=YSh8=U_gt^D{?Pu7oaT{37ZGj7lr_I9RwJ_20i%FE!faFMUQ2tgf45nbl|gHKKI%d#~ z0T+=V%q0H(4!^fY?TvANk1qT@y8hL!JwqO$f`}hh!)u&PdyzelW9#NIYzpFAmxpiN z??rsD#ILU9srs0o)o{0ivD~VDKkl1TizNb00*@NIb9l_tEmPfAB7Sa$cT@^04@-4l zNe}mx^b)Q6p`!9|a1Dl|RPI45ga&D-%-k5zD#fQJk;?O+N;XtBPXDp0c&nW~M1@;@ zY9#-DFSh}hsodz)D$(@4vg++o>((~@UB>TqD=92GpX64QJ8#>ym$*|eO5>0J9vO~U zxownWq~r*ziaGJCMY5XB?(u2tS> zGd?p{3%wWl46YV>*GA|)v9m`ng+`BC=>1rXt9#Y$e`VW$2F3eLeEkl5y^DQf5t2@x zvf(g>oIdxU6I$j7z*U#wO%kLFVz=KRoO zj3O_6l$yuiWw&cVPFajbFMK>lQG5=+Wxj_|c)SN+U@OJ7N*U>1{LX~OdpP|^H{$o2 z63+d*JbC|O*A9`lCQ9R6=z5{oroDR9rfuaB9&dra$2m*ewek-Ko3;#7Ub;^>2U$t> zK5dWe3S-@ctR%~j(`5H8kL(JK>>y+LenW6(S&wcHYVCICFz$=iK5}UGk7_cimS?no z=h*`*RxP)+-&(QaJFhO!Y%l*#|8jeK)w5acuRc4xz5iK9d)11E+E=ak&TARvKl@*Q z{8xYA)h3RvW2MR4bz2*yzb)TowAvo~^34(ZdRC8a9KU=~Lw`;&eY%B9pt0;4RXZd#-o1J)-$QXSb{-tMey|mUQulkJh_1&A}4)>7d{TkV| za-C!0oyMT>PMPyt{Cy`@@wqYSK)eoWV`o1|l|ijad*qaQb6D$|me)RU?16D*9<)?w4mcH1-uW$$6dpWpf`__NSiv|sqMqH$Xltwom*BEJO5$Dg^xcefATwzIy1jxTmJLq z-(UXZ^2`-KfA;&&Wv1S6 zeGgtr3+hL-{!xr8%idn-{3GWm;mW_VE8bbeI94kQ`R_h$z2RnVee{MK(U-n;Tg&Dd z@qy=S_40eg|0C%ZGMO_;-s=}{j)ZGV!{#hSWdFjhwHFz@wA+o|M{MA9Sf7iQf12$MxLM)_PLK3LKd0czbxO$gKzF 
zPDF0aQESK_s=fW&$!o=MsQ#Q0Gw^q@n1K&&W<&LrWHo(Sy*DrUy_BQ%Pd7*K-n@{= zQVw@6vw8-3j(YWJk%)t-a7!LN|B^*Am===^1Zz#~UN<~l$pE;Y-!k7E_xF6xb^fbq z+|M)0>-;@rV(K9x$JoVg;>q5FheJik&ssrN5e zd)hJnUB|Cysga1<;)XeY>3Q=KvN3s4qH9LutN1{FfFBOIMY{f$yk+F7I8OO_#`|OT z-BjZ(T0)ug|KIUm6a0^j_rhR&yysxU5KD$3mB@HY_W#d5-n)FCZM--7dawX6|Il$| zpXtThy+`jx22UPu@r)UIyw4*4EbH`lsncJw$O@$6ItFtaSe0p1jDO!|7;LUR)cAGALza=8MJ> zn;mkaVO;a<#p^7!ym-#bFNTEr2IZ?1<*WFuWO?yb#y(DY(TSKx(6`NN$cq~P-ox)T zC@MtPP?_&t7wpdFgwEPD z@seXb@YdGHwIbWw#>w>Oi_MYSGJEiuGH%Np&JJ-VqT^41(tAWxu~0f5WyL9g_a9l) ztWj3zozUe#w_Q>9`SFK;4)W`VI0TQHBo~bhhfAQCf>GnM%*^!N#4;)1ql&6SYv)@o|mzWkYPWOmywoOsaGK_RYs_r zqo|vclDaz*wEAxRS&&4-!pq%w0QBGi(1RS7%**aK?b>YeOGN4GZntUq*Rcgtmh(X0 zC%4DsF0-xb{lSI1TAoUz@st!au4fK5`>gB52$8V9{~uW|PI%)qJ`mC5g~pqbH2L^? z(W5^IrK0i2tjQ;lQ=U_Le2=~aJ-!>yjARb(#`0%fm%H`Z(Dho#ps~nNSNOG^N{f%% z110`z-p}mXb#f&{XxT#_cl8nhYMS@ia)O#=PYO!Kcj3{ zH+D2bT7DvC?_F!roante6{t1o_A{L$4m&!!^^4(^=pxZOjcafJx6O{Whdz_HQa2vV zpH~lp!v^Adkmk!(Qnct44)_1kcmBpW73n6*K@fdCrbo!94Cn0nsP5CFSB1JFx{GnU zCsHZ)n;yKSd+^HW;S@~t60|!`%)8O?f$gBjt3_bM_f?P4wlLorTzQ{_k%YG1pHx|7+d*gG;gf z$MqbCJDWZFdDhf2Mm-u^EuXN+M@|KvjjetUxkZa!rdod^)_NY?_7FUBH}r?~54E0+ z=t0BZc0}}`(zlh)`u*Rpf1SwbKSLE7%)r3{_Cw$IDxBP=a`;^}= zeR9xVav2^PWKDgY{bs$-;GyN5u)`Jnw)qsmjb;lDq@cJgCg0431Fr}ERE_Oy{(X&K z+pYABxYR3i{^n2Z+8S~&$o+xQdB0+>O6LM3cV+N6z%l z8#mjB=P*YN*){@Whi^D9r+|Fml*@MP1eEaxt7jdpbI@XoTvoaxfD!hye?JNWkW z#)8*m=H(95IGf&(9&V=PLha<-sh@gpa5s6ZKL5&6fQ5w_R%`s-YvEd_VBs50dUZM6 zjorHG)G+5u$KNdL*4un`dN;>^YaIF?J}8FWWg3!*5)h)Q5#F z7aLm%ztPSv-UZiu;P2)pW@eGHhr~P7AlHdM` z;+#@GpX3+CN3q(HQ@`{_=tjh&p4@|p}Hgh+n(O;u%4wXkf8)b8-GV<9dn?qHR z&qmoCs*Zd%%I47I$Y-N$4o!)CHp=GE)W~O}%wv@IGB*w0nh9^M_1c*$g}!9onuCQs zJ#U>=H`SYYs|o2vcYn+SA~5@kjDM9+L6yj94rCG%EGf>(Vv{{KiF>bDKvy3OYo z`Net4!@_)D!};Buk^KvPaFo+|nojK?FZ|51hg^BBj^=FF=++UgTl#|m$6=X0RRKYm zW0^fyumL|3<9D%mn`h7bB!myJmJ7l;^hX2XIrLva*!&wo_$t4ajIb&R;Y%PYH6h&0 z=R$t*y7Ef5-pSqNV$Ki?`#j|$o~Bc~@af8P7VORK&?@9!pB_A)_lI&IeYH+&nZ& zvrW7B7WrE!n|R{)>}Y7|ulu9-=Sk1Q>{He#w)kCPJ^C82Ml?}3Iiig*dh@>Y{2ksY 
zjP1F@)csc7`((`%rEjy{_vU?t+)l6#d1Ky3C4>Q#` zO`*+sYPS+sFdd9LiFNJWMXW22b)2Tg?n9DsDjs5-x{3cc#;GVWPQhQxIB6-zX|tv0 zG6LODe?7l&k)E%kZ!?m5F0HQ8Y9+N*!432^hkj3{%;nL;Z@!>aG@WXeSw&1lZimdN zpKvSeTcJGW%;*ib^n;M4vy5n* z+(-X@wDt=@tCP@Vis<3 z&tJE`T3G}>(Z6<2-dV^yRv*VkzP*O@7WZ+KZ>G`jOJu`C^A2@$4r4trmE0A#@MnSV zs9odGfCLy$FSXJM@)}xV+7lMwC6_WC;Gqq zO0aeFPBONYN!Tth9!nIq26|td659#d%%e8jUkjeQ#P{CbPOclCrc=9>Z#5ZI_Ki7m zOWy*YmpN^bJ3;q{MzbI1NA}}kPLm%RJ96TF{C;T6>NDZ~7s?w8j{JUb+=%aRV<>)W zuJL=t7ftqX*|Q5wYf2NlQTn~$cF@?fo1lyA*-!pTIC?SfB<=;b@C7!{L|IdA!OyIi zL0MBS2fMk4Ie9O*MYKn^ewNxz*a51cM=7+qzK!#89%Jtt%~^c`z7DronXI%i%POBn z8?&s!FR{9($Gt9s`wp70;QpsU!F_{wl5sChf%_~Yy-49+0J6N4xI1W5ORWvuHDFyv zY;@OO;>W^cp=WGEB`Nn;`X*^`9guJ9edAiKd!^t3(!heQhB zB9C&nzJc1=;5hUW`v5qep`6JhIE-CAmwj#Yk-QEAOD;RxD0a591Mi3OBC{_4$S|id z?;L906`JPV`^;oxZsuF&<~UQEYNvU>KgiCNo=3}7EC7-tdUvM#f&Tw|B0TyM?nBEfD`g{3yx$}DiM(#^xn$R|&Zf8?&`rFeaN;da4UC?jeIOL?#Hq4SVZqBrZeF{!iTo(!RcZ(&^pNRFZ4G? z@t0`ax4@XV*UKobP0+Y+=~uB$tt7`kmPqhcDoa9i3?=K0ymgi~i>NIKFB1IF!cOV` zzKmfyoYNvWKTz@Pf-$pPQ~0{{e&SH9`1a7-^!N%CzCFsK@Ie2r!nZLcz6a1LE~iP~ zm(lk-jQaIek9UPI2sANNZ z@~5mbL1R2u2PNOBjAwpFc`F$c>m~bTKKF3cld*KnhU4LHGO{fJxu?Qm8rdRnq?hAb z$nYm}p)s<-`!cdxO5~f9kawe#u2;y{fo(=gHxHSER#UoP>R!L};g#&x;lIeMJC1GC)x85VO9P7@& zEpDkvHR{6AUTQng**E?LXLQ7@Po*s2G4g3V9?FW1eQ9hC9}mw(sy!bt#(k{``zxWx zA5Mjzbwu6Tk!2e95|C6_u*Yw=pgmdjZeiCq#{KrMW!&w&VZGf#ux}OqF-5uG!jtb7 z_s$6R^`M)jut!fUqBHs(+N`E_irhaK`ET=PWL0P|jk1WxJpS1!dA~Lw`Nvpm{FN%+ zy?mcq%Ul)eR+j%w-P^}kRbBc2=iY=6lMq4(A%qa~M2r#BNGXG8DI&ckNSFT58SiQ;e6Iwd+)W@UVAMh|0bmVwh#4;L9^65^?leJ@y0v2 z75aV3?!T)40Q62`Eh9Of0Qw{odih19`3iBs@ZZPG#e5eY$ttxySVigM;cjF9mLHOn zjPw1|xaQIVd{6ZAJ;f2KL-Qf|ehs|qJX-O7Z^cb1j?wk`e#|m+BN?FGO{Svo+0Drr zhsca1g~6Q&Rs?eCVKw;CUd9S_vfE;g;FU@y*vc|qg1k;}IQ1cTxK*Lu0v_3;Afhq2 zFDYo9oNxV*^U}-!_&7pw{okHl`xv(NTj+*dr_c<32i7dJyc=`fe2*dXpHQRCShos5 zFSrAoepAitq4~{8MtHv2Bj|Ib3(AAPN5ZeeOOB7D?cy^Mr06UwzbHN!H z&rO@CcLv^-T<6oin7_;XxD(^>*GbfbtlZ)JJ;J&YpT9E{?jvds|Em5pUI?dtfF~m7 za3{W!NZcaPISO|c$XXfc(fnOYpCs;{!sa}((W-5ElUgub7t`YN_q@yC?+)_##Nsa+ 
z3OQ@cV|XrK9iP7^$Yv+hH|Facg>wo|MDw>%#zSxn3xCtWaFA?vACmtc{a@jX@#@=F6>IGdCCAL_ICPSH`!+>$=K=u^SnAy*3DyO3{x z^Y1v1O*42EUHc3B$R%^GlUTEYI%B?-nqO$ls=^so{XpbwyY`wGedOBzkQ`Bljd`cS zcuy2YnKxz6Da0xHWd`5>KDqYsqV^lJu^FTEDW%Vw+>%V6I+Nq!C2{Rw%Ygb2MJPCj9 zRT!6pLbQ}8a|2taPq5|gwy@*oulfbP z5Ajlja+Nwc6&^Xt!dJ=+G8Qrp{J6fSaINQw@Cu)@QaagnisapLkvq<$e~A6>#Qqmt zkt^kV`t)-5fT=Fu8gMNIV;)y4E&PnVlYLH^OOQ`>#@m2r*Q}3qV zBix;4!nONR#@`Xv2Cn6_`0}|>)!%dCQ;Qsfg=obKCR%EpjEl%|*vLv1Uq;KRH0abD zu~`!wpX)zVxNCSKT1K->xP`mxz>`9yLDqjzon|hImeG~;8FmX__WdpL`BT;euG4Ap zaki@(b?BN=I1@`OYQA9wjKVqNnmFIUc4g-9<4jdJ^JC&X0N2g)jRuCD*ygPNAh{lXUtaJMs< zKd<+1h;e^|SQ)O5(hNTSlk%~oIYRoX6JNMw?_eAc6xF3U#?%+=F7&S%#eR7$sk4H2 zcyN5gD!&8-@n!Zb7O9hHUyE%7GCNsep1~7lnH>Sy{|2-OTOru`X9wyRkq8>%U=r zJN4r{5sg*l#SCgRg5w~T^-P6z254k1Hjhu~Q$wFJ?s^8U{y^|?4vgzg_2&Rjb6G#q z{rHad=PO?3?VjY9+wzH8Z31#@;L8MGz+dJhawq;ef}E2>jy`^DcPMO2dBVJo$XANi zY3Ac}Wb(biQ#^lq(5XMIu!XN9@>B_Zj&gUCsa|0b*q(fyJP>QG+EJQ0_l%Q@zmhAK zk*sRq$y#XSRQ-_1j8N_wC;R{LbI&-*{wLTpuvObW*Z&jSxetF^bnY1^xxm8nkBNSE z3Vz(Du-(ho=aZYpoPSLEyg{FZ+)W>_fx-Fz;S>F9d?z&Xx;V9elp2Gwp3ZSuRlQ;K zu3$#hF=zYO|A)H4beFBJO$ug6>*JI|{Q7qMt7O4*;(syF{T_?3PtT0X^C#a~Ys!LW zarAMD&q@71LH8uzUlo<-Px_pr&wB38?BDk=W9v26Kd#ehk#cvR$B?__9%I%_^7P>w zO0cEXlW(V?k3G+j^!2g(kJQAeDd{A~fuK63cGN%DYZWR-RQ5ZOqq2zli+%bwzPLc4 z%Hz9xqq5&sL!tE|dE7mVoqOv4;IjeO2Cge;k+?3p4P5)Eb0MpN$6wt3h+XiP-u^f}1g)t*ZaF+aR& z)f`-(rJ2XmHn%aJrn?z8K4Uzg?+paBO&L!YMVpU1o*q$1E-D-KbNwVXY9Fg>^myvS z%I7`78C|65O=!MEAz8$?X0W(yJKR-Zx)*#`)^NUqChK#k zHe8PK6=P+%44&QVLOQ$#&jeea{*v;eget`Kky-azysO2(;N(P?FTKn`XtUZx) zVILKzBYD>gPu3}9Yx(x6%$3o+dyhUVxm$vNVJ&!0{+m@R;+pKo7Rw(Bx0P+)$9ihm z1%gd#avOFlzem{S%#eI@Je>n+P$*w?8#ck;DwIz~p-ey?Q%or3mo*^J6w0gp%s{N> zW|Y!r8GU@*ZE|gUkUbtS-p;j%X6RP|-)CsQu8%D3!oxl?Lx>+xe1P*)C!tL)n20w; z)%gqA<$e0pYvOCdE*B0MTpXeBK?w%by{a3_SzD!LNt}AJRFM0b5$`(j*l$iEv#CI(t zUhJPfGR?|fRjJyqAd{zWN4f0NW!3PfnV-Ki#skowNC`yz>Q9@+$Ap8$;T7F3aWgXTn{xVi#nTw+(`4hOj zjk^np2ROExbr4!S!gU$#i)pdSPm2sii^O8hF20#RlolU*B5Co4SUYjo9Wth0=p*~0 
zjOr`;HZ0&ieIHMl=Pp5>A4QAJ;LKLE$OLUOqduAz57Q@+yC>PJfBk#Ndn0ijTo=*| zxev%iX#W=dNZG@ERB;n|F!C8^+s9LNM(7cc0g()mfpN&d8ihN2w9mXI?xR@#efms4 z?sSE_C?@U~MV5l2T_}+VrrlWnPV9f?t(dqkqFF!5-70Xuxr%(-hsZs~bt%n=t(ZVG zeA@0l{b*P;3uBAut#~mZ=>NnVe{WYT6p)Jou#)R}?shFcH80~!1EEQdW z=E=8RAx_5}g(Ks*mR{xD9q=actyda|3x9w*Cp1HL_xNr4(ir17$t&7iANh2JZ+nLi zR_4^FIgI+}&$v>w@;zgMl58b&ZD3rJ#Wpyub85}Dk*XMG+B}WLT%>4|4|*r_fO(t?Z8p#+gS#KFXZ_y&0`Y&OtEzE3Q&)|4O{EZ zY|QcF&sO-$W8!ZR88gX78l&Mw_UxG%2jPB9{FlMLgS$%v{|e@N2l%SG_iNZAE)E>O-KPhc#*_L*_!xL04RP6Ag1&j`zDU5o9e z;F)94`Fbx*2&<6LN90`}w0HX80{X=YwHJRypN7w&m%UzjLtgNGiR8(VIr@~U#9;7I zDkR8nW$@@N-@jd8_lED1c=SFz?|FP*(r1TM&ZS}h>ETd}RXe-%XtX?efPa5wf5~^* zNBIdc{P$V4tu%Oh=TDj*x8*9nR@7DQ?-@R|dD`CT`-H1`@U@ZXvq|vPs7~KUTtk84 zD?GIo+Kb@p!T#cxE8%pp{YG1feKNni)}QLgh0~YZ8H&TpJ>wjA@U=-EDmGW9IwsX; zG-zLbhBKiOmu9t(JJct+sTHr2Jq7BT=$WLhnkTnz;vdL+tM`xReTu8~Lav|pr`TDTu~k;__wR2Y3t1o8(VQ1@O?*G-?z>RD zN6ro#ysC5GWc!Lb4|+wv`SK**Uw-*r&WMsVOL6aaNXh$N#v%HKQ~*q*MQrsf{SV8&WbKDUsi@7{v|V-52D~>HPU1Z(Qw>e)DbSGj3D2DCpq-vBEk`33?qaLQ$tf!9Bku`e?4h*zk1EgA3)M9RR6 zbL@9@S|?pQKV_8sh;f59iToY)3hxL>G`edbY0AKE~j!BO=&XnV;)AM02s;$kg{ zSLdmZ9EV1oCw#(8u2Re($7IS&KJ;soHi$UrdC3FIf(Y*;OTXu&Nw6f?d^W zem!rBB$r=od}}xFNK=?vSr2xp82f;ie}wHEr+b9ktO4BJ2BuJ4m8Q)FOFFnNF2mPP z3*qXe<_DO-<8g=M$-X9@ok2WV3eRV{iwN^xY8=GEvndKsFLYlQ#Pcv;Tp1J3Y5J|; zZczlDbzs==&-h8WCenru?^nKY?Ciazb!Gc{$&Mm*aC+J0%yJZRJ@1n`IK8Z=dxsk< zQyk^HN7n~5^fXN>#)u9OsuydWJY_4=_-t5JoL(vbTnpoi8dq!Z-le zwkV97K`|#L#!mXIYnR^dvDAMX!r4aM16V%IO&5`Jj(Hz z{v(BRXOvYu3a*G}EHvB5FYCdOu5hM-eCJe!rIN3eT?(^x zCw;`5RlwbIUf;K@S{YalaNSBXV@dL3d2JxEI8-ILSMLprBkENZ0ODgAa0y=||9=8} zxkF)*{QqW4RlcefZiQq}ewog9zDJe0KFp7>`#iMR*ed+kI{U@P81SD1N#=UPrrx#U#`<>=J9l&A4^KVc|0u%!_uRl#0wpdFU%tf z%SmN5f2O|xmL4LzV#!=Z6qaZC&Ju;Ch_96>#pt)84x7fk%Wxo_H zH71s9`ZRL);r=AP|I}{m{6(C*p&34`e^h6urLy+y?Q+tnm)u6;&69o6fQLSm|D>4IeD2YQIL~JG?uO3K;u|5}o^hadvp>^MaGt+MopTQN?e0)|@iKJ8 z(8;-K`W=M>i;1^KU*h|*15KV?Iq}|ue#`fAS0kpP3{2(V>->gQJ4Oq)TYB{(&nUGf zy{}@;b9PSv>FKcD>>mAT*7x`_m!kOeG)OGW1>eO6?;*2Jq|8|yJw!`{^GSYL%2#^W 
z&Bi+NG7^~w%`zu_=Fq2|ySqL2@$Cm+V6NkOX<;Oex7-GfHGUi#^0wN9qwJbEUSe0i zmkJC9j?b_odx&s|g+uCMhU87YaR^FpRXCpDOKmZ6^wMV?cc*)H^6gpRn9sF>wwARh zar;5h{ce(d`LPb_1uCy08sKecfOnw*e#el(h3*`s^$xoxDXz_Ri%eGT8SBWY*X*~* zc{jC>WcQJD zo zdl8mi!JZyYEr%+g_ULa>%Pu~TW+`NEVaZxSR-=%;6jlAs3bIlYkK~sp`RaUyEVsXh z>T{7%IMU})`lNC9efGXj%wzu_Y>#rimlla_$Y)@y^BLH7`9j#lN*8Q1OxPA*6Wdv3 zy)O1+D^b{H#Kd+)t>p< z$CI|bp=omCcNT57-M*_3kN#tG$4SkwLv!{|wwKy(urpu%jB|+Z67|s4?YS*43kD_^K8ZyqtyweClGNwaoT|WqCCg>TcX>n^ZfoCf|O& zy$26~%ynPelkJ#TZ+%{~&kmdI=k6)1nYBB?UUn$ap2UoQBQyRh?#XqfdyPzz$*=Zc z@1;$XOp=q82fv4vx11FrP1RHyq3WAl*5AHrsmMAj+Q<%-;dXLxx5e1KDGKi1pm~tj zT6)U)|ItX8od55k!h!Uh5PVK5xQ&MDpG4Hlli!vyEY0k_ zt<;n)Hu>KQgB6>5Id%4-qPST9=}_E3Oh6Ac;cw%7TcFBwqHV{k=}_%#1>Q zy>sANv?e!1aq{FMXkEw;_2 zdY{Xnb-th07s*z~oE($ZTd99-rnL(T_A!^xdV>pI#H4jAeIDX&1+;ElWzc%-9L_*L z$NIoEk7i!$77iF|-7tB2N7eIQPDch}?R%&Sf308SnmDTZK`XqAqXaRlhi}hPpntsvmSbzFO zpXtHU{6qEI)+imW@*45^%#K6}^yrn)BMo{aD0*b`ggHi4bdg=A_&&TxH#NAl~FU=<3*gvAjah*jA&?8#E?cv0qfj`-gf2kk;|4hG~A3>XL zVjSY@{yoG?3vFJ+k07-9TWs%cJRGrTlM+RnHQ?41ZLa!yh(M1;n`QLzakr^|+t-L6 zd=~wmYZ1+?-@<>7L66NILmuAq1oT_#9i@jf+?@JYI<7_OxXH5(UB5>^5vJjGgYsH5 z*oR#UCHyq_Ep}};8d)qFJc-nVG>kMyH>*nzx$#f&{3cLJjp9i8=%)T(5B2{T{ry{K zVGlxs8C>&eA|FZHcPl;rI`($N15t)OXslm7RQxe?sY`A{mztye66JNGE0&V!%~G4w zsMTT#E|-YPoaf03kjmexB+`nnap$u7Bn4x+yUDF{Ejr}lsp!$`u!}{D{A)#rEj(c! 
zC!$v+n&@DK4y(X;*(>8@*o)sHD(+tTETNB!yA7VlzRF(6Gt7BhvuR&Mhm&rD4n)VQ zx#BHl3kRqmG?3GD@#t{W6-kGjYtmsS_HYmRs0=#vV-I&z;UJn0-DJiLmj}V#TriO-wH<;4-h>Srn@b38Ty0~@zg`ScfRtXNW@b>dz<{J zp)+?=<3Kdn_&PJ+J7DO{M*hFcD*s&Dj{qV1d*wC!dW*N9<6X-QT7Eo(y0=de55#pHty0a|fpZI?Ts2~GbQ1|DdRsU7bVP46 zGSvj~wXj;K92cRSMBP+4Km&^t``Sa^S(%AG!1lE`KGk2s_C+dqB2ts<)@Md|EST$& ziK#^Q%evEz*C^U!(XDTzPXTw&VfzmJ8Mzmq#4pbE37W(`iMO()>DvRD+n2sPk~-z9 zEL!mtTZQd=?!`}idmwxJD);=Se(bg0b^<@#kG+X^^|)7aP5MrcyZWgoyw;5}ju+Wa z;Gyerud%Fl&wffqbI?&Y^tPivs2M9mDzZT>B-Xypru{+iy?sJgy*`7_?> z8;$Dh-}Kj5pYi=?yfZgC<`vlfJTSNX$f%&+O;$K8i=b_HTeZb@eb64nTJ=z=>_Ru^ zU@f+E>jHIVR(y3-a6+ z`s8r8(|7JG_^-Ai^IRXLiOeT%|Mw=*>t#QGieq+6uXQ(`=3A8f*Qpr)9;liI&(h#; zl0`h$XWZ#<^OqZY@YqhL8oeg-vO9zODHcmj9&}GEI^)i?XjF4Zs5#@#vP|cGw&jfX z&o-QK=fJC6nvwyoiszOoj$5#hX7%5yXlhtVnPx}i{24o@ zac=F$=sjFZXhVvodsTeIxZvIs>yo0}-r-%9)wW#XK_wOx%H;~xa-SPlf5tm;1LLB9 zMnRX}KKQ>`SGe*IsVf|_(r#|ty24%LI+Z%YlN7Ipey$Z8m4um-J!jN@*85bf3)J9+ z@&nbjE@m&IE^r=PoymTtIl3;C-$N{8sj+*8$kBA$hy4E|`16X3dD+{oS19}bQ{AdK zoy~j9^%2tHn8d||IGwI4_OjaJCwbcSA{Bd?N%`|u-Z)?F)th5Sk4_~Y#1mHS9MU@7AaD zo@nkJG3|8+xR=ccY#cnOxR=47U*e6`%Ciw(zvD_J>)#gY;c{I^t2`XaiNKty&NOsO zb&KImmE^wIJti!^u3%5zZen)Ph8DU*IT5<3aTwoE(@ox47xT|~?9LJ3`YE<|7a0yB zSN9Rf&O#F|`K1vFoWmYIGY@F2QGOcfvztDZ+`ZVJ4xTcw9s3gTpENVJlj;;Ynw%$p zkL2#i!`CUE`VqvS3#N0fa7-JniD?*Hx|^!!2BvQ)OtSlL)>!20&cVUZNRwaY@V)KW z(pbzs#s_n*d5=Zw#G zi3#Ppk{04uQc<^_sQ9(h^QXtJ4OHZd7Ut-(z-6aXmFyVkbgNJOxg7l8^q$174UWUpB@KyxrKv;rV%%P zz~ErxKsW~<@(BmuSM}cw4(3F0FkA=S<%#CtvmRsJO80Z{L#*#E&N#&4;G>Mg=n>Zp z$JTfZ4!+NySMf&4QEaYHxW1Hq?hUMgT!$tbc6TVpf_PIT&q8Q-yNmoNvTG)>v0`2S z)OY$&R*hJS&v>(`G4bxt=HKE!+xkyD_H9bvxQw1$N8 z2IB=LrvQDhmJ^WS6^a7uc*3mNB|z&>pl^r*^2sW;c-CHXD)p@ zxhwhqSAc!a-$4tm!zF(R9bQ*f^B+Qo#a@Y-k~nsug;1g_iW14-iWf)JP5i%5BH2%g zF09Ee-N(8YO^LNpln^~?wW7o-u$IQ8#9Q=P%H3?=Jz!tAo}4gTeZ^MorzRa&M;C(6 zXtp5@7Y;EiccKmcsg3DQ_GzQ_pZh$6HPPqI=ls<)wYLu3%WRwCS1L&D!Usq^<36CGgm{pQ5QQ8ku!Cdeq^u< z>yvpAZ-K0}X^N`_yeFEg2jF#Ro(gbPW903`TOfMxWjMNnH!e~3wb|G6egS8H{+5_{ 
zt~Q$Rb&PH3uw+EYZm>84yK~`O7CW?ylODp$%YSlqXQ$q)>`pT;pLH3$yclM8ipC_o z%nI_dlbFYA@v=M0?(EV(@O9z07U~?yK;X-~18S;RB3@fQ4EU3!6Mn)0F)G-$OJJJDlgyXYcDdNL!xh{x{4%Mt%vBC91n z6nwi_+ar+RLs;9LoN+LF`ggoK0{NV6%2PN3iXT8)r{hEE;&eRLIQs+UvzOB6S^AW4 z_hZkcNz6&$c5r=`7L7aEkNb6ZY}^ia0QW(6IBqMxBf-7Jg!|rW;_hTen$h9M{UwEa zP7K_g`U%ck8r+tMH^Ec_rlaggGXpTEZ-|LIlRggauJK&WBX;!>ax`+?Nek?Cy-d!B zF+{@5*gI40Z`{6bHklui9~YeqEMzql8Fxx&rd^gzJ`lX%1`cnEL5;1r3!%a8{*iJ1x&A)OL|$ zpcon@5l1N_NGLR&IJ*+lZdoXLvP*wM&e*4%2i-J3g_0G8@^~sde)52#PaQI5mZqy< zu9Y+PDaU!ov#ckJi3XfuO9}cl4ElKJx0$C;KRWP?7PoT>Q|^choMv8SMcsyC@fQAvP4=LUp6iDC44i9Ns@O!O%U z?k1vlg6(=mpM!2gg4W|hKq`1@V(=-+f-X{!7Db!gmmk!Jy&J1h#alYLI+{5bXs zRxL5{@E?2@Mo6;4m&X%kEnQZrI+M*Uzg&f2;7|u zwx%3>mS9=Qbw16EW3?YgrotgnZCzCH4aIYI>StM#;@KNrWIYiiXH^t{uKx?(gHCGe z#zHb93dt_MvO*!5(%;EB%t*^ikZh$-9(Q{OKOWEC|HJHoaD9ws&hOf(TL&e-E1J4b zGCXy%o*Ma*^Y?TT^A*bQ+R2&l_~#X5rQOC?`> zl9+zFQoh~8dGszk03tV$dWrOzL!Wl;?)KdWrh^Z0 z{*UXWtXTNo^bh`Irc|L?JwnM`wdV_$U}=#66A6GkvO~((>bofs!FU@$;8yfxhGi@KL%d|7OzO?V+vml zPld<)Nsg9IJyXVS@GOn|vW{<0;T@?dC*WctC{7YBSZP`lrQZtrU2=7zhZ%Wi4vb|# z9Gr`|R?;GIwhtIMn^h*gPS#Tcqor6@`EY!ic;<}h-p8Kl@j5RDv)nsJV=ntRwn^{@n z&-+V=uE10hT1WT4#mM5ZDu-nCR7xYi4HI1*FN*q5H3*UNC*{_i+*f#nUaQEDR zZw&Rwz;J-;R+_~7MCMjzB>1shRc8`%RbF2B%}N%^mLPIi%{Q8@_K+{8lZa`_e{>#X ztNqAkDrAdzD!dwiDgE;3_ZoMp!yL+g z1h(A|vKC}u^Uxw?u}0Mcz(P{H{n(oRdH}M%wFYHz%-Ih1IpeQyvhE1yE@8LII6IBq z+QI4?YbCf3=`_pXF*tG$JNuo)OJhMtoj8y&_6mLGayJQV)4*}Dn))1jCJ6pk8jhy<=yIAs4nS^_V_J+lPf%Qs%c5=HCqbrSy{ zOY^9rkCVGEa(4Y8zI`<~^0_7|96wj*^8d^MzsZoN6;|8+tX`W+JX8@gsOMNg2l*ls zZO4ipgd)ehAB`^}_v!@OSJgS05m*TwScQ>QZ3}`~tN2%a#Ge(5Bo=|N5`@3wbbbe( zU2|RA1Fu%{4ckno^D7-4x(5zyR__o=+ystU5hwJ|U66jQ^m~ZA75&eqlbijoI0L~o zpH_J|gmI}y;RD;7Zo%dm2w+?>!n6XQfJ*6$?i!G(N^A97IhIe@>s{zJvsFj`6dPcMDe zahLP|Y4CbAHUZZPT7V{j^FRhY#2Q3H^3y=`(_mqc256#{Q}@I_^B%CNEu4LqGw;x+ zVbY<5nhG*j6mRI{R8u5(_=gJY(<}4Sk6M9?<@K|#uMgqAc9njqR$GjU*gQW z=qp$$%x;nAVLCY7itnDgFS#F0<*d~Ah)v>}P76O1G^Gv;P@Q>aK6M##aXfhDeXK)H z1m)DnJM&(2sGKM!$u+CtiIS{Ylh6gTpu}3g7p{Yt@N3QCwF5GTOa6bMf(!exgB%C3 
zsIUpyF-u7=++Q~!bKhEUPK`;0_vo{dyDI;G3jViMoQvX`Oj~>SfSUW{Oi<}Q_2O?VhEa-AweR{*#LmfnH$hfo?{^xM}RD5AXY3EpqtMeK(qH85mwugvW;*%e+ zk`ExCit;zwn&y?+3KShi*fNfF?46_)K}uMxSo(?!%U@zK$~@4^R)6 z>y>1oMV+Dr^>Fugu-=Q_-$4{G{fmNiVD@>ERUjs zJa?Qv%eb5CS_$qeE7||#dTFFFF38njyH0XJ?Ca2Hsr&&Q_`*dVI`rv-1&4Cj(G18$ z6A-q3uR4ajRM6)Hcei5S&wxIe(8`sB{zEg5 zoerwu%Gl}950Z~SXm!A+6^GO6b+u;yPsYy>D`R|mtX1@2<-n>Gdc4Egd2|AviKfT0 z81y)TL@ZMDr~+|XOnSUTpWC^66}z$w8qB$$8ki%fHAoBB2Ro?tBl=*6{+`#+2Pd%d z)CTJ(iblBY8hhWJWJ`BlpbuCqVEIdY03GBwjMfM{h&nZEgdLn_LHox-?j-I^_!6xV zcIdO|)5_glzU6%T>nq5c$dyD15jfuT8aS%`I1;fID@`~`uZiOzmVO5j7zU0{u@s?g zOdQWf;qZyyN#WSSm(GoeqnkdDaCe%ohHsw&j=5aRX~sUGkwq)nky97gAI!8%&H4^v zH4S7}l*O3Ykh`ZtUlEoWtV35(&T;&ncpen8BiO*gp{Erx+3Aeb*^z}PWLx;^9EB{4 zZ*Rq$XpYmcFiWkaPZD=eVgDXUz~B8{_CUEVrhO5%bIOA3z@skGm)Ve5jGTmJ3hUs+ zBT^F=+i5If`uMTsD{N9-CmP#p@G{&UVYTPG8?cDcJ6s+3jbdT5(q|WUmxHa_O3cN5 ztjk>2(E@qTgl8SpnKQUlMpS>YBhx;reyrWVv(1D@MLEaCvr^$Xr|jTM`Wx869q2r< z@Ki}(k{>b4JzA6VY|pqxYqA?rvYWZYEmaFbS^J?b7~=Ug zI;f5JF$w#o1_$TG2a~hRCIqQ#_)0JW=G)*eylq zvJu$Zu?nw+y;@=SU~dZcpJP$CbHX7OcBzXQl3>C0nyZ7%_=4RAy5yMH571{icZd6P z!Rx%2HSzZr?KRpLjm@lnL!yr*DjORN;9E3s}~O4GPb*G4Z5F z;aSVKCMi5tzW4<5Qsf%ZPOJyIvA08igSvFJU`PUwjq4QJ(BXRKoWxYA^_b6b?R@fV{Xa)NZ74{mQ2tPr(<5fYC6@{Hj4|ZWq3{|)?PS)umb|}07QYViU>ovoRmih>!o1>Gz_3%s zO_(RW9h+A46`6InflW?gMa%og^x4E+>M*^1@G|r5GS*tIx6|Z5W8E?G*(N(eXU4IP z{kRhq?)}~`r5f1Pna?2h*%J-j@CkH_@RhcmnoKvb=Tqj-urKvoL)F_m$p0_6UvnF{ zx8lQS*LU+oxSxFoe)e`fHwyPAFl8#-=^#7HJYcTgE>EqYkH+2O5&Qd%pIWqwe__?` z<2si%1pluUE7Gnv4@#^^I~g@3wx*qP&PLoryMAg=BAm6_f<{a2bXAYIa(DJ2YcShK zL0Hz4LiFb>Pk5}zn6vFf*Hf4^%=6+yAhGfe*@r{>bGH+tD075) zAFdsbRtM4X?Zjb*_ThH?&Y~S(N{%e91vJS|B6YO<9m>6#E7+nP;#vl1@jZi-z2NTY-CmT#5?pC z-T!r=opUkbnfaZ5340Qx>Z$R;rzDEb3I(=BWHprEJosETyWau-CJ@f@XEhAX0O#;- zw&UyR&>!M{h40xm{!JNE61UOLF0;gKv=i?KB|X}X(e<3+ zAg+P&=?=v;w37kyT4T8ap8$QZn-jpfMd94Z6Xumafp5)@h-+v^qS+n6Cm`d9d6_xT zto6uKR8rEfxP&vTf4{Y8HZb19^>*4?=Kjcipx8N&+o>}rIl$VfHZw_~Kdq|jeR0lX 
zG?s|GXYG0^UiWx=9C!oCYfm32k_JUS#RhHH(|96skE30G))Xm_23jBXN{*aI?XAy#NRk%i|UG^xWb7n=OFqREyRdGgw+{b}CuXs3=+yM7tl6n$xs zaT&F1Ei_p|9~XBU1|Iv3MXUWL_BhvU+O=qMO6AFGr=E^MlVd7Ro>0WXjz*$|>~Lx3 z9MI3Wk~2bCpd~vt=hz#J?AXlaQB=v0Y-Xmonfzef40a+GJ2rx!D(A6l+rSu0Qr06s z#vEznKgKda0@f<3JOu8Fm{g&r9rluFDeQd=S+t{zty(TujTWE__R<&R3rEuYbaHwO zI!*Zl^C`BIo87!W*rqZ7#v42B;j+p+$ICS(C45;K}6ZTPd=@q02y z_0NG08PMbtuSM&m1!N=^P0IZ=X;pj5taOSdrm>SAHFjplA3L+7sPc&Hea0O-7uiW} z*B|jy#ippTJO)+Tu;I;Pr;Q3%tOxA0w^RL(Q5a*Vy-hEt&oS<9!OuWl4{hcma-I!X zv=g-OeGhaYx1l#JRy6}e4nuKRZFm6U`|MZ&u$!3yl(*z}`bSugZTh7c)?*vhKg?QE z8~&_z>dv0x`G@$zxA-2WHJ>4_h zw4;Jk3CA`|uIK`Ln|{uB*8;7$&_1fD$bRZ#g06UP5=2kTz88%p|;Kvm|m1T_q8r+jmnZ2>)8-a@KQ;-_-u ztLt$Wqr)}y4^qWZs8Qy%R9sJ1<2Ee9PPD8W7cBN$r1Ce5t+a~2E1ipXQ4KJGlK|IqdT&;gPjr>5tLC~zIPi~BlXC`?LUL}t{;cq#VIX05`GT?4 z8MeQhyXQe@v)f{mzm--6I8drMP{5OU^sgdMA+rwD>UD`0ROdD(1w+h@Iq>J zvMnbV+26#OfHvLgli&Az-)c)nF69hPo4yhm9`1h)JEu*_^TmF+;7Zt?681HTw!RD6 z+i#_Yc(B`T(eAyA_!zG2t}B1ZkkT3LO`{s;eIv;)`F}rGqGd#;N2{EWVD|phcWV3B z7VI5fTcAy?UY1fTv0lS-a~dmW`C?k zi{lgh19rUI^pBaxCkCIBvlJHSvRYzJl&vf}g2l1lJ8C;K0Mx&?|0sM|2F-F=Au3I0 zDdb6!=PK@|;YUzJ;(yap3_4c1EZX8KVg)YX52s1qsj)AAV^JcsyHRDVA8QwRY}4Ok zze=(`jw-UkD}jd&tC6@HG93e8-D6Lxu&=} zy@8kjB1()%lA;%W>iZAQm8X->E@NZUyuIWklV6Ie^A3G;V0`KWWP6hsg5nnfQ3Or% z#v-?E?7~mN7EeOfh01AAIrD5Ke_F&Yd~vW+q|}!TMYFL2cs{xy<75#OwnE{;-^(-8 zcqS+K%m}ifTcOh%5qs!udN!16B=;So$6LGOLuh)7ni-7RzimVhE?h8*%%llc>W>sZxNL&T=buxbyECUo zss*!-sThM1{urIT_%VfAd%amuc7)BUXnUh%5&RpiXhgA4{28(*?&a*hLFdHp@AYQ$ zThd0kx0h3-#_!3{KV{>Qc~TEn)}%6(5n%*#icpui^h;xv3?Crcpd?5sp>9zXRXP1L zay}6XA0okRRKgaW?i2k7#3;0J=CK%0K&X1>u2<8bRFa9l)PbLjv>=yP+;2nSVb+qP z#26L?{QwfPSAM;b*0_Fd1+4!2|Tk>kMpPSe)@wx2!oaaT-C+(DV zvJH!DMnN08rDeZ4+tUpztot9h|G>(>T9L5-!<8!^_{oaI{Y4L4STSP%4fl`S|C9TZ z_g}dGy8Sn-OxgceD<62(QuOw3zx$8ha%tC(nR};8%h)lbjn;oHze%>I84=l@4%qJY zoc}xft8(Ti@SLUJ8Z+NhVcdLANSI4107HAPQ~+j9^3G;mE>&x=rZVVR>hY8kB_h6> zDg7NP2is^hzYlO$p7G_Xw9SI2OIT&wIL#*Owao7|YK6QxxWXFib$L~2z0PoBVI|Y9 zJE8Kj$!d`VA5T#M8yUfZ9*x8-!>7R5Qg6!(zAe#qiu^%sWJ@%!*P}TdZM?IMeH2-F 
zcJl7gwtD~nja54^MiXt_oW^d`r_%G3tBr`aSxJWctog{IRoJK%!?pF?wZ`aT&$Y)W zGd^-%VEf-5qa_38F}lqa7^CkK6Bd7rE_4OP=zGc%{Y2kSm4Y_*gkz1-X^h-ZT$M3K zYq7q|T{1>Xp`lBm4Ny2RMz_!>hr68;GoNVC_}qsUZD6id+rf1mO?-7XdsC=;Ke6%8 z@1DAMv(ig{I5(R)UDi~Y@g}JiIjeM(sMlU6F(7}j z7MrsVORwYfTE_j{ft^+(_i(k-qBY*h`5>;B`b56vT*hh=U|rvjB(Tr8q2s9l5z6~DA$Ew$=9e1^5uDqarklk%Xi+3--`P!;e{ zGgJF|gVs{3zK*}~-B2O9UnKIRvPnPDUm&)>m6cVV&k4RwtR>{Xuc*>;f8x58Ci1g`-TIOB*&rKd%c;xVXP|4-k5Cz?%$8ywS)Xc8X%KB$ zERs=&+H|Kk1+R6RUN<<3d9IC`jiVI)a(7ByYB1A0baxw>(Nk2FAjZ4fpi{OxLss<1 z#ikp{JoD10mYZ)@lsbmjeM)c#N_OhXgQvOCX~RqXmHdCLtgW|IX9fSIM4N-tfUQKG z-sZPjWQR|Fc?fy2dS4>vYpcGRKVKPO1yxZ2q1-ngUO3rh(LSNp$84?zw4uW?cikE_ zceU!v`=?QjNh~$dqaR#$T}_Ir$CPSMu8|q*4G#zvHJ`vr9fJ)Ku+--Jj1{z%%C7Ox zKcaUIXeEZ0TMlBe)2I`1!|OjG0!wM*Lb~cA8ewN*yIUqXTb4y{({~ucG<` zD`+BGt~}?Y&!gO(;a+jY7&iwx+0%mti?~ju$$y4@`%g{tz9A7I*Ma))=N6C^VD|R0 z_OXp(smXo-wz*3y9%Ik6rxne*Yiy%XCZl$=s&|miI9u#Zuodl=9Wa@z_Isz^lrUL) zUYl%TM|{D=R(8${uvFUch>GpiO5EM}Fe^8mI}P}4q#9K#5e~Oimj?MR5>^~0Vd(<~ z<(_pL_SgN2a_e{|JX?U+U#yErkU=?-75U{JXfn`$47z#x;e$(dkkW(x1hKzb@$`r` zz}RugzUaoRSLS`V;o#Tq`7A-rJF7a_Cz!=L$n|o2V17^9Ue~nn8$>OAF)XZ(4p|@Z zG}erM9$#aP_!>uXmO#dztdOn5ZORJSN*undkgeovcrR(->u%yrYDsRf#*D4OQ0w?(dwt@=KtjhR=+t4Lz4 z=QTB=hxzlfymPUivf;T+j}M2|#`jx^?YN!XC|qaL0`y!Ilb+{X3-BH89o}3p?&Wc4 z8QJ#n_IrjcPSG>a(F)S+iG>-Wk7#4$|CX=amHwOL^y_vcJJO+VO6{59f}r*x)V+c} zAk@wBQ+LQ~PJLzNr&{f=rF&$WsQcZkLfvF2aT;Bxl|3SZy5GT9(5gSI z-WE+=pQ5^%x{ILbdAC8``TV)pEz)4fZD=1=bdN{$5PjFSuopDbswHtf*DPzv{-%kq zEDH0-7?ww?J`LKYDSym(X4VInjaB}b%=)aInKk3=+02|ztMfOBet%4lcf7x*L(cMO ze@vjJgO!>N6R7Es=&$LJ>#ymMgbyYeAB@-<{M-0oWM4kugBkYwU|K;L-@e0F<2LNO zR`g(tEfuN#H!A+MQd=j|zDt$a5`R+2zDvai)5_gYQ^s8n z`}A3t4BA?u6&ExGk_6hbDa&F|lJOe*W zuKh-v(7ct%hQ&zpB-W{X-VY@?(VDW4*`wZl&>(+ts>bhm1P^9`tf&B-Y$5L^l znls8(CZmdA4kROU(oI!?%t>v!!{xj%X-^W~hOw{aK;e4_zKmz2jNORe>fS4DXxr&F z9V*tkWaSbbps}iX*QmKzIf z*Pm_Gn~2UUHEGf%Rm21;|6Usz!DYwqzZ5d^>hX?q1&xrA z&D+e`@Xoy<@iP&oNg2<+~*rv zpTt)V_s^^P-T0AvrrC}z`k$%B(ia~MZb#Qc*=r+ zH&lG6o4?LPR*{ 
zGlOp$;n%U3Gtxt2y4oi1Yo(q=tG;NkhA&IE9p?W(y&x0`bphOUUIX`xDBP7n+)ixPV$2#(HHeT5&l z6FXPFQndCK_JqxO*mt}t_vT07ZlS6cz8Z!5c930Fc5cWfIe#J1(Ly#$W*zSOJiA|z z|9Jc&ZgKSXw{q$ zx|7)Lxj$p}?;qP($$Bu>UT72B1uCm-yfO=9zr1(2-Fd6+z6VE<4>A8YT6)6sIWH$Z zpY!(JLzZ-;2~XZHH%{AgW_a1LGsF0~#51U(zE5woLhbSXc-D7Sb;E_kfKS*U_nYR8 zVh{_3jKMf9u?0CALy^N6sWEpWlzfo;L2T_TOFUw%lBZW!=KuF0=Lx!P(qc zFG_;z1>PEO$Az)T^C+8kQ`cEpG3L12kh3Set(^ZCiDh1~CBYq!Pu}QJ^ZU5q8)bb+ zf(vURvhc}or{T&9Z~X=N^9laEoOkA_b!5J2{#^5(MSJPXoC$sx{}xU7W$>z0@u~=3 z?buwV_!Qs~z5q`oU%Hu@@w}&(*`POEIWv5()8I_Tp@fn%!z-MY+Wp?yso9NZhF3Wg zs4RG9IN6zqwwdJ&P`t!X@nbH7rf<3oiXU-7an|GzwO{eyX>bLoeXpWTd5|^+wQmTc zcKbkA1ht=b3ANv27JXK&P7AT#h1$~;wWq2#hUYvqsQsGaYiKnV3THxL$G{;)?KJ-U zBJW(~&wLoJ)oX7D#>th_7H!j1&H!>fsb)cDN%x8cteC0jto`u!{`Q;6yVj*uh$s6YL&BQaa1_U%^ z@!d;n%E-Sni8sa*fREOMY1m}tM09~$B2Thidb_m zE8Y0k;F>!dFBrZ>*Bw=O@bB{zgF@NYv6^jRkj71yoFu6*~L5vb!9ay zRn#rzUE#aKO?a|e$o&ycU0G#Y*j2*{?cd2t+QK=?R^rZDIhSFkvGan{lc4JV1=iKx zQx>helKN;|m(r3C=e{a&7T>AfH81(+hL2jt=?I%`;Zsd33O3EcmIf$ z*G-+AJ{xRf5k6WS-yA2_n!EkWWc#jjYA^}x`o_2 zH{rhzKOqvT41$K!FK3NDf{F)}wT$*h#7T?3m8bH8PZ`k*Eqbz?vQb_!LkDc3j*_f8 z@{V8dj?11F<%`845Bhh`Dd}8FKM!{sRouHVzJB?&b?yXhE;`D#|FEdqL^HVN(v0;e zWBW+kw-xQgGq!2&I9onDOrh-evadwVmoBRL^f&qDeb&Ohro{a94NrTV`e|#m!gt17 zZA*ufXa6ww>npLWZ%Cf@i&cR&XR|9Kqo__%?SUI!;8#vfoTe^a!BdZaZ|h3oM~}PY zDQLUaSdZS_cw&Xn{l_c#J37#N8!h)6cg}2_D}Um*_cuQ7|Ls>BPuzdZo4mo#kAV#F z5UE;!W0CWA&W@S0mlg%RZCD+Oo*!OlA#;#1U%b^XJb9C+!p|W{L`e&^Kg0cP^2tuT$>9| ztFugZ->nsf%li3mzcc>pD=m|MyfS&(;}2LR6YLsU`!~B5Yfw4Ekl@*ckAIIliWkCz z(H95Qi1@Lhy+Lo`ghB8B2@kS~D2%tSg}+Rl!4n>2`FU`KwZ28ajMq0WBC4l_(?4c! 
zaf{xKpX3qs4&lMW?iRfgk8)+yxHv_>`?y=+6TfF*Tr_@c@Z*sa7Om`N@+xt)(!_2O zo@CqlhR;9ujNf+pbnXaNqrY8{7_^oblDE zAWt7m-XnI_?60T(dgPu9<4bME-wJ;%S^Gz+Ip;5`# zPgX@gnaDaa$5=-W2G@}j-KjizFJ9%5M7zkoQkBd3Q1YuS!HCG&M4OKou|slOTx93 zX3Q&cB6QTtkK4-7VQ;i$*-F$7NYC)D%_;U&wF45^T?<4KG_&_9Grat3x7e;VF zSDD6KKm@=qH)hoErx2|C@CTTlPkB+`i2o zIL)wSYn#&7j9`~C16x*hDGj@AtbeyM^H9zU8he&v&vLqKo1B7`8lCsmHi;9NTt+qRAuJ=kaV?S$BHW`1uGt13tc6tz&Ea)LEjavxGNfg^e1A zqRj^;OII*Y1M_L$VTHefKcC=zqNRlA^8XN@@f2v#TShI%w=CK#G?g1b>FwY6{yzq5 zZlu^#$S#)#1+vB7OcDP_DqfDP-TaM}{GE+y&t>A_7`4GMZ?s>}A9J>u>J*WqDnF>9 z!hg=y!+q>Q`QmfzLNv3wn#Yu6$ZcWlhwGsUj%M~nT8RHY#q;-oEJsBQhIIV%;BMw5 zCG!q9j*xy9KEkOYJqWWGO7Q zek_|5mQ69R$he5aav$IM2~qEie`d+om_Nd22LsDO`kdqLL1OpG$)@eTf%BhSlWEKk z^TZ#qt7%N*7Mu7Z3T@+TL*hhA#8De|b46!F5;{ne^Gr6K=wlLPg)Q0b_ z$>Q9wMqL7n;otJetZ`;|u@jzG$-FTx=xxYmzfZgk9`x!mvO47=xN7Hvr>wGp_kr;V-N?_H28)it$}@2*L*rPgwiz&5HjwIRJW{m|&z zjM}W)akbgC<7>y%X4a0a&1%elL95N?#P*m*Yi+^aqPipV42m6er8;helG#wQ5FCpK z4GQjZ85FE_Nma{m+CA+udOYdx@dVxkWA2&8&i&+|F7{>C^P;Lszt@{tY@kD;B8WF< z-Wfn2P;2xQKNYhS6(s{;_^Bm_qG7FR?_X$h3A#Kz@T#JdNc2|TXYBun{01`j$lkvE z*K`#6O}1)FxK5=hpKI|D)egs0C)tQDnl1jtw1)G!zgmOrA&18|VxN?R{Eedzof)3* z9QB*|;v+*FerMy@=d)fW=l}D0FPs_HocVkC{j_*uk>6tlbyDXGTWBJ-&`n3?Nk-)s zVydMcRHA!(Fz(#_OS zY9VU6gq0xcK{`J0B>tB|KnV z@eX#uohz^gunNRBxO3%55IBfaDz#l0FS2w@RdtfRmPA#9w1BlyD(jd%4WV5q<{ed6 z@1qMNgZhL><}GS`_IjGBT*KZDv=T3jP0?yD?=$a1RYSp(rjzJ~KHW?;%N8mSypBHI zO!j|9KW}^#&BDA7RRAq67m`=*4U4ve7N85W1C)v>0@8Vgbab%ZeCu z$@J4@2`I7#oAnQ{L4__y(Vv=$aEL{hnK9|I3;C$<)1`(#m+?lAvJAub=aQjC1$3!9 z%zRG^*%Vo1OSG~NEMBEbGN(#jp09pM^;_%E7?XF*@)?iSy4zS+SXasGAWofi0h z=0H#Qcjxg1@nWo7$o{|IJ2CZRzFR5)$ah=ScWXQqeh$L;?k!Q@eGXsBdft(edV=p> z$Cpi4-yPVG8No^`-~B%QR&lq~7Z|sX@y*lt{wZA3Y5zZU?*d*`bte4plbj?cS3(j3 zh8RNR5BOZbJDWwiW)jAQW6VN)0V;xH!CR)Z)L>oEW6C$+HgAx-4{ea$_oY zTh!h2Lpom3Ojvd&H{DLW*li0EtjTpt2I4A2zTa7C=`rN~ot1?>MC~X!f6)SdN6!DN z!P5GW7S`j((ssjP5SK7F@Z@!apBz%of8$s>PgZ;WfZi8RH`c&nrv=C?QgVH%brRTE zhCclAkkl$#<7p8*c{85#&&Ftt#gdv~eLF&`Zv|r8okEF$zOwfNeYaq3T#N^X_|$@P 
z;6$XBUb_@dA9?p-=Y0ys$2fn0{$?sajG#=PfYp8apW?Mwi}0tX1!eli7qIV8FK9Z) zG#c=~ZawfHYt3p9_%{aNe}#1bjs4$-pqbQ|3^WVXJHl4-ss(BLMBkUR8z@!XzC7`h zw744vGB2CL{%e8y3S66kYm(CEGy~T{YnsyK2jRMSFmw`z(&isWe{+PKDPg!)j_}Dh z(BB-wy>V}Lqk`3kB}wG zprg5HJD{xe9=Wc?LIAfs$PN$x2PD_`->Y_)lbkw1QI%W&&@Y3OcaUFWO22d`_116{Hpa#bgVR`n%4 z#+v?CU}fGqLI}RA)-)L=9oGk*tQh>{kZbx)wWe3QUI$jPrr{j?79d}(ss)x>C))eS za2N~usi8Fu4?s=71gkS*{l28&{!YY|HsJn({|I>_j*th-pyzqcQfr?+j=m$|-l$ge z_9%(m)YwNpizE2z{~hbOkLp`T^mX)hlakBBP=DXCxF=rwn3{nv(}Hk6vY+^E*7F@4 z=g?x;^UkRC{JjtqjrClv-Vt8UcSfz}x|sEBtmdT9YQCh$Sj$0p4nd=bgvz^WEx+Il z!*irG`c1W#H#=XCSj&(3WG&bD4F4H{=$-62mqo4RNQfTu{UQdU291t{Xe$tHfj+N; zhna_7LRM=TdWk-C>(RD8@mlZGo1^p+awQ8WHu~;o9rxkWegwa#BUECRy)qJ_+G}@q z$7`opw?TNu?K5f1s8P0VFLq$s$`{u+XCujFZcA#+vF@k_%aAUUb~oObhQ>@RZ=)IU zN5a*Iczuyi!!i`_C9A*@{UhX28Ak{5Xdl^iVkIc~(wm}kBgmEgVB%c#D@XJSMi4hB zH-g+#&pk8v^)&K)Cg0pdU9#8rz$ek-tK%k|L{}s^b+K@MC>AFxCMiV;SoSY|(wX z$HH|1<2XT1e31c=|DeB-JpPAhJo~8Q3%|^-wa%LvUn6zT&NbryON$=s(dA=T{P*B` zzWo1Soyz}zgd9-@TuMQs0yl{ePXHu%y~SKg?-R(r3UXR7lJ1M@VwcJjp>YtMa8G-rKZD^9W{S{LO> zG?pzh?zMb;TQ9Mu*?;p%%`MmT?$G4RUva&EUy`->`?u%SW!7u|bWze4%a-I#+SJKC z<%QQ?WZBZEHw9&36FP#pN0&~pzK)*Y`nyF>Fr1D+xTm0wK&X^h21Q3uE>=i%!FETV zp6Cf`4El)e8r}wr)T<=pV@hL?uDl8R&{9rDV}Q2Y-KV&)xk_VjM4yJnppVGsE72IB z16=+?_+-NZ*GJ?*qz1~+7(oA-?PuNl)LxLtc|ASe38gKwM%Q`vKZjj~`fQJJypvY@ z{gpSy`g%wXp13ETYEDFo%ES{c$C{&ReF_9pY|I4t&U010tkm6FvBY4eb9EkEkLc%C zxkM)lo$D7JD7%#3qK+B<47AB{Pe|-sy4AR5_8WOV`iO{%)jk!6w4cZw`0k4rrg*Kr zkJnlsIb4o2Z>5Y<{M7pJ92B02m4!~BPh~mFiI_RziTbGh5jk^mWhMBTs3If!z~>`) zwDjq(E6a9RpYXopgE6ksi#b3JgKkxeGU6G=V;sA@6;>>* zk8(wmQPz9=;KJlP8k*l5NPDmb&j!oP9P2;N&E9uz36g7kwyomK%U5o%oOJBKr=S1b zc>Z7Y&!di+=A=xke(ODF&dJ|thqOOxUD}_tW7>zBTl+*C&?af8uqc{~emIwTbnW_d z)q@{9{9H*=R}#7M`Leeip3s$Z&Rw6*UN&w{r^CUu1a;Rk>Ml;^`?tEY>a?la$+nu| zb3bI&jjpd7QI-eoWMfVCcC9$e_AFBPXj^k-1y{tQzd%XgE%S35FKUwSp5=RU{+Pyb z>b%UCv-jWTkA)+XUrK#X7N<0tY=3vsY|3pKL+(rV7FSARul`bh3i#OI$_YsJ6(>s` z9TPf#uO>He{aXqInU3}RWk6_y_cF)m`oDIJ-kSM*e6{%9Wo_-Hvh~f5AAa+TCEC)uDkwlaANlwt|g;+xR(5_2l(#rY7 
zWUF08{xVs=zaSW+-KVea*NCqua4Z@y&-qS4CBFYh*ykSX2Zm}_67PTE1>bfST6?jz zNDp_0`a=(2`mkMioh31u1LWGv&OZGArGNj&bAj2Z=SK>!Z6giM?laqe|U z3{0VQCS%;|zs5R-w>6Sow~rh>;$87WWx4-z;Qt34B^UJe!~64i`+C=FA17K<8b|St zTd7qMj*jpOO(0J3;*ht?KPi5%g7F^p_vu?b*2D2i{(6|2jd#-K(zenHX=|u?IGf{4 z{9;NtFQAR1Wzo`TakR745IjRWN%Pa@?IfQ*$79cso4Uc!#XOC_d(!T4%7;3yA-~>; z@$My$DKQr8)ajp;TErJ6Xl3ptnq00yNp`32sbo}h`YV_#_CQ~HeMZB9%F9MTMQ8gi zoQhUDk2=y-yf_%?j*KTqPZ1SQ-lv~(CDmI-_${Zs7y2!I*!zzB9!Gz{ik0r^1!cW6c#<; zJQafRQ(t(&sQqvmD_wo60zfr0lM99GrT(A1D>_m`pubUJTG%g3uAK%O)_Lm{cKA78 zLyy)#>_D`<;M|T5KNZ_Q$LX|S%)sr-qhkhomH+>6F$2&`JpLVg`w=8g;W7^*UH78n zh_<1LhhDEfCrXnbSL*r38T9ZbJxu3YPY#ke<*s$yRmiU&5HoOcn@PKw+JieeuAvD} zwiz457;C<=LD?g8)mv};`}v6hZ`09@*-e5=BE5gzIwRn1y5gJF*3rAQq9p_IRl>z( z*|PTZlJ7k$z=4eKnOZ%naXA*MZx7_p=^y7=a=e@ByBLW?Fa%J9g%7xa$~JW<$_ zt2m@v1KMWotO0|p=ZtB|i~>#>~Oz4-mT;kY62>=4lkLvlTD92>)vrGBwK zRl^tbvNw$e7CC{Kh4J9;DRcm3Aw8B@pQbM$N>IgKioEoTh+XebK%?5zhy0H`hU99Y zFFTrqUUVFhTBQW*!1EU1voK0)B3GIP?>s+L+V0=+e-`?4^oH)=aQSX_-|D3&r|S^v{h`q@%|9} zM>URa63HMM{(lSnN9{I+;Gc=7A@Fx)2B0|x_%F8#?47{A4&F^*p5qpnm-t78V4m|8 zVcvZ#3&`E%YyS!`&*sVLzA(&>k(0eo&vP5PY=QXzI^*GC{*XHs=J&gM(Ny>9e&kDm z`BCi3tbck5YZ8MnU(5;*Ysv&_FM%;Pxx+BeR50&#jSS{nd3rkUKS|!brk8;^^#Ox0 zpS9VfnW>+6WRpqTNsHBM2o9_xFMrbROn9VVh971}=_QkPi@kW{d&sNK3*STD#5dgF zHF}Z%jff-3BqaOy&VeAWQKqsFObYST29I76!K3eWMeiZK#N7$6vD4K{-I`t^eMJwC zoQ>q)OO^xaVN9q$iQyB>TpN?kK(0CAR93s5S3AQ8oIgyDv;78l8`g=u=h(Fo{Ux;w z%Q@!Ig8M>U)V?qp>}p|l!+0SSCG2C0)`(pG+@oyN*f_9JLlt-;H$;OZ6Q)r2GH6N| zljO<^p#V#`w0|yq;+7%qFy4aO6GYy%!yg-DzlNr1xz#hIVFWP~s!}@{|L7ojKVc|OjO=~a>OVk3)2sKoRywlxZJ3{;=BkJK1=alhv(Vpb`>XhS zJ$X~*dwZz3w1mHZIzKD$_rv`Q`TOztV*-D#?4Rct{mcVVT9zKD!*a*N-~2OLmPLcL zEM~MU*Be?EB6dPrmfO&>^it#fuN=z*qx=0}4sOWNU8}D|o6(Ec)V+?|cy2V|TeW zw7c}`tNlW4JRiTuI(V4fWVYHx&M4n`;f zJV|^pEx4Dx6SbFR*?ZVqV?71KRkh&NanU_^dq8>kl{(C?rSYvR`0j)2;df}zpBo>ut|NM?SKscR zTWvk_+eN18_EYaH)ru3X%ko67|0pxr7Wakuv^GsU`ENCI&y-{zINuNxMGk;=9L%XgO zSKV8Ech#QiUsQRjKdhR%y!ftrm*0KYp9aRAI86J~Kt9KOj)fcxIhJrNJ#?7k+J*Pr 
zJ@M|_pATr?N6niHJ;-PXy(_wiUa(~Bz?wj{i^b_9_W%MsAK3ui@<*G>{jpSB{yEuR;~2Yo zVr0!fr!W2D5dG}*!Y|P(UAhP9Rz%tYdhGID=j5Oj;p$eCDSnt&J93f8$p~>lk{-ad5u%SEO_dz0|=n z*55jG3_V1E54HYMVss2W)OujlO2+`zL8^Rp2pvO@zJ$B{{E8puYwP*eMQ7mwI2O{x zE~phJz)!Bslt{H!vaP1u#?`@7PRQ9F_q1lqtIOJ!TW_+ZB0VNzC!N3yAMm8sr-kxq z_7HVv=(SRnPO^urJBEEQ6h8BGUtX6qm7y8dwXTw27@*YI0K-3biw76f9=#x;nZKku1H?SBuI zG|AXkW>^5I0dQrdRg;<%#U@*RT@O-m1sX886!%DVy*>I&*M-W&X#2cDU2l(m))%Sc z?Lki&spIXTPN1Q8m29c98WKO7#(Yk+b!6^+ z$ADKH^LrS+Nw`R{aS49QU)8F|Xw9ndC{B-PT*I1Gxi*g#@vdu+;$bQ|f6LXwUTo;) zBjx9N{)HU91LQ>^z|yFkzdhJ&qv28*fJ=Bs`T9T#t0x1flqjgYskFq3~oFeiKxtDo7qiEO*NWcP{?MT4Pt?G9$Z1RSH%_{UpnXcUmHV<;1 zLZ2Nh12zgt}q?R z^hDEjO8{0z&TwQ>kCCxIV0%R$`WJoNxeEUG{l%c`b|gZfqQld5_RIcQx_-oc47lYo z&u=M8zESxu^y-h`yI?2F;b4oI(D1V=CuTHVzrzXq{RUlM%(>U!L;U|>y8hHZlQ;J? z{2<4(tB0ZMtd>#a;7ccOfl&1xa$SrZ7LQ%9y&cO~$ll(gFCZ_%bpn5XG)%5>N7M9K z?vRaB!DHcHQdmEi0uQ0+b}}5w=!d81J;eV@zJzq*#DW%0gQBMpVZ(PYgJ}vXZA!BM zKLW30r?T$QP$?S(6+7CX9^%fLpy+me9p{Dg`9UD#Tv*1C+`H%giM$f0;Ey=oLt{-T zd*E1WzKTg|iHcze$D@fou!owrqFWBfFjRkYuszUDospnDaBR=j0ej%_YCD-6gLXi6 z2%pG9d9Dkx0}iPT>ckG%qgOgxzk*F@3{RFggEk>}c{Gqc_~->~LU!Vuhqnp!P)RS^ zCe(xfKahZbk_Z3`eRbd!Z>Q#Qr0(Ca3E7DNh}7g;fZbl;R_P1dgv1WmqqlfRViU6K zPw{jK@Bf(Cfb9+O+H@~AiGUrj=n-@}{C((0?CG?iYY&n{JMf{wHvs1u{dY$0x zgN#meHFv1d-N6&Fb_!V+k)ty)x@-8pBiU-d7z;?MX0e!UCF6~4w8n(;E z%B-F%tIMp*t%lXIM}OH*T9 z@at5BrCO|(J^DkQkzwc@p6>PZ5dZJ5i`R%S2;pbV5AjoQV?W^Net2k`Wv#MV#@6nz z<57-x4t`yRRM$gBRH@~Wj=xR@S#qR`hn-CMk_D#+SyZZc*jeKv-)U}kU+AviLmW(S z*B7oSa)+^#b1QO(u~T_>q}EnDHp~nEmQ23Iu(sNDyFaoT$zOCAay{OHtow(R74(Yv z(H@c0L;7&!4!qBU+BEUlvEy0zUSLm?>`Hd>z02OX=d&K!@6>MjyU#vVd*3Nm{ZY4_ z9N^3iS^%N`*T6&C^*QRju~u2TelRA3zpTc5aQ6lG4s>bED(8FY@%}&t|LA>g)<3ep zLGPY@$fT{N3GH|RALI9LX=r{QtJFWw&9P?h|H~4g1lhI}JdYEm-KRW{U)65W7Hi+t zmT9B3x9}6U;G=3Mx1abMdmKl&B7Uk+v;NUwQjfkUV6AFAzqM+_p2UwwS*secCp{kF zN&F)37Y}GV-fXWre#2kldu!JR{J(WHhraD3HGuYq{&M?wJN5*uTz37me|Nyjm3amG zn?Gmoa{(V@JGQKfQ#WQudm$s`dQw>V58#W;`u7-iuExp-dkiaAW96cNm8-Gx+S5Py 
z^~Qh?^6|k|pdr0+uGMeH!ez(aw%GA_XmqvyTkrz@VU&H!jtz6A;}PWr{H?+EsbRc; z@4`N1$7|{i$M*wY_R0vq%yw)%Pv&@k=J=Zn@{;`rtW!Y5bAN<&>UR;AnV@wFY6Sm1 z8EqMLLBXUWlBkihUEBN-F*r1`E^E6uo?$PD z{ZPl66^R6#OiRI$FYzW;$dR!Q+7Rzy?;0#Ja#^(*r&Kl`4EUkKz6_Z^xTno zEW`(oeG!f~yaURWw~<&^(!-m~{C&*8boR(de+$XmYscH82OFTBD1Pm=`yRk&&}q{0 zIcl`vcSrk(WI=;p;v0tl?pcBFeo1Bb!*4=;cm0sxT`=goSMx?cM;hAGCjOW4+r|~)L|OxyED1#Iex9?yU(sPX}7&kjXjRFw4jVL5}!xAzD)Ih zexJu~qS}Kp&QLy&c5)m9WgU1AzwB4L+!rM44D0h~C(Gnl^Le!E`&~g<2TJZ94W4cf zMCXsUccTf3mUZmnE*BlC)UYbV&adA^Fyh{(2NMzcVD?cH`r4 zKA4}4k#FH1d{8O4t_RUoD0u+eem5G9!7%UEkHyHh-NaQek_h=$qz5~;#F4qfcfz-%qzc7&Ea>+oq8YZo;H-v<5S2Fmfbl@Z^g=mzxEB2XElQ7T*H1Z#8x7PE zv^1X8(^O2xMAmJ+svkCd%_lpS@e>Zo5fk*aXXPnh`&4-Jvw9!C_DCVjYztKMaV6Wc zs<8ldlLJmTk!bGR)Y#41kM?hoYd5nVKOz&qoX_L@T`#z%c#w*yx(AQkYtm|t;iJm& z#EOW0r8?5iJ(`ip-a4Evg7uFsg8et}y}9Tjy7k;4bP?U;N{pmF@_hr0_9=7`=*#%Z zn}*OubmK{hZj@iIK^L)$@BH8$?28=d&<6WV-u^%6Guf@1JwcyI;Ey$XIDbGpb!UZB ze20;4N8%UWbI_-<-X3NI^t{Ej|OL+@XTv4e}xB+%!+ zkNEXD=j!FuhkVFcb@OPHy*Z3}; zh>{JviMRiE$1?#B#Ox9JS6-|C6CQ}&dXev8M{VHQF9u78L-JzR>Z^WTz_VuG4;+sw zpGocHhc$y@Q-}1LTzBjeo^*O2cRUbyYsZM+It6LDo17_^I__4_er<4mri9^qNpp~e zyYZSDg?uL7go-Aph*-Ek8#%3DA% zT-MG))!#Ql{YgaZWFlg_b^9Rsvm38=EOx5@bNEJzKV!E(k$8%$_{MpzT&UJu*I53q zbQuyexvTdzC@J&L0SUDvB%yX7p>~sz0^KOzcBkK=Mr+tUC3|d+;yUtOt0P7-@Nqbb zH8VnPFO9h8TIAM(pBi%OIe3rCqsXZ=Gv{9RGm%qgeUJ5g1plpN=$&XmnJLzXtD78$ z!^?U|%*=(u!Cgv5MY1O%pj&^AXJTd4ZuB#;yW3hgv;uk%nJJGsI5vcg+6^CNC-X_S z-i3_XayNOW`R<1~-a&(B4){>Ar#ip74`r9Wvj2aJ4`mk_BEO0cB`cZO+zauc?9xkA z&V>u{p~N;q&IQAV(oR+8pbsT{if1?<%5Hod#4~YtA4rAP#DzuiVtNs_Flt>vK#;ZSRcwS^h4(kIl8E;*NumCH@=jtK$V}*PGqIfg}LgB zB6D1e4`sK0SouU6yvIU31;Rd*-FhKd`cl96P_i3v-b~LAC|}Hl5k6qzLs@kfYw_Pr z+6z_GI-!NuUyP2Zn;3`T*B{)G(phkq(plJv>npW(k!!hli1k+;qqFGNTbztq=`4_q z_*Nr>U$hT1BjT=`_%#Ea#XP<>qYpo*zoGsREvT~?iCwlEe;bim&u^FQ(lbIji?CgG z$MP=X*Mho=ZftZi$2VV?u42gjM(ncSlk2P5WxMey3F<1K<>)HFoRF@f3m=E!3%U*LIeNV&BpMpw~Atp`RDqpRq~OK&7{YZn^ zXuPUT7}Qm)yAxezH!(&WPk%RzueV3x>u7ryy4~SrA@%_B`giM%%HzHpe*hU>El=dm 
z#}=qETp00V-Fjx!=uC}Wc-D54JGO=A&3xsXG5+>qm+jK4xaW+o8$V3#wXJ;ThF+7l zfMXdgs9zb0UACJX&W3&^Sp9P-yKI;KvNzT)`wmfwf}38i{D02*f5$G{4X6Iq?6S;R zNV6hqc@!9W${X|?=+f=%xzW+2VY@8cQH*A#TemX17Prj5XjY)I$g^F6#)}M72S~GutlGr%*``uafTJacbWZb0Fk#St&Kf93NjcoN@`2XWM7UgMtNvNI{^WzmSKy(DmmtcfM|I5=?gY!Y}WXZ%{ z&DG6R$_o3S$&~>{`;_-}aDE*-)J^!Pm4|fUX6r}nG*El)d4yj@{(tT^llEW_@;}Ep zG~thGn;*mOoVPnEY5WEJK4g;Bbl4d0>WhaGyx(M%KCV8j{+F(T&~a%w71cXlLnZ-Nt7^KG?G11qe= zWymb7>W_>I?qZKKs%yYIDa&wiNU|^_mBIyBu2p@mkb#V?)nL zbq#^@TBYMM@Izv?i@-NA-yD&6AgYzMLjSNSlUJEm+F6vlV_qUIivth zOH`Etxw3*$X+%A+!@0YNTD!8kEMnvzIpm%t+~eohW^xX!`)hJzF!tFT3u%%!&+>#Z z{z9~^W2{B>QtQu{gUBj#V1TEju9SHyvOY1=HG1~GW=c1Q; zs`+&abFq4{Nvr6>-p4VAwi18gtzg1Mcq)s(uvAM=+dI~lg12zGZ45P>w5Q{3ad-$@ zR3wO*Sde7fz1I9~1v^rq5ifZXM(8P=YfojaQ}7fH_LN&xb+|z@=CD88+=1_1s3)=viX=tL)CS`t*=Ty6k#})g&!&-&5XXg@^0B*(wue zFw;~r@E=DFr2>V8Wh$a!dQ7Cm)CIvv2{-`fy9#^L8b8;+`{jGg^e!imP_(`Tn8ksK zX$od3s;`SfeVNI`635%tDIZw_Zu5YfToZj(jq`bjQAtOe+KtTHMZNd-(7yV)e)E@y z2j!SLO5{Knwpldhd_@U5mFE`Wukc5cb`3`hO?U?JU*7=VV8Aq&EIh!pR$yARBM8+s z&QW_y$v2w*NttB=6@|j}gF30UzGI+vq5L9QShcH?o30P~wO?w$d$v!s0gT*f!2Q3@ zUDk=O=7mH4BmCNfkl*ErhIoCzuRRFyjUEHywVtdX#1(fV5HG;%WvYUBx#A}-3-Keu zU6`y>LflOk_WySSa1nnq=na~W&-K4K_cop;8N@ zg+%lZT~s(6%=HN5*2L%^1=)8we~>&I>^MGPuf517ae$rFAjntn&WBID*jB)m$NGWoq%!OE){B~C zWv73VY02FGs`Zk6>><XsDA z`@1E53m9RF-_nH?KGOKX%L4I(%`x$Vo%k@9f>+4G{!8i$BUblw0e|^R0;{{hov&8+ zcK1cz;U#?Mb>qPf=PP`=z3eZVz9f8#37<`x* zfe(BAcKWKo1F4f7@_z#AyZl0n1l13-u+tQ5Fzhr?A1G0HSru z;tSkBxdsh_;8e4sFfTX^PB%=yPY{jRsUP9a6})kpXV=Z>ZC*z<;Fw1P{*?Y<&1 zwvOH_l3A%W({=^XxWc19>CDkmo8qV?*ol7%Gdd#sekWDhFO-JLTpY#AQF7>ZQIRHt z-BYCAjQUX#P&^aJ@0Ss{kNnAMUAdh=FOZ{AASd~fW#{Z9$Du@y48L=B;r)C<;dKT( z=be8opUpuD?y3$ zbN%@*hT1vh>FGRu*xRK)?z-=r$d9ifKXU9^V9?Ty&`|@9!9BBNM-Y-;@94mOSl?s2NUdZ@HytJ1gnRuUjPoOkf;mlQjj`qxMTM$~$sc&%NNhiCy|lWIzbQ zI?WjkV{;UYJBhjz75KJq-LR_Q#>-}RsY31|qy ze7x+HYv;oc{hs(Rjvvf3Xyvb(FUEsEskYWSV=KF4E&JonSy0In&9Wn20zDk?6e=he zLLn#EinwwTSvqBRv8{qE7wn5gmJOC)&)%N1zry1Eiw)+UHJjc^X^?8BwMx^0f 
zR{0}vNYIWzZ(~C?)Sb`g^1jc#6RdI8q^TDkL>yAwWa?Tz^3y#j6KsL^C^+`_(?uJ%RIozlT5-0m+}fL$V;f_;&N@ZnkBiJ3(wnmP#cz(YebZ|2!Hrmt zWmjD5&aTgifXB^&dan57`mZ8Alg#v9TjM)t)mov}YMR zVrj&VxQqyic`^NsihBrT$+cFp&CxooLpjzvvw?k@uH- zqjyBPww1Lt-QDDn^9`Jr(x+SPs7oU3VY1e8toR-O4xQW$)EB48p16lSv0(QM@L1;F zUNft{e8=ntS!IRH@mzN639Lm~W5xVE$6cbJPy!S#wN2#8Q%-GyR)`Mk?={g}nG&1O zU705B>vW}@KxUX60oxCD z@L3&*sh<>p&qKbF`b#5zzd!JMN#OT;d=nJ>mih|R@lM~=(7sr}{P>Y54ft7p{|AGpdofu7osc(9hZI>|mK5Icp8ITrZ6&5U&t=PWzrxCj}u@%gbl zwIn72U#`83w&-S8lUhX`YA5a*Bu}1j$ep{mb2e{0i#&PjT$6VAAo_vpiRGgKOXSJS zCz{wB3wBom&syM_`LtDO+ve?9@XSrbMqgKFKsH!yz1csvotm=mP*c{^?7#7gPPE_A z5TEOchIog*v47G&cF*ETw#in3V;A|p688v?6mPq^u7bBOK!2Br{?1Y-@TH;C1fQNne)dNng$vPdaNA%yZq*FkchU5CmcVh&LMM4+ee@!u&q30rP6F0rO?v zDR8ox!pW+=Qs90Hxzd1pnv(3Mhj@S4IZf875pu*@Gzmetf2J3IDRBQxzw%21?p;I; zBSSe^i)vRqgFGp4f0ubfc2;{_v}7TWyTdK-3Gb0|?I60IMb5@3xDS>r3-HPd1y-Aa>{{`>(6kisGyBnc2{um%Jf$LtSRP=f`nzp13Sp z5v?-YQ<-3SZI3nGe#5E1`t~1aJIHhO+nNN=4GH_G8`D9>)*o9t2TGd1JGXP7w0WcR zke0gX)k^K;&uhq?wPgOO`BVO2J#?ldj~rUn|90w~<#$-MX^ZVYpf>r8>f)+>d3RU+ zqH15J<%@(*ORYCN|5rC?ryj3)!}CU<_m%#_sv9nlV}Cw!70Uw+lU zdEzp(Px9k#aW`Znf01!+`pJr$yF6W{1ka-h`BTQ*$s?OzN&EJ|IF92u=5Wm6n8`7d zV;aXajuwsYiv#-jPzTkNqWeJxZ~Cb?POaw5#?~*CTnynCIo5e2-{n z($zk4hxH9d2Q^bW&>k2yaxSxXAm^C2&y3hLO@aNpEUC$EBY498g|nGDGeD9Z1JAzX#w9I;e0s zSn85rXT-$*$MFRlfO;uW6OsB#sFEuzb$zCvIXjfpB~L%d(ti-wxtZEAbddd#!^9`q`>_DCj>L^7+DbQPuu5Lav`p^YHvG#&xYIq}L z>L3)CxD4ps>N23W(1rdnutx~=P9QUjJk>#@^ZDf|z8jf2TwT4BsJjYAv)Ub&r#{mw z&x$(J-EM>ew2=zUK*R|2VZUXhME$uuqYRlc)7Lmdw(%|nNNuwQAH0(!v? 
z{U&BoxTy}}<3t`AUN6|Ge}MHp$W5_BunSG&xhcRTs2A+S3doMYFKCUF#zF8~dY`3w zkCuYW%+5=`XemgpOew^<%ko#L&-oFA^}ZmG6$rf4stF|QnRo1m#E`ezul$yfMcdVR!)KUM!#Ud@?|A4TplB*Obf{=!iBsW1xu$d2uo(PUtsw#dLFnI zX7&TM_N%%RCz`x#dHN3qaL&Nxex;T@Gu?$2DZK2_DAA#xIBHzF;22eyt2)v|X| z3-PGKh}e@|&!YIcnNF!XD0`k!C)>o5^?#2F*$G(wc&D|mSFAFV3oyOpB&s5n+E*Ru z2PH2WZ|xr`RNC1sE*vU{@f-+3WsTQ>MvXTT8Uyl^H2g6}?jAww%{k!O49(3+gaCnRJ z`{}c62smt*L7nhDCar{{M*C_x10ypAc2F03q!|Nq?L%h_Y)3;V83Wl%{bLpGw*@i- zcH#$P_yNhf&tbMD(`yG=4z9)zDCkQWo#~a85s?|N9sBADWYdq3a-UPX#xisyna~~4 zEp_M%hR6&ETw^i=cIeZWyE*Wf_W*? z@{=`GGE!!4%i1p4D0AxNdn6ZS2U*lb2I$al_JngzLVdl4FHk$Smf>?wLRb667l<8M z$=03v33!)wA{++iob1r6#H&boIEgQi@b8Dfl*Rq8GefsBqFiDTqw@&MJ^Q(5F25#v z%NbKH<8xk){fovrzJPk3?f7Y5_-Z_d?28|zs^`hPyCds)cHpxuJkZJ)6Rc~SZ#lha zWjj?{%*@7{KB?+qQNC5C$%WQS?Cp$jYW1Y~krg~j7Vwv{8F!G&N+L=BlU+spHS+1> zaVqc#?!4)Ty2FE5nPC%{GhNN_bb1>T>dkC@nXDW#SCKO;S2CHs=TzUnRqNBj-wpIw zg|X^+ri z$M~E#p_r_gwi#%3&3r+#ckmcjo_CCXUl5YPF+S@u#`u8`Efp9e`Tu1c3)C3r4Kc=c z@~g#;aSCHPr200-Sj68i)8nc^W1PsiW-`Wu(@fe{8tVkRQjIm!TDVOl1~hxg_5rQg zK49KyvTk!~i?pKam#6Rh$?`Xsn^#n<=o~ONcMe!~rqsqab`GS~;zM(&b0FsgIa{|_ zHYaaPtxemMu_1lK-Oqtl>tKu5&zH8^wd#d6a-e0=(?&qq^4Mlfu@2N?iG+jyAXMUN2f-+%!DKIDMeOu$cE~+u&bQO+9fR@^ z9Cx^<;IH)@xd=Eep+(|PyMD9x|3CaW>i*w`KL_2S@s7ox6~1WvS?V+J2R&E_f7na< z1%I4S$RKr{p*;1^hd<4pq3~xHl=U7bx_xvCf~q?Zt$hX zW8lw6oUfwSU<5#z4>|5=DkWxi7yjNH$I%3TwBlbiXYamvuUKcsKW$|GAN};`-T4~} z(7NT|UzBCLh@8=5v0dll|JROuE;Pzy6)L3_*#|7OqxRVc(rWBbqwAO3sT=ag<%x<~ zr9rJScc#@QK&^71R^w_q2l5YDY*=niq-;suoL-x;DRaZ9!SpI|bBKNjz0&@?uBL58 zMu=kN){V51(xWw^2_BD+d2kHbRk@0O9CQiYTjh#}?pdPfo;g7G5(0EDQPI65=w32( zF9o`n3f)T^itgoxzuT5c;~`ab!%}6ohk;je<%!xODy6}M`1LfIRqAL#%lwEmv zU>dEIR(NG<6?JZ|l)dIT=aB*LjFI+yD?OX(xssl>K+gj+4iB8BouoNuNYCCG((}Xg zd|<>qPow9KIP<47>A5A#_~t|Oc#!roty%SaK=phRJwGzyp3U^Uk)9{gb8VpKwe+}# zRzrJO_56tHxqzPMj=1Mzj~*GggPzCH^O8W%3+eGD+B{l?>Ul0bw~?p)$UxeNdpz_4MpDnLiz%XXj{Ro=;qHc;FcAEt>rb zf#8uV3J7DR|I-4r^lJJGFqwXd7A1ulb%nPjdY&9 zHNcym*V3~k(6dI5XUh%`4A4%N$vmGflX?Ege`Mg`hHbl2+tG=Dmavn8(QI4kAsR#z8id7RzsI>T8SXA4{f 
zADcft%h_C)m9rC^Rk-fw>@a7uU7I;;M=Qj({+ovJ(N7j~yh6hvy{jYhoC$=^%0(9!lbGQuG}hU8|^P!)uv@OI?? zj~(rLfzl_SS3#f9#_k#$A8cYLd^}3hHZ`{CPIlqd{x^Ak72}&jrK0HgV7ccg_uR*? z(~0X}Q()3oY)9|SF`p*?PQGZ2ZJZigUW1J5rogxc(2L2qp7Bjo;|h=I0CIA1)R+n* z#x#>S|8~4DL`Nd}02xy~PmBvaAu-SLTmGOit>$}ki1Tj;a_wZzmNBJxrnn3%h5=t?-wqs7L6uY9q&`AOfDRA6GASy$XR&30AYWwu#$(`{d? zyWIBmx>DQhy2)_5U9QqSquQTzj?xMmi+4|J0?I0HMZ$!>fwyLCy>W-HVAXH^e_XZ0 zcgc$4>IAE~snDuzm`v^QC3kB@3B|wlzqx`4(I4&b6{`F2IJ_^Qv9`{YV?PdRe-h144*=@zu8LB_6NcuAumv7^jAK{_KuhVbCOYKL-otMiK zT+gD99Qr7=mZ9G=O{OY07$?=M%TL`FZyUE^TvOcBnY&BtI(-+dYSS0`Yp@vz z{;lwjsm~pe{icl!6&I?TL^p-!{21l!*^W-S!a5#bKePDy{Wz$DIH!M~dK`t;A|RBb zH1fk+Y})bmmW(%VJH`YuBEFBEfO*Gq(5|oXwv(p@y#Ww313}5F+@`ygtur@dWfcvC z8Ca|s6a^tYw-Jlfj_rLnmgzR?|Fsh>jhv@!7xN-?k;kEp_ve|kQ=73DTx8OY(uReh zZOq;<3`?Cs7~bg<7@CUB6EvIYX<#^kXkrDWAOr<&2E-or1tC`J8#%3w`5^YPXE4N={eFR1lLsp7Y{wtRfLNA-Sk^EQ+n}%` zY_&Ew8W1}PgdX=85OZ^0L$8&}HWXPC>$v0KIFq(!lS#XYV+l?ENxZxHiOmx4PlR#7 zjzZ;sE32&yZFm~7*Q^$@#mkzzsXwVPZTD!kpEdjEk8i_&J6GYe&=|8doB23Eg!FLn z*X=}0KjRyp`Vk|40Xuy)n7bW|3!F1gT{J%&ZM~D+25nd&BcrY5$|^>eNgpYxuhU1G zr%mrtQSITKKyn+vC2`+bcN_8lYLNW5e=NGzjmU=_uc4i9+;91faj)?k zBX8FiyCX*~`}r)!Fd;Msk(vt@Frol;eaS7fbyi^11!~jSXpCmF zTlVt}jOo@var|^2Fe=mJS;0I>u3`#+uJ)-c^zG;mAeFZPG@yhj!}! 
zwNVd(+;3yZmu;ig7RM@@G1l!0pHmx)c9cZmv)7lY@Y(4bqwsm7Z&H0~W9IHL3Zp01 zX&Xzqd;WOS#>w?@8>iIAZ)_v$a9#w43!E#@JNDOIGWJTwE);l*8vB$%V;9=mB>18% zZKD5>D`|}DS(m`M!PSNxCN_74-19j)k6M0R#MnD>Oj^SRV(&RFqlpghGO}0vvAKNT zx|^%Ep*5;qJ@>Y)W36dVPsa1A>QUKI$@KCxURckUt3CDWw%HsjiNU}AU*Rx4SMQyN zRpn~@>gL$KVQbURQIF*A?bq0H8?Uu}-CALL(l=|*b+$^|^|oboGdEnt9TmRucqKpy z6*Yfv;J_cWNw=I#&uBRnpD}Y!`X>VxB7jpywRiebR>`iscp&Xi(WbOdED431?*2aq z@!ei?f_u|C2ezKLYcAU|ck`6})3<1wFW=ueaOi|-^W6PcY?`s<$}Mr5mo~(2UI)*X z+xWeP@w*F~F5P3<+)l>#LcBTK$=O(ew;o3~e!JV+h+Mu7D=R#y_ZoHF>b+VWH+sv} zvDSMdxL^szoElfx1mfDD0Iu|g%OiAMLIb8PNDpYv;C%7azfSGj;?2~C&ztd{WxPi# z{daKdPVW)rd2|LGsCV+W+7I%v`YU^YTUJsWZy6Wy7KuI`1!g?vPLI;4$+ZvQ1|D|) z0&7B>c-86;(C193YVv%(Dxgn0;i$bTUYkb#$5Pr0S@6rXLK zzE`+`2dDzCO#=3D2JQ$(2qu6-Y0voXNfqo6Ec*K;6E;lTV7cON7Zh#!hPCi%?nx-# z(&+=K~WHAQX4v5vrJcQ7}Y*?({=j0lr?K$&?=7s*@rv^20h?0 zaOHlFV36Yfly{ipD=S**y839|tpesTc#C}T-fk+Ip2HsQ2dkhS^27frw znx26QJ<{K%>eN6rkRt_yJ^=Gu^$+^f8}cLG62zdTU{<6~RT8{c2G=Gxg;hH+|TCO!{O;S)Lv+@k&Z&svS(mC&i5 z+`J?}rxu6kRK=sIwTtllT?mabaOoB2)q9sj(WMuhL*dfrZUdLjxCM_4ED~xIZ~6Nr zx11c4F=4|&t6#`ia=C0kN98CdkzeXw`DkFDc< z)26xm;x?u1z)D;5FEMzOgBC*YrlxN~-iS^i%_g7*C6@ zh360Z+VBUF{D47zC4smTxu=kOUgFn>(MycuyU#trI!H5V&9t%?pH$yJCgAsz%lD71 zi=*OrJYx}FCp}^u=JSqYjT(oUaU=jIFY0^+-XKSSiENWDK1{0`AYgpGz zyP=K#Zl!xV=>I@j)iUr4LQZj-n6GzLr8_)@c(#z9Unfn-eE$bbf;PMXWZjwRC)%e-R@IuQ%=d;ZxD|hYk(hR=b@wjk57Of- z{JUa(iZ-XlZR=(JPPHddgi51n2Zp^b4*NwN8)aBbQt-IJZsV>)6S~r#* zey(#IS=u)^!y4}$ym^Gsw8(XG#? 
zFjrC)GhEFj1>IFE`Twyhc;29g50EK$k!3V;6@N(9xK{YvHtPSi5q-Wm8QGcnO62Ia z1Z57DGY%PAE6!J%s{)lbPdK7%TdTfb$A$G{PcBiwr&BGNjByDw?5{EJ>0r z&P=wra0Y}{_FW@mTq}9chL^qI0*TEsxI~e@MEfIH;~YZLY{hdwQUa;wzBVwV%JVAo za3>=zbjC^`a?i`$b2GnYI&Wg^)7OGKNyK;3%9Jj_rs#mBF)t)}wUYBQ9eU8JzwNg) zrmNA**jx3FRTeGt6GEv)E8ePa#OJLrYMm5BtdlH{Kw0AdMc*s&|E+j9#QHJH+B=J% zV%U#SuH35 z5DJ#EuPaF1@0Rf>d`h3p`Bhz9<3 znN8Y(e?c$6v4R$B_qcw!cu0)Ql9+iOOJXaw8nGk_&6(|SJ5Ih>r`G=%pfuB0Z5^|_ za63Dvx4f_ge@P7wn}jG28&kl;CNAJ%6R$jM%y`%&;$f47hm8dfo8+N9Y$7}hO?cQ$ zKb9KdVUutH9ya6bDeU>FTg9s|(PqZKCc&1b_WTy`Q+#Zs3ca!CFK3*0y1SJg>UDQ` z&u=9oZMF4F{o7wk7G-14pUz%8{GPwpIe5>1*4YXd)~XjF@n=BcQjqvtv3kT(f(&MN z9UOF+!euD`nN~6YB274Vk&lx7pYv7pI0v2~cF%t{4qp1l=t4Q>(L{H@rTM*qjgP`j ztK237b91c)D#KqmhhG*uSq=NAu7sxgp>!YKjwYQEkfG$Hu^mY$Q%2!J19x%vQ8JM&Ek_eUopT>LJ1>NCm(t z^Mkp@i&@j9A?cu1f2&`+^j}rhgLfn9X>>YT^^X2l@*7JJ3y2ty9cc!70AvFC8yD&? z0sV~7tQTT3?F-+L#TstxU$1Cb8t2c_qfx^zEJK`dEG~}M-dq{ZYyG)~9{Vw3ojKk| zi?1GS75HhF3Ix|bnhUSBYexE}Db|cl19L}hG6&$e^~UeU!0M)$-1`QsmXMn$yD`&d z**)5p-jqeH=~n$!YGoSpm*z=toX;wl;);e{QjY<%vuM){m|Y)$+0`S2*}Dp6uejcJ zWQJn?TJFG9l8XxK@f$Vw$ueW1z%?rF{vzqMwrxZ3w{`?HPxKe4U|2$wfPEbg0&au zX+;Bbx8AsRz8NWAd)`Er+g#SxgYX2Bb-D-$H?sDGzRqR@^**7mdA|Rb^z~~ap|7$Q z2YGH!I1hb2=Uup9OauiO)g$zFoM<;54N_d7-~XnfxEAMsgzAuAKSHcJdTcC|pw`)| zq$}B%R{IPZJQwt{`tHJAFoD3u}Kc4R6scP=N;DDrk6%9>@7s zdb|y4UusQ>-05FCdP%&t;`?FTI(Jsn?5nYba9m6a&;{`8y@6@qmQV%3s|55iKZ(Jt zd8+?t%rX_VkcC|^%j@~yhgo*dEhE6Jhw+oC2A0SC|KXS$pjQ`ySuOg~{xD`OR5cWc z83(hbv+G7<*7OL>+OH)3FlK%2i%876%{dfiZS=P4i`gY|&9uU&m{yq0-y7_WAHj{WozWFd}cKQn07hUVLE zF{YOIpTG($u0R5NB+uWi*;0MNP z?z8%DSP+Itd`}P(6$%pNBZS0<3KDI;_Z(@V9liyBPSIh#KrEeUaw*!f7NVYutEYx` zBi`m!3ub_oIsqFvm7VIz^FN)WS{x%GcYF*Pl_#t^eIevUb{O6uThnA9BU`?`S z)!lO1xpE9q@ivJ`09C#bXP`Pprj$)&$g>l#Nm5Z`a`4SY{C_L)|MJaJ=RES8rP_Q8(f_gE zyo>LdO%HGK)wB4n0VNp*cN(K=J8PZ0Ci1J(cka*7G`@L1#|^ZVFV|7C9u{^;%4R`xiN3s7)HL% z+k$UMJfp~HOm1qSqO|lEIr6jG{7X^kigX^v}Yrl&jItVtfNi+Khz1JXgy-6ZzHYJ@<*G?fe1o=eU6; zGVL>XSU+l=xGe<_>(U)l8m2wdN9S{TVABiBm7W&F+x|3ELV^tYg;mbc6cSp_A>tlXEweNFs+ 
zpErZCe(<14d*!62ZKsJ$_!Q%=vF2<`X52fL!@aP-J?l*5TN<26{9o_XYLj;tvX5MC zZM@N_u5FC+kHpSM%wWmR0jkCAS*>O!n1sW&Iqml{q_`tZI_YLGXvpzgoYTc)KBnJfYktcuu*R5ro z#d#+D`Z}@%1Alk=CtEG^$)^tnxTo&>mcoT+|3B*91wN|sO#DB&5JJd35F&&a5(ohz zhHwiPX+Rtk{N+lX+`kiCHU3-z5X?{_HW`!(%J4#sKC0mg|+ zspa9|Lvj`4Qgj-AFFPHd)(h>Y$fhr328#y9@c-$75dQCD@6uEH;{!oO;WbZL&uhu1 znA+=2r`Hh6M-@5O2%J;s%31B}Z_D?@nCFbB=)c|Hsfz@&BK~|1VE^R^vI0 zXZf_AE zkE@BSjXQdJ4SPFyjKn4dQL4{KBzug+TMh#kHZl@|+soBRELFer+!It*%_-KtjF7Qv zWOoL+HW>)(X+?0M&T(45cbJio=R$a{lKXeqKk3u7$No2Qew=r62yG};v|+NMiHTKL z@4yyjGpOM%8?)apv;QPER%X9HpeU?H=wVdteA9_Fcowm%xaR(Sl|y_bIm)OQ0A>bEK<#S z1@T%xuX_Nnz3!!B%az$c(W2TGsq&P<<`cBIOX=92zPL2tN(9CRyQY)EW7|pW|tcWa7}KSKQ8M z3=W9ril}uJA0$%twC+=I*O-b~9+WP|r-mSONA+7g8ZNye!8(iT2BI5dTVzb$LBow> zOj@uzoYI^6VpLVac(+Ze<;6H)UePbthiIT&t7c{wsPsr+om-sw z!h#wyb_=&{TX8&f+ho)4^Q8vl&+@nBpBY?z*p>aZ?aq|^ZCkH>W}Z3IJiTNl6~Ski zGt6_$v&~W&`Wo}{;#D=*9r%`cg*l7Z)&=JIL^3WhXPXz97n_%v*B74|%y{-Y<{Qm9 zCD#)JyVAVYyvA%YuQuOcUWZ=yiG64CDI&uoKC67>$7;5wA8TN4B0fGnUMr3GtZ-Mr zerk4*2_JqqcE8M?+?8`Bhs@$yQ(XBy6%!7gqVoO?qpU{xVE4Dl509x@PlmXwKXLgt zkRg7Gy>J)%w-SbpW?A>Ov@y5h z#46XA#^ujaZG_o}$yyX!%`HYhe(vtaRD6wE>X98eH@N03PGu~D!M3%g)=AqGUgtvT z108#M@;G1ad$uRGOzdFIL_R=a`lP0r;C&h+a8K{VswL}0ryGifTeNMhISb>KgOsDPX9&J-|C;gi4 z+rjUKzV`+<9-d|r4j?N*=)YW_49?f9H&*b*wb+qQ>D&8r`G4BhtoqQ_6Y+5odnTP8 ze`av);V~w`nn;sF-FvZT4w1X`-5I(2uRYS{B_zYg=-=9bB)q@7?qS`0q-h&C;tC9{ z?_fbR?{{f^F^tN2WOZ+;e@IU~t>@dVJv^iIRAgNARQks+JY{~-Q+cPCB}J~((wiLn zX~t|heUd$jef3mEl4s^}_Y!()DE*B68)V-Pk^MQ0c(}NNs=>VI31zOVuByQ;(P2ds zo!`k7qC&-cJHn1itt`tLSA<*~kljx|Q`dR=@}{lln^Ttg#76!}*hfHjAJ7R$+pM6= z<5!;6I+5;A;f3_p=T70_#tKOrVPPFW_=e)P0-?(;KgI<8c-SC&&AQ~vluOvYo|q-gNlP{$?aJgLY13f=Pn?! 
z?`XI=q4;mPu9>eY4lpMijVw(ruU`{NPD;+c(fRUYyNzlRhkdF^oI+0{-Xl!19Ri6x zlCjy)Yjv)qBu2Ii9yZhM!H@{H>Fe=lI7imRs9e48Jyr2rMj#)4`T=9Klu`K7fi#5v z97CH&SXuCG+KP9#CEk!2ev{&5rHQdH&ihUw-JaItZS~ZyI7J;Lv;kT?>+jKKoZtGr zragd7?jFu-IV3hTj_UG}qPdA9H;rt+n#92(D0td3f`QT%7tE1jLGpLivPR+@ zjBL?U*zHEPi(oIaie-qW(33?bcooaQDQq0Rb`jzBi@cy${2yr|gBH4gKOG3|ewG1w zYA;Vs=I%N6n7{Y|_N*ULYnAhZ93m5ZSU=GeTW~M&;|X<>ib{6mS|$?FeR}+Td-fj+2rvS@PTN+_sfC z?)v%WEOT1PETn^(=DElRD~eawWFRffF<(pNnlBHpB#&W1$ztS#h34f*2uqO_mXJ4d zBa*^`;+xGknOBwEfP8Shc|8)sT4aSa=5L#Us6XPf!;gqOCp}y6g2(}@j#FuTFt{-A znV0Zh zSAJy2zGxu9BYb zTe|cw2GH?oa|4tzWF5V6*TnhgeND4(4bbl8yq05lohdUJD5-wfMeSJ85N14^I)jBRUgO06hu=%G2n@Lfh)mi@VqZ-ro{`O3(DEe%OW00)DC5 z7M-aJi^VCb&Wem8Qk$pF^vQeJjkZxAmh*Owk>r~$_Wv7MzCrFFP96=>+1otg{kw?c z_LpzKD6cqq>D^L#*G-)KDf|#91FD@2?RH4-WSkHPc*iaRD`OWfcn#y&o8gi#tUusf=5m z(=(n3)j0z99w{#F6uVaV#>K%c8O?X#`v=g{Fu=8q0o7Ys0WTD(OXkdeU-!Hme_|G#kKOPeolOn^cU;l@6= z-N?8R$|!i^MhaX>aN|pR7k(J{FFL#6#&&e}F0AXr;l@QU#veCUGj{LV4cu74=eKC_ zfQqX3$BoVZfz1-!_~dO(dz0hOkC>qwpC4{W=9Qrfm1KTBIl@}u zEp-88ln0f1%H~7%yCCd$LF`91vHU)#e7@-$$md&-i*H9hcl9MMf0L4n@3pQSrEGm$ z_f53%9prNtv~l@sRU0>2myWWHW!*Wn@h#+Y8BrO{xcsH6joH?WQMNI)TlQv*E1y^K z_^NA_&NPQsGE^&})^yX4lzc8WfRx@xdonpE^*)Q7j%}>B895zV-)rnikO|H%w~Ca$ z)SAQpMOdR<(s`k6Qq>#}30kD{bPwtLGP>ozsz}WUA~n181nYk&>0Gi}-{Dh)=`7gY zXGKbNbWsN@!1M#w$wq7wa0J?p_h?(B^B;imXYJn7xm?S_iglWInS1102H)Cfb1$66 zTRLZr{6^Dm{2sL_&uUsNhp&8YSn3jzJCUBW6fL#I7M$8XDUb*;ZAObN@P-zfc7s@m zTvj}hJ}&!d=*ezdm(V?F=VDJ6_7{W0Ttr9jA_v@?4jjgk2VL~i(d9}9^44b6MjK zQ!@WT%g0qrVOQL|_2VQ$J4d2153d^g>(GP7r>lQ$?09jDGj4X{D5C)1KNnupg+1}z z9#@7yj{h%%@ecT9b#$GJ;FqDmnzLY9gBY_mfh5Xxn45)9QUeV z{`;Tfw^onn!B>^G{#%bLuY)J=_jlo4lurL#2F zLqq@Hs`Zh~=T~X%L3p;OeD*=l@s9(w6|&DD#{_C`Ea1raH%(i$iQKI>HLa9`d0)AY zd7p@-FQ-sTxsj+_nfH~Zevzg{G?u+_6 zea8K5MtL^l(uWjLi!NTkY?h#lcd~H ztxIX-L-zO&dbH+O2LiSA*A0#F%V#w0i;V%=O`PX)eDk$cuii4+wN;^>_Fr4SE%g99J=J;l7 z>s9p*|FtEv_}{a(I*BM5T3fT7<1T9}RP?_Q*Vf~G!>z4H;BqS%W!e2t*3U)s%TE2G zkA5j@s}mojCk2x$U5x6MK4WdQ@OcxhCHRf9*1WkdQ0?{S8sogzHSO{(=t2LA48Y+l 
z=NP;_7vA1x7v65|_vP)Zf4J$$yggdQ5e(TOJE=Hs@b+N(*afxSt2AolI;;Vm#hsg>UDo&$p`2 ze@fmdafCSKALCr&2z7op&d37J@5Z^r5$^nMoP!F2rU~sTCbQ?AtYGrSathB5t}Z%j zi~UjAnlb%F`=lx;=L)Gv8pclNkw5aRN*d&>#Netpnr?~_u0XV;)C=v@--MfZdx?i4 zW3iU>YBN5BhQEQ983dhpnTY#N>i>D$QOoe8c9IF~iC+>K+lf63`rq3@3+cU`y4_F9 zl;7w}L4ew-5=a!_ioYUZ(2PkG7_`0zwq zl(rY2N^pH?|R`b)42j>Iz>??9!zEfInvJ9#FAyQlk`IyLQ$?_l@jyn(|LN76@u zBUOzq91%V2KC9r!MXNuKtim%fGLE3BdFdX=DJo-u6=LhuALwT_sK^Q|D`YHmlHo8M zj#Ppi#KBL_SQXK@4yGdIOKC)&M}goq;r_){~`Z2^q+hM zo>Bg7^g;aF&{}vvyqgn^QT3Xkm8^PC3Aj^#p584dU!jZL|2o;B*n8$AUjYihGqbtd zhksk6pJ8hPw3ph6hvV?~lXMU*{Xfl5(m^b)ub%|2O#J|%5l1}g2XyNBeF2sZ#mmIX zkqG^bME@MgPtrx!nXjLuOW$J^zST(;Y_WMFtJ?&_?0(v96r;Zbyc@zW!%xzQB-KUi zhSUs_x$eRO(5WZFD}DVWTiY}(Z!NW~S~cxmj!UoXcg7EhZJbDeSY3L+&Zp7iBY^Y)Y#XdJ zAKOORixI%_IXF9nopNQ9U>$mV7cE!^I`vwm{kmdOB{M~yTFq1Ma<`KBgYCe$^af-} z&VwyVcD(sNRs6wL;tyi$Vv1t#UT=va{-A#RUF8uUTf^9W>$9SVl88TuApW2*G`e^~ zZ3^)Rvx}2zGkxL@<`RFf+%Nv1u3o(4$t9`g6!SC{gOFXkq9)Beov6iGSUE2qmWYG7 zB@)eet%^ZdK)k|yY@#NtoQcH}aj?8Zq8YDKF$k+v)x{gIavrcod{*j=KPU{HtE`;M zec}%)nVY9M(oIq2+bg0EMiO(d@*CJR*I)-UVbhf82YGW`{z|NX9cXFuM`=sEt$PV= zWMk8m_zHn7E`Ppi5K?jkoRDN{E z9CVTCBQXb1dKGhkhNEH*x`^~~#T;}HyOd+i=ActAgu|>tX40@++yWL`@y2%`XUF)(9LU`QYm8US!7b3B)8PCaD&|1G zweW2dZ6?5xLe|k6cg(?kO~_)a0<f`wM4c560wS4BH!<#)pWbGk>gY2{c&Kv2f81AOlP!rsH*81>%4QmX03PSe6d25 zMrq{Vi@%_gsJl!?^>Xhl({G)-P<_tWztxA80-mrDyG0CRzg&&|Qq{h9ef=25^);XR z`f_bDV_mP__6y<}JM>t-TWEF1u=VvPGu~ON$^CeZb;jX}XPi(TZKBp?aeR3|ab)>> zJ7VFS63HlfTO1I70yi5e1{Qf4U%JG=cIubVlLdz)ZmL6n+s85>F|ZvfE@p^#%9Rt~ z%1yK(F|eIfOzYIcee41f1KXiL$8$@$`=wQ45@yr?2d@v%-uXYo@o`9;uf%3bobM!$ zIN$teiAT`p#Y$Y_Gidiu&*D zh^f2E6k?Ki-m#Wga}-d;n8y?+Pcy1aMG(t7p55HS%#mi<{UFdh=4fe#;2;A%Fb*_i0jNS!m0nYHMQ=hWlRj0m_eNVOI z3uo6D>_}w0Azy2?pwwGggev%14c~0r?A;a(!sgN_0Zq`YL zhS1d^+ffJk-UdJU$lgIU7AT?8-vz@3!W)Ot-#hd-ef0MZw0)#yxBlLtr}4dk7I*vU z?{k6c<(vSm<5l9_IsTe)0v*`aN0y;G^?T@XPY+L_kIJmRg?5g7!f$-z1UmGMte+t| zAoKV#II*4>y8uXAci~} z%uORs0O`u=iWBJ2@3~@}fM~{^aRTTj!^8=|AFUmFg+q9u#0hkg-y*bO#jtS#P({Bu 
zfet+hiS1=)oInQ_Eo^|axpSB}f#hFm+VsWby}qPrdpL%tfw{@;L=gwmi@X6}`xN~D z3chjpKmG8gfz$Cch)lhmKBWQYMdcfZ!(lx@Zu7Gqhy;D3m-Rrd)FBzIQf-TUN@z6v zpBAMOtG9h>HgH|UFVpfOc`+QmHp&vK7IfAY_}^fk1OwpTBgqOKSe?Z_DKerV32nfN z(4lYOH{P;>$TMfXtO#PC?4aJCXiFWm&^6GZY7~3R3i7*To_UqK5B4PiYX8kjFI2Awem--D#9Fa~xf^jZ!L_5D5=MqP(^Sg0&?Sa>hjn&u# zuR|YuGP=?5V2^1dHPlWv}r&q5_zP8Hd3|dtyu_0J*}!y7`8j3>O{NV&v@-s?Tc2|u7AY$ zM`(3LuTgzsNX~k%=cVK5a@hgeYR-uq!iP2Xets0Fqw1y}D>lW|jH!KZu;s95b&;jW zuqn~z;q|&H#fkf6cj%ZB|NWuc*(p8Z<8Z1nY%s;Va6Qtn_6(9lfBYylzWLq8j?EUI z9h=+9(jKkWIB|r0w|4ThKc1B7+&x*i$GZckh(ALQ8u-vDEhl%kV_p9(vzoas }+ zw*b>M>-ny9b=JSqrQcOiWjWyJ1@@Bv9dp_MCEbnWvm8zRYG!RZ7K(QEr<4|w-0gu; zzr33HdTv0j8*`dRwx(PQW)3S<>qp574G)cZK_b_(X{EmJUwL09-#V~Te%2$u&GV?I z@bB!A+)iDs4t*EBSWX_oxjOts@RgmMvpGic3AAIM@}-zVJ^^SWk+~uxikC}t+;n>L z8o%(Qmy+|=PNsyX#3)xP=}#896UfW->XOnLhitCm6KE&a3yR6z3&gVb9YtQ8i~h@b zFNZ4@|KJY)Sp0TXTW6$rYivMhtP*eCL58T&_bsaLTj;xQEI#zi%O5WNT0y_wq78|+ z77d_-x;5VMJQ5ApPDBe9Gw$wj#^Nua-$#JqLM`#Z9NsW!DW3TR?ZkkO95dd5$J7Nw ztP_SW&~4kvd>#&lU4Af}V_ffZ!*CD1oI5NGr+MxU?j}27*h#-*fgy>r#^Gy+5FOsg z)D?Tg_r2^9v8*Anu8+)v)0MuC_NA;5jH`mF$WLDvsut}9lU%uvzJ}657qb#fpCuN{ zmj}1Aj}lZH`ueK1qK2AE*92(mInU(~eI5F@^7;CkRVHqTIp%mo@zi5inF^KeE_HCD z$ifXO_Rhndf#j3or+fnVYy@JCIRMHvF3SgJVaTsh2e(#oJ*YEybHKBE?&4 z&o-qA*ZQ$ox3;npg`3>r_(;(%8O^=$Be6yc<$JSN?13|Vtd!H;R!Y&1wPdkC3ms~Q zmr2~NP`(^SI?&2GK$jNEXF->4r^cbQH__dm#MKIwN>jE=gYrprA-VP#Gnq*1zfg3n zkZ4){ zwu;EhShel#`1cqGWi#lY!iB(@HH^)mo#+TpZ<|nx2|!irwi&eR;e0+|YW29KpToqS(t23_DJ9Z?Ro?YXPfhc4=0vV5X>d*O)$5~|ik!7}Azwrom zj>l?xA2QI657VHqb@;bg{j}yQv$a$G!*@KM1Zz?}#$zwvC)46NKmWG<=bF}+8K4z% z-onB9T~l4Z$5fas_UN)gvY?A*S7lVUQ}s8P*r$1>+?1Tb(A(hx$W=>Bt4!afU#pa~ znk4JtCqq))Mpcn#mQNL+Gp~x?I8L?;GeBp zjCZ#f%>JvBnE%>d@A^P;Eex0stM*@Fzg*^s?`5>QS=AWI^|8(OJ%{|pIPB)UiNlcN z{tY=U-f~UN;<_7<mO`j%QWsiaim%B{S@Hs7`mkVsCUL_XkdE8+GW&{A z%XX`=TwYJj64#sJ@>gKFY(x9c9Az65yBE>MLad)INaFG{RU2mqM7FzG=Jux|+x6KE z+0JVBlV_`dPIT9BvfYi?2#2hJLd}lTpVd~8?Q-Z0uu|NLlBKxt6%%p(wNtJ&ZBz zq1XGIy}nQNdMCf}wqZ&iKk?Gw<;n)umQ}?805_EDd87`{e*a#{bU8frHh0VD_ZIrv 
zpTXXUoG%^LH1oaVUo1m6@BUvq*GQ&lj7e&jWG4rkLdh4^vTnZ{KZcK#mqPDG^3SwW zEzR)Hv?B)_bTm}K6vD6kCDq6MV6rm8yJ=RA&`uavM4Y^Wu^e1a(f zExyklVVR|%D_-yZ#pWsD*40$FKT$U?I^z>zRuWzRUU^^86=aXIG(tLv1B~G3R=b0ni-QN32HF0dS z1GMKj@8f{3QwK91>qAD_-NNCDa-rsj3uhl%STMKz`oYlg2|)F@!?8ms@hVjj(pC3T zB01ae{*Szd+S!LlOa6$ySvUs(ZKJ}0?;4VOQaO5PNtdNO*H+wJ{DR0+8W6aBto%1;^T{< zi;LI1z}}+pFm@MxR=C)i!SjXJ)hsn%b9{+;S=}`y68SP$MTg9Xmt_$Jb3?J|8L5M{ ztVH5omaFKKm1@V))o`+UYk}#S;|cJuWp(hcZ-W)YtqT8|Z+d4<&Zs?aS=}ah&kfYN zGAe<6i@1^;#d}KovPQ|jCUj@h#^Q>c8;n|G^JpVWwQ(PPx;VVxMoF-Ea8aH4_ z>Y2<=Jyr&h)Tc4KUpOYIN^CQomDnaZYr9Q%RH=N4-(s|DD?{v{lOUk8}JVKG$-T^Lb&#qJ#T5 z=b7fS%V$f=g7S>J53gBCu6wxJD>z`5?2X=rrn9&*b)9e+qZXO)vgGxPo$)=`fY4)q z&zwwWE|xGC;c70z_*H_(ucRvL62^L8|6;Gm9l2&_>J2^WI8i zeCwhY*b936Q;zH@PdS#(iTbzI$D_KQVH6(Y2tdDh$?u8v9!~`F#1X$Iay_01;)x=^ zCpLLJ5zG^Xeot)icw!7s-0Sy5m1Ff2TaWi6%NB*+1PutAUga zk`1hI>E8sGjFAycl6IobxUgwI-(1)f)v?(m*tFo_UjA#-a!r}VS(Wq453l(fKTOj4 z7FJI8zzM;mY3modF=;otvtZI?G}G(Aq>s={+w_lm7y4n+bzs9?j)h(o8|7LT*s+CH z1e5a6y(e22R)%@JQR;Teb1(7STJ9rVO$d!*EZ}e{^M%Bf=(~C8{cuA>>St4j6&i-8`g1S zT-(rTk^hF#>D%;Ge&c!s%*gf_*9yL8(&8t6I(dW)u@Qtre`Z}jdwlNHfkJRxgIL>@1y6#HI<%^d}M1QM^U(;#7zL9 z8k0r#;p(Bb;dAn>huWsBS#?f5KaN!t$+$d6t9SJeR}VFW@wkM%bf6TOmP5wo162?8 zp8tpIp|-Mj-aW~!`kMoPPCZm3_E_qn?w}qjvbp0guZP--m-a8MhuVs#<}a;>+R9#Z z!_+$)^G)iZw(9uUh|7zAvF*(&7ZW}{^ zr;R~0zdW=WZx6+gU8D`Wv$cn3ls1lhi#FbdmBZgI!aHrA+ED<#Nuf6=*+V7l;I9zd(67v~b5k}0XRy?u%`*(*O9ZziJP+VkO zcPnwuBij|)m^;I+fE7!5nW1oK<*iV5U#&cdK5g-WM6NVg+t`KJuq(hZ>2I{3T|u6+ z@Z1XS5{JoO7BC!+3((I0RMXlxJZqocboWTLPyOSEsfK!eWNCt#YMs|7mtV#HGGEWv zsDS$4Q~k6RAEwkG8mfMZCWJN2RfDLN>bfJ>AQCBLs0LAxrBy#qyub&{`D<{AN03aU z{$C91MC$*w61n4BgD8e^J3^d|VKtEYR8swP{Ju8)C~f3NOZC%M$<0=LM{*R0t55az z)5O@ulK;keJ;$Y2%AGkmMdyQ$Ww>*4BnL-ogi5VYsSzr*LWgRE4&~*bA0=y9V!w(i znR>Uxrlt^ydzCv!rIGmt8@i-3zo5 z|KCLm@r>g;*c`oWbMn-EJQd2_*RdL( zN2g`{z2xW&hQ*SjQ}$2_IXW@q=oE!cC{CzNBS&X$aZ2q1pB$a67q;c+_~qy{)ce*F zmm1=(dg5mWKRtZKYU0HsRTFO|Csg>RQHiK1G)v{^@_o|Bcdh&I=YlydE9~_2f4qKbkRYN@Y2Jaf;t<-7zraUw62Jaf;tyrgq;hDqs 
z!>AD&Gfch?5&$u-2G0~L%K3iD*MaBsON~%)tY3I$8@4`IzD_GUm*jx8gUEQo0Y9aV zjKND=iFBSgOumk*wm5i?CO%Xnv{iq**FRrJr1du9|J%sF5MByZMeA>cyH51W*SQUR zFY2G@rS;3TOK{@Hcvne}e6#XxH*Mw+6&2!9E&O*qZTmE>D-z!~=jj~7>u2@IY@d>w z-^xxpzIsHf-hqril71!{fVUnYJV|_|ZTcd5Gzcww6)(T=ylV2FTFI>Q_YJmE{nL{Y z1_0A;V49%Xme|5p>{)I2OOZdIncneSZ~il|O=90`&N&=X_b~X05YyxJ^-9WtD3Gs9V@pM|7=x$AX-~!KcUNQ4>B?Jg7Rok-TcPmxyMK?wc#v%6rT{;?o3|3b<3F=+W0jum?wMG>XL`W7 z$*p-CyF;`R?`^=jmR^Mc=V_&Ri)B(~ww~4!+^xCb%=Ck^1h~>w+s4f9<9l*%EBi2c z+sFmZ7ye5tRKW4?)1MGSIg$MjIp4)Gl5E$i@9?W);3?bLsddl#bM}1$cGp&_>-x)f zj2Y6-kmXzEWC4ABom%(se|sz0|K9vbWIK30&z#`yF6!hZ($BOo;y)iF{+&bmEVV`b zyT~i{`)SqhNN2x4hpMsG;E=v?SgaMle!s(v-K+Y&gPvs!+wU%(*}~l<`n{dL zZbBxF=ltbA88Z-9aL6%dyl4Ln>>Bdj2@G}g$lKF3RH3X5yR)^2XOv|@uv=g#*o{%*X5>Ul|RZ3#YlC98HeUp z6UABdo5763ZTf@O1qahjR5C1DX$mM^Y6>*ZH+8QW!?UeQ6PTtnN!RW#;JelEFid71 zPb<&%ba&LE__M|0=!$~GVrJ83h)>}|AjzSxf&H!Q5fxy%3R-W$JKm~4uiBXCZbN9o zRg6noueW#nDk7^}+0g>KlkJy1@9-T93<5#>>;97a|WE z`;mz(Qen3{^^G(}JlbaLFgDAD(@vjGn$xG#Ci(v}hH@5Ls12TJY7y&%N#-HKq%#w{ ztgX~2MutImoyAOCKu?FZs$I%LO%pu4JF^()t#@Q@@?G{^8Zk&dLze!Mq53nZffy=bHwD zmu1qUCfm`1U?N<{d>kH-WBOTsD{?~c$I)ZQd>kE+&HdxHF&|F|2>y72hE!rZ`Zvj@ zpWP{OY$B^|1~Xpolc!1sa`-P57h2g%>{5^3^M{_`p9N}(p$0!JvSw;^tHyRG7lbbe zeNp6_pRXGH^@4&4FUqJK?#(pqB&I#Y4h*;?5T82aA_bx=O4C-a6jTJw$PY9$KU zS7v(-Oj)YhHaL4F-?L~@_G0yx*(Mhf*M?kL|BvkV#c}DCv8Lks0_3$M-gn$FsbUs+ zq7P;gD_ugJ8?|G_SabZrfRd{!0!t=W1eHvw2rfyi7*jH}BBUgX-|TRRd=ulbt8R;a z7yH=GT$kOksIO`;#peR{SOU_eTcI?X!q}WQh>29JU~0~ z57av3kToUy9FkpMK9+qBld6*sRTV%B?ctVrtk7`wJ6yu~1XN6R3XeHzF?KUlJKDvu z%I;U0^(pQcQ;8Rqcp+D2eJfSKKV(kt1F{0s*ZRv}$y{z$znbL!Rj{dys%R}(#^m=y zk*RWRDbRE~+G*nr=<3tXoOxHYLonI0@>Cj6HE@5oeJ*fi0pqzx*!zbg*P}kzh(02Z zT%TO^`Bs(pQ=~qd)#uT~fEKBEP_q#aI+&q}uW6`O)SkhN%$;j|>9j#lLZK(0u;UyQ z#xKVf3P9f4NID>vTcQCx^UGNA>;)t%X#pehIn?@P6}|qo{@Ir@`W3Y1OHW$xIeF8Q z7P47d^;p%m&=Z+!>65v$##yETas9ybgG5uY zr>AC)J1Tf$SSTf?t3~Z|p6sU6LiLk8sJ@*YuAqd#6bPL=_D8|T4eX2_@BT$F6|MrA zpSPIDp&64ag3Yh@p62(bp_TifgV|m&UGhu{&z#`yE^897Oh;z#eo)h1IxSeI;MzjZ 
zlZJ)sRi3$#yOCBCuw($&;NMgKfJ1Txwb`lNw-$_UZ=$i?T57UsTYhSOqcu9GMgMJ+ z$YPgm`2{Ux2wm!lrY0MF;rf3;PmVDqSXNZ!RIeg0FPc4q6N+2(z=5f1)Q3q^LY)JJRoeUJRk&NGGFT{ZC11NhUC$G37eaqN7> zqDJs4i|m|>G}ND@#x1r#RgGI@e+yi33_ZHHH-!K9^ad7PRTFMWs!e6A^0gLykJ3&5 z+>uIGah4K|*rMwHd3)PD$0?a{`lv_b24kEq{6WU)adc`=+lDbtp~H+*0b_94*2y@1 zX=~A&RIgq9S)vhToHp@HA9s)U@B9DAMIIBNE#;iVA^WX|fN5hbldBRdTgZkBSL3wX z5u>QhF2`6)cuh=gi~fF}v5&ezXR_@h$Bv3NV+EpYAQGwLp+5+nN#YmrZb~e4MkI&B z*v$+&BeA1$MWe3=sHBH&gdT5#wtB~o$}|1gK=yLi)c@-J)R_mK0?s#aWWHi%ub~); zf(4o#jtB)syrTtwaiA%pCX~_G)fcC##~6^D6-j;_JNv09SgEX^wBEJPAGR?C$yqe} z0C0GMg5))R;klci*lduLkn;^l#GfTsD(PjGf@Cgzy#(J+_oA5c%s!q;=I#aJ+WY>F zS~0=o|8d^SA%D;UVg^Hq8GO`~RYRqi)Z9w3>Tamo2vx`{uJ2!YP%<5)4%z~9eg7&r zLb5g4w6KOu0ir_}n_2T#*=-@)9Bo-*&LWmH;^Xt7%ZMgjPVLalqt}_Y9GqKnEzjN4 zH_@`NM%}GzVgHb?`cFSR8Ei1FOzJP1{Z;?JKD?sjjN@Coq$Ich&zH?>fP|vYhXFQ1(0;FL9tD zDh^bvGFg_(`UhVJT$5~?10_tfibX5OZ4)!>YS*oI+121!c-$91@orBR@@j-hoAW_; z0<*K8+7}C*SN>gHS=IY%D9oOL7JYReToHTEINr1xjlCKDU)r7JehYeZdRF&1R$GCx z$r-CuvW~~$b)Evoby-``{txwJt9Qh`DDRB!T}X?MeqGhG%;RbZR*B2{gI`}#RrJM2 zc3J=5UoSNMm$E^9=*&!ZS!T&NLkn)9{$LAsXXsY1v8vhId?)oT0s^!z|5nqgIgE84 z?6J;oQtLdoY6CXST<1E^bFTAdeJ)ldaWX*81`#JZjql%gfiEouL} zeq~b18Czz+wfL50r9WjoujN(wuJldzp_MN20kYDOx<ZA}O5biBTItj6qhIN1 zBfU%fuz`jZ>#&y1-!}S{eyQ)9!qvjw=R+&~gl+UI{k4(aH6mPM2OC=H zdt?VIwbFyVR(ju8#!7G2(^cO8D)$;RR{FO*R{C>}Lyl~}mA=X5yV7s8$x4434xWpo z7pZJCm-U;mWPpFlLO$F57S~E|Ci+L-=)KaXKG96PKUBcsz0wnHveKXa%6p|p+hnCb z`jzKOf8;CQl^*W4Xvs=%CfWnJhrWJ6F1e@wuJj&Fd*>_i$~nJ$_cvefi|{gfuJ^fp zBd_(0b+d{-|f|pIYzQ4Bz#Bht;*-o2eEc>s_do zvEDNjXCK*)+^pZ#`^{Iyz5PS0eXe!%t37k1cU@ssWb-a#wKuaGN59&?uzz#70(qCg z*PB_5qhIYEBfaYiaJ@mTRfDfL<69VhwSV?U*J{t~lht-5zApR!`SSH<>b-lf_GWU@ zWVIjYZNUrEf)~u->v?GO&D5tHp079Sk0?6ez1pX++TTSs*xBp3+MB6MBCGwGKYFhA zW_>cgl*{^)fAn1K|MW-S)$YyLo2m4GWt_eST3vko`2uP`eu3?SbJveVAGyX6`oz(C zZ5p&G`HU?(;EXLcV7w*99h)ICdwu`)C!6&N1F}*RkB+x!=JcBAqXFimqk-nhM_rP4 zdPzbpITqv3IA-l4%VJ71(IzPcvyZ-KO9~w~|2~P|2YdZ~Dfi|e0fs(d 
z)aQ)pZpOBbw|bptM(a>S0ve!QwBHo01i{S9Hmn5AdZ>?;pypulTZ!fa209iUUq{7=X0fQfaEp?fx*hD-{ork;9In33=Y0=_m9OP< z2cJuKjW5sTa}%Gd9}X|ie^t<>fXqvYLr-h^k2S@MK0gy{uO|y{z~EY#;VC z>zmQsW0;Lf^xbBCl|4%3+r_(iT^ak=LxXzd`oX&c$^D&%G%eRs80{0bc4p*dTQk-F zqbkBY-Y5P7c`A~pp5y*q{n`(ygRhepxt;i54w;i*l2tsuXo6*V)rv}&^{bhx16Qdr zFLWfSF@MA%mYin&K}Y(rC2FjvJFcl)iGROYPjWQtd+Z^6DzuN~|0DK7{_nE?nEwyh z_wfII`=k8dX%8!E20OyalWNBmr5>GN(e{THUsV%O%Fc(+_>O2Z8P)T>#`9&{c(3ss z?>C-VYCK=Ueh|TUemW?Fx%)+568*M*icRuP?B5*V^~xjlg;=J>S<Ty4TpjrIJNJw@=(>HhUp8+5ZQ|Qr)Xr z?fw^N`AUCT>BpPZue|+#ffi_$&D;Mc*Or2H-2?5k@y0;2{&b%_k>iDb@>Cj6HE@4- z-&|l_1>EQI@&9qidVU4lRg5WIY}u8os#il%6QHQcP}KBioAt8Z7&Qi~d*js@duJS9sc2|c@9Y=WDH=Mr_u9H!)Ci^bhFfB4;%lcs56^2sj8eUIp~ot1 zBDPNChE%tfAm1bt)n#=MIYdV3JRa{cjM6klsfpYfd2W*XSK=id1FR+=8-iSW-r7uV zyo}OPM&U~(r+C&UyMPt8Suf?O+1&3Ve?OG5+45iD#t+#4gX7XG&*JxyQA(>yubf@o ztcN?t<%HcBmzV9vxYWbTCRoP^wvZDBEwu`r3%3Rp#bCddanSY){c6@TtkYENy4X3J zfDw<5+t1_e8G>Vx?aw$kN8m6X@Fv~FFT6b{0!5dX2VAbC16LE-{>{YyLu-|%-(`W8 zY=3zso@eT~doQpD1MiVffc*|?5prBoF{@Py{!j(K*a2r(i*2(ROSn`qfd1QK6@Fye zCn@~MunP@~t(}DQw06AYp|YNAQ;|J_pSVPA@L+83P1J<*h5G9Ct|*9ReJg%@nfVK7 zl>yBBbOrb8{KgaRGVHs}tbI?9Sb(L8NIrNYEp*Vr3SiTGAeU+yP58IOTGI@zjRe+D zKW1HgANV=s@6szSxKpa8SEe!V(N1{7o$#Kp8SuVr8-h1<{2|9o;5}lU>YQPL^NUgDJYbdj|2_2d9w)5F(V&~CPPmNtLqGHyeS7bWkx5LMR&saSdVf8GJAi78#25W%;A$#NoE9DS-6fs_9T!Mn7-1_e3{M+ z-p4OIJw@VIX`*JQXY{39*+{SLeFB-a56y*qC(rDXi^Op>fz8Ov+^w+P0W87570vnc z%`Uk1JAEsj^L9en7i8{SdP*!Fg1B99a%-q1wkD!B>0pz-w{M~wQe$PtGb1Tp(#F+p zze}@z0Pnf1%y{Q4eh>e9ld7#d$^9D_w(jukQbLbV}%^3k+Xw85DZ$N4j(fYpdRulU_GQ3Gnc&lv&CHfs2uZ-Fp z>iW2RQ!aQz{NTL{NWLVhT;Lt-g$}Dcfgu`fz$+MS=Pq@an)lvDJb6ERuyRh|5E>lz z#4BokQ!HBQYGr-SU=Q-Sc$HQjyjFZmzd}!i+O9s?#7^@wneireIzwl@y=pM=&Xk&9 zi?(QHZRXLeI&!GeG$iW^|0}*{ZKgT^n(X{tO;qQ2 z&ylfN_U?Pnan0sA1+le6prU)Cb6y{uYAWZ>!%sP4xjT3BQ;rD_8&6&^o zXsh-kJ@Q04pPOj05L)UTmz426_#9c0r#ETZa~y(WAy3pO9Gh%epfF5uODrHw?DliD z!le5jiFam@U9WzO(Ze{ipOuf^Et;e7R%}DP{sxp zyctdS|Aq5(vf}14N|$_U#QnZUez%WjlDRAX|4X;RYi;CgaNf&-?zBc$jD^}zh2yGk 
z++&KyN3NwlvU*XKaNuNiNlQ`G*;qjtc;KH5g?6TBzP>xnD_&%tk-2_O~T@lUvyRJ%SS57!hxtKOLT7)TA6%8P?Zf;rRWt4!TlMoh zL#sZ#b6nMotzlI^yds?qWVPLLF5F|ag?xk6wyb+Rt1Y~295i+;(ZX^0A?66R+8V_= zSUJ7Qh=&h-u}M$v3tu4d@Ttms-J~by*3YTYxEj zJ>OM)w&+W;dOMX@`8*gDX%$*5zkL$xMU>@B{Y|B%-mhAk;%-SIv!lSPPi@}SHjVub zS?l1Mb)a0ed))b7p3HS;6G@#cX<<1}o*jTL_to7WNX#31GTnmi(t}LM*`Djd&tNBh zR(as(0w4SoJoLxSBAZb9KLrGEMHTK@N3HgL0%+^#F*W)r(2 zc;aRgc~4FH9`c2o^cC0-1UC!OqZ^3|mzMl-^Ld4{25-qls&L_EFgRYp{`M02aV6Yr zr24;b9V`^R;rAK1`R+~RMSiGh_jA69!&k3v(zh$U8l6w+)kx^dj?jb_=hCa2sDb3t zr(?Psk+nvSIc?HcV=+MG`d9_A#5sFf4kR)RY01ZO&`8!w6P58#^ZOK_xTtjL zA!%JQDI3YFZDPMPxR3qpcaWbh!V@^};1HXdNbAQwoAnQcrirH4$+vm~jWXwnH!CI< z#TR#%iBxsAd{Rk5(N#o_toqg4W$%=~RWYSV5^>RXIOMoT?le#Ts8 zE;m=051K2@RdAb^Y|F9DG?Blw5i< zct?aI-D^oGZ_@9#{hj41&dIh1Et5GX+Wx^Zg>!;!k0p`w1lvz6Q#nW5o+P@O`L`8Y zuI3!dJAd_XKR>%vO^p>BNXp1N@Km)CFqNnJ$glXa1$#dT4o z&(}ql9;k~ct*DDFU2M5fxx8j&tqHB@#_`(zGmhss!%a5_9ysIp#V+w64EbZ?N*l36 zzUMgLlwzLQY&nWGHCeQnTl(V1zvn31yv_D_fM(8NXY;)4wUqOs`P}^|`+L9VC|7dK zqmE#{hddmd%53jDyyiVe#jf`p2f5dCw{L5ev{vZR+6Nx3Rr$1bc+InwM@AJT)<~B2 z2%hT^y~$0)CcoYDtdoy!w^mmk8s)pbx%XeJU~l(SJKr_eioGQ)f^{O=6Df=X3bNcb^l!Bqw}36nx|>W{tB*Hl6aNuwyBDvloQj#37GV z3s-bwfv_8WSs;v25WdPU{2|QtgRqR=&s7j+^a|%14#LNHCXu`E4}7$Snl`_~C%}0( zhcA4APWX=Z8}s)?zf=YSHnTvc0au6@T-&b**9E)4wcQEV&#?o*|A&R^4Z#3q5tcY@ znUxIs+-8@VYq2*Hi8&lxlX<3|yUYr(Tzp^CT1=YuJcq%x<7% zJ`@G_YGjwFh|dn}iiTq~Qh_uVzf8o($;3qll-^N?f4MYR_ya!s1bp_1hb12RD$8RB zlWJ0H!_DbfL>Ad@S2mI@qokJI))6T4h&i`SL`S^cbGxFJje40QukyB0zN_NTc-Il$ zmFIldR>yZMH;wXL+y9Jr-NU=S>wH&+)Vdmfr%Q8FJm9;m^N zkH6R%7XbyJZoxdF6~F7j0~)DPB0M0+)=2(;qssq}rQHCmDviiIF%{_^ZAxt7-0oOb zPMS}J#8|k%Oa5dqu+_RYyIW)xz^v(KG*tNq_Po)Uw`RTj&iNP6h6DwUm~Af>o30V7tj9MrUm34 z4NA!^NlLk;{fJ#B<{U4hmd;a#pZxwY4|CWj)3ziauU z{wtL$Fb%mZ#2Qnv+++O^kyN&1eNe%!)l!R_)k3B~<`chlugD8(wJhp=zo(Ji(FM<- z+UTRPw3>~rqyg^gT?2J!MQ*Wq*DRG4c@-=2HC9b7QkZK+8gf~1Z;jWA{L-GsJMR6u z+PNbCs26w*f5`q#mx(`r2y(P|n!v^wkf%aVM`@ycPntA54C98Ej^4txB3 zOVdg?T)JK_)>AF@Ni@5hC!Q|fT@_JOq_jMVQY);EDw=88b4WBjbhkakSCkN6aYVE| 
zVk};|!~6qtK6b*@C+{?GHE%O-H~$bmc+Q@oB*Ay=^A!Jk%f1Mm4XINshPN+M@oR<# zb{87hkKl71_8ccSY_i{=IANo{gWAxgPTq%x{aN)x5sLTS!Dq{^NX7f|`25ntQD|$8 z*qTM33fNy#HPez&Gp}~h(TLC9*d4mR;24!!&X3!F$1%~y!qB7&Hg7%V8-=BOM#Q$A zas17$@%tOu?d?6s9Zr4f2b*^sO(_`Crt;B$HlzO}i#A0>q}fCNxzk7gA?DGs6-{cJ z(xklG*-kqfJlZMsXy=E1?cA;WIHGCD?&LAu4XXa1clO^<^kXCX|A*k~5v&Nw;Lydd z(!tYx_-?&Dy2()SFt3V%$(9Cak?@^Pet+2k_aS4@+oLPbZ04Cu+}bluJsw2`TAju;)2jrcPCPQ%B0l~yRAM60 zHqh=LcjD1fK9@N0XaSKBi8a%(UpHg_yZMq9DHQ_)u}tTD0G-}`W^nZfp{2QXU{XuK z{-6Wi^3^z=OQ-T<#`#9#TyABa49a7~Mm7*zcjef~M*T*|?-}bi@JL59SCPaOG!T>N z9~&9XxGxy+j*XOSrN{`g9j&yH#*A2P{;`qrR3T5rbN?ON#bv}yyhU6j=leOBua)8t zc+MF|mRXghP)Z>Y(`c`b)AR>AJEnp>5r#wx3Nff_4(7|wCZbB@!-5yz>4IBR2^8i=QJjnj+$ z!On3CQRAer$vsYn1>j2KG~GE)&ndn6kNQVwl@0on%5va8PT|9h(>=f+=p3iZXpb!_ zuFrp*@_4SF`}OG0rAw$ma+X?woRc|ZoP6UFSqnl_D(OKhMffoEJ|1KPjL_Q-LGNXNZG_tKoO7bw^x)vtWx5*x_VNUr-qGE>G>`ewOt>wx%rt?$w;rI5J-`Vp+m036&zHMM1|J$RbTV7-os8 znOvcnryPXNStq(Pg&XuHJfb7TmNw!6w9=a{z+&P2Gza*&#!{bh{E3^z+c^)J=O0&RR;Ey?Y`VFDxPuwT@cO3CFwZij zQOAt2?Ax<0B1KC*{Yg;ZLwQ$~gwC6MAfb2)waum$CzV{iUwotu#3YLJ6r^e))c1#u z7o0sp+&}|0I)e_eca?`Hb7p?KQ;t97fl~(Obm3KCuP6$qohvUX}D2c=fc($`_w^ zpjgkHenr8pjrMkhTPN)Q;elHZ^aWW?VpBG7>wV%|1Xo?Sm85*qSHi6ds|yd>+_+U_ zg)Tb;o1%%sh=I1=Lfk$)S$Prtaq9tvbHn4-Su8aL*3wbnRufNdS?S8^-9YRaz;G`JENY)sHmq9qMkyDdKyR6Qy5WCNWNo#9^d%heNv}%7k)-QnM{Cf2=uD(XNx!E1IQfR(@}qI&Y!A;1bg{E*yK* zh^q($$5Lz^aB;jNHUq~b0*=+*8wieB*v)#O=_E1OIsCT_7~boXdY{@uK4aZlXJm!! 
z22U->^?X;66%A7VkH`vxcAf8OAO^UBsv+iuXT1$KX4*uY)_1~1IFdh)^~yU?pW?7)2;SoBzbCja;J8}tecP`l91b8|0) z7tVF_!Up1{UG&Pu3me#fr$PV1F8Fi;tv&{P`U9HcNxfRNcD1`T7cWfmiG~>Bg(s<^ zEL3keUKr`l#S>n55=jae9Y_S|`I2|;iobs~*TAptYlw+BO^gKRL@UPyYCkRq$;+RksD-ZgL2|t#`Qa?P-T#*vR~`0dJ`b z-(36=d5KwX!|#OzhQ=-U){4drMyS?&`Qu3{e~u)3odlm7i2wi7{P83iEx_1^=7R3M z;U)ve^5?K`cNhNW*|MLfi$4ZC`QuML_+z%>kIS9>(ZI8ues3Oml9lx6H{g-etxLUl zq=9Yuicbdj{?Fr+R=e;?ktBsro@BQ-7oSwP=H`>49}AzvHsKiJlPA&1g-=ePLbveA z1SC1W^||KJoOU934y#m5SL6#p$T_*{)PM~J={6k%TS?Xi}~(g`&#yV!)1@Y*wuiMx99 zLg$B@gHo}4*v6oR3e;(RF=`%yk!~lc6@RHwYGX$-1_iwVbK|l}_7^lgK{>gIxLo#$=o{ zu^HEO9G57ujUf)3bu)wzLmb*gWL(B|eCl(2Y{utvUB_lJK7)#c$-2t8KKp%7{r-bu z{jQl=KOdpI-tVcZb52#AI(6z4-Le~J-@)p*Iq|CU*zdk1A8!-(*2P)3ZeG1bs9K=D zZKLR(M=?H4@Z>5i&Qc!38V#PrT>UH9E!tJGQ78+ebKdulxW#ku<_di0#+8G3he`R* zTjz5nEe3a#zK>kP{{^|y@OujW4#T(RhqpU?J0YU+{qb$wd+4dnx6M}#z7@k_8_YNO zQcb=EhUME<6ibT*cT7b_y@<2Vqbg^k+3W)7W47nU|)O?Z|jWM6?D)qQcEA=h=|@y1*y4lMEe+&Dw7vq#TP#gV=Y8Yf-)I zoiR;6e?PJD{eh)<-~LFi8~?HTjxu8@PQtQdm)DCMZNK3=w0EK~D(kL2D`b4*^21?;BSs`!mis8kweRxatP-$1p*t79YDDgcgEzBLzVOw- zn+?52ZV9j4V=@i7SC2*f{~_Fs#P2Us47vZd z(fCVqvmwhCDOv8mI#`zZJ%eRg+_%iPEbFceC(G$qELl#u^8P@|)KR!w{H-SCDUu|u zt9<+X_tfplA0NbfT9#BNUooUQ@rohA3D}npmTEG5--c9A_356`X-U-@otQ8C221tM zWs+)fpCi?D@Spbou)nbXC#ll@za9JkyGwP9c%8&6)%KfjLN^kHRNL_y=|S9TIf%#% zL#omC%zWX??AeA)zxXb48y$pAjNh~I*OV#d1gt+}q#ZmR7fP zK%Elv)P(Kr$YqUu$&l%&#^jIx{MwI4j@`@OQFct&%dpBOW9Om0d+=sqpD|wNTs9;- z>#`x)jLU=NI_0t@*DvsX%apr})=Bngefiq&?AtNz@CQxasigh)E^z&_A=evU9>nQC z{w?H65db9DQ9`c!K>a%|>a^ZQHMZmJ50aK!uI+fu64uhSk1$r-Z*D<(9B5uNU%qjy zcv^3gE6Kr_C|YV>gXXe^$NsrS~sKGu};k}WgqBqWWNa3 zU;sWAtjE~ zJ$F7QlKrNcj?Ckeu;=(s$c6N6MJdBy$lJ#|JneX?&2O9@`iR_rIB^&6@IX)DVaM5z z`3?^_gxP3w&lK#txt+%&NdBD7?Kk&*MR_3Fk!yzTRFUOGXV0)-E>i{%E^@+A29I|5 zBkp~ceh~TpE@8w4!E1(9O@Wo$jyn#6&(aw55a@1I z`^{7MP9gqxLeJVD_ILXgB@Msh@z&2Ddo0@Cfc$;%j-v;Zjm4d%!;sYgk*Ob zeT*1aG~YhD-H_b0%Oe^e`KbNoj%)E^q%~a|S5Fa1DZ9oZk3x2=Qa8Sy+S55J`$x4+3i7{v;mb$Nxc7fVBroKRHTfXk1cS}o*M2hr@*saESytezhzNVsrblZo 
zheXz09ULjtj(3#7iOZkDL)wmaK(Ug5;zSWmHG9x|R2vg|mVKCy6EM#4clhye0B66390H~s zQhSGv1sVsprN_a6o0B7uzzmlJZd@HY4j!|mP%q-thLb{4&){)z@+wIo9z5&nCCkB( z!dh4(?PmVJq2u7Xkd9XhxzPJ7Um4?I5y}_8YQOoRh`1X(4&Hni@1K5#T-Es97r56r zIB>JK&)|Q~Rbw0!id=n4i5nMt)DgPgNbhAYNvVid3a4bemM|<2i0qK)8 z^yg6DQQ+G#Q?JMU!ND)Kv?HHIJ2KC-8~Ojxud+`O>kyB#9Q+;n24y?$2{><6!VbGa zZ&muUeGlBszE}4Cd)`qwh&!fk+c*A-*Y=IVx*d1bYj>nyF>I-eB0pbnS39Dp9BbnM z;{TtxXRf+-#Q&4ELGk}&QEbL;=K$jW?`Cbp!jd&cG)0ZF#sfHqZ%6$9G2riDekL1b zu=R8R8r_cgf2{8m|33=3srSO~gWsw6+mE+$uv^TE+aAb5exj!M?GJ6I{rs5sg6}51 zuWbL`d&#@m_LTSd_ObA+JoGW-(5q!VjvSQ-5G(Q!B7(85^ge`r7}o#Gaqo}X{ad^? znpRBz4quLYdM~o8#J&IMXKDD}YRp%UAXni-wnvQCq`1c4-&Anp=Mi|-4#3CpBV^;T zauL$mS&DsjhJDC2CG|XBt0xIip3ZjUxFgipI3@d*jiR$N9qp_1WH8j>DLa5XKD6$% zgI_Dbku;ouxL+`%my!-3|6e=u|Fz#t0>AR(5G(NsY-;?zKGN8aQ|6IBzqas+{@?!j zwHJQ09XTr>`Z$mkd^fI1!Md-;$JLFhi?4gA4x{utfx3t55>u1jCtE;I-QTDDBzgDW z4#ae;b${8X{XsjTAT4g5=ry?6cGckK=RIC-p6Ky%^DxHeU~abc5I28;JRSx&SAs7S zY`zdT$Gf;$|mi#tq`#zEQSk z%HU=azSDyL4PSmr{Q1Wp?|PU1{PT|!!OzLfc$+#F_kr=3#`fkGwfIq-5zMYu#{J|) zE$isOgOpPxTJD(m%3)NF^i=09TBae#+Z^K+k3CnJc#Bq< zc=7y*j}F}I@9X|DK1)Y^m>cmvEPFgH>$_M(I*@}rZN58~`Gf`dmxwKEznRhdH+UDO z=gTDn+N7V3nLD5*p8LVcOy>V4%`e5%G-DV*(}>{ z#`nI9lQtT4?KfjU?F8-ux$pY?!e!tt2jBsK-x&LF(y^6@F**Pr$K7=7 z68PtMTswco(6QMlp8>Ua5cBlt%Pl;^9+29rbinwoZ zz7ZaS19$_TY)!9iNSroq#yFNaeYOeuI_An}VuWu%IS7j9%pIz3rq@}iEs+JAvN z+xVS}zeD)_zQyk`mxw!X`5lsH>H$R9wTb-{*PdD-Ch36xiW z;<&r|1J-KDU-=>Y?)ZI4#o6a?lRkT-IFR)R#XFym!8@JC+rhI_+HkjB!8E1;mhdP z^&X;4ZwDW^`4ax!+j9W#M$k&>)~T)N*Y*Dhdl0`B{H3^{s6UO)`gbj}@xCssnD@ru z=X1CfIA?QzU;gGc>?0n}`VLOT=Z~D0m4xx2ypxQ7E%*+5=eu-PK9RpO=6!geCaUjz z?|tnZ>{_qTOWhB_J49J7HIcpYdsrt%!(zDC`{eL%iPrxkJoCo8!?5Oi>D^)Y^3ab3 z;7klSL#uN?EK6tra%_3u9gar7%VFa=b_;#-Id;Cqg7$B}q<^2lzgK|bIM_Xd-zP`f zJ0$_UiN)_rOi>;wp7Ps&i+kAbLkHsCjo%e`ukra2!mntoe0yod@Yn|Km2VQ>9v}%A z^4xJb4Xv&N!n^X3v_|75uQNZ>Cu`BlIJ`Xok15)E67#ov^(Q|m=6Yz%EzCn5y&s^h z>-J!d!0!zFb-%&A?HV6R?_zH-e+PWNy}j*PjEOgXZ@U&7YtS=(Z@V_q{I~Ht@GAcI zy+;b@J2|4fM3ieqdAleN9#=8HMSlFhLcBpyL1ieMkr6JcC* 
z@JdBxxTI`VM#d{kUMUEb;a^n+l__OarKKrlp~{rCd8H+V_--h?>d8=fQCY#-CksO1 zq9^CBowu%_sJyZyRHm%XTbuV}XUVgRu`#uYgL| zTN)}W^3%>xsXzLm(#04wq6+CYh04|zg)2?L`O{^LZa3CvTisX#D&OYv*Np~8id*2X zt>L1)$|9j=zQ$33h4VSL6of++71TmWeWvM`S|>VHIJqEHT9NYXvnf+k)~`<~oE#3l zX8co8o-7WnDS9$g7!H*bnv?GjrcItc`ROOugbJ%lizf$i(>KHD;~~o7s|WGcYEzB{Ahy6~(Vi&Ce@%)d%me?uUy?i}EUr zsZW-7Fkg!E%8h|7YLjfE3+Ic67>Rzr?|VM4C^F`9zxIV2CuT=UURbyV=9DVSdmBd_ zk*%xb4PR?pMO8j#E$BwrV7qiZ0_P7SW-iHoyitS+-X2V_h5g~I*-H|aQ&nlq4)Wih zmqW7Z=O&HnVC?xz9R$@d$15#Up&*7mx6LzdVxE ziQ|e*vz?)4jFx1@=|6|0hnVp(`x%FJeF7Ij zeIa{x^4Ctn`OyK3FYD|I{naOM^8G8wD=nBCTALS!yFFa8&X=AK!TRWDK_0m(b3&!g ztS{A156(}0GQ@7ug%aweAz+LwD?YeAurx#)l1SV|>`LHpYi7YhtEh#eLn+d{eZ}+U`j5dR=8W zub^^nab8(jQK=;+Iqb^3ipu9;{7$oBlDLx6(z$ekxz5!)N&9u>AgrHpPgqxtQgf?= z5j9C-$_l+pg(P1vRQ$+#Rgu3n*Z97CKj3hgQ&m!0IKOOVC=7o-0z@oHOYs5txj8?} zEh%_)ZV0QVU+ki)HTgy17gvIYzjYq3(S-IpQvTBypG2jO`TDHH0%L*6$ zvI^lfzQ%1aj1wF~S}m2Tuf#DFe0*PC{w2_Qh#1uQFkCDjct(te$$Y zrpP4u>BX8`6=`f`Nx46}5s@h-#usl)jQkwo)FX^;S=pKO2&3C4Mi|{vm@-&7e#X7= zeR+Nv-$`efD7kK|6Mq9yDm3w%E_OlkMSK4%jQ3IKTa~aK7}cJoK8yeYt*BRpu70vkg#x7(e{8>M?pFRS(v< z!X?F3D_1&BaQ`?TZKV2a3qx`}B}|pYaajz;7b3HrUiruQ(xtFdpD$es7Zp~y4iV*e zZ(_^_N@B_pQf)I)3R5AR?e^ib9HyvpUN{^Izf=^?4^@;@TAN|nITgVo^9zg0DqTp~ zdL5h(eXIzTB3j0uZ_~v1(hq@|Ta}O7I#y+J{i61{oLAC(bZ|b~X;1c&?F&O-{`O@R z<)tN#D$1>26~FY^uBv?D;54}{*B2WjSmqNWhP_XWyp4y*TC+d$cAkTgxAiKj*32s{ z^0&vLhWQ;sz?4gp$$+ zVCIBEwi8|!lNqYY_r1QMuKA8jR=RzNIte=?CyQGmZZq1|$+Z!O-g#@X zi%Km=x~x74T<+_*lVDBDa`j=I@+~u9B9a-ciGF1UjBlA482>T@&Zo>u)~qTIhYB(> zUcmLJvP#F`zmJaCI0OaaWOiDnB{lyzDI3AAuc&ZUQKl1yct0H(iZd5s*>+NWFNSTm zs;~M4&S!lqOK@~mgusyq`sl&>tQXVKJap9(qjWz_+(HE5MS{%@mx#?lqK`jEYB<|&8sTQv#n`)EFGKJERyE0aW~FCt_*9RTcEP-42LRl7tf8n z?|#i5V->Hj@c?D(oQtDxi=jR-zU23OUKOrt=atPVt&)vRwEO188iPx=tzN8@Pr$A> zJ5;r*c&>dmp0-dBG`y}+6kOJl3NE1)cH zVP1J@UICQKY;6QM(b5QTGW#4`COJzMEw&gcTi23;;=HguQsiZ5;rzWmlbx4o6Lv$rP;Sy`+s4TTC}t$!Fg{@W8SS^JYBtc?-OH=-SGydgH} 
zwE_6&^I)hO_1of$7;jeZ4{x~rJWcQ+2g`kD;r;SI=iX6f@@o+IxEPTwIBv+Bjtlqn>El9jOaor6(7bbgMDna@!e6(#U+5qU5gMu+5SR2Hp*E5aJZ z{&A7jH_fN6l)xiE?F7!3eptAtBCHQM-}QwnpchzBaTm|{8A085q2aR~w0(VUCDv!Z zaj^E@YYm0&E}@))wkat+KL~7Y5$>b=_u;H5LJCmNiH`iZUK8P&w~aH@S-zj4a5-Fn zp;fQXvyLny#2OXLj=wdYdi~jZBE)%aD2$v~j&SZD(_!nyV7$pP>3)0hi*4??}O4UtD+8+x#2#j?EdV14RqWrXK3RhB*HQ@33>U$T5HFI;Y{%=Q!@Ef+V| zuT0%oleNC;b!~d=hRe#8>h|D$>ctSePu+NpPTVSr?dyiJ?Y3o{6M`uTJIH6dhv3Zv zOWFF%sagtAUll6Lbp?>g;t^&$5Bbk=TcB)}5prUSPFnV$L1#C#+p)p2^<$5BcO%q? zy|J>Sa-KKiiTqlP%Vb#SJzH$)e&>Y?78aGQsze5FAvM`P=anP=Ql#O?v;$sbu_fXv zzO81`Z3UlS3WY;up^_}bi^H!LLF`^hh3N(K>{{e`y^7p&xCn!kx)r%0c-9MEwL2o) z|GU!gIc5sP)+-xNwAj^4^U;m<8(WZ%gdBw!XYPR_+i#;@T+D%|5#!9>aNP$V%3n>1 zrwZ^|%-lR&DJm?QTS{-2tSpg_D;S=J+ z)Aw$_$#D7hi)4ky&Mp-8B&zX21JMpn_TLC_f@{8U&xi90OQij%E}l_z2HNZ3eb#S` zSxZ^u+il?tC6UKrwaUyZSyNgR&M68ZWuYZ5s|EL6b6kBVTE^+jH$=-CI}z#*+Jm$D zB_*rYSkW%>d@;tnIploImlb(F?peBcXW$~J51#v%UKlJj`F?^&uS|W7xa1@6fvepD zpN}~DSKSnR)^Ff(j%Obr`6(zF>MlilV2mkXEqJw-6 zWYH9+tTe346l2~Ag;OeVxh$omLOjJ+Vdb%(49xxCh7zyxAw&wNxmYNltOyr8Sx{d7 zq*+D9w8B@`XK|B+6Ke;-nj12RgEU7(%8)tOtm8oLIypIg;hLgGP>c)8t6Cd@_ zQw;V*?F08(xWo?U^uteAhW7i84R`;LdRRKIvKNP!o~#b~=pT03gfnK-EWBAzHV3Ih zFf!(Zu{LLg!Ua%jr~HG3Q%T3||NB99-Wu$*@$%%*&+y2C_q$6LPIHfoLfha=5eF|t z_Z%5M`hy7yd$^J^$C|Vz7_`L)osxH=#g9wF9T=zO%@ zsaMX9b?cFXm9t^pSTY2wz&Cf7{qWp_mt5cPS-<4I>7IDWJ(C-co9NQH(%d-H5Vl4k z)*NVE8BA)ZEUhQ=Xd%j4JnuR0y5br?xMN4TTZda)eCUru*kvU|gm+z|esR&fpFj5t z^kd%QMbs~6eH6=CNdeYJ%ZU9Km=ES7#h~n&okuo?X9BkZ{g1-}8!P7v^3?0u%zZv0pA@|=ys)%U0-UJO4-WkmvrbWk6=~s{Uk={CtgJQ1#dF_0 zTVu?c#;oI`?OdM)pwhtJ9{8q}O7~As@=f_(@(VjAG+p@WHT> zYtmiom0XWZEKUHz&dJ4nDELjESYP;)FHZgxC*`>q$e~}UxmUiy!gD?3b}sYlZKCReNP%fjqcM9`)$RN zHHCPR&hg^O*RjpYzR(re7bNRmy3`3Oq|?UT2q>lG4BOA-Lbwa zqa*ZPKRz7#eo2LJS=eD7_v$)_Xi$~q)Z6oXHV<;zEQ({blU#@Jb5{Y_Tc-n zvJl_*A!qCRvh?S!Mba-<*n*7gY>W>%*ci#R%8+-Jv`>(}KX>io+;EY-OTMpdM;PpV z@cifeHS`#(=kY@c+1omd$di0)G4YR+*0s57b0{YqdH${1r205_?elX4V+35@pGf@i z9EeNw&2yM0@qN!Bn)LTg#gq8{+_g9}i=f})pa{`xNMnsteaFm@8y|~vJylAzGm}K* 
zQ?5VpW;m0o$HB%|)rmlrQfnsY4+%GUm&42bhooC-(mBNX&6z|JcRay@v%IT5^=S%it4v0W* z=KStUb~eU`{A`T04Ck(O#-`Z#OUT`g5smYSk)5OboEg)cW_@3#O>lgaL0X>mbbFYZOh zZ+l?qkwY3!D@By+!{*B{bbRRt=wQYGV@fK@^RWASeesL)E$+z5m~s}l)0Vid^hwL< zZZ!Pz7M^xo3&_MZQu#OfEZc>S@Q`ZZDI~Kh#g%qDuuGN);UaiOXuyl;h1`%c(&TmQ zZZ!Pz;BGYh+C&w?j+;xYp_S262WeRj!JD0s<)wkAuv19HtUW@kud(2%_h)$7)ntI> zf{LgOUOK+ogV15b-HJr8#@gnS7i(!?LxA?!esH|6_S2jx7UOVUc`*d(R}Tl{O*N9% z!@+pd#P@~I!WkAaGT}(L6gEL*eHct5l70-P5lLU>78g}i{FCcpd~jQ7h+3vbmd%RdKa)F_KHR~Bmy z@HGYpVP*J?d?S^J#i57d7nM?Sg$e}Bhf3&%ue}u9Zww7fLsDMD&=4sn?L)Zx@KJ`2 z3{US*M_fw$CU|7_&!qR@{PH#|a(;OmBuA&DtQ>x7>saAF7+h1us+XUUAtILq8=S;d zvRI^`M-Dx)h4Z7ccHM*VlKi*v?t>)RHrtL{b25^}xet|OF{aa2W-hS%WJH8W>$Z(s zB5X;VJ4)Ifmi%2fnXzftXZyQMK6d@DTfb^)X;yy!dp2Tn`Ws3O)3Ezgls5YOlX3L< zgpDlzqN<#s|2fLFbE?vyPEnN8Diym`e0qg`C`yeBvw_8TtIFquIn1>)s!~swE|rSs z>FbvaOpXg9TTilsd2C%g2p*$q+PHR7Rkl$zM^!3*PBnK5Fv_ybe@51b2@dK)I zh3YxSwLVp8rFwc)Dqg30?ihNIO_)zL(zl0ZbGvTB%ZuMz6ca=PT~skk1V|<@EWdT=~8712#5u16Pz& zT-&U&9cp~Dsy34FJ5}un#xNC`qWm#wouZ@*+-Tr_!4`ICn_2ZJr6FpeqVz{;D0W6) zz}Fk&w2pYz8mD~_&wApt&7;`HQTX_aQOS2kDI4NZtc#CBac_M5#(4EqeEj)%)u}=` zeS_K-i#qby&x3K@(MnFng=l3L&o~vW)TtWEr&R4y6g#PDTVvQ+UHc-MeG#QKM6`t_{A%=YzqkS07nqsuQF$~p-KgIuwb>04-)USA}Uzuvp)Q*m1Cr8F8O7BRmHcssyN#ENN zr?rf9kEa?@PYlUzY%s1zRr2R-(3A~o`q!#*Le)_2GiQ7h@h4Y&pL9ozzl$+S&a5wD zlzlwoSTso)WdJshVAnP6QVgq&(oRRSBhlK|(X5L`9@MuzhBd@!M`Dz-FJi$W7`AbQwq=B^Q$1gU2Q7`%wh2qMtz2nf+6P?ep@r!>qYv(g4>r+?vzvY>N~^#K z+bA(vdr6tBtyN}fl?u(n%khmm`u~q<{&3ENf|L`S|zBvuxsMtx_%2_Aq@j3$!w27XjPo$5|Gv z=E`Pj+D^_pNO{R0*_ z$&`K6o-X>)&w@L-@&Prfg>w|$jYt z{9Xv!bwvAP(%+}q_Y;q@^Gxk$fukIP2L1ycyug(cR6NT$iak^mM1YS2K3Cu|UVxwQ z#;kUxo??MLT)9Akzw3c@yj8*P4pJv$=~lE~CrRs9Cjy1BZAYrD8! 
z$EkdTCtT#p8S1oP+-5Zi+l|vI$Ipu-A4eRWuQMfv=zg4J&>g28@(Yfh zp)LqCFsz@7vOd_xls&|}eT<`c*u%%Jbxa8YU+u;xj-IAw(bV%2Q0s!2I>hv|mvIyi zdGNKspS?!%C*6ha#EqVyW&~=PvH;l9U?WtU03R@pq8RM9?&Jbn@4?0;t5Zn+vw_GT zUjD$YAZ*jaM!oT))6|)PZ4B0~qLc-j7*r0WJ&dE+>S^=&*O-zj>ZSR1DjPpR%TkvG zT9~pLh%GF5%tRb!97VD5v>M$2Y)>!s*ThcK7ON`*9ZXC>J6Z5F!!!*trx-_33_lB7 z2y8Q9E!~QXZ|7>V*2;BEfGA#M+Sg3SMhfL!JaC9BO{7-X^uiG3!ObdJlHjGvQFL@G z4s>$`U9xd87O3Q`cX#3b4=@D`WK>H%LA=gv4|vgxip^yt5sz)(cP(X6dk&) zYngKU>aAaW`r`oTRx?&ULEEFN(_xfhq;*E=%d{&|`Ul(EH9v4(A5R}gsx*= z>(KSH;Eb;C=CNlr{S0h%UF}u1ZJLfvB|f<=KB>{@n_D#cW`~x5EyF3A&cC1^HG1-O zUFp*~e%_%jIPES+-ECK>?uStK>ul8+?F&s^${RF&H`7k);GEW@>DZiJ()E4ti0C?I zBz)zTs`cvn23RY)zFDJBwrTYJT^fD=sFrX`Q_fQ5cj!lhp1dVW*%-y~bBC96SbZwd zf6)fB|MA*+UCrjlbe$G3U57hnuddhg*fW}bTBZNIqH4984*MRTpe6VOEx|X@5`42= zOE|A7*vTk{-M2wc{#sXhb&j962lXFyw_SGe{$H`~>lIi@Pj9$K0g(=i+ znnE2cIKUJPIt4ZrM-h$!*V3l60)M;b)~}BL_+iYg&DBDFqZ4{H^=00q>3f)VTBp(a zwMLSIv@oE$bsfCJSD-In3w^=obsBwAuhIAS;B!sy(h_QPLwPn4!YcZ_%FzlZ%U2^R7M~b}~UuFpeT-YX?0A*mZ>c*qqzrwS8Po)S9^tPbG@5O0F={0F<}! 
zz<$$$ZYNbf$AcSGWed&iyHt*%GcTPjXG+Sy0NrZPJ@)@nd%s!xDGhYX{{`rFg<$^> z-A~70Y{9Ni){d&`PNsFL`UxIHxtD9VxPG0}=X=zI6Xf0mL*dQAmPJk4r70NF*x+d# zMTfz#%PT><<08>cGic9>|5{a#Fs&CxDi5N(Q`J6Hbr_5&cc}?CR0RW9Q842wO1+kJ zR8zW)NtUDNp&bP66@qq(K|3(|bL=~qc0$#=coNDtxK^X;cesY~UNr%(ZMZq06ZGS@ zn)IQjv=Hr1jiczH-BE`9pP)@L1W)3zoB4)#vI4Xl|B>{Ad`hsr5~nOsU#788hQ@3g zb}Xa7IYvKvc(7K*@ItXs{!ot?4*nz<6Qdmary8jjD>-DJn$UjiO|M$(;1Fxe0XhX}V z=AJrZ8~-3qZHo)+7^NJG1M~XglJ<^Lc9D~}aTG_fjfz5g7`tu2D*{jRJ@(|8N7!Dj z9_E31oCcBLF@js2%^r$}NCzzZI^a9IseTim-NqWYx}PUtTPPH#k0-U5iUWm2vE7YN z2fo^kpWViGa&<3H*ouKn^IIx6MXA_C+=@|!ar;5^xU!F;1n}XL>pP~lDVtwW*kMh6x=LRW&&UH0QlR* z=>G%YJAl^+PiIlo{unJujn@|FQv%Dx*8F9(_f6VziG9`~vE_26J!L)cNB-WyKNbsr zQi8TfpB{Kw$pkhZe>X7Nql=9>w%I5;1ZDzXLwL&{70(mZH1Z3i1Nic=egOs8Y&SNJ zlbu%0grT-9xYHEEHilyWcqKs48nVifRZNMC5W5N3uJc3MyuoPm^58C}Y^2KS7)KFX z2Uoodfvt-W8vu5x3p>{6uasSSdL>hmUD$B}F73P932gU&clFl?b~&(n+*m`tQoKq5 zw%COoA4r&}&KlFo6jI7+HUTc>HsbD4#!>C_#S0U_QPrdT}tpoP_e~YZYOMz_w zw)*&^w7xV6TX6Ka*=lY8b3q|mxKTLcg}1pwIO9*c+Z_ac1=SrvpU)S-|3}#7u?D{q zr!P_q6FLna-Y%AO&gAfE#!rDr?ny^jZGTIzJQCg>O=VAR5 z^-cg5>({v1D`B@_Yb}lwn#Cy^Y;Sn)7@eZQuy;C)d7%S%&4Zs)tga7y&fpuyH}|sO z6?0~R34|gx93K37;E$Z4wtvs4KV??Fx|R;R#1eq@{~{I(0$pGn#lAuKnZVZozrj5= z#!Qm-Gv8i;{tv-V&Q*&8mAiu>JS zQWAk(PS^wclDI~L%1y7igIl?%KVrH>MLpiW*9<2O&F&)_Tz`D=&#*ZFD z^I(&)w$}{1MqXbI|2Nfboz2CLPF2(Cj4T`NE*~j3ACE69Qgy-{{rvd_S*z(R~OZ7t+CUWs-=O$kRWQ?&4Mr_j?(D8 zi%qc**gC@6a!MSh&Y<)6EC7m!J%0}XyVQe?OHwD(`THzjR}6dpetH?~e_+k{3~_cm zKA0@M&ej2c{{NzOTXR?PBz1OR2O?2WcQ^=T5g{jyjH4*5a+}vnfo%|2^V~Q#R{HEe z8Q4OBC7%N3%qT5h9Y=e-X{dMUuzS4jUoa)j(`Nbe>OH{ro^tnB9$O4-i@FPq-6H&y}o5Svj27V6zm#ACB9vN{FiAmCHdK>V{5PY(9{p*15JW0I! 
zuDQQ~9h#Qz%Z{858taG9$XKnu88~gKOA8D3A^?Nbb~+sEY!P#(o=n%T7~U+Zjjkgd1K7 zYzMH_$AiQJ#H8U2cD3f~40Z}=tb0Hjy+6bL=K*Onf=2%d8vEv&wn&rSLzM#C>c-}1 z(rd#MVAn;6y`GENg|Hvf{0!b_$F0ye2tTNZmDqo8Kd6YDZUz4Kaq6=*zbBIYu+^|1 zRu3Cg^nj$#*?C>}a#pi>Syzjo7^yr6;UhgbkT5S!<( zSr8<55x9|a6h(xOhfeomtbd>riA?tZ+xvgGvGOu42DSy*w`{qX zPhtO4+neZ205Pqxar>E?!pji?GzJE&@MMeG>k6B&+XthnoVv_r8fujp9|~9 zUTOh$;BO-9ZzZrDgl&FA@cfC<$%r9dmVTb;M_59i=|d6TmpY!*&Xpsixu-ct(arQp zpi?Q@V9i7Fde{FV*1rg`t-#(sLVdUDmAAXr0oxfNb|$bjgl&4ln760Qu2eU%zy;G6 z(9VK4OgG(K{V%r{!T;l_Tb^H?z{U*4?lj^#%Y!Wl*`z6}iE$L$-OIgF1MJEF;^vpA zR|=aAY%O7%OrLDhbn^SaEw~o7!AE)C3G7BuOq-U5#{ruwu;$(?IAx)_f_z=&z#=I8 z%)NbGT`$1@4XkOS7<-^a+A_6_>V6aWhGFZj1^(>e$niPA?|cA!0QjZAJ31=AMxS1Y z{U2d%UyQupYA3MWe@*oo^WfMxjn3%a6i0N+hM$j?1G|T?_Fh@uADsehvA~WOV@Pam^W)i<*A zjVeNJ(s8HaJWoJ)1#VX01dE~q&Z$Y&nu1VNII=a4qPs(&n=RU8of}M=r7ouZNpY1MOJ-(00sY(9%Ps zA14_{QE0g}2gU%O1ANOzxO+zay12>G{XUtE_$N`f`z%g=pT7n8(|<*McjM*v`3r%s z1Kyd3;KDLc`xadlP7Mby#D;|2{h#_*%l-wub!L83$~xz*@eg zgcNl;`FSbWV)d|o-mWZ+{|K>lz@9(omYX;`-ecSwSs&~$FYWAS97W;%wA58u3Ty*m z?fwei;cmlsxHh=o49;j_97S=X!p0^8TNoj>dmiS0VBd0&)1*n#G0tuYDMN*C7jLG7_(K891Y{<*OKU06Ty9Ie3KZbO?RIQLlxY^Mh+f9^9A z*cxEHd;Q!dhLG<-12^1#w^^`_!#uzU-^V$MhpFgZ36<+};QtvyXK^FLwV(htwjwy@ z77OloZe5}%f(t!#nnCCKe;V8#|8xpLrx|prjr*XcPc}GR%?@DPFGcg;7{(ncu=N6K@+&D$I`$z5>Y<$e-YMRi-g_MVAw5h40~U;0oa~> zkzosgZ6>U3^N77V?6muz$EN{X?!n62hrKi5|MOslA8Qvz3v??yZ0uMIumhjD+br)( zssy$pLToy))r1u>+_b-qt7H0t^pi|)WeFEdb5ew=)o|J^i!Cxj$5C|8&HX>Z`tNCj zynmz>*xM~=gT4OCui@)}?et*fW8Y>1TSM3gV&4>Cvqim;#lCfB!2jpL`-^?62L9Nm z)b_|?-(~~9N#MzsNB)^bbY6MV^lyK7zt}ki`0NM3cRqvu3%nHrBJUHc0ruox^xZqY zw;JPn`17)CU~38cfWC6Llay@G`C*OWJO2LY{p=J|zhD8ReGsnp9Xxp2wDL}Ij^YK| zaKu?bXFB>1yty9i6*ks{w9jD|twuhDb{_b`OhkdJ-3ZcEgU#lJxFae@(LMGmL962v z>XUKi@r~xGY5EsyJhg(tFwcr;>Iz+`=8b{IHOeG!ktBJnI zy)>WANKV|w)md@-RDEFv=(n=;4JxkuCbX%BYp;WB(+g_S7ERefG(XfhiXNI5XJG#) z+DtiAo?}yz5|JDrBM!MOauA2AZ{}lQK#P^)&qvF)nO{*NV+L}CL;27Ezw~ZXW~G^7 
zMK_%^&?zT6pU|BmtcPQ0EX|7B!SwxNED86DHI|V0MeJQVI7iV!ooU#b)fHxf$yrTAPNMC>Da2`(DN-qHEmCxNEMYV(0%oc_wq{^9b)O9eWD#zYl=l z1bkN`)lcIAu@;G18d7^K)n^5YMJO+YiXyd0tK-5&(P3yd@U;R@{(Sgv6Q)Vui;V*| zm#~%%Zk)wzfaQZqhrfH&H4XNEg!R?|d;TvXt9L1|4TLrGw~QY*jn3ST8E0-E3>%h| z3~ZsO*T}&#E?JX~Ywey2{g1HCdw}izklJkAD;%E~H%VVa_iX6y(J=RH%%GA&;F})+ zp9*}bz|(m;bSyq!#8O=}<17!c;BD+qXhONcIEou-(Y<;J7 z?`61qAdSHG|3{Lw>HAC4q+?V|fo&C7Q(gwP)`)LiJ#3DkDZs8Htk4tmdCa1)x>3yL zJE)^L3m+IRhe!XD7zLtkbAM{&ZNUDN5;qKgAFvhJ+k2?bCf3KeS_f>W8|$OqnZVWn z>y68m_YvQo4A1`%e2Vlvr4HaV;D7D*ai@s!c|_(+d_C|-{zK&WnZVZozrkI9viuqG z?H{23L+YO-eFvojcnx^3kAEV~$C$GzPeCmUVn=eXdxqw!P=?_G06oP1ud*}!fB*30`;>6l>!`0NM3cRm4I zM6})V;iXFZr)z*c*+Bia{BXk8u*2B@l?C^*d&k3P16vEMx8L$(J_h)l2f!bj2>VC0 z-RigexzZM3Pyac!eYE2@4o;#Noc%@&4)S`OVn(#kF~(685zE%fv=I2Z2f)Vxp9{Pl zn;SPt`VK?aJ*e(9*gqbu ze1G=<+xvcG{VfKz1=vXa%xS=v3%qMiO&mk9=u+o^J>N(CC-C0!B|on;0^eUxcvt;F z`8Bl|_?8F2rvYCM{I88zdYqk6jKLgVcL6R=fc^J?G#Wso=RM+si0L=azh_9t!mj|n z5%@@Ks#IW0f&I08J_UO)W$_#8Ru+H_EV6DPMeeXRaQ9=-{|BT|4;mMD5f7|$GRPu0 z%h$b%rJ&IO8r8?gnm#S;p;IY+(R6YjWlgSy--xB}<46ybfP_P0FD{NEukf5hW{PuX z`>4c#UJmHBd_sQT7ub(dG;HRlXq!}hjs{OrZhAM@PqOq=Tt^NXFXsYp<2^=NZYs77DBtham3D)fxMV(qZq+c8`brJy>yHZi{hWu5j49c6)&B z-HA5a=K<-`Yg{p~EgtL?X`5yeu$6@U$h?1=EN>U|kHh}Qjg6Zr-R4$cZ~unswQK_U z^UQU?c1DPu32Y6p)o%MgF-3Y!zdaWA?*rgFfY%~ydo{4fc0|UrOklS}h>Za@2iUic z$2&II=oIM~z+)+}|3uxE?J6JZz6IFR?@*upV{?Gr3G7?P$5XqJ2LyBa#98HPjku>& zM|=EX?kNesMjUA54yKW`Y`t2`0+%q&5RJnuc*nfmHo!QFo4me}W8XvnsSizNTycDk zubd5=fbXh{j0X#Wts|^`-Y4$Z?U9LZ3IMw_LhR`=i2nxmPmVusu0e=J&L~F+@pYDf zlK|m9YUWA3X721uoTG@mf`j6Wwt&v*?X)hZ5*^(0qdQkC_A?~73p8^hfyyQx#4;xG z=HY|~#ZyD*WP;9?Z-CD2?_&Iej-!wAXTF`l#sK>u`nVo6j%*u5!>2x`gGM!IIQqDl z;?%Kq5^?HH_sbzRkc|7Uz;8JIZF^j$j`4Sv+X@=D-+nL}<)G098pfS`__gDwOUFu1 z2X;NMZy9%J5$^@PnwX3DwPP#+XM`eAYBf*lG*3<1IY&`sdvu!`O7C|N|3GbMrZZ*G z7?X{d{brW1nH%xc7g^FCGcUkyabhA4PdqdlL8HHxc;T}DlgCN-r5N}Y;E(^9;zmDt zi{`oSukU6(T)oW$NX{v;w6>^0yk#U(boQtmMdW~VJh-W#Q3@Jw9Zx5jiao%Dv^V!M z^)L(6b0bmt4V*EU7aI3+j^ZJEoY7?2odo+w&`37ND)NPoo_LX~bF>Sba_6JCm234} 
z-@&PTga>*!E@hz|z4T*?N@)*8uEUcmN6}$Z4QQYIci^+w(}Hen;vuf4Xli!vhY4x6V`M~Utw9)wkjgs3>WR5^+c-ASNZNp!8dw?6jw{ojWD+zNSMWEK2fjIo_|HEhK76xub3wOW(4`!!Vg3hQ?_PP9uN+Wq zprH(*G27Su{Yud2*y7TkZ|E(FWY8@Xbj>`t)3viW$JcQ7iU0@{H*l?%<2awnZ9H(E z8@s)3(#vW!Xypp+6lp^p{W&rk@sDo0Y1&DyPS&uuL!cFkm>p{9gqX^Rw(T+xc*W#! zod=N_^+VG4R+XdZ&@Bbs);}daq(aVE0|J@y_uqkC7a{igqlkY7_O0WSXs$yJQ{;q6 zUABoKPjbR7%*SLHo@PmP;=vwd@)>l8o94?F&=}ZE^ZqzFK~$6V~2q%g5f`egxy+gO!iH>jXB&)n@aqL9=0t z!i_m1EzY|dv7NbaQ|s&yX%d1-{<`oV9)*=ce~~FG8fo-ft_j03$Y^i5ROS!fM3Y} zc=X;mrh>q(aAC*ACX#M_Xxt-sbGVe3%K3*8{|l_w#-HQsj`dE^=-%k1F(K*YYNpnS zaI`%%w;W}`4crJyyUjR?TRam(E@;$)hGXOB`dXtBfnP50W~^oWV#1#`^Bf{iVXt}8 za`)UYrvupk3H*1!fA}!t7Wv9kTnGI5KXKYWAwIm3sk>O zq;yS;xxm*8yw(1feBI?u1b#W;?J*-iubq8}DJcw&#kL-iVdvGHSq(&rE>fNhKr zn+j~H3p*}O{*FLTJkCFWb$r%Xhf~v}&%_&n@Bg(MPd@LN%hmD#Mrk#P7(qK1%nU$h z8Ank>qF9q7=Ep+NXaN9iO5m>oRry8+ZNyls!?~d|6IY9z;+PU zwqfM$kaS?Hf%UFY{`{iXV+G?EtkTIX6oW8hbV+L4s&D~DGpXtWNd;nT;P1{&pq zY545R#Su9F974liUm8H8=S|4eUUTH_v_fE;3A>kk%!uJl)eug+eDYa}VW*r8Li0pS z@m8K>xo!}{j-s=hQj}!SDI_|dPBG|A()KaEFdgpzea;fB;dq7=?vW+^wF z(=k~8LB|{$Mt-)`S#Rh&SpZZTi9i<%A}fWUwV87iMRqBNN-b!d{bSN`GnYkhz3-e< z3qhk!&>%m+Gwl1p8fIK}g;!2QiriuMe8AF-t&9PU9HOz8au&b9W+rP5sy=zj1`Q9? 
z-B!t&g(8w*oWOCVC|9cq@Scbh&?efoo>H@|w|t!LDJ{{^|Mk?r$lF#58m*vFW1scH z2RUQyA*Nqo3AmO4hZxd3@T4>5t%WYmQ53s%S4$Kn6?95Lr}}un*^4Gm&e!p_1?J;g zBGY#7`B-qOI|}|kK||!JFz(Q%Y6v4Qk2^)Na4fDm*&slIQ zH`2*$;vB`Dwk>P*rv`LRzCr8dcSt^lzk2dZB1As{P6ol&K9+>r0s?%m9Bnf}rv`M4 zyCs+>k~OsD^4jKzzj0LodFcxbeciu`s>7BI)8@l2G{MTG&2Fb(R-3eAn#lYM@n53@0IhW z7&Kal#wRq6o?}x}v}UGH)2cZ#62{^3$w`*5(R{8}xKH--q|@fzq&{&?$!SqCXcZEz z2xI-vtJwdE{up;DCXP;QgI_lW!4`A0o#H~#CGQ1~StrdSFsBi)XX`-c{5om}y~_u` z(4yEvB(9-6T~=%|v`5@g9Gs_1s7x;K^)CGQX|dVR+rx(54wEz3RJcUomjhp8#G$~y z1znk`;XcQrvIg=WCm`96n6LMfXwLB@>!nY;2IqO{lWw`={FB=93HcPo+Jt;UGqOSK zC`;%!_mLvg{Z5{Qmq|tZ2p%Iw(an@<&^h)R=-4sS^6#%?0=tE<4|refHuK&mNhg}0 zPz*O@6t9VM;XdC$&)&H(x}LAYu6bKx<&_tzZ3{<1vm8afWu5;juA# z{8y}uc%JzS^FZ@F=O~Jk5}QaVXtWNdF&a3DgDl!*=U?qQ#JR?5<;h>Nwu23n2LB9LZS(-!!Q~EBNNh;NY9{3iO|7nKBydBbQ(Uusv1w zo&-9F`5-+DqXsGcFmIfp`2e#CmbDM5F@xWmw&u#Epwj?4ElqTn0dJm9<2m|lt(NI; zPy*@AL`Ec!#s&jxn=p!SaF3#+ZHb_Vz#`zL*QqDm- z${d>JKVX!-PE7OSe%M78+{Be_M4^Fm6g_m}Kqptwq4OEEr7QtgEdAvLJ$6S?0yYhd zlQ{{+`ZnRp`ha9Hj4MlsbQADh71S4+Qz6e7ykQ@ofpgX{CHL4sGz1|8VqC#;LjT^t z>Gce!E196NvJaEcP{UTnX`7rEnxr)`by3DSN@`3++<@*i z4HmJNsO6L!NF)TpUIIl&h~2jo&t1!8(5*f`iqRdj8YqevQshIf9#(z|4|*E&>jN!t&`}Oe|qvQRbQ8W zQPpcWi0Xs0yC_#8jbi6kU{)LAQ|TT63I_H}1Lzp#Owz zru+u;*we|0Eh@(FZdG5b?NKpXBk};!!oLyGfW-`(WilP{;}&1sWd^4>jj08_vulVR zt>g39^yI`nL~k3>LmtPnm>VF(Vx4H0-1y=jAKc@h{j_^YFf5tLr#O;Z~HeKt| zVR30SI$S>5l_+{f{!A2wkMu|BU+`u4PODJ;qb}87a4bssJPML@F{kT>q69_ztUHeK z_Ie$#=U+wJ_L6+iw%9oxs(PKxY|)_4>3GO?x2oMx*%>wAu%_T?TSOmd@eP`ONYn7e zqomwDTG9?3&)1{aqH`2oZJQ4I>xq5@dOq+!6=gPI#e4+c`=q$#FfBiK&cQ!vfL3O=`ecq@s1e9adh!}Gu-LWi7E3VN-@pcldXGX>angmvcE-PD#X)E3OD#WA;GVr%#( z9=t|W?LKOT*qit8#g>KXYD@Rm-uX3I(5Cy~xHFPOA!rjZHuYIp74$dKcOelkPsbwF z&l3)-$_DD#QToxNCT-QQGNIV0aTHxuDC<$jkyX@Yinsh3n>VYIHAhiA+gY6)v0HUY zjDm>@YJV=uT({VPS8>B9S6ds~6OT}U#c_=KOb*=gGOD1C;u zKT6Nk{(tt~KEAQCPX9kiPG;&5G1iE+Myx{|%2>yO5#v}PV8jX$Di$nQpbP~I2B;9Q zV&nvjSTSP7iWMVPjBJH4Q!!#?BUX%9*oqY^R;*YtvK1@aWwUN~ns$EIeNIC2xkNII 
ze)pfBeZAW2+`X^&=YBZ%d44Kva-OL_wh31klAF{j7~9~R?c0}i=a930!l~V=Mz6Ww zx!}C$p-tMH9Ku=`VvnlW2QY@cV0#t&|7g3LdkX8n1FTyd?9_Rx&#e8+zjxi)ig*Jm z-T|yTi+{%aH|z4uGV*t=JJX0)y>Gg(3-v001{`T!#^i!Ha+jaPOAQ--8o|m-(q(@Y_I7rn&GV19ID(Hmv?aNNGX12cdCYj` zJFWMa=a+Zr<2Ia-#h%keE}Yw$F)zk5cb0)1OeWu6H;i zffYO6U8bXI*V*fFP0zd#Wmn&eid`KT9(m&hw9i6 z?c9MKmLXf<9y@k_>KDpyfi#;t=xE&Su)Y^C<*;LNBi#HSvHz=L?NQIln){0PIIv#N zIjjaWm7HFvg(rOv(Ly!>aP);6Ow5Th(pd^^@g` zZFx0c7wy5yEBLBntt@I<5V1aCJ=ybC0n+ z?t;(qx-V@E@s`h0?Nrwv-fcT(%Zc|m99Th>9Jai@?J0-zE{B@N$qW4dVMjgQb!R?O ze50;k=X9EHyvyl<6T@b+RKq=M^e6qX) zKcw27HID{xY*RTV5HVN&9qm^!cdKi2=6J#BvYm%w;l)18jdtf=TRl#UnKxvb`|k_( zMqEG{QcK3D!;Z;~a2doZcB{6l``0m#eL8lGk5=<#K*!L_*;;YTf&qxLpIKEk^X%uc z{TQ6lF<;BK5lZVptT8i|S~iZey?&b>!*H3#w>w-}b>X`nhp6Uy-#j?oYik_cc)AIb zRiQKwVs)!n8{_33HC`6gb@gsJ94Xz=eAA`;PmxPHnlo<$X36O zTWD3a%x&9k_QvUr=L|8qQMnexiarxdU7PfxoqYiukS~}IahaFz@X(Ta6?)^GU3*+j zK5e&Saw7_kS^uDEEhJy9zIn#Klu{WIV_pO-Uh6 z;Y`FSA3q=Fb@`pAs&De{0*?EZ?<>zj{%(DFo7)XOt@vj-&zUsiZpPTaHJB~B*WuQ~ z&ZFGj+VKdjZi~jP+&C?3UK+xucQLuqLLG6#iZAbz;9BQ)$D#U}&Y=4u^OLOV)kQbp zeedNf{|}kpR@mV41#JKR4n6^X`V5qBt_!h_aH-QggEqBdy~o~&=>@y~6b{Ah>i;8- z`bmdo-g;wh<(_af4(a%y3+i~gZpY+C#nk#AvHSlY#h(5>wtr@9wLQRjEWdu2jupg~ zVW(QF4LKcC4)vcohmQ5!g83hu%|h;o?w-_f1dKZ8)t@Ky<1_i0^V?9j=eN@RCjaZkLwQ^{h+#i>}}2(q^3cq)WSflYYNT zOKjHjF72_+`aLdfqE4T1X*a$=zt^Qb`T{-Y(r(>?;&*RRfhV@8!1rxYfv2~qz>jQE zffu%@z)x;bfs0#I;Povk@Zdoz@Qnwlz^Q{&;M)#TfyWP0fhP_^;EW4@wB`3cvu}iZ zWN`m*izCM73cQXo6E@I+a_!Un`6bM-u;g= z4C<|w4NsO=B-p}YvZ(%R4kol=;ddBI;LGNt1U2Yo^!gde9j3M64chU z@kWPrvOnX%S?%gkIgD8Ir*o{(0mKX67T%$RO~buv4fSl_{{i3eSA6q+zq4#=0qg(2gC7JxbDHXl%Jar*d<6FU zJ8JFVhrmB@>?Z6Z@A;Uy%?_$vfK&DR9nR=OHe8&z!Qq~E;N&Q}U`G8ZIvOjt?A@i? 
zF}bnISoy7{1vld1d0XrQ1k}#miE4G!Gh@TaGdy{V9c%N+TXV1daYy5zx}1a1qq-fF z8}Y^vZ@F`?vFF%L=5`y`;P6=7G3qA6&S~4aTB*5j!}Gdo%#?2@Y%DsobydxzZpY+C ztZu|gBbIed0n5T6`hH*gYy#V7vgLQ59I9_~>~s%V-{*@w=eraOk74_#So2;+^}J;h zt_A=7&-Wz}clJGMnY7;7y=_}h(&iYoxi_A%Yq+uej9t=h$7HH{k<`NAhrwS{eGIvE zTi5eBUtfn99cGNG=d+*3x6G%0gYj>+!P@rv>hwm(!TO2LR`;1&5HnG|;`3_t$=h+Y z1mfv0Om56lZ4ms-sj3a;eL~i~v<>=B$9DZ9XNUVLbK9*xCy3j+?k^wEK4!CHawA9x zF_MT8ua4o-k9KU+FK~w4SDK%_R<|_XZ0=%AtBO2ovtw^#BZh_;)$7VO#IyEy)brQ} z<36`W9rw9CWW!V04jj(m;1>7RV0y%^-Vi>fW{i0dx?pb@EZ-iB{b5XQl#!ap{?EG+ z_fB=pgZ;5)A8t46YQoOcpxuq#CUZa2JVu|hH>_j5td@eC9Cl1@M8Q2Ph?9CI4)%qc zJvcgS-}1EGxl4Z-SB`Z|U5n+aeCx~Ys&dozV=B*ySI4SDtPT}x<9*zdI8Xl9@8iC! z>V4d}oM~RhowZgb4JjS3%|;nn-HyrX@Y>j~vHW*b#c%Z7>%e=Qaira@uAAbzI3{n! zc3i!cTTNf@a6jbGZdReq3j$9&8vfSG3frZ2#2+%_tK${)>7hRC-k$8ij_!iZeSP^R zVRIjM+}?oeve=cv`_9cfg+)aXr@bo9cCY6l+@rB&3a@I?aYX;9`4wdIb}DnX`&N6K zxL@s~sEhiUcoctBoIBKd2K%Co`cW8%yPWOr%gj&pT#4fGNxFN>S7C8_2$LH%82~?d z3i!(VkiEL!(TsCkC%U8N?ZE@!N6g0(A5`NQPYmIl)dn8`A6NVVocAmJO4CBs?DM$4 zZ4zvqS?}^QaR;CHyuLpW2S0VPYPa?MfWurTsArwAoR`n1J@&UBkqUuLD)!Fud!8Ec z&e0xqF7Gn4vl<`8dqq@d&D!jkG%tLcJ%(9T2ew188FkJc+ur)7faB!)-FQk8z;(7p zoR2gQ8t(h+XI!S|u>Y%A^LYZy>uo1rg?HzuyFSfZwx(>27=Y$2TM4@zlfz>5ttG+F zz6<>Ba+?ncbUH4of6!|4t+vKO`9WE%@G$w5YC@&n9biXHRy|*XbBBi>eWs(oex`iO z*qE&m*J;heGQLi$Mk&_W;JXxWULQldJ=;T$GwYw(XKKV*NOPYF2RoR2th(&kU&?i* zTg5rRdx{4UYbLDvP+b$n{V#{&`|#~L?ti(Y{$`u?fZQ#r@JX{pmCi)tHpYAiF_MU} z=UDY^(TiR_hg)A#;fQL$(V07LH|6gdd(a&&-G!4I>hQQ z>!Gd{VH`Bs_xqX5>0hAziZ#y*fbF#J_q)i4z|Nhh=9xM-bfN9NIok<1--S~n_v%h; z_ik}IrycqTZdcNAz~~s&)&Ft8i2sl4>i-kw|DV#`X{WZX=GK^+TMs%LpLS{`HF^Ce zJ0>?q3GRPGnbq6)E|hOd0P}tr?EE`bzm?w$aY6xc?LEo@3Ry4UErwY~ewt zbIPvYr{j|(`l!>1L#dl_U0&C3ayqaE!T+%a!T+r$`zy!8I<|A0*@_5jP)!G{m9?5Uq(U*hj5dl39g2z5Vz^=KIEuwwtg z>(Syv82@J6@_PzG_Wk~*$OKpo?4D!QyRXdc+RnYY+6%#nQ0#@==u~?lNvHEVT_1F+ zy%78#dm;Eg)*<*m)*<-+Ybmv|xYybEgi~8qlj}}9Cac#QA(UCY-iW{L zz{kSMPc$v4cyoEhGZAhai?`^PI8Uv=TI)se*Q&LD+SZ7>-pnW97R~pPS64^NB1Y+W z#JE%SfqDK2=Lt{Nv4e7>%`>L%yH>9eGIvncRX99t`{3WWxYvhR<0_VUA1TiHG}@c? 
z_Z!VEV525mKIgN;uHJQWtM#svSOa!;Z&Sr*ZFWqWM+jy+FbB2eX{`Shd;81L25bwD z$2kY}klKh1SMSs{dUNmC?1B|r!!6}g0C;Q%lU2PjhB(#prf{5!d0^*Cx)Cd__ygFU zHG%B|`+vDT%lw;c`(p6G+unm%V~Ve=9~=DY9UM4^qYj8uwuVQ`pEi7!V>}<&$lp@XllJL&{?e*=&Q=RIjZ#(`zrcRA)M91m| z@sle40jxvoz;;xz&u5=;`T^|!A0RsfcJ3HeXY06Q>sGt^o;Ds~3!`=Y&-u-VPOvG( zX4Et5xL&r)exF@E(t+DcafSxxZC1?bxLr+;+0{E)%{zr3QC+!WZyYUOKfA|a$7FR^ zYAaK6%=%S12e`au5Ucps1Lq^)vx?tsy?^uY2K{)4U%%3MX8m>6wTlA1PlE z#^bV>tZq;_|2HJhlrSu-(<{ z^EghLyI<3q6kGoO*4Bd$+3)Ybr@$880=9CV>L~MA8`q-C-@(EetBjhX_t?})J@fpY zIWMt2>H#|jcF(bD{}5yTq9)v~fK%DH-y64e{Kf9T6Fr#5UEi2~$X>s+mFqUneoM(hWU)zmelkE`=ZhtJ#aXd2$ssosKUo<7BWBe*4ML4{kk z+c6n`W+NLwxOL?aylofvir{fO^Q7dm8s^XO zxSiIISQ7!X!F-48^EA=Ts6tQyPuX$iImZkE$-4D?N zxDL>V`!Wa2)rC4Oj@_QhJ_i<8Ol~aJt%xyDRR`P?^Sr*Rxcmc*f5jibdOZuabo2&W z-lvG~&$rD7z)n`P4fg%M#@hxqQO!P&^|}VOU9o$t_thM+(;im0UfOU{{f6>M^&8co z&Dt6(_w(XoftcLr^V}rHzZs(y%RBDJJ5lYC^w@E&@B%e;yRH6So8u8%J?_U_v=#sa zyvyG3w{N}D`Vg~votSi8Vr_)Ki%i*I~J)c+4~U0V1)_J0*`z9$Cn8FJ}iM-cbl z?Q$orYb>~94u@$r^&v!@wZsO(X2;}4&tMCL7>oW&3|uR5;ri_M{l7lj1Aa{L=Kb+l z9~^>j7(Ol7y-ZVhK=&ETyAw7qMzyvxObVW*b4yR zRybUMno#`xHajLa_%86HiZ8Dhx4M1mx&e-9)pdiv{q7*OJ@ta^JwSHhd$|8cv3DMg zv5n_B8?PR^hzE&id3Q?J0HE zW>j4|x^I*BzRg;0vmJk@H&?pnqiWv7QO?wn&y<7vZcxq+{az>TmC0;&o_U)Mk86+N zVd~A!VMhSZfW`Fy9-vv$1ACp$$DH-IY_iS{-KxsTZ}R3hYxi!p)$BW1;|ZgVj2 zc&H4T2x4RrW6!bbx-Rxf0~y^6S@fs9d8dbLst2d&U{aER7)cL!b7F7HlYCDMatowxh zz85VARtI~{1C?(isPovZ`+c4!20p(-m95^1r^+_J^>o)W&jvkc!#8cz__pD~l(i#S zsd*6WAlPSlUnP#Ob^*dWsGsBbN(0|+@~-lDRri-Q?)SQNast~JlQ*9QDSzM1{PyXD z_3cyKhls25IaU7$ZFWrJD&NKmG7f&~b*lac#s|RCtK?l z9L^QHj%nPcj~5i+V-c9X-{F4Jp*^Aw;+|4}ZqOU<*0o7>K=zPs$E4Y{=E@1{r-koe z{g3$9KA?{8RD9eQ3O?5jwfc+@{~~F(~bt*gJ>34 zaM&@q5k7&~(=C|qmHXh1I{gNl<5rs+Pk`K|I_VKx18&8~=^WJZS>81t0-pqb?E{Ca zy5X4()NO~J(4EJ3KCY|hnCEpTo-M#M-m{Op81Z!pOfNd>uh+G`R@)K!Ei!7u@wZU0oH#^sNr_CMJiPE;#D1)3v{-*w?Eb-K~fI zqFdJiH)1Cdd-k=}AdlG7YGjNHX+vL3Z5y#SIXGsq{y4ktt z46JN&&Tp!}ezW$J+A`drwg%&y8?Ud^5_NX`9j?PaHmanpd>hNZs?#&yRBGApZ(-%Y 
z>Vn;|-``G-gPq#GQSR1*x9#_`7Xq6E`@peHsxPpO#yQrIUea;v*32en?|ruXCOpv< z*uBYluOpD!XZ?dtZMWJwn-5MeJH3mWw8uBu@%L%fSykPlEo3qNH_E{F z8D#|ZxYOC8Ka8hYAGF=S$@z#qFuuuopCfR~Cg*NFaF5eD>U2*#@rDLO!#(MExZ=6W zh@p)95vm<(o2lk0ws~E1c!-Gg05j`~QZFT}Xk`nDL}$B6xP z-^u271iNeIcH}`@BaULs-L>nUEr>&4d%9JR4vx`+n#${ z6hVwEVjReJuodipVh`XOe8s!a{sUwuz-o%!t@d-x_o?p2`&5q^(w&#+*XzzSJ~5)> zt#kOq2!^DZzQIwSb>IvgI%GoqnRm30=~x+IvT6YKAnw@V>fFl4II&Xieyw+^I4HaM|)P+@EQ>jTSx4$8N2em53E0%^?M!8 zogoClvwaATJMl4%u(-AS33K{ZM?GFdV?J^?rAkTIc2#ps-{`2HP|q_WqWKWmf}>rWD73an9f&)ExYg_Bwr2f4)cN?d z?z~#R#jG+WpQ$pYQRS&}m2r0wYUXGc3y>ANIF9*m#;)oQ+_#R`JG3JR9?U^->?vXz zJyFCvM9k^i9rf6(nRPDTWgq(M;3J5=@ETQb^PA7s{m$D?xEfEesdopPr+@Lv)S>5k zcW@_Sr2ftrZp7#kF>tMSTliWVUX`jUVjfG+)~E=E`qUnr|3{qhJpqh&Tlzu-3twU9eApZJqjq%P+S$v@zBD6Aqjnt{UVq#LD}gIR=WCuN~iPcZSuC z!DruWo3*3g>zB*#j4Y~nBaX((>pJG>g>Ws17ezd4Ul`9H?oiK#26em-?$j%9GhZZw zXI5@eQSMiNo_vmHRu*<+v95}RcH-SFL3PFM)VK`~z}#f7$HR9R&REl{KSg_E<-0z2 z(|3J_5o`W1Q7_!HZ1!2?65M2tFV~{Wur$~1GOZo4h7c?MKpWQgNV*?3N1tH-Vp&4lvAHOfxn+Ce1zc z%BE0T%wYe|EZcm(2-`8-W4^T&&)HrTupX|xvHWoDO{!}r?2V7%ide zR^ip?1M?gf;v8}mZd30MSPz>f$`6|kD)@GLBZk1EY7%e3#AJ1xPQ*zePWc`=t4@cu z;4MOzJ=?RLc%x?JzIR->!DMx;I>hP_b=q+^-gO+ngSj|S884rx#HDH63H!I7G{rgF zJ8=B7Q760;Po1Ol9q%~ZjdOGt0LMu&oS`2U@x()JFmGGKEXR$S*y7pL>$S}ppEh5w zjg6$(vSOQ!7|X9x^St~nHm~PF-2djni$-zs3^y&9C(j;J(eSeNTg!_*zQ~Bl>iN`z zIAbb~d4Ih6CdSFn>pQDW;QRgo^_cqx&VQP5)cXW5h8vo0w>f(PcsbXDw)!Go?5vuC z>+g8l#{9x0c6HQy@a&VaJwu2ycc|)@N}NOSob%ZMJOqf3RAMjFJUTY_FGrs1y~*u} zQ+;kXp<*83x!uy3riH*?YkdnC+h*L4-lX?9I@J4huXK-=U-V_}0O!rG1Xa#gE>)kS z%_2_em8eJg+R4^>dxN7zy$`hweR1}4z7KT(Y;}9QSoheloz=1KoqP2@BVn$a$Ktcl z1*CJ>_PVz2Ge)uh7Jv?7zr7dQgX6FvXaL9TIj9%sWfq|t&i#1~+h-JTPBRFd!ny1S zG>_{RBhX1)H_JnF=Cw~f9>q1?MWj>en)jXdcmUV>i%5Hq-D@=Y5bwCXMi{E@_cH7G zHu%Of@f@@d|8K#)a4KDZ_9NZ-b}Q||y^;e+$DnuAeKTH%`%U9WN1-aMweK|sk&ZwW zukLFeLb?c5<&>Z})Q_us$kqQtCt3dwLzTZ3_vNeqr=WWfZ~R?oAJR=Hm*btX*BDjt zpk_R%;?#3h>i>18BK_M$eC$1YjWMJfI`w@@zT9U*S39zdQWc}I(NlhV;1Tg*lXyA?=$+Z+H3fs 
zxftdjw9eRT3`0AwQE@lL2d{rnuoe>+G}V>>@)md*lUEK-6J@M1WkQ$uQ3IU-;Dh2 z`;5LXVSGU|U*2mZp)Fql4~>sn{g;3ikq&-!uhINk^eeO*I(94i0XmY#c!hS}w%2HF z*=M-NEY1ZTKzbfJ4$Xc8;|&_SeXrqt9s2Q(y+#Lg8Jd93WH29~xjXk7OVG$3D^3*Z z-+^)YP57X(Z|^ngUXO8cx0RoT`jMXeF6Jk+klSncTK5@C-^aLs7VoosC8&NR`X#^D zXoLEHjO7Q~@gUlN6#C~U=s#$D2JL(U`r&72C)D#W+6m459PNbm{sQgvW4!$m;~F|Y zXT_g`P9g36HQM*aea7%_uv|b3zeRsSn;+k6G#rio`Mu@uhK7*#`~l+uI{n06qX3=# zqm{2ciT-&L>icK#(AH)2!S2@M2L@Bcx3sP})-ZfIdI>IY4iP|llC&vo$7PTQ(6 z1a025YAz41&8tQcX-}P%UV-{{VtLUzbhF$FC^OVA1L zs?mB3;=FRz=!JG1x@wF-n_snR%s@*}{aB2brd1;V&A)oph(J?YSB+6qd;W=)fv2w(!5r!7FqyL~YuSGvV=UdP| z=+x`bzT>f6>_GdVgRjSYf_Am88hL2bk*mfEv;b{+8`^Oc+6f(e1KJ7g_M@H9mN%lE z&=RyYgmH8<+6m3P3GIX?0;|R}wEN9y=iA}miFQKk-hy^Q3(!&MWEgF)pB;C!s%}zA)MeZFv{!eP1$F(pMh}!jh(q_xZkzU7=Ite8PszY>I3aL8~IT0IjGOcn4i!9bh-!Q0h&7( z{RH)#hx$PCQ12<&4)h`p)O$Y019TcX0Zm+h_CpunkNMnz^=cpL4eh%S^@cWIgnC04 zp(SYP1E}}AQBDN)h9)mYy`kNgpk7KZMZHc%|MjC@(Do0aUeLD7P%mif<*UX5bP4Kt z5Bfcd`3LQ}0`m{rbtUGXN`DCTS30n2GpefaMEXf;OLl@%`;M8A9v%QLhD?T6-vu{=XZK9BZ6dv8L$-iPhY7f>&#cLd7=H2Ou1U+C1$C{O)A zh56Er`2>wXCvHJMLSv)IhX!xO{5T8k`WniCW}#{5P#Vh- z3AB9->kX*u8)!dt7V0|({SEbI(BIGn=n8cFPK>7>tY3Gd{m_m*7*9~o zIK~sS04+hY-^6@57wx$V>kX*?Tj(d~U>5C%hQ5vV??U{$(SGO(Gzy*m4#pF7b^_xG zI{#fP=jWmPdoa$S^Z$zVKXfjK`2rpN9{K^=eJ|!qFV;8T$2fqt`~d48sQZUlUqB;y zwC{Y>??=do27iolpmPtb8rlV@$29r}sy~EwLzjMv@dIuD8T#-2SbiSH_6O>kMLVFq zzd$>nlaHW&eb71d8+7bZj6di&RBcZtpd(1P{@O}+K*x}tflfeE^Va{pk72n$dJ)=m zA;!yZ5f7SKz&M0<{|@bih8{~d76!Y_cu$(}L zRxo~{k^e=x(C}W&f2eO2{ofBiv>iHI!g?N>T|@stlj|rC+P@FweGubYD;eETzpZ2p zK}*m)bkbfjR-nUZ_NG9%zfRWDG->p;OSQO(jFS9P?#!$p}CP>PkjGwBrRO zV+`87rDQBXTMjB2-YELnRWiDuO)o@w&>3hRS~wVSuE4l{QORh7F1t&{05o4;GA5x@ zo|56d66r%w59sjAP!DKt1L^^7d3nhwLEVidqxD0W&(L1z^ealn2z186q~2km=x$#8ubakrL?AXGmL^?~N08EC?Xc0rS`LA$QP zeBOq3K~smLUC{1kv_6cY&+6!GgAN5k{3ozfHb$zH8)O{h!8^kz; z_Cbd(LV3{M4`9AP0}=GwC$T-b7~>zh1nq||T!MapW-mo~(7}H6!}X}&2QmMk!OJlJ zp#ICz|In5w#sSoKMal4e3je)oZ4)X=t^9i);GZ+tp z82`}X^%&RCjwI#>H2-PvL+Gy?&>m=L2t3sDS@a*Y_eSuag?au( 
zUqJst2S<>9BepAF#PS90`4aM>Eh&sU=;)Wx|DVJ9`WB35X!BxF#e!ZX|xkM3mt{(x1qhz#J`~bKacVJ4U9u*!yQfr=AjvA_?wtN(8OI>j=z9$^ev1BXfca&p!(eN~IZ|_$82a@VlnYHjbH77>KqHSM&R3wn z$2fr&3gDqbPoNyAZxQ2Y6#f23)Dt@VBzS2167r#Qe@6aS(cb?+`OxSxcxcztSRSA~ z&{^n65##+HYsN5ieAAk-2<_RtW_Z7j`fgb>qR<)W7Lsmy4Q?RXwQq+j0NSZUo#s11^xY!HKPOC?pZUE(9}!UjA>~9A!~*{hVjy{ zW^_WE8rO^zw86V(6rjVeTr*m}f%W98){FsY%d6Ln9Mp5znxWl}`RrRWLeTKGHDeIk z*1TrSKo_=K^;(2_@4$R)LH(d@JJyU0RBuIlpy4Ce41Wgm?I^^Dj``P&NvQW|)ay>v z^G%2kEe6(%1a$h%C?6W$iTXpmZ-H+&)=$tj=xiJM4eB~}&6t9Qjzjr-u)Yo9|IqM> zYv%ctZfNs3)`KUl89h)vjQId{K}Vp;ccC57_EXjj&o@zi$C}XvExdcp7=liniulmX zd(aPeVZL^*83E|h>1#$D+I9x=p|SU^8Ln@kobELv2rWXR(229xj68JVoHe7Qv+^HhjDBc*7v?)OavtUbw7YlBX!?*VU-+J>agv_=Ebd#e9ak zKelERq5TQO`B(Tqj`l#?u0y?{ou5Si<H`gY8uQ_Mh<^jd z3pDf@j3?;u5XOu8|7X!J(DX2tyL&O;pgqum&s*bk44OnbdXtsTKyyf^MiBS=7++t+ z{DUrj3F8nN`ZD?p8ir0m8*Z_zXkE z^?hy4Sb|QcF`j>b@$hx@D|B*f&6tNyK^LII-@tNxAI9nJhzHd&m_N`wbQ(H*C+ht} ztlxH{-p~bT2%6c0euIvVV;n(q-@<&!gM&6fhq4$K(D=8l`Ze8cjki{44F4}dv(Uxw zpq|j#?;`Gx;J*j`4Q=~Zj6>)ObOD;nVfncqy0q2}=$bPDO%kFeZ81NUQ`PN6(#FLdU|=m+TJ1E?P~{BKy!A3(c) zf_&%*v=7?*kQHwMI)ZfOr&!*hlQS4^4`RRP=V&i9HH-E`dw+rD5IXe;;{6+Zzr;9# z&OM6yKo_9X(9Ex~Tug(TN54Z8zd^r1TOUKeKnu_XX!^J4m!Dugy@2ry?fo6b6}0d; z>H%GX&Z__a-l}H?T0pv`faL|c0&RH+`UkWJy6^;+7ijL^Eq`bc^+7reoq;y|am}dv zDcS`MLi10eUeMt`p{Tfz8* zhDs<0I=6=Ydl>)Uhj9VTXzNA{+Gkrg#-Tp@x>1079P38I&#@lR*NqNnvvb`@K)svR zaSk5yee*iz0rnfA&9jJ8w{G-6hhMO6j6h>s){PnHoD2EC!18i1%7L!DXx+#{hw9gj zx<^pnOV^EV=n8ZQ>TO&%7NA9_=a=AKv2L_OQ(o{;U(>oV2hAR~ZusZ0-PpEn^h3uF zUpF$)+2(a)5n4QA-SGVi^K1LM(FGlQ?YfbKMq1X5NoaP*y5W8l{E_QMFSPp&>&65$ z`$p93*I4fbP%mg?C;A1Ndds@81f6a}d*-p6y>;E_gH9ida-q57){P=G{zsI_hCLk z2fNXq(B-pGugAedz0myG>qaMZ?woaF7&_QvmDhAG+Jkg6R4woS{5kL*ILl+#zWNfo z_6z*J8q?}W^=a=BHdC)E>*vavDeI5R`dC@_mi5KkE$%gCJ*KRumG$DXUR%~Ll{H(| z@0azbWqq`)g|a?X*0r)8^je#CFzR~>^hK)PP#5&vRh#xg=pQcKq`e%9&l+m4fT+Ai zrAT|BxL9xgHlWe}y}s|#*&{T~iT~wr@8KqB_bHmT1v-Pc2SMHVb>KJq**e_2f$RAA 
zhX?v%{JspoU4OEf(_>HCw0fj*3!e58{MO-jec!%I=l*EZ_CZw{%h2R+Y}#I^Dr9@Xs;u&oS`NG4RhZ@Xs;u|GzO%*%zp2-OU^O2bKTTQpLREZExGz z)Eqj0*M;X^(&XQ9^o}>Y?nwU;)|7UfA0f^?|J+>{YPRi|biV0z$7!3kY(B2e_5%AB zy!((muoxDyY%9V&%Xr!Q!&qtMlZW?*A>z8FR#_- z)%j}QbTfX{K4r1|>w{K))PAb^ISRjO-%T7wlImpW3HY zzk{#&YhM8QYQI+b)V{9z?b&6Oq4slCxoV#`h+mafKDBSGe$_s4<)<>$j(@2ATjf*x zw(2*Go$gBg)&JG+Y4}z9!)jXX6RY1DOslr3--^E%(=WiU@~M4l^{e)&EBQ**XulZ0 z6`$G%SHEiCT;;3wReV=sTGdtUZ}(u|S^cX0`^rxx?g#OYim$RxZEdKyta!>BtMZNF z7)t$yx+_tvx+&_~DqjJ6`|7uPpT9y3mVK(dOYo@Q*w>IK|5SWKNUQoOU*AhMo4?-2 z$|gv~_a#ij*RRS?{O4x#H~32HI7qoGzS}IH883fuou+;(dDTAE_v&~{jRkc~rMc_O zV=9$YKNSYAATW=|lusRNx!|khRr{upQ0-7YH+=5D;rj{lRevd;I{xv%HwsXV74@TH zt8wrMek<|RFP1#t-N;nSMs>Uy;|1DjZ>U7sh*ybIt!jOwejMde)jG#YTP3S?ivnbwa>^I<}rkMct+_0L!3tM*jpkBYC-dU<|m zm>(+bF83c=t_90!RSz4|<)Mj`S~jfuk9@eY7P0Cdy-KD7->7}b$xjgPBfgpZ)08d}-$ML6`DZ>V%Ws{O?e`PEK>q2gCErhc8}akx z&l{4@5Fa9bmi=+bFA^UnK1cr9Yb5Xcfoy*#@dfhFT`T!s;=74oB>%$4BtJ@gFY(Ld zpG`=9f%pjV1@fyG7+F8oc#o35NdAeB%kr9uj}bpb{@Lp!-$#6c_yYM|*GoP_e3E!C z`6nsu`=MS^Gz7MaIs-FK@ z=E+}oqvU&t&k^51{#o+p$=^)=B}y+4KTUjz{G*?f^=?mKXJ3< z^Tg{fklWW`@{fI4^4gDO{awUQkUvU(5BaCapS(qu7bo6Je2V-&%1={%3*~2OTwX%dzajEBd|mQG#D|G*BY*rh$pZWk`MfwY=55kB>6MspC*5b{5{{2 zUyb~h8u|Vj`E51wLpAclHS#-ahWK9Mm&l*`kt~0a_z3ai9`jU!b)9Guhr@;)~=@QT__$rzsy_6A!zqv+!E9Dm{9ismItM>n!?4J^)6U1kze_hj(*B_GUDdMxl zd&oaQ>E?&!_{b4IMgIPu%JO@O&l4Xf|1{+%DL*tLn6MU-vV~&l5jSyqEm5 zly3OB?4JVh1@dPfmgRR6zeIeV{FzzF4-sD^K1crGBa&YtzC?Tn`IB>!Z=IFx*AJ5W zmnrf`$?qb6hW!1%lI6vT_YfZ^znAh8l<%YbWR3ix8u=qN@>4bP(>3zPYUGdC$j{cu z&)3MGs*yieBY(a|{!)$nVvYP#jePx4Ii98{JxSw>$HyG`3%{1}v|q^Op@qg*k^J7@ zNio#hrt5kF4;y2mB&e?+!FOuU!;uHQ>OPJAcxn~+}d&wUrzx&^1d2Pf;h;Jglz9{(^@loO%$iGbKN#bL~*ZonJm!Y&a zC)=MOK1crECuO`g;*-P=kiYp)k{=*`nD`+1mnoekK1IB~B+JwOEYr)xr-^ryfAA^E z`+p_-FGKtY`3K3LCI4*I{Y#qPpDFpDmGZ6oo$Ba*r`CS!{x)@I zrt*8}{wI~L-2W6Q@2M+M_dk6Is_wf~KQ_>cuiTHOo@i5CQ`P-r38X8}30CgUYOl(_ z8Tsn|wMuy^p1S|j`gw`=jjFT_PsS-A|Y_ z>i%8p$GYE!%WHV)#{F@seZy7bLDi?9^5YaQMgE?b$#_}vr^z3Fx%4lRKSTbuS4h9t 
zBiolHzxS2W-$DKy`AdgNe}epZ@-H??{{;D`$v?YQ`j^N*Oa4ip^f$d!ws)TVnQhYF zN&W))$D5^pko-&J&u*9g9QljnpJLviwC#2dMls@yq1TP`-AIjGv|a zCCcwQR{A@r{2cK;9eeEOva=YZGh<6cRB)^C9$4-** zyp-P)mibykrGD1-z(@Y6_eg&W<+q(C^Lr_tpZt+Cq<@C|ZRAg#CH-wLm-P*i-+!L; zkC8u2{)G#qzeN5{^7}88et)B^PdE7oKOp@P^7oQ|{$lA*lRrZK)_&=qCx4XuiOZzl z^9ou282RU-(x0Gw&y_MiN%`ZHKg{_9k`GgTQske$TKco(Pm_NxF8vwGU${o*7bsq~ z%CC+G)H8zC&%xR+)B!>Hr}BJD|8*Pj)T37)L^=si<@jc~DqqFRRmE4~N>%v|gjz$| zhl~o};g$0%Pka+TL2Lb3_Zv@Bei!A>P&z^JXNjL9|H3C_d1>P3iC-qa`%{u%B)&j= z6ZwPqYGQSJ{jZe$vqXFc`3FBO`55s<;z!6I#rL|a%by~?M0|q$y`PbM-J!Dnda;tW zwjWXQXFeWM9VQsHk zh>w!L=_bkd5br0xmHhLRP7&Wm{1W+RM`ZbP#D|D4kbmT6$+tGi_J@fdBY$^F@`J>8 z5+5OdH|6(Je&EY8{sP7CCBB{f?pq|^^lI6@2=UG2U!Zgk@loPe$e;d-EI&njjQA}1 zBcqa^BR)ZVjQpKnmAq%GY=4sYUh=o!D)|oLhl%eZzm}GKlK2$yZt|xozk%{Il;1@8 zlV6wR7pQ#ZXUOlu2ji;Ozio%f{>xJN0rD?XdW!fQ@pX5|^754Lqx@;g57x+!Q+{H% zEKm2z_Hq1S@{fO0@^Rv4seY5>pUg^Le~m1Ep76n?9^zBv58f~N_GVeWm-r6yxBgi23F3Xk2g$!MCHYC>TZmsK|ImYyZ#Y8M z-%tD~`8%g2-%orS@xA14cu4Y7#D|D)CV!anTPVK|-!iRU|21uw?dv3dfcy&&OFl+? zH}Nav5B@^(bHw)&-$DN5FD37Pt*n29_!RkLbCMq-K1zI&{4vU(p!@{oFHru_qq6*I zDxdjL@^{Qj-q#}ApQQ48$lvo@$;XHvCcdBiDaub#ewy;rlt1>kEPsW{XMTeGvwx6$ z^mVfR87jX({w(FUKOy5iT$NVG*9%o?b^KlWFGuBj$Upa# zl3yWyp7?h1-%^!U{hO&utNv~M z9~nRJdf7h(iXSBZ!m8xs#4i!=ElGco^6S=Qe%-oE7b$*;_y+Rpf2p*`+P>@iWIP>j zma%@U^ELj~iod*lC*DJT59Mn%8Lvp`D8=^@uiGW>qx@ycFF2$>OYvKXFOuI+`3t&? 
zH|vz?BE@g3;?3o`>ipbc=;{*E>JAh2=3;YsuDX9CRGAgXubjUtRF&6>{68SA-t$vA z{}!q$PmSlrD!+=aZIbm3SNWsQQl3u!DN6f~l;fc~o+^Ki{G*#?`BCCKtLmfpaq{=n zNj^(_H}M1HZ+d~`mx=EszLorQlx{gnwm(ArBKe25$nyJ$j}kvh{`P|;pC&#=d>8pk zlwKe{LA=W)%bTFI_YJcBN#du-AAO;W*G2p=@d@&`94z@^;#0)8k-vf7KQ%*qn)p`o zyZo}fnJT}zy{KA#KmIJs(@<5s8<785q?<9$E6Z=bDxMnu6IK47!v9^QtM3)1>@AF8$>5X!J zE)hRK{A0@t< z_-^tCw@N-sd@u1Gu||05uYU9Oa20-n|I3g4-;P`f3{hc-%EUo_&oXJM@W8@_%!iDFZmGh^TZF4zo%95apDWa_me+(q~vqNFA?8C{<@y`EKGv z#5a+Dn$jc0hl!sjf1*v6KTCWk@gwB#c&p^yLD~Lp;(N&7aE#=`#P<^4O#V4a4-y|C zewqAZ$I9~a#7BvrAb)>Q^4f8-{W0R>Q?i0>tT;CRW$h))vVPJZ{>B%dRG znD{30FH(Ai_!RLg5BW=!o+Q3Vyz3NMo~uKqOT?Fm_mY2x(rs^-{ipwLC2ej0 z7sx;IZdv{S@h;-W$lr0Q(ki0>u;;Cm#$OuU!)QS#?IB_BAU(q3!-!AJZ&`CCty ze4O|e;@ioec(3HAi1!npB7aAh`3UjT#J7^a=|ahm5kE_Ofc#AtNxne*JoDuDeL(U}VcGry@qYG4B;P~)67j9% z_g*adQR0ilw~&8<)5MpEUnc+fC9-_)yJY+Iy_K}J{huU%@1>IOBHl%Ol>FWOk{>4C z!#w$;AC&wI@m}H+4CPrRS}gI7v^ zj`%j>N66pyA;~wLBHJG#K1_c9faJT04>M2x$W@XbA-qcdBfEn)nIwk0m7EM|_6(N%F@( zF8MLyv&4^(zvDW|7l_Xh-$(wYPe{J$J+l3I;sfNb89rw!c994Ee`CCHW5Gmx!Mr|4>r$L&O(}A0>a^rzJl_e2Mrt z`2#md-gTO6zrI>YTigG3@~==jOuUPD_h)2zGn5`A-b4HX`7=W@UY>X_@j3FxKP!3d z^h$fn+kfJR$lrIP9bX1>z&*_unD;4&s-H50Sq_=_K(*;@ufp-VCLuh%XVpNdB=qWjy^X*?zrLNtciR z$v?1L@2f9r(g$BFMIK1hE5cO}0}d@u1K^6U3VzO_fTKSI2R{8N;U5FaIej{N?dEPssn z81W(Ye^2tu#3zWKAb;Zfl5ag%wm(UHn*58Cl8+HTOnluBq`&7rna&fRB7Ttk(?68F zdzY+#n)oI1NA8z=7x5Y5ljNV6lKcemS>or&Kl-5LUFXUA=ZK#o|IoDLyNJ&dKSBP| zPb8lvewujiL(<>-Q<+{OewO$I`DbP%@9&lEpC^8q{H+g5K2Cgr_zv<9|6KA@#4iz_ zC4cG{l6Rjk>t7^3NB+TIO1_Ku67d=Gr^&CcRm!*C2VDLB-)A1>!*e8+=f`qY@eV>& zzK68C>iH8_RXp|l$#j+fNcew(biB$Rnv?Arr}}v)UbxEtUZC$ny88JSFZrWY{t@`E zK)U+*ml-P0M|`P@SI_56{z}$&k@yzkXUOlT{5i^Rqx^*$`HMC3muuv&)X3K!mF?yF z)z!#%*T`?Ek>6A!zqv+!YmNLsjr?GZ{Pr689X0a1YUKCS$nUF>-(MqtphkYYM*d)p z{Gl58BQ^3zYvhmB$RDqfKT#upvPS+?jr^G!`Exb$7i#1$*2rJ3k-t(SU;A~9<-bP0 zyGDLPjr^t>`OP)*TWjP8YUBrN4iA_)g*{$UpL18L#R6vi;q}kCDG)LGsC zeuVf4^W<-MT=KKTM~QDH{|u$weX{*A;upxD`MoSZOnid)9Qg+el1~z!Bz}ba=|4z* 
zn)qSjv*b@bA$iw@vi&LIGvx32cgeRCpC-Pa{LPD!PY|CWK0yB9A0oPyY60$!Cc#F;D)`e@ebcyuMy(k9GdGi~ND7 zCGU?^{MPoxMSKVOTmMV)QQ|$s2g%=1lzf(WFY&G9cmKEKi^ThgZzBH+rTrJn_O}r4 z{);RxN9ic>e&VOeKl(p1-Z=4X#E+9du_F0p;zPubkiX}DCEt39Y=4;e0rCg-NW?{8Q^PUYhtA z@pI%K+b8(~@d@H5$saf0_Jb1{n)N=WrheJ}B=JM!58Kq+g4B=2cN0HMd^h=9g; ziSHwS+h)nvT_)Q?~Ydd2d|LrZy|n#{1b;reuQ{G z@iXKfdYR;BiEkr*jQl+fl6PMz>mMS1fc%{=mwcG`F!8U@CkoZpG+sWVb3d!e* z?;h>sAzNdELIW%&cdM~TmpKYpm>v&6@UA0mJEt0Z3} zK0$nh{LM|0_YcVaOA;R-f05Gt#19j%zgm_zN$CvnDdK0yKe|=MTO>YB{5bjh4wHQI zhh_US#1D|a)hGF0;`?%#0SW)A0hcP@$mMUNMgE?*NxqHv1o8dk54>IS!^9_v?;!v3 z36ftTewcXe9nwGAF4LiFWcyRZkCVUWM9HU!PZQry{=SnWUnD+5`~dm;!;%kNE9;*n zK2CoB$&!x|pCdj*epiR&XNk`f?n%sNPd#| zdE)2DU+R>6iTDEXuG6I7d%8>q6SDnF#J95ly^>E5UnIVV{0&`_pCZ0Qd^7nMD6M~7 z)?c^Psf|B=%E$lY&z>R64-)SpK2QF^GbJA*-b4Hd`TO1{`3d5^#1D`^+%5Sc@jl|a z$=`aG$fQbHw)&-$MS@Udej~W&0z<2g%=YzT~@z zj}qU*{tG0ZB0ffZANhOVFZp@m6U0Z!-`6L3?yVl zpC^8r{NtBOzV1`9{nNxxl0VZg`F7%GiO-RL@Pm?15E+2ll&C%1>&RR_g^k~ zJt^D2M0|++Em6q_i7yi0M*ik2Bp)NbM0|k!zAGi4BVM=5?SFv$O&^kciFg> zJ}UVb@omKW$?v~f@)N{|h!2s!-H?2V_%QKZAij_M zgM*Uyepa?WN&Fc33!jvH5Anmqm&o7#DaogaPZ8fk{=rX6ev$Yz@uTElxen)pTXw|-v6 zOA$Xye31MtH%Y!g{56GL{pO@{|9di3$Ab-~_k{=}ARmJo7h}833-S6GtpLsM9|F1kB7Nz)s zuWa~Lyjd#GQ&pad-%kGYsN_92$@Y4Q&y#=ZtCH^~-bef#`6q9ce2VxM=E*;hmi#>N ze&PqozxZ{@H-AC4zm0h9|0C^f;G-(8_wmgef`WpAf`WpAf(6Uw1(Ft(kOYYa3?v{} zblGGVvXW%OX2XkLtWcpv3l|}e?I-xJj^+B=jF_qGiT1s{@SKr!*B!RFJ}Ck8U5+U?ef+!{xZhjeT_|j ziqTgx`hkpo#9Es_u*`1nM8;pk==VKg^ABbG5ypRl(N9`u^Uq`awTyobqi=ZH=HJQq zlZ<~UqtAcF=I{KV-Tv8(e*~jny58n5XZ-UR|7u3RdV|fsjPWmI{2Lkl!e?#%1B`zu z<6q9`$3JKDcYVli|8mA(!|0D}wD~I;|7ynn1*7lzyv@Iy@vme2`HVjK8=L|_PDa1_RlB^sjK4kO-^l0}zh?7yeAsS(XU4yh z(a-+9&0og&yEFcUjJ{^4&A)*0_h$Sv8GV12Kbz&}vivzMF(eK)4f4}1@yZ#F~KcnCFuFXG*@h@fk2O0f(M!%fV zpJ4Pu4%pw@%J^3^{xU{?g5k_x+3j7&_yh0R-|POqjfXP+jf}rPr~km_pTqdKGX8Q# z-(X;p=h+4(dEWOoyZmje{2i?Pt{*abhI>A0_s=fIznjr7_`6-+M8?0D@h@ZaL;qp( zZ)N-k7=JmVA92Ly|AO%!X8hwB{m!E{fBtH_{l^&pK1M(LUpD_7#(#?OFJ$yxkK6n^ z8UJa<-Cfnw{oCfRWc=+J{~Atz(&k^z_&YQH2%|stiOqk6 
z@potZry2d;Q#OC@ukH5tX8flZ{jyJO{&|eQKjUA+=#!t@{M#6RF5_Ry=sP}c*LMh` zU(M+EerbQNJL4~A{D&F++5gr2q)#q1FzJ)z*E(NdR(=^Pe>0<>^o`BGm+@CJ{suh$2E5UOl16h8U1dC%NT!z@gHRL+tRYo3zVGueG3?WE$3(S8#8SFeT+ZJ`1dmU zzH9CF&Svz78GS>R{k>eqKacUxVf4M**!*)C|3b!}%jnC`u=%$#{-un65~EMHwfRpo z{^g8+9;5Hy-sT_jgx!Cu8UI8^pV`6YU&i>?G5*ese%CoR|6azwk?|j3^z%B}{JBrs z_20_)mooa=b8Y?x#=nE{FJ$xs&$IcrG5%eQe<-7G-^J!X&G`2+{{D=YKXXPJb{JD(2>;jvA8RI|2_$M;@;;uITLB@ZY@mDhXq1|l$o=@5R zli9{z&&wHo*@ZU$B*x#K@lRy*Yc8_+S2O<3jDItupV!^yKgRgGGybKFe*DEYfB&cL z_V;G|HH^Nvr_Dc)@%Lx^m5jdlQk#D-67C*v<;{Bs!nq<%Jk=k<2|D;a+Sqd#z^%|D6pPh|YZ82y_5Hvc-tA7T8P z8U4zuZ2m78e=Xx*&*;m7HvfnXcKefzeg#zj&z4U&;7) z8T@3wpMJlMux0~+vjaA2X@lgfofG1{DV6G zUTM^?4*9EqO@H1}R^PoweQ7_^w@?-8*xx4@UdQ+kF#f%Yk!uVajVC!_zO#OAMM{HGXy+u=5S{so| z-`maja~c0mMn8L;&EIje-QFRLe<7pqKi=k_$oPvH|4>HXbArvkhVhp%{(MH?`DUB{ z2;;A0{Jj}{;0HE;-z|3gCo=wyjQ%vkHH<&P_}fpkzgK*#jn^>#TE;(#(Jz=}^MAqk zlZ<~equ*U+^N)DJZvSk?ua&B!|JJ)o*1<~||2)ROjnU7H*x%pH_!lz%1&qFUvd!QA zw|4!PGX6?NpPXXzM;QNd#y^kI&#tlgS2OP zLRAI-uGaf}<9ln7UjRHF`RY7zukk%9Z?Qp7-_NgCg*x(mn$`CJ;~&B3565l($`|eN zIn4Mo8*KWO(`>w)@gHOS+Zg@nMw|Z#<3GjtyC-e>%IP*9@seHt(~LjK=#R{>`PVT1 z%(nLW-gc%wo`ZZP4?X{_^QpY0`|b8_ zJtF5p z-k$MierA7fH^bu@e`m&jkkPLpr7DuM{w`zu-5LL8MnC5Zn}09k@6GrZGy484|1it% z`d_>JjtA`i$z}X~8U4_&Z2oe_KZNm@Gy0CFZT`iKznJm&Wb_9Z-o^OK82<@Izb&Br zEzbC~ea~)xCF9@6=+|f3{0)qMBIDo5=$Ey%`S&sY2;*PR=!cwX^ACOBu754#FJtt( z&$9XFG5#duKgj5NcCh((F#g$$e;}hj%y8xhcKzou{!@&8&Dr+%hcfbCFbDqtg zWc(W$|29UyxU)c$mZ|(H@p6a8UJQRf1ta~U(5K9G5%wWK6#1FzmD;r zV*CpkeaA~}{_cf&G^e0e_uvFbg<1o^zU~2D;a+| zqtDN``R6hIiHv^)qtB#Gv|%4^WBd`u-lptWM!)g~oBtr=-^lnkGy0h$ zZT`OhwClf>@h@id4L92SNyfi}@h@QXLq^;DI~e~i#$U$c$|&bF#f{^Klcxkzqh?n-tDR={JZiW2X0dT*gh~1f2IMO{n1A@h2Jm!AUm% z5ys!2@tn?r=NkN6UY}E}hJ5Zd%6l34?*R8jzVc@eG0GF?B?f%}pZ*%y zl;;LkpJK*8uS!+I+3ydjw($bSU&i>$7=3NT=HJ2iD;fVBM!z{~^Jo6s?$3#ge;=do zI>qK6!T2MLKcCU}ud(?TGyYn}KZMcGj@kUX8Gn-TFK6_pZ?pNkp0L|LoAI}wYSY)& z+ISMM5KS=oAq4&|+nObPimD{EiZ*A@_RVwhuY^X@Ls@HvxBc^H2V767a?) 
zD*xpFrUTc0MXMgAvqbr`4NUoS3`~^kPHTF~UvFT_S#ovGY$D znDQr`Yv)G{JPYN0xiK>^6olvEe$SJ@T@dd^TaHVCcL48Smlaqq^Zx^^(z*B}S9Kl! zCOm@SIl!+E%?#`n`RxPG0guLyzVDOxufT`$(*ySb)Azmro_~8*;DpTY+%6E<{!~U_ zr^JJR``~=&pv2X{oqm=P*e&q`z%Q=R<-Y{164md&3}1OB_}6Lr(ZDJZ{e8e`*JZlx z|2=Sz53>S2aW9+t>o}wDc$VUJ`XdM&91|Ac{73USeYI;BI#EFr{VXmX@#czH-J^5{%YGX5U2}g1=h*$l>mP_FUytZslaK! z&J1h@E%C1eo&|sR35nkYzWZrC{{I73iORpIQy>ugv6kPffmNdX2N}-9KaM;j)0O|h zz@zc|upeT!H`FGAJ`pChPwOaF6!7zUk*FYN|ip@?!WWz_G~~U&+4(_~a9r zfyEMk0=xzO?OchkghGuWUQEsRM&N>&9?$Fl;bCH?)tF?#Ps%HQvRPu`UkI3@Ex1fH9f<(A*`e9U*mPb&WLzyrEu zx$<*Auu9Y)8-eHIy~)n9eg9zj{{@~fpiO|@X`uQKxIl3`{do=WQ|q-nPh$BC7~TN9 z1@UIZvOOQM{42ZK!1+6RKzx5TaF9wrDF3JA^uu4?^e}F$dFEfxS+jq%Dn2&g0X`rm{4}eu7 z`kw)J>yj?w^++E-47?Wds^oDG@aq-nfrR8gqdWBDxQsxh!~=kPZPNOz0$3#~zYcf{ z?gwwi-y|-s*=@;?E- z8~&kv^81%x4E^y=dZ0}9PZ{uWd|$P95wJ>B|3`o~^~ekyz~5B=KLH1?$qa0j?Qeex z+Gxz*Yk&{o{JC7_PX+$yqx3*T^4|-r5|#fZ@JXCkOqBKcAMqnzN6Gj1dSZNjkP(=q z$^%x3_r6p!Qr1e0)ciyZ$r+Z@DPT zZSNZ3(Z+iGXJD16{7-=&oTmHd;!9PoGk#-%AG}rT>pOwpLA+brH0S&4flHuF_DTE> zaN+0a0eZ57>URoQCHme?mjwcg5Z|lD;~rp@D1R+*Y!;YhdHaBmj?Q%TLq>1-Bc*-Y z5%%(#z$y`cU*HGu9-Y$9Q6qm9@|PNz=>G)V&d}Fi0T*t=_vH7#(?^x=^v4f@3p?uZ zcm((m-9Jxv+WTkV0q1A9?fZuDUw=8`A3?9?(~p2}xgg7J-&SCisD58C9J~VaVThi8 zHv<Dx+#d6u6{lF9cs_WmTFXjj2wGF;S<8=+NO7#8P8Quc? 
z<)SQieBJ|2doR_=i)uxzOp{|0KZ|({}+I# zZOjVHmid1N?$JHdT`zhLfIczgwG4Ow-UCte3xM~(kP)br{7(U|_#j>UA`5;0O<H_>_8sXe>m{8ff=rR)B`_=_~f%?{{6sN zKhyp3JK*Ej>heAV?v#<~&X+5LkpC;w1L?B9(|}Jtp5gZQLzEBuP08Eufk&6<{ncsU z{qUEn{`gKd#s~9D(T@jKiP|@V;a>pvLD0RTe-U{9uXOoeGx~xY$osikzeIswoP&63 zIeznj_b1Z>Lx9O1I1H@Px%eX^S0VHR{=5MA?q6iO_P{9MPlso?@|&Q1L*5<%e#cnf z{sOELmH#pD*M|LZejfC}Bf5Pz03UrJGq4S`G@c893o8(>E$hD?SS2d&bKoTx>GAG5 z2>b16x_;jSe)TQrPsu+KSS8}W3%J8M(9f!T;7<2vy7t6h8GYJd#ZC1K1d4#a#(OoY z{F%U|I3LZI~%!(;ZPa8sCAyDp7r_7`}($-vO_^A>Cbn+I~-QJMA6Da2@ch zFSWnn0pP2#|4`q55xC$d8Seh{1K_oIU+0)?UzhL0ep;^gU!}kk?#v8ymH9J(RigSo z#qciRlh6lCCH-mOx$B^>Wq;-kQQS`b%Nec(KK{Fm0JhLV-X382&oTT3!)>odeb3VS zy?o$9#`+ckF1akj-EaL2ID9a}mA99HUvHlg*otbByq^Moa8bIuf4Jxx*lYdM-T4p( zo;FACCzb*qg?uRa`z>(E7QLVO8}R67G6EB2|72Zj_eThL?O>;Tl9$=Qk6}Gf`;kY1 zdyLfkubseMrl$uUlJ#vjG!Phz^X$E{{5)WlXuP6BVbA1gdHf;pG(%rJ39J&)w=KkY zT#5aUEbkiNk$>0w@7sV&asPFf9Is`-qo39Ok(Ypbtxk9STOR`FHR<)M%`o*fM;?X) z_rZO}WB6z4|CucRWrjN!!M-u{+c4l+HQK(J0bKa|tibLxr@SYDJ5AE-?QUR|sJ&Um zXfMuJm43ShSS8Az3;Y1yhf?d;Cg9*7bbtJX(O*!axSjUp0~apCeJxp^C~z3(;k!Ua z?R^k<%P!qNuQL9xflHp${+>R=S^2=L7Ha()0$zdlj#c>&G5Y6$AB25+7{!pheaP~M zT!;D~zGI)PZ#}R|)E{esU;jf!;DpS7hvgTJP}ENQf5`B&z_Z%w{mKU{{{-;snQa0` zGMxUotQ7ri$m@;3+YNi{PT&o=B((#drT*MR`5Pebl7ByNQ$pJl>DOcGV}G<)(&qpl zGW7c`z>mGG$Kxl!JsyI+A=|qSc<# zzry+=+jAB0$?v5HE(RufxEXl6(I3lzRig3V1Kg#PwkJPf`F%!VzH~?b%JOS~RU-Z! 
zz@uMFcm0`1fM=mURr@c#5#=9GcloadR!Pi9;Kwi@ho(E@{afHI*XsFk0C>Owt&iH3 zD{e;~OMyEX_TO~il19CMeT32P2JUL?XZHaIuh9FSvqwW8-K+ig*}yN((d+Se;KE2| zpr`DQ1;8p%dpeH+|HJ9-`tyBYl_)>P@KWFn2eaJ$!&cxGkqqJQC;2}FJl5EM^{l}6 z&&dctOvU{84sg>0`g`Mn+g+g7m$|^xu>SOy^p66k8U3{zIGmg5j^AnE z?-y>B?F|F(`4IaHIbM$et8^~@_#f~VTzWkle^dQ09gFn<=R+GM9uBz22|B0ldMO|CtjMHI)|#3o)@U_%-^x zYZ0(Y)PEa*UpLm9_krIq#_NJxu>b1N#$9j50;@#)4*_rZwXW|@;L(@r{l`h*P9yaG z8)sQoWJCt-j0olIB%lmn|o{0)<^pELB&CSa8){~wbu zAF#hu_Ru9^mFvjw?ZDxGLm1`wKLNaDUwYtjkdb`-iP2|PLH_R0=P!MLC-m0+T@Jk6 z7{8wZFT#FJtv9~|R*CBOUxv@DM)|9ncDSS6yLY~W77D}n1KU_RpmRNpUvm-N&6 zq*ny{NBGl~Jl_Nyya@X-@DlyqgrTpMKHLnf5`FI|aGue>S4N@VZiGE4`)eFp}aPxJNkM# z@KM++2T@n*-&cWGRb&YN3*nQ%Dp7mB8&inr(HFq6pTORb-`fCua#9;t|Lg;v3;&_o zuYC=?st@#!q`&$$$lJ4ey$J!UMCBg_{`7S%kKLwXk8PZ{lmdU$nBksxOaoSl`0oNX z_xn$>{C9wl<9+c{vOeu=L{(Ba974(~u&waov zjQX~%Q`}DfT?c%JVc*RM{s`+ArE>!v0zU;Vn4!n>1>jq*%5?SXKY?G|o8|f!&aH<& z-jf+Xh=-`pb-)iA@^TmOW0SOg+yLC?4zyQ%uYKSEa4#6&`y_4?hd#!BXr{#10josq zj{@I;_`kF9H_6NWEdN>H_s)U8NalYDeEiRu?s-H124KTKbQ`crRQ|IJcbo?OyCU6{ z*CD`rw&?zu13Y1jw)Zyzzi7UQA5 ze`$U2C~!Ec?VqzsRb$rAN5|x;E#HL^IPEUOVV9`%16KhjP3;|8FX%T_el_qG zV|{uQcGVZeZ&u6%b;fI0ew}n2=aOCL#@Tx&s?t0yAo?TuA!}kKG-L37D^}tO= zfBg-(4BeTIqkprCy=j?_4(l_;Qb5IU3;w&SS8|L2b}hGhHL-6 z!}8lMP}I(N76Gp^=2sHiIVa_++|{HyR7964kE` z`1O8m0yshy`eG$;r`74MyuAwCZVT*vng37VlfTjJ?RXFLm7$NX2JUb}hT9)iz$(%A z?gkE4Xnp%z;L;2A{_Qw$7UCn1fR^-E*Pmj}U5JmTIrwT|m8iU%fqS9<^YJ&)&jx0&=g{>MDvlTbeEWqWP`-j4V7cA_k*|GmH}QTfj?d<=O1WuTY* z{qI%O#2*MuVfZQF9_9M{`c2>^7i)jeH^6Hj(($L4{md@!M&N?4^!eH>mcNeSFM(H` zsrNINFSg4Y$M9^1S1{ae3HDbnYyEdU@TZ17*a)l=wf`C5_wb(83H(j{d64L@(Duif z_bFEmVZ9l&P9Q?WVw|)$K_1|^>ocnXE z-!DS{N&fExt3>_L1l;FVZ7)8}@;_zx>|a1X9LsX!Yx96rqVnbde~R-6HQ#^D@_*0p zSHOE7Lwuc_zn3gir8)h31MusHeRDVPrbp7<`MLpki_zcj19upk8R#wBd;WuVdA9+V z81~{?;62#PtMi;)z)K+CN*?JT^h@lY)%u?Y+z-Fsqx^@{fIAuAUjZCErN?&{uu9ZF zeSWEs)4pL0*8tyb$nV1}|5e~S!dhPb0X!F%AM#~;doIT}&eHbG2w;`y`?oXv3UG&x z&_|O0QM0v(~nghHD{=q9jOXW=fUQvksjqI0x} z>`yl$kJ|G~!p8lXjg0>T;OMG!H@>CqqpEaAe#bDpk>QiT+yAM*-(xj!Z@r(d23Cpc 
zI~TYU?whOg-)C9=+YEPkOmRE)`7XoP0e^jWrt4pw4qWo6_E)X|{^%AR-?5AFU+`

#3i_?yPJ^BTMS(ZIcas?URN2M){oUFYH!&m2boFvFh! zM~`d!`_i>``GvqsiuHcrC%_LL*5`+tfDfhV_3b$D=uk#ro2>6ePe7i#qz67ubLu}5 zcuz0A|Ck5d&e$((2Y#nh8#jLTQ(%>-Ke{{#nSwpo9)DAR6ax1%=3gDKN<{w{<3chXRqBa9_5nBjpB~@O zjePpv_nuYM#2*MWGW;~~A?&A5Adkv_2lz|GKh2c*jOT3rD;Qn?TnhVEwf8BO|99XR z)_>JsU7@I7?W^NahXHR`3VTp~uK_p)XSMqNYT(lE>Gtmd{^$$b&yn&H*o5<79473Q z{Cx>e)cScWaG^0j<^oUKpyl&%;FE^G>}}u^>qQDP+p~vGsV3p{58-Y6=*YUJ}1zs`|`cdhN z-(tSLit{VblKkcZpS(ufS7U)yqVm=;{1(F(ZiT%&PRGZM0lowAK1uLW`SXD{82*5l zfK{UM+rNnVzNhC?K5!45ukS=2@rQv6g4$mAHLyy=e~{rHyoB{)fR_ISz$y{_D&PmF z>-GK>;P)WEi`DqP44T)pJ$MapKV!XV09J|0TMfM8Us^xC!SeqF{Hd{j?!QfOJM#Qf z;G-YIKPbQd1j|2bJLczodc1RigQ!nO6hr+p8F=(pIKP+pe&Aj=YkPPTuu4?ly}(D; z>Hhf&IO{WgAE)mtkeA;e-Ym^2|3=^m@bAn4FUeyK@Kb-+{)`8KRig6uFx>rjD%WXm z1;h6NZ!qT1%Pjw2!2NKZbT*2i_V$G0>(E}u(~cuWDAT3*Zv$}M6S{r(8~L4(|2S~r zCHj8MPT=jUwSVFm@R9=T7i9Z;y$X4GP@n$~2i`CO@#`{wF7WnxElA4 z(f+Rvuc1A7-|4XAzXn((qMtzdI6qPHKOgw$VBO!(F#7X;uehE59Rz$!dwu@Y2z>Qp zI=MQEcR~Y|i3}1tals=Q``bVY#oA9QU#viz(R{OV30~hwz`tI`A z6}3~}+kkr+_VvBMFLrAaI4%3s+-pCGVGRG$R! 
zwB_lpJ@5$d{%`br-3k02?7d9+y`#XRt91Q4|526Z)Hevc*3jowz-fs0IR-MCA3p<* z8S?yF;P43@FY+(ocMSho=Uvc$#(B|5;G;dYzFPv^4(I!=Yg*ttK&b~zhV2^ zI{~Xi?f)Lbvl)Jg;fvo?+)jJ*f%n5+?JoPL3i!Pd+P?h>@TZSz`{YT+{|&>Xe}X>8 z`#`F{{||WWReC&D18=%b%l}`1RigG>3`JLf`;Kb;8UvgK|N1fHQU5oPB9w{fcK}Dn z=y;41z;DzeUIBU3pZ)fLANvb*tH8Gc2cOsP16bPR@6@amoa=B!_PB(0(fLMy?^Sx7kCKbz2x|XfqNuj zzsULYQ{Y+8WV!Jr&j9Z++Is-F8{#X;Orrie_Z=Fq^uTI~D}f&~`r}8yOAP*JfbYQj zLW=(Y@P1gom=dDAbN+0%x0K;Yz>l4$?a2i!|25#bS71LN+t=nVc6rw`d@pd1)0kf% zBY9Z|eD&FSejW$zfccmy+jsds$Orsi3f~H>5{>5_!25@2d+Rxt-(f%a_u~Gx?C)W~ zDiQw=fWN*^%kv!I9=Kn50D08ktAPhh(DA7s0H+!C>-;X(qxbz&^&X3I2P4 zRif`b58U*yKA-;__?9%S@B050J>fWr^yc!}k}Dv9!l{u~{T@E_o@le66W zliksfYqU1PT(zgulo!9P5r$AI9RLw8*c;88m-6Uj1OS{WNLrJ z)xdpn^!>pf0josiF9ja(zV=7|f#n|o{uK6c8_?4CFFy$Vw?gZW8-bT#|29VE&j3Dz z^S`zduLR!yKW)Ff3B2fBEl+11!v3Vcwukb8RigSOfe+2kboV!p1Am>{#=Xyb7`P7a z3(iS%>eu^k_WC~%c;pOBW|=>lu(3Z|46G7;?>Pf^0{#TJpRxbA=tK0svELa1JU7zD z-EZ9vT)0`Er>+1V@PTgc?|?hB)A23;0{;49y`By@tZL(o{~F+4-`Do!TP*)8hR^*7 z{)dRR4@Us2MD1Gye0-DkH*aP6?*gxYzoSO>*ZF@}+)jP21m5zzUQb2?pL|!_U-NbIs=>5}K|G@h6L%he6?u=J8@PzyI{_-ARm8ibY0Dpwvm*1P=&>sPw_6x0V z+Z}hx1^!olG@SzfYU+#>f7@t*o zzFh@;C{vH`NZ^s*)#G;`aM)O1UIKpVFFKy}FmSIYw7uSi6unGT-vz+Ii?#i~7I@Q7 zv)u9c6Yv8MWx4S`rx<_Re?h->)#YCcd~$GBV5J<-B(O^Kz2|`sVf@a<-!#9Eu>7-N zxb(P3%kOu|kdcY_e*paHDgB)ro8fa# zL7$w|{)KCSRigS;0GHNhx%YGKWciN+cbJ&vz6bGF;9fXi-3wl7e}_-89>Bh7BXJ(^ zT%&)hfYZ8af5-yhMR@O9`KMn14mRlVIr}r{j{|!C6a%Y7^}P$Y-Ei&y-oWxd1Afrh z?_Kse+Gng!<-ni*Ro`#99atqQ|L4F3(=*-maT{-jboSS89|%J3%O(MJ0Y0AKwZt&h|H zi>4r6W*c~EymEjaFzo*+!1E1zW+kvn^u3+Hy*}0J)#t$PWN81+fUh8*o3%XL1pH}+ z-rqF=t3>6k1@3iP%fs6&|6_(boW^+GtnIh&0iQgLcuu+g)d8zS%RoBN>tur;P5qBuD|y+;E`Q)JZYP6pr21>29`;A7zVs$slMMG173mm zZ5w3%gTN|L`A-AC2!G?1_?yP(7|Xx%e^{@L`E(O-7eim%2dom6_dakQierxS^l4ZOSWbNlCr+1f%kWWe+Rr&zk;;1z@lI3`#Le;_m*Y4@r}O# z9=#Cv#U%eW;Ey0L%O(B_SS6~@mFa1L4b#&D-{7Cg9vKI$66ODa;Zwk~{;B=V-80ex zZ$KZC*+=y&1unfm-SvMZfJX-P{{5H0v5R%Q`1`;r(f4x5(IFGz9|7l8rw8Do6y@(^ z`CYTp0uzp8y7u;H;1$OFx)V5z{YY1oMfFn)} 
z*Q@pIAHW4rHeF?X&OOWK9|W8QdNtn~foI*I_qVGV{dw(e{u1CVCv^Nt1bFl(THoFW z96qG`^A+GjdvkH%6^6cl68OtsX?y-H;626K-u?_Y z>j|yzv(HWo^t-f;>+c8yfB8NAe#*VT;WJ>5%l`T!@T)8JeZjNNL3#f}{Ef`N5_r!& z+TUIdtP+jSVgu9qxz@mx{{ismZaTj0d@5Kb${)<|c!p;JFB+%)G0y-WH|)>%fcwGU zqvRp0lU@GzfX(+^s)6?#^Jy`oe+Br(7xaGR2yidMzUX}}b5rv;j(==$XXe~tZ#(pMqi2iEHS$B%#) z{a*L)Z-KAAO`kVsUXT`e%Fy3I;L&+{KOF{Mg!^Ag9u@;mp{opWAE z>+7eC{`?E^{T{eqhGN9}34GM>S4{&}iQ4xh@TQOSequk%PrC^E+pvcR0|$p_dp`!8 z)u8tq4*>7M@6XY;jlTan@F8Qr@NeMot$P1)84flc=&0ktLUhm}6V-1!uo=(t58!uD zpRFLA+Fq*5>xuSV09<&vj+ZM0ermY(5Bvo9MMHnT z20S10S&iT4z#k!B)hGKh%%_|-?)`#l;H=BwPXz zKm3uZ{)NCQQGPYUD}lQN^nUCm;Mm$s_ded|z%Mpvd$H%`c6o)s+ppE{C(HmoYOF77 zfD3=G?`P}*9&N0DUjwUjDgLwwJmJ@RKiRu4`nMv}eJ?r$JpW!D?=}y3yHURlz_Hu)`z7xKt3>_Tr62T{ z`Mv}2(JaJAB9F#z8t~m14<%pE0cVw@2iBk%s_!9Sm8iV`0xwErxc>1=ue9@r0Kaa` zha~V*m+SW(e+yjj3f4EU(f9uaJm8^BcYoULJJ=uK{>)5SzgvMvcG3NFH*hw#6G`n}2UCk$WPKP@n}XO?@P`A5L-VgIk@@8iJ1f!ZF}4cyM~Pkap={%NKgznC+? z{(b^@|55mVWc!{3e$`ly_W_^8d!uUpoPAYV;ISWR|8y>JC-|pSeI^5U!~Lwbvb_s{ z=XcQd(MI6Wi0@PD`Mba>QGc8OZfC?pU-Dh_|83e{E(BJI=)=HyG40RV0(|sV9q)Jq z_-bRl?>7+kOjO(RmB6F3_5NWF@X_vifA<2gO7y*MLG-`TKSjVt4gDDho@K1ZtAIO9 z)&7XLfbTZ=GqUaSdjYTgt@d}02cGsE#vf!PfAbjqT87^S-oF$38A<;?;E(3%@#&sp ze=pEG8K@4|)`q6k#p^?j$#5bW=p6~%Rv!yB;P0!d8yW(k>&k8%R#+AaH8xd+N<((u z%@xVA{7@(skJZPLfx22h4*9RuiDYB4Y4YT&sso|W^+jVsWu+CBp%6eM9-2}cuL{?O zBFT87F%)i^8K{oeHPl9v(a2SKgK`G@6NM(@tD$fr5uO!_)+ZCQ0+SQrx@aiUR980( zECvcu{VdX^WURJPRJS@_KRudAMiUMU-8`xy8O#oii&mpHp{j6Wv~*N<_4NE3qO;1; z&SXhNxU?b|Dk%x(ghqxUQS@KDI-HD_l;dA-yDHY02wxS<%MPR}Xi)LUaI(6lb;S&F ziWxLr)J9Y!2kb6$adye{tI&eRWJ4m}a8*ujK>*p#FrXogMcJk0)g>jBA=GWm4XAl_ zi9sF|#iMb9smz%T307P%7oWeGT2CL%jl{xJN>IwcPzeToa#MXZ#;-b_h=%II^-W

#9`S{WMNjG61Nex^_B5D2wq+pJOWigiqrtyE2gHVe2`~yfKW#Ua1FqYQBObI?h)20)3@qY{UzdH^o!l17%3;JIR`X8gd zlYc+^p(tzLQy;jZS>1`#%Rt zRW=XXyN-Rn_m@4oQlRj`_n&%CW~11+%Zy^m|DQTfhE>zQ`oqy&{Xcs{Hi45j{$a(% zdrb!G<(`0P#IFW?R2Re+3hiPNyC)>U2b)3NnU~nLs);fGAA4T{AJtj4KP+t{)+VJ@ z5tl|prBv%o7Lrh_fv`+yXvc)MNL!Q1Muuh?CkX}AHnJ!o(l4T)QrseLh)W}Gh*|@5 zLDU+l3(`*;wYC#b8>v-9E&ubJyPZ4ty~_l!{(gUBC3&9v-t(UKyyxAQ+n7jE@0$ZG zj4evg1Z@L)s*&16Z1qBlB1ZprECe-l(CQ{CK3d_#FYgf4?6tQywbEL3#(I(z)6n4) zpfTPRpV!!!@|4m@OT;anA!t3e4eK+30!6$-wI#dO>ZU~QEnJgK83PQmu1R4);g$46 zX9X>rC)&F^y1i|b=&b3kZ;R7v>Kadh)Cu(#L|YJ&5#>^*%n~D%XIxz!UGY}$5Nng( z*4Ed^Ygd#|+G=T8lh&LPRu4WwPT7E& z>zErHXomtXb}pky?ot}nCK_l@NGj33lKyRqw^4VmD-zK|RSDR`|l07?8@s&*+36-_7MHp^((O|of z{AZyyCNf?vqc`>Sr?xigxWUw%W1Vwo4^3iaJ9=~aZV_EwZAS=Z@6AlrOE&3G<4kSc zqP1fsWvv*688Jq}G&i7+MIqdbvC*g>d^6b!m|YercX3#yda+{DfbHo5kFrcJFYSmZqK;su~*#m*eh;&>=U<_S!yHeno=E|wY3WtU}uFf^-#?0LM$Ah z!QqA9ghvWZF_o1HF;okBzRUZhjLixW?C)`}vT7rp+>Qukjg3vby}?@O2q2_3lHyH9 zR0=Vpk|8MGPK6ZHFP_@?wa3(jS=PbZhcxq+w0H1k4X#tTCljfyY)^_U9N4A76Fee~ z#Vm%(BRz}A7Z%dc-CIE;e&_xr%fHF0dawobvxFkn7ba~{1&Rj(gyK9&MoO2|P*qA> zJFs2EQx0_is$qG*K;F-rI$PpN?Al^?G<>Usb_{}F=;M%jFud+nGRb|YOQzbhB`urm z?rf=z)W;ic7iKAA79{Fe#il$dfp%LuN@x+tYzn9*#9lr%A%4ZPt-ICmU&D}v#HpOo zxm`@ALl@*xA8Gres&A;QjqnCWBfc{80q=0KDkbc967vzBPLXI|P3b@;7-DH#=xW+B z=2gD6m%P28X)Sr0y#g&HE}Pt85%$bs5q^2o*kP+Y3kj?8#WRVGiTFz0FuebidLW|R zM-Ro?ze+po(BYtg75#_ZFsQ+bxf`LyYlY>0iec#@M8doC@IqNvD&F3i?C5M#DXZM3 z=9Nc!0=zS+ftoq)C>bghGow<#(^^_@jO>LB_tMG?h-~!?$h-4U#B9##wQEn&4g`$C zCs_MxS`*URD_hN3M!Tc6N4mjm8xI#I8of)t`8HBn=}`gDpgbe2U*J~%)`G_8-pR&( zcJB}^O%1o-q%HMjv?&-3nGdO9ac+W@RiFq6qq6#*j zmYGkfXmi!Bb~E#ZT6%%Zq8A6Pp|(kk6vz-Pl_Z;+nT_r(4J~=~Em(9a)#qsO=rI^_ z1IVkSP1Xvc-1 z+m_L^l`aacoycqRF-!AHB8QGPwI)4;;c*~gG`V4O!VJyK$3cZSWq#Rd)bwO7{kB(Y)?UL?q!!$Why_;rWHBn_IO>(F#+lRTaMjGhG?7e+j za&(>H=eSl=UxE%?Sf$D)QycCpt0gR->1vheLR3lx@2RpnX0I0tDVs1WSTjuuT6$|8 zW^h5(@HPx-(qPmOR6!IoR*Cjdlg^+mqy>piOUpt~ioEj_QjfSPi;8u7m(u(WQz}W6 zDwWN7EaGlZLt<^DD3B_JfkG7o3gxm4!x?Sq<>d~YZ>L>aW)o8cs4|VpE?ycY>71Y? 
zwzv~mlgo2YVz;{7cjIAXI`~)FxW*D$(y1{)mG!DLAj*j-8Y4O-X5KCWb^_MbY`N|PlER`l@H%td9N;8+8q)mtlTN;`E*Wxqxj19~V# zi$Zk5$dW%tBUI88H2s3M(QYPB*^S+(>J9%U*&tN1Rgzstcx-Y_mbdw^L{Ia%bb2sS zV3aYFv(77KwCsgiJukD~PZcv7uPWD!*+=PkdtJp$YtE(l<8I8Xl(<% zMl1hIbjHek3_R@~kZ2@q&wiJ&4=ZEF;g$JjAWv=SP?wnpg~^UPR7Isxl`9^lFiCAp zYXZ~wRv|DsrR5b_T+f`{CSKLD83D4~y*d=ZyzN5EH_PaL0JOxW3q|N{gH{>Jr7|RM z^~_C6sCPL{w$SQ}rA{&9EcEbHLX+)j6S{FBN%dP|b<;A;XxlsO1!3ICkojDtoywcY za$cE+L!{TouP28x@}ewha7u^rD$3@Y-qzO61zkc~rjK=v7S_^1O{^h4M0-70SzVtWaLYu|j$I4Ta*OkHswNA{u!X!3wGMKxHeH zq3X%M{ETHu^{&@#3i#%74eF^>H-_^@g%T+s5hkxn0%@yCh1(hkx71Rmk5-8_75er| z1T-d`+AP{tCu_1)~fP~0Ie>)y*p2nw=6t(?^P4-RY7YZynO0*c#){l=4gZ}>B@067x-mO zskRDQ)oV&6;4kJ}83>hDstoaJOAXQ9(c|2wUb9+05uwd6 z;=#GmhS%n;I7$-j%&3G@!A7;1T`iVqY!-$r0#wOXNp)Mg=IU3I>uJSCOj;Wi3B^nY z5wvJ*!*XTJ9`y+1z+Ud%-j>WYjS`&+8oh^XtnVZmZl~L;-rh}D!%?r#sL~=>y0SfU z+oj3&(*sv@bjyWSUXz0m=KTcRKsV;mG2{h{mhl!;OBF0w*&n*?;WBi&zPqiyDRo10 zMeIhZ4a_tZlOXxXPa}HW)ILLueB>u)es`j^v9cY5#W*en6r!-Km-*O7TpJBrOgCIf zchso-fJzqTC6&__UiAs&K|ObOY;H{$tHZRAR0OC}Alq_4_eM5e-_gE;HYW@7LEwFN|442_Cx3Ia{BO00>#44b(Q7W=O37Y9%KHaoNIBX%IphCU$ZzaF) zY%0*Q$gE%rngoySpkk{kA!{2ec2da|H#)_pckBHlwT9*;dURGNO!wGoI_N~Va#qt2 zPfM)bV}r!uwQ{OQi&-J}ioy(J43VAMdKjp&QLDS`ihE|WW_nbP0hID8flY~(ba|%P zY!owAIgK_!o3S?I-Ct}>tfpI6Ya=E4@OF+ysHD5}jy$Ie4DoY3Y+!^b4iKg&LruLw zIjb&@5xC|Rm!v~<^Qie?|;wjL8*!fVnhr|`-% z*+DHlNBvXZ?rB4JeN%Nu2X|t%8JkjrTIkgRG@z^KZ1D>upQQn)ql|GC4xTj4s~5w- zNO8s^m0(qm#ch`}_cJqOdbgDg9c@YM5Up7&`)?6SuEpzG+4~?~D;Dx)|LEhir_1a) zmf&4tRF-h5c)TiKU7C*))Mi%k6oS!UmNw`VADuvR#V5})4te1A3im3#A(3j3i_LR1 z!fF%kt&BGA58kX8;=O7aBaByZ=Z)a{@Yv|SN5U4X%82@m{cqmxh|S~X_}jx?w4k@F zgE}sBILmYg+SG2*Dpg#3!e?Di2=#G|cxyvN$Le^Bc5u>(aiLf-m1g-!rJSnhl4hPG z{tU6+>eVN@?nq$aaSdJlC5&e9M62|2Gn(i8sUcH~%gd$AkT#{l27NwsLQH{FYDbG; z`gz3AiD;Fflo=SzN*qxjm1ysSN+qIIDxF>4aa^8SVeNJSeP02m3#>WSNMXHvkcDEH zr{slJ&&3#HftY;{MP;XbHfYwNlM2o`r;5 zq^NSt%Z@D9X~0C=%4Djep>}CU8*R|-#3eShk+yJHRemzV(*E7RVKupQy@;a6pq3jf ztgde)xZH4{u$o-DyCtx`!NTfx2&98%U)n?(KnL9p1wq2<`4%XwCbuvPI|K`>+c!|! 
zG<^#Kh1KmFC~dlZ1BKP?TbPA?1BKP(b_a7FP2qrnb;-fpN0%Hdur4_q$Z)#kV1aeX z!CXj}94xS5=Ri(m*f~&O!_L`QgJy{Xm+9_sAV)In94IjB+#b-8SaQ(7y5wMvq)QGK zSeG2kk#xzy0_&24Ig&0pSYX4>fgH)ObD+S6odY?NVdp@B4Lb*NB*V^u0=s-t&S?Ir zJ5Dq71q+Ph^O>VjBV484Wx0b^wNjle7-kwh-7=F?^_o;&CPz=q8A~b|vvzWe(RuM5 zaoeMyQBrR+WBHF2le3W7Yvmjr`XpTl*4}G6R%p^eW5Zs0tyqdR`fN4u_e!Bz5JQp~ z`M8BKb4sW8Km<*ct&$01Pa{760+VWLizhADD`p&2FU{8aS;nktJv~}~{gP`_HSnSU5^+P>2|NPej2GV~iHezKlLAf_L=e6LXRo&s3U-Eu_7gWCQm+8#i-Q?G3RV!fT#lGn@vBVT#;Qlcg_6EMn)9=O0{*Q zlGH|atX#X07C4QPL5FSxuTn_WhehipGL1eiF`mj*#tb9*b-(T5gQQrSVKkkiq)nT1 zR3+2j>-3j{6bPK^X>VwyE9Bc(R_Gf9%py_he5z7N?n0naQSIE#lIg1?cOKmt#`_qo z5*QJ*Djt8c)5Z7k_W7;dDN(NJ=#IgvgeslJKPD_bE~A^PQLIH|bihSCP{l|7cotFC z;F4CVM9eITaD*D|5?#W-?nv-u^&QLr8EG^s;sT0Z5kM8aOXm5PNM#H#vtLbu7wH4yt%X+ITxxJC z4OIrB#VX~>U}mW_o8;7=h11so)v~GgG4LX4=R) z4;Ou40dA=r92A6cp?~IcRlaJ?nCN*ck7d=A)CMW-ia3L&xrxv@?!GXB0`M(bZ< zR(5nWE~4vfx@j!Atb^*ohTAP|BV~%mK~<+qX336;djE=E5lN7or+H7hp|HDwbf z#qL3ew~-Jk05mNdvu(n|hbpWGs{C^^2|7-SBYpuqP{tV0K4Q{V)gB`isvxTLkmCi( z%Ej?+`YK1f{kkR^CDLu4Mk$Tp%$ofn-I*LH)%LdnrhIX{z9Z#OqKK6+pV;pwrxO-I zW{B(}0;;os&d#pZ)R99&oWGNgl&*myPq!)uvXvATSheOj-Enb!(;XFYI%FTu+;Cc; zDX9vi@)Mii`*)bw04mu5wa@5F;w^ogUrM4go~rZ>;@9+{cB<0pu$kr0WhUpAXZC84 zkeuejK4#Gh(KViP6kED7Ya=txAjWA1o$8KxJq| z0yp#+If=rAq#Wm8u4*Ua(q-t=eY4 z;%2(`y((T$p9-{U$w-MKSWufI$pPku9rC3b{$ApEUAW5lbq}PsHlt*eIXdJT2p-xMzea6L{&G@eRfH?leK&`qxK!BZBXIo7l^A`Gga zkprFc+8gN$5<=AiUDeDG!Ici`mzgmHDo6B@o~7~Io91_<8k=b1-867fgY8Kc+K#8v zvPTw7aoin2_bxAJYNoL~8Qd(6^o1e&GnMkx!BO?#0#pHz9qpFG20Gx#F@jaarHZKD ze-vd24Q!AUeH$VvoR1DXYNv%kt5kC_1KR0;3K~%cDJk6f$J&u_G-!x10ce$mfz&Vr z1Tz#L3C`$3k{TwhQiBW?pd8X6PED>+j!JO=6O>A(y!TGO(9bPDO2}L3va!l$xLVt6ELA)@yHwbc;6Dnj?^v^l2VhpSR%UW)O}G)Qy`j5{jtXg?qx71^ z3^4m*#uuc46M_9pLp|LL&`6yZp0&0ruL8-d66sq*(L@+k<&{_!OVaUY+X`oi;~~Cw zdVuk4~vY#QxanaQb?^RFi3NjeY3w=FG*SyCefl2=Jq-1JtAEWAZ)oVI3t7qc6$f@polgVDPSvw-VR4EN;+oc5H8htY z@34r>$$S=6-R!hA;tk-Org>>ils>y47LzDzGast7b!+CbGOrnVHX4hbN7zzTrK8I4 
zv$JTBe6Q)C$rY=CD`p&29V!fgDj+=)|B6%(=x@-Ph87x+-ix!>K?3fL1M9@J^MJ^Ly@j?g=QUI;sLc6fVo-gGB z>!xk&lY_bf)=uYx>PMy{`}ucPr0gI%8VkAF#yz8t7%7l^P#tZZ#UK@gj3h|Ou!r|A z92GRdD8B4e%s6OjC;|T6Bs~UqRcKaGJ3Q98wrydfd3*YGbS-JA@vKCD<}rn71g%Z+ zrHO8mB4O(}4Gmliw<)fjZnAU}R%$#9qL=n5bgO1I6E{bDlCjOBIzhu>fAzG>pRJN@ zN!6ZEUfS-SH-;xP#>Q2Zh4^5i+oGIl10xcnr|yVEjC)9M5P=EWLC_lSZjX!0icPVR z0_@Z>0@yDV^A}Dl=r?twI@&uDi|A{Kw45xL(yTbJ)h08X&06}5MRg4=bm9N@M8bL{ zWIg;YquCy7NB@h=w);jI(GUAlh_o9 zVAF&QM5(fUXOm`rE1Rnla~5XfJtPwa$Td=I&oTnIL|8PQ2=$_7OCk32Ag(|~pKmR0 zqOl4b$Y|It2hwge)}Ch+-k4YK%FI8T`~W%FM2g>#GENiO)1^Nn$FaMqze}1@18T6q zw1!gydNxiCSZV+bOyfB-guV-l_SM`a>7R5bK`gpExVt@xySbAr|-Tn<=UuS9QHyr8MGA(cqdX;&%A7Q$kfibZw_? zvL&n6SH|1g;uSdSL_34E%iFVhzrD3-HSZD#o^anxy}|T|m8p*IBs`bvXW17n&Ulnj zLt5^Rh6vw3#usWT^|nhfm1No2Blh5Fr24cr(z&`#J6Wf`SI~-5;l&Mf!L;$AfUGj4 zp1Mu9`d~y!jx?olqw8arkMx@Z@2K*^bo}WDJsB*U0Sxq237kjS;iVqw6!*+>>}WK9vrqMpdR4r4mPoB80$ru+z~E zv{K@DtCkL3;#1k#DdQ|T&PzwVB6Ys+ypuG$_q}q#nP_MH*`?np(d^wvoH;|C>#Vay z5rbKcDr0bV{w)LA9nX%;FRW&?a+W)(hK9}h!K;GCTKx=`Q9Rv72I38?VWUG3RJL75 z6UuiMYM*eSo|{W|RlK#hiFVv28eAi!!eq@C)YMisP@l9xn<|J<&vZcnRWyEKFz(6> zgZicPx~8>!$q&!CKpZ-}n7>kr^?AiDmN^U9hQZ@-eL*#T3}VWzR2(0+bQYi>FQaMe z?&Ks`-})^ma?7_!M7%(SiiXepFb{@YnNPo)sf;3& z^+_D(TyfS(a#m|ui{N=2bVJX*S;!k}O6d!s0gOXI&w)9{=Ok^YC4#s|F^A07FBCrE za0N$`8YCNay~pPR`pW@wKkHPBY*(b6o}Nu=$Ct&ox8rbWuv{)R0%x1iJ{16+2stMe zfT12bo0jvunRTh}zm| z4Qu4?-t^4Tf^r8J9YyqnI_F@TcxC&-*mVtU<0=AlgAOi&bav_kDX+aeKEI<+`e^sr zkH^~@wE1zdKJ9Ke`UDYArmxMS78|E6wRrU0^me}SXr)yrILV-g~lt**P(?+@bI zs`nbr^$555e4mId0mh7lhnJ`{$ShZpGDRp^q>S>EOg7QyV*JXJLRhUiH)A#?%1HUU zTr}B)f^1tsrDmDC$l6T3wX$sQWZM^(72nPBv8<^j4|3t7ep&|a`bRwbGmET|EY4BC-ug{P)Be|l3U}k(FGTwZUP1Y;Zlua7U zSbb7qm3Yl@Q?)^cxPkp+T)pRnH!_&%0KI z7(AU+j>TMqUsC;g&prxz7FXxHZT10_BKLeoigIRzXH7E4LtbsXt3Fv~k)8^#%yA^c zo+}x33_G9qnATPIEOWpb^b{J6htsuuMr=MlmE|DSnKDbqEHlP>(JOCMsk3K~Ujnl) zG?r#r=2v!uOs6f4ftvM1jxQAjkgv<)v7LCudw^#JG-~+ly1&L-j;^QEGi@EuKs{Si zc!2SmGa>pYFUw2|GD364?oeAyTX<>Xwh&Q!M=D`E#~Gmm 
zj7>GP)s5zP^z}a6m25Nv@^J~>Ufo3ZSZnJ>Ji-t!FBtDS44u|CK7+uXC!;J%(U-El z2Mtz{u?E^^A_G+h)_aC3fnGD}knAEe-U<*^rgH>|DDxd78huDua9zB$E#9sjS~ZsQ zkdyWYs;pd1+rK($<@;{ym|94j^~R36_D0(LNuB!!Y^QWt`E-9{=?#m=_mV1l2s7_B zDJXOaLZtvFsT`%tTS*sTs=2^%3Nn__y%cvYrfZSi#F^KfY@~y%OFOVF6E_KTq{Nwk zNSWfXHg=?sPgS;?=ipUG&s@!=Mcmo65;uivF4BrAV2{DRr>LLLlPvSqN!!+R~nABdb>@Rwf#$2i4x0ap`q|I)q9Qs+XN>nKCk5 z)o=rym8Rc|T08EjStMpbN*oWZFTP{#ID-4M_*@*$#<{t_mY)t$mMQ7FpiCtc@wgN& zrS#a^RMmB&+=eqE;O8#pB8#cqRlC*~*~;^wd}Wg}Uln_LJ&diZ>74raf5+KH@OWoY zoNi;P=Szdl8lfn|BxV>Fv#4xBC;BM=opU{zs%V8+?(Ngt7QUdw{!|scDsIXT)fqj% zsQvA>xV57`-g-j{+YsZ%ZgfrGf(VS}2%u6K6QTvAp{Li@cPCmKMd2fVh_@wp*RRp| z%IieX*K(h**1KN?g)UN6TB;0dckz0GbaTuUZz>2c9sEj5r1bdMO$ zQdrit_ttPf8=hyWgH0i`S7aB>T_MHUP0hGA2{xcu0QG&vsLUK1IQuj$iIpuYyA!oh z{db~G?;7)PkGiuZIzuL@>D0NDJWgj)vymS>Jj?IwYNGGD1=BrXSTE}>p3fF$ zqv+p8zoRKmpEPKou^(4jnN>3z(f%46GW*v!z4Xx{n!Rl9Zr6{Sb*)V*PYt<6dlv6& z_U^9ljMEo@C{pbCaXIql7JKJ--`3)8p${?KPE$9o8LqH%XO=H{<$euytCmt3Z}lu; zbSg{w2c2T8jd*60^#XfFHZQPUZr49AkWD06zQge}(nook+>M`+jKHB;HlL$(&DRXk zK?AeohK{D@a8SuX1GD6cpi+YbWQpzFK}7}%$uixm#|*0m4oElTkxSE|VfPBp675x< zs+Y-vf)NVZ3c4OC(T2(eH)^}u!+e*^%sAiGj83z4bm1CD%_~F7D%>7l`#9bSU^X)n zFRLv;6XaO~U3a~Hy?~dPg#kJX^RB@};sH{lew%GhQ>5rfv^90L zbntw*(&lObTJ&cjfHkNmcW=Cd4jGf{xOw2w(QBtI0HKITjBWf7s>KZ2p*F3~q=f~GJb^2d(!Ef-ONj5X}Jc{z} zk;#`hvDv=F1`g82cf^$KOB_>YvK5Unnq)djr@ThPVGK^!cwW@gM$-`#C0&$2eSwsI zU9PegS0r&?Nxg9DFDwBZnkP(g3LgAUsaoRR5w^Ob?ltDcoE}8xpvG#{uFt7mNfY3TomQM^hZL8b65+URh)~@?oYr#)q@;&n)1Bh@VhWq` zIxQh`6%L!3Y|9E*X`peO&sD=_GR;_AaurE5!+2=RjX|e&43y?kXnhV>c6oOwAluu? 
z5rR#sJGYj8yJMS9ZL_}Gn}s}&^y)FP$;}vDJXdyY_4OIO<7j!V2;=S)>T!)(aVdL% zp6|G1miiqti(Sm(^Hx@Id1tvgri#^SUSG(i>OuM3#Z%PIp_D-Gr{)!FzFer(p)%KBa#cE>?)7{xK-gI#v zBUHs|<1e#C(Fyk$%n49aIb{+ugSlv82E)y&wUKt??pD9x6|2oRHBW_k3`*skiVy3s zR~he+JDPvGd7v3UhGjp?h~34txcN0&QJ4C_7RJR2S{QQ4o8qZT|LJ@!xOe5Ch0*&0 zdZSpPLw8>={85`92rzAJzF_&X>--osDRQPDfOP8((OYnrZ>~7HUD~L%fg9Zfy`X2;K`+Th2$|Da;E^*0r5WgPk%g?BNw2lAF=Hg)t-pL*4P zWwwAWF(Yt5$u5N^Q>M4Hzy(Ja>s@m6KyYZ@AfnZzr3Q8{JluKlsSRvlTvDZl;YkWt zUpK;-d(e~~krB!>K5wIFl8kLNS`AIr3Dfr48F%K(|1cooV4AP1T{lC#i&tcX;)+@* zp{%57n-CX>r7r#}o-55`JYVSZK0Wt*-{*Drx6N|J<0iiAV&PIbuBjo>e!K8hR;QGi zvfWfqbS^{hM!T(s`Yd|%WhSql$|2ES?nIC2Kl3_*f`v=Th4fE)`t6{nDo20asWYuk zCh7|tYip=!Y3it1JYS#ZrZt}hiOyu4X8e^iZi@G`De3Ir<8EbpcUyf^>V{?`QkQvf zw`j-zI>%l!ZvkE9OCKwCyV1if-toN1F_If;=Y}fcv^W|lU)a9mc#q@jZcn_uo9;pE z?5xJt?QEhP->V#JERsz`fkSJu>2{9bc8diK&9Xa_bT4gd)A)R1yf4d`tLZ{@nspDJ zK0VTG-sAY_o9xL*!P5Ae1&QY7Y$6=biyUJ!eT`T9(qU%%>sH9Jdc$SP-7cGIIXxT} ziD^bT)|INnoTXL`&R>e2Y}3L>SPNB4m#AvzG_4K4bWbSWEaA&N@da4zatmCLgG(-O z^^0*NFuRv9)lXVEUds3cPN0aq-ccd^2N~Tr=gxg~@=TQiC9tD)byKW8M(smqoOVH)=_~5AJ#kN-OPQcl)YG?M z=vM%}ZBb>Twxw{Xq3`%K^)JG@jzP3bHPIk{6_s|rqH$SA;R5SmWu(;kRHd@gYv^RE zzcromVI{XVmA!@f`Kq`4OFvFTQI%F$(b3vULmTaGM5AJLJXEDqEAJWqDr$sa^@3JA zC@Tkl!|9o3v=C-dD6~EBON27x=;Wnu4%lJ|;%0Q5^2MZGbaheD zS3`Woj*c8QrQ&qILMpMQfc)CDQmON)OZ2wf^|y;tZV=;8m+~fbQp_Go^$nGll?uYD z=l1VyX=k;4jY5KO9+h?nUf*YBpw zCowa6|Ek%kR?tk;KRJt8Z~gl{!YK zt&CeS<~39PG@+kjm^Csd^O?$`q?O7YsMn`B$Ddh8WkwK0Vcfat$ZNc)wSEPB8>=IL z(x?ijw$1(eSF$)|ODl|>O);&zN81s#hS4%(L9uHfl|5aIsSOFte|;UnHO?R=)(^T< zO6cP&a->{jf1=XgqnB3f@4pQl?W^fSj#>w7SqC>kG&R|wos!UVc#eB0l}0Ly`r6MH z#iCS4n_87puW;v38TPU2WtLZ!>3&zeOcjfk3GPMH^rMVF0+_|r#J{QK(oTbBvrtfX ztXuk1L7__!Rhn#twXw0fQPaJecw;WBktW!JIGHi|qV?58`(JG6)1*C8=|NVtsiJZ( zYnFn7W(?x}ZJeo|fBzsu?I2~mx}!0bSjpL8RQP2kr%JanodK+E3Js*yTNZGQTDYdr z^7Tlh+M=^i&2(;GlMDG?TGsG{7R?Zww~-sBD>WW9w5k-O6duwo_9z)e zbG)UZB|-IytpArGJyw zSsq;C$nF)6rM;4C8^`poZX+wzoQS}oS+>3x#48UnJ>hmC#(q=xY9J%L+Xw1h65+8$(B%!ceb7em3OyYSEm)S}DI4nUSq*Ze$o|B3 
zNo=*)N>ZSGfQYAF=$<-juL}08%%MFi)im<0Y$ineS!g)xpQqP;%xP;G5J-2vCx6;E{$)!rzKxGKInUQkp_ zFNS8`P+hsW^7@e1&*F-T+CqwdaaHB~irRv!BSq$)1y>h`DwZ#wH=kDPt}Y}sETDy1 zzMNhy@{%5!==yXLKau~s2BimDI2l6qrahD$Y41)JA}$brXY*&K+)Ql9Qt>Lz2(SIl z3~{0Tu`3FanoQXcrs7;9@p#}a$L+~Q3TbX({q{nrBjwZMNZ@Lvo3*8=~wz<(|9|Ca@{UJ=rK zXW@iU9>IM2f9y4DpC1b4gq|c0KNDz-@;?1}nD7VR{~_vfUl@8y=enmSo)-#T5ISv! zpA_oQpOsVixBui`l6O()!Xhn>NAA6yI*XG-m+83c%5Bu;n-qElVf;X!TK#=sP5wkv ze*gbn1)4O}ev-?zG1T@9zV2p?AIRYQb$(X{AJ*lJjoIZJc+F!PkMYJOdffQcBluSe zezV{U1m7?CHwk{b;G=>c5d5uz-zE6>34T!UI|aX2@Mi=+B>0Q9Ea0by1b?I8hXvm) z_@jdVpx{RYe~;kPg8zo##{|Du@TUa-eaY+j1?qKz5B*uoFHma)KW#+gF}ixO7)0a? zUXIFV30?#bg#|B1Jf(u4D+HAbUXDehf|q02nBe6|aJk@(GJp)L6Fe12^QT4dQ%p7# zN(%mQq30UG|3f6WNANEd;WrEZ`9glb;4c>ZcEP_u@B@PXl90bk@VO%Vpx{w=89#dk zf2l#z?;*iY75pK=Unclr!Cx-;qk?~-;70`iBEhEx|6;+934WU3PYM1df)D-1^4sZx z&k_7f1)nGQmkEBF;9oBIe8Im$@UsMeh2X=2&lh~D;AaTFT=1_Fd{po=1s@aqRN;rq z1^*?%*9rdBLQad|FA#o}6#Q#M_%(umt>AkEKTGhN1^-o{Pru-=6ydiEezxET1h3_^ z_HdWrUnjy33jXzi-z)g51wSPCHwgZa;L*MtKf{8L7$p5ZD)>Udj|g7Qd8P$lEW(co zevaTz3BE+|q2F5mpC>BZ9Kn~0@OgqS6Z|y6uNL{0FZj74{4Bv=Blxi3-zfM}!Cx!* za>2h@@KM2+3qB_Jd4gXq_)iM`>jXbvgl`dih2WEdUm*B3f?p{39>FgX{AR(|iSpGi z_{AdpcEMK)en9X`1iwr0e--&YDER9{_`QO!68wSg1<$CPYPc4tJetr zHW9u@@NX0RX2I7AzF+Wlg5NIqxZno_UoZGwf^QJ~px_$?zgO^0f*%rmv)~U2{`Dfi zh6TS;gg+|y7Qv4QJ|XzD;8zKLOz^i0{*>Tb1s{6c^8ZsJe{%%iCc@_levc^E(*)lx z!siRVL-4Z%|9#<72yF~cCg1=Mn zLxR6c@P`E7Bluy#*Nb?M3jW<9{D|Nm7U@b0e!U1kCio44KPC8$f)D-P^8ZbO&k_84 z1fM7P_X>WR;5Q3CU+}$xpC$M$f)5M6Pw=IJf5U~=gL1)dwdhb(@OKM7Ciwpm{Bptf z3%*Y9?-zWF;9n)`x1`{=iSTO#{{g}G2>yeD-z@n56@0(oKP33=g8#7K2L!)e@Vf;6 zuxR%O1^-FmZ+iuQkB~DY_#J{jB>0aAepv7y75q`b7Ycih2>xRtd|L1y7yOvuKOy*2 zfYn1LBVep{47!4`vw0M z5q`Vi_XvJK@Lv`DF2R3I@PmT?y5RQ;{u_cH68tv>e@O7fq8tqiey<3BRPa+pdPfBR zEfGE~_ap#D|B(nkAozy`zf16M7Wxkg{>LKxUcnCwen{{?5&R*+9})bp z;2#nEQNjOI@FRl%nc&lc|GD7D1pf=cpA!60!H3e8|Nm0(If6eX_&mWsD)?!F|3AU! 
z3;tJvpC$NT3qCCP#{^$0_z}UE3;tiCJVpio8xcMx_}>bCx!`{%_&UKqF8CI~|6cG( z!9OARHG)4S{H;guPm1uH1)mmtzu-p&zg_Uh1wSD86N29*_&*4KQ1E{g{9eKTN$^90 ze@gI&1V1MDVZr}d@J9uIQt%^!e_HTq!T&|@V}gH1@TUa-SHXuyE&uKLsBa{J#WWD)?swUoQByqCSiYo|b;ipP1k$2!6TXCknn! z@E;WAt3~j4i~d_u@RNj`HG-ck_#VMuAo$IKzfkb~g1<=c+XbH^_yNH`Pw=}0KSl6^ zf-e>QkiCL`z6d`g_!kKNkl-&7{IKA21%FiVd4eAi`~||!X~ADA!jB1ls^Cuv{xZRb zj$8h}Tli0o;4c^9^927w!A}$Xiv*u9_!kR)mf)ueJ}mgRhX9-^4;=^@xSn&GR z9p*~~|1Tk@T=2z$j|%?R!rx+opCiIA7kr<{uR6ic6nu-|%SAa&3cf_hUnBTZ!S@Kh zOz@ipKUeVmg1<)a+Xeqd!4C-jTEXuU{F?+nDEP}oeX>{ZZx-Q)1Ya)rLxP_t_+i1% z7yMDdR|tMY@CyW=7W_iNj|u)kkzc0-zh3x5=nt0vFA{Qc1ix7Dd4i7$ewyGb1)neY zdxSo-1iwUt4-5V}!Iuhtnb5yn@KqvwRPaj$9~1obf?qE98w6h`_?X~Z1pgMnCj~!W zl#4ZjuNL8Z1YaZg&4OPh_Y+C;i%yMF8Vnmg1=SBNeliq!H)?(BJ6)k@E;cOhW=>z|J#I|9KqKL zK2PvVYfQL zR|vjE@GU}4Qt%1EuMzwz!S@LMcEN8Je5>I51>Yw4?SgL?{D9y)1iwr0Nx=^a{>vi0 zdj)^D@Sh>Uzg@^VB>0rzhXvm$_@jdF68wnZR|`HZ_&WqYCirclyq^;MS`j|d`$4` z1;1SI8w6h`_>F>Z5&R~>Ck6i=!LJeguS9w55&U~a_|1afEckxG_X>Wy;I{~VK=2!d zKkO2Gp9nuF`1c8Zui&2%`VR?ys|bHc@OKM-Sn&TN_@jdF7yO9e-!J&I;I|2WOzTxaEclNKzEtoZ6MVVg zKQ8#F;6EYwnBWHlzg+O26nvfFcM85m@ShTVQt+P^{2IZ3M({m?|E%CQ3;uJ0?-%?o z!EYD*=LJ6?_}zlvCHOB0eo*jhg`e*g{1-*|A;Eu1@P`EdWx)>%eoV+eD)@Uv_z}Sm z3O+6PuLyoj@Wn#@DZ%d%;Y0d9|BGmp`BlN^2!4&IH}VAkH4%QA;J+^Te8GQ1@UsN} zO~Ho+|1H6n3Vxs9%LV^!!AAxEHPPZPs@DGXbM+JXK@FRjhEcmqGe<1kihCk#)`!2X`_m!dOrlGD2(pTk&LeUNP=N#l| zl+c;tdU|x`)s6IT?iJ+362giUIL7q8XaUj9~e10 zI>2}?Fj73)&$s{>EyZXLljZ5ejad)@l@a`z~zjm06!l%%y=U3 z#lZQDPk)~93xM+&p9H=HIK=oka4zuJvqZf5x4?P8BaDv$UkW_T_z3V+;339`fiD9d zWPAYla^L~R`+#2v+|PIq@QZ+Z81DvtF>sRcPT*<4b&PiazXUkOcpLC^;Bv-WfL{t6 zX1pHwWx)B2*8#sAIFE4(_!Yn*#;bs@03Q1n(q9jp4?Mzn1@J3@hZ$D`&j21`yaf1F zz=MqE1J48=U_2N2)xiCX3xHn(+{1V_@N0pSjAsDP0r@~ z;6mU*#s`3lfCm`w11<*cXS@e^4sZ|S-M}TlNya;YOM&Yc?*J|XjxpW_JQui}@fP4~ zfWwT}1HTbCpYb~2Yk~6^r-0uC9Adl*_|3p$|KR)wE(acAyaIS0@G#?Q;Q7EqjF$jc z01q;r54-?)fbm@5g~0uc3xF2^_b{Ffycjsicm{A3xQ_93;7Z^avV|)1M=}6gZFZN#N^&LyV6D-vB&zn)4qx20X&}81P$whZ!FMt_B`rd>FU} 
zc#!b{;AOxAjQ0WG2;9$j5AaREJ&bn)-wd2&yc2jia2?|vz;6YPG2RBe0=S&<7T{Ze z!;IGh-wK@1cpdOQz;6Q%Fu zWIP|Z5qN;{T;L|)e#QmB&A>g3X9KSUPBNYWj0R}5j`4Kh1aOS;RNz&><&38Q-wqsR zJQ27RIG^$9&k}9}&SQKMxE(me_&9I}@YpHNf8ZqW2;*bGZwDS`d;~ZJJjD1ga3}B} z;{(85zypl;0e1uUGu{Kd8n}n?Zs0qBlZXPY3P=jxnAJyal+N@f6@b;4tHf z!0!XjXMFlIgtr3cF+K@=H*kpYap3;}9{VfjKX5A!d=GGp@iyQcz~zj$0DlBH z%y>QUM}hMhuLJ%Va313n@W+8ej8_4F0(k5h&VS$m;1R|vfIkU5%(xnOC-4yCCBUBo z9%MWp_|w1xjOPM>2DqPb0q|#mdl=6K{v2?U@eJTyz;%qL1AiVk#&|05Zs2mpQ-Hq! z9A-Qb_=~{#j8A`>@Rxw|7@q|GGH{6Tao~G_$Ns|k4?GAw!uS~QSAd5Z9|7J2JjD1g z@K=Ec86N=t8t?$)eZXG_?q|FQ_#41IjCTWn6FA9uC-7e2I>tMIzXcp)ybX9Ca5>{G zz~2TAGhPq;9pHS%>wv!toX0o?d>?R#@haf&0gpY+`42nSd@P6Q7#?`B;%dHM}g}Y?*RTKaE$Rb;A6n$ zjJE(k3LIv<9{B%&^BJ!L{uOW@;}r0(fkTW}0Y3&j_Giw2;1S>v#w&n-13b*Q8u+)s zLyVUI{|?l`HWBRB>YFr^G;IT2zf8a6T5yr=W{|r3L_z3Vx z;339`fu9B*WPAYlFTewg_W?fx+|PIq@Lz#@81DxD8*q~GPT*6(b&Pia{~b8ScpLC( z;Bv-Wfd2s;X1pHwpTPNy*8%?vIFE4(_*vi(<5j?CfXANV{09zAAUwi&1@HvmVaCPU?o&h|>cnR>UfCm}R2c8K$z<4h3tAYC&7XZHoxQFp<;MW2t8P5Qo1zg8?I`Eai zF~(DYX9Jfro&tOoaG3E#;MW1?Gd?{)`1Qbfj86hz4IE;89QX~uV}IcM2Mz;|Fg^wx z0Ul<21h@cri1A_ILf}Eh2Y`!!2N>@I##W@!e#U!%=K%LG-VIy=oMgNc7+Zoy>lp6< zE(4A+-Ud7uxSa78;A?=xjMoFd5jdamI^b)8^BAXq-vk_DybAcuz+)#k|AEVaM;NaF zo(DY4xEgpq@DSrAz!kuQjOPO{03KjG7kD9XKjQ-6MZi6bX9F(=PBNYW90jgpJRP_a zIL3G?@Dkv1##4Z=0}eBu2wVl6&-nBw2rmWBV|)_$df*V_tMI z-wGUKybX8-a5>{Gz_$R08LtPv6*!;qI^f%Y^BAXq-v%6Fyb8D$cx;sOAGi*9gz*aC zIPfszYT$a{A;wF98-NEH&j)S<9$-8dxCywQaRG2Ma1Z0zz$<~1jAsC2!g;ig@pRw> zaE$R(;8noojHdwK4jg7Y5x5mNpYiFB6K(^}V|)_09XQ1JIB*B>Seo-6I0-z$_!#ip zfrl9%0ZsuAF+L332|UR70B{%Z0ONhY-N5~f_W-X3?qR$e_zvJCkRUIM%Uc#!dY;EliojOPMx0`6yA0Q?@{9>%kQ-wT{%JOg+$a2@06z`ejR z##4c}0GBhK0^A22W;_x2eZcvQPk)T?R^U9wCxPz<4lzCs{6D~BPjLPN_XCeGJ_h`L z;9bayyMaFpoMgNccsp<%;~l{F z0LK__1Kt5#&Ug#(M}WhO*8_hPIG^!4;Ew_4F-`%095}>y74Rp3$9~WG4?F-o!gvMn zCxM35mfr5^x^llfYjF4lzCsd@u0WCRg{8Qio#`}PO2Hekh5Ae@{dl>Hq{snN7@lN2Q zz;%pw0RIv=#&{d>G2n8>TYw)04l`a4{C~jtjMoAG3OJ8(3i#K+A;znK9|Ioy4d*}b z2=EBw6~Mm%9%ft({9E85#!G;I2Rz7lKJeqf1B~Ya{~oxXaRKlXz&(s-13w9zWIO{n 
z4P3`~I`AlPjPX?9{v&W6o1|DX71o$NI5aYwZPXiA!J^=g|-~q<_fS&>GXS@gaufRQwcLV0w&Wi1uCk?#+a^zAy!PYw>&e z>HRsQ^)%XR?x*l;jqn3n__t``=WF3_F~S$<;V+2tgKOu~uN$LRo{IMVKDy!5C3j4` z?I3L|`OAB?jU^{6Z~{hn)ovi(ZBj`y|E4cFGvGm5hJ_gxoLvUU=megQRE=@!bS+aA1N7ZDSl zKG=+(t8=fwk`uPJTwA)4UhBGolzX$T++{aXvS$!Yvd#?OHhMD&7_Fv%n)`3ucVTEk zsD|t}E!uZcwD*ZKvGAs$?$UGv4Gw!BNWYKf$a)XH@g+TbNh1_4aAet=-eA zdY|g8$V-2o*s86a`9$Sc_5Lk=4`KRyR&?t-W~JZ9uY{}m8fWH3`&wt_RM9`vs`|QS z=J!^FqkS)%NgB^O80~!^+WT1g6|^EA?R_f!b6OcCQN*TSN{d7kQTj6e{bc(2^!vmk z^yI`S{nLNqgq1qZ|4m1_E{pa}jQ0K_+E<cQ0 zOvZy>oQW+znS1HvnbE#U(Y~UYq|llE(hKNO->oy3L-{(=wdG)R!|@3eb=M8i-iOk+ z{sulji5&gDmt90c9;5t=_C25c{{Z6$LA@oZ1(}zx1<|DTE|{5`=@QnI@u>C4SDuC`)iJ04-I+cAABMHv}7@wIdf6@+N- zldw}Mc?;|`{FtHji!2$MovxvvY4krctweN!N>+L@%N>2IZir`SX$po|%2gDy_p#~H z5QAhbh8U4kloKPG3iph`JrH z{ti1FLcPpBd5I-R*dd4VPGN^QxpMlSRIx;cMwjV!_#;8sVIjK;?C^`1sO<1+ygQ@^ zo<@Pq`cRfP-5)f2Ea*MpV~~)Uj(RInHpn7Qm*Epp))@vV52df6W<$4!mLE^0_x{ST z$FtK+<%K;?aT&Ik9?ILdkV~fjKx+*UWSY#pZJ<7;q;3$ zY)}SA{tK5shYgmJPo~pX ze>fAPy1~r^k@VA#8u8s|#m6T237f<)!8H^mR}cj>yQmxC#mFr(!o8-9XI_+H1g;>S zBApclBT9al{v9>MsW27FJDEIjk-BVM0%h9mXYAO%0C#z&<7A#RHvOJ9Kmaq7Qfi4>_n zL;jcEp$9%mIivN*%>IZ>KlLKq3&RsfjfkRFM4bK)vNxFNXFZXAnrbxCk1V3x-)YK7 zU#3hy*BH+9BMm>NMn|Tf+aO9m{9H@FB~qrJ94@^>4}6mHMxB1Wzd+V-vp`|`@n0BG zEwZ9w3mjpOFf8yoimf^K=AjdN(yyfHM%@M%vgbXO{8ZC`*>^D!FMO08=|OOBNv5Bn7KCciU6tsU{|qJjX{sBm zdQU`qk8?uOk8m6FSo$hTK>9)SthD0w1-)=rSaEH>)QF3Y0FcsM3t-#zWJHb|oz`D# z@q#}_ewps(_(xx@H9DXVf_|GG={G1>^@J?8g!dji@maPX+b!DrFSeuIe(4{ej@NA0 z^OG!W*ZDIF+_w-z+MnjZSDxaY{!$`d_!PS!x!hB*-M6UW(QQ}R`%~ENn^cjK?W%fz ztJ&_u+%!Fweu|tRy#>87&2}An)o_s&SCti)u-ziAyC`n9ofa>Q1bZ8LN7mjV1HKFUqXkSHnwC^3!=tC7@45jGzEd6)B z{yPu9v4@JE!dGb^d@vR*d#vk{TSoW(*y{h>qL0JUPg7cRuLzy^q~-mHpN8B2s)_bK z9ewv7um>mFcXLj(FZZDZGxH~xpP5g;XVT0=hP<^0F~Qxl_QKf!}Fz%&Y@kt+TnCdna)ssojb(qApP8*3=4=vErB!o=U^YVk(W zY++7x>-%PIrdOg{zcaH3zk8o}XyZ&w1VG7X@B3yB;WvdErr-1k+d^6ID~F@KADr2b zr$le3-}s3_H{u8IH}QKx(ogS3Et>5Rd5DPU*2(MXc@O;wvmh*DpkXY(chvZsUK|8R 
zzjx8^^uwI@T7<~qDahg6jadDN_HER(?fZ@v1-}IP3^6qnWlGJ`6nc65tO=RKJI6I3PsCgL@@?&a~qb5ABqkrXB9xpg& z0tlLMJ#VC)y?c--$zUCQHP>aV_-Iso}0@AA0c>LpO4{tREI#+SFJ){^#=5!jJ|5_5%|dD-A zHhonZpKa9os`Pt+qNOHrGyRG-9I^HDCQu*T>a-etRlQw=IgD~hI-2bw|DD+`zJ{jN z740H+$(-+g>hljGYL9kt1uJpx`>6+L1~%(<5$Af&RX_F81C;V})Gk`MJPP(6**>e9rGEO`}VmHOdW7)ubN=xj3_Z+za;8 z6x5v0nS-35^7wI@beHAv6*~VO;{D3w#pkR%p4EKLEe}x=`p#Q<%z8ctW--d6?|e=> zuO~U$$$!UuPR(A}&7&}$uldy}>|0s3ljm}N^{wBel#9aHLj`>t8HTpoW7`#T%Q ze@A&d_%%fR9F)g(>|4)adHnuYDdp$9JRYQEIm+XgK|0FgcCfbc_#$KmmB%}2B~q5h zXQ3+b&BXha$4kyxc|5EB>NP{8^7(5Yv+l3LEJk?@(qFx982US!$bUzFHT)Gs?NKDp zSAX?M8uve^{na^xlyXrd&q05+kcy0>zxoQ0j{a&6SX+PfJtu$Bv0RNr)>n~^#Apk-yK9C`n`*Or$38z zU}quycg*+neF;%}6w>oG|2m(2F3WcFT+a8r|BICJci%rk8F0&O^KPZ%8?--kocjcv z^RF8yd5-y>c94$wp5Wbr)D$d3_Bs zh05#Q_tS4tUN6!4)x>90USD+1%Il=FZC|hXI{fXtl~?8bkfMEsMU3(~4Na%szT)r9 z_LbL9ZSCtKtRp`+<@Ku1BI@U$y#AIZ5}woY8u<*R{JfRdS5c92l-El@I?C%QvV^_7 zu7+wE~X{RaM{9pcm_eg=m}X}@4?PV}lPqWk|bF`9et z(G7P_o_N>uu6bKd?nYb{`K@BOXzy?D)Apr4)%&uU)Xkkt{nk)=%DrdKY7VL6`llee;Q$4=G)(_*nN0uGQJ_ng)ALJX^!(B&1;4a>TGfQ1 zXxX2;zeuM7^YoriXv3WsPsrW0fTq6tE`0Tw+)!x4sp8yCMTDap?#v72ZmOg|M`vPc z-0bUM|M0w4Yce^<6|F^Lr=F>sw4C^qPD^v#4uGkT!?b%pw@3U8DprCP-WQZ!V>+q?LV( z^9Ytkx7JXo3epN&yKAV+UlXGxndKA*)ms%Y{*B$`v>&&(CaUeuowwnx$+VRg7JjG# z8>i^Fz_akt>Ew%}R}ro4JEhaR$g_F3&3oIt+JjoQeUsKE%xt?jchl#vR-t9v_vp{j z4+Co_em_dyhtnvJe-{EN|CXt}db03n3|_wVa{9-`qf4n*y6Mq7R|s#8d>{U7`SE09 ze=K?F{ENwt$sK1efeTlYpU$2R4~4@{BM)Xzh2xfAtb6KV(^LDk1hk00pKS!^pf3)%|#;|Fmz}nybAF>|$D38C+Tl<>z{8F~-p(n_Z6iw^DV}5D)iW(QkYMCB?BQ^Y57N{m6$AwMQX6U-P}y z>~qiIeD5beL@7USENcJ1Ci1DuvIlbC+e7_@s=f-k=xB%^;UMS(cN9lA z{V{jbchHxZ@Km(-9W+#;k>&hbP_s`Pi<>|Cm<<_oEG!Vt7Frruf zIlA?Y_eS^sePVRe_q)Co-Fo>}qG`zVP}mCv%`8=m-Q zHiKUn`74cw`F0TPKALw>dA3}j zpU?7c_(OTL@4{&BqMW{q^uhdOzPGCU7P2Oi7us;w#S^;!NQ+T)79A&TADTQ9G-eBP zj6cxbpbj6Og$Q68R_Xm{a*D=(FoKn^v`Q~`h)$m zo)=OK|J;a?iuBJ(`q1UuG`XSj&l-A;dAZE`_pJT%xvoE#uwAmPKOe`4R8@aoMGw5| z&tGC)r_b_Fv;G{S2TK3Em;5tu{ke{$c=+cv+XDIL1!wD@ADE;0=LMer`7vJLW&b=t 
zidpsN;}}~>|Ez)4sQ!E@jaQ|AlK+kHJsf)bO_!T!m+#GrmOV?&cvnfZZ;{z_Uq?;% z1Kf1~JzDmoJL1v4>!>|`7|m4KGj|kFj&{E*I^kc@-rJ34nr6Ol#jS#kxK*%~S`IYP zRs&Quf!f5ARaYMGz73aUTzRsK<~e_yeg?hvNmrft2Ie03A}#DI-AS{ZokV2Sgx_;B zdwbrAt>*o9$Eg}if1P{%*YGDpx!nUfz$7V*_LbT#O>wTbIO^Pu^QhSzeJA~6wJ*Jp zhpYeU>FpCe8SQ2B-!3|n+kGKgN4kWP?XCB_g13~PFVb>4mbYTF&)}w)SW8_KaMA`fehqd|v}MRrHS@ zp?;sD{h489`BSy{P(dfHoS@^asUF9}s{ zEzheg8_xZ}{%G03+z$-p?9bhZFFZw#M27To`*d!{iq%%DR;*Qg zmxMq9C=gy$6a`e2yM~t(5fK%B-|v~ZFT2@ zmAj?zn=aP{2aLPxRLQ~gRb_)!vFiII<(l~oC^vswStW5h)CH_9jO~o$M^dpyftuB# zX4}d?iCqpk0ac|bo$DfM9bzoZ0V|LaKWj2WIM=CsL(_7m$#Cl{>lJb7%*xG37wbl` zQ;ps$yVlh`4D`W-D~qhfq1unrc*^QOpx}kELosC4hx0!I=r6I9|`i!rT3ou zO7C^2DthnPCcQ6~1y!N-R>%i7H68v zHiHs78TZZ&<=2Fx8E1m>!}%*fjB|qrHCD|vs(7k82HdFPaqpzc(bY-mv3`O*DB4{# z0#qcNyUO&R<$~oaP@W2$iu^t3zY}R(f~tC> z>c`RuXw65n5!=bpm}l6I(_MblWZy;GDMGgChcr`8wcHzch<4Pcqmw3o9Lj&gs5%u~BK!t=KRCh$2hyamwdrgy(v+`{9W03|&=w9%*^Mqg zL04*+P!^7!{0-lcp)S2Uk)e)t`RU+DSL@)&;52KH^r_1Kz4G-gM@-Z~ifQ)sk9`ZfS=bJ%Wd) z8;7SIW*iwjj8v6r=GRYH<8UCVR)o5yt}s&~zU+n$#yqDxT6c1t=KuG~-;+6;TEtpl zBIuGbW4EDrHD7A}eAu)W#R?E>msXL!7sWcZk3SFDSN{B0FU6mSw8@{ZKEPTj z{Kpy4oe$Fp_x$r=T_sZj|NS{Rf3CoE+7^G(tz&4}t!Cs`#C|ZVg>dv4rZU^iU1jF( zB=Cc443kNT!AKmE^9!SSQC{$1I?XK zhC?qjRv%=8qvT_| zg(*~fHC_J2UNJ?+t2I1!=)Th5=_e@qJG4#uI|tOH>F*in#UA}t0WQYT-wGJ`Ux+kEMZ-UNtDPVqs&}68LK2T-1fY z%Eq*lA=x!X(`yJJs|JYxGVqqyx?%zt5oP3ipbVnBwS%K+AqdWC zi3m^Q5CA*iyGr!Hnxd(z!`3Qe2Je=I7h&%r>)mj4Lhn%Rr)c+AodU+aaP#{Dee-)H zNGSls3+HR3w>?nRY*Y<*=9(}9A*{k!yFC3ZV2lMviT*a$%*1zN3$h{m+4LUxe+ZjC z5KXV|9nNnY-#9p0c0EATs&K9iw?(x5Gc$D;r=Lp{KLy!vJ2hx^4gv}Wt$}HHO5+hG zpQH6Pliv|c_@$43OyI&y4;MxPfWk;#3**e(45Gs|u|-IZLKMG&8mMm#SAR!P-?|RW zjQwE58poPVsy9Hu=4XW(76hJaapO?tF94H2m_YrfI;hEX2Bhh9qHcbvy-=DsUzyi=^)A!}KYF>4Xrz2`doeyi}i z#)w!>+|dF2jzn{X-wzdvU5G`tdyN@n{=Mh-e>wdAI0D9q)eg&R{xW*45Zu`7bptDp z-3~Llg))t9fo!A41?fhQP?pgn(91Zy8-;*zsqqEE3eI|+slqghFb-~(yMe}I>Z>nM|}n%?9$kSao>wNF^!bvmm$V9 zN*9P_5v7zR+Xycsepzz5i)Z12orq_p0eVpDwCsAXe7TY>>BtN8qG9du%vg63E)^)! 
zkuUdCzAX3#@+I z_%o*Q=5`HqmxXe_gb}o3k+lf?(+K*UFjC=NSRlWcbfyoND!eNYkO&nsV_g=15p=?5 z8OZl7tfd#=J3!yEMCcm?YZHJP3FlHT2WFtPAN(Ddhs728Sn zZxX@qscv}*I9MvFJD^UN_r62VeF1d`NBjIlA>uM1Vw*fB{GsQ|2kko01ID|x`LV~o z((my+FE z|9qShKNe~n%w6x7U)VGj<-qHvpnb< zMegE*+9rebgns8|8dV2#RsuLX2cj^rJ`&ggX^7b`GhibCn_qxiSppj)fv@=+f;uzM z%ut3E;C7e5HvY&oqk*309A`sLFd`dZP7Fr+Vykj)2Q?M3A1m#ixUd2Lb1u$As>8U~raz4xw0LCp=bTZ%`t;{MngVXN z=@0OGSV3mw=I++S?vb0btch9Wvk~=^pf#jB@GL5P*l6Z(89%;=-wgp{s@*o+xq+MX z&NZ#qy{$!N?pMxMBJ{Q~l?E;Bqj|X}1}v2O4HYCnejB}PTG%T@5o6yl3Ehq<2xgV^ zaCBTxle>s;(Hm6wc|)RU|KMPUq3_qO>jd+5ICWjw&rtUc=qzpn^0hK(6|nP@w={Ag zTT_r}6+@)#GOGT@8d)$v8&ZRAe5sdN4!)VH#}r#qUbVA&gYAF`&6u1O3voohz>C)PBSTCYGrN2ufBR4f5q zg+U|!9hDhdhzTF)?g!!gx|?8I_}newdb6Rf8KT`s8nJaq9C|S)^gI4{P*MJZ35)Rw z?56LN5X{<-IyR$^ymPr(AA~p7p7186ZaN{NZe{KSDlG{T%mrLTQHVmC?Lj>gtPJg zNOEm42hrs4k=jmv6aUvE^PC*~b7>As9G1f(!#ON7n3HJ@%gKVLTX$pX#pqXTbfRFJ z44EIpJmu>YE#EM0;i5)v19aqD6+0R7%`hW@X4ArL3xOS`6`)K4>2w3tK;tEgasLzc zjv*e&YQ;%{PJmeLTsZlVrsK7K0J#K-@hSd~A*-`YtFIZk8B{Sby?i}tJTN0@^?E<8oSmNEZlNxPT6Dq(BAoMbnp;9azc`;*L}!)a@pJ9vY4uTx7w=WKepMWl{}-5fFKS(bj>{GRa<*A2Np?nTmPp zCz5&SJQB8UL@!;N8QTEJVMc?eV7{AwQx7sv!+eir(qc3+gkHed^3R~Hb2Ssye%#6F zBk~7PcHlMk9Qk7teznCP=(|HrYl!D{`9t)av&mA)AK{NC#UJZE{`i^49}CNn4cszG zLgLf{8sZvt0!KWmI09_q=ZO5Ua0K_#Ws)mQ5*)?Y$;MHxHg=+G>MTq1sJ^&7##w#h5~ZNU-1R? 
z09_Bb4PY$@B@JW`OFag%i=`d|-Fgg^dJJ^yF%b2r$7}&h_G|!iqiQUP&Y3^YMh1={ zAi&ItJ~D}cR3ig!BLljT0cm6)OBz@!(zAFcy!0c z;Enz0FWEa4sA@_){gq90Xm3yJ8S~iAmg^a&((fkaxcnA5-Ly_KBaj+&varUBxDkq3 z-}+ic0jQ|OI>vdVlW?SK*n$9qmlE;|Ll{tE$;hv#2KR&`-zhK6NS+ja14I@J_SP`g zHT_2Q59*8c?zr`f>wN1M*Ow&@;R()nl0U9eJ6QSjiB(hNF_nmQYGh_$w`}~#7(zg< zUGs7K2iN#zUNxWqHo(i|hgsl@@?lNaD>~jseI)HFi0d}=kv_1IP#=Lf#k!K`FBBNT z898Kqqkzg#++)S<%4TG8dZ^!R&^!92!Hb~=%{8ic8Z2~lYtdUt0 z%(5=LVhI_-QOdY;bVrtSzD)sdMh-G92x;e62;Z6M*rATN@i3s*l!WPlFVkv-n#I@- z#P>d3kGuvLfAvm8x2ZfpqCTKb^=5m`l_oy<*hmD4lX7YRX%{(+jkAuV@Mme zwsRi;crpd7x7i9#cmTN)Qvl6}#3Vn8)sNm3Gx~C}oyf(E@D?uAxg=Cs56eMtkJoX9 z^S{tHav`y74uO&3)XOnMQ|6H&eP}N$vSa7W*O9D|FT> zbXM$Fn0R0h;9j5w{H+$DFoyv3zUVGwh+ib>{!5W4|0JJB`=mPmHfJRj>cy@B zZ$!um5djt~B4YapfJX*FJ`4d54T9Jh0v;OF8&B}ipuSwHE5?t$ruA>kGU*5w;Hf6V zA^HG{%A0Zi#GPp(xdc^;vK26q0sdDq>_UVF;(zCqjq6br9M=)f|G4~&*mBgf8f&$9 zPZn1U@BGpXUpc>oQa+Tw(Wv?h($lo`DZ1F|7%5Dn>?lm9+$hWtxq%~4L^>3rrzi!6 zXNx!p-_4IZKzA! z?WegS?yI33D^V1=4QkPiC1LCHqWlfz1+gzthDU$I7-%ex8<7V4e(FBr5hTbH-x^gf zNsBj?eHw@W;75znp;84v>1kFWjuydSL$%^F!zvUGK=YyHA_DC%%wpac;^Em0p!vgl zF@W|D#{ne6aM7QIPOv`Z5*s$lS>YZ~x{8V!u+4@--8Y5B>?7yM5lR$eMqlS2W^CeDDqY`LcZRIbUW53> zCPKxr#;kYDYj>4!j?9W(BO;>!bdBMF&}b;ovZcYk3wm-a9Wuk5<1#cXV$-_e6R`9< zFl|s$iezR$WVcb3i`PvLaL2*S5*EIhB4+p%fVs`%`MbfM&Ui>%4EGwlAK$zsZ|dwd zVsu``M5Rbn1mj6iQbNqzhashjNg*?(3>Xhjdk(Og{U`nHU$nt`r&wp}?BD6%N9oei z?Qxf)X0M77_Y1oeKiH?QBY7BxmdqxLc-DWukGpixL0LgWB?REQ51ko~Oh^x(^iy_X zy4(1;?#MV#Tzluziv1K>U<3M6KsH zmA8K~Pfl-thIz-qnaSww4J@zu=}ndG5Zq;B@b#2=Y_M-a3-O#LRv#nF7T9kynOk$- z>5Ix=61$Df+$VlH)a@mE#m~Kl9Op33Lt?5W^3&jN2JnaSdZ(nw z&A4al-+lG751*`1i4z#<1mf$;G*U$d#xRY2hJb)u(G&e}WvH6-1!25`w+hIKW&$}^ z%swel)Bgy}gu`-1D&+;4YV~9UCdOeoCFoGof+G~dW2n5~2lN(P2xE;Xz_S=nXYRi> zrw=BA^JH|<6&g|54^U=sq+%BcJSxC2?~+BbC}tk}L2#&eTYB=DS7h|IZ--?e)(@Xl z**USEl$cBi*<*h?f@PeYWB(gQ5lWyClu|{((R0rKQ)i(pyZdD- z6~E}qIRgo^nGhE#*LuZQx6lVGH)5aKnKT=O@*#RZ!}{5^`(gcT*hMHC^Hyu?XI1Dq z>Z?T*hyfrRy$?I1>Db)o z8Z(Ar&SzHIz8C^Ort|o;$hb7K6blOLvdx3)L!eh^*;6sJqGgvo2dK);HP~x(5vX&Z 
z2CAggi+U+Sg*D6~iC#ptLlY*b^djQa^GKDgQ7I1DGiIDfw4xBdy$f?4J4MLh2eV+u1gnMq_3Lv--4p4_%b7>tQ)K$lg-)humI>0GLN#Y zu$O*j>t`mPTJT10GtForw8g?Kc`NJ8Cl4i`pe$(Vm~VmPA?4k5X6)C%2>Rz+sln7u z#CKclEdo_A2Goy27O(-JoscV+`Jf^J@8KV~M`6r-F`2Q?MZ%nu`G?La_B)&@fuN2R zK?5Qt%vy=5!5#~|M1e8^R&NTF-NMvJp%GQOeLZSmn>tysPWBjtwIk2|gZSujw#!8s z`w1W2dI2)kaxED?dU9O+c*Wz2pZx+~?-?IG&tz`R4-7u~SFzWSHu2HBn06BJ(M_0b z{tNKYo#*?S`Mt(RZ;~uA0e#Q#(IiR)%s;K*qsidVr1)s`_3g(;i-xca;G<=u5rCA+ zrPeFjQuLft@6z~a8)j<(W)Npt3?pF*6SlxdKEssb3ijRTbN`eo?q_}OjWF8ShP?Xv z+P37?3~GaWmRDUaL9zoZuR8h~(x$vRku91YFL&!fX#WNBYV9CjGr!mJ>i6W;@1ML{ z0=>Ttd9@TAnp9rRhC!pf`rJ_%B#~Fg(tZOm^lL8dX?ZpI4lS>OoSx}z33)XJ(-0FD zGvW6~UZqME_p`j}X`+p7$g40$$d8BP`FzcAe7$FRHG(+y0L!bP=XnijQ(oN+R!Edr zJ+NTlWtMY0NI5ere`WxV4d*#)^7$lKbhiS9aHMGNPc{O&D zmREf^J!^e%B&H$Cs{$rWkXH%*r)58M*D3UQ%`Ynj3!9bRFUNsb%3IWRAMvjb>a4Ke zd*jG($G%NulhG# zr!1@<*l(i_zK^YXF2Q0m+7}^fcQ|z`KAh@WXprT-+|){1awg1At?Ejfat zu`6rf)&g%f6sMP9w09k23S^l@Du9cZjH*Wbwl87()z{bu#_CyL6{UWHGI=U!Z~oJl z%iINJ@C|;K3uQmY5=|)eJrB1^nxe>6>7nSQ=zz+Ny+gShL$zObgjf7J^vt9mc?+&M zc%l(rm|5ZIbzII}YoEk5_sWf_?qvt}jacB5a+`4KHCFOdg8# z$tlbO*>Snd%^4KeU{6HO=$4EV%5IolZxp%|#fN^rDksifKTPag}d? 
zZq@+Ztli}n&lJ3q{y=mBjyn zN6R`tHb@H1{UwOOh5rXD9fM~xqV+Q4biSdM$zjh9S))8RGSUHQfNo?ECVqld<)O<_ zdh7(OH)5mQi(umvA61&yeugU5Wzj zqormI_H6wIjbCGj_$E!)IYa8TD%KgVh}Sq;L2niZGW!9DZ^5#HiYY55dp&o1D(LyAMJZOqaS;2|o}S2VWvggR<|4{zg@{vuG+~51=@7BrQcp9n$Jdb>n`iJ(!MA15)e~P+NJli_+ zOtcStpg=6*yhl>uv~#p{?3E6+SPh~4_sUCS+&JRmjP`CONalZNDBL5I+0AX)du6$f z1K&(>;QNzKwpP(_U+`Pg?V27*#;ybGl5=?~5!_wC{QM`2GkbQ7jH|MUo^wb!5f)it zSzp9r_~8FgXK#bE40-uIm*8-)ln~d=$WxNpCECvQ0@DD8#UM}jh`;*rb50PgCFidk1?#{4VM_@RPH=MhGUWyO|M%9lxK?=MC>f|2| zOn{%Mg{W}!?D-f)Va~qi(wW)K8Jk=}sSVgLO%D@sC@ErpmVr|jfrlMAun^~yo@V7m z#-~{~sKZ#{NgvLiTfRQTK<*-J`NpaF!A9kj!h`Fv8BAszI3~wQTkS=oFO7o|#6iyHKfMI#mF27IJ9LY!$?dH_Hu~7uY6A(t z%yO~q+;I@ark{>*H{+(CN!hZG!^3}9vh=lfR|s70(;@yt6u1gL<@V=&6j5bs{C5r$ z%`<)YFK<(9A@Cj`0;J1{7`rx+UKpa6NDqSF%$?6_3Ff<+(}7o81^uMLj!&H+y-fQI zR)AbV@f!^S3^+h9Eyt*mgGeLS;)2(cTETkK5j-tvBPX2~NNf00S9dhA615-W@R=?4 z;cbjw{!)j4w~3<&iG}T6 zC)?$JZA{BVGeWiNI}*)yMl(QVriDiFJ|Dt4Y>R+fBiry()docmd#HbW-Fes*JaPr_ z!4D1CKz12muZ4p#vqG;qO#yH-s(3iF%tL$Iu8z{KZN{{JAmFs?Cr~U7bi1~A?czWY znELf69^fHzw*%1?dIJd(@To0mio^;yijCm$_!K7zxcJo1P)cqoe&UI!{@>~7)3Eh1 zUP*pz?OSm#?NSET2zd zr*#O-uoSzjd3x-3NMtYi35x@u++d#?VsNcJLiDZ%d-AFHjTuT{<1=Wry%-a+eV_Wm zZX{0UF@+8GaHdqqSO7EN9To3M>?uTh4t!5y(zBnO+ljR!w$E-?!R^+eAM|Sr&z=lIS9lilcDs<^oL+3SMuJtB<9iVw6A{=W{0-r-XL!*XRdRov zD-p#pxg0GCfr^NXUk309iHh0RFit>^&EEkX$O7t}tN1*rJPhS8C|^%s z_E2tPsMhW{C>n%pglq~$u@%k*%1@mc*ITUtW#c#RgFy&ghsXXQ0tN)TFF3E0-G^OS zStq!4q=W8Uzt^?)l^}WG)@nc0t~giGT<_S=Kvx=fuwM#+vX4g;#xZ2Qqqz1J)HvQb zpjKwyI2{!?oFzfIeWh0tQ0}N$1zKLcZ|(Nu+tYsR^uT)_a!-nHM}p^?ZjW!wHUNes z$aW*e3}L$&J?HdaI>`15aG++^I{PU8fKcf`Re2QHMMw)guEgy98i}OaQ`p@|_>Smm z0&ffUjBhXD_xpfvk8tqqSDc5S>jU2g+rqav!_K*#fcSPez-$7(eGyT9d^-|V^yRGZ zEsj-6z_$fB%RUa@@*Hj+fW?Eby0#e}r@A#^YQbod7J;4xF3NkPZ|t z@i@0R0q5R%IvWD;tjfSljk9DDac)30!-sSCwjKL~cl*hdlZbbJ#8|Md#=9qou9t{+ zYr(>5em)M&-6r0>4)a11ygQB4MSJjWAuG@--u;)%(Cxyz8B!vu%Rhy8yJFNFy!(D0 z8`2W*;&=_5FoaWs&Qqd24)1cj63##IaPJbFvt8LBxRT6-0S$~ z%LK-AQ1)O)bn)+Mkfq>XPQ?yR?2Kk5;zXR$MeK$)o$E|jL39N0Z@bzP*OmkvjJaV? 
zIJg!~a&hq9w6CkQ4>I=JuLPp`?LZ)nqhc83&cs%@P%#QX1Mg#OtmJHU~U8Ee~;UUOb;o&Rwt5={Qu`26X zCaMC)TVr=&El%S<-#~H+w)f%T7~=L99_~$Xqw(RApJOduFn)*4P8#a zTnwZakB@8Q9v;ERs}w$dvIRbVh*&V*7@Qm*XG!lIXnfqQr_&RA!pD8VgNgWfC3ZC? z!N>1GqB!{Y@LxFi_-)}*Vct4>=dnlv*!v+AG5Z{_k94Vvk8eT7!szzpM7l`$qL_-? zi;w&9`#s@f+)#(Z_HapEOZ)FR4n97fOyc5W-=c5pYw-?JRwr*xU5;Dnh?ZY;ZUevx zg2uGLodqpV1wD&3_(W9LD<{N_!q7=upyi-~nLOf==X)pMWFlss*A}3)4(EI0egYuo z@H)A4a~n9C-&ix^()`Hf6J!Hfx7Wc8z+iDj&9lp`+O*& zY*3r{d2kE-{O#M9fTTD1u&K{-EbFqa-+;CoA5Y}yw;RK6;1l~0!ILhIUPFQ0!fyP8 zi=&T%Txb(Ve}wrY369<^sMfV|w{AE7h81WPN6!aInc8j)-O2~dLCbj6&|h1{`YWRt zuRSql&Bp|1RNcrbyyQ_T)-A>>U=6eJ7Q0HOqBelFspbgcPmW&#o<@v5Pf%!+G@8Ez z=XF;$WC&arG)fwM8i$k8qKuo}FQq{lCT?0zJ)c)s&w^c<`=R%Yt8u>LI`;sHv(Xa? z&76Ri1BfEo2K#@1)D{rEI({D9%XT{acB0ZF*!5do1@v6=Se)QK>?UXzY1K3&2~&z; zdXq8rP|#HoZ_#9gdt3&6AhL>hl+oAd2yL&SU7Gzuy)|?a4D-qT7O;STakbrkAvj6s zmz9jd)g>JMsYGa8Ll6Tz4>%3sBDIiKK)%aC5i`aI5|nd-R7804{w0 zS$p97>6j;Fy+Xc!n6qZQJ$v(4h@ur7_Uw7B*|T>dk-hUsq*V6o^N(O~tvwCu6hZT> zEIb|j{tS?h{S7dPy+D0or%;bk_}Z$#v;M66cI}81|-{U zug~5yj348OFKs8-3;7itkwHZ7i7;Gl)wcYK2mF5p&hsrBv<~;CIkSGK_W|2&#SC$Wcf z5sgPSkpSuS(Mvk#K;!-6GM%2-6W%w$vx#_rJy+FO1 z143ylv>Lkz>=uvrBNQ=I?Cl%4WPyYWkq{r!-J>1$@WCRY_TcZ)2HyV|Or#c`8F0Na z+%x~LZ-?iHhxgCy0xoqA!56%LKcZkA*PwW}eHMGtEl1;h_)o-J$Bn9c2yVm!MA_LE zmYv-e?xzO8bDDWrv$`N6kQo`@-MXQBWPFx&1LjN~L|u#P4d#|V1CNoO=8Mj)2FiDY zjaX*B2(soM@W>*3g6je@9 zcD(0urhOGF(5hbYKOAPCUgF%JVm5&1@Tx7nPSAue4g-x5;yPCu@H8|%bL_(*D4itgr z1dJ5e*Njs(2(p2#4LbDlO+SJe@HpdpI$@Gdi_JcdWEJ_RAoKOBuwJ^ zM+1bOeHyFs%|VQg*PmjD+h6+AOrQQVhxDJQKfTh~5A0n9;c?Fy9O95$Sd6Miz*0(o z3ccoVYjwN)f+k2S9X;wDvL^MYW(4+HkD`BJ0%L!IhB=(jx{H&REQvtA z>`$D5nW869gEyt@4bGtV5iZ{gTd2Kw^!4tRd~C1>C&6K#V+4nN<2ToX6Zh+S%aa{J zpl#@1?{TrtcfPXUA8DPY`Q9Vjtkp>II@jIvIiR#x5{p&F#3`Y*M z{`Hp*PEYJf|5^YWOQQaDCA7CB`qyn#Vw-yXM1L8_cWF!&Nx7X0ykbz|CH zh{9$Tuyh(tBMVVOzrxjuyZt$ivQ;6t1M4ifALTUD%EJX0kh-`d1@}NfE5K>8z^CzMk6-ZX=nr?j9(3E z;c|bL(}V&D$i1t7f|k;X&wWq$3p)EP^}5LqNG<+#6Y;n2#8^;Owj#2>9l}I$236=M zi+4Ht35(Em&?qmws8`CL5NMrZ9Z;v 
zWhHP?8SUhwSWX-2NZEiS{`Rfj!?TohhKt9LA$5z&4WzZcnJs

(GGE9H&;UU(WrA z-m~72melWFsBT6^8)`yXKS!+!q607w?x`K|ZSXH~!1sPIYA<&kB=fyDT;F?{`1ht= ziGKIeuGs)wt*Sr!v0d;ra@yti8XfD{5got#-*T1{oSo=*KThU94`}aazx(Yw$rF3< zyPt~*hV&#j{W!q(1e`sI6Odk~Xw~ojw@74PxO*3;6$-PT3OBB`*HViDxb2dHC!_^D zMAq29?1IF@0bT5D^@TlBmNix}<@=0kiL=kdH}QT(eJ};u267?JK2ooXJjkyjz}d4w z*9vEU4Lr`-ahR^kt&&9J?6m;5_9CDI`}e!}WjxNFg1Dwny{+h?kL;uW{T}^|!e8z3 z8`9faujuXK?e_ZLua#ChID4=CjUY(ie;?;>1jfbr;o9FwX5)T;e($7*_h~qetP zy8iGU=&m*Vn?meNuoi>&Y$E=}cX9Z)rN2=gs5j~QL*lN}$3%xa;Q03orziG^f3afF z0{;TICc(dcf1`)ba`CUwv4elVLAk`g(CqBRJITj!_*Wxh;9nwHBn)wl!dZXFtEuyI zcuL;#yzK>5yUVfD;GA_Bf>;y81NaNu>OVlJ+N?aM`2qOPl%-cL*xhW@cBzYWi5Bm+ zj^veboF^+crq3w>VcDm#;3#f8YbY2gl-5vC%qI_b!2J#d84_s~7a&n_gF5QLxecbF zptpW&;&XA=!82Rp^l1?Iouz;Gm1pI=^t#d2kq$rFVWgyBw|+_DY(R9dnnomM>mY^v|?aq3~n`oM`PIokIlR|5zmi;`Pd#gx(UI?(Z&@Yk2Y>x zo;}+5<@<xl1Z204L0^} z!}mA-8xfM@wiO64ral-Xu8-uARcq}rw@!fO;-o^hP;>R*1-Nu5L!?slj zFz=PG@44h^o3Obo{iOK~?Vn!-6oO8kO9Bb86fB2F)<8ow`AJSL$Wks$S6Oea~ctC zKZ2;(1DLU#af;qD&c2h800@vPw~RBb`SzjgCg#wI0q7^!!;=UvXdYDgM>^EnhgPmj zb;g^&4gc@{kQ(msMU)@72igpm*1kqI&sz@Z5Y3p%nhuJjHJB?lSAJrcUu;KqI)7XZ z)!%dye}G##QbaH~=^P<&2S%AfKCd0eHetR?@~gH<*EhmfD} zf9;<6-vUE*TG1c;zfkdiZ36#;i~h6xzkJ{MA5uW`|DsSd19xj_`ikqn{I@4>{tRPR zkODcMVV2^S7FY)Bd=!BBV>jjxcYLAu(TBF|7jh4v%NyKsSRf}*c)XyfU-JZbCX-a- zFdaClVL%pR6uXseA;GS_Bz7&PZ_KZk=y}Gp-nJhD5jQ=cZZZ2N=?>XS~R_4#G(>F?;-2L)kMi42x5?;O|V z@W=Ca`8VkDZ*;+^yfO6wbh+wq{G7Ub(!|6L@8;6)x9{%&q4|OB?=`@@t@ihB<9xKv zYqs<^3=`kFzj+W=im%VP!x8pEP3C**^FKL#9wd=Lx=tFO*y|a@hu@mMmu)}b{rxKx zv3LCq%>SS7Z?IkA)P?)Izc2s%fcN*VZQo^oFWz7DJ&gUme1G@%zlpI9tUQ^4BKDoW zL)Ke?;UdDPZ0tpQHjf)Jqp#!;quamz6wQeIu(G-ru=Eip^MO}Sqwnjha&{m^N*?YR zt;*r*EB_GtC4-DhL2mrRL50Y$KeWWbS&UED@eF4=VXI0CWL%bx!?ciNhpk7XI61h^ zwC+e_*-S-1&%aQ|TXSstL$QQio1M2E2Up^wCv$4>4s}hFC!xC4g*N}b_VY-JyT36d z5AMyzPzf<3r=6o!U=|ZE0Qhg$B{;?9PR8Yd}oG_Xqm5dgS zVH(?sN4VJ+-rWiT0z8Q zMsW+X{t~&sh>A|eJG+L)j_5fr=i(BwN5HA!2+nGGR!V~hYu>^z@2YDE?7jfw)M>C3 zGBL}Hs5+A+s~w-O*Gx6V$**(IxfqoG9+glWD@3;c{nK)_deT?H*>If19ZPb 
zW=DIa7*p?Nt~gk%B#b*7@eguSAJ~Ti&(OAqYr?l=h5MazyQKdy(u;2jcQQgl!w!bG zcUSktsvtL&6ys<}o)~KO`{4*l{w8a|k&gUX`VAdF2`|`*>e+=j21X-?w$v7_IQ0MN<+b*3={y^{0 zKF@)*ZCT0ojbz!+*_60ZT)){H#pVb{@mW$!(or;ll456wE}IfCW_Ot$$cpX8QUh_S z{@wI~t|tzfjWx?VrN7{J)8C;VsEUHRN%?fD{tQqcwjC+8orpG{Tuk<~+nvx^rMG{z zaTkuwoENg*#JQ7jzV`)MjLvr)wZaO`W@acCXn*%_}N2MePjr{dl!610Sl0RtUu zMvoYQjCWr!j>E|0$Kj_L0l6<#)Ca<|@A#V8VeKozTf+zU3P-NZ7HJu|1dNOG&e+)y zKtc$Y;w-%@q_Lo+b8_8kKaZ1XMXjIFw0k!cnmeON#B&zUr~NX9M#XNC7-;_2>Lf?` zlhiW_uaXW$CuiEGPO?Cp36Kj{%^zlx*^Gfq&T0r`f+WBbsrA9std!X4G%|3^-4}<| z2M@!GVfX?)8haiIVh`aTeN1ig@BGY)x zQeFl^ZF)<^csaBFjT%6>}Qes98)ca#f4 zhf8CDVIgZJ%zaZf7K0X`_ZwAf(dWVZU#1%~-jvYz3}Xh5!GPNY7W1udrhFo{=3O13 zz3#4lngW=6aq5h!zamv(BqJwiEr127a!2QzeiE4S0aJ%@x=wMR_REfeXegEXMj_0y zSQmu#3R%E8yy;hBpNJkYzjx;p{SnifrD?x0Q|D4Tn!7(F6LWYh*n*Gk0zQ8DBlcw> zsOyY0HW%5Ehl>z##BTC6I2myljI(1HNw@ttCV9#{X|T1*k(zbz{O%Ph-Od_Hb)jgF zOAlC!tfe@!xv_RjhhVhlt7u_LICXQVV;O`&e*K_m+8>O&;bm20++B0f+{!Q0jeBdV z<{DKcXj38G*+w8>j98ADdAVnCGkVR1eaq;U*4gM0?3`{KiSq$3USM3DmNPnFtcY*70vSP1#BMEp0|qo^MKny{{Q<*lL{Efi{1%gN@i$ zeA|ZDa-`c+BRZlHpSNtph`2^L12YQrDg!eN0W1})W<#9%!ypH9J&p2L!8m_8%<$LD zMI&I?AGc82)V%^f7qU&^ip;pyhAXn&=2rBUk=r`{QKz|G-R4ec>IC{PZ8qkdrG~A8 zH21^?-Q@MLwGg)ZwaKMvy2%$KfF=tuirv}QZnXIttWaFO2(5U>ZEw%LYws+ly+7a- zBg5;q_xaY^y9NHbY;RHN8kbJ-dt|z`1l)&Zjpjh@r)fc}v36ZrQEF3B$4jx(Dt`q= zH=Mc#M>iE1ch?`Zwem}&z__=*plYj8bq$~*y~r-d6Ybp|w$|dY30urVsysrwSGAF~+!$h9=|-8E<^m(Z@kV%FvC$40br8@ggW zyW%wbSdZrw>^HYJuVBAPce3Bm@v7f0X1}q!q^C4n;#UcxrI)>a<8;n`yX#A@-?}>T ziv2g+>Azt90!SXsf0Ne7et|C(|0T=?*3xBLg3<2hy7E6d5QJERJ&~CpM$86!4me!o zW!crRMwDGa@lw6j$js<03y?j+ozq1)V$E2D`}$!1GSIeBWz$1mg!`_hgPrxC(jE9z z%lC9G7a`m?#9l&b>{r~3y5`kXH`ODI^>qRmd-uOFCjzsKfO2SWrQQ#q0!5|Xev`H% zyt)28ch-F7NO#thNxZCBQrnNOV-n1x)?7VJ0|7Iiyw&!uHOTmnC?z&qYB9Q6@GPou z5ZFGi2EQ}G)A)T9iB?U|S9t{iWZ^7osZ*W(Pi7X@Rel7pO6E1|x|~(q2;XQD#9x`S zE{qtM18}e%Po&-fyfPgUU+~HX1R@>sRx8L+b~^Sx!-QW#tq?bGT$wW_jGyDe&>AOF z(XHTjQ|9q=cfBKj5GKVpCe07#_s=M_>LC;=~w19&+Z1+ZW8R>zJ;F4guAh#{R}-9<2pVf}-b-|8cr{1i4T 
zD^SC_0=+FI$gry1onig)`xuG2SfPo%M4SOidQL6Z*oR;-Q&;o>FJlbjh$kpIDKFT`<=e^cy{_vjOX=Wnr|_lJ>YAkyJtHJ z)9DXE`u#MXY5$4wTnu{r7UMbaZMT~q?dEio9?#Q6oME|7`11~^)p6r_3spg9JTF7{ zvD>%5<>uGV%`bjDPe-@6HlEW!UiNWR;2r)v0za7X^&lrbp1TfnDjGkYgZS~sv@me| zk^b9{=N1UVNC#f2VZ8xi2_Y$xw?kQ1$lx6vjj4@D5X`>{3gkS5@>U1)%b`NPgfJ`V zNmO=T>q0dh=WpzB(3-BcGuQjN`ZKhQEsVVB_DEx<+ap>{eF`;^!LjEF$2zl|PaN7u zf%oj^AO{35=B?g~NyU@#M%87I*7mNIZe@PqRz}m&QIMyYnl&8-QDENcrr%Nc_T|it zrc`5)_23@sevJg`4i!aXb@neot9B>-$tZl1FupRC%A~rYee-F;_!?7bUJvG%VpQun zs&L4<5u^G_yGC`bJ5_E4W2jM`FT{{^s%*N(J`1DoGSCmT9C~hr+Z8(xO&rxZ?x=1S zzD+i&L2#hG*5;^IAURrL!FU9mAc$d)U;*hXcchlT9s{!SP31;a5r_FmuXYpN+Ua2~ zg_!k*c@%1px2dU;!+Zh49Ol1c?DR07<$ld!#`y_OPt@6!plkbR{mD)E1S)tC)J&)( zB*c5qz(i85GfHj z5d`UL3;~?@7W8mM`lPuEHO-&&qM)W-E^3;I6pFz`$id(Us|`vvONXWfaKef5BjDOI z>NKM_=_~hR7|Ab$(I2)3x@bYtrAk|t)%S_CHySglh-rQaCmFNUQC zw_q8wvcstx%#OEY;tGWswJ1DX`e^_NjGMrkdQm>ZVQ%h@VE&h2w}5f}Qf!<}#kXhi zn?iV)%;1$Q@PdM24yM{n=r>`(cbh;^Tmz(GjpvHf&WrW;rSO6%N}V^gX7ZzX3!5JE z^ZlQZk==P2hZKhpoiQeZ)cOT%Q=!PAFillKN((uBL3B6d6K5Q;edY#`R^BGZE*|Mj zFe!%mXLDw9w!)o%Vn9y$AX}1XAT&(Dw;ToN=9g zkV7ttzE?tmdG!4&(4>-6-vNCGK%t=Tod8lUeZLO|BO#VAb}IaGx>MnJ`u>-&Y*PA8 z2XojDV_I_Q`wyI>knLt55Jle=Ubb=c{l-#!d$mqJ2g&yfePflZ(3@n=CYl%-{y@@;^$DhJac2#E( zu%h$(+|P+n(5+6t*4ZDtfeyJpYkEsPJ_8MwL3dJ$w_5%1EF96Wa zTOLH_-L(RZe8edym_Of$K96OdAheq9T<4lWFRXAWTpc2Zhf4vk!#P1m&#q8jcL^H;g~RzDobgf=grhzyFalX$F%CwT2!<5k8lDNSFo{E zf5tVbNFM3JgKh{S?;o zR4hzX+T2W#J{rrTI83bz)+y40`Aa6f@2m$v4+!Mj#+~n@0+d%=#7Bqs8=4NkjZ7oD z67h5~e*;gmbZcNv3OY~qplS8V38MSqT5HV6K+p5}2pe*E-lo8-?B3DUUgy3`uczn{ zN5cLSD(&+OYP%aw07KDd@ae_8br#PhFlLB*LZo{RTC@!i{%zdsnYuNU+SruNb(m2Y zxvj=2hv7se-Lim%;l-}sJ`u-!5W1#k{j;w8uaAXK-in$1hr2pSQZMTL~m#F@in*IxKw+2zw!sz2E z68hhupp@ZxTCP~atYXQ44<5!d8`mZ2MkVd@afUt`=k3b`~ry~nBy zY_lr8oT>z-U{yk@a?_!>1Bulz9;gje#z-m2ieG~YH(^PjO)4~IG*InHMv2lBf5+Q+ zYQ%ysDKT9rG4^Zux&$jtOxy!K$Lk?cCmD8>rLrXw;K=4 z1qRt;@ikKXFcxpsQUJtmVVRXXU1yJ5v7Thlu64vn^o$`1zE*Y;HHP8H&=!)!jNFty>3kRwdgfqh2Rye6esZa6!>MmKb*hFa=@UPA 
zNkmC7B_&#(8pJZ-9nYW#I6jrYqN#N8$-f1xGD8}bmLnXv0CkGpiWtF1Qm+-^$Y9hf zp39_O&!b*A4bQ9B+u_uuO^5jUzw91iv%R71K>la9)$1BZFXU7RNkQQ-gjM?h|G4=p z2?Y*N66!)U2B+EZh=8+mLY9r%%?+jQz&+uq%}rQj6}n7OLaG(I{B$qq@^9c=r9URA z#~;b`ZPjBU`F>yM^2f+P>nB1N~`|D9jC3v$u;5(4|g^C zwh;rFQRPl(!nl`~gA}!}?WNgM=CPz8O~Cp+oCY5Q|MHKHw=*S#J#Fw{;~wr8LD#5b zm9mFMRRX-clEeVXyexB^f9@j#)8PGFo9C~*Q~RG>=i zung}mhTAT31bj3hm=5OU&hQUm6A@j*Xf%ax+$`m3Jo(2kRVXJs(-*GRVQ1T3RZb&f z_yr4*FK{7iC;PeQ+4ogBeficadbBROgwZH^jQ+UT`4}0QzHosr+*^meqQ@bIMY9mE z=(BXus}SWY`UxgRjZ)oY}i}tIhqx z=hKii7K8v={mht*qKCl+6f+jQced1+*3fLmOvG^&5R}23F-S82X@~Zrr+o!;BsQjx z@I5k|NC^Mnp?%FLwlP>k3-Dxevn;I{d#d49Kdggs=cD`%A1D$UV49I`riJ|dG1$^+ z;m`pTPSt}naUy7f{+R5YYI(k*azNKs;5Yx;pggZrX6l5B4uuZ>@f_%2$~@sJ$AJ<{ z0tFyJh*TL$=_TLy8M(S$vY6S*E z?9H(sDN%*_mGRk(zlBMsBc4J6zW*yJJhro1n#=9KLlq2L;xSq_jv{5^izHt0ws>b7 zW0wf7pxb*?6ax24Dlf4GGs;N;|>($TPzJW{?r)brP zI=eGVe+ULIw&Y9LDBSrYFx+~}S`@TuvHwVmTy73x1!r=&!|a#$dJiT;{}ctx-Nrz&~@E8`W- z!#!DaGow-T4n9%DIz0Ku@G2FQaHB6=qr+a&DY|GD!avpO5)`e(>-ITXykan2@mfMb zTI?6EV;u21(kEUihaK6fByih;H3-4_H`c+C#IP89f>jX`V?4n+(kECSkuQ}XWxq)J zc2Jww7uC!GUntrWsXF3mjKP$7!Z@x}b>w4NPJvIZUP=)_E0|BNK8+mhQgCH#P@G&X zu!k^`OVlpEk}=0&H0;y)Dko09W-|Ts=D0nJhq2x0Q6)>?09@6z2 zSTh;*dzc$^72e(KRzXSEn~~ODDJa764B!-i^-F65d>T*0ByZ1$F)H?JEm_$QTC)D{ zEW}@`s;ebyfju3Ol&m|MUP;!0ND?Pmt6uR*)>5xZkgV6M>{zR(FpbzDl+89g1Y22k z08;d=R%B@nYWn|eS-RvqkfjIzgQUj)1je9+EOk%3Ax`zn(+tR#SSdI%QJx+SZj8~c=*VG;rCm%N zLh6?C^dr8?ZYfV+XMAOXJRQhNI`Z^Ohzf`kuGuNPy5K32Q6bRH8M&Xj!c582N1FZe z^iZg06Mvkzk$>$@Z{gORr_W?3bL3okG+Qj^3?(j)IQP6;`P2)(CM+O{QN{+q#l$e zToyM+mbmKFnU8raa1rTmJ{d%@zzG{%`co{hY?Ge_x=pGOuJ_d1p(6D`lG?r(6Mk!oSreuk`*su*!%~)$ zd!4GDsMBo%By!s8h4A!^DxkFo_(cls-Oe9Am_r=MU8!1XwH zz=x+ZD5y%}>9xe}fzB0@ilf)3Df2ia_U*{t5!phoNBrF#;hWaGM5O5T=S_Zkz2r5a z*Iy)~*K_fXBzdozV%qYNTa=Gpv+WInLlT5D^8wuL2XeRH zf~6sbg={DHIy;Bab7+7-`A4#RMX!%}MF_pppJN^UO`A>YE4rHQpwp?1mG6l-dZXYq zpe(FFX9pu=(quW2n}oLpj1ysWm~TcphOBijG6}Z;Z$?otfWsn-_w%?3GACVa3C2a1 z$$WFMI6SdVxwgzN>w~Sp_5+kl5!|tPMvByPt?U8r3O~!9GI7GLvzC6ApJ0ZAU(BQY 
z=3OmyL|BmOrhT`FiLT-Y#G31M>`eMUV({%s6rna@ zL-5=rGb;QaIiOO%5-(dE*C+mur~v#Q`R#c+`GZKF;2*||jTbxm06cuBY$Q$F!$n}C z9viT|+ip5GSV*jKhnp(Z*yLMEGjDdevJ5 z1)Ue}P|_e`_T||H6Oj_OWG2EJnNT2*po!m+Gn0|gJKUFKVcIV{q*hK$gK!XYMWh(I zG!6~aGQvWz05LRKDr-Bh>eqT3#dVf zoO=MJ$H}=<9wBWNqWCjrtaXKtl5_9K)aIB{Z^T@Sd6_0h(0Tm4Jc9EuD0LQq7N6&7 z!J17uccKf#j@C+Btx*Cv;9L$P*fZ-&t|hWtD^cQ&KnrY#obp33r7YiicG zt)ZNA@jZ@8ubrClF%LB|jo(*U6Lt#Tb z`5}nD8c#pMcuX?n8i-o>iUS$a5)Wi3G}-1d$SXNj$Pl9B zO)2jlb!1W;drE!d*i*w7Z!ts)LWOE16D*MN38=yP@Q-0uoPP|@l#mLuf&BB(Mk9VP z6-6Qclr8g~KV!6~%(EHcL&z8mtY=7MJ-%UU_-FqfN_y;%LYU9yR3OF&Nq)_DX9%kn zxZm5Apw-x!iLClV(eP9eb~EY_$E}koJQRJeTkX=f;?`1`Fdc54k4X{SdX~qne+Snp z6#aKjnBbOF%{70dZV2ixAh+JCS-HDc)X%-5eB4@KUm&uhCASs;mHZWRmR*2?9$_gj-$pUhwpixcM@jB+u$9tYI7mH4i~}cj4blbFyqpuv@1= z19(&o*AoB{v(aFP)?S@cI@I7PMs_^UPQy@f`!-$fvdw53JTrnhz3th5Vn@gGdJU6m zUKh%Dczq^cfOi$;#__t)mgaR*t?+uYPQQvz@HR=6->=iZo%f_YKd;Mq|K9YgNHcEw zRZ$MF-vCS@ygri|d#XeYe}aA32tThckYtM2S=l&VmzH?EE_H8Pp?D?d5o7{he~0;E zDwf*C@wyk|=oI%T3Fz_qEAp|{@FV(gGjJa3!#{E^E6zWLYb2z?Y#{$UUT?-PregE> zr+EFb`!L!tF_Ex*yj~$Gv{i>wbCtA+vG!kKx{du_^ZQK1`1sx8yF4Gi^SymHs9o*} zh?|;E!Sv-Cj#3~%;`see3L(Ys&2M)?<$P!2JRx(a!|!im=>_~wd7@`$kKbS9iV*ny zFq-x8{JY2)YNFuxJ2W%qctu^~73Jgi68mxy2`%}(1pNLSCP%v%1;t>vS6~D6upeM_ zzApbR&!&HzS40bbSImwh;ppz6X!4WRSp70)oxrCR=xxQPvZDXpK;z-@^YCM=L;O6f zq8#3Q6SDI^1&tR<2YnkIf0lfCprG-w;HMVQn3A4^>f+*)Kt(dCB*(=Wd>s_#t3Yib zV5#5t0O`m2BQ}oto~B|Y?{7!Zah!JqSW$D{O)C>P@0#}*;c(t1m_5OHa$Hv;E&|6=yQCYN_>j~v&nhg2$B)`IHiwb@+~XeqI{eO{a=`~CFhC$e?M64EnwAHjUN~1 zFnSIlEQ&ai<*Ond^NJwnC9_vq35F_keI|Oc0#97VvwN}_uI%GceWm6dgl_y#(en4z zUN!u8=<$|nUIFMa-d=SnhNh*xYBVM3Wr_5fLIEuF`WCW}qt~nc;Er(pGM9)HGW_>D zetLZZb`Q|&FOa#ODLnh?owSL7UN&gDnM{P|(CZa6n&Rb7vZ*4bbG@SC==G-ppOVvS z1mtd?fP&)fRb`Aebop1ad{xAbMz=L>&}(HaE+ht{!{ndl*jE$D*1iD^P!v1j!T%h^ zK0%@Mtqc=#t0{2XOvzz?8zvsU6MddYu{hr>fnJ}aq`3mb=2Nlve;LGJcVC@8kZFJI>|`WL$V=ZUBky_#MT9=%$N@)kNe z%#@9S1kf!7_W6+OiX!JDBr>$X1?s~v?;S9QnZIf(oGwqlo!^UbF!?Ze)OvtCB420& zLS#QV%$yKK2Tqx%Fj9;4r~)6jKADnZOg#M+*k@v_#XB*M{u(KUq@(|a{Nv>18F#rO 
z_J=oIqEPgg`z9kC`nv@P9rQ;qDsg~Kv8*kML);A#jmKbPeyx@&yhZ3XD=Z$5h$ z{7(`b`aA+$0+^!+Fo?eTE(wsjA;q9aZqAyh7n7U-e|sPZ@UOo`PqrjLHDCMb?@3Am z`cnGjUx7UvCI|82ile_`B0HhK6Cw%p_uLG3-A7UZM92$4#n>=XL?0HkZ*iXvhkdh|znwy)OZ&LJHt z`YZM_kE1`XFJWnk>q?ay;hE3l&sWedQQgr5D}1}}?{n^-!G1`5b{!V`E9Z@L&M$$l zDzBb71;f+ygtuL~EYBW9*`g}>!~(YmR98PG0LEeUY5>`$nYIP!tb6fSx??+gc?|aQ zP`!xD5I5L`B2QXyvHic8RqejWH&&EajE5Pd%g&^{Bn#zyHL$klC{LMoCf z9qwg_#k6O12VBlCzb58I2Mj}E`lav1evmwlN%a5rgFs&TOkj<)RKS#VAD;YECa}Lw z<5`u{UHBa2|8%cevn7V(aH9M zJV9YM0278|pP|3d9~c?GAEc5|N%n(W%lO3oAfK>Wu*A}$YyT05$pLfYC>zF1Bk>kO zw?1Rx+yn`Kwty~2!as!x5|e3b`#~m4F|2SoLMWq2_Jd56X?3mL$Q3}AN63bGJ(Fgf z!CF9lbh}%LUS1{C6ruNnWCI1Xm};3145(!mgq?wclI;iC@EgP@?gv?pNK~=S{UA#) zhqzvL|EP8QCUHN=cFZ33T{7?Cegdd%KK$4MKE~|_*(ezM|Mr83D8=rU8ZBO@$BEa0 z6&(2%KFr_6evsEEtD+ZB)_dahyVws>jG~ox@d@zLcd1=`&-R0yHwk0V!iU-Q>+tIr zVTyoc`$2|+D_iUb`57aV?*}=auafKs>B{)T{UEoqf{y=kK7d@Dd`*X>(em}0S6vyR z5OjBeZjO8%0z(SqYis*Kj*(($`8Wb0SH5P^`~tZ;jz)Jp(}^S%_}nBobgeygid%(6 zueeoE(pC0@%o7yR0)RuZ9)(HXeh&80Wcxv)w;?`pKgdKxQnKz;Y7cx}VlDQAltE#1 zCF@YHO7Z^74`3Rx^J$w!v+jaAm}EVhSN;$`FGr3znV{3o#b_VKqJ|7>eTdWf?B{Pq z4nVSF%!rQnuVEtWu5`+#IN#3Znal)Yi3wm3y$%mP$LQykd~%Vzgq!Xj;}5mzu7*Km zT#YCH7^Y&*KZYw9z+c?PyooBtbEVK5@MOP)CwkstgCPtBiK0QPw8*6BbaHsX7k=6o zo~gs$lI}XhutqFIz7~JcH*Y4rRLN##pVYfa7d?(2q3DVF;}Yj%WHizjF7|~Bbl5Ap z0x>MwbW%yt7we*J)W%ozznB;`%9N+RQwkraI3FXtLp4D%ZT5xN>9ALHHgmxjEQEgw z#jpLfBXG+xp^L!9z`9O86-;>GeTe@q#eAFf#Yg4K17+c^od}vxS8RF9{E7DMHz+80 zzPoGRc}08;>=h7Fv3_y5w#0sok%N%h#Y^PeY7CD3FkkUF1qTPeBHKCb2;(Q}{UasZ zgNAkT&KNj{=j|6E2;$`Lbh5v~x3TA4;iu&98v^Pa`MU%vFXV3zrD^;3k2Iod?FZQd z0GuWE?-6q4?|oF1A)uORDaUg>e!r zpE9kiwGR?qPv5W1{dw{JMSUqFP;0sq!Gqa83o{GkGJ+J8nH(l|Cgt)ms3PSuT%~m+ zSaJbg@{jCUBOwvFi_*d>-N5T@9C6t zj&pwh-)}y&_kQ2?K5MP#`97ERtjo*G=$7ykwJg9V|M-1Y{U zt|s#29^e~nAx&+h>H)UuKD>;o2iS+Z7*i=nZ z8>xCa3!_C@`Z?Kb+{VM1mMjU?j(o2c`M%b)E|O!(Ar(B~?YUOn5>=aU_)`Q_YMmio zZ=ESRHlYtbqsy`=-h4fV!nokrvaAujNhLHVQBbrfiyZ4VPNFErnyfcj5b5{v`Wc=7 z30GYD-6rY76wp|G$OILXlRkp(j=**PvFmI#|M)!}D1)4)+EN~(%B+2Eg`E0MQ)P^- 
z$%0Z8XLpax2i2oV=@_@y%Rx~NM_#-9=$O6kE_?7IS53$4^$3Vdej=gjF?*fM>!0&_ z7i+J(TYG&mTW1Y{>9txcQ1?roB>*X=b_boTlXVy|;#kVY!0_Bsdl z`dgS}>5YoYK}Y*aZdiNW0j){0*WKkz)5LA&BTmPN(A(>SL$f0G8l5PWF06eg+s#XK z@M!Hd=M#kU32Sw~N@vU~^j$@;Aqj5mb7BP+cNZk4&#aM+%e1OB>}RI^)YA#&L~^a9 zI$ju5Bb#rD%TjxvrQ2C>5sx+cST8lu1>t3OD~mpX6)}7yNAKr^NA|IgV5f!dDbW|= z@x7&eP&HqwNRStlj5Iqk`c7lR{SKHDxtnE}+9&4exBP=r3winrCh|9sqj~yokuR!v zUiZInbO!0?Qu^<3q9TF)FP_ZMK!(DfUVNGsmPf(j{E~ROo>b&}2wd&~dAFuXdg2Em zwLtS%Y}=m$Q6%hNwbkQ%Sw4%yUbXl$C~nplHTpQre#f#$Q`lTE67xC#hn^-f&-$#f z)DCn)y(w)We)CUlVdOi#lDpo=evjBfT;UV+d|FCvVeOkJs+8=}b++n4K7^{(o8nYG%_86FmE83#`#n@$ge$DNfh(m|54_P-Jqr&;+~o@_j25lsWmLU^pLlpVKKaM* zOUzgOz98~_s{J0Su4b!dCHymknY~c8ahIcd{ju4jQEUU8BbC+c6x!aoX_I&QH!qIZ zwHsuRoIP^qozfph^G9_MyT|wI+N}BBwB`9Re~A6>EPYP0KkN>9$Tz%oSj-=0^7>p} zzsLGRcb-TaHWB`C1UM$Yn)FFz|NCDEqCEP;qhI!}$oRu?Z(6ISZ{OJit`z=ok5qp+ zmXynGU^O&Si}B6-L-l{KPdS_iz~?M9z~+x$;j}0&<%K5P|B`Ql@rIf|>Q9f(CYf_> zBj<@My%{#G2b@JYqJ8Zp`CvU^JG3Juf7Hx0Zkzd|CNRI=6Yd?F6we=B!McCttOWXS zK4utTlIgzp1yyrv!@;5rT@rm{QsCe zozMm1-(GtUvq+Kbnd?ddu;(b?0{Fcq=!MbGPr zU$v*7J@1W~v8ThZL0h9lwWm`MZ*Rk%KBU#qp5B9R-kuKNlo1ThawgsJ*^{B7$;V%A zZP72+`6iqanh>$4O*CdnXHV~kuaF~9MLK)B`5IhL&c_QbqH)`_r{7}q;q7VZNY9nH zJ>7$KkLD}{y0sI!hxd7I_xYlGk_|AfSjA*8z$k~<<{!V0&~G~AD2!!$2C)Fl5er~E z(uw8p+0lOf|L}a#i>@X<{wwQAmj2mP&G<7Cvi=e4Nv=fICS?7kyEULB*jJ4z+amOf+1I+T zH`c!1KEhiPV_%CkfOYwsE095feXVLe$q8Bw*OMH9Z{EH(VBQ4=s1H->_^hkhB)gtu zlLoJ4@@Qw@h$X{)BO>{mOj)48M3Rj~c+B-A+3@!AE~XXIttWY+2-lO>liZDqw6Z@l z$~C9prR3ZHhB2A9vX_TuMe;Y9%-_5KcOor0se$f2slzv)Z{PNMl6ma*|IrcbA923u zy2}ZUlnC~ZSWnW+R-F{V{t@d*s-bFQULVH@>yM0J|HsyotiKH6PmW;SJYzf`@Ec7) zy7eU0p_M7?N!GwJRk5CAA#bHwPcn(ulh>1+)QMVb^8w{B8ZrAi{z{@ZkNJQDhI-Rx z>}$abyx{EX7Njv@U#nVAGM{|Ve86;k^Y(QXlb$p@T}sV#5g-f7urt(x58--2Vr54EY_LqO)L;bh z8z=iuWK;-Y7h*a{p4ZUJtU8JeGHThB`~p@}@}F7SwDBV61qY#d@PdZffWKL{{+K;p z+)M;SXOhWUHW-4K3dx);@v<j=n`?KVGkH65X6t zwyG+d&{pd$Gx|TlF2MBN+Yucw_Ic%_usP9#Kt9$m+}Y=O@O2^Mql9V8u7Rzle;fsv z*o#?Gh>I0v;Yy~Q&MRNDEwo(bj(i3?rN!hg>PX9ECbQ{i)|EC*HC7AHkTa-dxc+Ge 
z-`skkx#|Qm`kR;1*|FE|3>4^RqG@+jPl_1DT!E_PS2U{pG>l4Oo9lk!7-ohUtCWAp zy94jal!K8w;){>wY-FmwI6DLHhnAl3N#)~lMT=~_%oLdrEpicyyy%L2YKx4D7CDhc z9_JOyLMzC#FI3iQEajwbId=kXReVxWZbjGd5idHp^$l#h?JCO4EtjCLyQ8n<;5A5} z>H1(jKRlZ6*JLWpP_A5s5XFSU_kkCGWH|gH4a(7Ng`2C+emij>t|&i6`LtmlLNLW^P)^5Q{W zswQB%eHbq|g=~RjEfg~3G$ZHP)@LFY(!R|~J%d4~2~>Z!s`fyr=%a^xMJAro{ljzB zG$d(|2C9cb3uQ6_lkzg7CLk?!c=1KXFsOGchEcW`g97o`{>{|&acb>B=`hQkBD+ZW zFzAPY;|6?FIO3LfN|eSQu=sS0R`YunpFW~bnj|e9kt!No3F8Ri0hWyL~rF z@Iq=2k>KsgBE|)^J{PP`K3+lnMEpFE0vbCWcHq~aa=GTkK!YCH5yR(NJNzk<8KCZl zSc=b!F7T3lYoLdX!RMYjx^Va$ibN6c$s==()r#P=H}0uDw|HTEPKC(gQj-e^9)Re3 z=!ON+385;3P(5!sOOCpjMUpT|Ixz9Q0C3q46_tOf22rV(N@rf)O8Zdv} zqu∈QLMMTH2yRogPa{PVT#cL2&aQdd$o3*E_RXkIyRHG99y>_7|EQ-MiXB8x)r5 ze4|4Xa95YM+(d_w9Gf8rsMc4?Yo5mX5_T$bMy;;V$&^Y}{**O3nbHQ1 zMZV%($!SJzvWU?sSk+JiQ?AjO5{SArI;g%>cH^_p+v1zS_~;3>)e^H>CuWC1WrUq~ z)YutIuE1w7^UT`CM?&qDmtdqh5$JrCa*Uf@2DNT+W}P03O7`mewKDXh9( zwm*5~d5)pALc2MF*>}zpWR7F#D#W}MPs(>NF`l8bbf8-)MV`hB_orv*)98DeMNgUX z3qUN4r`?f>(vPt}82ULesFJ=h5REZ(0-tSfi;qF^z)+l@;@7L;+)fi16f2f&P8J@rJSh5@CsVCf-PvSeBGWkWICE}BpqQ5+LqM7apO%VBx-UzgJYE2 zmhf1oeaAhn+~hT!qx>`+PQ`Qbgr;lY2no+StkUllsC4jDbc9e^d^($s^Gjf5l&(f^ zhNijmJg_afpss;4Vb@rFTbYyqdA4!&0$J#?o}r8uluB1VT-}zx4CMnuoxYp%1mgLQ z*T^TsvcIwqRpvVy$w}>aKM&DC(Zt#Yqya_f<3gH#ilE45GDV*7f@44NmM6)ysv_cIfI`Q0@ zZrNQ6N;oDCM|4|oZ92!xuM=7q1J}&Qlyw$dEz!P;f5<7C=CDrVU~6~Y3{*DW>!?J* zl_fu-kLkdbCHX+6e8c$vT+6yo=$OEY;{h5Au6G0380-GThhMPew}9mVT>9QSudkke zeOEE6wfYJZx)4mXdjn2~fhH>%Wn#YoHwh%ZXG3aQ&w{hLyJh#}iqAfvfdGJ{tShk$ zed%o9=0Ev1TWx(yM^CUDs;z6`MJh&1ImHD1%OMu$SvBb#sEocR9Yb^E8C7Ulfh29s zk@WzwJOb+F`Ie#g&|#q;UqXhKp*IDxj-hBlsZ8X<7uoW!pnQ~}P_3|D*W$4Xe)3J8 zJj+$eVO79Oe06NCk!`s84-ktcz5X@XkZ;n{K&LuYWeH(+SFn;FQ)xZ8;jucx;| z`SVad%H7t;F2>(4#o!!=pkbE^!bW!z8r}0gW;t{pE{BGUd&|}6nhDq2g5UqGaQ%Pv z{Jz0wg5#;-dajNqiz3OPVGv2``F$@y4a%fP5@6^qwz zgE6TpUZ0x}Kz2bR;^+5`6Bf;q^xbrMP*2sA~S-d{r^L&OD2)NfTvZ ze0dPyE^onQ#?SAo!OO!n=q{7>3{ebT2ZSmtUSsn|w69#A(3P&Kh%ps11mkg$fBep& 
zKmWLIZv;QF?2Ff6cZ+mBBxw`2+jouool#6_YA`ID<~1E(I=!im(M-r2>Q-KG=GEJJ{QhiFr7|-{7ZaV%~r+Y%~=^xtD0wOp^5>pVYmyi?SOaWakL^M(d?aafR6i6a%FcyVK zvOk}#Zwuds!VXu6m#g4M>%{xAJ1u0D$GkO?%TIUQc=z@=H%fJc>bUVdxJqselA$mR zmK}W<`eMGMt|xLuAQE9mM+QepTgT9)*8n^zP{O(+lk7N+(gQmNYvfLL92H1L*wKT} zX4=ACQP{=fAr%F102z&D=k-T^YE#DG_mGMrOEzj6t!jXr4IMN98PhY)lC?Uzb}V@W ztW|&{)PCygXm8S#LZ#I5GztQZ2uIpLnrS#P9JrEGl&%NJv6LV<(vPxN-0mAlMmTac zpWVXr0SaG?!e;(Zz0u+e^AmHyjokd^b29orr$+M3x*Vb6lns61Jj`re{=nHA?bo z=a*pk(yDezULCx#InfILVlpX#x6NFH9f6!zGH#*b(MW!yO!6U>^5dv*c*Umb)6C)~ z^5+(QFje~3q!C{m46I=a>^OQ95TkGHMgCoeVrb{^Z`HAGpme#XWJi0aQh7T-HYsC0 zT>l4PB4?9&hA{_S21yIT6C!*Ra<^l>>_8Nz@>OdQZsEhb+tSydv_J20Iqf4hz0TG5 zw3(N*nL%{kJyYBA;LF8%iD^AC9mJ(SG8)#3Nr~2pubJR^Nm0Iniv1T2{oOJkCU}sV z7?a%xLiVYN=5va_&|^BQfYKxu-tI#?Q=xQ|fl}`-JA?p$XotX7zApq|@;S(e8w%|-lN#@N%v ztfyP${-l|wb&#hO%@<_J-VnD^x7ip{Hs1L3~sx4I^x zfXSU^v1mg+Jkyr1kMdD>tNDV&hsf}opw?JWzGs7hSZl1pSb$I2fU#Wstr`W0b?w{c z3jN#Wa`TaC=!)y%?N$X$&6xMhb<}s=3?f=ktS1L`Q0)h@`|t|hdSmXGH(f>Fa?^^7 zG{ReM1}K5pTW-uH_a==Jm|hKR(HuM3$m5Jk{!_1-FJ*&%*W9-`7w6wR_r0nSWR*Ji z4HlbpK1K+TI)wTY#?0lj0VuZ)B|a~{h_`u?jf+G0yetENG$$Av7xrYhr1#G~F~qkz z@Azd?LPwS8^qZ+Ok;HlY>7XU!gT=cA7U=tzK$^IZGZt3EXX@4Kark8S15S z7jM+H^>;`8wof~SD!}#;!H8ACf6IDUFxnL(jf58iUj=R6vU@qYp346Y+Q%K@|Ia|a zZR2Bdyo6)4>e}xrFzfj?<8(oEQ{!axn zQ^FF`k^P`8@)|rPws&Fcx_ow{AW4x`u^=~_}*EM+USEf#!bAzkSVxujQlp`ZmefMy4Kdju`*aTe3x2dJa?rN!0y?SNirCSXX$Qhp9N4DFkBicAQ&{hc6g}TK43yR z7zdd{t^41Rdr4z2YJ|GovOCfj7jXqXX2fy!l$jBa>Uw!Y$BuKlI({4RcFEBcB!lbWaMGG9n{X}0!kM1 zMEy=%^1)Gf;b`rCv$n3ycyE`|l3fNCZcXf88e%UUPvaQ4z4J@36zo&H_`CMj<#xRA zQ>n{OAuhh?!u*oIAh}aNzxkKN^-Uh5q(%NcTZ?N-3+h+D(z>|SbEARaxpbxbVCZ;Gqr4$}c&%WZqGD)9h;PcQanhbidl9E@`MT#^4%vv1w=FEa;F@q)R7RqhBH+A+Qc(wyRHPkLLX;tspTI$;S_v{m@YxxS7QXwJZjO{L;XE$>A5d-HJ(l^b&n{_Vi-B-XtuT!;ZRXGzs12<&pE1E#yveW3xXJ#x3Z`!7gpS^ z`nTPwxBQY``S}Q*5$9?e*Q`UHd?>2xS6W2PJlti8Ut*TH$4D}76|7XnQ$goV{ZP@` zy!$se5SuBag0{RIa~b$=F$IDBBy?B(Ht%GX$bWoOY+ECS)B3G5QoL#iEj0Bt0jg4|; zBkGdDsHXfdHJrV$?iRZ|xQpZt#h7YYnVbkrmUp$NgGXk+Wp~)|@-4V%$4zZR^{}h` 
zZA(jKFe}NieHj3tM&~!SZQm4}v~Tm;C1%9!TVLvLI{UT`5dc+y_AP3h5Y84PexzyY zk0>GvyHzufs@5Z$SCBh%tN$4FAVX~4Xznw_1HqvWepNd&-`bfn*#{a}0ZWTuMdt{o z%a4fb+?w=rXm{R&0vWwL{7Q;H*qz>*DspybSMu~0%?nK*gk{1oE9(#kQ_$z?fU#0m z(2o;yM8Ry;`%1aW(|_>_Pd{Q-vSc`@JxMFiBdK@k<=NWEbJ3A35zI)v89OA;&Fhoo*&0qF zF3-a>Sp8c4h|2Q^ni6Hmg_nDN>yqu2$lgFQ6fwrQ%>jfYc^;`TKkSK)`6^{U;?>FX zS5=crdDzqcw3DYFljnTMC#^h>%!c6JLxbb;%;4ph=#qe?omsaw|FJ+mtvq#nyVk}Z z8#DtIjXM@YHFZ>5JFcDI5amWY>)Xzm(4iw@vQ%lB<)c)nEGp<5gq<3H;}_1E27w;I zD?NFTXdk#FE-NLP7%edE1r2551^ENrwZ^aV5PABYS6b zRip)nIHtUMiD!z6^mQbrf=C~OUv)}qM0z(pE!etLMO!Ji&|^3qy~xUWwf2ayWaB_S zBGN2579vWkqjv*r!g#-YlXa?D6zL7TT+7yd1?yIkuI=bsuOf~5&-D7~UTQ%ZBYlGDnF#3wdVziHL}}&ra?&NptTsc zHN()4Ngj;Sj{9y3MroLi>bI(H+vcl!oW$#y?pL0s4(xn64RKWlxGhC0qt~juClfx{ zhrCkOf|C`mn41rnn`WTKn+ti<4b(FF-_70w01q7K*tg~)3Ooc&e@5=j1Hf?jW~+nR zB|=~d;s$+pjg`9&;g}y((8+oD2Dnu`5FNtZ&sYO`|FsIRQtsez(V6ngRWU2&mjhh0 zT7+g9*j%oKrLh{GIBXOK^Qk) z@pNik$HS*D^gL99ayF8ENq9QcYOKf8O^94l@U*LS44JYOZZHK;r!d-q^d3=MRmw4p zR2`mnw;+3df7h&v3w*O;c>3lbmPmuAJ1;g5#PO63y^(j(vN)c0=jBmsFj`cNEmAx^ z5-Lr?lkR_p4dM~z%xpz71|_@r9Kd>NK^#FQ2hh!C4e}EyIe=i2_D5i3mg&9&E5M0b z%!rXkS#4+Lb%Z6ThEJXHf&pur$jfazFt4!X%skxKdL}+~{tKVAwP+$QAK!Hv4UcK^ z0-p^mnJoL6X+JeLfcwq(TaHsO$MX(1|2y&yt@i}4ZQkLa{E`c?I_=|P&R_=3>%LbNdsvZVy3JmHL%viF;pGY1&NA5(4X|m7XG0xGQw)#T&p6)a0q>icA)7_U zM)1B1jYm4X->K0;9;C2TQxBCkwThgV;=PD2_*$3%gZB>*KmNCtrN5_r`aR3iU(_f6 zQ_Ip1*aHCZ3E~#Vo&Od<-v*#pAtaq&L!?IcBh{hFv*u=h<2N+do)Q5Lmf{N zcL22LE8NjA$Rk5SE)YGJ=&i9A8wI%zY@(&nAm=2Y2`Pn_Qi}y1#8$0!roqFs>T1B!&$8G-+L1y%S9ZiseI%v!&e2cB1 zItgy5{AzN@^mnh&Uz`lXr9zfFrE=Nc=o0f&WYM{Z#e%Y`U9K{I9$-{9UHse@>Yt>& zCNnX`jt0`^4J+jYx^5yfjDOzPf$`6G3cJYo2a-%0|G+jn*qQN925aQ_vhr*~B80!K zG>7PNOp7rRP8t8;8ku&0f)C>#j#VA#9%EFC_`0F5;FeGUF*m)~jej73ZP)fw{Eia&!BL%7acW!d8Om+*dkS)$m3Vay9^~_`gp|H?bpX$zfRJ}LfH7YJ{}J{?Zl`0xcN`6 z<5!;Ld8@;R!!?QO#Fq!TC+lM)=5ryu1Y3>PA4;$iD->Q0E74)Tg0I{73S#=WR5dn5 zcr^bfz;ntRv^lPib$R&>x*XL0C*>Id`1epjTpt&3zInOkdyBci?M^Om`-FL4%+zot ze&1C~jB&-f!Wt&^o0pi`E-_Enpy5SP$S6>riRl^rmuX3zST-}wc4B!t;csNFI-ElX 
z!MW*E%uuL+GLOtnW7AduQ7qOtkz9+HoG;*+1gYLd%io@~Vmdt{L zD@Lp4Su^qiOt-_~dBjeSUA2>t;-{@WOcaL;H3#Bw_zp(FjaJ*uH`6gcQ!xoqfxN)v z1i-LGK~X7>ljq=J5utdyDtV>s>Feka>WDzGNX}DBlLEye1mq)uV>tuW_+yPgyHp19 zasyj?A0sJrVy{qvLh*Q)-h@5T^)~vxv(38JQZv}VqgfQucU4`TDn_2{gDDVCwFvr3 zGx~pmEA8%C?WVQynT;#ljjH29d^X|ljC;PtzcL>^E*Qw=PKkAZBhyl+!X_jgY}zh? zss0tbnE_o=qt7s=06Sg#fXW26LJ?+ufJO%Wv#SNma8n{8!)z;5(=5@O`fF3`w@hBIq)b4Qt zUyA4ks5$7aT!hvX|2=W1HrFbdV&O9X>l9(fPA_et8It+-Z$LU)tvs+ z#0C(fl)^QZe@YJq*u6zR3B2z?_-V!1!2iUVMPz*1F!^myLPRmU+*xl8tSllhji;5enW;KzTUyCNYdpF53zxLr?HQ^ z9tmT5-G)#q*U&M;?%^Qe;tu5Ow>A9WIxp7^;B~k@aNn_-BE>R5%{Uwbw+HLyN!?G8 zC#2#HvYz=VlDme1I6gOX*yRkM_=Fx#+(YueYT)K?s}zAQ_f7SndW zWBGPbpz>6pV)V!!UiAnVk*Y+%sYp%YAJol|8DjY>evtDYYJ8nXeZMl01!KbDRWI}5 zHn#lJC?Ax^jn}Z1PZ!Pu5#~HFx=JJ$$;VtUk>`O;!?@A%b&Y(=pvW+Fj9%y2%TOHU z>{5-GhGi3_w1_m3w1i{%yYDp8jvqMG913Zr`_yR;J{GDyG%4($+8b`M;yLM=Y1&dm z4qs;N!cRMSD$P6<3qs{^k>mhV=|Ole5G;p+x<4LlA9`SLxEmm78@KsX*Enc~;V@cI zDsB1jw@kyK{HZ7(INW6r5>}tUG;M-u(tw1}l-tN6$FH*q@{;OPm>>lO<1dXDLj+b* zolYQk0`J1(;47yN*O+(<#r0ag_huSh;n*iGWycGBtZQ=nv5=p%te;-ogl2yVt&xr! 
zvd}r!UYNtN<-mQLXa?GaEw#`)BqiVCGaFTnY;5wSYAE6#PiRsLPsqe3XBZ3c2Dp@F z*vixRdG5=*c z0YWVTp&0)gs|89M8xDZ#$l<8M^M615ApdW&bCUy`Rtscf{4e0ME18l&@iS07iT^Y$ zLc0PZts8W(UIjC(PzM2`v02zGR&j@I0k%lY;@?i$b!7p$!=7s9wp5DLLe|{rS)pbd9!-VFPYhXytM5)-!=1&3X_MNDgP zm$yb-?{hRev;xNjApW5n1;h4G;Lem6)F4dG?!#&z)_birIV4cO+Ecei?R2u+ism|Yg8BCNmsIR;p`)Z$IBOd0RhPEaBA#vM}eM3#Kb5BMEnv2%C^V1^dM{YnX z)L*zB&}Be~X{CAx;|qh|H8K$1DZixV@guxQm+gQ^Pf${B4{sXL$~(Urxh_WwDOU=R za%Ff9d9vYgd5!@Pti9j;WM}WKGn%D|X6KB4gcquK&d75GJrd-3x+=m<`9uwjeT6XO z`IgnEa|889Pjb|w@*IH%$p8o@)lb>CmwXOL$X`)kdA5}&pP@Y86!Yg6;$r@dTvL{&Q|sq}J9 zED>36gi2vjs1zoJN@-GAIp%_J*#R$HoD?xr<^v5DQsFtHQ5A+$m8&_nqiv5!9a1!U08;!xm0CWwz zT?xaYmKFwB-siZ9*H_BhI)rjTpSPmCRj|&;hJ8tn{wSOAxXu;4Q+dCyP9ZrNfw9FW z$gvwJBkoij*q9s)I~6Brz8VtJ4KBCB4OR>uBsTNveQd71nkA9UMzNb;90U3n)kl>MCz&L;-!2UUZdU??On$wzhMi4{L8C+fW4)^=mghRsz*#72b08j`$(p0ia%ef@2Vk$Y<&FQ z4Yty9xutU7jxyV8z`Gse_s^C28latucEwU+89Ha+#_xxsSs1?`sK!l4zB6RARp!Bg z%F^RK6$aS;@ad1$D>>GgG#kN^D-ECuQi|}?kKf;7EDEOWL1fzeS0D?fh4Fg@Fd+BZ z^2;FRcsv%*fAw2cwC&{Aanx1=+K)k)lQllQb$-QPx!&bp%Wth(4XWG7Z`FeiR+-=W z2Rt11vlb;Kw+7I}N0VE-{75Y8VG~`CBi15Yd2YzFmP(Ub?ck`vOrP9(g>rPstsHuB zNMV4*V^%P?Go-pT8bg2ZbuDk`>au#H$*n9|qH3D%oQdP1bTS*wFF!(^xei)NWq)3N zU3*m~yPdROcwgq8iuYlUAOVh2at>O2?;FmS}7a9 z$4?Ax`cS#xfd}A|(b6FPb}RIw8L!c6eUEqZ4YDC{o=yzF>L_IeXk^q_CKcZ~kkz2Dgkw z9C)-hv?kq}qf;8rT;Bpm$7%73H|1IcbDm*mTdn?@mdH>#Do`>eTi^VbEA@et^eNXK zK(A!l8p;c8B)C;WV7&i>;4-A8fo~+3Hp;f5RGN^$d|Un~ly~c&wo{*r8Als^ZcA1M z{LP_yZ$JK;%4e%Q{!T$_W7cwCopo{gTp#UKmJXMF)QDINuD8ncP@uHvC`ZX+(6V%% zs(m`0J`4bscM!SiT82|BOUGzbi}hTS&HO*ToWbEv zWGVSb2rj~V(a1$bEUaP`*08Y*tzf}Cz`E&gM|xQsKxSZO1v+Y{)V_lIz?S>bvP2Dr z<=rH!I2Q&wLjoNGM}+Xy(m~)*dN{5CmZh)Kw^Md_KNr0fEaDu3%rx+_Kp32R$%FAl zK7OVx{}hGnC(wd_iR5!QNK4B9e@n;dLf^MxxGhu1@T(t%G@4D`j@L+NyJr9z%gJz2 zR`^4XaJaG-@*ADka0tB%kv4ezj1;R;v02a#jCsL|%5qPqiKk=nsXQ)H`;yK^7RlWJ zw;ZhW%TKrH+(TphGU)=;*#@r9b`4~3t2{2^NTU@WUq(-ehMc4@=lF>HU7j|-!tt3M z|K-Aoo8*ezNs~lAbNqLnMk_kxZms-xqrJpu)dhyf;**};jIqs1hcsjC 
z#xfJ{|Ff}r1MN;^r1$b^ifNm)+#MYPzn_#EYJ@DmS6U_P9q3FuoNUqQ<_M;XWU`tJ z7ZtnQB_Rgm(pD(F{(=PToq-E6u<=Gr^K28(_n{p_rsfxh+F6z4%1tgl<_$X>cLWqQh8S zm`fR?b1N_tn#-REXI1eEp2*Pg^UE|)&e=bxI$9}5dRpBAE%J4GvF0ywrL&rolyrWs zOo9%`#i+v@injEQ0Od0}WMTsw2ck&kYyJWdES27T_CQGxj?{(Nlc(lz`DTDn>VSIrMMgd8Z)Fpm8@O>HVWQmlu6~>w z350Shr7@rW&=x)tg@b;k%r_fTsPoN*2qb|ktlHqL^G%H5fTSFRLei2*!jDY29%-UK z+JkZM=(^qvvqpU1jZFNC4I5_Wn|;P{WK|i*Ml#_W?==oYS7PzeJCX84bE_`51}ftZaa5x5d3PBLmZbAN-K7?=FVBFwkIx^Ye*>FWf&bR?+!e^e z5^9D3Ehv@Q0E%?B#9SPpB%5JYbhd4!d<@IYqY1MT z-<$a9HS{n!WTLy_Xq2hl5%`3=A>C+77w`Z2+FSDp2Rk=m(^#D_*`y5CIkKnWWh#cs z6Ka61PE+5>iM;}q4xUPs!Fc}{*qEA4WN7s~R6ue;-M>$*F&!GnmH?vG)zk}Q9fQ>@ zcICs>ZTZVkJ}`Jt@b7j!>GQiu{KX)>H4r@vcE>XKW}0xuqb7HoC{O5k{iWrLs!HILu`QHlK{Z!R2p{wZ0ZID9~j9fbNdCNG3L$(F64S! z{CyPfGDz_nXJc!eeykA9OT4EjZJC%w$3}qeI81N&1g%QX$?l)K27kPt=Yn$bLCmKG zPL79rtC(csA$gg>m*eMys)&gHJ*94eQjDLS)n=u{uZ1!RIG2l2L;1zl8nmT11wWtB zY?sFWfoP1Mz4`2cw)ib5uItyZ;Zl=(v#i~$%jOn=d5E^~FV0Psi-7~XB#mZ(1*q~+ zZ)rh@;CB|Iwx=0F#Mj`+_l*0^Ot9DcQnNg*{!-jBsf|ASl7l+FG`W{sG3lo)Rr`7k zyjaBq4#O@(3?7^RB8t%ag)}dQR^330JS{Db%zvq+aaPt%;@J7fBA{G0A@FpY33dL9 zfhg#=A`A`vMf>=+!qz?)T2m?qlf-!YNG2X({`8;ocO5!W1%GvZ%klSgny@7Pp3R1( z=kL2qe4DdEn+<=@!6aeeZ%s7W@OKl%6`5cB+^XNLf!6rFegBjB`-&QhD)?IssF$Je zk5T@X&|iYT-{?3z51zYeaiE{f-*1ZDr%SVuZS z9Ij+9BrDvjD9oX%Iw@jZ&|@^9YK+$H#V3Lbx>RQsoeTN`zNg|XSIR!D{ubbaLR!T!Kj(c6lJ`v^t9}Xf&T~ZHo^> z@!*1D<5PE9ypHcPkt{JPF6A&a4t(5a{_97qF>>w`&@}+IAwGP)ns!p#HGR)G!>-bq zPRFp95d~L_m605yf?+vyEBC43aQ>60aZR8RVOWk_t`;JNVL37maFp{vyZn61urnEM zgJDk*9on#lfv95`sxOtEe0FbJ{00;c3>!o|ZToyYE<4h5V$302b#bbiTFERu^9Nts zXE(sV*tLXzJ8OW{O|4~dcD_Hq;bf{KdJ0{>l58nL>PL5DjnUC<@(Ca+o*cL11-~$C zYQ6>#9VRzJ<28LkP6t=r=9 z2kS@cPOMUT?JFs}(;;r0!$^kMomll??%hr-Jz%Or6`5YHT}B*thhj1_IN`?dyX9RJ z8`CSMQtXG)6Qh#bRr1(xrN%TY^7=tls8SNXhWUH=8Vo9tjj+q~rm3zc0n^^A3s}n= zXn6Sw-O=f)D3vXXapy^TFtpd*j3qrVHn{=jZrY>Q2T*k!Hz1Znw} zAXfLAvMeYeNxkh?3C?r+3ZG#j@j$2M-|6P%a6sNmFNZxx)+K;$MZ5_Y-mS&wnOV#@h9JtI5_=yJuN=xrNd`_!mR!_8yFO! 
z)AJG^8ickknVj7K##Q(70j5Z};6_yz^cU((I;382$13qX%)m1Y_JNZx>UMhyL5`NQxY5+S{#!E!b2MBV>4_|K zF~NQ9bIIWV*`>Gq%aDC>WD;c8EJ}v#JQ%$gWQVHe5KxFwz(U|z0f{mGDhm3P|Oc05Dq zDr6gndg72xiibu)xOP1n(dJXr@(BYIc%e)VYDUx40~YY-5NU0_F6}klFiex9G%q7a zDqV84#RqIv9^=|!waL)P7-xT-&xKLYF-~8I2J6%AV2p^m3#s;(TeI-XJWu76K!r@S z>(dJ5M72H1p*IsB1LlKEhl9FbpSB-eA4GQ(qcYRDt-JWfxdakgP%7u};T4QZQ2un3 z58jFM1w7vx3q{D# zZU=#^7R9FXVJaOIr@Z;6#j|i>jQMR!7U^ZiZmqMs%0j3Pvj}kc_Pr}?fK7l!6K09; zWxb9)kqo0g=jc(_ZoVepokRZy>GiYc(3^7Z)Ttr7Wi~6=1Ew_iDML&s1<;GO{eIht z1LjIuGsh?V&a0J_klcugR6xciB%2@~Q}Gg?IfjjcT8>3u38~#Eb6kHm&{|p3(Xu(o zu=V!~C~s;S*ie%bfNhxrPgQ=J3BvLI!AW#nfXd#0n9WU&4CDc=*w)|Gz_kppk1Pku zT3)R0_L*;Nyq?9cFbhQt zL~|KyWaTxQ%Cyz$>9~VY%q$u|gc%F)$)WWU{Y<-Wk*f32qLBJ1RfgDR?z0p?d;%zkytAgNp-w2;@2UJfo3!01D}79~rBnM81Zj;N^`z z*Aq(YJHa3uY=c59!ej(oN}tI(0)z+AWDA#LVMkQP;8OXXp_P%)8u`-x$fCH$GITR* zG@s|!xP)5bKoCgi9Pot_nu>ZTAxer8GLjl&B{eSH4pXxZ#(cHP zNiFc7k+rHAESE9NvmB*-S}^Jc{}#UQ$Q# z>X^GfbpH=M5$Lx9OpLxP-t3wQiNwWo2x&~PPwa^&R9{logN#RgN$U?gb2J1gsA`-{ zDHzpgr~+|XVb&kkwk#h3GGsZ!I5SKM+X0Xc>;0hmLK93=2-n{79kSy>t|bL6XkPY zLM_VQwO#ZLpFS-vxTz;hP5KQfu-@ zkg?l??$4%p0G+bY;nJ?Iy;0}F;dB+<_0bQYyRI5Di|$QUTOSQnmi+3d#96#RO-$GRK2sbAq6uh~KLz7Koe_6gxoT(w=5~G!rf7zIp<0K0oTo8y zgQsDV7`kFJOI`;LQZg7YXtFE&Hxh2u&+R-x9}DJG5?9)=zJaKls>q_h`vAa~uD1A8 zg`uDCs%k#x0Qg7^Ol_)S^xLfEpUvkyI@z`5m-W7bng@DDXXhQ0K8OFoe9j^@6?dBX zoVQhN%`}v$(=;*+tIYfpssZ${8n3O{%jpDPd zZ1JHe9smUYqbc(==Tp$2!9XGqkh3UUIv?0W68w}7#yI{l&_>{FwgzS3YR$PwzqCy( znE+F=bz%+bC=l32+ZbO2OCvc1_qVxp?%+w@0{v}Wl99YhXYt*D{%XYM6+;KfW!8}7 z(mtMU4bvwRJ(+_%nW*yS%HC>1Q|)7}T!-FE4UjKyZtdfG4NdX>cL2}Y$K}7c*3s#Q z?TZch@Jw62KFSB)d;4hS)387;`o7jh8nhumGr_fc+KTm>TVp|F+pQo+np5$rW>1kQwqzFw=d7P7oUM8)3l1~gcc?BNfN-{Q#n^Fhbrl2U zNSH(~lwGZ*c*fIM`J{Q!kWh8nTv2J*a|d*sFnP zOeh|%eY@+`#W8@QG|@#2PIn%JtHqe+KH6I}wmWs^dTUH}_B8T6ji`9KORieIq`^O^ z$P)+%+G$zLSKNOFOCl~VB4u7&9O=o|c zKe-mR*ZPxA{K^47P9^g0<5Svn@OuClNwuKO+79>8RF;incQnR6V=WoYLAi~wch=Pp z&V6K~N4oXz2|2);QuQFeF%|@49t)H{t8|nsyY1G$kHDGKR4`Xx9|cX4_t6R6`gfxB 
zBCoMmAiXQFZX5YjAPuIEB9G9DQdt3pNncz3Gsp^q0h5nVeb9k@RsO^HarzJG{U({K ze8;A$=~4;jq%M7>e5)>A`EJ)?|CvEJnBK=Soy;fG4fDrX-#t!k6hO{uq9AW9SH0n| zRJtY(^0#$mf&=+zRHUqbSe~}lQd!^E+JFN*ohv;Z%lVedA?cu&Dh24d@+vwkZ7qFo zi<;^1qD%%s<_7GC1;XHa*f@9-pWoFMzY4_%h4Ex^erpyO;0Ba#KU=9hz>v>EU3&9} ziJOY`dV$SmC3NNcT3TUnoUGh|p=488|C&co$JGv;7UsQl?SzB*X?9pp@r0acjb0Ir z%Dz_rUkQ|cT;(W5VO}KPgD2@=UL?8D5&0aQh{1dgdj*&u2KZW-zY$0~n4=Y?vK5k+ zr)>EjDK_w1k7n$QTon&7;9wHt93)5N_17IU`Dixdp~O;qiXhIn)KOw5qzp0U(U*Wl z2R?SQ^}A!)B+YwdX_F#(k6U-QLvS4#*0jB=7tUtRUYaxeqlD z82_jRNgW+#No{ufITJ`v_1mrf-jqDXjJHBPH^E1H;%}=rXM2a}JzjZn7 z!`j4LbFvi;G{Q=%vxfJJq$`AO%m9tTQutrIM#ZF;j-%)CLr1=SEA?G!;p~K_a^pX| zkU#w1;m0EJd|i;?koX2DtJaT@QEGc&mdMc(t8Iq|N?kprC=#=zNDV`3$etx{p+|DQ zt)aYyHMqI#3^+6d!n8;{I*^WWv^+KeGL z`n}*4PibFIDaz7Z*&Rqqm!Qj)tATLYg2t~>sN{pc9htf9ouE13X@tW)@OSG5(qhM4*@II)Te_Pm3N ziB!-T^lM<#A&RJ(@|KPg!X(1uR@aXYmE0OCF_Q@4yu+^GWa>V7o5G%iF382Gue>o~ z?=#`0@(k~sYFqLcgB;VHBSK9OSkifiarrQkCA+ZN5etvF`dxRhaMgl}DGUE{fx=s` zbr4gB^Ng5sjF>vwlPbGnI*un*cGL6#7A5EbmHCLd^;?m2fQx~gbTJ859#KkIW`JPb zNNEVUWWwr*l+L1*R=?!Cb?%BJDP5-vLy)IYDK&=stC%pTVk+fxBz+;|^EZX(Oxce% z5UkU|F*Il$xrO@=2J#EP^W-D);R7;@DB>z|9T%DU95p!zN+@q3$~D;pUrDL##5=<@ z6fTpWDJLVJdqPdA@-Z%vPSWnb3v)WUpR_yNrV$q6!z$%f3zih}da<3C7mq94Zh5_k z$J5FSCyQ1n%Z8b;5Mp;ih+*V%d_stV4`=VpiK54h_NJudTW3nFy<3Jw9k6KD-gSp3 zb|+AFrdNi9CaWZ8N_&Kaj_0(2O|0RWH_B7r(^I!LP&G^?SW7KsvadFHkV{^MoPLJS z@+VLlnBe?7-dRd-hL$kCvL>)W|07%s zlI0c9RW_`MT>jANQI|mF<*$9;jh@)4py$A=be^V2enp$*F;Mr(^84t`z|t}Z*|M~I zAPbg;si4VxxPvYK50np~koudTFOl;;=yoi>3%S=vvjng{vIaqj8Xj`}J(^wg6h>;9 zjKhxrc~~cxATTEC7chW)S3W0Y2&{u~s5kS`OjgJ)_SaX-cvi)Q?n>n{&$zdp_l3S-(7BpipV~w#qSl~MT6@yxf0N}{9n7= zHIk4Ho3T&j!=KWZ~Qw@Cu%G2&7am6}($*zpsDzzJwISG95Es&t`6r2}Ag zv$+)t;zwFJ!1QznPbmbd6Tv?fp9DX}%@pSXUQbQWG?EubvY&ZSsHU~GpOLgd zb*!#YB~UN28jhl3sH~tuIw8!q2H>hd<%KeGE3zWM3gKzBM$S{X<-9!M<^ z_q(}&rE7s8&e!xBTlUSDu4Pn)aGuBse0Gj4ei(`yd++ccUmruq7{|Z!*tMk%7;XeX z4S*mhh4W!SDI3*;Y?N?%po=n{J+hnYm&zkl;@_hUPd?)TxA6;yV>_n15o>3HHk2X0 
zhBhp*3aTb)lC=dF1}Z~5mAFY7q*g1PHmm>$q%Wv@Z7AkI0794t$yjZ;JdllP!)QL- z+Lj-N@~}H`_0VRMW}@+5T!|&F_)OE+c%WyMCS|k%O=} zC`hCoyfY-y#@bJn$kEm?yf@raspYA}C9*}8FP%i%L!+b;zQ;@CYdRuGdB=+aS%!jc2ZYUolLi^k@QoOmCE`e`3%yRV$@RZGRfC6d$}m2BvMv7* z$_LhGa6GR5f&O`Uqt%*LwsVq@g;_6V#K84pbe}pJ%*E^r+yxvpmz~Q5x?S{`K0_XW zUA3;FFJmIVdZuMUj7aVnhRYDi2rCkb#)YW#()8ZjJTd!M5HKF%vL%#E@DTT5y^sn< zJw!`5X-uY^V$G%sZOuXB*_4yc?)yAdBE1j+# zLe3(2kAV?yXSv%A+?O1Fkuz+$T(pkma^3BG_oa|u%V-y?gNjb5}dCKdd2D9 zA2pc<@2LQ4tvuiDBskZ$NzeRclP;>b#jP9v*UtU@d2nbK-lyFP@6(>j_h~n<@6*mU z?Lz+}@3VRr<`n_xaLTCLNRng(x+Ks2ou!<83rP~axeg7KhuMfP zlQ!Xf!8^CnouMW3H4&yfDGD_~!=Ci>WAXgeEHabE7()v;YmcuXzqlDu3KAhtpK1~z zSv!_r91feElvk7r0=BOn^dCu9VXN*;$)!3Pkv_A~Gs+x4aH=kNaw5GKs>PIIZncVc zD2l&Q)xdxcRU|W|i<;b!Pi$jFwCo9A!{MQZh$#9{6m``Cr;0*v-T>SQ^C8B(S%bhv zoX7ElXDPMsxHis9Kr=VF=kODx?*aNK!{A?>4Hd_ z)CR_q7h&P@Rlv3U2Jhic7Sa6|6?7+HKr3QWi7x(#{nys%GAtNzJbgHyo?;7Azl_~U zI=_5cpKV+EyBDL$jkaphVis{g&_;*ek1d6k2104Q+{_LtVXuMpJ)ChMd{8n zFTzcVV!Xva@cB!1j z%U^4+5dWff+nKrZU1v}#oM*m(_}4-BazHK(~1iwf8hnn87`Y3}9{e!`BDQH*^C>BglQU^#dO>>2$;? 
z$mna$Kgiz)GCag$gfjUW)=(qGO8Ha`Bho!4k{F`)i344AQwjY!%)V;WyzYhpV{NdF z0b{0qj?3nrf4<ZG@FoobM@zA1uEC&Qk63-wB9M=o&%%cp#EwXCQcF zEHxHvAsAnP9pLrTBh}sgL9{w!qdT~Vq*!g1wqcoXL*jkZ?~A+#WJ@PD#%%*O)s@PN zymO{)(^JH$?cy=9I)TxLqR>QsLKpMsc?uuc{py{v7XYHoz|6J@?b8VK39r8eh^vlJ z16BdPEdEN0Os`RsTl0{n2A>E#p@?`5tVI=phOa|Bs>~W7TpH#s5wO^oY9QH}=}>d< z2XV?W_`H*!L}ZctLTfqjf$R~y0%Z2lPi$>B7NAy%g(dc{fbkZVxhex1U&Y~0YH?4^ zHotz-sArg>kRwZj!jTq*V-$rP#t?=?Wez@pN-{En%F{>%z-USnL{t_KCi@SfN#Sa~ z+vn1vgw=fCG3`d{sH^#|gs&^V4M^nquy%in@qcCV9K=srEohSg#i{Hupm-4A!0T^3 z4d0F!)3neZ1v-i#fmgP$X*yX?2n9WpCDh+t2l!FGS#6f6>>B}pj z9w5)|Z(w|&@5|Hp0Qg%Aw2enw5r)dU-~$Der2?5QCy-kl_h7vOx%RF`Z~xI(NhYMK zu_}bgcG|SQeZl1A52A${6}&r}k_s`))mZ99dtzdi=i#dGg2NFE3!kKYLobUco;y`* zD`g)~=+Z#Q#4KlMVYxtUNzw@cpn?}nz6id_nW&=tR1?!nQ@Tb-V<4j=ZD0LB7Ew!> zP`QE6*0RN~K=ClPqP;_W{UO}3o;YWg#TpHIjKLa3#U%ZG`okq-D5-sE5dXY2z8CHJ9ug06Ml%L60 zaFH=R!dQKt=CKGiqO?L9;F zIRBT=&a}nrqj;3Pm~Sxr{g0pjdl4I&p1BkIyFSzleNg7s)rD~`MB5pi;Q7DS5Giw) zsn+5AUr%H5yPgj*=8gj&Qepsd{ZBx?ya4~@nfoN28JPQnj)^F-et~F=x$}Vwx!x9k zAI1NG^MC(DRY}ju?)@AmKgK$2bV0|>e4F|fUBc!#IUX)4od2s<$Aa}i)kB>B>nU{$ zlw$nstOg?uSSJDJaxrRfUP{`k2gLcmI$k3`8wa8>e)i_G2ioGdpt!N0X1#=2KMgmf zuaEWH^B7WLxdf(q7vLMtOlX}b&^LG^9>q{|AK8uAfF5Bui#*44d5U%nzd4AJ8{rao zGRbm0%jx1J_%!aUM5!>U)x!CShS^B@-9=$n;oSboWLfb^jFjf}r9a`Kh2!6$mkN_* z_hUJnx;mXK+Xo@Lt|fk$Pt~#muOeEMa;wdKJ*kSbdXG?l|32yZIO z!KV^BOk5E-6dr}o6v1+QLdDopw@AzgFQboR%*~S=$I0J;`_{x-y&${}aIrynZ5hr* z#x6%&gYjN(M{O5R&B`wvSn?YdsOpEQ7?NGSp;<&%4v-s>8~V*LYQY#KQwR*Ya>Wfx$&j^(IdrB69maBKFkx5>OORjzl_{xp8 z2EPiW6WuceHVy){jm!(AV_bch1RC1%{ZXFr-uCiy5yL6N&(Vy|BK*|R({}T-ku(Q@ zZBw6mJBgo*vBEF#vp0Mt#veTysL>*kmzOb|Mu&gu<*79DRHEaLT**=Mk&c)50^V{c zsQdB9K6J|<+YP9*jobW|YaG>B#~)}xskG(8-_k>%{HcVwN2fVIKja^1Woe2=0Aonj zq?>}p<<5H0VK+S{TDPy`-5Jf~&l+RZtX`j~iS|e=84fJ-DdL@{+7vM=0n^NMgG~{; zodzaFjFQL`nPMhOtXbjwfi<$3Cf35R{oUn5_1#sbiMKLw(03Yp6`9*TmmagTYLyO) zroZV8h8Y&+VhtihXHCybAi~^6f3&(3cZW91zT>% 
zbz?hPg<6bR()`9$kd$U>ypl&wHf>u1^vRK^&#Wf$m6XcOyi>=vq%&J$YWiiI?+ltal{wL^C8Z65@e$^4{cr&t(ojG%Z@ckW{S^KDdm?LuvIA+~M00!szKU$a(O zR-=jvtbe98bQWp0H3e4%Dlfe5sKhPO(`vLFf5ZCcT=^LymWM#yTlTxz8}N52MC#T* z2eL7X^fn*9z?Odr<>UO_7CwK2Q?U5_2tyEWpS z&ny}>-nbHu5&p5G8WyYVE$EKX@rA{nN?lJSj>b&DCS86mSGoYovJ?ME z%B+9<@*l2oasPNUAD(8**F*UzKjZ5^`mR!)Ol)hh-yPk_PVL}kKeDiFMAp(; zfzi2oMZQA4-2JSJ-EinG*5CE`m)GCb`3s0{V?D^JG`O+#cailV^Vz_@)7ZzPB_Ypi zcp6y}l6|Y^>pibJzFMJgz-mf0K`Qj?;TtPPhb19%AZWiNq@4~M%H(;5Oc2*;Fqh;` z5mhbAT8Qzt__{W~;_9-{&?O<5FNQXzgGP~@19Zv?G{0O2K!!Hnf6dGDwO3_w6l%91 z86PTu(mgwf_TJhfyc`p2qU3uvvC3sojb%5q*5ur}ffhRDvn+KDUw8>anhQ|52H&sC zS+rzk4xpjxb)`(Z*-_aoP@#3PYoKbV`HAOOFxa{r%#{rYcy%aa--{_L83BX4XAtC? z4*Y10Z<=OU61oPeKA)aw3-5)(vHY~{|0y&VRrbG;?B-kEEmWfUx{jYHE?oan4_@6n zPTHzT*BOxG6$X_ugF2)QtrKXN-nZbGvKFM5oUOyP181o*Rt_{&I|TVtkF~kG1sV|!)SwoDCIcTX&*4H7${*m94e(bH4wTbq3s#A4SL^-ur~4^ov!zX^Vuo3@O~(qB7fWe zjT+o;qyN6`-9km$|IB#*wVmugt*-6A8YJ}pM%UOnfkw3dYGYEQez2# z`cG(^{*OxAe?B|K7TynqL;qbov55&geNUCzTiWSN+Uc$q?M;ugS?mtUuLVmgwST#-{v zCegeDFMF#ZRLaW?;*1BMArGp3K>pj;+wDcqOe3GmwXk$np2Q`j+}~^gZCYUnm&)@1 zjpVXDpXuTpjTlPhL0&%Ewx}<`sWkQumBM;I7>!rL_$2xKn+@>lc6p%$i})b z4C#5~)-yCIXb_ehu8?9{Sp3L=d68@F56E+wT&h0`bWH~y<5-_ZaWUWI_OxL;k6f+M z%`lTO$6#hTs^{Q_SyK*fG`nkF8sL(sX1aII^>oG>0U!GUy?tbJ=efFc*9~qm0S63j zcBMjMT&xK(z_Hvaao<2_#`BJl4I6av2Op40U~%ekId&`g9?(M>4gXZ$$heb1GE}`4 z@;Q)-ZD#*K9Wlf6p)EN751mVfpMtqvzza$o^3I%(OpLJ`?eS*=~JX z6^?wy0z@Mr6_(~}R9H}6nWBF5cL?c->wNF;9qD*$w_AONRka%aY3B9@_%;ZsGvN>s zQfq5zzUb-+9qI{LA+>f7$bOJRl|s4>4oPaD8Yd)KuO0>GKLQ|DM$4abtz_`8D|8HA zO66!iKg~W;50514le2$1-gkMEr2K7XMm=Uc)ttsTjHfn&RXdQsZ6vd=WvexRdyKX^ zlD~b0YhL|&hYU0tI#>bVbVU2EOf;d@={Y+K^y6LfB%buaA^$Gxd)svQEAdjb_Ayq;H+YwiS(()T?r zg-)9rK;5_Q`{RZ}d5ck`!$uyH*fM*(Jt5I};EO(wtXgCj|~LhXIGX08nvnfABPL z%QO>AOdbcY)XWfQrr8JORUJu}1W08bR^k%A*+dqhChY%IJN@xV_a%hBr2gRA&lj=& zqpuueXZ0!<1x55KkGIsTe7T706?DN56PUZoV@ImYAZce0;-V+8f=44|Wym^Qr+4E(B&K&*Ub(EO0cB9kSHtF# 
zk!jAcIhHPzCnI&H<}JB|^5VGv2~SX5Vcg%&Cq~R8}3XqEj>{I6-F5Qv@t3pmAit&2@ufO4OU*R86tbO&mO+&sk!2odGXaIJ7t4@}4iCg3X$h`uRVvJfP&;#l zW_cS~qVB9ctLx%n44B0Bo3Ue&&Dyi8Z&;22Yy0@$d(F5ds)(@T47Em0B|Wv zYf5GGg)~2r%9^pX^GNGy)O+f|Brsph=JmbIvw%1iQJ+a_%DI8oNKY#on^VZ#HA?P}y-Hfzwe`mw zYi{jJHgj6p*N^i1Lx1cH%qE7p@DciB-$ewE0~xAcH8U95*$$Z}*gyh$9`A_^2}Epz z|5S#jH%fRKZ|>vq#}}dcS6agU^n~8hRKWh{Y_&Lcd|2MiYg$BY8EXBfk`ywbjmuq60nhaq4)2ag4| zZPO%ipfp=@`4q7lJaCH&{AS~qht8sAKtn{NFdb5xgmpK07xck8|a5IUdU`=K_jvK(TY3f57!exNdBQAZmgL=mY$mKgE^8^W2v%F7@ma zWj_J)*nZ> zf_PfInXG>ZM4cc6+3#T9{WbaZNUrNi+9noI1djRAS@wUz{6Fc@(ldWUzGHrm(Q)Sg zOQwE1q&Wr7l={!6t7H4VV&c$$PvhSuo<@xAYrvs2`k!yw!*om~JVnrdx((_-72E26 zAR1$P1Ll2JlWzq3)9Jr5KgRa<33TESX}rpJNW8;PC=(JBgZQth{Rjle2r3G=>BkG0xv0*px=$|9LLBcR6T#&i9hG zNu`;_xJfwQE)b1z{$A!iQj-ru@-fj^Gd}-v0W4m}H{qK;OMHNlP4f})5FeApG0k$^ zU{^>PrO`b)OBtmBnb3;^G1)l3jhOB>??HH+$^1LpBGUH0E|Jvs9*RlsKp5LRHa+1) zG56&$oHyQuCW9Il(4QWowmt7`PpP4&q>Uw16{$^KKv3$C7~k}Um6)I5KQIfxof!Ee z-3gTaG-$C_@#9ghifk^0wLAMW`)ithH>3~R4*zd^@3wlq`q{$E`fJx6fp&oOS!~OC z^%~@c>D1qs{Ywx~0c}UVKg(|8{1VN5Sn}pwavs3&u6SjI@x$HZeWcyh^bHY)w!7X( zc$9P=!0U{dz^>gz_2!!ij1|#CQVboVobFOEI4BfyStx{dQBf4uulEwzN{;g_H8UAX zV>+U|l=A@Y=bIKF%NW-*70HR z!~Q&gedv@SpKC--vHp$GCFC7l}XW zKf1=ti{o`$eICH05J*~iVN>otfLwNLEFvq>h^Exoh+e__q(yHXDZ>3b6H?H3UwZtI z5v~VDyN8aME{3MvLj?%U+<5@q8K!}6Q$&*UO?v_AAVKM0!TULRp85`+x+0-im`Ly! 
zh$6M;NF~_03*__zyjse604@3EpY(Sq=}^=}zvI>6?mGyL5erM|FPcQ2pm!P;29x1Y zgEWD}r=};Fh^Lt_By%K%1(QGsj3=|upjYObGbCD%A14SNlWcrW%h_-5XBQ_idlKgTLthmwj)l(!vzya-pkf8oYAUZ$jxea_?6FarT zi0q=K{&b+S5^EwYmADX=gInnu35BLL zEXXWGeC>tsE<<$)VSgyMR&H`28xz8RAb#_JrvC}T2tuI#uiL`>u2ab~3oUyH(}5;# zec?LPX{*LmCgw7s@UH=?7;}dWrwacxYFqA)ZRa9p;)R zrAEn-J3EPgZU+|J2kPD(-bud${(U7*T`D&)kahe+4JynV%zU1ve-Y{9{9CaN>kC%{ z=Ag91;&tAqa`oY{&Usj5z84aGptk}5`8=0D1E2iN9niAutHn<+Zy zl7S5scAgDX%+2YocZC?Z(=Lk$1om8W0#ws{0?+2HcP`x=_;Db$%vNq{ARE)J-|(U4 zAx*!Ua)_*_5B`74>n~danQc#$*@Y}69}{A=SBYk9!}$DTr_dlIMEw1xB-j3)EQ@d< zRVCaALaL<#6gq^%{%C%9w?JjArxLf;F=EiEAyuwfi8`BMpzcGeB6=*4{SYG5%3T=9 z#`yUHGq>0Dj@pWa^W`a9uI?ejhEITU&Y(qQnIe%&oFVe!1L&3MFVw5LDOR0NcMRnS{rPPJS#U%ejzF9W zQ_9@UG2jlYX6(6s$^%m*R0N$fZ z!)*d$h7nF}T)fitsnzd@2^5%HMo*B$V6Cg^#J~p!|J<4hLj2 zsCX*>K-TdWHK;JFp(5sPO^BzuI0wO^SHk!7Zfg{QIKhWx+@h}u1# zJtiSM_P-P8Va0|AqHg#G#aEbxPrH0fq5F2-GI{nEq z4+<8f>Q9!bg7TRWD5R>Z>d(yt80gPJ>V?&xf z8>IRhq@^LvJ)tB>^)cW-d2ml&AgG}N%ONjcl~Yl zB(7ZXBHKJL*~;A9iZvELwscTZdv0Kv3PAkWoA6#}&%I9LgONbH<{4-2tpb&sJe6pK zSYWOJpVIX=6qvckfrA%=x^K^&M@I>Ez6b$p;V7JHY_R!{L-1o6Xg5FGK6V~I*6+FlAip(}_Y~INM_PhtE zU7vAOz3i@73t&5+b7t-c(cxkH?0T&hODF$Hc_q_z-3^vIPuq!c1hau0^I^gYMgnp} zzQ&plt4O9xUmYP>@?pRFDo@B_{~6>|zL^2`rfF&Bn+({Lc@cde7)f?}NsMEc2*kF5 z;*k1{3}jvGCa&=l=!+So$>$=u&KE83k!HW%#6NZFAxGmO!Pv|o$P^sK!R7eBpGXC3 zz*Khc;J2}<-vYHsKk@3;V_ZeQzS4icv5P;AA?H3g3bjbjx!j{&1)muZ=iKt~QO=EmOZD@mhA5y3|1J`ntuen)Jo2fh z21+shwT7~!W_tQyVj06`Y5evOk*qy3bSj>~jBU5va>DABiES@GHw?L)XpQ49d%WGj$rhAUM{5 z3MZ9&JdkxzmqkNe31ORIn*2*h)Q@l4`52#fL!=9SA=E{kIig=L8{sZ~(SR}y5PiC~ zl)T0I{8o!G2y|^z6g+(H;4o`~vF~waBGu0N^9E9*NbFnvnDR^f`5MI2;MXkk17io2 zokLiguNHK1pz~ZmW)nJOSVa-SK%_R~1p`vU-W;~kuQt>6`b zFnqf7LZS{8=2d3@yQZIj^kKh|6&gRq=XV*XlEzO3b`XsdyjBx8P}5w-PnCL;#gUtT z5Rq+LVj?f$R|>)iCKl>ZFV|3JXsj|gV^;uYJ+O?6%Lrd)cuJH6JWUU07c!C;Th3$TKgQBGdSQ@_;K>f`| z_%FLO*zRXiO@9WFoQW@0`b#c#6^0!IXqHS-Xq-n*d8}x)EbP}Tdso== zX02Fw-t0Q?st#oV@o;zz+cmREcNUzON@%S)O>bmAUM9t5!`=EKxtN67Vy_rTs0&pD 
z5-KNgNQ9E2gsi08{FJntGE-75&8r43EVP$QOEW3i7WM%EUa9lWkb~c-W z5&4W$t3J+?xy6%F@n8=!mB2M%Uz+u^#i+A+7aC(OL?Km2+h(He&*(yb3Hdw=HL=w^ zKG07V&+VkKpXRs@IRuYaY(slv4gnA{5&^u-8!LbjbR>R!zwHc!v~_H&<12yg?PzuE zaJbjFOADOF=_t~ghodIa3C@ou!v3987>rsanr9ADe0V+EliGN(BNcCYY!HKVKoy3P z3QT8MmiYnx!k-ZT1)T?Y-V%zgHc=kP#+n|Dn0cb6XZtMpLaFbzw)ajYo;v=wbgD0yE@yO6`OGcO2%wZ^cE9Z7PAu4@D*L9@_Mj z0QSRC422OR{#7J6ArKnk3B~z;gWy*>{=W}BF#S+K)rs0aaz1+y!2g#acjbR>ARFWV zXl8Dz>F+@L6#fI61egFi9O}n-zZ?MYAtkdEjS2z=Hk1b%nS-H54vOf5US@pdgmEVW zHY<#qdFhQI&q{&*j#4NCJxYPnpcJe$+!PF5oR&HTqow)RA^chbQMU`@Ky>kgDhmOk zHgMI|5BvTPMNE;$?rQoIJgL>_yRn$2;*kEl0`r3yU(!l`&VD%sJTRZb301XJX}nL5 z1Zf-sEw}Z1H4ud`UQ%)FyiNp^66EHQJ=`b~tf?Eesyw4hEqctioUr3+jteH8ViaD%lj&Ma(`FJnV^L{BA{^aYOz;4Y{i|924BjJ&QJsh9_T0pt{$*2~rd&HCay0;@0w}&NQTZ(6MbD611wt9wr|Q9(F`A-X7XW;Ewfw zkh|K$p@Dd^Jq%?2oixiu$dYOga$e;+^$)*Vk3DQP@f+<41gd6dsE^Zo+SsE2PgVUv z2>WCJF)tyb^Q&nx(E3p{M|goT_@x&ZE5BP9?b>jF|C;XSC-M9YS$OAo-Uc3+zPK19 z>zL+>k-60O-PP_V1Y*@a9Wmv*ZjJsu_>$UEKtsv2!PlB4a4UAj0ls%WeGD}IV5qWk zd|Dt2j)%qcn;>!XsHU$0+oSB({XFb=MP&XX0~H;wz-Ms&qiIYQJ5_H=zid@~lelQt z?@iRd6-HJY&Y)>V+HSbPHq8?I^DAUN#7{lQoXGxZyUEHraN3@KfuI-jAr`0&3ZzU{ zb3QXr8Re-&SzKU>1$9#ALll_L4+NL419d+iq95aHuy_uHtCc%Dkaa9Z4JyoI%-mYj zk4E~yV&cm-;CoA`37HIVSxd@q0|cF(n+M>`Rmtz>Wir0E+XCO&G`7*_7UG>9_T#5A z-npzh#Sj`^Z_7eEhwq)?Yy-ZZq8KQ`I7sdK^(Kzgrf!Z@9N&Ki)6(L57x2KWgwyc& z{-s3Sm8KQ6*w(Ki5RKt`2J^nC$#+EZZO8W(%5uT?Gt}Q^G2YpeZ>-YqEQwO^y_~SL zJ@L-n`|`_VyvLm9;6dYlyME7M%|~$J!1sA#jRD^$sDYmz2o3jyQt*ASpiwRO{sFYW z^hYrs-+MD~fGNBJxoa)k1>(v0Uc&rGXqMZNC0K)-AG3(@{|bzzMbE=tqUT}bBzH6d znxG>R=V9Zt_Ok(_`$C;K5|f3euI#=-COC>G+8)c}wxmQdYyYuq64ST3H`6utbgRYp{P*PP^w0~nD{TJw0 zK!mogJpz3QTe_b}fB^L*X|&NC_ale&Z)~%CPuzzLPL=Q5a9&=%|Iize@5o35Uv$8&f~+H;dd*=&6Xkr&Y^(^_h23V3qqa+ZLy3y@Ej)?E97Q(M}@p2g7oTAvr#Z6 z$ak5V&JCW-%)dJ_De`?u>^;4FcLY<-6L6JPhezdGLgxwjeh;bHYTg*=C(HLOD4FT4 zIbJ~yD_=5gDK_>JI~F7E(CP=aWVHJR8%Se%6!$?uo3HMYlTKP%x zJ10Ar>4weamqxAp6f;vase5)a*!g~EL*ykpVnYkrMTid*^YTi-fV`4jVfQ^S#f=vK zZ-lMY!`PowEig96b}Yf(WxNmax-3F`=S!>q8OiK 
zC6vnyLzz{(YNtXG<7O1|A%!iOOONjxD+anT-VvbfKSLU)X|{)vP4J4mO^GL$qFQ18 z3n!_`y1S2LJcXkx-Ldc6Vqg^|noDQuMx47wnt483FuhT~Eeu>$Dr zI-A=F60qWOiXK8^*8_z}#z0uCNK{y@OruPifdrpdccGm{g3n)vtG!C_c^-T&g3m63 zJi+xgmu5Hc66&Lb8hAo#uwgvl53%ax5LCMw^7jNss}WG!rK@&4V41|)mF76aSxSHI zS+1gxLwr17HKKQp=>MjFhT%L2_0YOz2l~nKxtb&zXqEy-ae&hF z?Ol}*Bm!}`Nu6RVv>5J`ScixO$rL~`w8$~yM(ekF$z#XDmn20;ZD-&+ zQN=IKdqR4fL|D`j%{w!tk!Qp!JzX?;zWH1b zAPDt!;T+CO4%8p)MyUU;#<|dR^bs)bLAX-c{MJ3+ho4#6;>5!R< zw?ili;*G2}cJXZr{{0Gyf`5)Uo7A;Q^+m!rrAiZfvoUT;>R16D#uXKsHsh^ivj)4TFsBZ z8OZgnsa~$zGSBmFA(dXPiKV6qU;jlY3*qtuoHOL6#+Z-Rz$U0M=fTZa z?WgwYnFbH87<4?UJD8cae`YdxU^;@hADwMOj|-OH4<)cQJ1h_d$HREk zwamM#CchNPx4nO6G_YJnu+luG4ls*Qc|G6Q{+TNzQLKN)2?6u>HO}AWXyn`aUZvW# zb;_&O@ds8m3iOF9Yl?t}*1_@d1BkHRtIRVS)l?n~)RyKtYAS;eFMb4$)f$gv!J5o$ zxJI|@B-;;@=_(qRpRq{XcGgNa(m*#bL#nToOXuep-N=jEm4U)42NUm!xAEIL>>bbFzNV^H!&m+mUH) zT?Y&M`3%k5eegxxvMIMt^_%n;JLwl*B$_wh{2~VH9O*psIa>_iz%`+SS5NaL*amNi z?g;qeXl^Jm#3ooE<;iUCSM>bInILinF)2ly0)P1$4gqXfdD>^ zhGeyJe+y*6M;VeTpzyC`=6agGFVcq|;%)Z_RUgKK_&>=qbN33O)P70TX&gK#%~5hJukn)vMJMh6jQRPId%S@aIkNu2z4L z32QNvVXx8ogyU$x5X=TJT=8c_pr4FCA3-b30L@Y^GTJtO&#XQGd{o<_>vW2DMOm{CtwDE6gMApHCx+rF2=ylEY9=W=Ww5vH}CM zGLK3oQK3C4^QbU%V%sHuvM)6g5x)C2OujuE1$55n3X zm!Ug77E}?wX4AIdyjMWE310@kt_8_qN49<0ZCZzQi8ne3XD!V=o5*Mew5v}fA3{KD zu2(yffOat=XUzBfKx{cQEhY<7FkjY_Nq90Ut}q^QJJ^{L)mmC+F_gz#jY8t{J^L_V z2lHQr+-x;F1o}=~W&C(R+r6Z5q~;ig9MWG7QAsDy*2GgE!YO(Cd5Y-7_642D1H7^J zbGJNC5k>C5H*KHY?7Y$I`hXLhF1k2M7O^_JIUA0;dYtJfs2f1+2t|yye)ojxdO|9N zFyHD|G1FRts4lQOvl1T5k3W7%_XP>GlD;_!H{Lzo)shwyn{U;S`6p=TYsL?AC@Lw zS^~%8Ree5htg4S^1n8$aw8GNkRb~MSB!|n$(XEM?n*QoJJnFb44j(5AeI19dMg$BF z_jf2s4tG|2{`Nnf$`PJQl*0w)AOWFN4i}grScb^}bhqu7dh%y0bSVW5e8Ql#I0BbMNXmhvA6hk+-*DC!lE-_G8z3shcj z+0s_ADTeR_Ux z4W4fT){)=hVizLD^Gg0MZ>*BPA}~-`6T+CEN)^e2m$ue+LVa78a2eW?$(-mb#2GY>_NkW%6{t2|k4AS&-k$zkF z+Zf2_ReKe0EPub2=Z?P?zkj7Px1PhT%oXcBhZh{}_@aXjePlJW!|$nZjlk~-igLo; zgc|Z8fl7Z*C63=03FM^X?sQm!IS14|ez&6+24^3~*C=P32eL6dxtW>w)bv+T3H%OJ 
z+lt>)h{+5IiQjk75nB8%;-&YK7Qe?(mfM5h|5@kFzE=xpnTj6=$l7P{TRL`5gQEm~ zoS?ua{N7LP`;|cDm*$R2oZl8-l97XY-X5l4DiP;;{9Y{aFZ>_AO2KV*ARFWN&dmIb zrvDv$??1-1zY(+TSP|Gi27zAEeP+7`kpy_3bf4KSGwFT&KC@kur1$ar%ywBx@8kEG z?V2UMkKbpu%Sn14zt3!UQqueQeP+Adr1$ar%yxN6@8kEG?ede}$L}-S^-6jlzt3z} zkn}!&pV@9e();**X1l_q_woD8c7v1N$L}-mEQbHYX`KA21NkQ1XWls~>3#e@^Ug6z z@8kEGcaBYZAHUDMvn=U-{66ze##_<)#qTrkWE>WKAHUDMlkPA2K6;OtNL6Eja?T#| zMlQzc`cI3R=9S#q#GZY53tszUdI#|)QQ4>JGRah@PlUJ5KdfW>)}6DRwEy5Hwn?ok z_=KcNi`bgQ`OpAkViS4G*oHhr>quCYCmT5GzCi<5GoeHYA2y{kjdU_El(ow9w3GQ2 zgs|0<&5MeVx%8~13Ic}*Dpv(6kytXL-l+72I$WrZ6wxwn_7F0%kE@@Q>GI z9shVu_VJI`4kFWVjw zWLDF<)Qi0Oe-z>$-a&KUUoJR@;41CjaDpccUYce0L`-oR(NrqN9JoiJb2mz|!vPS`t@K?Wz8Er6 zKYm#t4p~a~0y9&bQ<%S#W_b!(qTI0|=HM6gle{-m~XXP`CzFh@&;+tFn(bHINoAYrF8lemAGQ4Qg|h+YoY z;fnKG#^ILY-GFb@uwg_X5AiR@;dycKJ|tF(g6^Ezq>EOUUnhR(N~lxBDv3lxq)lJXiT0g*D(o ze%%qrTlt+OcoB^ADCtC~H1DqTw08EiQslQuOgK5>(=OO^s=)o95Xna5w@hMO%zuOe zsr=?1;;IUf#pSmfDNNEVb&$o$?|&O#f3AthZ)d%M9WFOt$<1He;S6+;*D+s%Kih<_ zU4A5Ar1x-27WooseNMz8?SQZM!!cTXy<4Cmi0}$Ff?*?@fAF;0ds->@+6FwX6<OiC&O%8M`#god8sr~@n6;|efqb%kUQG%OGzaR1W!l3Zba{`UUdI@|#Kud77n6hfS(*OoW%?CZ zrK9b5P3c3rYNUv(MzGo5*@>#N6Dv^U;N9xfEw+1Zj?XRzv1kO2P`eP*Zr%VrJRXg= z{6nr@%!S_pdOwK2Zs*SYyi$bd+`6;C{4DJx%qpxjy%`{taL{Sw=A!!MC&Wd(&9t}6 zAUa^8{<6Wq-6^ zv~1fxJD=iP1iqQKA#j@#ElRLNDimMY(gx&iWO2Q%cW=GJhny zwago1WkTcc3qEHIVQGbovx|O*{Yw6%o^z?<0ID|5UQVpEL+p9Fe2{wDK*WuUOkS9S zsiPPRA1u}|$2OwyHp?Yz<}PyN0;-v19vASLE~}f!iE_N6(q@@rzQL0+JmHFAc{orX z7R$qx_F+$66h1d>Ra;L4A^LtL^#sz)vI_n?5G=3c)NFgVTJFZ%)mCVV z5J=cG$(P?LcC8m!T7Y}BKPe38xTraL8E#(&T2yjOqpLJoUlZbXf{Nd{F99^nG`JnB z=LL&projMs(CT|)T75hSRZpcFb)kC&v1e72PEmZ3HWJ#Z2Z$kKz5sIA!hYMw6{ctn zyPqR%Dzg@5mu5Z!X_qkw300^9^n)bHb1$3b{$nF-j`sfid_5^zm@A}-`Sfy z4eMwB=&k~k^|KREpj$sXj(7=v7n3ODGeqUKm#5X+(^C1!d=aKMe+Q?zc2gUhB<^UL zXTT(L8cK?MgroKq_f(@9NgkK6Jjm+66&JecR4US^f9~jTRar6 z3XVv8Fa4Qf*-%xlR-BaeU)_2NhLS#;VKD~u_KYFMTO}^0^c!WNBWbIJ~-rHA7Ev4{u?$E zvl|>X0C-~mF#6f({5K#T=D)#1Wg=qPjNiumHK(dD#9mtJjo3>&hD4Br*vsB{Lw>|Go7+qo0*dzwVAize5xO2 
zj_G3E!E7EQUGFKW9MANoaRvCD#tl)=%xvhC>4|cxjsm|ZozC=l5X{pMsIBC2 zfwYyQs|U`1WA=SD$Fhe3OlI?d|{+EWm*lA*-z_AXAtZ zRs|y*$wFh2z1`WSKSN5$1nb7vn%;VYb>l0|pl|FO*d^va;*Ct=YC^dpDspSayGGZH z2e~e!i-TOnLBSyDHEED^%)~;f9vzwJU3>!wE7vDKbu6os5tnQBIZ?T;gwqnajxoz3 z?HXF>Y&D~9p44NzJ5o_RoJQf@D}a!Khw|<_Xok5B1%=tdkcSyae+lVUKxS6DJpz4) zha^B%c#<^QXpZ}l1LLW7JmbGB&qp*M5qT!`M&vo35gsHh^1K#ru*{?x#PHW*O?-t> z4YNs_6#oN}e(;Io)6u&*IapJ@|Lmwp_d*D6O?4MR8QW5&M61*~hIvZ!8#ziUQR)8{ zsm&34PA}2SC6LBsV2?9Zt_K=NEP^~YKuT7gBLj8F^Lg5nljp}!Ds!dgcn3K|p7Av( z^g_aMuul!Hp4l6Ykq}kW-9v=wZ8VE(k`|{28;8wcvzI?M_ zF}QFU3h|iX!_fcJ_>lXjrYv}d%^;ek8RA* z80R2x%V};_5gp2V0g({7DvoI=pxea{-MLe>#1GCTVdZvnN(Pu!MhO6;7@H8YMC9@1 zB~}K%e@sOY|J-L+>R}{^IH8NGv;+~kfJ4Ny@IEIfsuoZN`zap05(xdWizB28t@HQR zf?26u$uj=~#$YPp)T+)Z$dc9mqNvpavCYXJ&pz)3ZGpWWe}gh4wYZ z=kL+Y=za)=cEAR;$Jf|LKN?MO2$5OQTh};s5Ep0#{M5bbvVV?pB zOsDc(4H>_q0@?GP_$4YJY*T)E-EAdG$g_rU6QFJ&91n5$9kmGW*tGodTx7SdR-)QYTjBPnX5t<%?N82I4K_7Uwe6*8ei;e&^!G;P! ze*b`5vGSV)ObDnx&x{g2q5mV$y4=%Bk>3Siak>#!k>6CPlgUF#UVf+0n?ZgLLa46) zKM+rr-_^|DK(iFk_dV=kR?7H1-dwUsm-$a76x+D2K3N&@Ha~Y1-m16hiMBxXWFXxb(?!u2iZRa| zqZl*qeNU-jpcHdAb%BQI-A!+pmH8Q7tg53z-72~?@bnBSx~<@%dcJ}V>QI9Ub09On zq3JV_K0sZDi?6rBfA=81IRD*Dn4H4br{^L~dcQqyfv;%OP*HpE-gh0MoNR=uTED%c zLrOC8M>Xl&0-<;7`Y*POlyg?|%`ALzdPZhKzs+R0BImay{-J9EFE>aJw+MSgpbTEV z!0#4bE`mOofttPo>5<>Xv+?occmtpQ8m>AT57h?h*SvoL%5K`Dkg8KuUr%I24j$hJ;c6YT0@;|p4r1n=HT}g%pQNvG zT_vBT_k_I6W#hfBJjtv-9i236#Mieea4d)5dJ|Iu_+siTV)aD=yJ)f_E^<3ZNk>br z21-ab3$C}0`GFQ}K_aruSzKt9;E8C?Svz>0w90+snNj6#3CC%b`*49KndLLjG@$B% zJ!OH=k99nu2uc)~Zv;r3^R(#0uPHFc!7|Lp25j+PUXk*jI723dvdFD2??!5z{3p}By z0Th_?P`UK6>7;q!=9wtK$EGLH<3beoLIJdX2M4k-Q4D0}oizPLNFOzTTsWa^@`q0l z48iqGYI`A^YZ`xejD!hbVNMjkI{^JWUz-S%9e(>b!089Rqye85_@Ny7G>|rsC@OaZa%qbE+8Zmjj{hZl?)#?AyBLS z%~6Z;H`ja*Mx=vV?0*g$FiY?Oekfr+zXoIvhHff9sHU%hT+;A& zh4csL_{}=Ezn6{iWc_4T=fo0md>@$>@T6!TatiwnWI5D03tNc_lgkK&U^1CYCX=~j zvW@N1Hbl^*C~9Tyev{uJaAAHWdm@eW66~S@;MGR@M{s6XZ!=Hf37`$5Da4aLHmLuaDJVL zf%uY-R5ItLf5rvnoOX~qVhyWMknu3)3*(GjydOX+_?5Xh8`v9=CFGhgkKAXQ*Je?a 
zZ1H0bP6ld#HvA<3S{pV(#xp>JGA_q2WxPOs?HWxO6u+(ld)$J7+^G5&kn7BO2q(dT znz6IVQ3>UL`^8y~gHx{TBn zno(d_I@2mN%b_yn7A?eXm|uFLZelB7^~ASR=1>xK-`bH>Kj;?hF)y((wqBfrSB{9RIl)|Z?7se?*^gl%<;TIaMtYM=scmMMhp9vSqNG(L zof9iev1MwC=wNVQ281eS_6FJI1lcunvO6hqTraw|t+d%<(-V3#1&rOHzBrv1wrN-$ z*ycL?(l&pU-*n3uRQfy5pfJePD>rzF$!V4!VXqHZn@ay2_%=v?ETw-Y<5x(tjD^7- z52IjUq_f{}v~~!zBGPAi^9zhNjr92r($53m%=ZX7tD0*A`*TL1kp3}@5v8K#>wQI? z^qHH|--X$q(;S)^a`{HkWW4x8O5hcdU8- zU!FB0!_L@=B*?G?uG7iz4uzls3h+|$76SbJHAg5n5V8V1L(IUOBzm24C}{yiR|?LV zW+B01FI#MvXlClw*To;Htb1J5$K`gVF zy@%$QiX6e5&>jkSDram)_TM0$!VlHoA1kOM`)u233-C*J4hiBAnUEq6MxjVNkeMm+ zfDxozjKzf+L!aT?=d<*Sv`LElCV68i3PkEeJU0&LrlcYQYS2~&W6}F^KZ5dYgG(Ky zUHT8k1rn~dS(_BD_B~D{tl{cGaFKRQqOqVxfXkoNEJg=fpR94TG<=aUr80i`Jjft5 zZnf0RelS=w9dWrIlbDDb%6BuQrWG9%h$DazZ%N7&@Rv|AQ>0lwM3x|6d;WA(#v))0 z9LCo3LCkv}cN9Jx@+d&y<7I4T(?Ws|Nuxk6RHhF}vj74?SDMUPX>6v^D>(U95M-Qu zACWg!zHUueIyp1Mb;p-L!Y@ngv@H%%CH%`Y&$#nfJE`cX(=2S70vlEdp9(ic8dkMeCP7g@ zENFLpjv>Tu2xvGBh)%D!u)c400mDnQ+$+tafRne&VgBZ5yqnoXfM7nlP&ec~kKlqC zBZB}ZFx>lLDyn5MuLG(B*CYpepxdoZl%`+7@CQhSHaV7^w~~f3Bf*pK{b-fr@N@EVRc^+^@0rGo9MD`ZNc^NsJP)vI znP|YyBqkyXmnjX1*vL&ra5)smu(>4eSBML16H_Q@X%7Fiijs~@IH`}lY6NVgIlK@~ z1gw3Utc~E~JTpOU`}9C+xF@AbnJZ{&h6o&`_Ty!lPoU9ekQCv2@-C#?0&8D`P_>R7 z1K}8JA7J*QHT_7W7yAjmBe}gLt(4M1T#^5Uw6rkUM7ub6kq4#3r?2I8KCjKUC;&Yk z=bghv9Nn`UvC|_{00v1inFPO$_17-ifN%@h3xXGe^@S|dpr0r(btY}vyg@rsB5ko& zi_y?y_lY*2b0ke;Jc0khN-WzVlZTBEJ<$(kNld|vDLNA5}lV9AgClLTou0 z!r}8wPpN01L_3T&)Vm1R)5ezODX5z{8Rb+pwWf&m=FRB_p)td$2~y3y1M%e8@*)!W zjd2)qoQoV|L|; zDx{O8IV3HeWK4^P@%1_8G(rP8&=p+=w4NAf8-q*no{hm3_$8E3KiC*t$LG3G1f6Y= zDO;s20e%jRky7Tl0)R#@GlulhH0&f+U1KUi z+0r^Z(26Jw)0_Q;`6&v+cW8_Zg3g(pQI=aZVt%KOfcVd&;)ufh@SU%yQy5#53Uf5G zFVGxWqH1ZzZ^xpnr160wz@9XYR+-LZtn^z;uk{5|?)dCtWFnJC;g?L7BGU~Gd^y2m z;mN2exGtyi08&f$-uuWXULTB#RzGW+utm+dmb~1k_VHk#vh-U=MWI?|2j-eZ_}Fy) zmbvB#AcA=ZL9ySBYdRxjaIsMsX?P-#1yIWjP1FGEFQ5@-gr@%*q6pDoeE(_0xRLhU z0`NuKaUi1`J(vUGP{py(?rL~r2Vs7Y=dPDT_fw==!81-$PES)#&ye4AeH&QE#_@qG 
zo+l%u!L?w8i-k0J`(E~)apCw>#D^Fkn1HZLJOeqNP<%NgQ0ecfL^)nyE+W!$n364N z92jVK*FXqsrbG43IiT*x2ih?d1;-zUkhOBn16jxM0&@IjX5LfNUxoC6 z>TeEMZKuH^6gKw~jG;-HjG~9(Mmu_;&2|zP8a*c?*e91of1%Gb2I_MeegPJw?cxt| z_jt3*bNm2_Kd!2DnyB6J?;R1<=W`IZ>#CXdECn^mCx@FdMg}Y1e%4c}=P4TLq_>phUgF%=za8)e9bNOrfE|6{webq|7x7<|{ytRGts6~a@g9N5%`g%z3_GfbZ zmHyr^&+ZEwK)?xPf+ylinz)NBUYM7kAS>&y=tw_;!4M=psgYgKoW}&X@5*1dz7sM4 zF$Nd8rkjX?qJrn&eH5ja1!T;49an!T9*P)aZbb8jRIlNycVIt#jk)j{)`-sk?Qc9t zN-tlZ!=q^aJi=8(ijOeQV}w2@s-a@{HKwsAbU`3wq5KRU{qlLiD3(yzSUJ9r> zRzvXP86HDSrM4dw^*cC_b-jEdrGd=7lP1523g!FeoxIl3@7J=Q))dApcH!Qz$^0&i zKeCb2n9rg0qbLq&eXZzyz|%}9STgi$vM|z+IUlnuq0fAVv5rdsSJjiv1&VOF)b!KT zh(9ayRQ3;4$iIprMejWUH|g3ixn=;&#MA|Kzn|7G^bla*->JQ}a+S+`QFuWxtFqZRU1~Pne z{QtY!=}G{h^KqV5od1UbP|~~3TVN}uu`R=If?JOs8vO4IO;*ZlmbzMk>k1T!^y&GZ-2ZIAu+Z;JrN?>(GYC$CpO;J4U#vG-Gy?h`MV6)_pV;U@I6vtsiEp>A z4Hw(V@9FXWQ+M>As!_~q*byiQWECP*cYe|`G04z(o2{nbEl?Z#g=d^apm2UtDR`GE z6-&*mfQFbMC@Xq?(m-i;RGJD1NCne9(1&28|EZYT2-|pqBu>&SV??HIzQfkZ<7>DB zFOP+KBl38cHx}t11(b4?!8(>2Ut&*iTB_K;fUkl5i||WuS}MP`+2mT)F^Oc1^WRwb zx#OWqq|d=oiS&REwdZAY5#S8+Xs*Ce@wlhez|&F_3G(cp!g9y+jdvb$}}D8b!XJ4PIpw#zsF`r z0eP;5_eJ}uzXGE$UE0gw&bN@cYTyx`N}TBj2`DA2H``A|Fb|Uf>YnL4($Rw~<0~aw zz1UTbiXE7KA~P>0?jrqBNFSI^es3vW9Lhk=s`9zQYD6`&oEAStbpk_2DR9&pG_DlR zi!bejI>v&=uMQ9fYS7pMwM5W(IDc9S)lUcC!UIhYz-Q}!I!2F8jpj@ zn~xEO`JnL~#$+J7AGOgs`WFLP$3WBo^WUKW=6+3I1;%fA{`*)_)lG^AY=4^8h`>}e zcOgq7{U=0!FtJ#fNM>%a|KzcUJrw-D$QiF|Z*uP?9~xEy*wKHoRIy2zIa_V7Tc9%5 zQ;BM0t{EdJkqQC1W+hbF3)`6*6a7|6z$`2sVy*Yx9&K4_zj2N$vZ zg7e|uV?elqW2L48_nV2F4=-agI2T?tuEZXy&aJ_k{=n=Gi4}<|ldD1guPVuOi!8akpTlw7r$$CN>~N6_{)>8YY~qavSqrR+Ks#{X zh0yfqZd&+6*-fj7-A$|Ees(vl4Tyc&@E-!>tbT<``>h1Vm8Om8?R@iz*l$>t`HBKU z=SXR#A4f~GrgZ_kGN<92%pKyjST7(#3Av3!=>7g#lH^0EmAOLeQllFW@eOPAGGCsh z)p~{+pdWc`tXOEvXg432__P)Jz)_(*=tG&Xe!6V)M18blNq7YONJLEMd?T}X=D##=ETi!K;g8cGfRAgW%zF8EcnN^Tu zu~!w_iHqTrVhSSu*36c$z0&N(x`Fg$0u=;aE;5^EO4Lvfixhc9tjLI#Fq7#GDO0eh zhiHk^!)u8onE;hDZM8x_sh02!wIsop*;=ik)D|IDdVHf;vD6ach1;JBtrLL?!3k{Z 
z5!;_?u!vYgS#Wp^@4bHHVMP*jqb7nFBY=HWD5(@OjFJqMW{^C$!`X2SIhL*mj4Q@x zuyJWbq(I&H(-l_uC!!1Yei>U(W^@7gSwuKr$u|Zzqz-&_^%ouOA8~Q`_qyF@g z-zl<*8TDs?+|RLFz73npkM{tRPa*fbFL3qse{NZSPR90A)Sm|s$yN{JxO9cNOWGEd zX1PcS`^^ZN&aoRk!(;bU8(({GDCgK%IaX<8Yd*rH$yN|buQZb6I%uG2gp#X%P1?}x ze7;#PL3gD&i*-_s`Re`Xw?Sj>Bc3cJM^|jc{#dj_C(;gy_uz!gAc4T?y5OFX*#&q4 zD4;D1Ho(ib2mQ?Ua9)kkmDvO@Y2dg#&>YR|KoDr#a#IIU1$7_?ZY@%gFT=75k}MS?rTNv^UYlXl3`MZ#JhO^=V(_zdtZRY6mXh0y=en! z)wWwuDHPK*Luplev8AM0sB$;(&F)%{{-}rjkYDG!4?&{palnKBnS4hJctz!VE1qn% ze9vZfC*Lf@tH^m0#Je?O<~sk=Bv9Et5qG3z4=r(=Sw7Ek#%?FG9-Cza5zj>sTK#bU zG1c#oxvxuL{JWSyuBafsrU2LQ{>wllB!L6jhoNrwJ@MAb+foBwE4)%RT)?OUCpnetLW&jG1aSL4AO<7^z`R3?{WK3!AH#wyT z2Aq%R0P9?Z3NB?cj4`c+@w?3LfrXmuQD$dXu1p3B<+02z_Ja18t+4HdR%>GI1p;vK90{zb7Y@{+A5bGGk4!;m zKn2MX!Q!GBFYp|zkoi1Eo;3Z5^V0=Kcl8RFVQWF4@a?p#F%i%4%5s(nAt!^jfm3Bi6*ew^oOrBR%Z{6@ZI}9J1-EYT?`(9>3 zWSAtsXXfRV?3tGU{pgePkUhTv1Eaw>v!`g+b;FO^A!EeDlEdyrlm~?n3fPZm44(6k zdlRv~{Nr^oFYrf;DCEU_{s(@IutQmRY!$xEd~7{^Z86wCOcq<@C6;p4RND(HbYEF2 z^?dApz}%2f5hPS~5oD<6D$$Y(%KpFjI(1__&--&d8ptf1n=TVwD z3d|w~-bxpZmwSey&JRUVdKEa{o~g|_qgWf)oyW>>VFBK>T4tVw>YLM1P}Nrw%?kAj zGn$W^YWa7teET(-p$OEn%=bS*f8YHJ{e@!!lC-G6FE(3fQ6AObH}Mfxe=h@I|Nr_M z*WZ_K1OK;9f2R|JQuOyN;9#o$_5z?;{XORPB>jDJn3woLZzkz)TkKW_1+;2p>cTJA zroXG$Q3@43L-ptjHus$V&SIE>m*<6|9-ZThis|otBUzjD`uqKGDC+GfD6YRB^6?c~ z{@dc6IshJy&kse!^tXRTW8g3Tf4^;auAe0euT#|WBb2%anUuq z9_;L>|0Bk`(ULv2uE@l@-V7Hj%@WU675q;;6$}0&W3PM*z1BcYHj6kMjg@|GX6f zv(pIz;AU`MfCQrvysby%xjc@y`DScsq$O2KC7|w+~6u*WK=*hKPi(ndOzp zZdCClw($!3+6|F0^pz$88O8DTM7*O)-luxg6j;Op5hl$uyHbhp@)_8Q7PV@oFDjz1 znWpkiCP~%TOyKQtPOz-(@;b1$F!ww@COYkhhBcODmwj*pU(cV+ywU8{!8$wUytCnbKTMj=lkQ z9%R}nuqdOkJkK@@k(!|i+S2UIgbTIp6FazW$_ZD9{={z3KTwX!uzz5I{KnUo>o{&v zS2h|uA)^i__J22bRJUR*#cLqxIuvh%{iZo- z)ECg)7odS&9>2WM90oz8j7a&#j zGhuK+>;PgD`7L$wLwlhgg@|PgR#=#ZVmE@OEU{m($Pa}=mNJtNGW?c;G%-PT2UMmJ zr0oi_f|L#+m1KRm`LvS6X+OWg$SB5XD4*Rt6eq=4F27S$oYQ5P9>&gU`rbW6?DNfF z5U{Q1uU;oFLO0LHvU3@PDcwBf2wAHiuEupQL4vfsAdv4-MoY!{>qw$Fzlm 
z_}B&On2Kea`lOd-rZFP3W0lYI(i(~Fk;S4DvjXKt*1j!{y*O zG>jJCVUI_Qxet+3a35lQTaMc2WRE~W@F1^Cc@K$5^I{%^Hfx_a52Ai1#O{x2Wy|Gv zjwJ#o!us$py00j&%jBtjPRGN)5WX+M9aD{!=eQP!GPMYNw4Dl3hms3_GCjK(Nh2=h z6`E*waVdPS<0-QVxjm9yN)IBnOVaGdB(L(uV7#?B++@0kdh1oX$QEiY3i<<{0$!ze z@G39R>LrT*mRnk6R|lRQ=YEC(KQrx|Es>Jh*!xq34m8RuaXO|@(moW8H|Fe zK3A`jOKY0P$CqgNGZ}r^uQ@9eL9c@ODU97`?F2?Toz`6}q0qVJDR9&(?0Y~*iOxyh zYHN42G!fwB2R-ecJ-`Jxy&@XQNXV(Dje)it+O&saK93r){WwZ~qoGZB;uN!ccCSci zQzALkvP$My3ehNL01`;tQs6G~c@Z>`y2-evm%JA>JmPw=(9XS%s-ea;xe+C7L5LY3 zN?0uUg^?#f+Ex9Nm2eS*7kNJc^kePF<8SgZd*l_b7FGlI*vtn`1Bbxh)fU%WOkW5M zd_y(i904dSGMQ&uQz`K3L+HLL#~z^|tA7CVRi+_By5yK9u?o6(2~^i?LgZWZEp;ye z9A9Oq&t?@KlK2@c*J|FPY5psqc@up5k@}bTXFBCO)T3n{`1Jp#@=c-wY?t!6xSFER z`)?`V3)g~+smezPM=1!xqWU+5SdbJ~=bG2iQb~=g^BJ#+{vA=6q<^p7;3c=u%U*I; z|329i)xV7h&Y^#O0s@A9j7kRX(1R0I4;KPOpd{44=@_ZutFED-`(N?}jVy;cvHNe8 zDY=<7Ns6gO|GtIlnxQD7YI)E<>a)2*%YB=AX!S2Y5Rd5}J0#ftYqnld5SJVAZwa4$ zBaOjk^IT&P{h@~sY=Tw=n1*m{rS@bXH|fZG9D3 zpQ^YidV4DI=Za*#?Fbza6pr;9uOO6(-o6g%F@N9T8ZYSyFL-6Ldi(8vqIz3`4ld|z zK{~y?7VoGx^HqN?X5>hHqu#coGU4S{uo+dVgF{gN&ePhywx?bF$OrB~;o+iA?4y5yymc?VdfZTLe`>sT+u6TgTI z?21COGrrT!@89Uah&*1djK3+6nmy5x(w3^s6wNoUqMGUY8}iLAFh=tnTxHeqR>I#%rmD;09{(+vBibuwe^G#z^H%&)Ez8MJfF*~6QH_6}pM(+TvKNnhIiGKgQs~|*T$B#ug zEt&V9n*2~C?=Srck-@*E;D%FbTylkCJ^q^R!gK9#D{O_WExt2#JZc|-w-Rm?`fx)> z^B|gHM3Q<8s4x6u9|3iUf4nZ{1^z<&tsHlJGm4M#XL}MZ>jx>>-m1`q!}vX5 zq%abP41!doIUk8b3e8Q#y9;2AF+9&COhUB^%}NlD;rWdN@X8|K=g)csw0QoBY<=h2 zZ?h4E1J5r>hv!c*K!;}iM^)xN1;ZTb^7U%uyN0692}MQlJjZlm6qW|ha}bA&gDIF3 zP>_wH@j0MgVMg%r!CL;!EZ=@jgHVLvxs5-t(TXd zOHZ1N7UhN_sK-8@ZrihMkG9>p*f#M+4{QMY0n|AQvsJihklTe0pxgkWmQJEfUYMO| zgQbHwbW+K#qbxq1w4V+wq^dX(dX`Oe}Ll~B(-OaS+ zn1@)$mlTT~t^C!d-I9lxlRX!S;3U3aT38pS|HPd{coLihOOABx&63O`?Cerlmt+CV zvp)PIJH5ovEeZraQ{{5Z(=&YqddV zleE3a&nKB~B^aiZ90F~tI!;V57qYezHq8mO++4FWyYMij1lzuBFz+F_@kz$A7Kxg! 
zi8bannmc+yZsaew5IUEpvM3GfHID-ix6SSml$h1UjS*&}MTl9U$tp61|Le{Ez;JWAy$B+>L*+SU>Nr0!F6IAY4ZlfWG7T7 z@9Wm60I#fTzIvQTO&e(BVn;zpQvv!s^o+TE-8!i;q2t8{aK|c^N3svSkN-Vm-1s-Jc z0e+kD*9i;xu%ju}0&iOIjiof0G&3@%%i0E;i27n*7RyUeF0+q7p`2GtKfqUJluc!4 zSy&Pakc#Rd)uLXC?iJ$yzS)EE|8v>J_MzR*$lOGyK}UxrALdHAv6ZD3@v~$TGY;q$ z(y53lJAi0s4;bY5uHO3)ZF)q&ZBlQy*ZsQn!3?0 z-(hd7G0=X~9OYHDq~dBOg3nESvxnB?O4dX^X5*v&W3WfcV{ZQw!~TtR+P}elP^d9s z{|*_&=R^x0`Gk3%_#ZQG%>#^oEK*?qhL`|3NdE?f@GJRK+K)*5SNqSMC}BYTNq-&=zfDly`u*o5i2eV> z{&Uw8rMGAQxm|ig3XC;m$D+x&v)aDf!(Q8E$D+iKqBFE+EYI2im*V!HTO|k;_Me+C zmQ!Oc_OzaV$kQ5GP8*fYu*X0|=5aCP+V-DY4b3xmqb$Gw+^zIwP@ct93Ng!mfq3%% zb5of;Pji$ZN86s5VuPjrZF`7A!4fM(6L2WQRxhF*LGj_?;m~n|s1mEG+(i~IT#KNz zZ+0eL(r4E(?+eJ(zjPC|2QLJN0o+J{B^>pN@jHMCsj3CIvCB7x+e!KP<{o5cPa4;p z5zK+;Zpm-w#JBAe-kTfEUC>mMTgk9D6ixc8^aD{;^7rd@b1Q+ zY^ssVir^C}O+H@@o68TibE6T>qURN_xN>7h8u0KF`#Bv-#Ev+yl`EMwG+!j5Aj6NEkxm>CNqe zJG;B>-8hTLzgrCDGFPLts%DDbR41bM48G~6^_UFd+YdP{lmmzvXZhB)CnL%&>4bJ? z{FQ2m=;7n*nVDcDT72jIzJ2;|>;FUC+rU{pZU5s@5mH0BMbW`TDNG)VaC+jzs1r#P z4MNBj>dF=BR&g+G3;QBRhI#@X!gxjYemE!|y%$DxQvwvIXlJrcY9`LT$)Ev6xaKZ$VA{|kgR zj&p=m3i@%LwCJtN$=CrBp<_Dq0-HZaX2fmU#rVj)%Tx*FFX{X)FmcDv45Y#MsoIKC zQ-cI1X!(th7xG-x&ZtaegG;}kJ;PCzNs-&*lAj>&iHIrME89&a5$CAV{s1=Xj8QD> zaAHHOT>H~o=|D95U1&u^QBX9rp%e{4!9k|gysnGk+*DT)P2Y38Xx

II3H@6qH9r zGZ{|QisoVV@t`8ZNfgo<$`BYV%nP*6^R!et^6ddWV9o>=s}3~j?6}8zg2io7lON4! zMc)nLBY_OXP{TkR@{w;3*rt@4i}?H|1gm@`iAVlB^0|V@he$*|HnmQJPe+=O`B|%~j?$ zxnmP9G=zwiLP``Gi7{PvUOUW2EL6ixVrhtUHBP8>)OIQzu>fUbtNlb|QMSr%<>ob3}wCppv)MF?90t+NHwRv zAsDOjeUEdokXZ$3ebu`VkX<1t2P!)|Y$#VKy9}$j1g2i(NUQl3%!!_5F&*~AyRl0BBe*mDPeEc<^dwiL(cYaA34V9l3LDxVC%==)CRzniC&VweZ-Um zd7iAsYH6V-*VvP*BG0`cvRtY;39U6n-64!E2%_D0u2^4Mf^&TT0f5~7P19_)V|;Gx2|5S~nuuHm-b>ZycB=2^_Ql``_~61@xCFe-O%N`XlL&CsK>G zKaT9~e0IhyDEo3h16iR2x03{G=wu49(GO00^OOfgav-1G#8yZs&^N!W83Oi7xDZ2$ zI7{Fc=`EpvrNnZNs)I2%Ip%}bp2=xjIi+ySnGgWJ>VO{In03uzQf+sG{rgc8sk4ip zNZFlxaA_ec!5@7pC&_In0;W;ng~z zVjW`YTXS|7GJGIrz@z`4%-R1Z{b+wLnehF8-WoBFyv^1Br{=K#FFQ9pbq3Wz`u`)T zxS{|55w?RAy_NnR|hZ(;-wI4uUOY$GoTq=7zkDO@{B&9i*Gi$1~rSrmXZI-Jk#8Gt|J zg_dzBm>D4jAvbYIjNN$noD2y6T+=RVD#$z{2 zL7?m%ay7aOka()HzT~g_(BOLUFEA&I>8UhJJ3DgwdvXdxtO!&X)&o|$7FXH&L11_( zL|}e{U8!j6qB-Wv(=_D3VkaQXHusa;U30zrCjlD6gGl35ePSOxF+K}_T`=Vi5S)jp z2qV@d6n{lxr()jN;Z%*_09qJcA0!8vMi9mNb0`6ULBjNh^RZY=jJ4fWoE7F{>jt~m z7>a2)hi%C?SgpL3qk<5=tgg3Rt95$D>@A zl);`iXipu}FwXW2#E32oXAuo&Oz`6KrpE$&8MrO-cC#^WjhIJqZBd&IVSN>)W+d+%r0w_%+F{?JPN)Zl)$Vt- zTE|)NCX$Y^!4Dmu=3?S57xS)}#_)yRKL(+X^Mr}ZwPr93KC_&n8R!JYB{Jfi-cM!z z1KxoV=Tu4+;n58Ivez(Mez)=Ik#&%>hH+;PBl{z%uoc|e(tio?7+B}@1k*}Pp>%lh z{dcAwAcrY|O8OQ@MH#K*U0;bkshS5XGVMBnlOMwy`mJ-z=o&$`8A3B{+>Aihg-b=W z5Whmg<{>R#32{Wd73@tk7q%n(Zw+jE{x{=^<^PX3q&iXes>;C%VFO-qr-X3jY5qMI z{?)Nh!~Q2fhzj9(m?a2d3atfD0v0SVDF(3M*1>_w6`o3_5H1F*t48~vV`g>$AG?FP z7eYsd-yr)a_W<%19c|Fh?_6RL{;SeVZ~4Hh5JC#FXv_>zeK zIYpWmR)aG6I|fg>J^(>Zv&ydVBwnxj1(-Hw_f!2V47fRqzxTZ#hT!k%v|zUP zx4^ViC4c)APh~exCCcBp`5lbN#oxH;2@Nro$bNYKuE7cUdlJB_{C#(*YaBgc=o2*J z!!K$1JyAZBzji!5vj6!eFhcWF!~Tiavq;(L{M05I7t4+HW-mR7al+0jIoW<*!{(<} zpJ?T`nsS1?41TwnpX%P8tdacGjGM@sNPeoxdnz}{Pd$K89hn2Ta_tBWwsejtg&owqdO0VC52HAVANP1s3*7;luJ0BLKjKp$ev$WNV3 z5{BLikHqoF4&^ftGwk;=mnK_5hNQ+H%TE=sGekMkw{)Vh!U^QK-uy}KY+sJZZ8SLn z&p`<(JPL7x3Xz!GW`62#CwZKIHN@dujocmFd(?ZHg>W%GKlQp;fgsNbj90{d9PJYVkYcz){qA19ZL1OBR$#z93SSt>PZiTOLVjOCC@R0ffjs1w 
zk)L{l6ng6;GenLq-yfNuLdmkL>=hA1Md+_+ z=f>j~{)yBwb5>WvFshwfcte(*TUi#hbI-#uVaNC>PLsv|t2sG1P`ScWQHDwo7B?54 z%!;zj8*^4O+kp|?LEY~d-;q8LjC>T5)W)?6WHbEJO?-G?Eq^u2XZokr694X?|3?tx z{6B1QhX1cYQ?OJ1A2v&|FyMLzH8njiGw<(Vdp5#s`hPW!aq_pNs`uN+dn&tmDw+KK z9gL{P{{!u&622+;e+lm4|A9W|{|C9oW%++T{F0X66XgScoxSlnRSAwT-xsz4Pvu^~wOLh{TvaDTzh_??GeWG7nB$M11SIefOPxlbQV!h>cH#JPpVlAJ&ly*cg} z=eT15_mp7?=e|sRCaBmyDmSA9O6-xy%7Fb~wyCOh$TxtCIaHcJ=CT+i#N;>kPWoYr z)#GiT8rAktp0UXl$vvS zXFYu(+HpGCVe45j(7g&(-sTHzJil%KGYXDN{m%t3>S8B5%;p3;Y>b9+NpVE|V6py? zBU)9!c1aL>nlvmDaG!}CV8u{Mz@3jbNk574pe8WbG$~>#e^(WIFwj|f4PTISGEL=D z@FTY?YI!U;H4h%HB5o6h<`ud-u&q8EfHrSZAnP<8EhshX01|VjmM@1Of~l-8`|LsA zSN{Qj)D#w7{n7V0jPTj@N;jjqJLQ$GKbmhNcJ-sHo#9unbRf3D6=sWFeFg5wt*3qt z+UfZ{jvf(=IV_O6#*@nA_ho2iZrAiGKyP}Y3csHE47xv%eH=p878V4u8T=l`hZ|}6 z>rnnbX7TO`nYaQi|ik;Sxq~`Seo`p z_@!x|D!<#_|8{GxbWBscKmTSF?`4Rn9o}E3p+ggrwA24k#XcdBy2X=<@;_;A z6cbdH|4FmxD6p_Us_=L}UxN8^^8%!<{Er8+j{j%@_W$L>him!UQU1Te|5=QT9RE|W z+QfYc7@92yk-$(Id^p!XEP<^@`1dQiB1;0-yb+Z^7dS}8zgRux|3RwgvjU+lmpek4 z{ND&h<+3+P(-q2NzJ}Wf_@@sc{()Ou4f^wKzsKDdO3hF{+(64;gYrRU8P#N1jFyOuz5!Fn%-aH?(teJR%1GzSzb-nQOGf(uVrClLk&}_k zm!ClY1kxM9f0fgyKpJui^W~QSA?6w_UyAaWkH~pcSRB3Fu9elLP10-K=7t7C4&vLf zTc4_=j>Q0T(Xkfppg;~%tIVEMsN&i;mNkb84P;kZcahN@TFIm$(uli96YjujE>=Ei zzrU$`;6_8NlwBtc(lL($voVr~Cw!onv2}%~*$pKh(kN-ZQ ztKCW+;>x<~DbXH>c!|DrkpqibhP|-?aj*;z!DptQwJk`^XZ}FiF`iX%u~xb;^)EBG z845jum0P%>IMTZ-}C{2a_Qsg!$^rMPx>6uFL*xb|ccLPLm*p{xPFf)M;BR{6pEO?8LmK zW*S2g%*#+W4<0_lM*I$Bnn&mjQPcMTk-pg9LQMouhCg!nk$fkoqDV%>eUb~dkPyO@ zs%=2%ru|W4aqUTSn-eZMK=cP8)DMKV3!yHfzIG_`=&Ap}+$f@w<|5&`WF#lU*+;C< zNC{9JkcMcuvXG2KxUvMl3|HusnV($E>u!CL2U0)E+5_oDU_q62-wT|ws(t!S_6l~{ zgr5=V5=iqPCIQk{<~V4f1=45CW zxEYZ(%t|^Pi2qp0&vn83A3v{`YbZ^>%pV|eD*WCgF;yR_!4M1Di=m5w`K#OD!wBw6 z2_hT&l>{pmZdbSoG(euwMv7Ai&Q8t8?qlX8I3ShKdUK?n#3-&B6qlX;^lx0RMnWCz z)@I((hA^Nj{^4gRbxo)WJ!GKTPtu^-p;B9{5(Pr^++)D@3 zF9#(?&Ks)3arnYk+2CW%Nn(6!f=?5Bk}qMekhZdi^8#MNVBFtOEf>(HUotn*+ClKp zy9MS#(eg_3Lt|W&{Q%kqI-^Ut*+2&=Yzkm;s_X^=D&|h;f;mL#S9I4b{XY7hR}r3e 
z|BrKh1F_M-v)Y7Ga|WMZgM`c#6+l3sic+a!$yiloy5ag~}T zBrr?M*F*Wht7`XCVQ`-{U*R~ar`6?_Jd}BLGD!3pOcoWFpr_)F!7mkeJkP7nRyf|g z`X?e^28j+pqcZ}icNtIMM8#mKLjk5zI z_!F}$^uRo(HTY5WyKT&GzLs>DG;&BF?Km4JXWs@wOkXWO6XnTI_6ISgT!QQ6=M`eB zi$b^(yLS?qCUI~cSR=i?#_k}-15x6mvke2aJSAfzTXeMU=YBT)oz9PMEo&(_tMikjCB+bYV!Z^f0&YVsWQ56ZG;l{@p%Q{)M%a@wsjYS`y6O6f;~&d_A!`I+i}riHT`Gi7#0VFU6^ zoF-=EZ}<$O?4k{7Z#MAIZ`R!bR^s-t`~6imi*|j7TCn1rY6LpK8XT^LoF~m;``Dub(-cSP;kH0}jsAX(U-J@MgEs7`h@C8IY&UMAQvFSB!~vm={kpj} zs!~aBbpu#xdcvsX>Tt--r_of?5%pD+>nJuavk_x?r-im;xTMoW$My*|LB}E;4z3gW zpu>6i-g`u;WXW2A;yDktXAqA8YY{gOtb}n8L&Y`Bta{`R2BQ!b@OA;O@}R9di1Wbw zLu}a&D%SN{ct;g$1j>(E=~N2~+Tf7t&su4Ifr;YP8E63&ZV|j&rh?sx5<941siyg6 zXbc3PO{!drNIxjoYkI26^_-kWmFt4^?NPk{XZlrG7nN^AzlH&g)$3P1Hg3c`irTFN zfi6%TL^I4PdW8%P>zn5_?2WS=dR4=cQ=-mgGNRM;MO3#hRsn2_}iJTja4-42hqc9&!ro!bwN|j zYP`Mf#_H4<>pyvC0r`TO4nj+`rWZm@)G(J1s?J|=qK$yij___vxh~j?tP$nfrq}8q zLC&1~xRTzc*Xqf_5vBN;K@`r%i;}D|Pcb~RUI;)pm*ch_>-0WI)Csv#d4kQO_y(E1 zGF{Cy3CuMg8)2KF_o4($zxBJiZpz|JRuj)aQ9DZgnY$Ehve5QY?$L2&=@MWNX zH7z*MZ$FKDDR8N>E=@R8De}iX8Abk7cz7EjI^VJz5NSWEbvL~~kn^=7XqXry7Y!^h zeWll3ZhnMbYi;d(ZHnPO6x0&h)1%;xa(M~4IRcfMrl_xCe}=VmWLCG$LlS~e+0Nb* zx9i!c7h0dL&^o_7!@VIMN#Eltq>DL%Wamq`T5b-+YXoD4d_5+vm9UTC);#n_N6LN^ zS_59R;RDo!IlA;7sF|A$$)-cdG-6~$_Z{sFM<(`w7P}GRP`^WD9$}f3dx&;8qpE`> zMzLo~GqJU~zx)R=v&)+~b#fe3OW(Niboa(8)6GvrW%@B}KDzptI{6O!cZ52f`tvHr zE`iWpo{)-5=Juk$Csrxj-cV&$09ocnRNxl`UqKH8l)MQ+Y5TedvXF=jNTLO$W&$5R zLCcRr`KUPJlm}KyAPpT7)BLo%UE;O^4AGQOvJ1O$AJ~DeIKIseX~gg_XC>xSs2S3k z0^2DPU>f4)hTE2o%!wE!-MyU{e@sKMHLp|Hg(8Y9fV*dsib&=JwSoV)V_S!d@lp}( z$xs-)oE!+P?d%9;il|&vJUcr?5giY8G)rLG+}M^WrJV!WCeQ?J-?TtBLqwYZ4s)ND zPeW8eMC1QA^4U!~I#6)R=Tn>@dAAzE&ZR^hC?8BhHeW)^*wLJgg6wFP@PXo|G*h&V zA>u?G&w^(_I1$yi`+u?v0Xf|Wbf}za1=^6))7p$ulO%;S9V5zjLV4IT(Vd+8aZ-5-$M$Fc zLH6U!!+DNQCnd0U7jm|CW zI$Z1r)2C(5I@(A2&aV>f4cie2Zo3t4Tun8R~{Ez_a{+NJ)myiG(0oUht z+#CWM;^EL6!)BLPZt5UQwXY2BOIhprZq zy^4si`LDQdAM#(prk=rP*U*`u_(>=p6qb0{VDp36pLOKA-l^rO4?3sjnj57leYpaC zAl%c)dA*})!8Ybl|1zUPt+Eg4%@d*RoU+dl-@m+ 
z4+Jn><_$~nQ%Gs@mBier|EydudQsmZQ%Go*7*jspwxU0SeTo% z{9=@^%HO%w@K-%DJv27rhKH_2N6qaDYMI*X7CQc}kdtcoJAtyz&EG*{e1yLrp5nP; z_hxVZXq3OR5JxM2pQ8~$&BEVts&a<~O4kHR8T`FWtcEk}HuWoX%&S1X4v&tTXN z{yxsOTmBXVvKjmx#)lhe`RhX??aa^N*wQs*4=c+j@jTaJ^uKQ1;$CUxNsd$6Cz$n6l;tIGT3GD&s@k$7Ey_ey z<>LaCn?02%zmw*Aa5Wddlco&ug1Hpb{bIlK=pVuF=OJa~_o;!b<2PDRYVP90&9(fk zC|`x&tEDfX{NCU?0~YhLFiqzt3e)j<0_FRQw18wSLz+==kvaJ;(S9#lbTFOFCeF!h z;@skW8*|Wq`;E*OlXG#qtWff1Y^e?w>k5EQaF1AzV@_5}C)__s6@6A9v!$&glga&! zV3WkCx{{oJf^UcU&rlWfHQYo+JB=Kc(`SJDM+3#$x{^RPgZp*)@Y7m;E2I$RKE@}b zc>j&TAG^omG7&{G--0jp&~bucIf6-Z5y&Hw{9qDhcKpQV7NBrjt~bMPf)C->$B@W| z!e}8e1A^$&VdN3BdQ3alQRk zMEB&0lvKnU|1LY4qki$mW019wVW)ff++WpniU2QCJMnLjR$?awVxvn?tsT=#nq!0& zRUJmsjQM@bR&%IUQgMU|abH|ueOEKCvhCb*f^R2;C}Vn}LT8Y`8i|Gx|Dk*kV(3O^ z&(GOJiBW=f!Xo%8OAs+nr+RUYLq!nh&;M`}b+GUcYx=~NSv2zL0?L&KZRJ2Gd&PNz zi$f`WN*15zt^H3>;+n(Zyca$pheHmL@DES)mM1{QPT3j2%OrrL3Yu!?>ZJAK|JFQi$= zIB*bH+fzW@jV;%g z{(dWZV{m&UF_ck*BZZ}c@kU%b zF&quX_fOdGbd3L9mEyrbY-JlqEYm+O1#@yUz9rt<%tMST9|z-`zruhQJgg6-E6*nd zvKjty9Yk*K)bh-?1=A|>3;r>(U(P~d{jd5O+$Tpx@H?U`UM(QY_t6vZf0g-KPI}53 zZ8fM?2<9{KQ#+I-wo}xu`ZqZtG59+LWL9KCYV&)P^D-gz1-LQ^u z%P}4Th}-$f2(&ZsK2?}byqiXV3tuRvNPimEP&~p~GmLnKQHdo|GFzpobmMkn52%_y zdVKdk+Cfq+%I@Q$`0k33Kj8ahwR&+f`yf^RSsOjEEk`+GQD(=@MsOt;zOnujZ)?7W zyYTp4PTv7$9}V2P_%D!k%ti}JOQGgAMUK>C!%~{vBmRh znP{naM*jvAErDWMmKA<1>UYepG^YV3WO5vn1xR@@Y@WlpnamK>&d7R*hknXy!cM6C zs1g_9J10R;vitI!qD-L{7VCzNnS)Q1!_j7+?xGnE!f~l)%B!Q7VzHCfM^Gx*tTzJE7v}7DLq{tB)^*R+1IrAz9>%qyZB|t<{!6LmO}*JnC|LPV+8#n zykkM%ND5`lJ0|jz>#YRn9w>n&_+`AgLVj)hiFlLYryKUO8t@QAr+U2U2xlJD*7Uh6 zYHPlLav^$ICR!Kfe9uw^?-mH%=Lsp}J2O5Uj=Ielu|`#cN4dEYC@_CPHEz5?}gMm zpneQqj67$gOP)V0zuVa3?@$O!-)232_xAmMoRG!qz;|uj zt%0m#FX6U&lEb^xp@X^ofMdn`hUTt`9`9&DscFxLm(%B<{Lv^M zgyG|v)j9jCw8}cUAa17>*sdy44ANn3Y>L$V#%TMGg`lc#;EM4e>Af}Plh`{C*2^Qu!nU*$icw$cInV^7o+pe^EXq za4o`j%I7}VQ=LWN#6-1;QgE)x@1 zmH%<`ssRgog1pbipCO?+#{VF9<$pmS>-dirl$v3DxRI8>4&`^4|JG?o$3v|B^5alX 
zF#L3`WiZTd#I0aF3gVy-cAg~axrN`o;Ijn*Xm?$GE+`Pcq^s1c^n)BDRX-|jk4oe|1k%)xVl}EFk3Xwn*e#Gb%ah5JM<+31RrMoj9{CqodK}1m z{WzRKJLGXYP@wY23uH6o(U%YZEb%_#UzE=wj}7$i9H$b%+Py1dGAzE-3`3Zxq3iWL z1m1CLt$FHm%h554arP4st&&2tFLu1yYYhuZR(#-rWLRX_sQfnJpbi`LKr3a~=xK90 ztRO`Q703Rcs@-pY=_&0ND2*E406 zfPX1Tv>i*C!H5jU6&evTnnt7=Mz=AOR}&1hWb7Si{pm`z za>D92RdIU-S`(T$T2Z{l&3$5Gs&)?IrUDW-qfkdhdyRK)!U^%Z81JM_EDB^DUWLum z`0yE8{&6DWC>NSZdqe(Vhx!MIM7=%#kY?H0^QYi3ovDBc7gHfO!{$?F2Ai=ncDzx! z*UO;?WLb?q!&DT~7rqGI6#0iCr>3Rgrun_b|sh5=nrKvTj3kr z=!yA(jsikB6Bt&bv2uUcI3V11g2jj*&WGn}`2(mlUI_mI|9il~dH&}^7|w26Ij9Jn zKNY)u6oZYOk-!6A_?P^tv3p6i+xy%SmB32Gy^z4iP)asVB=Cx=dG|o&eosXupyO9} zgH^fR=o)~)+zRSW0#@Iy!4-<36tAXY=ox533}O6g5-FUlzbTziwTpAxn7@ei7Nfkpk+*kt=GPH_751o6o&!pltoTowFR&*@ zgZ!|nXF;GcBv2V$LW3+>nwYs3h9s9kj+s)R+guFl73Zpz=)p##@y`%=))=1@$U10ikDM!1nq^!j>5BQTar#j_m<4{GTj4+)I$A^p! z`HF!@9yUsmuU|g z@!^-W{GKSEX%DLKdo~ymrq^Zh8{F?HDT2QFjm;@AV2Z2tCJ%yu6~J|JpNRn!l)KhK zq&^cHVcOy?$$#ND_)qbY|Li2?%$VKzseeV&(7mGa|K*#a{C^+b5d2@DrmBc=e?~R1 zL!feprxN9Vk-1F_Mpgb7nU4Vha~-IA{$EPR3;w?*SvCR3S%IvBJ6cd`9^%8tX!*NP zemnf%Hr^L8j-nqV-Y3G}ZV&GZsc3%$?~PV?eh+Ps#qW`SiSqj*I6m;Z7vpnhecP#C zf4JOJ`IDy-QRHFPZGULP~<-+^dfPlFj)II-ipoazC-+{=L|9t}447@+hhdXNdM^Sz|{LjLB z)_l@WX)YYzr@?lE=lc~h2*|SWFz%mqX;UIbhToDfr_t;E5#S&6iGR1-^RgJf=in&g z`-8OX-`z(opadSLs*W%F*wbm^=|owdG!4WWRb_qBTm`)`wLstR-~BV)30U79kXF{O zuJ4+c3H{c5xJ=78L;1k^`rF>Wd!ZP8$){*OSp2M?zm#G3yTDM29x(OT7y^o<8Dak+ z0x|tAJuLnCm< zxgyIVa}|`w)Iuc{Jv6fXneK)7C$4Q9SJ(Az%;0ZpK3t~do1y$4;%`IFVegxt$>a-Q zBt4T`gX6%lgq4=fHi;PJvH^6e;qn_x{2Lq;-oOU#R}G4CxhCF0xNLq=`zIX!TvdB; zAoS{9o)sA!ei5w5&Eeeve=`}8a>aSd;RmGeQEt9t;AA;`TOgak;dcNObGep(1LdO} zMv^<%`r=Fn%k{#wUT@|AvDNcC^{Kf5Nq zU;1!>{AvhZjr^89T>HF{M+>n36CYkf$Aj`Gp?nZQ=2|57Y{*LFM?xMYMR;0&y2|tg zvmJjk@+fTrddW0L^VM?HsuHz|smmG61KvvBt;ulVjj5rnSFcplym*^()0(nJU;)#ZZ&l`A}zD4vt%Vz4@Q zwqz!h!E^_8mn|_J>3AW8MjPL9=_&jQP)i_rC}IZvRI1TV%BXws}$Ac>`|GXOSEJpP#IOiQ#*UZ|PENGFzf=5AzEYN&%F|Ds<#2~RSm8OoT)GbhoFd_l* z{e&)P0WVNH!Xb7%lRnd>RFk5k_3jXKQHT-YD#n$W{2k^x1MVnnkyPhc2{Y 
z!uY3aWiq4B)jY@27s10=_*%ikC>lx2hO5LnRGM8pjWYs`2pc+x5!*|q@U6G>DAPv) zBXhja^>{dpE*)&RjiQ!j~E?Yl8l=c;xDpj_6D{0@q_O^;>FTZ^U4AG2cftz&*d%pWmPvu37@DhEVmQ~?iy zb?MlOy3BlmaN2v55>cE=)7umJ$F81`71U|gWjrXVohd5%B&_jV>(-^il9rkm`0SZld@_pL`A{yurt8Th@4Hwsy-T;TN$|eZx|F1YUlDY> zy?1{RyVDmlDENKx!s7mz$j)6ZWz`of7}ht>hsou_FD%0@H(eL7-O^`zb{Fy^64}3h zrE*sKO!pwT#ina`W;A0*a3n^9I6(EVMj+HB5F%rJzW5X|6;-`-%-pvSyg3RL_+IV7 z?7X0^n~87RzC8n3=OEC6QqzkMe@~~y_d$7|FLwH{f%;(m#K?66EZV6E;2_}-u-D2L z#Md${)mHQ-|BAr60hv1i><{i)Ip0!)+zGy8`4!47u@#q~Jz^_*Lxse}lUXN=YFC=2 z^Bk4^0~PX0H%G`b`-nZM0>V5q0QfaEg}OJKKZ<`UH-$u(Sd3L$cXf?(Aj~7Lj^NYt zwfKQ39-L|R_``r=u1ce20L+%B>LMV=;`Gib^lora*VLykfK|=SsiRG0scTG+&=}b> z;a`_WIdvjhlb$8P4@?pJWH7ks3*y5QD2sjsi^X`C2QAz|fjfx1gM>Rs z^1w8w{H*@1H!H{`oWwDZZ~9ORu*6Tl6|hb$*+6{B5ORto+I=xNX5-f4lkKJ<@3O$i z(#E(o-~+V5xSHMIt*|bIsf`ua zsHDpoTcat&7k?MiGJ;V>#MVn>6fT|_+Ii2fuALgzCZx2vllGFG@T|EsMdk|t(+ov5 z6<^9ujzYWCEa2r!wDwm4APsQO3>CnBO@_XqKejCOPMbyNQq9CNtu>phfq8RR%SpRw z%to1xB%YYwHSbm4ZS#?sFh&#NZSznH8j6c@+F0E(Oc?fs@!d1^r#(u*0E_=EUa3So z7hf9H&PU)w;ZM2!h8fFH&f;!?86<2){O1XE3xuq8o+gFO>0<1vXlD%mbi!P)>I78b za}6!&vmuxf46&qr4FXwb%3?HS{rK!=x*`-m2gTK&+I88qTLaNKaPwZ4u`PM2cM_>D zxpVrx)&N)GGI$FkjEc$Tf}CtF5H>e-Y;GHke(at;$YnBy@-Y(Kw3f2b&v43io6|m2 zO3%INxUl!;OetM*NmNQ3;bb7CNtDuBM8|yDIC1`tM5>kMQcq~=FP>0DN^vs@?97hS zNd;2+H$-g4qY6LgV} z!6~thS=My&8*G5=ReiW5yPTo=MZ%D(@sE7wA|+vvY?t&Q%7rMX6Uo^qA#gYq^! 
z|$XZ|JGHa702KoVL%0|;lQc0Lrq6lvkvDD3*@+l$BXy_2dD`Ohy~ zR^^T}+n76Fb*!rR(Z8U0vLi22Bnhh)!+S|YzJOQ;h+ye4{x8~v?;m?AcLplNkdMf3 z7aNhuC{i)YfIoA+Q1`65l&%u2dQFTISydRwIs=IYl$wY6^s!pJ7{vpIe1G1!{`@BA z)W*;(rnX2Y1_Pa-#O^ygGgvICW)m@GEt*g1(IYrOkG`RK=A|uLC7SSFE*89;?WbGs zl^IO5u>Ongt1tJe7??kllQM>bJi?2mJ(mdk{RsP@!MdVUo){6R9kxKy_S9@UJEQdd==%mK<9XdzI*Rt4l$siRc7hh)2>E38k0fBj9(~BSwfV>_mF!OS>ipF#S6!D-onUm240g0a zeS*ufyxkQT1QI|gh9f07w*N&W&%vB{(?B%0^&ewpZtgt8Iz6 z226kbr+cH%H+UuqqgUW7LI4(Co2lKoa3sLp^TgdO_Ai}Nc|t~Hhbk(%D()oT3XVTZKncg&2eJ-#G2(6vpFNa*4#jUl@xb%(Rq~q- ze>KI-RFA*QX1Z$US7x&1m)=pfJcq1<;BPXcL5sgTiEV7?*v!+oJkYQl5&ZQ7Sh531 z;BPiyYt9wAKEv6W$ql^TL_$lraa14~!QU`G+eiyvhr)puJBPoOknaB!f16=m?Hl!7 zpULQj=SLac5sup7?*s;e4uAU-AmH|kp3u+Vc|sZZs}y^X6MvlnUQ-S?=kfQE#Bb&1 z2m(tuJ|~dPz+W9c`=l1%0^SFn1Aj~;+ePA%uf+O%EG~G9X$gLDC)F??=^^BZko{ny zsIJ+Qu`q-AGGcool?t2b`K%GU`AAawPw^mrfHn{AOK`mKBAY5PVxF`$5|h8&OPMJ# z`M=>ffZD0}n4F|J@x4meLVQuKFtr8=9aOxOt_3t(DV8K21k3a2YNn{Nof;|}xsE)S z3d!@*!DmXqU~Uns;Bo;Wj?6P3B1$m>r6Cnfsq~h5shQ0i=W6}WF+Q{J)IL;!m~xWh zd7JzlU|y$*7!#4rjk zpMm4ahBCd}EfbX>Bp_-loK#R&g? z64OwXf8BszQvq1O)9Y8Y$iEYL`6EFK^q)Yq#9t4@$-l(NQ-18xyhe?|bw)1Sx}pBt zyO$R4J~p{E)u=QWfW+vJ1~*6)@5*SiI9_M#YTegjW2qePV#T{E`oMUvm zV?pFfamTiivn$5)emrue{;ZCP$SLRzh@2vSu$}fO6hx_(M#jVJrhT&28%z@-=l8e$ zNyg6LINDbGDF$l@w_}}C__pNKXNnPbo$2;+ zwuAdKK)>yWU8m50a0j+tgw@b#KH>Q3T9?t(NpS7Y5vXeB^-VmN9%tQebw4qC^f_{xl+zuY&tYZF zTFh*s$xHjLXFZ3~|H_i~xMWn?OYmHJoHkCRU2iI`JWHm}Buz-Wu0(z1<|&49s4Yop z->DF27OLv+tBOiHY5GEVxul&mF99m%Y*bTGN2Lwg*#Cr=Pte-OLFy{)W}yN~+vN{B zoG0HoLfZV3b0uG1d_yu(+_87{iNZw74T-lm?TTjBA5{!rDM@IB=_9#@As~7-`gWy>mft7d{$BgCzO=_JAM^>omwb`nEon`^_yK~odla^Kj{fNqq3G^jSK>;(9dxB@zF%;ZA?yT z^Ie;j75tco8ThnqF=Ma`_)@I^5uI*!_g92hWH*xQx+%P5ZOipQew&r)2eGfb__I<| zi7SPvZ__VQ(C3JsL;pu*-I10K2#XP6%~jpsK=oSNuD&)!m;_)kvl(7H*P4J>o0X;; z0BR}_a8~S%mr?4a=0slpNa9fVKh&>PyuQZ05E{q;EG}|eEq(&ImB`cyc4~`v?_7L! 
zOa=@(7hjr>d`NAq+V4>R2z)t`0|s4Ft2L1#D?vFVgT;xsKhK(-$N+#FGtG{EqaE27 zr+`5cs5d}6Aqz}pX76h3ey5!+eCH)}WPd^S!zEC@3#Mp+p}w;tF~Ad={-q~0x+Id= zngRx8cid=d(2?B&`I`q(g-@WQm~8@x#dm9Er>3KZ{@hDRf^w}0e-U#-puoN z5nUVjdoY11{Czo)&EW4=2-ZBR#eaZUc7VTi9DnQnulf7;$*%nyzwm4k{I2a97k%z5zRu|WH6g6h8Qs(2lhQAkr`gv7m&7&QrRsP{pmL?BqOM6-!}bGSa=Iq- zPe9ltLA|0I9TRM0C%QjyX#(J-?x=Mj3wI=YmZ1fuW+)$SpyjVY`OqC3FY#{!$T3`9$%N-*swU z!?qFuHK(nNJ+CqPe+7vYXR>fQ%hn~7Lo6{ejj1H*A;ayuF~7>xsiJ_Az`LGWH5d+#NG?@iu&lHQl`%0N21 zTOPNsF!uN_KFidK9%n?gq7Tao_W)w%2sZ3JolOGDkUuXs z(7LZ--D0(V`_#W5wCbtQA6h{?K#3%NL4Hp5&%}=J1M^qA$q>ZF2iQV06Bj+Gr@3&k zK;i?73w%TyQyDBSYPR{v0Y#xzFN*{KY@szWLPAWmkg%8%3|U}Fj5828Dn9V$4$YBL z{7z;jwPk(_aI$Bo6HbFNhMaAen7TC&d#eeiURT^ynmZr?Dj4qVZtp$s?_JG% z7~YRy>hQ-L&r>Tr&&ARn-zAEv>)^dAin1`ZCxdUATvG!_RWNljJsvQ1pUAh;w4!Bm zn5w5hn=;WgVBK=xfDBB1J&7f9VCrP(gIR`}E6Spn%ID>`*fiALh;<95o(bdyQ#)s0 z_E=dBj&4IhnA5(DJuRnwX%V$AjFn@UzcF%M1wh5*;C`&X z-f4UDCAA4nuHuJ!X^(||XxsMYA6X>4y=f33u}yo^LU08ix5p{XYXo?mlmV}&=P!q~m^<$|#>II4oNQu;1Pxrtz{(!5N5IgC9hhN03N<(n{gsc%9C#s-RI z%89YJ0Z`Ky)l|F{#n`jF+(m1DOtR{X5swZP5MwUi!}w-H*8CaGk5@O=d91nVA;-B- zKl05M&K((#a_%iK1}kDq(l42rh$JMwTwoqijvI{ zyId*zNsT=z4X}F}HD8qK)Ibz3JROdnMDOO)hil>6Q8*}@AMdQfkF{d_tK&x-bLE4s z(nTL;^5Y`;?wYe!FTnT6*zXhs=E{v?u?CgqP*3BkK*MrF#(pmob5w;JbATXozR>k! 
zzX=K6%grQ$KzMOXAPHW~P+kn@vrV+{jVLVhFF2(Wpg?>RT`#fLbVLD3y_ts@0)n0Y zpJ&}37n|dwJc7(fwpYml{xlBbHD+kx|atrz&1$WUfA9 zC}h~4l_5@9$uSmJsM?ZY+dww~j3pF4tJ$xEO2Z~DFizk5gxV%8_O(%vI>Z?>4~y1k zjV_T69LGAqz}$(7Dh>|zrqq1G%Y(H3g%p{6p$kG4%&^g4R42cC!7eMm6%4oj|Bzof zVPEXz_aaT(4#;nh`~2%({ekDG9T|K8AAOT(K;+jQ;ZLUg-p3U$zXC=_klz@F%J_{T z?(p)fsY0D_udi+O`@Xge`K=ID%`Lyy4?s|5NS9>FZ$Dn1!7K{uts|kj0P>Mgg_U22 zp6Ez+P5nxY`iB|p$zfoy%0NjWmORA#XbBIJC@SBw%|jm&se63Vh*!8aLOyz4 z#4}ut@t4s7zmg^MfblB{Z~QjoO`TnY0h|5yg=xp{-uy*p)Vi|u<}At-CrM=6O+7K` zJdpvI#bUGk@JtC$N6h1kFij2vB99@PU22ZwJ<>EMQOWgVsTeQ$yYIeV`(9RV!JH(* z*WAK6c#hMvp`Wx*pf!kk*-C|7I^Gki;|WF63xiAz2q{;3VUXzs2%7KV!u<5?uf?C0 zoA$tzrWf9S*R>BXWYY_1L8;lF4_p6-_d)sKazu495-Id^FsCIlTtH za)qfTf{loQcC&jtGl#$9n5l`R zPWap%|2Vwj8Vzo~4k^GyT--EotCdi2TJr-SWjGJQ#1z=H#b)*ge zJP_GiBoxOJyb`BuEeLRC0e}N5`wWF==|Ld>n16m>%aZ?9>2$qGCK)ZMFca=0SL(iVYIQ zyGlyUm%KBKJ_;@P7?9F8x;)ebI}`ahnj2qRLVQW55hFpEnIRvd9-I%P4U=tZ6C9<* zaiVanZu(X{CqF3{3-cSFohmy!1!#U_Cn;It46#&W?1x8Oq(01{t`e~Ga+^@uxt z^V{;y!}bR50N=RW9}^vLd6K(~;~A0T*^%QCJvMKR*0}?1$qG0lS?lL&Y7M6(w6XV5 z3s}Si3iKr^^YIyQd>n;J0(4a1GGLx49-QA8vVhquqB+EGf0{!4Ch@xzzi`E$ZCz5B z`kex(iQb5GmMcvQ$TI&fIkR`eKLEAZMI&3{xc0# zAoYSgny)O~DZA2P3wb?w0999Ly(E~+Ewmw;(zH|ULd0s(2oT7n)LcfSPl`2K}- zwd_j_%curepXdniiMhp~agjduPdp~=*Z%AlajD#u*$ynjy%whC7N!;fzIlWbekH&m zG8iQd&`1Yk%(x|!0&K<|T?;>V(vBW;(Bo)x;~2grd)A+nGE5Ak$35^lJ#yP-s9uLb zOwTei)kwz3(}zTxMCL|7GR`F8T1u6d8m&E(q!_-tR`+eq{d(r=D8 zx2TC1#$8Nw0E|D~;fS;kL^2q69MF-IamB!~ISj-rE>_0vFa3~mBmGHXT+KizIDp7H@FT#nY@3!R9GYQ1U=~hAGXO+M`v}>rd zh)QyUs`j3tg6^RL3cGZ$t{*-_Y?E`aY=ZlG*JD7NX^Uzq@+fUf8~fk#^1)jB&8*!9 z3wwqNU~ag6nD%4^{po5%2QuzMm%0JbnT-1&ZZnWfSNI5Ivx^E-i!m4o?}kmuAdxtw z#sd*0E_2&5oQgDgP{abbjxe0319(a&7RNPipS*Py=0 zqGWHed_f?aiYEK)eKJH3+ULJ)BQmfhe24U^pruR!$eXm1fo79Ho3u zNyD*DuDsbF!tYvK&n-5$pL(;j%(VcUse^hd_EYPyg=z!FoC~zrM*iy!*GNDxJ=M`A zppW9?i}VTNYkbc9>WQ}#aeb>@n%D@hVNLkWs0+3GnMwaTQZ-6OKl-62$tU(=M8eT~ z?0+HV3Hx7+a5VN0G^jPYdr|{2p;hvU14rQvb{D!npSX#>3oJ-NYPMNx=J;ks@`(j} z_FXM}Cn?~VpuBNa)_8B;!g&#YQ^|Z3X;>B? 
z5SKH=n|_?bFvs=;B^PduDrS_~&&ds3s1AM-jm8x7nV(V#GZLDZ?%qW8Ab!#RGLMnCe)-kh|{7=^NUUY=ZP$RCzJlI39{ zU>ZciW%0&!q(oEu6SF_E<4jq+#U64mYBOLgcFD$rLxNffM8k>9Xt<3zav0^uUc^1G zfXxw0U)lmKg6i8|#1J?<$ZXDda}9!bHT6ld5Z=(-;)om-h)_^!>aBgLV(LdArZ7!~ zu$@!^2evmIE2ML|^jF!gpJ(}YMNEAcKD%1HPPu7|!qM?WsBnJ{B1Px-j#xJaJi`=U zBqOkXI&)r3JK7I4xXL*7bnPr~oSNZ@&Wquy1NKR`iVwrY6-3#bx^nW7|94RH8bc1Y zg9?L0u`G93Ab%ud#N^`=z%`9~m`kGZ%5)f43?Il?5~AHobL(Fn ztIPlC`645qTMmiJYd)fDj2Yi3pr4<9lfl7@jJUzuJp|2_<_TINr+K5ua8%ILS8!>l zz{*l)9mLFqV12Hh4F->10GiC1sHS4GjtIwC@x#0<@jhxVri2A?2ZjnD*~oeih|cz$ zdy6??UwUg2foJsr%M~{}q6=qcvZeiDQMQnu60rOq1#1JAyObS=cp3u&4g2OY0D7s| zq$((W9pE>;gsu-*&cG=-{kSA*h~hSZWCX>x^VtKm@C_(z$Ik@682?1Pk5FIdcOeO6 zzmHspDVo$`iT4rgGf#^!Jq}ofP@h*cZqOi~R|dE;gLq(Gl#b1u{5s5{fg$$=R2MGriS@eIF`(k&coYd}{) zX8+{5-ehAbrQ5+UU&gsX{7PHx0+Hcowj;rWnDi9Rp&=^P7+kuWt0j(M_f1Ye)YG^< zohzpkZQlUlkwJazJ8Y2OY5AMvcV>PKvSY+NvKhLF_ktaxIzm&5%o~HPWcS0{O9$dY zD$UxzxY)ewOD;BNgg zNqzREFAi+x7YCC1ZSzr=Q9p4IOd=HIh^Y;t zD@oA{gl|QtGlh6B(tAEt{8cnF{GM2?XlD2|x#Ba!&&lb;oyrUg51e|~I?ROYEpj%( zy95oHe^QhKQI%`OE`wA?XR!kTks( zkl9HPT4`1?284zz0iUTMuZsp%nsa@Pk4$$pMm2=x&A6M`EKEbjx`I_~RU}8eW(f4) zR@7GUg=&bcqSP$poy)Wxufq)4H#Pcbl;2HKCIt}+LR+4rMh&q?q$ zQIN@$3KfWEaT0!E2B+djLXrXrNlHxcuJZRVU^8C2*&&udgn7T z1K9&5Ge<9V{PaK;;*r_+XhEsj44{|?wETCFYBa*f8-?!!s{D{o^&&*qZmAVt=hULA z5_AdcAVz*CV+ME%ZH}=ciC^{;X$9D^q!DPSpnz&;iLM_KRSERP3#>=mAQ8)si>?Tg z528C}5QzzE?t9L2+)iuhh40ltIx4!Mcuquze2Te-;h&RUH?kLO{^mMQt5u+tA-zNC zIZ0h~%~BRR=H?q9fQG1};%M!o?M{e-Y(Ft)(!>qV`X**buRR}LPA`e_N27dHdYx$! zuvfD52L!(ujkJsYe18Dl1Wt1Q#xI^|4K>c$8F)gwIhmb-w-!R0rZz>vZh>q)FyGAQ zc`D7Z8#}B0Zb^_m`x){rB=CN8N+}7ve*jSECR}N|382e)iu2R6s26`D*ud1! 
zL>QIk26h!uU5sjb9wmc{zPr{{bZn@Ia-Tm~y9=!#plZ4c!+}841l3f06YVZs#mlv{ z_P(rL-emn$|0e7%=)MFrt+1~Uvd$Moyv+SdC>4$xnQO=;yC1~xYk0UKbR^g}t8PDy z2J&Wty}?p*#IpUIp&l22`{-0e_9i(W(OGmqh!YuqfR*)4W z$D#rld;5)xu7{$V?A75w>NA=GQ{>hXmC9P8dl4r}qf>~?5s_Eg&avb|%)e=?Q>BUG z+Ufeu;ZK_1jUQ{*s3p1r+OLPsgR}_K#K;odv*Sv-;yG2bD~1vpmvw5&GG(1&_DB4l zUuYc`UKjCp0@=2hX(Rf|wI=AB7qJS=s$N%|;S(M4z>jHC|*o`clkq$cd9|5H1TJ%&ZN>y7yz0|zU%e|QIMg7wyAtzWn zEmQ%lO~T$IL-%gJ?Y&22{>b+B`}iN!}L}%kgAs68@QX%KAO%Ax$-SC zqL84*B%vt11>`X6b`briD^ZD9<=#&cNx{GLmoI(-;o@ITMdt#O4^D(VRFl_KtWu?! z=_zcU>?sh7^9Q%1#ru)A8i}pwieFql8yGeQo~Ys?jTKhXEdhGR5`xmC`GFuD#Y4Tjln^T z2j=E0ppmdq=yX@)=Xnb{tDP|MRxN6~vca{@%u7IfWN8r*j=i@# zHHM8J{OpF2Ep{356xsPTY)|@8t@8m~2o=JKyJzqq+?sOo%QmG&Te zOKH>nDz?U%OIxwM^M8Q1iKCi|L)5w_ZBxtypv7)rjtbqyKOtPK=!KH{Zo7sGVLb2E zww0Q0BwHb|Wx45q){pOSeurLI>KR}R41c0K)@Nb3bMYKWa%~IC&tHa@N4X6I4m>qb zQ>9nxpj$Y$Kg7Nb{Kku@7$^jF5C!Qn3T%dYU>;kC`VYKqRiqFzOb|&fWr)L0_*%12 zB8yDi5Bo3Se$K_HMsa^=4&1M!#d{5sLSB}y^kM}MwEFH8;%Xva{F03H^bQyK|b ze%PjSep3Q71{8~hZQ%~YUYarhlPH%tr+5UGG-8euuCUl;^C%0CohWloWTp$D9G6%Y zPt{@J!qoSPv7PE9Qcd$=7zNf%f$2nQl+7lK^{qA$&tl9rGYiT2nf&81^YZ+|VTl}I z1EEp*@5!NDWgmxsxH``s$#c|Q{*i{UhjP3+a=avRydrYENsrC_G`q-E@Frcuo^%cM zM}5Ila8BBb+mb9FSqd{TN^Axn-=TWjm8*BcgRTK^O9%F0wKFB3N;DOj*ClU&58Y7Z z#9bjEi=z>$Z)sTd41FtNKn7Wqn^b zv5pt3A1X4JQ5VsoxOtT(1V=IRf*2F%=lOi>OPnckJ3i`=tCPNK<8!S+KM!YTeq_d+ z@>+QD^aIp}J^8{Qcc}zT-#8s!7h;ZIMV5W)D$aYbB zb1R^29)FHc9zD?vpbpH@T`e8=GZ#5r6+zW2wO9jICG5W(=XB-l9w^-f)dp3GsJ z#!l;0X}Kk4?&=3R0_@eq;ADF_y$-+THg+aJeVI!4&+I~a(JwIFq??cYAD*{b}!`*~*5|CvEKf{57{unjIG{}yLzB+h!k*&#K1!43a~_T2RK*5CeTxb1u{>exnqb1+2g`kMi-_1H^=g;a>rUxoF%-}9{64^}le zpB1{;xeLw|q*Ly7$Wf3UET4vT{l;RrTWcU45-K1D#?A*LOx4d=DRBBhgKX@49-TA* zKz#AH3&SS|-xg}UN_zQ5)b@=~_)6?tWPXDY$_?LhfqkgT@X{F&>z)G(Ui39G;Vql>VPLhHYKpxD+m8 zVMNX|a7HJbB=l^81Q9cn#u_v;Rz_W7IeE}q=pZcKfCCglc`Lb#>%;MdWx%-lLpn-kqeGl7R(=up z$t$*t(T2Q?#>%VpS+-%j^+sc5*8rI4@4HhLGxHN73-streT^V=MeGs@mzV_>lm51z zYTD%`o{pGDvHSQ!<6j!djy?<|j+5q4sv?dG%&Syo9Oao8M5(dp(Z;_xQ}lJb$DvPk 
z?BDWvQ>SBx!^&jGze!xO@h?pfzJ$J+K4fG$u42=RWJwnJq9ih4|HW9RmBpR`@SCt7tCi><6EPr%XZxTUeo&M;4m?3K)fEtZGyOkCa zC!F@|-O$f8xt(*)?~krz)zp5u8DeL7ez^zv==aMdQ0s!T*^V`Sx8TXl=h?R>QR}s7 ze&hE9S`RZcMQR$|AL!Kn=vM4G$nP3MB~2-sF)X3Yueno#!do4vs)z+;Xw&$k7oo+} zv`KzYvtiTM@<;Cj!Rr~nmXRi6mFqWtM>>Gk_!ZiI9WSd3xKuy~Uxdx`+x*e=L{oFu zN6Sa_L93{@on#8a7aO1sqTQ`0`jwva1!!ONcwv!kXjXgz3`s(tG$bKxVROU!DXLa& z5uYLpem~J)5FhJoe7KN?g11eDp8on4jcp`J>-rmDF@UTi*8Q z{xLrK>AsdfdXX@xwW+_slcD{-w^@QTlKSOexv6I*^+{0SiT>#OFuSy=_XVQk3I6DP zVAX4zx&akG=*rU9H!yWQuK3Wf%y8F<7bn_IBueaO03$V5>_RWZznkcfR?IHp>H^T$ ziUTbVG}w8#1E3}LZ}Uf!$^%xmX4{02vO297pn%GMlP5Zpm0?d;pbc?qIgge#BRo&E z1h40*3_=fEpoNl1a!ZfUrKz3I);SHW5(dY@T&-Yr2D9ItFIsGiJ#-5vnZsm${;7?m z1@vJJ9x%Js#9q%wpzpusj=nROVEyjs4m2^KWSl!Xka#{|5S65#-@=}@3X1;p{LvlF zqgmV@&c~Gg=z)~m=02!#orKi~w{6gCC#`>zKboFsSq#_1w-e4p^B_E` z5o#ysfVy2T15vqdPjn{fnglExq1H|hqhZmHnq|?tW>dPZQJ>lq{H5Lza`d~Sqhf{o z2{duPWC2xMYWgf^5zPCbUN7El9u0lo%Ex;A(U0e`VGy7Aqd&xx_SBv@jx*)_(Fbog zq$mF9Ht24mKYE8D=b$P;8&D2DBYrE~Bb?PU4Nb0xZSAIhnR=b*KzV-;`qW|z-H@X5 zN8b&jpe}#(Mi8?q2DDoK=(kuh)b!(Z&+A^`;D8=^_*?s1>O5Z)pko-`;S}d!A|mKGR1O9{Nz>Yl52^)z@odifdy+1y9(>eWrVe z=h`Q1;viNvQQGzHGu=SV3$(r`w8d^+;+n)6o7pl)o7616+sjmb9F@UuIG@11AKa7V z!9rY{#n#+vwXqw47o>-d$>h89o_kjR)ngHc7Vl^j%9!;A<}qXZH4VWX^UX`mLS2_< z+%~BZS+T=M!Zr&bx;EwTEgO*{{R@y1LjAs!uHQG0Nk_dmW=VoftPx|{^~#0 zzQX@{F2d@ce`jd9K4C3xDWlE&=;i78$6OAc1RSzvk96~!$={xfe`%N>+0Zj%)-vhf z5;PimEvx4}{BLjh63r#(bI-+JqS_|)5yfDGC80>8P^1SwGjkcFz|HCTI0KsBwJKx$ zM6{S6SyuvPy1zlE~`p6#BIb!4U1WU5RaMXF^7SJHU$%RP84%qnt*6fGHz z`Lmud9K_J)!}2gpgqK&g+PyVX?MI}41E&eb@Nd{RTBw6Fy8d&7? 
zo4~35oAe-jxk(S&V+f>&zH2Dgp$~>W1(ev0pgB3C4rElST&Q;*^PGkQcj`7a+qF$r z5l*mzprc*1fZhfH7Ak3sQKM|sm#P9nKt<0$CK{-uHIFVG!qqH(U*ftIRzeOpRPHxC zdYrGnBggprOF>atbdkKFhKVTQu=G7^8G(0CAIE#XRCBun_BntVn*&^_Cb}1}R@BLf ztBh0F>B!gm5UD}c0STZXKPQ=}^C0GnQIIMkW5Vj0Q&2ml zY-yaOsWN%*52B@sCtCXYMN2~wEeNcL>HR3Qcw-YDRccMefyGUOA9&Xd+}kOOawXHq zmHfze;FM9FV(*hGqUS@%_rU)xaJSn%^#v{={z{gHL6l*ULyxmMX!2`Ffk>lhUgJoQ z#?icVFkm#Vd89{k9JYr1>cK`J%+3R?=Rq~&!w)^6OY*Yun~h)0Z;u@OBIsE%6ZYr= z4g_4yiTDi_HQrXzutCYqjT>g%*%$O@KSw2AB2^(16cSK;feAP(v3MQcrC~LW}DxD`PBlp zDzCM9?!sSntI+On5G4=cVuSVE`1L%0_{Y}cQ@en8+L|H*nY5aQp2-lCX6@H=dBu2rvo;ek0Bwv?yZZ?|Mith_t|Edvn z&pfF68hCN=EiYb#`$)8XrOH2@tnoEBQEk==IAxgiAVo($8W&H-PpSc`s;LkIY8W|$ z_3^z{G|A(!jdeS*OEAUEA20dUY8@>sbh3ytl8X$Aj{!!u)Sw;H^G-L|o*O%>x71c0 zFiWYzTm~=>HI_X(7?5JiC}4H-m8y{0jZ6myp#uiS=IcEekP*|gey;2MaO>I{ zG-RfrO^nYAaC662K{&I3@LPaNv9Csn)RF)E7?A_VU0fSJ`|58Lfwk?c z=xG?-k}~Q7H5Qm=Sk5h z*cSzV`GE!Wu0LRMnq50eF!)ITBofIWMrYQ7pF*br9yr`iu#SSVwFQjt#vlzz6AMy94Vn zbqllyzn6>pTTmY@94T#Li&3n!W$56xlC%cY*>VvGuC-NkF0_YIpHKe8%i?l3_Ke-6 z)E_Fzn6LwN!0n;vp4}^VO3d`V^Po))(_W(>q=IP*Ov;?Zjs(s$o$E1n?!bC`nR?Y+ zAbl+59A!bQ52|ash3hyBW>?W(yvzelgXe+j^%KBr*<^7BpeTHow_P&snj*|6pW~@f zy}^93iOA{6DtV^L3R{Ilk-&q2(wyH>%fI?=Z=+_u#p!qq>v-C^FO};|jt36|tIdL2ip~@M@NzJ45jc z5@=9#E;*+Q$j*qA2S-|wV#53mjtl=AfqM%KTbO@`5<2W#>?J)$;W$j}-D){RUZHCOzC6{*vr>AM?)3nk+;F4Gl zr%pN&Yv7V;NFqWfrUqqwB0s1bAgsluX-4_dwRV)K5HB%ZtyH_9?9@YoqiXdn)=NXn z&JRTp0!yxe4TSZ|I4Mv^p->by6FOgXlw}vKcEK%skz0yd@nRkBi6Fp?rdKHCc%1rJ zpD6N?VY)i0Bk1;BUYnA^R27||rgsJk^%b}#=q)BqL^8H-OAk!@TIPg#kjY2^J2J%qj1xoB_jb$~8>JKyh z4rg9>^CH(Ukov5&is7hd`FftI{|M?kX2Ka$(~klX0IS?uFU3EP0?Z3g(QFZFXfl$@lkF_+0=4rq;mBF@Xz?-1> zfbEF74x-eIahd8bYwH@d74k&4(#f@=b>JBq%^he@C22vye+k$_9VV@7i3HSTh?cEZ z!$^|Sxu380ItM){m$os{l`4;KZ=eT*{EX_m^|#2<5Okcv-*UV<(i_Jl2T=Si-$UMl zvWOti-qnDJ`%J^=z0ls4d=UjT>dU{`F1L3rXiUg`rYx$uBs{?Ex*3G3TBB9(KGQyC zP?wQ{1P4F-!;b@dpQ#7mTn}dyF>oR(8@UgAV1t}LKm9CR&eB7DH{9N^pqfM;71@a@fCTb`1ieorBnaqo%H6l|^2u%~2Xy^8Bd77G4%*Lbk^J~*} 
z3==$0L4H0qN6J}lBSnaEBAwg|*fQC;kY25p;z_%MkGhKUH!{so9vBn4E@^II63HR?lpuiEQ-MUds3~t{IT`(;G+dBJR0kYQr)k*1M^hp`nj$2d2=xr&zOAPZzd^i$H;q@ z{9<+%Zafwcv#5K(D_pS~e=v)U{n-y|^GpJ4lwWKmDg6bYXSH}WO>>xjRjbZj*4D_oB zU5{Bwm6S|8msdZ!b;o~sA6Qx~my?Yc%8OR<>+f8)b{;o;idiP?! zyMulnRPKw)dcB)+{P1}Vu5&--O!AljnlY5)b>nX=pSJeK4_JEqlznFWLJ=6h*G9;- z;U7OHGKGeJ804x>ly=?t&5*`&+V%Kdlm7AZ-5uge#rRRVp7DzmNNwMARA;ugfM#5t zIR4xX3G)wFdi*ro%=kG;oPV#4kZZ#~eoR)X@q=vCiPEkczZue4(t#eoYtlb{zPkfU zNtu6C*5ltG_DuxGE5Qaj)}!@w9Gmq{R@%6o3VSrPbxQM$zvHeUSofFg+$&>ZZ*drC zSI266kXM*oE#-p2_&JXHQs?HVStgf$jzMG;2!w!o5V_{$MQvxtS!n)s@pRn7!C6no zUyt-XOcF|h%r>9T&6w~RI|cfMr$aukn=zpiUCzh}dO5pL2jhpPz-k$bdo_;w59>1R z%D4)}cG&<|#t^RlJy*s>ru~$z3ran49=4A#-!d!4WM>e26^Mjiptcql}p zwwr+Q#PYr@|5aMZ1#U_^2fcFc$f>`voT0kx`&nLj6PCAN`M-SS%@WJMJsjna_{z6^ zWXB=zuVVSlwp=hAn}Nsr{4Hnmof1?E@$<&pC4xxCT`eTTeExVXXgro?t9)dtT<5Pc zk5w+QRpyu~-_k7rYRDjS+{Y>#BnN=*GF2A%tBhooXCx=VkFq11q)EaK8jnhjI?<$p zs)I?v&%+e0_bUkS`@hl<%HuqokB!X)>%H}-K%95@>ttwB_)w-3vRH@lyUp*t=J!7H z`v88`K2*E_a2cr^zdN}!L-)e8^41$!aJ_@RAZ(!k5s)wZKh5alod_Y3jL%K(cU2&m$6zlAUN%|5bM8^!43Cl^e7Fj#4ITk?mu zsyBb5NRqSeg};r+@1(p%a48@nD{|kKV%aHOsG$R8<|ho`09Ci8P+Wi$QEFH zq+4hS2o@TCe_lL^A#DM*ggaU5QpS^*jG43NcY~fKdK?rSdnC$BOxRlcw)@cBOKrz) z14Z-gbnXCgIOoKR{_y_KVQw6$?ANzJmUY^%7o$UFC8YO%d`9dEm1+g@>0w|*(OYUCnvs}`0Oj3QGeHx*w%#6O+cM(Le5hv$Fq>jk8&HQp z?WtFwgT3f_&(Nwt2OGe~(;d9mH45~B_Rn;5}_y7FK)IzFoc51~@+ zuE>wxp+mAF2IklA0Ap~1gn0l62QzfYeIrpJe$ljSER)hQLlGVW2eUxLj?XGall)_v zWX3;)nl-SHS*G)J=5kxQ(k9l}M7d4Ou!-3=F%Jp#6j{Q!SZIJ1Ba^3C+QNTs;v6)h zns(<`OdWfKWg|qp&?!Z`T%u@a?Gp2TC2P3%SW9#6(9IO77Vm(7vWRUi7Tf-Ib0!{ zQ>Z|Gss!-#v2f^fY`t8ncLJE?9=RFi1r7vF1KRr=ICtpq<80+L)9d@-ZWsQpi+>6Y z|AawbaM*bSk4N*>yF{`_KEk@5%d6p+Y%^#XnXi74wg25;FhP=YJtEeL9^DaNCy^Dx_Ga$s+t?KDmT{ zG*g}enj}ba0B2ig;6*a<6#jbbdPi0!dK1M6I2tUggDZo#vHhzL#%czZs!HBL zB(5Bf`#0ETwjn`&mj;N$@v_j7$DllNy(5oA>v%b&E04tQcsV01j|1hApwQqu^m5}s+J`WWGUJqrrA!pY3DFc9()J6p76d@vG0GxQcOZI z-xIE`lpJuEew@q4aZ5;cwYWO0K0b(zgR8G4OQl1?8hXFte9KaXt51F3&(&u`c_nf6 
zn3ibDE`txFw7>(qrLF~#_+4a`R@8X_bPb(n_-!gnl7;e>LaM<{hKt7H4bzb&{cK14 zydB^i%ohoeeDPDarkb|_S3V6_MeIK`Of+u~cD#L&t347v#i9#S39A+byJS&aXtsmc922B1R zt{>0DtR<53_f!#4kDND}N29_%HjhSyRq?S-6&9o5;=)!`*deVv8oLQc0TULscfIQ; z_Df*xClUKacsW!`74|*(3#p*G9nEVMmc!)%2xgKYoPey6}fvAcE1B*$(&7)%&S2<>P9MB^lB9h<+|;^|BC)< zdmxdxXZkBnc>~_qWBqlj0Q-IV>w3(#sDl6N^_P(>6{G;s8&QgE;IrJz`KQS#_Hu?v zj>n7x`BmAOVlvs5w0VJXWNeWnND=eg(4klGo2L91_i0!fVX?y($0af zOlNh=m29Pn>Gg$Jgu2x*;|IayQCtsW6GY+*Y=^UnJF#;7?<=g#@^K=!Zm-kZ`|A%> zeOLk9!mEUHm=!oIsy7s2$H4ZKf3xLZ8kqPPf%;0A%5Dsg78B8|K+LLz7yW5NV&2&s zn#|f?p>796neR@VhJ|=Cr;-lsLp1viyUt!==35TCHpIFkK85dWmt~_kJC>=nT$UsA zI@i#=eA|$jFWK@BD$Yl$v!_9|7sjI=gZCoVNvnP}nXU{pyBUZ#{kz#Ui#d`l{~)E} z3Q&p~V(KqMeK$e={RuNa+wA!ZF$uXs)(dD%6Oh|DD=+dke$9tD13;aq?rbKIkBiCF zUMH!f7OOqoHR7Z#@=TFisnK7wx(Vy@7`YLP_vat{Med`54} zM24GNEcbJB7IYunRKWPb^thqLFfF}b4c*`88v3-CZOA{nVYNyaHklQvd$U_$f+}G_ z$J(3OeTC*UVC@H*GX0z8nzgfw9^m^Tper@e)L)NTcC#ySkidUnz1sV);#9>yDbE)z z+mG1g{%bbCq9@*It4{b%CirZIRFxJH11Ki>Pu?~=+nMn9_TNKU;l^C^S_;rR^gZC zA}p0Z_ZLc;q0)TLC>; zqMe0ysihf+LQ)+EAF?*M)vYT8sDLc#wsRJKVsQ zJA!0z>mD@ys=%mL8HTK%7MGb3sJ9dE0C~6rIm`oTSk4@7xEOtuPMGtmbWW3XgX(3# zQuLq3T9DiY_ z>NyNS2X{p;&~~Nz8C_75sf7T2GiKKi>CFy+5UKXx;W*p7ph@_^`E`0w=39tv4dSc- zIfbx{E;MUtd^SD#5Fg?$luIq`HS^K!c+s;c12H?78e=a|AA@Ye5^*6nYdGuRxzPL$ z;V<7QV*-CnPtr+A7kg=YgB)*&1h}-p74-cF(`WD+yRt{1s-$gt zyxiBO3rahDgc9IYtElzcEjp0 zkWH+MHUhBwMLGL$GfXfKlL_e_3;s8IaCdemrTX^Z&V5x#S8r>n>(EJ&n!bkXUS4hjHG2sCFCw<~RU!By(1ewxTT%o^~uOg(BQ=tS4r7}uJf3v*uhI-oZ7T&w`is3Cw7JJXn# z1E^0q|3b)lkOQ5Au32kd%8cE?mya^_$D+O?buC|#?5jV?xVoG+NP=;7Ui_bbMqJO~ zyLQy8uciXy$@ZjD2+;dB)p(J&>eTr_Hp9ZZ`1pT?sbRCE!ORy5Bh6>IP#o zHJK|UVh)r)4b_s`X8N@;O2lR=q58p`t~GPT^`^0FaR0$D{MLpZ$P+w}esvU7$6&%! 
zsiUA82O?B$0LJ6#AnE=}W;Zd=F9dWmrmd&hF`<@=7w3yF>$jFlmJ}OzbFvn)EKKzi z0G~sqCkHlG6aRx*HP*bbfR3OGKr{OcV`Vr?y+02S7fWM*@cwO^U+br*d|D3!nvHt_I$%c&OMhK7EA(N9CP_i-D{ z*WzLjgVqOUk=sB!)66WMLH(oPMiF*WVvf!5Uq3?X)i z-`+ls*(+pZ)v6t!8`_!U0PJ2L{9kX(6vEC+HfFdx!$3Rb^805SGedWPYBh62DU&=o z)`!bapKXHko3u+N!rP)hbLt&(uY?sUA7>Q1*5yBTO;UHy%<`+dwO~eVQinm= zv`glrGnx(;kx@a1tqeWRB4RYfE>ri25Up129jI#@s06!Y7$`fnF8mjWSPcXi&!i@Q zsi&CT%|L%pB)e$E_6~rSVQGGUrun|6#EGGC?JSXZVT{1iuG=0_MdWoo1AB`tYlXmD zVs;IN)duyakMZCN-DqM8?9a~j#H;SJn1@gRY0Tf$B?Ki)pFFrj8|N1sh7o=GQwIO~a8wC))l zwP*+obp2s6_=cqIH+}}WK2Cuel67IVDi^}3QC%p4L9XJ-IYi`Aje1FBg$|YU)8G6+ zwTL`h-cXy-Q{6WRcWhUHe4639ym34hs8d1pYBh!xYiJB1@#*}P*%fBAt=SJVz8MLi zK)h3&aFp*g)LtKhcvpf9=-e62=*rvh!0?-Jedgm+rp~?=;-J%Jwp6#r1Z&gscsU=0 zO5h0X`k`N?f*JS&@sBX>o}0|`X!BddU)C*W0)KudPO@*NSZe4xewPsaIQ&y9VJ*@o zVj;;CaWD9dr^S3kwu}P&IgihK2-CTKD$Ltn4}?uRe}icSNn}E2t2JV^`>8(5=TEig zAN4}h`q%w_OGVK;ZplhOG7g|1W{Z*D4});EK@+I(2~j-=a(5n}V<%y5(A44oQJ*<&ZPsrXL(6MBS&@9EnU z^g=Jye72UNB_m`qrBN}~a31AHGjrk+y7oH{ovV)XMyqG; zbW~>MPBUFcx~8sjP5ErK9CewH9~7ke0YvO9W2>D-{fzmTCWI}vT8?Yh+G;X? zH}U0Grv5dk?-xh8z)vMv5KB=RoA@OljAz-0}%;Zg?6>X zcI4qlUwpS5T9p5@_->C2{W*jE&x!B0@M}=5Sx3M=s}rRn8s@$uFQ;h@!#r&NXJDm~6yI(5Hh@fw?-rwxXydyLN8`|;43yLkRnmW%#Wxa7E%$x{>{?uc=6q)OFq8mNl!Rfb23+}klz6+tQ!mqX1JdBJKFZ#EGbNU zw|vY}sxvkc-)$OubTS|%#&?^<>?2GE#-IZRJIx&o$jE7WeO8zJtM|TpeWuP)IzT3optWc`2Id7YWF=K9n zYCQ=UxeVvlm83@#MJ+eEyypBp^#kcZ%|R~R0YTXp*Ow2?S-WoY;}EZ}%QffcI3|;? 
zPLHvDZAqxdNSe@3E}+{o=`|NTHh$XS4QX-s1GFjWI0j?<2Ixxx7H;YY5eN3Rb05_5 z-FBuDbhn*@28^sD2U<-~APEbYH=<48Hbqr|BTn1!%VTWY-KOBC-lM zp3CIMv+ZZ+We*}WTQfEW>d6K6VrM{}TEBQ{=NJ9uSuAgxP(GFA`%uw`{~) zkD*eC!Qrxtby&BfCBwvz+Xu`YYh|l6F;(vNS5YAI*jVKBL@PqL>a(%BseNrdct=zP z>+O^qx`6hYsq&6w7-)~Mid=K1?^Et1fnGs9>N*;cXyhvVGW46fci9z0Y9!7>Syrl< z@NXABBI3cw$!2k2+@U>t?-;6WYF zg$ji2ZDMUzEAf}1fiQ(`L&NtEDX)9gN7(Kep=<6ouil(m=!Vs?fTfnBH|Tc2f48|M z3udvyfZr7G4F;w-5Rkd$WpEFEjMEK_);-Y0e=DFn{ZEYl_60DK6#s1#x@Oi!=n`hm z7yoUlObYX$B)^8Hg3R{fzlrxpR+5PSW)R!owe~PXjA>2V^Pmr|0`CPvQu$sH{|)q^ z#tRe||BdPqn0tpasJnTQYZ#dG#ed`Ld8Ym&sPDJv>x=*9ko>2|fBQG^ofQA=D-MLF z`2FpOAh$wz6W=ZdJu(Z}z6pvE*hc&}E?$W_(rSM$#=%@$ArEvbom?wgN1mb4+=2F1 zl9m+wmw?RGVbZ#b|0Y@(@!v?N(z%}lUgw}E8I$k;v+QON zs%ni^z4Mv-m_c1e3KAUr(AJLwdp@%V-&_x$8!>PqDr>n-pX)C;1&(tAkq^Mc;rfG5 zJ+eVVG4Z6Zqw6!c3*w{x@cGO9Y5;KdHe$5;{h`(BCg875dpmBB2=GpnD=x(u z330pm(#bH$6C=--A7(|Sk?8R$GJ!^-KM%Fay)oFvviGwuCqAO$FOjIqP)KYlGX=%Q zNfjW;%Y2}4=jR@*pa;u{XE`PrP+c%JDQ#kPC%~zrsoMVv)lUvV+m-4DX16fVuLN{6 zn4O2(!Jrnm{DYvyI?f+s>gx6foQY0rVw!;_$=U_rhkbAted>LF`o0sG6?=GsEEgo+ z@s+}qi2viY5pr$#?BVVrQEJD30%NEXrCrY+K1wDK@t;V(LgV|pCVlqs#e8=M{R}{S zUsUG(qdt+Paw^n|u=ZPmZM^&$8iBhO!oMGJj*Aj<13dt9Nc;rZHj!f!L7NEKgjkU3 zM$8kp58!(ltfOfBspmh}p`)@MPaQukAG49jFX)7Kf^$8k45O&DRxvSLxOAg zzlZk{t70F4aWj(Q!%aaEUrgsQW|sen@!?KgTmSfmgZdN4=Uab+80hGofeE2V=yoAa za+r-=^#MjpudcK6&ZX$lx_ZvRq;j80k*B4CF)1E^Zu7_2_z6-`4#FFsaX`Hcl<3oc zK@C!Uz&f83**51~^dP>H{CpO5B}jIfA>ETG$51m&L2|I+^EvCi?)`L-*F9~ch#(28 z8ezHGKF6@igGN^y05CDW#&OJEfowu=Cl0#2H0by_4gdv-vq<1y>%-kTy;Ud2THiCL zt;>Ps`jtocQw|H)RpEW0uPuN#QZN%ESqE4iwB<=0)f(;SO2VNGgt-Y(yj3PmJNh`#bavu<_$ z<7r-NJU;%KMr~O8i`wFS-=wa^9LT;2w#cg0cVuMEUHw{lB0rkj1#E06u9;1&g+BFch0XW6|=-w}y#scYxmeiw8*a$%Rr|RSBo3;*&$cWNIFl zr2db-vlj=T7o@DU7_)>O^Rx0qEGy&xHp3g`pn`cyO$RI0Vzi(Ji=wPnbK#yy-aj;q zR6(wQBc+fE2qGZG;fnu*PloS9&6#vE{5FmVTSMjY`)41fI_M)(g`5n3{SyK|6?bWN z;e*#{F!Mm36@Z$s&lgrFf>vTz(xmeA!2x*Hou|r^hC~)V)zTv(O|8$|!wf8Vgj%EU zS|Ql5ra~$gbp`sP&r@9^&em#GWhVV%E;CUxK)K$X4k{4yr<5p~G1@v%KDi!HtpumF 
z={2DK0b*C_024dFZy@$(_BYfb0ML`rHGoeK@W!GIL=42;4}F&|!|hlEjD}#(mjgc% z1-P=Fb5w1}Z5sLAWFQv#B~UH2c*0yFq&!FU7(|adNA&>(k?v(D*GrA@Ywq@A{2+`= z&QXzn)IOA#7N=Vcq9?k>-fH0uS>x24*RB*XkqV~=LA|KCaGAtT)fA$h#se|AOFWc9 z6=kkjU{V&EnAJ+P4uqp_F!kRlPaM#iI@w~xM{>*7(4 zhyQWh&XrB^723PXCCvzl8Fwv2x@6}*857S#!a8t|za3)+PJ-%CvVd{mUP~+@H%B!! zxpd(6F4MUI^$o0tIvu#BU?T0n{hCOyKHHl=@I7o00vHVPwoxfd(MJ-2i&MF`$#HQi zKZHxyA&K?@e0*>ws>MRL)`N?XQ+0cA&qg8Pr1apLc$K)dxF;T5&yU(J!KQiJ=xrJXn9E1JYyrhFGf^=0|GY}_KX)kPn(kEuRbGWSEs$#Znau8@A1 zkP$Lobsvh^L_q8UhIiRy{yC8dX zW9nL3boOBgC_rmmt?NhiCbf~exHY;`sBF3%)nzn@4a^_6ghSTUcIXf-f<_BZLg>{@;zDW0o zLY$aYxB2OA1r%=VbWL|<@kJm!^$M~$hx-Zf)u^Y4cR&e>{1d5BquP1x4|DDNh%Tru z20f%8x}bUmC|2hZu6)Q&4#dVf%g7zk{To6z7GS0f;xGrq5*@mO`(JFUiRti#AUf603zPmpVw~R*p+=*Z%ZJCr_8Xy^ zaD@(pk5D^>aR)=D8baGMu{IaM7a`W}6)GZ;J&ZYtj~5D?qRX6E8_Ti;v8L@?lGl0Y zktMHulGovHadJss8%kN##g^2J(6n zZxoik(49)vjBP#1`9Oz%1(VgLFImrojK0*uZro-g-=LRk>C2x<%6|G{Tuudh

00KGpnNw$ymI;DBI}|qfZA>pBeS|)(sgn&@P~*h78IM#H((LV6s#@13x($rp|55 zj(A&!+EfcENYyQan8c~-GohtH$ng%sprBoM$?csxsrYliFZwuGa9enKc(~J)V$ryG zVo-Gg{}3&S8S*FTvx7um9Br&l#2PShA+Oan(PlYvY>twd$wZjud~;vPC!Xt~_0dGE zx>X;WPU7wzhn5C4yC6^#n^H5SDW-VkBxEv)84Lv;tIBc5x+VF{@t4oRVxr}QN+-8C z({iM4Q!`D8h@7geC?F@$P!mu7KXl7rs2gbdSe5D(J>OX^TA?t$92Qy#D5BBHr4rmm`*6(NqfK>UDSJmRF1#<5wyAx+BNIfzC))TsLea#5?AuDo^#a^i8gfHOL+a<`qqs zU$^IoR#v3`RToFJ!gT~R*V_%^J6O^}Xh}7+49y0g$_Ujc#Sb%|<`3v+RoBX?A1t6U zlJSsZPKJcW9^V~h%F5s;dcOfJtSMNcbLAU_wbtqujJ`0=5ft8zWf_DE$s9+9R> zV`Ut(xq7OfTxw|vM6ymWw3;*Yvc7oHu&J^ z1*s_9SRIZ;i)zD|b(3S!gj|$HHq|C$)WbsEa(>-ZQUxiiUqo%FjZbNw04fSrRP!`- z;hBD2uG_&ER@z;9`b<|F8adKLpA6lcO}isw*2u}@Sj4`w2fCz`WYRF2$yg?%H_vYx ziO;I(ElgZ&kxNTUJ@r$9H!@y_b)eCiadp`Por=;#4i0Q2gc~pV>b|Q zg3rju(o(f@8xl`MqKVRaG^Qa=ZPK+H6cLSV?R>%XwpW}OIu`$8E;w?v31*Wbsi=YD z3yM;HfUh`QdGeY!)W8V^RhfvYYI6=fn`|{_qhPjItmRBnGfHe3YB7~!jaYqQXpXtD z)Njbewl*{j3*D4Nya_FAs*=0skvCqK^56ZYv^z1U(n{ewl0u`1EsbMOIchJNfCEYl zV*~z*VnI#Prl;9&pn0R3>Z+oN8z&c)kHyD)bd0mXG^{9?-JxJ@bh_Aqcp21aDV;3H z>)=83DO|LqV%$ip9Z|CT;kNKqT&YI3HXcTcy|uv?u#XW&ukUuIHL4x@l*~78;m)X! 
zBbP=9D}VKkaDw5#!)xO+hKI35BX6G{Uu!-ZiHa3D6beVfRdDMjqcda4p+jMU6%H#6 z6^=n`uFAQ}yWFVmSJQk(IMGn9w^rKn&z~G_Ob&;Ll)wwgClFd8RunZ48pU2tpoEYy zv7^kVeJ|}Q#IDy+j38}n;DSu zS&6b6LYVSSCGr|(CB~8=4L#Me0v2R>6|vi+!u6_b=U}!Nd+ELP?*wZzOj{^twnm^( zg4rX(GmB%BC;Q2WUSOlO*h@8HCg-V+31aPuR=_nEo)X1ZX*Up)`?ZP)dBmDG*ZJVm z$}u8d7oU;{H`Kt;P<=!LRnXAXSOc$r^;G)IKruFhr9;KP!S1Mss~RKCH4deR>aW8d z-ZP9sEfkI#6$*|lD~HwO{wW+8mE$Us1vrO@RpX$g5&qj`kT!t{G0T(qhmsPz{c{2s`VT6h^G!*lt#wM!F^pK9Eriy^cyig^Xz464EEkWtFmaSV-?C7?>mh|Uxk9$ zoiv6*ewMH(XB14aaP6@0EX+u~Kao3rWqJ&X)xo5WI8x(jHg5!ErFujRwyRIwQ|V&I zOJ{G0 zw!2Ww#Z1NsGZ$WlJv=ki|IY5#Z-hyl~6^oSA&WvZ+H`k1=Sh;NE@tC!SkJWLrn&}Y zQf5aI;Tc8JeIOfWx5P)Qdbjzp7(9A0V=BV`>=8TDv}$UIU_ap)S7c|z6YjxD(A9n+ zLtFYTXnf2XqYaLGCcBu-S#DBsau~ifi6}c9%sEIS;Z_y4R`m_)8v0PfHYDy1p3w*SsHkqEgf@BSjENfu@`c>S|1cGl- z>41Uy$k>m&s@xVj=uW6s+XPiBun}%R`%%vf)JiB6jKq`v`9-vdV#AhzZb2N%xDgym5;P@cGC%NQ8zK$VV)Ferwrzvb8z zSaSoNvrOt593aCe zOgZ;3IfPn5K*UKZzS8{3O^}R8u?8dPvw?y>X?xC>3?&++(r~JaC$OEuY38gF-###P z0OTVSDlRTXi^;tc%y;}djo6LUPr?63!*za+9&FkDygj7G2Yb%Zl>AVxb;dPV+M~{_ z6NX`nje{ouE>SWsys}|73|rJXGUL5eyt~4Q>auA4lq3!jz4GFJaHsmTpjmZ8UbUu1 zp}A9bXc4Uj)6o$))tXsFqAaFvcF+tf z$(iicgEYDQoHn-h6Ve>6u5Q9o2wNFpYLTXtVQPN0#uDL?XC+X>!_`r2f#dL<+>S+4 znRk3{i_R4zAiSC>2Z?XwBl!dEWjJ3leTi-~|jg*2Z`cB(desuPd zl44iQtBn4^WQF-sPHr2o;^6d9bqt660HUjXqd1Djwti=|KPQ_(x zt>5UNHR^=6gz`mis90*6IOr0$Phv^D;leiT{(_E+T3L)^OGnn zU_uY50ItC)HIgMpbxrK9PbAXCS{M(8e)fWdhItMR%Td13c(U|RzRoY z7HzD7u{ITFH-4k6rO|SD5~tp~tCJ0`7EEgye(H*v*krlJiWL+dy5jf@-<7~`84&-b z*ZCA{x)>-Cm)P7$I?;UIWlK#_9&VeRlOje0#wna2Eqf!Y;xnDGZO*Ud?q=mI!MYm4 z)ijzN@DIdiVDA(OiF;aX_vB2DU^Xm`3W~)}G}WoF(AF%lt#y0|Ll4$QN)HV?IS>|H z4gBp@@o*x7k-?JZYmjQagOR%_iW?)iGUq3bdl zxM8KZ8*I@pje{r(drEx?u2g=@7ghN%)DL5X+ z*0;LG4FW>|7=40#H|}v?kRlB#6Sxn88v$k5&y*N`H;7>l_VT5Z%VL8{ocL=c?}+IU z{zBh_W_k54G>#(OWV91)^ALa;MJQ&ARt%~HPgqdc!hCJlVru!_{f0zIk-NOnx}ba< zK6{_k)rMOM@r%Ca^rAl0^=KdcM+BaeUn)CWZs(Ok@!#=yXJ<3}9o3>zX-Rs5^-?JC z3aBE;TC^6I1zI}f5PQ>BT&8NMbs{dM(B@#!=xp5CTKY{)OQ``U-#?wc`<{fNWPMsN 
z8NNGEro4k@jsrXs@f2}ST729vIl9o9JWAFA0or&Nslhq6I^0BW1SHhw80?sUIGH*8&xG@!e1LUkq_Sor2;&X5}<;4N7XdZQa^ zm4ZsnWV)(PQ`yrrT@=D!%n1D;yzVjF+1F7q*~KY38Dng$?w(JWyBD%3cqE~FnRReD zf_DJu{^Pek#G?~bk}P3-RL2`;jSNqX4igdnewFqU2s(xJo+Eo&qo#rpi68{C95Ub? zS0%$r!TH2QdYOcc+H5Riioq@-6SzjZh*gK!PR>D2%zGqL2^Yl zhW42hV8koL(F_W~O+?;hq6$uAxSqy1cdPPK6D{?xACH)|+E_xkVYsdmK7ZX>Y#kb) zs{9%$r5raBtc%>d5n40o8)UkCIwi`9&Z4lY6tA`5a|}G?VcdvSS}!~32osJpn&_~v1P5l=YL!qBk=CjJ%DnK4DDsmBx+p8hOLSd< zovD1ISZ zI3L^#*_^(ab)Y5VKrZ`;)$?8TJJekLGye2h@I;+$=V%H&&14p{K%K|) ze$bl84eRqRfT@%T$_)i)#Of(bxa1IoY(IqNaesui=QF_cyIXhuxKL!gyDOjSD2g_TRH>Q`Z{~CUpTioc8vOl z>iEq!__$QCR4zSm;MWs=eBODgPL~{Ea^EI^N35l2YKH^Yh>X}T-bK38t76F+F*=r< zNyn>RQ_Dxbn-f<#I@Q>YiT4K#r3t;Kmjk6b-DQW@Fx4Q!o^K4(Q)eNVQ3)ehnu-^a z=-1~dOe&v6$@P(nkV3Tv*`@$oAk1fiW(z$}tAR%z4I3?`uh)uiP%fkFya*27MNGUr z9z$$6K7{~$zef5AmUjo|sh&BjQu%9|)gAR!I7RT49qLRW5#>$EVeamOoHF`0nO{pV zT(O8>b3Bzd`qpRl)>FW%4)qj>&{>g}KrI2TGs*qK)qUFF4Q6AdLQ~;9!;vD|RMD8q zV(@(zM19O!CDrLLpBsv}R62zc(K_oKFW1({DS8;44A)__K7yB-_!rcyFOu#Izo0m; zt`KupI0^NsmZxNQf+-uwPt)@j+L2;vR;Ioy*21lSkDW$${>O-${@(Wh$aW)jH4Oj7 z_E%6f_a1;bpJ<~k;#{sL>35?PA=*$^jtzr#?fQ+X8QsM64l^<{85dPT}r2J|8i zUY>HMig2I$6BiLj#W*?-#nhu4hF*~~J3ARqhP|)4C{bcFYhbIq5ju#^YmJDmaYxKK z;THP|J|Mn23c5lBAzO;y`;lgOF#<4{U=g@C|AZQrJGh(_VWWQPz)#m=lIZPKtSaSo zJGqTdVHJp8a$Z+Uc}t#a8>iRle0eG!DoHu)LO!5H+=FOslm zLZ+(iJsj!lF$gvmbT8arHdQL17)VYus zZKlb{Yr6~R2$57EodtnvQ#sJqy^ z=MA^-f(>%67(24=jInN`ulo0`olls-aJ)DGpD)gOYss|n+onh^V;?BCv5o_jZXC;V zBI5UizU!3%9Znrj-UVqpX?vppB^nMkTFbfG;r#|}p^t0)Iy%Lc0}n2uLARS16R~&= zs-PS7^6FE|?<2qo=69qm1m52d*s>t#s8Aups8oJ+Q&n^fIla7>JH@9QrhHcx9c zIOd&KN1-27G@ZW?s2DWo8e?;ou~I>_M?u)9^9q1>+>9@l_!TEc#^1&;&xFKMC+>Fp zE(xBlsL0v?^EPvQ?^ggG61i=qm%)+ge^EWCg)UBN2xz5HHc#2VNA4CL5A}QL4_x{A@57(pPO@_S^%mY_-Q5+8;qCwwoi|gZIR|6#l#;~pZ z$y&b|F)#M>5&_{{OP+-HB7{m?gb&&esfP% zt5j|!)#6H>R10rA9uyMqI)<`+UUg(@(A)w0b+X^L5{=j}7l{BN=l}f}_2@W(O5#Cg zsQX}n&*fmhY2atx`h-&I@2L1&?*hRaVFoIt9(C2dc1)4d?QU!i)(o<`-9r=4^le5} zikV!64M?4M>JV2fFrN(nW-!+_t7SjgXIzmOT|o4FRyEr7fIxc3#S?M7c|j+qNeUyP 
zfs;FrAmVN5RSXe^>jb0Hxs5XlR+y;&z|hG~FuM}*e?=5yhFB%9Q%zM#^{k55A6DB0 zKc;uw#MqL`#lqg0j5Y+Z@s#xy5mDxy2j8wCpA_~$sTS0h&KrtIxv&5xue>P5$u(}h ze1naAzx2vn%zs-X#KR2GcC-xLFf?N~F?9yZL-5uP3Uw73+O*gNlRkI*9DU28j+B7s zz{S?uF9`>_xZ;=RUh%`_zN)6le)s!4cj2@1X^>%6%XPIC%PU5fPVRh`SXyca!36hA z2(2kx|GB^WTy%?l+QZ&D_EUIVxb1iiKRur6SWoWC<-{CF^A; z@dz1n{XnsLXHn-%nv(1yxq#{bDW6$Db45%TH)PAZS2mVCx%b2GH?WW+rIGMz3b(>q zFOJ?BuSdVpjV?-|aYQR@ABERsbmorx$f3~WxUS+TRGM|#osm1E21gAla8&OH2Z5KBtBjQa{YADdVvAnhI#V)Pcl@THH(N=BKX*Y?NMhg+b?21%Zc91L$^>Yjd*{ZzG{2!TV~Jw*4RD^e z=vGDykN=B$h6YM$ctZD)C-*P@SQ3;S)@PUOdDmSi~KY>9~34Gi9|5(uFDK zmk?;3LyzETCONl6=y@yVk$6x<+-JhYKj%IZ9vQ%EkgYpFF~(J{Ebin_dFMgYUwecw z{T=U|Ut5)WviIQTMNkoa<^S@U-!!57GZ!qCZTO7iSt*6Lu&!iCpqoc!{^V;*_ zqu38Qe2$>zSBxtb2h@I*@-yw7(`c`VHKT;da>Wmt6ynD}=r@|EKgG|Oc^amARcQn3 zg{RbOgd&gP<}$pT%{j50a7il&##?SP!sEE4{Wd3B6PQyBu^!l~$Y6(*x4L+{>+o_~ zxZc?Yd8=p4GdtR*IV;SyaYc*>P0Ap%vT4Vf%5|l;v!>??3t(_C?myWtjHd{ebfUuS zAYDUel8NN*p(*rq9Q{1q%#v8SSJaI0NvX7BjU&U2ICE^E2owM7fkrW0N2I#5=s(B_ zyxW3%i{jBg^8O-tWe2DBK|4g!0O@qXAuOTP#i0@#y4%&FN~TCO{3v&O%Xz zQgZgksY4QiLF1^`83Sikw`?lgDAYf2(3;?c$yoQj5lw1mIKugLv{0;$f{L792XTZ$ z#JIwL?DPs%o1CW< z?C3giyH-yMbpgunkvKJ#Nh-l~Jk^X27pp=`_vxi8t4RKr;u?8$fVW#!Cn@V+e5t-^V2)qwJvM`U zSYMinJ{u`jd?`E7@jc*E^G6mZgc?KE%&%+cQFDJ4tX1`Pica|hB$%M`$y3|`rr!u| zOeW&=>o?HZ)y&7sXQs9zZdAYO8lvA=X&^Zlx14dx=p6@h!E)}1OH0LLsEA&2Rjm~R zCz_>vpAEIvdFCKA0ynyoQRg0ePO8g+enLvKSzXu59_aVOhkg?8TawRljw$agA7m>* zbyVI!-?!V$&V;uUXDjM(rNC7Jt*x<1Pp0AC@b?%*?qVx z7hOS;_M^ey7FRHI587q^e+w+BN^QP)bu4Q&$8Q zIln2QCt-FrddugP%hqZ!ze6-FisIQQcVOHN2tpwJ%1<>UKB}D?ZLm&b^nSWJIQIV; z1JBG9Mv41&s2rPY^NiiEo_-@^DI~uTKqcs<(qJJq;hr0**+Zs#p1AVGZFfH1d2vSQ zj5-a>cTU&EZ`G#0;>eHS8=Y`uH&R}9O1=)+Q_o4Ywrg4)YEfyt)}kd;1auh3HS)7| zK!-CV-k1w3j=81Xyy?dY)J5y+;?tvMT#(Zh`OXHx{}BW~(V&;mAs*`79lmS#j%&e~ zcq9>eI};X@$8a$AU@7On-imQaY7IBcJNiveC0T=Gbz*CbO4I!xa%ixw*ZbNS zOk29CjLuVm6@#4XBCQ=rPkXg~apI)BznAM>gT+k8g9K0vtvc4lff@F6SVfAgpe?V# zr7NR?sk}6NvU+|x)63#@XBo~2{&USu6r-#iKE|az`2*(x!-XRUBD-W{L99`%3Ap#^ 
z)epiRv46q29&aY#Q4aAV&e&^s?F*%qxlV!IWdFvStb>oSkji~X&J`_GbUBHplyUNB z{+y}fA%w4tx$n&&nH>KZRLoT>nLkEMfvDux_l=U_!)vY|zDBa)J%++}PF)@4k|*L0 z{FFhJpK9^b2D~;EaXj0r=Wb4PW~;C3O3xvF1(hsEzgneOvVEKnDW+sCLY^YNkrTWf zo+qKSU4bo|dLuWrQZbhTk4h2J?(TifaBQ#VP|tYfPz^ToWHSvg{kY$7*A0tELG_!3 zzRw-0S)}|ejl!EH5IM1YC!Qfu^s_C6kx0VRMp1839RyE0n^*5?g-cW$r{7Rb@hh)t zY^*(VeX8(kZGy+<(y}e-INRC7}yEO%|f*i4L-U-H$KnPA^%HNLc`J zqf!Iw0SIPei9`*?Va<~_d;P0Iw*usgI(~D5Z+@MHcUD6FB7Ex?jdJ0Ean$1AI_G!I z{Zx?mD9EsAhTGSrLaEIl(aqF09=UYc!t66 z| zT9Z25N8RBcEN6*d<@{DIMBllNzo6r>XvNE^ekFMtpeH4ky4Fl^FklC)elE~!iLG`Ti zo~$S{be!kGOT^qba+&%l*mV4$e4E#99Jo#E~=oU@Zw z9?QU-q^JwAsuWiF?nYEs8xb^g2I>3^isOAz+ipwPN+2~f9Y`LGO}met;cJ~}0WO8O zYn>tSaCVC&!0+cLRR;o4&to=m7dL6W^E9#i}4kV<}!%5d!G{d}Gmc|9?+4H_0@Fg@`BXoz=%P z1&s{=h`fGwQCnFGdzQi@*rML@oLEgB-w%q`qgv=&`dMR&&6Mgp2cF+=CkidElD<}g zqoc^!!SyElG?YDFqyuo2`n<7(9jabU3|8g;N?mz}X^f-E$dMO1r(1_!uANX!-nLjOZR$wk!h! zy$cN>4Ga;J%qvtjOLY`o~t7GmR@)#XNzAayOT zq#6=6exmJl{=fdBXw~o;w}JGi;TLQX4Ctql{AK~>rM>w31%)~*OOE?mzpzeeMnkPg zHCW*OmYv_1y*pR0lgu$Ggx?zP`#TR8$|Y?ewp=ARIB#h5ZI`D{wGxTpX-&Ug=XQuV zVD;YmPCZ;|2%P98<>g^|BAb2`!uQgVd*Le>G)y4BuJau<;}vgD$TTa=e|Tdf70Sol*AvwIaOV+lY0wwVhyT9KUl&-&yIt14|)E)rnXGTtMm> zVLmlg^^2Mb2lF;s_m$t1LB8)d^C2q=*Vp6t#%tPY!FJv{_7!c{326j$3D=5s?*?*} z(rtvK|Lvm$c~85Cg@&g9$M^I~WHMSYt4{r> zn!N^jwTvDM&s{+ILMlDe>>WcjAWA6pwav)E&%DXkYw(t7YbUJ;YK8i-E_ejV ztfuRn&HQ*tuKD97o{4JM0^)L?-y^cTM|=V)nwxMUz(aW`b6}J>O}3UoUN1G~-jE;f zmz1#*avxQbTH5;ET6qzUrmlNGW{`7$L2(pn6HmX#LEccWSQtGeJ;d~rouzUW&xV82 z=QP-FbW<4g?p8V5eBsfr0KbLJ8$gi49J#@HdC~kCU)mU1CMSJhApBg-zxnPNqsCcv z>S{4P>SLay;ySwL5&{i{>URHMyu>&ZwKa6J*+G z$uWNUsIJz>yP$eiX*%VnF**jfsKN z3}!btuhN=Frp*U>sn`eS$(o!L?hWity&FP){NeeXG2IBC-?Gl7+n?qLdmHLZso)qq zn~5s`)Y;C!+pY78oS_u3(ut^P`Was(YNX@vvokViNXn8ut^A)DaJ+^^G8j$V-xAMn z|9`Dr+j3h+uAMJRiKJvnj|nX&oY;xVQv8q?7DZ7O6W)px6??vZ7Jx?IfZp7ib5gF@ zmN&V((P&&3faqmjf=8*;V{$-gU$RG*%iB3#9%@O7OC5M~gz{gHsXd`$_N&Yp#WGf) z>L(omZ1-qq6idLxp(KLa&e*xn8F5KnPbZU!5SQ*y81+RZMWBa#|W 
zPkfxP^dp|AeoJn~zL1#vut(MKVxt9{v9BXF6)=)N#M2TQsmW+cTG6<_z`95GBx$(j zZAVo=?KTC#;@dj4X`owc76OCcQ9DC~{Gc&T7i2`+1Q&%dQCTHcO;cs3!gMYo)E9`h zKOS8%5(L7?ziaaF+)=?YXvL!=#2qH0UtxoS{gQZABoHv zpc?i|^A;iANC;R-FH7&M>(zkdb>~NQtV54q*89OiBmOz3K(b@yoy4u!T{s`XdI zt~43IXPVXgKj)`3AD*g=wG2LN&w(^vK%LK@g~|B#lvqY^n!z!i@KGorL@GZ+1(3a+ zp&{GDP(#!vlcFrXYS?!zz_JV80~KQ3_Rg5Y$KfyE4{_F8I=D8ppal~>t?Pjtbs(A@7L*P z3`<4uWj|h_^lxFaf}%2N-x?i(5D4;c90=$EpdQvYEhl($P&f;A|t_9(5t1r}6?KGyFJ2H6CTH?J}H z-vf#+&7bcG>Z#D8LVU3yYdDv0b&s5{7GBr7x%70!Rna^-0V4#H-e+krM-d*0RfYPxi zrk6)GK`E`8qzQ6lcRZjj?g68F%=?orV0f6qb*>+_-%tSpqs)^so&Y2qVuG+{Vh&+j z^l6?>=ig@B8D{ZqzQEYz?Pj`r+2;|nAEFC<=6QO37eYh-S|_XwMW&L)&d~eoUTV#n zwa^L2yowLh6hd&D)WdPV-+o1Smw}+dE7w%*M{NaE5}&+YQestxh1)xhK{%>dfA5cN z;w2YF1+!JC#Tr}(BlOE+e*R?RXdL&&zfGP#E;Y@^Ls(M~ChSu<7MtyRj;cr7?v-&_ zGvgQx=*3WNMIQI!tX(N{s_B~GCgyVyNRZ182gBU5!2P#}lD?$@jOuCbgrJAjibnz9TAPYM=Y*xLUoxB@u&tDl+~$7G80xCSCkI^h5=<^;>H38Q>Bs zcSv;doaREBSW1>+(*n>8!d=ejicXIvqxejiA{9YpH2gq-@gdyN*ELxA)&=5|X6cIz z|6Sma;RSiQi!CL0s+LdQdc@c)2;m7{EXGzuBu;~T(edV<(czprYvy$7^$F@H#_E)4 zad=Y;jmer5unAulN=cx};#W$;-<_DrlQt3s$mwmsxD+nN9~`VQGmrKc(|Cto+Q>Sq)mboG%0h*?`4dlGn!R=wsr$wp4vRVf$P7s?H z%L8Q@(PaX^y`oX5yr3YLIRXi&daWiu{r+2uvSiT*|rGTzX)?fU^LUlQjr4XW@fX4^|ELz1zr) z|M{NXg9r$(cct8|wi2x2TQnI!!wo8{zt?gL@B7ojxx2Iq^}#Uj=j)xX$5WOpLEvNx zDuf>0wN$Z_3X7rFopBvS`p`-K*Cf&F;T=q(L5_K&6)L~emCQh$K0W%XqCyYp{jH#^ z34H*SV#u0tQq%3k46YZqS&f*E75Kw)s}e9u%pxc_+HL>wsNe=~m5f(*3a|OoNRek1-lq%VL*WpVX`@ z*Zr#UXS;Ikx7O5Wl{Q=*G3JAFPgmqlY#YsAJ@(W9KWJ(~4!hM7ktbltVURDmu?GeB zgRd*`S%VoN!s`CB%6fP_ExbYE;^|<&gni$CuhypNZj zeu}RVQ(gfK5?4qL<#n7LN*(R3G-Dodun-Ng#DXW{Hmnn#B#)YJQTJm|&Q8K=Z)?m2 zB9`YLdvn3IlH!^na2en8^g<1nq^)OU%M!;%QmI_1di-}tq_pP(Bw~qEUvvW!QCgvq z1i)NU4du7*N+we19|c?JB9=;;P4OxF8G|gf98rx=9;%TzjMYkMWrvvDvpvmpO{LKH(}k4~=dimg#Kqeyn!^-- zajHPlYC{#7m_ehV7g`QkL=LOD*>k$T>pGnB)ZcG=B!9u<*?=OqlA9vZ3`z zQ&M`>Ug?(He&?!X#GgkY(lm3mYLmD2Ciw5vXiVCwB-8gw2a$W6<(U#RmSs9y%pR80 zBW@3I{&+kc{a|HVk|{a9LKPO$>dSBAX8?p3D^2DU+JM|T^7ENjH!H*eSL||-%o|9Ly-y!+rwDv4H_{4# 
zfui--X2)hP=$eFlN)S6xtP{Dc95qP*ESNea_|FKsY&{>U_F~$8MZ01RiZV5KcPs`% zHWULZ~<3B^vrO1A~1b@=*Yk)RyB=x*Ay0x=OJY#YwFxU!QJLdd@Heu zD^UqBx_#MC22?M`X`Y`x(PX#9&EQO;T~lzL)VS${^OmpsdF7`2w{G!O|#bl{#A6m;Hhpyx1x z-g~=PhAc#;ak!E9Aj@ui4@-yJZ{_7L4{ew zg7p4P!?kfNch1x+(gZ{9&X^(#9LX3hlRcCfmUwe4qX^*)iYOSmWSn9JTT(=p#p;8_ z8I~Pq=$wRL0~945&?qaYPnT@YNK-W?5k$xntKP?<<>VJppW-lZ6k(L|PhG8;jbgwU z_Vej&acmgGU_sr=c2Gfm@1*tBUs%=3y(sK>fpF?ceDs9c~t2II* zI@L1WJs+Q*{K3U~o+`~-X~kT$NG=m&x3@>T?)JfW5-X!zQ$jbBcxjW^MTu9@J9okH zXYa0f_kp5EUM8U4Q|$0B$!l+8O4o5n3>0^_oP%YVyQlpS%Yw3$jlHlDWy)}zQ&PS_ znF!03ZH`2ios%B&$SQ{R{g)yc1keBB_~hk|;F`~GP9%uZF_TTV$2MmE2xUE{!q;rK zLN1Gc%RohU(i0rnVOVL9wP`}?Iw!$Bmli$5vI8v<&{q3dnt*ak%tAUMo> zCIRCe=%FP+0urHYB9?eQUuvh$P_aTFgyv%$aXr& z+Ki+3B+95!8DgvNla1`)+77R4sW7De<|^G{hYgo5<>H!BG!wzr^kMJ@y>6&^qmaje zx!lMgh3eH3m`Fu%6owm-Ti4Dd)i)#c4-*ACye8{5nSRw`ToVvnPI;wrJ+uy554o8g zA~WJvBL`g}2|fiAEzT!MdK2BtAY#oSE_G9nr({&uA52mYzyW%F{Xk7C6u|Gd-zY(1 zfQ*=N^t_n=4HJm^i7zp?2v;iwX((D2t>jSj-rY{S%6e1&UqVzk9yl?>_ucZdfa}?v zrLAk?{hir;dWOtTp1Ie2C-DsR{eMpqR{0pjIA8ja!x8>%vTR~~TL)37okj|nmz}Wu z0U=UnpM_EsSI@wqI+9=Hnln`nzAyYz6q3RF}2{H;tTSGV_Du%%@hi!6enV76F-ds3VX}0r_6f(X z6*JyRhzw}kd=6U7EHTzsw6`-+qgHEcJSuer#!ntV#E026=+~yyRp_i@AJwhtaurHu zpG=yDN|-%O+%nu^riUd;m^OPoy$ho+ZvOfUp8x6QChAHGJKX&Yyf>=^I6}pYJ<5xf z=8?=LcdeqpKy(SA{m*G@9@9|2Gku1t6uoD>t4H}J{^sjnjzKTVM=y)N+4RcrBG`1{ zH@IbctEFLMhSDT5;>fs6n`|Sy+9fzL+kHP7~_Yxv?VOq!udE>MdpRx9*&go}{e zUgV2@?x6ZxtcE?IwkhfqcVm!AESREN%|uO%X!JecC`}Bx(|u7)qPgQ4%+G3dgb}M6T=cHLgvsT!evg(icJ9}Wqb1ad*w4l%y;Haxo4$% zcQ4s9A@^10J{_hupJ*7=N+lHmB0rCQ6YIplcVA@qgSS|f%H!Y*k@Nq{cD)6BWcM5j zWQLceFl)gTg3FxeKYKdNohDml6b^>Di`bVjA8p;2oNWP4%9CO&_Xp>I51c3f5E?*o47}HALHbrgLm!py9=)>E0jzB7@YC- z+qdZs1w-2nh=r<=*Rx*iq4K98rqVGSj_Vs>so`Q+_U;g^G^EUafLSh$8DM7UHDSeBU5cYM2+BKG-*Nn^ ze*%p%AP<_{+SZ=G4Mz*NNtFBLaXD6Esq(9svg)Iv@AW_Ins zce%5A@*P^UHS*Y?)>!=a99^bf_28l!@abJYPi*5#mN#X|PZe9JO|z>F#?^>jM!UBe zQZ&23`<)DF%N8!9=gxh|t{M8OqFuorojV`42fE128g#nc^IJ#h@B)CFyS2uX%a}Mn 
z#FF+?NN9pYb+U6a?>VPVVJM+K@p&7nDf*|_z+BpX7T&f?RFzxdsTl&Xc_jxE@O~*7tv>uEO6h&M$ZX1{engj-i72#g}Xq|EBbAUp7q>Cxef(n@}hb3W7drY(Fheb zOID{Os%~ySp!@W%b1H2?Vf*bX;d^fBjKP=bX!s;GNyn)!@+c%;h?bT?(*8fT9U(1v zg@HSM^AY^3zBBUIB}Ef|-=q5|!HzTr3&t_$US3YknVml3C`ep&NI}|gV$!MMeoMvi z=Iu2jghoxac%;wK7P47;DYcK`N|k_IY&JPQXQy1}H94Ot0%~~SRe`*<{DT8|!waI5 zNxM>PF2&ulWq}*>_#7kdWKrjYb?}6gXHFEhM8+b(w7pReCj_5m8fLC!9kaY`gZ~}h zx6AfSLuFX|958()m>6W}R3+?ea-ezk9Zi>V00PR0g+f~&N34}PzBSz5F#1ax#96e{@t> z;sF^mrjGCE;~)18@_F}A-I8}wi0*w-4E-vckE5tkm`my%X((TccN^j@jl|Zu=}~}s zbwf%|{I|RZ!kER5)qF`WK9r>e@8l6Wo|uorR>d7uOjc$RmB^FaBD#C$5(9Pd_|-bk zpt2394inu=B`QyxN5Kt}EaTuOah&zTPUQ;H5`zY{y{A zn-6;(uth3X%0ou^$>^Tj#TJ9eG#lh-k|b%-xocvWsNQgj&3Ig?PsgL8Q21|(f@_HR zFmJE(u|SnA2-Y^#Ar4ajz8Em8)sLTuNk&jKc?VwUWgDh4;v<>k>;?niALjK<9B zj8w)kdW1=Lk*xQCQL<)ZdQ9O?R!{Q|A|i-w39$4$%(_^1R{JRLkAwHLPGgt6RqJso zx1$Pm`-s@A>-O|9c_RS6rfm|zwggCIEL*E_gm?Y<#Kk`&{b{(EX;uM{G{kisgsxQZb; zw6E?zJz4NYNkQY1J3_;~^Lhi+66EWqHOmmbY!zIZxEuGvzNZSx^iKyz`0^$-6dl58 zR!S!B4arbmMZY|eZa}Xvhc5Q0H6hHBGSA>_jkv;W^V1tVGA1f*zr$lyUrR1lFOM~g z9XRSR9|$46_(cKom>w-cDDyRPfeYl9XLxGj`wO~-ZpX9T%MnTml6`sj9mAv++FszC z3YSvrg3=Sk+WBpHkeec*vc_{Vy!(n6>(thwo11bhsngSerj~}H{@et4M2wEgKEa2Z z!&3v*EWui%tm&g)ViV@TngsdJ6s}_5z;vnKAbtpsU;P+@5dR=L03sfO2G+mU0w>0nI~) z%}$8zT+x&+dFgp`D=`QRM5(#2F(fx7$cZy_;md)!t9` z=O~v6pNvAM9t(P!G>2RVUCYqYOhe~0BxG6ACYYIhDmT0F7E|rv zMH(~h*Gw^Gv(m_%<9WG%ARl%>h0qJ7#E^J~z382YC|&%&47GYj1tP7&%SC&|?7|V> zyEeEzeS$hd-$z9~DDhz3qAXuoebO&&*5-kZcly1|O4M9MIaU zv~o$nuZTWBxkJGda8zrOn1|T6EOtIQOYxh&8-}-w?pn3n*9$$VP;Tw;ojysdOD*Z? 
zYK9YxafPs^(t;QEQjZN8A23^PVpp9C-c1op?Qr=*?+1z#QN@YbSxZBCVF+12iSDcU zcK4mE9c-b=f_%lbPQ?{XBnXo63~2_)OY=egx){C4O#cz`JZ$w-TM?SW42qqe*!BQI zw|c%7Ym(qWIKPskYuswgGS~dmqB09`ncNTu#7V&V)9iV+nr*fZ%T}8vtiPqQGDoeM zRgRa6jhGFhk?8-;VYP;Fs6v~HJ5cEBos*Fb{N2uUl$t(zzskVFnV?RlU)LBXqkWm{ zk0t>S$G`FLLAq{!!<5Hl8)T_lx@OW)WjYDd;@yTRs`2W$&gQs&SnPj)(#n*860DcY zB&sZUlsE$b`+euq)Onv!b+e>_$cw|_^9)^dyyMW1jzcKwfP7M%PvPm$`U4cbg*qIe zgCa!&XfUjmc=d-uf1l6y;mT0dgk%OZ?W>OWx2_^EJL?o8>;*zZ z@{!@XM0G=?XBS_s`U?RTQ&>m@`rwSiWQ_3XEj_7{I(CFxS|Dl7_pQa3p0ooPtVszM z{*N3=kVSeSmS*dF+#}LTwn0l~9i})XsFMR0Eqk{ZW{W@>P&1ZO5(5IGzBtevK6=hR zs5?`k$I4)hR5G0%L-66q;_Ciq5hCZ);SXKoBr;H=^^Wyr%8^;CVowrev}#(?iqHwX zmQ-VK!Ll07SbTo;oi=UpOS+F;L&N+@OdKF^7C%1riVzWe!ZGf>E1Mr1iH|oN*-_4{ zX8D@rGi* zQsRGaY@)k$WtmT($zejt&-yonTItRiE}*llHEPjZA(@lZLk~mAw&XxKHflP1ZUXm$ z@b7E3nLOIflFr)i9fk>-cH^ENb%a2fg&j2-CB%JY=5&+1w$0`dGjI|9Hx+x)sf=jqiA@Z+j#45UOb zjUupxA%<`+vH?jI6aUXQrH@1_FpBnAc;gXv)Ih)0XBR+|_S^J975`#~AJ16+pOPgY zvV>S-x+7hP>xd@yh;{pjm{pEDB^HJk>BML=l39hR*)EDr)~l_IT;ADyyLj^PdX)&X zlV7}7WHKS5{H${1g^+}rt$vz)UmSP_IR^QnLZNYdxf*!HB+F;Bxt2kIU7Bb4{TwWr zhTKj%rYrFvev-ZyHqfH{5rc@SlGnSHi6GK#Foqjq5FfG;Hxd6mQg3s*l0)sf6{AkC78ln~Ok*JH0H3aKt@fx(%zgd1=Y`)A^haWeOUlwyH*%;srgJ^JmKudX7 z(F?f24b2)@UQ(#Bf)e^nR=Jd$W7if6k+UB7*D}D{BKu64`Vwts|H9H;sxPa$nd9ye zYTa9oCngGC7zzPaCD{e51-SPFf~2KoAuAPI++0PJ>M1W8PV(-h?0jYWwqTXy&&9+6 k!~5T(V&o*epE;d~dADwEV`D@3OGQ+Kqik$REj&z# z$iuN}5d*O!m5385Zsqd{5bo-dhl7Sbb08eQWsf|hWwaIba}m-K&@#R)!tUUA2cIGc z1yWGHnxPPA9_8?ig6H|)iw_N-lZ)^OElUUm&u<4lqW^I5`L_pOD&H=YMM2Xe;KD)( zRX_{Bj-=C7_n8m#8`}PTC9LM1Mz6nYEZJIXhsY;sWWXK)5;Y<|={62cj#Z*0xnE3W z`?^XV9dT%8lZ)6mOtBx=*iaykMK@W!x`@s|CNDo=sh>hzEG;!8mb4&h+Yy^*79y3p zSmrghRg>xIH~n+j*&2*Q2b~5vci~~8}INp)w z*aP(>By;`;JB!LPj_6*dYqTmd0?$U-*gD!0HSE&C0uuk`QR|HKj^qk1>8REetRIle zKuMeZx*EFS?;Xpi$+(sd#4(pnry{YVH0rvI%_PWSl!I-qE-%TlGQ>QJn2AG1UkCdN zAJmR22NJnoTd%mjxh`q6rcal1DqS9%0|o07;%K8zR_7vpiLHGTv+bvr*IOEh#X_J? 
zSJBsV@Toa7iUgHcSGYQlJQ?_*&akzDGI_&Rc`TULh65>+)(L+036WHxIM^D$M zd5z7=S&YP?ki|e8##PEpwu~s`QqXaf&B-H1TVr}|wNYcSAvOj=NJU~SvLE-vH$Uye z^=T(I;6Gw85Th-g69@e$O;YJ*4$bW?jbXf`z2kmWOPk_|Ol(~YmQCUDc~dIsZskJh zgygqW%PM_vZucZ&oOUeo0*bc~opZ9$qHE~vsdY|nVm@XZKW*Krog=lb7H0>4xD{ep zY-2GHV$7o_Nh2*r16f#M)M|rfy=t&?eg_goIYt(iDF7&Uo=kPX_~;y_JN?N4lTmDG{)7L-hMO_MfTh(isVHil8I z=3FFMtI>?GNiRu^Gzq<^PxuEo4V(e!mBB)Ke-`)WCG-W{Uu5(pgqPX3ucaSC`N=a9}PLx7=x1sD#D1jYbk0sb0?`w75AV6x=-6x>gh&@}!u(9_wo83<lM!_e*>Ov1U3O%fNcQ1wj=`&1*Yyew81z?mX7_13KTcQVY z1NJ~(zyZh)6a@II2<{yjT?}Cfz=_eN5gGtzpe%bv)0M-0d7y&iSw-Ae0;&L2focG~ zs!Mq5)6(hGDL5TSV98+0TP4a5L_fjA%@NB|One!u`=5HJK71`G%2H4@<{ zU^JuYvoY*`EDOgW91lzcCIeFd{z{VEPs9CmM$bSv6PN|eWzQBMTnH=%l7Z#GUjV&U zAY2Ko0#*ZSfOWunU=xr6Yyq|bJAmClD)2Y357-YJ1P%j7fMdXM;1qBgI0KvmE&`W< z8vwm-vhY8Iw}CsrUEn_O2%y(fgwKHIKo;;4cm>ewHNrRS{w)jX^LM!aAff3qdVRvP zFMy4j5Dh@*|Mb!!)C0MJJb*pGU-@wFAffZ)z5q}VC=3(@iUFko<%RhyDGQVXDl&d$ zgjIlQKuy3Ms0Gvp>HuCqeV`E_y^NrnvS-Z{Ja5n~*t3=heE>h89nb;j2y_Cv09}D@ zKzHCzfWLa+K1f0bOYUi$o}f*VXT5PB$>?Yn#vqIZ;(&M{0q6(x2l#6s?gvX~`g|zp zVTxxK(8JlY5eP>~_@i+@9+(JBVb7-`Oaf*Av)MD6_Z-~M1Lm`53lS~?76Zxb*-{qL z_{wWJcq@Qaz-nL(unwTtdW0L<{U(Gd?B2??{<~H2Y&+;3z)s2YUARwW^d5x!*nK*} z1Hd6h(;N@u{s?dsI3aod5AIJh`V7Jh;2dxsxBy%PE&-Q;tH5>OCP1(M5M}~*fV;qb z-~sRmcnt8@6Wl)qo&j0x`AdYafH%Nf;63mG_yl|bz5?F?6&58LKnvIbIzW2qLFZ!6 zayxm0KJ?LmSXpmE-kq?;NF?hWe}E? 
z@XF)90#FgC0#pNP04{(lKrc6h9zboN4twT_ur5#^Xb3b0ngaA{#zJp|&4CsGb`gj# z&>G+`Kiszk+5zn)&pY70BhX3mtPAeD0^NZ>fgV5*5Dd^OgoU9jG$D)tdIOQ{8I2c> z`xw9s!~yXDelwWP;U-`Uuoc(_>;QHGdw{(_8bGgf zga_FDL4=2Z!;H4ZK)+Ma#~6Ja;RywgPea3#;GF`_02#np;2dxsxCrpqC3b%W;Z@)o zqpu^p!R~3?|JeO4gtvjaz&+pr@DO+eJO-Wu&j5P8U||-*m+aokvHts-J$sAr1Mm^} z1bha*0QCAsL+o7G04jjLG`QCRxq&=@JwUI#2=f6BjJ7_t{#$@OE6Bn^2nz#67+n;h zBTx(|0h9!s00ZC*lm+;!9J{Z8urg2us0P#kTmd)09iUe&7J4A8!|v&GFWlD$8URM1 zDbNz2S46>PmwG?1-(h&~d+$z7T^@M`ucYuWH;cIR4IL+)n}?;J&Fx z(=OhC&sJmDcK>~CqS{40S~uryN??O2S$7thvwREJcySM5$FsTh=cmkjSD~Y~z4wMe zD=fFW_878a>hMne>?Sqo*EucV=EJdk`2FbGY2?`2Y2Lfr-fdiEs9#m@O4F8j4YXMb zze2F+~L>L-Y)rX>>t-Of4?2Ma`GyQEKBg<*{(NK8$_wbnh zM>nROD|oZrw*o+|220Dh1_Xa_2fmI>UHlD59G~9FG;%_>j~N;5{9c!NdU}7uUCZ9= z+ePWmKvVCFPt&d&>y}@vEx6TCpj7f!RjIRWqbk08{%Cscrwc~xp8PWJ-Vgo(LrV|a zH|*oU@w(s}L+rMhbr(v`?~oh!+s9Qe(ztS)%%;GKVhyIX>a~4TfkV}_H&V|%KU3*q zn@JhB+nlX`Y{FE8Rd=8HRNzL?-+LPMdsXglOK8Kf#*?2zKKkVPG=6UD@v)vp&|g5O zO=@4K@PL#yTL-3pT6ZCYbYX9wb7F^{QQYK#x_H35{``j5CYeu>Y3=LWK-i+$)7Cv5)Sv5}4}bh>X!@s6+|N4Iw#S(T zuaehKAz%W!$EB)bvRw*yUXF|A@Ng}QbY~-)@|iq ze{4I~_~<-(Uu!bi8H2D$%HzrvmJT`aHoj`TQ(fxbJp-g|JG^{>=dQ?dH!Gc|VTH&| z;~Y!1)h)uk&#Mn96r*a~UzoC@X`xw5ihm!2`^~s7dwJH8er@-c8X? 
zQ^l)%p1rt4eHxwy&VTU#xAIX$Z!8+CH> zkco9K6`u_J+qX-}@>lD%_Vafz3@I=(bl1VGD>N)&Eapp5_w|zLP6#Oig3C+@nYcB} z1gOrx9yEJEbL}(tU7hA3q*wrWZr;;waNym@Ab>*Kw<%iV8MvRu&NGn0p$G3C4SrHG~EoGvYsmsh;MuiJagJyW@= zt*Wj35EX`SK<*QMCDk`N(XeH!iOvzN%PcEgu>C=b(P@JY{xNpM+A$aV>2@}4f1%`- z<{!r5Ubi*(-th>_T_>V+?F z9v%OTkT zbJF%%x{u9YJ;427b<46ee8}#%FCG4I7@oxrFWi`h7NF14MlZUj{IzRb$6OnRgT5cT zwdzQOBY;P^r+5rhPT%kbxpveaj-({fdBH=Td6epsDr_NF=Rn+~=;|Mh9hj2X|I8%=r&gdP7_wP*Ep zZ_eJ0Z63O%PqSWa>>tPHU(`umVYcHrJXl^S!|uIW7N6|u zdtjQ%qu;Uji@Oamw84G($!RMx+W(Qnhd~RXr&h!><7C5@b9hea1$no2J&*h8=Su%| z5h2C;8mYngANfC|VF83!a(C>xI@hAQ8$n;g{oap}asDkXJ0{nBn_>U<5XSyyUXrMm&$^WgsG!NINnP+cBP!)^$_wG5-tv}xJ60YOVV~Om z*n~rm+tj@AWOt{@pC4SNcvSsNx{-aDmC-)M*XYk$W1PI%*5Iq&vf3H1Itqt|?W?D1dyWv8Y`ofyBg z`rdqiZ~mWc_Dz(BfOFbgTS*ZW=EyA5fujSk>_*UrbFqS}Q5(?Vb;djL)XwzSpP5 zf3;mQdfO1ka)l^{0gFq%=ro{}M)xGN*0PxgYMX*)4y?0Xx47XBAQ<)Y4+qRtEYZoj`c%uC$*JD7Bd#CNNTm)u6(YhTE=xN4RjsUlBIZ4Jy>gHEwO);d!5=q>fxm>6?|^z3YK4@7v10BEys2 z?Ot@%ZMd1gU!@YqKZe_%9-drmfTsA^M}-?7_;R#)k0%JfA{_1)(!br_5%B{?kGZMp zb913B;N_qBjpEJI#ck>$JQ<$$S3QIYNyBdJ4|F=RvytKbf0K`FxCO+uuq;fPkvl5? 
z^^+TyF3WeS^lsopuijufH|-YV));PaZ`zxA#FYEZR6K3` zy0%R3$@G3?`KtFyIt83CHJIG`;I3;oOeqV(4j&vl=O}RJRd~hQuV&}Hhwv^?XRn*> zeS}^J%NELSc6oG7T^`~1R)fEn0xKorj=6y$9}C=Wm}wIDi7o71sTj)UQOI7$l zce~ot3Z+Nx#r@xa_ra_a&4GH8icTr`>T)E-^HaUZ?Psqo@9$mhwxPA%%}v0wdvmY$ z@-Uu^$w0ht4ow>T_YOn7I3poAlw+8ox5FTuM{OZKZ+L*yB3I861k_^n}m0L5Y|K(g)p;bVE;wA zH+<{jwj}P$8blbeK%lrIE z!#0k;(Ivd8AdP;mP7#4>dpL zSeAxfrAv&V*l=!H`g&k_;VJz$A#|vl;AILA-e#C?=+@UYgvs_MSD#_j;?>3|c|ME-e#h z)r$AX*mUGLse^F(mK`-~BP?dzp6H1%_OoLPdQU@2HyQb*QE||JxP`2#%jl4!$2wVe zfy95KOyYe3+~PBiG64?>=(6#429^`iV+nT$X{>7j?^^rO>q+GnMl9>AT9 zvOc5fpEr{8o3Z^x`jqnTXyo;sT~Qu?-F!KHHBnCM6#1RKAm^7ZC#QQ=meZ?j<>lw4 zN?})igypL6lvUo^x8=0WK{@^0UrvwTAg9BOa(ZGbIlV;D-fsuX`A$3Jv_WBCl@#d{ z+sNa$nIfl6S#r7#cYe_NjJ8ieS&_dKw->sLH095}E~lpj%jrMj<@B&W<#fdca(a}f zoYs7m)1RH?bot?Odei|qy;0Gg4R*`_&9zwuS&@q1pB(;1HP@=A@C^9LW5)8(4W z>8pzN3sv~ffPwP(pDM}AYusMWPgm5p{!KamWGQ+2xy9sk12=j6Kb_@tmO`Gg>2m&z zFLFAFJ2Gm0M%zC|ArC#<#b1=a;sj4w{q?T%a{7s+f6@3MOXd8I-Q@Hrv3i{SHti+n z|E*}>Wj*BlYyZmWuj}RX(~@EVRGzi{VwpzGC$_{cSrgPv7E`oW7&*p9K~EDmGLeU%x<3 z?_DFO+fJ3!6=LOdEs2Ao`v3HYoPYU>oDK_;(<$Y|3exn668}r}y{fW2{y7_Y{3%7` z^b>`@CyM#YC?|P*zozp151Qq4e!ZMNTv(po?X#Re@~oVmU0NRh(`-3^$V@r?DM(Jo zt&r0ig}v@9FVEk8u{{3z06FbBNlqt}m8V~IOU@rxT~0sJ$>UG{N6s(bSx&!F`1=tG zeoTFN{JaYPzEaVDEMj!7Kcp+h5A$F#0=1_`cjWXttz7==6Xg6qhso)(bLI4{+HyL0 zl$=gED5oDP=7XCxa(-eNxjggg$obNbr<@^?{3kA? 
z!>y4#eqwRCz1>&LPv|=yMxgzzx56Jv zrpKKAkDQUm4_C-HvY@{eRBS0wVa+=MNT)bDyIi2{OcQqf7zh$Kb5=4)0Z^J=^uukR->dHOU(du|;m z=RXUP(*e`u^zxQ+df#F>{d|_3F7jSZhn|wtA-Cl8guDv*3(NVH{*%*R+Q{jH_(2|j z(e}yf!Bf`uUY9JVC-}-~XNi8OJuWXG&u^Ndy}iH6)3;a5ce5_Y`Mw+Ew8Xt~_Eoc{ zoImoKoNlL>&v^}y^Xuv5@*Z6+=a;x4r?-jGx$4~4@bWz3pse@uapp3#_ z6-kz-Uw=$aPgCeKx58hAD(YKdq&$5gH!%aMKUal+sH|9D(sSbcMdSOn;3@0)&`e=p zW`#UrX@nS%^oe4A5KzA!S0^%?(bJM>BX5X#w>>MJ){3_56un)~fGrFTz5dK}P?u(9$r z84+WUAAE`5lT_A`JdBWPcm zn%+n!98r4icZ4*45ya0puH}AjPU#1?(H^CA-0z(!9S=IGgigpm%7FYI*$VoIL%vD* zZAm1nUm)hUX8Zti9iz*4#P3`2V=^6+=fGQ(S4BtivifY;j^DT4(-Qi-X!OM8Gjq(@rOLr&C>J>pic+rlW(tku&*GAefiWt`s;du z|F}BJpQj={nY;zR;x`qsI-&ipVtr)9k1go$P-*_5+fd)jTC$YM(-!;`Pc5Opai#o9 z;FF75GLG@TkHne=KkRA3=y9Nv@nfIbjMk(7W#WgW^takH{d(AY5QynoYk&Cm8uf#| zczfKt7x5kJ$XyoyCDzaBdF+URwQp7=@~kz#**c?tACc7zT~H2vF#XrJP0LcbY7=>w3@2>Ez@_on`+q{z>l0{LyV+;6~X z{1nhh-!8&h z`=d1`GkaJA$LWw;O$M^`%|R#Y)k6K~cRKvlwnBf3e+54Loq@@h7ycqETu)RizqG+< zuL&CN`w_JK<##}r)d}f0W4ukr54>)%@~-3kT4t!2z89r~^yD*>cec9?8P%V*KIB9$ z%{HSx2eg8}YzzH3MCb^jar#Yc4?ceA%Jzh5e?(e(CSg>3FhkCE90( zmNaJR$J|AI;qQ5SbwIf#sqx?{053by75p?6X$_jDf3p_lt%$^B|Nr zRKtC{gz7`1=s&YiwV;l+q$7%;`RyBs{ySL1eOHCX?*MyrfIaf^G{Sl_xv-wl-{VsL zbgXAx3h0TM>1!1X&C*uvpRbC+|Lm}(-}2$&52%jx68o>33;dS0+;9D9em7|PcB1^p zZ$kd}cH}g(XXmPr2Oj~Tzq#b<1HQvsJMOy`lx}kw?G1mx``<(~`Zg1bKhr;kr4x=>$CePL99{OkI2o0&s=m6SZEgJ6o zJv9Hmeme*ym@(Z`c8)CcwF=Zgh$qrD{h-bwYlPDhrr{A$G0 z{;wg&8U6Aq>T|3YS=jW5775!oTSfu|)!+jrx>SsOP*ZJQT?Y+S!lrNc2 zkLv?_k<2$L!~Rkv`sf9F&N!tbQ&|0W{Rw&TL;P_}ADuxb;Ya$kztZx2j-&ov?MQFt z{}PWuf1T`v`PCKlZwuyo!7Tm&jNe%jd)r?U`hqj-h6bYjVe1m4H)#cXs|`9SL~LJ& zrKnG|*#5Q;V4qlj@cwP~4b<{3#<-X%Zj}z6xcrd!M4Kej6HEVB6zT(h zKE^Ls9{S0n654AG;u~h!k;N>2Yxt`qiT$iDhxSve$OXpVI0`g=j8DJ6MCF-Q3;H;( z;=T(+>7$5m(Tee_AifKJ#2>@rZ+3-0c%kRM;Y8y*f=+&_7uE;&kv>Q#roWE!6o!_1 zlFs^TXlcYx(sSQ7qv`(yKe?G+81I9sLtnF1!gyHo5aekn>LV}sLHN+lGFG2g2ce(( zV*O{1NBdmYkZY{Guh{>|$g3ruO#btrQ}Sp@X+|eJM|}80-oLj6KPi`1SbvW}dl|af za^D%F{Z9k^8*{0+Z^BXeL($$zYH|K{0S#-ZqY~yH?ierAk$+v*eznk$L6~gw_B?1l 
zXdf;2T{N0s$|T793iiO{8wENY9}eK@n$wYgVJ*4D_)Dvy|KQ_9iy6Jv9rFC6;=YAP z^UFiiKi3H3L39Pkf)!Hgfsni2b~h93JT>g&kdFJt9*w^b@&@5!0(|^<@H76_5x7Lo{;FI@`iFMJfz6M$ z!oQgPRl@x2-8Ja{jve=%6q;XK(B_+V+&5n+{pBU>$d3=M1hD#b0&TplChZyR;Sc^?HL1mDO@GjlSbsA8p4$d_Z@@nI@dvc= zs+Rjk8O^_MKg7p}J-)N_{UT6*$$ah7RJ4~~OX3+nqY&&DA9%ZfY-###uTfvg`f%JV z#K*@K`1bFRjQHVNVSZr)z5yR$c){{}i1ENbOe^%Cq0aD6bM@S}uW0^n4Db)L^kg-w z&lBuFnC5<)pgl1@pjo-}M;CzaFV4@G zfX?vKa^HHS^{WUvtF=~G&o{$-D#=gmZ*TpOK1nU;{{s47aw~B?vIPa#i5(Vut>0e`Z%zZPwVe5g-bR-bbiUy|~uNfTE8 zMVN0VSJ#pmtp0AG9ja={2u7RHPt1jE@!4CeeV4#0IOC9%ZN2pe-+S+&3{Py-2pcbtGNk z$bvAxfSS4X84X8gAfo;ju7ka<(UMdqpQ|hMi}ff!AE^(%Wwn-cX8eJtK@YJbTNwT3 z6zG9=AB=fO-;FJD#+&2=bd^JEPC15_v;x{P`8Xx?#XYx&+2D*UQ|0;rZ z$gdUJd+=u1$3ZQ(zNPuCOhfrjcH{!f@6}4wXQpT`Z}%a;Vs_j&(rEl$pp%PWe#PPw z&>02oxbGHHetpb8&DV9pd^zz2(o5Q>^;yW1WJmPOAKr&N{zoA%%g+_|l@9yj=LDV%{&k0<|6fXlJd*yu5&Fx3{;(|P+V>AH zwBJXSuwE;S{h{P_dV(n%$M>N8_bOq$*|!n$OXk;Iv7ePC+3&ht2IIvF>_4&eBi^EZ zlKFg7@Y5HI{CvpYA0M6jkJ-l~PY8w&($VjOQvDtTosJLI8F~Hxhx&XH?V}^?+2Ogk zANmaYE&jVPA7}9g;(c(6n>Ziof&7y25j+>hAA|Nw&(x8ztb85f8*7UD88wl=%M%^< zElMiS3LWN?U&Zk-2G{HuwTfhe+AljTQBHu zIP57yvL7}Y>q7_ZSHq2P`rcmy{w!Hs5BP#lz~{@`9E$QPY6N>}P3iLDd}J5K1v3WM z#!Q}>7zmR$itXW{@OM!dpE4!(S??a=!`^v$n$?0l87l5ut5pAws?q)?%A2O}clRnn zpV(}ythUytD(o{;vOkc5b}^&zBiZ=%EEnvdwb-A|I-x&ZwIvf^ztn$z3jiMhakw?NrtOZwq%iwl~2FFX~0L2dHXop3h4)n^X)YlkCKG_R!5Sa8}ed5@ifxY_E-q} zG%ObP!xB*a6kE&>nf<=Tz##RH(?^4WkCXBJvm4H5XL{+$RVGh4(8N|>*;7jJq4PYlhb;SMND)wk^iT|mu@COU4BEQipVSd>! 
z3jPcI17^Y5TTi4i}C@Q<2oV)4JBJ?MuSVHR9_v~36b z!G}S&GX5sme=-Jz0On6_bw+)TtA+9RV{`PcZlZj9!B59WIm@y5JwPYpW1R*@pGSQE zE@J!*pe>z5`NvXz2QmMU2GB=7aeWXBzM;LCe^c0J7WPYdduj0*`U%#N6ej=cC(sY{ zg<*xO-#6$RFQf!{91uVGPaSb%*-F3wj25kL8+p8IY)ZI9Ae&sZeu89&7LPqrhmYTSK22ZkG4zM`7Y0_pL?6f>sph_APx)KVK|c%axbHkuI)7ob z&tO~bd+)SalqVVAo{T_zd}wky zYu}0)(AQWy?tA|y!t@c&O9ejK!n!YEc^{rM2<@L8E1sENM_R7TMWi0ZeR5B;0>w;t~yZw(!J&hkHn-%I(A)e{VXT=|}`A44BC zY0CJ$KBB!_i2L)!&8VNmUp)K-dAs2ciZFRzyaQcT$9*TB*5_7Bv`2)RK!sfX{wU8l z8t-cvy$tEII_gLmv$t_*KNqxLQKrA+-y#2K(I1E2fxaa3-_t3GpC-P~HTt6dec^9e z{+Cf7OSD?pUv1zZ_s^MVKjTBOe}#8N{r?rm&+!;fTqN^LQU&sas7RE?TA$EX;13eV zmwnFQ!`Ks{KiXcS;FvP&isNO6#o+f4_unrsLVdbpzl!O*!bRk_SzMnQ;7^ROC*Hqp zK>MY;>4g2V<>SylE@*`DB&jXrpQRSw-#5j2&qcC6={z3ob45e&ii2y9(a0~zRVVcC z;TZqY*NFPAhy1h7X@v3O5@^dM4Y|V7uYQF5;eUAf`eQr{l8omSFkerqEY7deTp=&^ z8)8}dXvmv9L)@PTGhqEQQ!lK?$DscjcBq8?grbP=GF?r2F@4qh7y8Mt6W;Hx3PF8J zi}Qt7my!Qzyq{$8$56hLIDZ&~`Fm!(M%aHEf%p!m?8symf6zndTQWa0A-$!9sBbOO z2gQl=wVqhN6YSranEf_Ge3xPxVLf~m@2@hkU&8CFEcjVPG{XAsSWlE!NF$7|cfMf! zFpKTsdmH89&nHz-S=(HT}d zo>>1o+z9$`68B#VyoLTG>;IC@sMr&%uUP#&ke>zl6=k$9-oGSW!utrOzqCr&U$`K? 
zzp3T{eOwjy!z+fOKa>^MQ>vOMf2BBIJO=#)Ico%e;fVFQ`KpfldoI+!M7=?J$j6Vj zQxyL14c-qroEP`EyX``KKWn+ai$c?HiA8!JaeiDJe3y?}Lih0~e-!xsEi|M!qfeHH zzdRtWH?Ho4yl=JK-(8^bKlDX80rJ&U(J445k1h5SQuLxe$K^x)bRvBpib(D(o(CC%amI+3E1}FD8q|Y*t@(FP~Jp#jYSt72l>EBaGx3?A2Pb~%c@7j`fEPVjlD@C$@99k9q-6HPyOn89y zkgWgmVLs%K`4HcqZI__F!Q%UX3GnyEAaVbH@gB&d5$nIs8S}>ldhTzYQ2PlUj{KL3 z`wt&1kY|b5KSG_MuczYp@&_gwN!W~pnQ-IHU+Yo+CavJVB8DRVbPd_T+V?s3gIpx* z6+gtcY!vrT3HEb|4fd;9{3NWe)9b3U!>tH|Y zM18EZL4R8y%J&5FCLa^uPcK7x{&>H|>wh5PlleNbk>y_rv|)};ct5z0*7t~b{-!D_ zX2yD+Prp=;`ZN~zw=Tm!x>VK(?`Io1L!a=!J&-@0UsT3>MeKS|oDBmG$X1EABl>$ty5M(Y<$^@q(gbW6@Y;z~k4IG<4;G}X`Qhp4Zlzn+bP zJ)pnlX7QaWqW$Ws$WT_^mT2fhGQTYpg!n|VVczaBz8vNYm(tJ4k9zK-H~rB$H)i-_xEBWQCWk&cK2E%7(2FkaF>QuB(H=ZpHK zgAcWE`Y4I{m{Bqx8$JdMeB2*fh#dd=M3je*>3#5BAOKvlHWEkiDKeuSVs~#QHAzwP;VOr(=B&`^>7^cCp`ivHpd z=#2g%{plIWBG#ExtY1puA;Tp}#$V zzAPWb{jmhlDfr_g8BG4Eh;K>O3iIRS49GXZj^LFMXCFn*f}SF-U#Gl=Jd*iV)(Xfw zSv>#K3Gx~x`_bv(XH69OO+h_zzt!5{c<6wkx$Ie`4#MfpodKpx5bYv*qC56OOMDXd4$BlW_5zsEtOuOrH{UD4lD z@%z}!;d;Rz7J~f-Ig0ihfcd8Z>u=uwt$l+2C0WnbsgC?-sf7K5_xT|o_M3SBf3rN= zvz&N7wgcvO=~z#|tT=yG@+#WnGR{-6_HjmgIG{avf3|Hu_*KO7`wubx67cO1(Ejiu z9qo@rnE`&6(&Jn(e%u$|hin4hTvj~Ke-QE-=VJcD+UxKfT;5e}?G9 z=q-+yhliqmGc|&Jy$*-HO6GGZlac;2{2i+gea|i3-+7RL`UHyo|3nzU`vq}6)(`xQ zKgIQMEcnLS;{2@63#6CKe->bWJ4G_zn)n^|H&Q2@mwI><_TRyd9ANS^0ZrP8=MmGJ zq5jQ8e>V?wN)xrPUZ~jz@g@E~7W{Ohgx?MIyDrX`bAzANP#m8QWBrzn^&3B5d-D{0 z*lQkSOYK8tj~kqid%$QPg7IXE`2I8>=*%(V`syUcLra1!*Pe9zy?Gby4S!%@>5HOc zWXu-lTQ;T8|HkWu{pDEnKYvdnx9Bg)t69P~2} z=M$Jd&tZO-jLk`Ye&>z;oP_u9M`2G?zEg_+XHfLFBRc5kyZHMF^*yAQ_~WhEe+$C= zhu7a8te=7;`$cz~B7OsL{jEZKWl8LDHux^}#r3ua{GH)}TJT2~|A6A|sp%AnYmXk+ z(choh3BON!h4I}Y8Q=4|A^idTK8nR}*$?p}#Q9!n(3UW9zV{>xsD<-x*V;oLEAcnXS$?n3|1%{0 z|4+jrl>n`s1ZY~S^->MhhKehyI*rFHovn&_%EAbciYN5PKD(-Jw()u(g z48exsd>E=h{gtjK`;EtEVE^!sp^=eL(!iI$=Dm(FXb%qZWS8u^Id< zFCA%)cBT6H8xt{yQEFj5)B*JiS|#>xrzq$L{fU=nEXEgqvp7E0-wHv`YRPA`C(ZA| za+Gfoe;=^H7xt4Nj^Cl+n}>@1yWd#wk86bUxZ#NJ5HHS$%YpB5ROD}=<((JHOPvM% 
zR?>2Rf0vej7kD(-JX)A&_7AivFO;k?LfGxBR7wr6FGZ8r$or4xYV@x?I^xUp zm0TYA7sYvRW`D1TL*IXi`|oMsXBAVCP3Qo${3+lYmurOg1x>*>+tA9`A=YYvLpWmCJh~Hk%{Y^|-{!q{vZS({~ zCMVCUe~>;-d_VfME$nBGn&8+z$8SgZX7PP#33P;vj(A_f_=UmGj1u3MzL|seuA>$9 zb0?I8y?n;|MKlO4f6Ex~4`IAw_3eEU`sgFRUoGZ|`ae~X6D)uGov2urRv7PeMG$|2 zIDeWm75UkT=Y5CM_9%q&hb(^w@clIsKGsXg!}P-Mk>B8bbI=ey_jkytd}p3OUdj4s zC)OkB53wK0P6i`@K8zJ1w3!yNdZt2KIMHvGy9V z68>qLcs|M!x9=`qM(vOa0N3i-_z ze;@kpIpmX!cLQ4@zDX_o{<2bC(BH)Njy#0)MsdG5;xoz*(Fy%yIrL+e%qP2!M*LKb z@Oy`QsCW?m$Rqzgbm$}6-^Ka;4EV2P_%BC|b$r=}{%w)W7p~O6{vO`{=VAK0?1lLF zFxS{|>BJb%&(bkEu)+7FA@qLC7<9C*1Kd39_XD)lh^U-U2!9Lu?^|Th_aaJSjA254afc+ceR&jrK zUq$Gvk&62}+SEVzzJ)(T{(S$AzK`^h`ByyBn>UO5!QURkJS6sg>lW;xfKHe%4F+v- z!252d?+ftfS@7pWm_94CLi_>Z`G+&U$giJxexYJ-*ptK`jmAKbiI?HLKAYBnelc0+ z{dKZ4@_!-j|86uvKh-tD{@X^hZ}LSuQkKc1%?*Ey`QT~DL+5)f)X;~6c;2!t?9nJ$ zKV7VY{2z<{y9LJA4EP@d>rdsr!#|A>`^Qb#dx}NuAOE@_;vSW7o+gy?2kW@MDNglo z3xAOXf56XYJf0!G#6FMCgg$YYWeL+?0r-myNq=byex^a>Cu~A|>{k|L^7&K&KU~HA zZChI2=Lpz~#Gft3{MS4j`hWmbA0->2eg)J*d1=@WbjdH$hx7ea~LmNX6m@} zGPM0CA$@vF9e(Fx%@41N38^mbug*vMpq*m+bx5By4d?Y){ReM^e6TORy+~kSSX4}O zU|f7~Yt2xrHRUdeUOs#Dpe9ngXL^ zqI<=fOx}r%>EC&ZfK+(HGuYi$WXF(T9yBeL8jtf)xwxwO2HnCSS}h@N0t zEy^3=EG53lB?(2<$|4)->2m z6&n~G=o)H@G{u{KJ9?NYJ|Mwtj){#oh5E;u5+h9g0?_iP3a62tX#D8Fkl^@`aQ4K6 zsz_;M5GYhQ&>SC2tI$eThg$u@gX4wf6P3ql;dkp5XNv#rO2wLB6Q)E{V4$z>|5GBL z{~=YR2^RK0#7DpR-8$2zMjd`%nuxfzNMic^62fAGd;YdKXzJgND>KVL9~eb+zuwHc zUEQQySU9{@x6nv`^x@w$a%p<%_pE+MnJx7}9em{diR-YiPWLv!@z(CirSX&(?@+xpNaA|Ti& z!rQw&m4GJV%XW1OY?}~oPKa+D8yh@;_He&|Kp_I{JCvv$5@R0FAqI{GTzL{cBO1+< zMx$L;@KJ6qOsLS91bF)&r|+me@wOwMYE`F<^41dH&-S& z7tgI%_JZ)aKvmgFqR1sQ=UmW8zmbc3ME1GV;Uskr{LL!4|D;MnHosRV_n*{B$mXY& za$^c^Wg3t}J9+YKPBpH|rsn%v?T}bZHcZ|T{;eXYdl7mnx=i-!;*5o=o68gbB9+jU znju+8c=icvTbbF*BbZ(%Q%J3!G?Y7dJ`t@WYI*;py=9sFq?x!(di`c5wSSRG?Vm~F z_k(xio#>AYVZO!tL*@^DOu;jLNWt8}k12S@4=I?>4Gy605N#M`ii(O!q|>f|c-K0C zf$`z7G5s(p3=QP`I(7E0b(l>s`-6)RUG|Ue94V}`z-)Z7s08L?f0UeO{2)1F2$ECl zlM80`=h@1}5<@mg3o)PlqYOOb2N@VcB7>`!Tn27<$W{cFRxSc-*&H(D#*S<=WNGCY 
zvWAyt=qZ@0 zh37{c$=H{@^w~wsR{U%tO66A=1m~)MRsls;IoCikzT{j3X~Z0BAc-icft#z`VAz2E zt2(gEa;}49aLc(4(ug_MK@#zobqM`c9av`B*FhMee`!kD#>}=B*~R=-HQXb9Rt;rt zzo>_#`Lk~fAwOx%oa-Tp`OA9r`ei+Gv?)P4i3$9o9@3aO*FzHXm-YCmSt%vVzCC`N zv1MP2Y~p2G2jzS#``mtrCFF&18xt=w98 zGQpAkN*`{l7Y9o9{7WrM2Gne|?8a}tuw40``O}PlH0x}2sa)Bh|7Y8kuvJ3Sy9izU zSH>=O9M%d}qpz(n`X5{A|7i8y+&ttyOkyP2*UU|3DB=M1`!(cTgLOlK-%0U+qO3Lc z`mx5@#HSlle8d0G8mJW*P@8tkR_LA7v}g-)1I|5u$Ty~r<>qD=7rtDWSa755yqqLk|Y zsusDkqPbc7j%W6LM;@IH#Jq3&pAE&Gj>BBn;l_J6#nM&bZGY-O(0k-U(Lsv}MKk|@ z>O7Kx@R#~fPD}oO(}rT(i)kZ=85iGcB+@H2foYU$9lWaZrjvwDrr4e_ad@%m-IHpG z^4w!hp%Jl|eRC1K{hLQ%gEoZDR9yoj{1d%Vgg@}Zi=dDgyxpXZ4{tbWkaG;o8KR?2 zk$6|Fc%LNACM4z;5kiCGgByp2#^#tyC|(hV=NJR;W25N%ncs@!nsclubNm3NO4@qf z{mrrb0t4C=9?=CwPF0VHYl_$A5zz@T2|4R1A}%1IXH+oWruNS1z?`$T**o&UZgo(%K)lw(JEsV71|I>V452&6A!e#=AyV9r zktE!R9J#%6_2or=~SjFW?MJ9`%PchMw;+il~;*GAXL^W&=094HI1u#|xFXq4O}IQCZK1%COF~zPE^s2}9dTR$zRIt}fUcU~5sDxlm##?;n2I zeqzM`t%#Bj%ltm>^39m5^g;Cu=%M z`SO1}za-i>w92A2!{v*D*^5b8QNL9V%|IeJw@C(}>jc1+;%EmwE|OzDm;kh=n*UX# zocX4>@L)4NwZ)kuZ(DV6Vvs)f{)IuZXf%0FJrm}GDOq-BwxpUhC&YyZy7mkX=_65# z7x$Dtq&_yALwV91aN6>R9$Je&{9ih#H4}ONv}W?F?irO3$=fflNOwHT&KQ3@E+(x9 z%P0qbWIZ3%zO7H7D|djAFV9P0a3`EZr$o;?N%kXhG>VAmMAU;e0-r68g(l+2SWf-h znkZ*0Fb#mE$sMRWp8YSa!(}0F94?DrwM}S@%z3(l&aPc@tYTc{G7nd~9%ZoH-`Wl%25I z9#hMyjaUV8Zln+lbKFFU7s-QrlAQoKYRftsgT82k=OmR7kv40N5xLWOw2R4%GpAE~ z*^C3fS;Q}*MJM8&kafJ0c}_2!j<&Lx!ej>zy(2<%?)gG$T1%fC8~_&Oinzau9Qo_f z0_*dft4+_d=Nu#46w;@Y^;Mr@Orh%{fr)|T$3w7E7buAfRs6~@#EiMf-Ex%Cx*lM2 zjhyUF8Zl?f3=Zv`5El=3EFHOohp6l?#!<|veqBpG(l3<3MMIzC<`+3tkBf*t_j?h4 z)riY&)Kj_U9iCQ61eEvoNLeC%!CAiNC364?qD6LWjVUKNn>!iB4^<+ zGe5n>(vo{k__Nyv#v0IuV%{gR7 z*}s(;HaBEOsE|xx|88bV6O`xY>V^!3DE~vA*;ci-HM8H%Ml>(EP&6CfzJ8^9M3tGB zJSQ)K{kt+r%uAkC9b_fgSN2jhrF-TWnh~?Z|Ht0jz{gdU`QvxeX$@G-sDX_d*QgQO zrQ%GQwux2=P1>f?maz>`q1wDR19=(q(gwFmVp3=fapQt+U2)B>=vHOdxOUyT;+hhm zMNtBa+Wx@Z;NtGYqDDn6ENlPY?{nX0&b@OdZPCx?Zpk=Xp7=_nh0(-z#g- z`p_@t3=>uo&55{&@0i(iBs#Y=^l$EJ+S1V3hZR^ot4b>v`M{Um)7*G7Cc1I=XdhdS 
zo-6!P)*g%{aNxrBDp#KRbYIylu~9bIT4Q9QVk-DZRoJmcy<|w)h1tPCunE8 zs)tz5SJs{!Z>u}s>F^OU4*qRw?zgjC)1!w3W!A6JU{-d{<%H)Z+x9&z@viop(Ab!* zP*0Z;c8&}1RWZw~?rX$~yrt+^R5+2)pse`mn?=~wTqT*S^7U{rf9*)#zBNS0*W7}g zLhON9vW{`p(F`_tanCEosIxB}>fU7MMJf7NYWk*fmgw9A!tSlDnu;DHNF~2!4*~ix zgiRrwFtYPstp})bPhk0leWP1UzIiwIv8gxh8+kK8mA5Kq&l#SGT+hp2eB7ePdM-{6 z#>z||v%>82ifKLjO1+(Tsy-VlRxy4}u2pgtrW-n&`)q||4jar9>#MMk)!gT8yRMYp zn+du!krf76=E-fzn)`d1`qsl-YxAD_6js*T+xl8tsRq*&Vr69B3|1E8F>lGt)6d_q zx4}`Pt^YMfh^>82Jlz(MXa;tx-(&70%vfdkDc_3Z%PkIdSvpvm?Aohxm1dPze7rD; zKBhgvWqjKStT3hI6)J9&Ft<$nnq{j1s@%IdbVT!)3$}0Sa6bThV#|bLNNonI1m)2f z!diW`X=m#1r^z<#zS!)n5}X4czD($O`^p42E+o;uCHAz`aYfrbXfKQMMuzm~D(+O) z1j+d+jfY6LHf)7KnYyS^&%7Pc{8Y(k_9Ud7P*o#v9)NrQiH2U3-y}niVRo0E%mm^jn{515jxD?1M3Mt! zYkp=1d5A}4%guhxl$oEu(Ck=#g0o}!S&kjcPdIigKf6IJaeZ!PVTfqzEXEyD`+-VU zDnT{EzOo6+&egwNcQKGv%iGybb@y>PZ#F2gGGt+etEvEQRkiTi1CgHEnP(QR68+W2 z>z9mZE;x1A!Ja(Xi_OL*IEb=2nLw%L!F^t8(?)x|Zfw&(~Lq z&0MrL8xoX^g9v&tu4%clWZw!gTfc(a()Tph7$HH6_bHwGJBg+naeLMK`fxQIMtx?J z7UR~H>zUVEnj$|Va&=dqbE8%EeC$A9}sRW%9DM zucNW0`>NLJ_|<3)tU9eiM$SjsI6`uZ`?PGFkFsU6t}oHvys49u#Rgsq=*WuOdg+g| zWY>Xdi)DtpaYv2H4&Yf_msBoW__Zg@kL-ER!dlxC=I$^ylFA8H4CLwuxHqzSYggxW zcsbc2o|q#zXeoZYC)(TNuGRmFRwKyB-M!4}<=^g3&E4?@D&AMX;Ha7}U-i3Ljg^yE z`^i~woP|fbj4Y;ri|?+$T5p-Hu}WTsCt>SbTYZYb@*sBsGdrbn_miMX_p|Aieuu%X z1PYp|AOG#J`<|9ET}4&{Q`X`L=j))3S5<-R*I4-`m2<}pr`U{PefC1_sreeCIC~Ig zSnM_&bi%Ei+c3q`E9hF!U9AVGa`zY=`7Rfj*{}AI zff;92fH>u8Y#JTPSq*v2^8Jj*jN`YeW5)5*w;9Jz-L4_!%5ycMlw5s_#a+AISbb=4 zjhJu=uNAB^3O}7W2dFF0(fCwGy?)hb>T7J-*3~70SbfE&!VFsP*8?N~mpQ|8 z6aWUwoLBMSNy`bGzprfS>PYep(d{=m$8RxI*A6$h za*sj$b}XEUvlrihce<>R;~J(#h)NP34Ub>rYvh)N3G|tpWeLITFe{pjAUQh0?n+Lc zr5*C%?G@fldQ+mi$+_9QS`V=MM0YQv&-;USD`xh7t&ADR*H8Fvw=q37hwrhdydLg3 zMQ%)c|IObV9R>I@f(^a8E(~07ILk5zywq+7=U@|A_VpZ@W-*P1_NMBtJq_J>gA*sl z9gMAlH#;Axm{YkfIP;zHr)BHUUSpznZ-N_-{kZzeAv7}_!79Ey90|cb_H5>mIAHMX+R`5*_zxK1e>t-hNvuG}dN?{+mUs$XRqnwzi1Ah``w4yS}2!K#YL z0!7X%6pl>&O%W08nbJxi6%XE8_Ur8=YrhGSMDQEmN(8^btwiusk3{fQN@GuRqP5jl 
z$eIQ{BnLV8n1_*>zYexvQ`5qsfz3pP8KmMu*$iO636|}{iJ4-8M z7TbP}6|CY2_M!h@#hMRK@#-t$-CZ|uKx54#V^KTQyR6&`s)m63dRwkxe{K$}Vk=RW z{XkWmsvt<(@3j#-Pg|iqQhJ26eu{;u`$Mg6A6z6j#p|cee!QH&|Rl4uN>k<%^d{1MpzT zp)5#TLvMlN>iMqBK@P#<8uATPH=W#nU9fku*gQv(LjPTat;>R$T^q^894`wY|1&15t(uh6xoz>F4Dj%abTbBiv}_z zQ_g`R3(uVa4M}ni8rjG>m?0TC2a9av9L$i6oP$L+at>xlM$W+^n{p0hNT!?vMKBJ#Cz3nj_sx zD@-+NQVp3hMq-(zbSLBXo7|4s`KrAQu17(O#Aq{Z`%f5iu0rO{Rp#t4Yel>VgSLdb zlUKB2+ugY+5oNXY&=@pHm{XdSk6oa=P(Fwyegxot*hPXmsVI%-f&PLD_1=&$qk8iGy-Td?02tm ztk4P$S{mP_*WJ>wN1rPP*{#ySSr9=&jB?z<-i2($SE# zO>dRppnU1FHqJ8U)an_nnMDRMWAIwjwHNPDnCe(#gaqYx zAqvq;45Si0Y5=8lw_vNcv!%NRTer^b_1LPU`%%z}DfXn&d+oHG1Ie2or{udxySqB; z@L3V2F^*5HPNqFmaq8G2#fA2nTh19Q@~9jngX<76BHPAej%JG$TO()koDWqc>McS% zbG0!*Q33Sk;?Kccxtlp}&a(SPS`pcq>geiDqL1v_ebahuaGIWhLpP3a6+@~qEPF49 z((q-5g*2|tk}0HY!*6%|AT`!k7%k&i)1j|9s;ZgY>Wsue9D=8MI-A;Yg?#7kYU2d~ z%Oxr@pQ;#gzJ&m<*p~S=OS-Kh-z#uun7qeedtl5FtSTqF*m3cFL+85ozHWzK)PpG}%ky=3^ahuBKZ%3c~>x#{*S)u*b6~ayu_+MI&OVC?*B!!5+mWvbP-xxvajc zuUmhk)sg?Iv`4BkvPw%oe4)=d)brVyN z3t=q#OvdEOcY$V;vF>3;J`1LTa}-QWiQa4RBI^~r=z|+@Tbz@FvM4X~Pk*k`tKN+{ zM&8O}S=-m_3gYdGGK1FTYI8Y&ZFi#eCaI3u8kOOsRH9cA#iK^3R7Sm&2D~QoJ)m^s zg_AT^_hCS1TH_icL{+w`5KalL7wZg{$!-f)vDrOlcUM>Q23%*;hq+{37uvz58*Of5 zm5RqfS;r-_kYl1Tdqb}%B#7p*?ui=;`%_0^JTp2E;GV?B%d&CTOL+WHh5bOK zKQEDR;G{U>7r+9YB?gp_W6@UCAF~#!D60Cv@PepvV?!Ul%F)nyWeaABxXsh_(iqRI zxgX-rpla%L8owblmQaj~^!Z*>C>**B!Wa9XA_sfwhs6PMj*A25jkRCx#FK5Z2cgX7U$aB4U&d3sd)DBe@79i8kW{lo@F;Q`POBoM8n&Dnz z7jJRPKF9L1_wYD(n6~21W0Ml3s;^SG;M2`zfX2`)gl3p+LSe_vULTw>9PGBjgH|!Cu$#LZ_L_!i%Q05w z3|AVgZW^xBW2*BQoYuLmB}bOB3)abMkmiNEg?v@p8dqG4Yu`6FG~!c%b}JcMlMyYb z%vt3CYr`4&JK4x?B^k*brm}F}1M#h|D49NoV~-37ES>b$V}?LrzYcVWR8=iL#wce$pe6?1tA z+;T^JVaWYV#XJT$su5h)9$3%2w$C|jzyU`ok)f)xR0TC=pGDc31{O#+z73Id7#|Ki z>ZgSntYXbW4D6=^GH4bV#8YJO&%Gmup^-z*2^g&6Fc2GN2Eha+M}pIakYK|KR0;?(ms{iuQmFd3xE(|PZmaiO1=KGwimaoO0WR$QPFXz$uAH;_cYqxy)697}aP zI=fo9s*rsa+St{#(;PAs#LydMIF1cD)2>W6jjc+16tsF+ybn-~^kTDHM?-hh=9bRg zz4*kY*{@YvVJhabHIWr;)>nD 
ztr8rR9XLav2FNHRdqZkO%r4OFO>LNx*?h0o19D(INW*EC5`b}opX{8+w{P8MH-F`X zDz1ZQdUFHbAZo(Cm@S7&D@>KC%A|s6(8@N*IK&Y-yF6<9%oDP9Ph*AYl~`lMFskIf zxhHFTSliXNyN#O`J?M6<4maiq4k{{`On}V`N5(-M(2nllE^3FKFXjUCroY(doYa*G zcSd+n{vbNYFZ*D{nRgH#&5d0B#XWP37%Ovz2j$TkUXIEkazLtpXWan0zT?SvZNss4}ma$^74cGMZs{T#xN>X}jAU)Db z-s6wI!Fa>Fq%G#vSS~9Uj^ycj)wej!eRWGMtbmbO0QW-dI$VX$H~kzUaWN3%OB2pW zsFmwGn^^hX)VevbdVMa|gFNX#o`v%DEHi>ff#nM+&~Po!b+;JFvc;sVm=N+t3Pk;dRuGt++nj{LXcH$K#vB=CazGdR&VwIVIoS z(9zLQ&9hE;Gg!a8J*V`Y?JaxcErH+_&#Kg`EQ{FP-PM<*$(FHC>A~xW_H%ph~SMKmqQqX>5U?#Q_oBrki$Te@)j zuTzu8Tbu40mGOmLY}@h$Rd*LVk% z7>LVt^<}}jYqn;3b><|{%a7A?4Bi^0CG-CEzTtAA#W*62Q#sTmBeb5@I@1gjZU zmXJC4w*}}nJU6nwu$tD(+2*908nzk-ugaQt8E3Fe_wI8xXh8xBFhw>@VQ$~5cs zPq<*@<}qA#q}temH|`Qmo(000?Dc}}yEZjpOxmO`74TDsHDzdMvc=)ND?JX|mx?P} zZjwuWWW|Nqalp&l6aM}x<+IeybR9-s`+COQTBE>@WYmw4iqFYySBa_se=8j z-?DNqy~SdV1aK-$pXJMk?o}VRXH14vBFcj9OU-+6o_KEc=rRaU*zN5~EX*Jh=oETzZF(|^yYlpV7Z)N+%xiISCV_LZH_3fO zviykNI!k-X>9;$bV#X0fmu5bj@ReQbqLD0xb22W4>xmXvvgf!y2;2H;4SVM9-Sw>2 zqsjo614WL6dRB9pcz5Ue_?1mJEXW0LgAOl(%pBAQVqQg5>P^f$hRaHTgV3*yb=V-gN6+e3GI+#kfWRq2|o{Rpr1 z{453A8JH^;K31aQAlsTQv`g4vO)ny6&5dx*PRgKz*}JNzj^)0 ztTkk-YPQjH76yIYH8X98djh4etoOm1gIWX@y8xKw#}-1QBz38hu~M=s{%^yDBZpYCXz17-r)tBfs`Itz2xD z=r>9Gdc9X7+vC0K6Ldfh|6T~+zE+aXcZRWNu zH>~p*`+dAC+3W_KkJsS#>K5E%t?wJj48xJQW}*8qIIV4d20<*(S!8)PzLf1hX|P?! 
zo@iT@4AdCd=}cAvB{SQQ+zK;O1t==rID!^udZFr^j`azdQ6`6<{NdL&3O3}gZn1FPU*4p>78QBRT~$UNL4&Dv+gx1Tj>!6 zUci%78Q$g3q(^3|wZH{tWNxGTF`l`Zt`~NVD6cQsjDxIOy7*cqZxZP0cAN=_RVp6q zb4Pr9YE!3m4qkQi%u`%?!96Xzc~hufvb{PRTj_cbv@pj+25q5maYS+NIZJ>KH0U&@N~0WjYJvRSj3+ ztTgC`_O89#H#k;7)?_@izxd9*;~4MLl5=r98|P*Ix;`_qvPFsOg3_5#l;h!C%G~2O zb#K1X;kVJW0_3@exF9i=x$4*YQmwKcDpxjT>Q(VhuaB?|HZzz0v!}@%BKV})(16>R z8s*Ypt3@bJ6NweaLo6zp;6xwvpSjkP&Wi5D&UgEazC|u5aX(cRuPPh*P@U1sR&;i` zZEWvqY-qo#o39}@m~W%&{FcQiw2TNUmboBW1`d6#wy`hK-t2Hb*h51{Lf-l{J74E@ zA{=XZFIXGHudw6lXVe;kt5g0mTMr)+Ahn5HeUyKQ%0Vppv} z6CK9TSf+az&aN>+Ad>Apx?iLXVQkkk4sc|s&9jh9on=PVhutPSs}mgz5NT~`+l$$88wB*wP7?iRej;1?y{fR=hAj<*eglO9MV>(1f|4G+OCRvk1|hOaz&GGN~^TRt@pfMi!>KH% zec%+^u9&Zy>;z&Nxg-#|+-Q7WAeTaNOe5tq8M9P6gM@w&8m#l{? zWv#@H}v_e3?KX_V~p#Um-Meri;+2O6RNno3u~+1ZIv^jNOvI#MY@u(xh1LJ zW2=;^y0GAB^m5p8nDaS*sb!Y8u(VQc(;m!WyV{#`kY#R3q?EoMbnHES-Q8Wgp}ZW* zT1Y~>#2Ab1?(-Q{WXMtq%q^XihtCss(lF5p;ujC#^!m-vGRi%R^iqB|XM{LK!i#Ts zD=ap?m8aCD@yRHC!KwYc*H{AZ^>C|K^UKJJWQIN^1!| zTSW8ananLXce!S~toeBzsc~ng_t*NIKrXGUw0dxE@?6Px@nvT~^-_U%R@rLdELPUe zUB!CH-ZTZL0%NPH_qbMxyj7Q$+(Uq7EK6}7Rmq5#B|py>=tPKSZdPTCEn|qqc$B6M zywBzQe})(`*22At$x=P5d=4ow!gDd?p(kgbQ%Xl9E z8png!YPML-`t>=xc4Glt@urnjIwKbkO*!I8+Ym$BfG~aJ5Qs^ij4g9&;1^RwlvnB% zaqhzLWhU3Q0(TlPkIQn^uvJZ~6c4SkidGySZlyEm(T{;*9R>Syyt2!GK*7BIjhrz= zq=s?p^>-V#Wz;U|tF76QpHF>_5=G=zi5`~g9NHTDGseKtwp>RXZ=+DpYpjM#IRf-e z7g$VKy@eC}Z>YU5Dm0P|CaEB1WFsnoGOa}gse z^Wopa?yOOTYh6ws5@uhVRi}1#v2|Qsvn^R^e&+h2Wu$2BW6|n8<{GBAv8Sc9ZvFPHp*>%$ww{i}hQa^P26*I@??LDygGp zR^DE$Cwl5Qy3uc|!I;G;zS0U;FXbR>KXbxk{NK8cpltmXn2_;F&unkdqscLnXY!2Q z$wcGI=3U#-wX}3?-?+|L=f<8-O`<2+fE9n`ikl*xz9gM{IvG}W_H{J2bYInK7V5DM z?p5rKU~@E(UPQhNNGIj>btc}|hkFov zdbaV^?OcjnSW+o99?PYmz_InbaXUwFxy6IVmb`nCxR7+b-lRX(L+tScqlW1+trND(GloDI|C#eApKPX^9Vr9P?z<{yN1fT ztlFhG9j6Cb57LHjnQ!i^t7owksDfSXds^b1ar6&84R{OEs$Y4J>xp;uJbVJRXvDW* zKmzdXDk>MYZH6mMeiz1Rb{95y48mP^3nuwDz}w}D#=5SRHTJ>ESViVj70XJi!O7I@ z(#*`CQu69k#ab}VSEb7C{X7wcCT(SPS9?3AHu~L&X2Y8CP*qRe-_!moT7+Qjg5EkP 
z8;9(IGn8ib5SA;fbUnyc38mt2@-nLiT%`mtGaPuvmn~}e0VGB@#Ez3MR;<&v3i5aD1*c?}0C)(Bj6;z4FCuh0pZG6ASVU%UeH#3H+$}W#pDK~rLsxB;WMcS}H(b(0{-HcD3 z;iC;0z?%(EnS8gfIz7VqtVViS!D`Wt0r7Ty{lLL~3_j(2V({%A#^bIC^nU?7$eB|R zuFP98u4sjRS~yNI#Tr|k{!ArNaHTQ_jQSMk_|w~{^a6q~%sV$Tbj^f%?^huVu!J(C9V(m@H(R2G1_7*Tjd!^B~K4w>Qe&iZ&pL_EHj9Rjf3u} z2|m8!oGF*PpQ!lv8KtetZof@koqO;hM|}WR*CkyLmL|LOQxb-TS9`}&aio%HR{6P# z*wEe8p=PDRFWxz1Mr>?boo!W>hTUzeQg00P=H_k9I`7*MZmwlDV}UJ*k(rAx`dH1;|F1InY0)34_<&ShtfTZFX=DtXamLRpgHR{kYsN3tCtqwr(RgMOR@yYHC+8h#g#&_`JtVW_ET--tK0_ zwSHK`b&D<85e!hAorSm2S#hi%R_E(1FLVD`tkk`sXbCb~^*bmJ20@zmZ zi%}U?ry6%{XlQD|_CVL}PJ9{J^HxkVw(jP5X;6P(&-?S(nF(YAK9>Yoox#beo4So} z(q{UDM;W;#ky1K0No$j_{AITv9m7z;}<-ADj15xRjGaDk-ZqkwaC2MwXo08#)@BmB)NNDv(P8 zT}~~X7y-ZEx<)Xd;o<{0IVO|Rnr_0oRXGSNhxocC+8cJu6gprvT7$>;dY>V~Df~uR z8If&&wN`>EtuT(T^b0K6#bov|nKoc0rHu6nyeqrR(UO@*TH~$Xnrr70D|bg&tw&IG zH5h(pP63RHyE8H0H}!4?(&Br4px&brKCcLRr0{wV+KgUp)T615+cL6bgW;atA;-$y zPh6M8Zgad!Ql@`^NS0o3Po4d)3g1~-jdxbIVdlH36$tOMU^x=%$Mv;}%^QOuFUGt@ zYo?I9CS^3TMSla%*w`AqI+%na^m&kQr>N|_I2K$#ib+11&K_p37>7x#`kN30D*DZfe@qLlh$tsjSv^wcuT!25r}_M%)3{)7#M9i=fVChY)XQ z*wau}z6uGo(yO*@+PG<}=JRi3b@i^5D1PJSP3x+6l{tRJE?cQp@7Qt0I_%Y5wh~O( zK##p+2U3>%lZOIhAO2QD{N$CVl!tQFE-%<=n~(YGc!!*K$k{( zX!dlOF%}k*z2t@EOHJmQ_lp!odFSE%M9)V3+^uUfj!sQvvSDJAn&Bp0Y}C-5r};D} z&6kMjWtev5%q2aJ^O@goQxhTWkN^9l1pX+2KT6<_68NJ8{wRSzO5p!r63}G)^TCxN zErQ`v3I2;;a?^`7EnoX@gz+EcvCHv1l1RKn)56-%tmk>!ZN~E`;_J_s$aCh|^V-A( z8fG}Bt@w+yE+ankf8QftLU}LJ!sR*-|Nf;LF?d|8z0AP9%l{I?!91-PnEz@!mx4P@ z+icLdLF)|K->*Nvr)#OUUkz!eNsolI=Sh!;w0+Y<79mBI3P~}HEh5F#rCL1|p=VNPvW^sT+Ch4Ep0<;;K-20;p}=O+ zmuXrX>B}{(o%9u&mLx6Iv|iGSG_9ZXVolpe`X`z;Kw6|}!=$g&v;(BC(zM%1U#)55 zq{W(c2kC1x?I7u&YTBKouhp~((${I)U8Jwqv_qtC(6qZrU$#h_B)xl)c9^tO(~gja z^R#W;DGYs{v`o{UAzi6yr%5r*f1Y#|4D@}VtKli61z|0pbd9DhC9Qy;koGOo3P~%W zf6})?|D{z=~f{gYk}{gYNf|D+R(v~tobpnuYJnzoj-TGOgXYc#EfbUpM> zx+_oHOu7O3C*1`7lfD!BC;fF;+evyQ^iR4O`X}82{gY0HwRTdhl_W{8g8oV4&_C&K z7HRuP-v#}XZiD_wza7#JkZyARtS(p`(SL!{S1 z|D*?@f6|}jX@^O#hyFwsSK>wsSLjT>M?a)8zZx?C#q#e*d>6gP= 
zgtQa>Pud0jlO9JqN1BBGN#6(kll~@8TSnRq{gd`U|D?U}f6_kapL7rOPr4WSC+&y+ zNpFJwN#76uC;fBipL8GePdc8b?I8VtrtKuXd68C6`a$SlF!WD40RJZ)g#Jkz7HPet zAAFXD3Q>3>;|D<<7|D+#-{z*R${geI`^iO&a z`X~Ja^iTTN&_C&S(C(8?1UyT($B&FN&g=DC%s{jR!VvZ{!jXO=%4fp&_C%Hp?}i5 zp?}hUfc{DU5&9?n67)|x3H_6P8Tu!E(-JLC`cKe5=~v+Yq=%t@(tn2klYSNYC;b=b zpY#ayPgn=Xr2h{6lRgIhlYS5SC;dMB zpY#XNe=q0>=%4f?^iTT#;QyqLL;s{tK>wsag#JnY1NtZZPw1cY`Ndi(=`8e5`XuyE z`XlI{^uM5g(x;$*(jP0pnuX|L;s||f&NK<3;mOxhW<&Ph5kvO zga4EMFZ56PJoHccf6zZ^e^|SV^bGV*s)e+>NkbuRk~A-*9VWefv37)X4DCPZqL4O4 z8V+gqkuDBt$4Qrjw1-GB@R}jb4{48)UJ%kwkX{(lo*-SbM4KgjG5nwOB^dXSz7*p= z(gKY8NW+V?XGkL$_mRE~<37@thcvAZ^z&gYO!|tDmQPxU@gM0$80(T=9MTF&|0JXp zk;c*flNN=v5_!HvDYBq{~BEKk1)gyi2+Q<6Y7>hqPhR%h3Omz9ppHMj8!iPJ)~=(f6@x*pR^MC zCw(jQPx?0KpL8wsPx^M~pY#<=v?oa45z=N!FNgj~tDt|bhf6@l%pR^JBCvAfMNt>a6(iZ5SbZC)wh_n^@C*78(O_J`0|C6qR{z==Q zf6@f>PkICNPkJNtPudRslXgJ=q@B<|X&3ZQnuPz8{zINNOF9dt$Dn`Geb7JY2cUn_55oUR zZ-)L!2cUn_LFk|KL(o6z5cE$v4E>YthyRm4f%zZlEzm#dthS)FW~>A{z*Rp{geJR^iTRp=%4gX=%4gc&_C(lgtSAXpN9TPC(!OT{z?BH`X@aE{gZwk`X~JY^iTRl=%2KIkv2|JpY&g$f6}i*|D<0DYb!|af&NLa3v1=1*N3$V(r-Zjq#a?ciuAjSv>MWX zgZ@ds3H_5!LI0#j(f*U(3;mN$L;s}r!T(9`hyF>wxmar>eE|9=JqG=gJ_!Ajek-JH zA9^}Jb^Yr#ZE*VW@e8%lp=ZvVIkSJNcX4WUiKY$Rmw)}Vb_V=w+L_Pf<1hc6`OH%M zZR5|mSwuhcnFui-&&>!fBxc~;l;9#_{+XK;TufX-JRz7r+m;cJ3oa#&5)TOGqqYj- zq~I0ARmAmz8MUpJI4-!HI8Iz8m`U4q5Jv^CC9Wqf5nM&wMjR1bL!2bm1T*Wle&YGx zA>cEci3f;h1+%EO1H?0e>xjpRrv$U;wu8izf_D;65KjoMCq6_xF1VR^l6XKcS!+8& zoD|$nJVjhDn5?!PCyop5C7vO!5=_?HP7p^0?<1ZiE)hIHoFa}09wweA)&w6QK21FT zf2{v)#2Q-5xmm&E#QDTCg6|-X5Kjp{NQ}`D>M!_C;u7Ks!4t&Gh{pxrMI0p_5PXOj zqXN`l@ZH2!#PxzFiED}Df)5kNiK_%3Ax7_r`U}2?xSqH~@Dy3Vwokf_OskEb$@Yalua!PZAFZ zrfS-b5GMscO*}X!@f8qh+S;2AQ1H?0e>xjpRrv&dHK1e(%cqj1$@r2-d;zPvaf}4pai3bF? 
z5g#E=3T`K!BCZ#lBtA|Y7u-ucLtG`epZElERPa9HS>h7G1H>ufh~Q!3d16iQ0piod z^UsR^6Km)#=Vk?u6Xz4p2)=_jLOdn-AaN1#q~JS=ONb`~PY^F79v6HUag=yK@FC&~ z;-p}7@X$YTz2HgWTH?6i!^CmoD#1sHcMwMf-$PtaTq1aixQ#d>_&(w!u_pL9F-8}t z|7r1m;sN4W!861Mh-U;pMm$bDCHMsKLE=flPY_QKPY9kRK14h&_$lH^;sL=a;v>XK z!A}!U5!VZzCq7Ob7yJzI3~`m<)5Ir;qk^9&o+U03tU_u zcz}3TaGdx6@r>X);&I|B!8?c#5>E==NjyP3A-JCS5b?O+X5vZW0l{s=M~IVx+li-$ z>jfu?j}ylQ_Y%(#R|)PXK0zE6ypMR6xJ2*(af&!1c$j#eSQC7J_%!kSuf_j~HO!Xh zW(AKE=M&EezJoYIJSF%baS`#P;5&&+h$jS35HBMh7kn3Slz2e!A>s<+q+o1#LI1?{ zf+vYXK!A}!U5!VZz zCq7Ob7yJzI3~`m<)5Ir;qk^9&o+U03tiiS0Qp6F#SRjM`i8aCb#HWepeK!Slq&iQ|HwA)Xbs2|HRq_z_WtKiSvnP1m8g%A)XR^khq9= zQt+L`CBzegCy193j|;wwI7&Pq_z-agaZ>Qz#8t%gf+vYi3bEPBd#D$3dRc^&_8j#;3#n|aa?dYah$kHa0T%W;;7)Y#P!4_ zf~$zzh$Dh)h?B&c;9BB-;`yJ5{}T@o&kBwcA0VC)Tt_@kJSBJs@j>EA!8?g3h$jTs z6CWZT7u-xdNjxC9jra(0QgA!*6mh-aB=K?LxZqym8R9Cz{lq7Tqk{Jl&k~mi9w1H; zM+6TO&l78c4-lUwo}Ux{C)QpJJS%vdIG=b%@Eybv;wixgiHnFQ1>Z?rLOdaOf_NG6 zxZt~pqr?M(4-r=oCk5Y4Tt!?jc#^o5I4<}wah$kH@Dbu2#8JWb5Z4oz2%aKtBaR5Z zk2p!J2|iBTPduLz|0fi*)t@f2~r;CbTX#BssT5YG@-2|i7Hf;cMpdE!~(62TgVc5Nx*h~O~s zJh3J?pZGNK{Qrpm6KhL>X9Y)y^ND8!7ZOK^rvw)f7ZFbiE+#G^o)BC@yo`8Ua4B(= zctG$n;tJxV;1$GG#Px!s#I?k6!R5qp;wr%v#5;(ig4Yt)6PF0CB5os&2(BT<8-b|5 z;96qrn4tduE&fkDKs+lrPJDoPMsOYRIPsL=9mEHTCk5{$o*TyNRoa>jh5|*AmACA100yR|!5syn{F@_#WbV;u67A#BIb8!S@j-i8aB;iTjD? 
zpA!Ek9w43-JVShdct-GJ#N)(Mf=>`1B%T!f1n~s%gy31?L&W2PpCXh7G8iw|5DdLFWF!4OGCODt?H1Yg@ ziT@L8F9n_z93jppo)KI~93h?(Ttr+%JSn)CxP*8@a0&4;;&H*H#8Kh_!OMs%h?9a> z5LXe`3yu=k62}FX6UT|G1XmF6AdU)NOI%M}BDjjUjW{B>hB!&A39cpXC!YV2_&@Og z@vPuD@d4r)!F9yr#8ZNI5FaF-6ugsof_Or3J@Fypaly^Rlf(mp+lY@4Ck3|?PZ8G( zP7)s{jtlN3o*}Lh+)sRhI4XD_@howP-~r+kaYXPi@jS66_yF-~;`t}V|B1B%;90@r z#QDTCg6|-X5Kjp{NL)lbDfmv}65tQe2BP$I4SsU;ws{L!IQ+b z#BsrgiQ~jof{zgIAdU*Yhq#`&MDP@G8*xPNeZ)y(P4IEze&YFA@qgk0;#t8n#0Q9H z1V2VRPCO;}1o1)QNx@GLPY_QCo+Um+JTCYt;z{BG!71V+#7V(V6HgJ>3!W!FP8=8f z4Dk$cmEhCFCy1khpC_IrE)lF@YS)$`jtCAD&l78c^NCLr&;KXuzm%9~Pv&L?M~L%@ zX9O1#M~J5c7ZDc`PYNz3E+L)}Ttd8zcwBHPag=yK@G{~G;-uge#8t%gf}_N>#Bssp z#Bt&(!4EA!8?g3h$jTs6CWZT7u-xdNjxC9jra(0QgA!*6mh-aB=K?LxZqym z8R9D7YqqCu!iMkQ^s}88YC}&Kj(z0-o{yB5rtZb=#@J9lQB7(7=nyx1M|w-cBjM82 z5{!I@?kl|hz0*ivDe1pvroYWd{~@3B4`Ae|ryngYoo*es|9ADskGH2dt{R-)ZR9v~ zD{>sWeHee2&kUVjvUdrSzGr%Li1Sapk1{_}SbApW)ZAF_C7PuFdrAK$Gd)w9>5rby z8##J(?zN%gkDTr^GaBVa*8T^PQ4Q*c%pN)QU|{!MImhz1&lIR_^({rY#kqZdM)@|kt!5E?`Ri3qsX-i zoR%Z!nvwR>iqWBmknPY^{>YNC4Pj`ZXlx@%7BkZ^26A>%xWkkO4rV15P4Egwa>BO8jqN_6PH zwbLWVX!IS}(jHk`nz{&EyT}FNx*nT6W+_+czlEb}{66*2$gxvD!hb(8C4l-CBla>V zF(1kZm!8=c9=fmC5(=L0#Pc@DWOIt|j|@%^+$b`MgOPgCUmYZ}13CxjNN|Q z(EpubBBVVNFB-jgY;=Mln@jUWSP0!#!FaE_)s83m<8fi?3wossPkj`5j_oTgO&x^A zKuAoSx=j~Us@y0m=_+gg6nMFY0_~;nFGkJC1ybgFmMIw*roICEOkHM_J@;~$2aNVl zfssD72W?Ac%|`A&b;rm9ENu@H{{q`njJyev9r{@d`PYtaE{#J6(I3{7)}{LK9>?JH z=>%SJi$Ko#@T78BXNBcI%OGglEbX;|#zG+n$zRg%eJ#LW{(T{1EMlGj~h8k~_qE ztoL%(@gfXGg@c!Yae7Yv2x4rj;IqV&5XMv3tBOs{t<>fCeIP!%3_G~a>@7=?>|xlS z(cR(_X7RU71fHIS61?P@S^)AA4_^2$41MCmGb-@ z8ZnXQudy`el;@98QSv^Pmb;fc??;8b)|A0P&$n*O!n$)|P z2t0iri4^kOLHkTS0Yey^-nm=k36BL+aZ|1^K#KItHa~fO@(DkdOnL58%kv+9VaW5d z=sTS9{1_b(yuUpycQ1MNv3U?*$h6M~VdJS9Dan-QY9PwFvE4$C?zWq*Sa6bnH;OI3- zWXg_|{WsbytG!HZh9eD5Kh*5>Z_BSsJ>|F|^@_B*x|OY?5_b&zHl#^ozbqosTv=91d0 zcaW)vp%$k!74m!@rZ70YuSqG-r9SeUKH@XpesQJ$LS2^1Fzwi--IYu;kQJ=)fG~A5MiJzsq1)LF9J{v-n#kg8ZI^@YCgo zvSmD(S^@XL_-B{WpI7+E@mG)ev0}=x@D^qJxCE@7{F$_m|BIooQM0n& 
z;B@nQFVv2OOR>4Y=`E*>oG#X;KKET@@Blid9VlDoA-$;f$S+6ck3RQW2zl)&Vviy= zW<|9__ie`PjzjjfrM4qs7-{R4wOKsDxY2)o39NITx{p5E1u#Q!mEI(kbWvHN5mQkGPBXlJnziVLasg z7Who+3+N!pibKwS!tlRDIOY5k=pnnDO*6j+hpGx;oh7L}IOgE=hptCUv7r#_1xx0K zmW6sZj&4|XY(ojBOUE`8>!e60Sh7PP{~*PRV;c(jYv_S!<$P~Gl#_}*WGPBz53c~whuo25c9aZbQT0N`ebSkjB{}MH`jre!=+Cl6jQdUaMhW0pfy-% z&_>sXN3Uk+X1pCv8VAuvjxK-X=(Bm1_rgPJu_RTC2X%Y^y2GG54cbhKcx`avc0BJj z!jq&0!&e|*KX!NN0Yrtaibw7)J%pbKnFKjDTsjOw zHkZ$EeQ#JIj)gyjUlRxgxf5h;xO9L>Ull{D+kjY|kvmGqK@f5W2>&7EAP7S45{!_$ zOYhc020%nK!%})!4?WCK{FizgjB-@Z2}-??N-Y@1;ZF^LWEy!+fbbuFk}v+lPZr33 z_(^W~&rtTTS+vPsWlGoh<=;YC5IG78&kt!K-H?nj-^wxr852Z4^uchRVNoMXZhi?e ztrmVUGJG5*}%1wTJX`1 zMM3=BJhEXa++y_QBM}Dj=eF=jJUq7r&138Mn4x_5NhKV1t`qB#vMft`Lm(z&XXdU$ zRAb$j?F-*`nEM(cjyxbTK(tSR3qJZa#w!T5GaWV_Jb?#Jk5l^_*Uty1Z@JdxZ%YFD z+oJR7Z+Ejf%*o#tS^jqKdGj|BjqYz;NrH>e-}p22ZLSQzK>qgd1243{eSpiRzZZY| zulvuHzdeXLWcb@*2B!Jj1jF3^_GU<&{AiKYP97ZxYeQ}-3-xde- zxAWS+t>q{wCx3e(`Zp1cx_`SK1ML^e-*!#E(Ej!kuKWI8{H^ESbLDUEMjbNzt%iYV z{>Jwm(){fk;DY{k5f)Tk{&tt~;CZYTX7{&CU?hXnpNqTvO$+32=c03aJBB{0!Q{i3 zyyW6+M$Z@IG^EWpv257M2*K=MM=)iIDX$9sXtlN-{x zhW_?htY5kOt=xEU8y;l$x39uh2B%kVars;5_wH}IzWs;hZz2=5zkP^Hu;~NI^W<;0 z-19>FTRH6L_v3GW{q=L~Z#SVT8UEJHz%+l0GtBL8KLj)Mw^dkObNSnS#)D!!$nI|& zU@L>u4{UPzTX7(NJNj(+Ty$~OXp~i5+|$Rhx}34x!L^dG(Zd~`4I4`yp?k;Dmr2aU z70kNK6%+h4*COy!y+$J9&|NKJcWwNMq9%00);x6qSDe!wkGD*mhmP(gU-NOis&moN z;XYB=&*PG7UZyw{A z9Wv%`!wkI6?Pp1bxz|hn4_wgCuAWKrv!{#)Yw;j^|JDj48Jzy_buNE<$sdBhEj{{& z=x-t#wZC1;eW>)_?L7J0TmI>V_O~BC{QL5^>Mx%we_IZh$ndv92B!I&#xS?Pje!gL z+mEsL=IYmajRz0mL3V%p4RRiw-gCLj-(DKf-_GlJ^3HE$^|u#jJSpM`GM@YzHmfr{ z?mUerzwwVg9(P{GlQ+Vk7Sh9={dn?`e{gx+*x8OJzl@q>j3@78V4b_4JHRmac=961 zoqpHBZ8zsW-1A_P59;v1dpvmyOk{9+$y%4c6$bRTyu^cF{K6N$Pi56#@wE}-Wxq;ahLq7>2=)hb_^i#)0~6JUUwozRy?=5^(}K_=iI$(Zft`a&Wn7_ z37s(!M2LD`Ct}k5a4Bksc-)KQ&y?&(r}uTZ*!Xb-!K%(M5uYd>=?{jU-(lHq@)3{3OC2*cd|_bG59{olW(`QNbd;0ZiP_dos2ZWZ1| zu1PJ0i40EP7K7*2hA+*>+mnfB|E1&cuMT}6oOkmDZ~H)g!7#pj#c@Xj!^)+r@SCUk 
z@H@;G97k{_b)P&c#1KbIy&c2d{bxS7ZFK#L8y_yli*Hq<(b2rot0SSK`uO~0Rrq98 z{>iGPC#xclR~0>8Rd`Yxnkqi|s*~S2d0qJAw@+S|fAYbT*DXDHU4*poMMrp)@IqQ#KGddT379KhA#m~TyK3FW}yWx5>d zGdCd=L)_Gc)>{i=r;K!F9kweBfsRU;`t*p1z&o2 z=vZjcmdM-o;my*}$os+t!}$0Yhivg1e^9X~f)~&$SUgVOrrtb(a`0pHoke*6Ja1$x zeie@_L0v|+7LUHOgv);G@<+CojJ^|ZMdpvLTRO6}bZjdQE-xKj7vUR|>k3De?uov^OC%Mm_~Q>daIT`!Md zkY?*;X~F&}23#H4|7gMfALFNdT0nnC{C_z?!fVSbu?30qNL+7s+GqO|_4D*)&`*E( z(u$*J!eW_=VVS)>rd7U%EcsaF-(xTct9&cP{49;~=#$|LyIlHsRsQ2u;m2W)5tt*4 z5vF+EQn9`#P3s$H{rYaT>-L(0{d-XV(Jhhv5B7a>?gsqMu(!mGPjo^j!a%aE3^1eG^ zr1l%mXT84iF4QDrePt~J)7Dp(G0eTb^3ULf{oG5qJ?9)}-f28|=3jKjQ}5SZ0u#ae zUw`KEx7P$5U!K?X%!dyBq4=AKBglH@9CYwP`P)xF@k0CCCt*mxAAbv>uIJL!j6()lBzux5Xw>Jm$w-;bOcg<%_ zuk&Be{C&*lL`>@WTo?DR(|fw}w4T}XaUah+FYB4FgGnvK^Ui)gck9R8o_BWZnF-V* zV?I~Mz_j^X6~o-~x$l4z`rRA3UFYoQ4jT`a;(>F3iNP|@I+diVVH<aewS4jNZUGcko;{O+z9h`1*#lJi=KJQn;4MwBAIL5?#mhP5h zE0Cb)F(S2Ma~G!Y(s*(jzXKVSZXZt0hY=&P_r0TA!`Krj zLNLSjUb6p8!Ttv@&<~x)q-JCur)pQAl<25&brP?^!8yaR6(8RJATlT}*#9*|#$Z2k zDdufQpUd0-XkX)4_{gcF<2+}D+=e5Vstsbme&+ImTVI0TV=M3xVay~jr>(<9Ly!C= zXAQ`qaO7yg{%Pj61d`<~N?@!ob;aMpUh!irynN)Pm|V>@Bdeo7%7Z$JkwHFE7GYMx z3%pW`ClPlEikI0E_&oYQc@RdvOr&s*>&IW>bNRxXKF&J&WF88!p5M>sn9EUWn6@L^ zB1n%DzY9h?!prX+`qA^DZy~c{$`0Xnr3;2%1h#}X)*H{wOp!@ps+}355_-{lwAfE$ z56%@LMe5~f0QCE^c+n7)his|SDEP+pOsPk)xj#6)?R7Yv561G0Id~Y?hNT_O7kLl8 zAM>Ukj}Dzc);QuBflx4y=G%8Xdsj%x>$V^|OzMg$HUx7Ki;qjf+M)pz%OuN;uoa}K z_du?Az)PG`4f+ZcC;!9!Cz?*?%s)>(ZMu~0AHRa54Nmuz zIQ{7=Gpn)BwLGQLF|3e(PtMa>>Xjj?fqM7dtap6`)QTEzt*&Od<5CH_b30t z-`?N#H@c~$+4~}uy#38bldwPr@eoV&wcIvw94L5=`8lX zj~OfN{q-;iKYRZXJY{gY@lW0MzT~{v`w}mE@8D{QVefwY|T=m?(W4 zYauFoUnJEVdX3ZGcOgv{dw&QU--FYekt~S4|3S6yzqrlc-Wxutn@XC!E8F)sLaKi4 z`(kK6075C;>y%@Cd5K{gCa z=33G6j!UCzgby%WH!|k{V)?zHZ*gD&wj!hN3X{iz;pIr|I=^7&FXM)C<^?|H-h+r{ z@2TG>#p~c?yuf!A%<;m-slS6N2d7{EO4!pi;nv@3X9{rNY5&qtZ^mtB9H)%lZjBVe znInaJIff}7<9G-6NIFLfnwKT5#6pW{NpCs;=EahFhbXYUe+8_xq7Gw}@G(pnX+^xB z$Thxw2>ylkujuz}KX-va(|(FjJumyYjnld;_OpYtV}<=xG5j3a&odmXWw9Ukc(V}- 
z9Gw2q#qRbfbY9w{ke3zx6e2LKD25g$y*+v}rfF(RdINeJwntZDW6seYS@W?WIMm?u zB2>?wzy0SO8U3%mKf*ik>QZq@GIP;|+S{-kzdVJo%A*Ch%KN?yjfY2O^iagx`+iKOim_30)evZ2!7Z=nq-aB=^8SL`?;ZKU z*wvBB?~gpkrla8Ysr-8jhA+T#v>e5}j>g=VyEpW!g0AqAUlrV3vzS{cm5=nDVzy-B z=>LR6KOgDmO&M`>!^lX76F*~1mw-L&KGl?#;=g4d+zd8K^S}U$+|yQh|JVm21-I9f zzNPYaVBkXh=-rO?Z5dc71q*V22DZ-9%6}H+BU0sNTpkR$6byH=UBvyy`G|InyA1(W!iW{xTIobXMy{L&qXRbD@z3B;V3gpBuX-f>x+# zbTG=#My0WUH2X_b_-kMj4GR zdmC=%8F`L;_r6-6x9WV8k-VV?gzx#jpX~oBCj7W$_#ZK4Wx|n)Mch2>&!77UOGJB$ zHnJGzp~cRe`eJGg8VJ43Hrg}Q{O*_VQ1t(=aQj>FdGWVmFMs<0Rv1iwYe2V%%DMah zk9zrAHFj#G|Npew-(DfryZz-(f9pbSY2#g|zsX;^+Na>w|JEHPK6(XQa`_x*eOTHkxb@$d!@klY3?x4C1wBsD*!svLb0ZIj&cOPM z7Q+}z#i$+Eo4P8}O#Y;Phu+>h$-Uk(-ad&!g!WoK7N%?(cLQ zvwtnp{hj@5q4;}*{?6lc>|cLJbKZvr9Jj_FTmFNiXBNTJ3JY%iE^|N%EKz-Ao$j-R z@Qp`OBlm}X3V(;Uu#1AT7gat7KOZ?Zwl1Q3{IQYK!w4=IE}_r=v|zZ%@cDvkAEcKU z_E)g%D7wIcmxo_kaB|rnGIoP>;w6#^H6NXGizT3$@geR#orSix<7yanVVCr5)wt&LZVS98hN&0cpQ~udvKD1x*|*|9hLUr zyV4#Mj;+0yvYLAoPKf@!SNiwY?!5_&6ryl;&>z@mwE+?I?Z_4>ih5r;j`$azTypZ2 z;N#@G#s81ySI)!#(cfcWka|q-{Lr&=1eI@{4`SZBf_^f09m>V}?U_@b9hsTC6aedP zsVmv)e2-O~i~N7=y$O6()w#xCJMTi;($6`e; z2-Q>*K#M>;0o`s6rb_K(d#UwSTW!%wty*sz2qcIT;zTN3aqK?E0b7cQL%#pB*4}4G z&IxGce(n9f_>;ZQ+H0@jUGMv>XT59K0D&F9Wa{txVL;gbLFx8?@By*^gR|QIkCXPl zHp6)TAt#&tM;G%Z{JM*=Tt@rHmxcWgPP2bf%V_`lro~uqCs2a@^9uG~)$m<+mR1dM z7OCS|b{+KgRj!sF6}zMJhH(+crqBPKOo9Hs1BEMQ-I6qEz7DQdhgwWWx8e{qeEWdy zMcb?MpmN)%p-Tny!tlzuCQ{Yf7eoE$!sE!70wmTTslSFwlT>l6@OYAjvXAubg7iM- zr+2!H9_?=Is(hm1wxEAPf4BD|&oOz*5NZlVf1=Ura(!O91-(`O6%JpyL^UvdSjfRG zvG!7AU&o5R!|0uhD_?WgNpH81-hk~#lIQj_R1Hy$e_#)Eq)T@*wx^K7NGG53>+G<* zP>ICKDx7}KX6J&u_$SD%1ru-Wx>XgY(zzQ{-*|`zo_l$+_ezYvGwJUbRHvzJU0%BW z9&$kR_mFJ*`w-d(5}Nsl;Y>nMk2Jn|@Riy0_q1hJe}^%G%%s2P;pu8>dwyW5{{ESF zGU~4#AC{X}Zs!%-e|<)XYQ~3ahrYqAnsj#dT5Z_jPM#H zlME^cJ!*MuRDBve_PbogVe!Ko{#posy=cM^4W-5oDS`Xd9g)Mo6OJGRURx4fS6;bs z{$nz#SZMLu9sLZgeM_dM{-WVnXOpk@Zi3%J<8bRm+w+|o3@#s*`@EE#_hSo}8a~&f 
zr!)L4R`{pc-KsFX9sM-_!*;h)Bj>HPaZS~gGGk|Y@Yteq?a_I5v2d(cs-=HKHUvl8hoRd;U3v4!y6izXam>VJr-|G}pI2b=mIToPSdUis9; zs=wjCu~8>-Yv?fZi=x1$8{?^=!eu zQxm1sH-KUbW2VjH1Nq^2L@9}Heiz(19JEUz(GT)iBszQlxemi^Y8w+u$DhIjfMF`ZAKmpMXgx@dxJIAXL$@$m&tWsQb-+#NZG59=>vbwWLCg zqn}nx6c{_S;8XaKSXlymip8rn1U;!dbqLC!Z#D0Ucizl3JoRbAspsW{OCJ-TE`1<; zSRs_U4oY3x7CsD{3=f%bNKyE(v4@n1X_tl%FNA5=4Tg$N7+etk=!C&V;lsxcE)n}K zm7VUHe<#LX7#sWT91B#NnNfhf(Oe}BpLX7|_LAh`(92F^mz^R8)-H*z_hX~3poX;> zaVp=(DOtY3s-p0Uw|e7L(MMtzrr;E;+{Y=Om8as=e@VK4hz93BviGp^r+ro~j$PU! zIl;$`oS@`&P;w)u<0Yr#C8y&hr{g6jn0npUB&V-QPG6Ipz9u<+tu)$EUir|)l9N&` z5y~2DE<17Ui~U62hdv_5Ny9Ln78qPk;qRSi2Ld}gFS;Qg7XO@qKg;6H&|WP57KAa= z;8~K#caF}Jl-C+Od${qr__dh3Nne@-_SJ)5g%-cA6@IXHN%r8?+iqsO`Vu(S)OP24gHrtI*N|F9ys~^h$GozHSH$ z%lRR!PF$R+e}XRwuXNtD<^DzNvKR&dGW$miE}=K!E45z9Fs5Om(cS$MoT+gB6uKIX z+=^JTu;R0_D(2m0w)o=#xcT<#f@piu_GvKmB8>`Olrq>Z42950v&sTMVql>DE>`#+ zI#CpGl|`e|#|7I@57IroAiB0lqk|V29o%H>AvrGCK8E24RKAG3867;=6;GBQTfE*MbEuC-$^QapW}mLP_?GE8{bLS z_aO&F--l$=_urtcfxZuCMu>W(>HDjfWz+Zbe0@KNF>9v&`Rn+ySpWBSs=ogxsrAtJ z#C+4?CjND4@h|El{_~WU{y#1L3BBW6e|a$jxpMDz^Gdm1xjf`H=2X7fcyiZlpBGk_ z#-^0NpDznkZnldW|1yV4wO3TW+3<0DzWf@UY2Wx;fx&BdNxE*5zK|Wm&pZYG2qqU@ zUu=(QZx?R=6`j$OKQkH&fYM$Q`@`l&YQ3E_7UTcDl)r%w8ZL}XbH|0ucChv6KE1-w z&Z-yO+C6xKsyubqRj<&CIJJdt?K_=SJ3FVb$!%BXv;wELC{|k><=X)Royj3*L+AL= z(0_FD^~iOQpw996vtQMYv(E7a?xaF@anaIO7mcOkigXR2AK|U&%f-=Gb36Ny%@{eI zI>)H9>Kv1=48|0YfqPP+Q&XrtU^PXJ4|P5W0z|q#()l3ocYd;SQ|CFM&U5lQ&&lsR zr@)z5=qxG{_Q7N|(j#P}+Z)cLb&H+9!3(;{uV3d; z67@HYsK04M{Y@k4ZyHh4N{D(AAXXghD6U-BxGCCRJoInMV;%DQ%BM{ht+{hri*p%b zW8)E&C0|#E)dqv+8@n&1kj2ipoc_pHYB4S^e!lwm{KG0Y&7&;CeA&N4Y6;oL7ZauF ztrM?}44S`f&Xx*_&(1+bO+0Ul?!v4oM z(Q$NPa>yO{X@4_iLwM`jhvjvO(~fpht3E2w;c5wf#Ow!m!6B@g3PyXWm&1T z)GmGn1AYOX#^Fpo0KnpxzG6Dv+X*~i|GSZm#8nrW#q7+;YPN%F(GN}fQMYC%Q$sbo z^AkJS`Jy#n_#HerU`!S2P_`oNABgJUidHJmfUyp)x_kxW|*;Tb$s@5>O~} zcRKMs&ZE#1nPZuQAMdU6jm;Kg7iE$b%bc_<8|~DrScg3M70x3# z82c-DVWhla2qXWnX22$eH1eOs-qn0(YW~Q%n5uA5rm6(Wowv}3A?`rtv6%T76GC4Y 
zue*F3cTB#f&G8Ms-nb2uBxQb3FO5MWa4YOI`rOV~-A79(YDMLShKYgz=G5(XPs_2( z-laGx-Olb&cDYIVF1eP;sLQQj<(#V+-W+s36E1p;867nlCX%sITshBBu&x zK<)-w%egQw{xezOs6mx)sqXWJB;~nNmiSgEeyePdd*<$+Tl=i$Hb&V|VO>>b98>(XmYNsxZ4oX-POW|wJGJ>;2k{|c=eP0rxsL>GZ06&0_gSYY{k~te9gaw=HDXB>nv8Utt2ftD(z#x(~A+U_z(Kq zU#E7VLH*=EznytVzJK@kfKR`NJbLiyezVuA$DxJfe@yb}V#}xTrQ-!qj7nmJul%HV zvOJK(j87dOqHobR+QMhbo^yX$l)gn)k7G^$Yve1*>0ZErvI5ngi_@{#Rj2EXPQ5vZw(wBia>e0zDIkLmb|dd zD@jK_H}w?ZE46<-;d3kW-UJ#`xVn7#dxLtaza0RclQ)3QxRw5vyy>UOoBkGAjghRJ zG0L8*!s9og&c*oR#N&${c>J+|tE!#k@!c{i>V-W7Jl=_ox=NAa&-2Xt1*nu_AJU(C z+)I4_Z)c!CW;FG_=3tCpy2l($c$wz$dd{CrmHJ730~|hE6mmw(De8>QbF1^C?FG(g zM($=V8OpfOy~G|z@UvdW?tw|l+QiH zEXJ_TYIQ6VO*u%`&JjJ+^((8FEO@_4ZM|1`G_xgqrTh~>3o8%Itj$5w4ce#9+I)t3 z8k|1F8LaiJC&?l1oTNOQla!5}!`VjH(`ycA0;{JeXU{pDQgS!tG4nV-QjOOp=5Y?< zq1SevX z?N+zu!Hz0}^3H-HcfkuzZGl_6t)r@(R9Q7s>@Hv$mtldkpu}D9inAaRTaf20D2**B zcNUa24BQ#*mrD}SxB7*zIf6Xk6K<`Jh#0I()h>5=WR=Rtx;}c;Q$qgl=(-9DF~F@Z zn8{p7qzcPwP=;LVb#;h2QK(vr&AL;xnQEFlvDmDytK-L6?!*!Y)e&46;byIqj$POieXXMMwT4%DOe>{)fy{o&u5SN68d1G5{o`YN z#p^-y?XeS4W3$AXkz*r{3bXXt;l~0d^}BBL?HD0c+N0{i&E+3KVyUMEtTfW#%ni!t ze-`Z$b!p3O_~_D((Ir$Rr(?8Ptk*Hx?tmICCJ}sn4M|5Y6R#;yHc+8D&n_tI)N zc2p(L9$^^uH-`0Yvo#GqtM(G(BybIeQU$YCy(z(1l>ow0=B+1o{2Uym!F-@EBE)kK z;)!4C_fOD=GV#Nk6qvFO%stF3?wP&OFbf>vXUkHpcE>`4%o-o7;;jE!b_Eab-9f=A z!>GuK*P3Q%Is^TIiq7fQ>dXq*<%os_GGSn3pgRQ(FL0(7xl@WAM#OZpQ;XdxCC+$m zO57=tj`0RzI>uX6nHq8SvP8_8S{hwficL=~^N(ub!+GIrUXrQ8$*IiZn}+aX z&=7tOm>F*ln4tpL>Qq>XLmj6UJ5!3?sU^;o5_f9EKXInR9yY_5`(jh_oGI!7oGC$t z7VGR8oQsLGXZ-1{zcGwXFV9j~N1zh1i4}ZM(pbSGKp8k1s`QY4dE>D74l3zRE)xk> z{p?+$ej6&7l_?K5-$pD(9j}O&zcus?5-DNb;JA?HOUJQvFwY&wlxaSv4XM2*7sjai z)Itrk+{s01$jQZO!pS9SzsV72YJ?VHz%_U$+FlZy1j6>)#cgqV%VKwiz^hk?atKp+ z#VM-$c&q!`$-xkA_dHv7zpVDyBfpzCQAl4NSO!&b%kim{H=firSN!G!_pjfYX8YII zGWgx*(H6en%`e69!29;`nTa-`{uO=)0T*@(=3pEZxtn2GDbRpK>f(#v<|V6TcY}{P zDdsh2a|O*fD*kJ0??fLF0E5gPVCZBfGhU3TA1BKTUwH{Jm$u3hNlD#fg|6Y|2Ew~; z*Bb-RB0DvXR0~A7`D_Tw7hyVj`0|-Nm+&g4U`R`644yem>y;yhPc`V*nWaVJaoP 
z&wOCGOex;XZ@toeu_?Fl0>x1YaMF&y79zYBD(QWWhG}a1&5MIn?-JFUYeZMkgqwef=VPBO@E7T_uYxr&@7MmNH9{&L?VDVW5&S!~r+*SpYi4>!)GLZ<` zj7|39KM~KM%csd}Nkn=u!N1n{*D7<>KZ8e7&ZG`#Ps4602do zOKGg8+^I3$63|TV4K_EbiSRlG5X5dYYV1t`7Z72@`dl^cl|%?BQe@*nA=dPB0IDfU z!SI%HK~EVb9j{Z^nqws}z7hX9esLCr#L7#zo<`FCblHD06(SsX6EuoNU)HCqQxx>s z)BTw3O8D=LJydtmTlwMz71;l&pE#ccCHFQE;+@uKCM zed5KlwUq9_;l(k2dHceP&#T;Nc(Dx~L2v#Y@nX2-`LBu>H^~W(h5l>c#j#KUm8NgJ zC}#lC6EEK3%|7wsyOT)T$BT<_*9rqVF35xzcc{NqOoJC~puOV$gt$(7HoSNUG-+x( zdHaFEi^VF@hlLk6o}3*o?wXqkFQ$vrW#<=nOrUfJ4ln*j^84Z!@6iFI=OVc-ACkZ*!PJS-}nX<_3`2a$f_{l2aMr*^NS`r zv?N|!2ihy{9O7oji=TofO>Lj(JTQ1MNhSKQ@Zw_pecAZMGUVTzUz9T}>jN*EYAD@- z!;5t!zc0LaSo3vh{9++G@^6Y4kB+D4|EhRVc#@IpzX)Em&#+L)b{G*2L>;WQi(n+ycjbs zJ6=pd{=M-choM{_cyY>DN_XJ!;#!j57he2~4j>&bCY(y4{yoPdKmR&K|5wF}7w9`Q z0{GV$k6f=w<9&`tR$)Fp@nSAZ3;M*1F6MIrym&@4B4EH*8RYin7e~_pC-LG-bZ?4# z5OK5P#c`lXQ`>)UJ}`Lkx_a;r3oi~EogFWZMgG0&biYfEIDPDZ%1d9HziWkiSvws=9n5hYred0wu2Hg`cMlprZCtm#Vc#`({#cIf^ zFkm3K(Hk$`P=A^5U-ajf;y#Vfu@_#9!cOYE0-&U+t>rv>fKLVyDKenqE-sUd9$OR<3Cs%w>@tUicoXVpzSAn`^&2 zg4T5R@UQQdBbkyf%8Nrkw#J zS-BlwLsf{&NtRljRN7b`znhxb`4;BR{rO^hXXlICE7a7)4>!k%YUYavnB!&awpvXS zSDR^KCim^M{&*JSXfoVR_1j7Q%rpG4shZzM4bx}$SKuU0ObBbv6UBPbPR<5}S;|~u zHyZ6;Zr4@KX!+p!d;wC)rq7FL2Vb8rSLO3mmZ9n;HK8T-9+!i7%PN@UDs@j`?Q+OS zTR*kny-xE+!Ni4EPLJp?#j!vpuNaz+<#!Q1kM|4t7uvns;LI}XIzJ8#sD<^Vig!Eh z?tRzBYxd)9!-bf-KDEdCvx7mBrncBdtv_oh()_rNc+OL5-y*eWdp`4;T`7ykg7x9d zV|kmYeN$U4F9RpD-1^YSdX|Y@jQtI7Wxj8QTfNJjvZrGJijmV{Kb+H1ou^+z*D=BL zDDzazvpR+Lxan_IQ0pmskXj*YGplzEU8h;sTvk*&$Ln~>iFx6rFYzuBruWyXQPwps z3SofL*&T|CGoNN=^@8~`Gs~?NV{FTq%n{L~^HjKbJ(0-K+x1m)W7k1DkICtY+&6j7 zG6DYAn(qpZxzcBVp!_WdE!0bE*QDGsZErp9=y&`Q);;$Xu0R3xjF`F0CbLwoq~Tl)X;`Db{K^NSrMo+-OiPBF_tM^~=Z$(_8{jy5 ziuLN_pMv0A0LF82u* zrE~tx5RT1s2fC9ujI`lMn$UTOuE4estG~Tp*aQo6UnU;=_@%le)^IGExYNBTH0%p$ zin#PuUlkj_KYUBqPt@N`=M|S%XkxQmW)Ed8ZmjKEZ00X1PwSW8M|loUD-SzgJ5-+H z{VorBt&T~{X(SyM7#rzuY>&m`fq~lG3lmb}?{aR6Uiw;qLuS0Ech#uu65&f)tnD?e zd#Yl_LQOhMAlT$$^#~m?o&50bcoUjItIW8U22RzkIqUK%R%yHg@Z#K(_@MWUacC8# 
zS)FlDB?U4|K}M2|>1K#;knEF8#_%X&mx4qbVCHScLtZ;hSHSHN$g)&vJ}F3%^t_Av zt_OJdJOANeyfjUF?Ox<#DT0b9&GyaTEbd6P1r`VoC0ZQoA`j`+4@pwlv3dEd>|ei*yqbbr0o z!eW?HWY=96`0G|b$a)ylPo9Lt?6|P^e#~>w)!yUSJdGrMv&*n<=k(z;!3H(5^saIz zvMyxh&ZXAwKS((X%+erMqR^>$KPGvVw}HJKN86P{csXc*$%_R;xXwGGt3ksZ;N?}Y2di{K?#;n&e>!jw>*{ba(Sr2Hr(;g_tyP`4b z>QIzNA-ak+lvs>dJegEW5597?jK6LBpEK~4-;tC+KQmvMi#?W@msjz!+ZYOw zqmhODUKY1#k9*;+v`)g=JM`tXdpT6!?<>O1zYO_X4fuq>VXSUm`&@sDj$8}98^1tR z1h*y~Dply3rHIA3>ndM&2I!+5Q}Pbz_Yr7iONah-d{cMEg1t7o)1Uy*aq_S*9m zqZ{(b|K{8UjSLuXuV-`vCkM_7_9Jcw$(O%b@Dy9OucQ53hZ%zv}lK*4+e5 zcT?;$slwWq`b*NUf1V7H_MY)pPk@I=*6!DdlV?> z@a`X#G#_eghNkgnsNL}0So(3)Qk*WG4ZWRYL!Z*ZWxvlbY^dE(pagkx0R1OFhYDhR z&N{n_%Q7MRDNqR-{prbwjHDB!NKhUKd_5WZ0$)VByk7#8jni&7wlkXK7|JnHd8^@b z&U?-}W9JyiVE6ORXWa`!&ILxy_B>3^>tx4c91(BSt0g3YY%F^!Wkcc&d%#DFTKyW~ z{^g-zm*=EuHJcHZvY6PXZ+}Gfn~<4QBTpqY2X=aomvwh@th-U$MS1!nBAYKU)XJwC zDx62z%!%NQKB%0z?01BXb8aW+0aDIK&|l(GFg8f#v3T`$%DYE89Nl1unaa8JbhQ-P zhtV3!_3aSAA`PFx0_5Uw^PLob|Lf5|r5?9&@7E)L-}NZik9x2rD7_vKx()fJoQZn4 zi3(s)Ir@4jb>TD=i`xmlHrR|O^VtZhbM6#1AZ$CuLllJOA-{;zh>5A^F4dtodZGn| z+**BpzybBqn0 z&hLO?J?gW-RD;^mxDLZJOiShD-mtIuBgFOo0;pJ?RpkTvG`eJ2)hZ$6Tu7q2YPiPj z)Yp3edf@%iyyHD0pVdx9ZB$eS&e*(sRhC5h`fAjQFfrW&>QiYq!UzYed<>1T}H~bzS-7w7kkUFp7eQ+5fqmO(_ZsnRkuzEDvckNfgcuo z5 z*NR{C$q#eP%S(9K_4%PXN12E@sc?Vz;k*=n_?(DVk{@32X=*V6bnlZN4ii5_U&IU9 zZ~ZZCN#{IV&fT`W@WbPTPojwdMfqxy6FwcGnCgW36MXN;a?}+6TyMUY!xLj_Xc+jm|9o#w55D(ts-56_zu7n6JGuwo`%Vhq zi@y)P_gC?~mjMjR_i{iuVe_oxxB~093-MC<9&P~dY5jATdB=N36HbQjEmc&A7V6ZS z@9_pbMH=7RK>y*j$gR-s{Ya^h?P!~w;d_l_VEEob|Ka1KY?+D`Nfzz<`ifPY@%rZ& z0bd&5D^MbdPb2o`dzNbCdIEgN z&xE)j;{);^z7&%2hC_r>#mhSj9=ya^N+`w_Pbc%J5|O5>k-m%0X1nF%s^ zwlQ>GFg))fb*EVBcnz|CC#_H8c{?rhGdyp-=30oV@mSytlm+(SdneN{O>Ga|nt|`# zNNRoay#n)c3opAq-+SISkqt7DYMP0YttBY~JNqPPPw)&!fnXK2oWJF&_4thVEr|@g? 
z6l$En`JzdFXwNH>*zNh?@D6zk`^~=J>dT1|yOMQ!yA9V+EXL>XtsirjkuUa}-O)qy zAHq=l!hc+a=c~@FfcQaCR?>L9NMlg$9_JeB{9(%d2*uNLp!^U$}IMEJ=KLc z5rCySS!rY&HA&8cy^nf*Bw4T3-1_zU^G6S4y;A69P%k$#z5FEoT!LTT3!}-*FIQPY z^#1*cRTxX)50pPKFO^@8**CxJgckFLeAOvYAaz9c0pturG7W%CDfE->gJf2SWXhjdkQ{&7^HGyH5}TYELujE4uEl!Bih-Q6 z(amj`1{~~UJH`IboF{S^be@u>V@k&25JzWk3)2A@a>*4iD;^-LLq0tSFstZj*6l;+r4M$TweA$K1{fhHrM$56N<$ z(r9svtRRhVF0l;M@Xb1St)kvW)IRv;b_&?kcJ|LR@XarhTHkzgm3g^@mtCK4eo0n~ z$sED8FTSZmms0rVEs)IP?8$~lu#jWza|yori9@O6gv?IHcN3G)(}PK9`uqFMxsJY{ z(eh0lt<2ew9IedRkep-7*^pz3bTB06IA<&^tj{4*%=y4nc zo!>dGfCHi3aprucaYdZ5MzuK8vGV1{`>Cg?;HHcRC+BX?PF;_-e51Et^NSkj8U6}p zrp|AF%nqp%{8b->>cwAw6Yy8+pTV#B3&@wxUmxJs-3Nc&eF$2vUaALw?GW?p&0mfB zW#g}>lAqsB-%1W0R+0lLW#zA@C;4mN=WSkC=v+|fURdN@5S+L92Kv`gY(By zI=$)b4pBk)GS;?1wanxXrF$|}wR7Vl=by$f{mNZJJ$)Ydhl8{7z}bn(aXap(f9m`E z?$PEv(b48SQG0&(Bj)^WfA+lC=fI|(f2HkD;BzcB`=NiT{>-}fUH=ry9MnHqai%bmv+$qt%ai2ISHRRV z^X6HW=)Hg5?5R)k=h4NRw_>z3hMkAMY%dLjUG`i~nKBQ5ZV%r4M}7yqdFcD#&7To( zJ~gbK%wh6-tM2IAHscU<;HX6wLlg7xTR%zkuS0&kPnvhU@em2in_ndWgDPWq--|c@ zoYzE`SiYbo=!Q3cj{eu%1I2(hv#rBq`zxD<;ms#=Z+PvxtA!+Kyma3V@#XL!7VQM9TZ}<37ny zEuB|4A3=FC&DXy!4C>{_{XV*zEwFOuLo2*=wNwX z%9cZd5ABIDQ_Os^^h!g(%$AkIRs0M@Iel&CalzV70RE#X>pFG#H@n-|x;4%mh`phJ zHJ(qKg0C$Y`f&6m3SM52odsu@*Z&q8kdgnM$OBSN@?VCafhB>oc3%H!%+2_3E5&nq zt?&F4aPITpZ*YrYCU?7JSl_w3Kia6?rU(E1sgNhZf5nCYdwcBe+V_xlVtwa!us0C; zD#a;(-#H|?zSF%hk1xTx7v?(`1m};x(YTGzIt+a)`YfW(D^RK# zSnb^uFfmX56MkgmwV{;to?7y3ru9&l5|!K@lm0@#tdwqSg3>my{#W0(NUY!Di@*B5 z#c1<=3l50q`xcL&--)%K^c2y*QvM3*c&g*}WVIsNnT@Tsz3+M*lv1xX-23&)-*>$V z{PmyvTQ5~JtzIcCG^m%Gpx7B0kr+uT|3n*;{BzvC{kG2v z`+fc@zwPi8zwNKy2micT{Bui4i0{BZ@5Z3jUDPtLb>N>rw9u5`pL-Zicx^BQ?`P&6 zuLA;Z`RBJ4HOo5CwYZ$p`Y>=$WPqvH7OOc9cr^P{PS}Z#qiG={=@rPHS9Lnv=Ebz zt}i;X&BwuxP=u~1$D0v^D@|ve_c%3gYO5p3=pQ!&vpJkyu(5k*cx}I~fo@K$rrQ~O zCM9V(&HJ^9`B}w$rHJITL&s};`@Qa1E#Jy3jn$SrwPnsa|2TlPdyn$ZUmXuBGmpa;8T5jk5YZ?`_UZ({TQ@RK6{t=D%!z1P+{y9?+oA4#`tM?D_1}M z@GWlM-G?4CwkG=QK)%BBm>1m<_FwnU>(;2U!dt>5T~k}m)$sY+0zSgmY(AJgD-?Z~ 
zlZL+p{J0BBjrYWX6@{7BezPF`rcL#>6A7b12{S8Hi4~173UPZ~9OLxoL5N>YO!@1O>ED{@W4PRdmUleUw`L6k15BEd( z|B#vCU7KfyFX`myhfB7y&n1_DjWfgbEX-{$sEl)Wf8NaSx7J?#FtK@XNoO8|2V!6J zsQ;RIf2;Os6rqw4z6YGwklzy2cf;a;Ai0&TQU8OF zZf$;eb!}eP%f3DnZcdcF|Q)pf~iR`JlSo)w!?-%&lC zdkzga%U-70ORc%+AL7mJsIJppkleaGN{;WlG}zCbobOh%ouO1wI;xE^bX1qvU)El9 zPdXzys*M>5>vr0s59X6p)P5)@R=vOhnZA$t$0=`jth!3Sm+E&db{}J)#P3Facddv` zsd74;!iYD3sI1i$K)p>`4r?M#V>s({bj4GRF=0<@y+fiqWQx9ht@SulJo&pS&o`Hu zEmN?RfKUI%z7_V2Y71ulX~mdz zg}0MR@%_JLCNUD3+hoL--G|%`C7i**1T8N#DN?WHICJ%+}W&y$UlN%SW8@>7)4E7oQ$% zdMCX4Wj>F7E7nsSU0-I~%8q-KEs21c=Cho7Ri9HV?mwk~gkf0b1Df7AWsvfA3tYmW zNNOM_9Zbl%o0{c>Cyq_jmxzr&8vbgW>F~t&Dy)2$Ab0w|sMj8KDEJ zkE*{A-uWq*aw2zw=BW>YFqCi-wNBQ*@q5UffnMUYhTwZ4R{l9ONmJXBru~pVes?v% zVeh*E)UozxfZ077AYcB7_b30mOy9RZ`ks)U&j*zEe#-w8W%AFzmvSLYsGY2*W=&z=13`zayptL z+7PV27>?N4d5-_;=zO9K^`n&eQJmW%ONa@embv40VfSUcFNc!;xE+zatq5LO8Ct?a zN~8;Mnz`r?kPYeDf7~t=a7}JdwAh-6_`iFd%ITh1;GSCOj^5=gH=*RYYvFrib?_>e+}>dCbh|2ft5J;pyi&QHu6se%bJz1P(QYGL z{gAS<`Ph=(t}q*b)I;2=VO8TO?u}UV$Sch^YrIBvppr-dk0slDT{(!%ZYT~#x=+os znYp%1u~XnPNU}EEd@0U2J}aj3qcJLb<;L)`J88hmJ>lkW@Kp3{4!-`OI?`$sq8!Br zjfv`4V-1noL_5&Oo{c{S+A=DanUVVZaPvP%#pNAJq6rQ%G#tnq_czMRkCE3&0O6%K zlRCV0dW>(XD@{MX_mI4UoKF#3uxsQWZPeq5>aOH0HB)MH=|@uHZR+v6TW+AlX0EX| z5Zh96TBv^et;R_ch$v_Ufe29uIe9$!oHBgtm-zTvx_ zT=VHbV@HM^yp?@`k_q9^ZtN zy*oQC1*PAIu{E#a6*XBa*P(A^Y${~MSGT?2GP{v%u7)!*HPw#=9Vt!qqB6|TRL9`& zi`O#5wf>X423|ELKVF>vyX#)($#|i{oF}{P6;GjRM!sru6kNlH&UZ-YNR5?wuHJ*Q z8Bp~JR@J2)-ZIpOk@6E#srS6AFn;G{zTwPIPA#E*yhdcxQ%PdiEstU?=m@IPU0a0C z8|^tX<`ye$vZu8bglse_?>@DncN%;3l_)(`x~FEcJ{{4S0INFK0#R{-gW{}vWU|W*Cv6k z7cCC*d9DQT(FSQ2*Er90EmQ|KiO$@jLV<$Y(G@Gu*z(8{1*bz%ofE7v1w zsb|_xJtgmUuMETRZl)FD-#|T7o_4QDVb>{)W#jua8yfl|qWS&@`FomPSy%wP-52?x zE_8ofyuUL}YHGWrfx-To$W4R{qBAl6AHEaK&&sKJ;T8HGedYZ8@QNe2p=vEkV4!-x-v1^2LL4K6c5VoY1%N)q_SIC`7pVs{@h7iylp9i(+Aj zwGsl65eA5c#M}i!px``8LQk#ma#1Du1xBHnJxhkHB*gIQ`cQavUhW&Q#@zms3Y<6D z?-UFDw_c!9RYnMfJ1ZZJ72XlPZfxYMm7BxYt;?&6d^P%BUib&=nz_5zMAkW7b#6nX 
z3NO)w@YF5Ccl+bJaOpa}SQtJmbZB_U*hBNeXKW0Al&zaH!$b3H!)MsLv9)IA{>}MY z=qIWwp=aUdW~^KC?+kr!d+}y@@}3AUzlgw;fXknYpG}A=!Ia0zx#87t6KaJVDstQV z&o6KuNvltZt>WC_s!&cW9kNqTbC<*lP1z@=mz|cN?BV8D;KR1)VY4F?HLi-Sj4UC6 z;rBc5&J4f)G~a!G#LOLB^cd?CRI%e{hR;a)sxCs&M9uZw z_pa_1`4d&&WL?wh*%{RHjQy+Uo$2*_mIVzh;XmY0LElj53ym^!E_cl{IJgp(|DX6h z#LG~5zl}rAUM>3a8$M`ha;k{sZ_NFhvAD{I6N(!48~dk18>6Qz;1v1o)z~kn!4vKM zI5MD_|L(W>48P51(dN@Pp#wX4$Sl%Dk8w}?pZhZUK=?((o88XuCu#G4Ag@=Y7Z*!j zuX6KQu4e8@p%+Lmq%9|vaKp>>TNHd!Ils2>KPoo#)GCG4@~rhWf@DNvrbl01!W`L^ zoQ^1>STd5jTTNQL7h76m&IOwp{!{*tnMk_&nX!>-U%D?Z^5SO_2bN{gtUz*3v7_84 z)1(|o*Vqw8l>0^s1O$@R;cbCltrD5*C0T0zdaGZq6#dG_mM%i4W<7*Vw{wpiFM1x? zA;WWbAjhrzzmZz+qWUi+jV zRr!YP7pjD%hufZ^%A)C4=oPd`_49NI;5nVa-@*m?ht3QSnQ&+xwaNFf|E0jV!p&Fh z>F(YhdNeSh@bdZT0pREPgbd#jzaG=#!a7XIk{LI62jL2%TlCI@GI{U8cp&_B1W@>p z_Zq`oF`?_9HN45t1)nwai(S`?H+3|OS5g~aG`z{gKGcjX6TC@}KlTDSt;9JN^tJJ8 zZ0TwMc}mwwA<@ttnWc{!DJMo&`)ADktSH+DgcDek^qRC8!QAcBe4X-LgPHvq7&QO|hW`6w>ez%Y+&T z&ht9ur&Fl4PuP zQ!v(ISPvff11;}xas}@>m0S}WUg$N`@)1O8T7H*54SK^fws5w!SxJ}}&cO{k8ct9X zY^4coZoL|N_-^mB&(T2twF|cUYsd%m_eb38wKwD+&02g~_!qW?<0zjuLod<7_wiED zwz_6&Uk|NqYMV8?kM{k@|6==|u3^LnY2T9n=}SOC`#zqzeZTbgwDyg>lHR_BGJDB$ zyDeN_?Yl?TuL|@wX4>~8w9wn2L9E}t_i(TFJyd$VAMHCx2{h{^%Cn7j-dFqP0aZj&=>Dab7f)J%pRTEm+^)nVxm#E@DJx728RlFno8{^=(=dAvf@!gS1ERB3;s_YlEHcu&o4@LFA}`r`(_KdwMmo7(O^kM^dg9kkQ-$Nkja z18DE?(y*nN^_5fbVZ0o)a(CqvT6z2XZe@D7o~?Yn{6c;|&NngoXyxyCe?cQsTX_>z zQxz}Xnn)vSEC1ea>)W7^#oGic4fKcMajyqkhF?%)~u*Ekbx{m7)MGEZh54l<=&H}cUbt){cDoBa?>dj6P-%B!*8i{T`)?jM`o0VXma6ab zuth=WCso0H==&K@_NDK)k%9LaW@Z9?|2X&3_e)UW{n7V1O5i2>JeB8oUfQ?5&(cI` zn|EfqzW>p&*&E+~Wwp4#<>NioKEB^? z@!k72bTCyh&h~-lBb(D(FB7FaTDj5Em9(I>1@e~X)StuBvlw}Md60*pdZ`GV}IWiC>cA4sE? 
zK@F4M%9%oc$TwKPO+W7;lvPs@e73 z@@h?#(F6-%GOrrRCArLly)7UkmwEJIzbtcE?knBz!?ULuUV|S-|L;ftHzf5R8KD1U zOM#$1;`8jNHk7H%%x6x{iMbIcWkQo+CN#_O+o1LG@1ga_@Jk}POy0W838C_TNT~e9 z6yJN5j!7y%j+e~%&F(jt>)J(Q-sSWrO>OT^))GYQpgi1st2AX|f!2{|Hh_)kBkc;V z@QN1wo{~RuWn?96HN0X8zlYg}OlB`p*y?re5Q6laEVMSN={YmeythAdnmi^kEu0EZ zvRBY-ggY={B@I}~DBntuWI2y3-0}I9HpTGrotgtGAL)-se^K+pEf9`m1}9;pqwhgyzj#Hv4F8O6{Qb(b9~S_ zd=t%PrQ{pIK%)Uvx0Yq$PDi+TD^Q^pu94!|n3ttoz-@0RhmRDbrS{HiMrF^EefUP%Gp+$F?QifNPue3FH zqw18qZg^{=UaFPkLfM}_D^kVts@NSl6rqXo zj;*<>;kte!neqv*n9m_^(iu}p4rMU)j1^&rs@by)JmL|4D4(?Qg;#J;2XmiD(v&fG zV=SKyKwX2`hs*jhh7sLCxtK&ku*!Nv9$zCHfOJup;$R_1<+?==k#YR{ykc=BRSw}5 z|3!FeJ{jc@ic;h0T~u2P@8T6qYtlGscB~e6U^+XC08#uDDNXiS=S-9-L;cu z7I7uV*Lq>cvn^2z<+%Z*$3_yFm#!I!GQ=O&)Zuteja9K^@!=I8=egF5m@bD*m3i)3 zWoz~+DZf1^bf9k;C)F6)6XT%^&rB(B!XabJ?K`b4>=+j3bs z^w6hW7*LOz4kvn+nhrAOL>HvjGf^qH51!P7^CBuF|PPrs>NmMrMX<8NaWw&IBrRb(hpmeO>E zeY;n?=kQ)ixw7C|kJ5l^l%g?FiiD#5?#)!WSVcoF7`SC63Dt88oXrVb>sWN2p*UEZO7uMDO@9dvEv=%R<84&V|9v|q(BjuS zeya_4Hrn#U|HMPZ+iib`m!3+K$6v(oO}~Mhme$ZziT5Ai3GtoH{`*sS7~LBzzB>*f zc(c~RKWx99dGTFcDp$3O@0#&q(3u-962FL4V663oIq;c{a)1WZ$FScN>H}xZQ*<-u z@TWGYO{HFgvwn})%zpKzSLRBAV2oU1wqRZxF8HRluYY4u&USVk@Zf1Qms^okpfr!Q zFU4F#1ed4~cXqHO_>~aBH3ZKhHMJh+g4V~-X~so!DSiV%Ic8r#z3vF9GKT`No6!GV z<6p=6*XjQCEdM&qzt;NKI{!Muzp|Bq%4+^Xj7Mvx^C?O`g--6vzDg_Rn0>_>Z(kbL4S?l39MvW+^I1mOHtS zpGbh|2ftG-Qt9p+!MT%#Y!EhIsf5wh$E*~1?e+{54}d=ZWL_wZtk)u-9*4jt7a zOj6b5x;Rs`CG#w&S_?huoGBXfO^29Q7dlf)xS!!niNvPlIa5kwQ_7twWeuU7(VScs z0FCVzzGf*25N9rQ&^^p42yP+X>}yqWZ?5e=Dp~gxijEedVAHvkqGeVfJyc97XPxdV zJ-cOoEOlyHFS}qfgR@sVosGT6sHOi=L217pkd_HO?o)EKJbR0Z9!?7>32Hk`M zXig#D|4`t$ifnP8_L z{H*&j6}6YUuD#)MLDq^^V*52Z)L3}ZTQ@4Al6p_$l=7yKd;B<4W>elkl*d!X_$X6C z!i7t0t{!R&jzUv#=c)KGJx^>K7`6cq^yB-ICX3L|vdL1)X)3~Fcpz25VUH!9Va)H^ z_33A+NvyOP%hr2SXyT@}C&%FtGs9Y8mQKb*RC(OW@oS`Z4)wAiTfy&XEw(yA>04m6M?Jz@BoR8dGLN_gnftDN&8!7 z;ZEooTWO#b2uT2Fb0owAlYDa|CwVJFFrnsO>MbDVMWwC30DErvy@9*+1SPW^X0zN% zAX)Cb?ESD3M%(9;1@yaR!$x_hD zn?K~DKWDLlw>-I^rA4&mM)ENeCm!qyms1&M4crXuVF 
zi?CxY!m`~yfv|vXW`rF9!bZv31)q}$Yi$# zcM4%;2?>OSH^orc9{AcRW4;vYGPtNkg5cldAX9uXCL7>r9VSr7dM$;Yr^pF>^(cT| z`MDm=OycY9ez>FXwMl#}SL8w@E?a!fyB7fP@l~xTo4y$@Ac$bvf4RM6!dLSge7!G; zuL?@R*Wco5GWg1je-dB)Dt-?E0j{Q6OSuPJ=NXLg=JT4$@d%DAu7GQycbfu zd%Ew+RL*SRI$XrXuTBv+(M|JmbVO&?xH6D4oKR3 zK#}yE+cO}kKDmO#4iJ)dtu|SvBk6?)6iFA5Wx!P*{Otip(k21k{v+wtivCX_=?|$> z41;ypN4e-(joCbw!M_~}&%zi(Mqo?c9tll^}TH$Ry za8&Y^&AsMb40r2+qu&G^-Up8E%8H{i8GITX{YM5IJ&_^=IC`9llfcn0@S0%c%KOdL z;ONIyOdm(XKFs{#j!fmuhNE$Dg}!n0EA%ej9qq_#Y;PR>(QgRviKEQ~X2;QGkXJ)e z_a8^^q(|{SPZ1=5Wvo9+#m^h0la?L9FKfNKP@Sf>uZ)DBmI*&2JQPLZwv^kA_O^^$ z(JVsCV?^RA_IkGs3VEyG#ep4;z16ovwwoA6Gs`fV>6sYKT+3(}AZpB5pTcMwvN4)E z7>z@|#9x?XH0Cy7G&2H5V~?MjhYc=>F=*l~s7gH-UFZ7OS^jmpx#}Mo?LuPA(0xGK z4yIa7>m|M<7 zZ;bC6F3}KZnDBF@*^-Qssqf7=wlfdl9wk>N#&)KkmS@C{?Y_9)x0wm*vOCxS|22n#C z&(EaScO#ELujk(Ke^Rf%Ottn$uRkYDlP^S?TaH{(^?HYZF`?JLBgquKKJB)BH*G?F zVs2ely*>l)s_6@Q>2(!Pq=`SY+GO;)4CD;-y2MgwZzC4hTg$TP^n#V;e zRfJvLE!vrGR%2?3dcDhfV3z_!*Do|{k~TC{f-qk6*Hmdh%BCycS+*%js{ zJyv4;l5qfQI)2TIbi!l3>*bR1JK?LW3{R9*3U=pM=*w8Ri*l@AXXb4qOqUw5B z;z2krp^d1P_jR&}S4%V7y{~dlB<~tI;(L|R+0eBbM-WL~qZbGJk-tS`d%l`?Gm6sG z_L&hn-^f7AeWsJ6HCmCAS1QED{fvT@;7B5)Wg5QC^_|irJm9~`xTc8BvpKQbB1?G5 ztu2@tUd&(`$;@(VifMr9ZfzmI=DIcPN#^$qe$=})5fWO6zo417^vCTIUHKzILu>-> z*9nmYa-a6gjV=h$6Al!;=6KD)>JFX3ZdI9U+@=fho(8~m=PwR8aIK<3GrWla{c*d>+)x@ji;qz*;8woDRJ%MOVV5gS8VS?1^DQaX zdBiH;I_nKxUTJ%PcDjNkq}iJ^uQUY~Z?-@o>TIW?6YSZpGRsDw8-R^glIAUgaQ?hY z@|UI2e(LG7FSqDD>j^~^5|jS8)u3O0+^PrDAGb?%`$>xN00_@$H$@mqQBo`!CPY<1tBlu9rPGqnzd&q)B@o&X6nB zo^xq~6nnlA^F}(GNg2y9mrPDT^8g~gGilEX0-^&V{wq2=W6wt=?YTZ-&#t;cRQP!j z6=TnJ-h7ZIu;)3lcdYC1jK#@j&*KC}eyOvN@oVX$yo1R-er3X*7b)y5yr0VP6iT!& z0c5e~ZAgH*WcmI9s45soJqlC8PwyKvL{r-XWoF)4QwBU-;NE9u$#|teAvZ*p*aS3b zD}yIQ3MDyxTs$(mO`_y%bgLHS19JMcMq72RRauPQgzE^dBJ?4&rh-(&az;r_ybFFx z0tswrCCFlf)nu&LsJDTKRpz1K;WpiwetM;W0fKHfL4pUVEc(2ji~hK6H8&J^xtU*$ zS!m#w#>%M(F^;0yo_nQ9CIumkAtd|nzyHJ_ge|2H5-KUfY70l1Ktjc~AaRrK3`nTl zDIl@NJjgU}aG#mr04FvQ1w~MSg04OJE5HepK{`$t30eODGSM&najRnKk6Tqif84Gz zH{6;lwesbvl=T;(QgI 
zYKi%30Axu2?POwt0=&nVCKWOsrYIINE|bMv1cD)`ofHk@GnY(|VIBY(rEX&EuAmgi zn1}z!Kn5o)%dh@~U+k`{WN@&6$#(j}`wbN%XN;`Q8!Z=-&jP+|3AVTWXPHWr4K>2T z7{B0IP~$9mR_`h3Zv4^&YSbvKO%xazeB91S^eKZRYWVY|QV5KHGZ*Yucv0mYgZP`; zZY(uGA!oJP7Ojvr5}}6pJM3*Ox1J(|B0QMp-Az%F7T&^>p!c6H=4{z_y=C7E1NPm3 zbuLj<)8jAaE=7sBRWkY`S~HgzG@je5xJ|X^Pp8(Qdphmy-G;iwPnDGn;6jgfvbFj5W^P4w$`ijvY_9c1kS3ypaH30)2>^g3BE_A)YKak5$H zw}mi%sk2z6_ZL4wo0vIpc|x|LGF?|lQLMlCsvBwagkS9pMMkMJI6cp0ejM$uIi zt3G2hafbWf(m+y+W!Um&k7i zBrmYqWAr%xCKINv$jd>Sq*b?IVZBW%n;!q=NBh*{&tMqdjrccHc2pj&uoZ+U$Cosy z*@qEZJ+|{5ru+8Se$Is}?MCcPZS#&YU-HYxuKjsDP0xv8o9dkl7i399prbclMwj0r%@YP#yc^(_B7&A--~tNxMeFuAI8vt8ABA$4k6FX<$~c8yto z-$PH&7pQ2>k@R#H$d`(@MH(R{^z>!YOwm*KI@46ER00k~)g=?&ng`(RJ4roNPzv5& z%UHtb={I`f?E=l5pr@Oyb{Og~1CA|AlAcaOTckBP8HZTf>;UDzhwi!??R{)O?WF1q7wa^Jx)ZiXQEeel~gjk@{wnXBdO=51=z%wO*o7KjWozd0M`ZY5xy;!Tz6*?(hDeD0XERRQ`W`{}1kR zx~s>r>f}=K1HAu-kvv&y5Sh&K0Pp`H%YZ2^SbD&_-KE0F{pfam_tb~5|A#v5@BSYF zUZ(v&7+dfCKMKsU|3{B9?f+5wz4rf5K43umf8cFB_y0KP1CBsB8~0=X&(;9s@Z|AI zN}q8Ey|n4m-_ZCv!~UOUv$V3xDU5i}anB0HVE+%@j9{uvlV-vGABJ$Ckm0CnEMN7` zx++sqdx2%J{|7P@)F{gm^{0V-Zwk3*-2X!vd+-0T1vf5)4Euk;=l8q+M@4uH_k{#7 zj6rP@azcl}Q2nctF-T%XV za^3oTUK6%F!oZFzYarPFqhk8tI@QOX?kh5tGaI-L7jX$nt<^ICb>MLIH&-sA*YVz> z!^yb+XA$8i7~-F@|A)Zr>((E3eQ@nll@+>o3UikKZ*}-bPR|OMkymD}iLW zkqd9$%|(CQuCaHzy*+s=w~_uj%)ex*9)u`LTYo{8HDnoZ+Z!(Pk@vvuQ)nV zcw-fqzF$a-cNBl99;NIT+Dv^FeyGAz_6wPHm6A7^gS%tkdOdjU7swS}+d{$smAUkC zF9r|5-Kr$+DsBqyeg!9n!QFqP;%>5vLnsB%Yic%FB}(wxcQh-3aO!Y-as^UGcpe}U zK<_qjT%I?B9leX14SMeqz3^+4h1d3{L-H1pdwjGhv)R`kh;rtogpW!>ulZ>BDFjmR zd;RD=_O+kJ0M78*KBbuky-VnXytmcQP{Gr1za`dZE=RoH_t2N7wnGORyuFuvG;=5={^L{RS>@eh_%p~hSm%HC=2{+y??dnn_P}aDw%<}jM^n3>L>ju zO>?A*Kp?7y3&k`m%~TSLuqKD*)GyX1kXn!Q{#g5>($-%9QU5j1pyDoql37;SEM<3O zSzzB39_x=Rg|PbLR$#3^Zg<+dBr4|H7nQdDf-Jviv%HU>WR^>bODUy?WVw|KDGHDE z$E^@nf84g38*`gLY7@l!kJ~6pTYo{8e{M_y@!{MX3Vfi~!(2-Nuv{tD{;!AmzG8nk z>tQxE2%Tk733S$a7<4(9qw?{2t-zojqc`i)li#4c2XKr3M-rc>3!iUN;2Da%#>{uc z?1?wRVMmWnN$%&|L0uI79fjYP+|M~(f$vZ{%pZKd5&P|d&(~nHh;EgXg1IkUGU2m% 
z06sS)@mWDB_?#yFevM;PY9U-2k4}XjabP^H{G&CH1NCC?CFF zn4hVr+3lnN{2+WSpcY^l@pZk<% z8b0p;@VztDZ6bg>@#lljO|M9i(VkLV<_pE>xi9fvM0cA0ANJk_&aUbF|Bsq7h-O+- z4T7jaTKQ^HK}awdkxJ+wNSjoH&>#pMq!P?AaT%wFK`0U$mlBtvZHh{?!^y=Z2uXLIf)KYx$^ZRXYwgQ9XYV-^>ihcrey{&{$=+x0y>8F*c`oZ&xBPMI5O7e~+*`g` zbtCMC5l*Ox)s?|%miDVeG5oR@=zW`0|KxB;&U+uFC@l&!k={Y!)ur|r; zgus~F2^V$LTjPCvo0-Nh@o$wvU7f5=S_Od3DgNX6eC+x`uKzO{aWd-wb^1SRs->AG z@`Paujh$CR5g%u&GB47C{~IDoU+q2!N1O_=owbT=+gG~}!mFp)A|;(skzegT2ooqW z=-13W*_+d^Zbz96P0Rjo?t{=qn@S=|>euuuv74=kbJVZ#!Imk~+<4(@0{iF#FXQ#R zxL;!=KIo7`^eWb`xz|76UC(3vntu9N^hT_)ntzL}cNt1a<#UD!U)SQc+ zUCVtC?mV3;Z0|->%(;ZXlHy6yr|5@fE>fcARv?tgZ@;?Q>ru_4l7_V%n^s0&{%=sJ zt**PEegeLGPcC@r>OAO=+b_3C*SaTK{iY5;q~CUOMXH}bk;`2<>OAO=A{V+l-98%K zwgWsiyVI@2QGeWC?CzpY zk(?2pD{uV;MQ%xvfl57X+*htrFQn*f*H!9e`ut06Ewd+WDp9F!U(;NRsy}IV4 zBz;x9tUlCGxopj)X>t1XunbnOuC#i`?}O;|s`^@iUVVxCNUuKa1`*5-av8l|ovo;h z*Q@tvYBWCvH|^r9la+W984j*S#;HTVCo?KivR7%5uGMHux-+i&(U$#ueflZR1KW%Et$ z(!=EWCYS3~!hDlo>3QjVlPYMAB;&u{VdE`U&j$O={^oeAXXPHDnCvAf?qgpN_i2dq ztRBVu*Bcmlv3mBsxSn<9SYGtGP)RX@R$SQAxkuId`6gpYDNQqnxI0%|Smu+ere4)H zculn8xTLD4soJgN#P}AaxUeT-fXo}%o}~FEPaVzkSlq|%H~SYJQt_0y{A@aDR@HV& z&7!!EL3bO)pG1J!7~9B&O_^`<1`uNZqjv-UHEq7h%TVk823Pj0oo_Pm=T^fOyRu*H ze3N-c*&>TwS=*EP%3Rq)DKcA@F0rt?GpFf%Qoy-STt7l}@?VnpNnjK)JptpO*KRjY+C!nsR+u zUPR}9>^HDFrj_7o(tMNCM)SNhJ}{bxRQ#dj_`p$g*VfXH&0=QEdQ?FTJc^)eq+E5^ zvSK)a^?G_)V@w;s$>eL^LaTB>6@h{p(kLXrxaQF8WLBP^OZheu!d&e4$(V?BeU@)| zIkaW&@bFB(rXglODIJEOnH%1iFp8q5n9{ofR=b^}f-Oak!1BHDPGJV|W z{w)#y2E82)S6Nh%b30!UO0V75`7bpj>aGpGcvkg2QG(X=m(*~5?5FO!kEDZLMzXW# zs^2^AgWW&l=TD?g4>#@d_QHi`S%+4_vayV{(Zar_KhBJV4{2x=MBkGusKw__ zNX^kMd{jr?QAg@Eer4UjeQv59&8PnTc&I;a`$o6n*!`4fYL)$r^n>|~ua&p{g7DGH zM?^OFhuqVvaf9i80~Z?-(FQKdNmRb`!&FOOw}FcY>GppR`D6P(h^)!`Kg_Y??rZ^Z zKc8S<1c1OCN56s%Txr*BzKTdbSGq|P&G!`g@!g~?iH7U_ypbeywmEmDOoKh&+#a!Nw}qSEW6Hat z090Fz#jRk*51<<65o*}6gX}Wof8JK_Y56tOlitkr)?jzc;L>8M%n3(wYFRsH0qXRn 
zhPpv~Zsq@EtX9WD9_ph8|IrgA9}U$<_xX>u_8&86vWL;J;l^9Js-m>iwT@FRnw`vNyv|La_zD`G$vDJ%Jta8^iqfPusaj)$77)oB!In?B@=Vyo)Mf^uv$C z1!H%9AB-oVW4JDR5g?AQ%WifqlixS_Q0Np}=ei2ng?}^Dv@Wd69vum)lEh*ZtMLS^ zpq8!6Ud|gad!QuNuXA1K<0jzOv^$sCj7zLaf&(^Oxm_m><7-^*gV}vB-aHbh$$yEh z%U-N!2dhS4@m641sqtutaQ?I+d*)%to5}CUHo_UZP}J)mF#t)agP$bZYiY~>?6<{{>SmU zNSZU;N8lzhjNx^^!z|0BBH+HgB*oP13pxUA5nlBuhSvk}i!5Ht_HF6s#!k$|4kfU6 zfa!%whR2q`Q9eSOe@)odf|oaWwKBR9EL-eYgRt}{Q7RU{{pjmVO$W$x&_*HeQ|?|w#mc8|=P za>aN5{z40jJ z@tj<3kh2qsiaT`lm&rvZSB zfjPD23gND#KYe_g_%biXm%li^JXg3k#+T<2J!(~~B9IA%hgZEGZ+6KX*#*ZSUUdQySYE?0IC?z+-bZ<@qzq#h;V-DgNXYgHLTk z#Gl#9V^<}jQkRmbls}W<%Xs8TW8~%`(3Q#W`2wtAn{V93ek6kfRghe}kGJs={oaXw zqMf-O3E`FcSiMxMhp(zM0DaZTtk4>mGu=f)wo*3NDk;o)4<;4Gm-?XX;PmeDh40%Or}ir1m?2i0_qmWmp;Nu1p6Q0=Em{9%!__YN&absolTR|@ZJNNF_%zQ zeE$Pypv+>|nM;w`2=7k-Co@Eepves+D#iO~*#`8{1hWW=W%7;BS^AEkQSx>y*wOXb z$#vt*J9GzLc!!TCG8wg2ly-~9q?2s^z`fK!#Atr%FiZ0a`hP}5^CJCE_^5D`nS?t? z)gt}xMR}8wywm7COnN9y|05D+bIP%Mox1%dHsYs%d@QI0FDZ48=sik#Jcp4IJ$52d zDZR%-?=jw9o1dY8GWnO5*inD$|D*kBUO$S2#CD|!`ePi_pJ*na8lk=$4h$F&t{w3# z*urU;FXiO3fu2nMXN!yI4{3Xg2Ta76DwT@Xw7ftV0#>{(Z^JXfE#9ex5t``T-O7U3L6@rg zEOsX|?Gb7Xflo&z&Zg5PmwjPjzsZb{w=%&`8@AnBKkOoRX^8hmyUB@%a#f!Z(_e-ow0gxfa7+;z(Uoy z#Hys>__`q#ar|Zjm_3HlWBX?=*0Y0EBTBs0Md?c-7)kL2;nysHu$lY;3#&QbM@|VU zb|_J=ncO0f<7ik7kLpp??(7NVGoFA>%u(QJEfIWBS7$twKJb zFC!hRcwDLd?D=#NLm?F1qI^GhR>>TJ)8#aNd_TT6XwUqI$EwBi?i#35Q{nJXepf>q zp?3g2q}`wIS-FsD`|~mX0bK~<>haC$6CHKC*P#iRqNYW*_6X|PzunD$Kq(@y;eLmM zyh?a=&*J>cNFL08kg`hI@9@-mXz~`3f&T5`{0EIXz}Eo^v!zxan;Fc1P!_i-5j0){ z+X6%CuJr%4-{A=QO#Mth08jsaK@okcd9CofqpPR4xO}=g*6H+p6=6uPplfk1TX#k1 zd&lYf5xn#CEufU*^j${7EnZ6UPowW_2#`kK9telI#FiD%cQU_3-z9K3z4{S-7Xl-5 zfD%Dp#W&08+rQmX_gR3E$sanux?3~deYd3~Q0;M5J1FjMnIP|4)Q(ZPyL`uVDxZU> zrB^ec@|a{Qx4}VC*<6Ggi_`clDHqzabxH1NG(J*FCI!QHA_r!BTau5mn4ZpNn+b|} zbpgZUl*OwMLUkHMqKL+AnA4UO{j!<>#k31^GKwRUe|v6qx7|PPF~x3WazHJ!EjX;M z+d6LdRKLy1&8Uwq?Vy@nzvbMjulapcTh}|UI(tA>%O_tucTW_JwG-D0DhJePn#2J; zTZi<_9?+|GNU!Vxy<3O$&K^+PI;1vxKwayQx|X^9MjiF@yqj0hI;1LlKy~X7a_inY 
zqN&tn*}XH`1&p7b%=q6Bv+6jR44J=`#UGFJ zev;P}+m;lNzn7!F_iNpPpK8=Er9L`izVF|jq&(hmmd)%% zqKNsrYgsWK-)NkV4GG+P|qSf>8qI!f^&d0zVlx{g>wTuqDrT5{!c$z`_%?%Zc;nDzTK48cwqFt z#CB8v{s9asKHQ=z#(A&bKIsKn|d$ej)92D;aSuL0D{ypT*Zi;Tb0!Mw1 zk*geZ(X3y+lgvK z-4AI*sxAriV12;6;?VJK*wp^*{huvd2W0H7|al_ZK^E zGmAGo9Z4>8-0c`{W&6C4@^W0O-w|gr1M|-1GVR}^s`GR8xVe3DL;AH2(eQ$TTnpB- zaVvMc+JYg~JnCkrWE4+9UNEG)-Ygi>+g*COOHX&HahF0Wpv|A`7)23k zh_+b3ogb))tx&%JZYpX*@l*kBn1JU2_fKgNuiqDY>79haE3Zug?5uwOy_AZtbi(N1 z=b+d8ijpnzjNB`$FW%7eqg5l?{};K%71|%u?~{XJKBj#DlzQ_R`X-bA(ft-%^0auW z(-d!Uw>nKxSd6eW5{*9r1~#GfCz=y?pc(UL{yFCn2O@te)q#cX+34a)`0Ai77E${Q)HiU7INi7THS@uIvkN^;(20J&HLF z4`59!t}YF5H6P%6TupPP#^KB2s?J;_J?C*X$?KPG6Kj)(tHY)C(x2wVwYmD(TRDzHel|A*rMW<&&YGEzbRe^#~Ab@D5{AA$2=JBnW6$%)9K ztIQVLQF>S5^j}m`@=r>XRBtd|XTC3YG6YOn=QHM`G^1bBO+aAndiga-xUBKM+LahD z3zE=Bxe?@gYj>uzZ00G)8tYshBi-ada)eg}pZ6)QK>|FikFu+jR7!X55`bpD?rRhC zQBK$MxAC=wjs2-edQ&4G1w3fJ&2Tdo7-#aY{V{S(2oFKu5rAN%*?wEHEoQn#6cWr5 zT&jU`U3v(el5OSnU zmVwfCqGw~o`7tr|a4nJ3V$R=Ydob2Joy<$PY?i>z5Rt#`e9X zLsI4fT?u&2H+)q(wBxB$36u2vBN;l_elWF2Z>QLPSnDHZB52IyKe?BAx3d?KQMaZI zS{_}qvd6<;E3BXGmK(aJb;D74r!l}sFF4kRxJ`AornQRG9;;gB_VRE2`U!nA9(SR) z&36X*X3we0o}rje&+K|iNwy1bM;x+%!A}MbEcTvIuJm!DQfG?*QFSkJd*vBU5hyJ1IV;B59)LF_J{Po z%1oh_+@L2|mvi3V$Xgsk#<+rzzm#I{_cbiAFdxA(TrQ_t;}s_cn%B0=uT z&>^;aC2H%%8IlLNU(Pgh(5J~^`8rM&_*fUCMMFK!_Xpal_iUYK8wIC(@}zY&!)^@u z72d)(JJQ)pqxdEX>6bT{{iHqp;M;D}T2$qqs2Noyl#giJ#{E?Htwp6xJ%=g0T4#^) z{H*Pff%hr(__mH`<%HQ0vZM9%Mb!nI4WQ`qR<_P7+!mNdchz{a!=Su=2v=`@M6^DW zf9&_b+G7w>0*rb-kP9t`Nxm458uWhPYb9x9N-E1UE@6QEmCI7gk?GiJ%z2!w)yl%%Anzq*O@($50Ot1-FM9SOU8xVeZ^p^C^XyGbyHH8^l6VcP)$9v{vp?kQmA(S?q%q zaHp^Xd#wlTY2Hz#%d2VI?jR}{j2x_UK;?L0++0p zCg&d)PX&OI-~@6MW;0yLLiHx)fUqSi%~JLDXkThA4cV+Br8|q%nHPX#Kc*4(cc9nq}T)lQEcB;|3F zL@iN|)m&Ak$L2wJH~(y2;URGHXF2@vXk$5TRznRiH=?yr!ORgNu;6n z5ns-c220RTe+OTtQHA>DqlISMxPb}yDKk?IJQZ9RBo{;Yj@3%#V zINmpy`xSWel76QIl7bHN1tsv_O8S-31PQz^fEjbV3U*_-(hAS(*`dDRJ3v4p=dGl7 zs?U2ts-9lt;q<-0G?VZ5yDuy1pP*jTMZJGJ^Dla8RZ*3jd!acOR@%F5M!K#^`vt2) 
zeuvWDzjvJ9wdNIddShDM@_V7mNYv{0mh<~vI)tVCUZzeW{N5Q|;`lwlEKo?y^Lw@g z5`G`+3rgen&C&%4{QeZ2m@`#y#P2ut>=#*X0Y#g&$9OAT%gDS{1IEh+J%tS_Yu~U#}HHG zKNiK``%V&JB+9Rqv$hs+^#7>i40S!`6=tKvzb5kxV}B}l&YSFa6rA#m{g>p8VlMH; zBrw)0@y8|T<&5qBk?FlTY@9~bJ3bGUcy~R!h-WJLpRhQQwPB|M|16O5NvWTV}2&e#SX zx}9;(7+?XBsN1i7*8*a32nd+4e=h*>u>8A8?rnSM-|=<_y}jG|ay_P8C8v^y&we|J zxPnKot4sYkF+sh2Z%Wp*x%!)zabh~`ylC56`NOs5R|*3HMjBj;YYj+0v(Jy#bDlYn zEhxMW$gU>?ar#4X-1l5tp>aUHb0p+cb~|6$1dxs7TXUSWO9{x5D@{e`n_I!G*+!MW zDn#BDdbY8z&v6o{GPQ_gTv6;Ub`^ieAuO5v>o4znev>-gIqIGY>QUybnhEj`Q0a#EkyB`aOQUl31mkrNc( z48+@fgznDd&zw$3WsY6gOwYH-OQrYi`l6-55%tDU9WFqEu^Nn}=_EL!PtijfT?&IE zFTm~Cei#RKr%6SzZ4%X&#IJp%+mEkWZ`#j>=`?gwJOb84hnOmF0ODKU&k~EF7uPdO zfQ0E4XrNw%WezHw==v#NJT9|2?MaKv>;zoO<1$O7oQnLou^ykjqHs;iYW8!}$hOqk zt~HkGrNIgtqWN`PI$Vh6NX#Z!B;*j zPUGAt!-~tj{OshMJ|_;Ab3rK>GJz-3DdYWhoWG8-SN)>|WlO$ZtcEy}g>fGYx>K?E zGlrDRm2>h5mBRUOEL$ss0cLZH_y8(qm z`?aDUUVzwcq1a&p&Ev>(0%)wP%`KO;0@lIKg4^GUhmChUFN*<^f8cP4C=4)p9N9n= z?ssmp`FT)OxhV9Q@>eLPOsZ zaE7Lt*~x;xl{|bouJxEQPX$4?@2*d&59kL1zCilatU=QPA{O9TpDuxl1YWgpm+xs= zKUMxtQ6I9ei#CbktLgE!9bs@Lf9F+Swtx0Z+fB)K5>yTPXOF3dUGJZ5_?iV>QUB~d zco^@W9WN_d+CMvFLmrmdcsW3y#9>DSlqh@?;gI6~*)cp3RUZ`ieE;m=o|d>)|L!X- zjsU!cGEp%h=Aw|Q?Vo)gu9UhiFo^zHGjKJZQuSE>Y`mVm$ukwbH$|7X055s1`e%I! 
zwd{|CWb*T_TucABEU1>liR#op?i+CVSYfwI!8GE2HKy)g;xZArn)8;vv=jvshSeU2 zetKSLhuXG3-p8Cthl|Yn1fezCA3s{EFOlDUe^c7wGE=am>@s_Tgva&APj|M}JTAU_ zSM?}NIlQv`KGfmBtVT{dm^Y~@;tVj zzqjBd6wC7!JQ0!K5}&<1pDS0^^Y?VeceA-KCQY7yDz#G1U*!2-0BClkn3z0I*R${V zqEDsh3jEEi&|ElXRL+orpJiRB*6DS2+MHW-V15MjC#(X;MXfnqR-ZBZ`E;rW_ zYfkV(rfyB!QtmVc*kj608a`EEMXncag$t+$;SxL7VBQ692jjT&tN^;MiVLb^pf1N1 z46{JYONPy++S8W$zm0JmfxOxScrSRC5+9|+9{U@fk0h0iVeoX0>R6?5;A#p}AywvY zu-(RTmhml5$I!qr+>houjxTDcc=2J6ZYsY!>@n-kd%{@nB12zX$pbt+kk|D zx#yWwpvKj*pBDDfEf%)QYSy|*P!O1}DF^fOn4SJ>T!+Cat9dd+l(VIfWx?EJ{s4ah zF#k>(1>j3vCim~f@-He2%_3S`j^i7h{PL-AV1eq285z(gD zgTTP*2l_R~p6U*)SMbZ+j{eBxzn4Q}F$%<|wO|!Af1*$CvU@Wl4=ee_`##zAVO|ye zS#{(>gtLFF(dT@?>qc$`@)7S}cUN6A&mfsNXttLQ zmwmCnYvJq|o@+Y?e2Dj z-zuA@W4K-9o{r^qDYpUC)k-SuWL3RywRj{@C3xwG2R2vo5frtAbwta|70vT@euT_% z)Hrj{Mz&#^F|9DfcqGv5&NJ>iZK3^kJn=@p|F<;UH`FC~Yn|44O@sC9^={<5eyl%D zO_}oWom_Td9L_w&*Q&;z)T%g$6X?_=kIw4*0IdT4Jh_1T8-wb`Q4=CQdV z@PDd`V!vZu4y(oOgqi{Jt?FQ|`7Pc`$0BQM&6V6MNkM*USxvFA_c$lI@HG-1uaB|W zf92z#XX~ym9yH#301-0zrc3C~y6Ij-vTIZkIMU|WR>MW?tJM`Eaobb3rssb?Ta(iy z8OKs%uo1Z?+_BjUdkUwLr#XQ}a(rxZvG=DpMe^Ij%W zAJ##-c`wXU5oa2358RxW(?zJ)KW9s4=lciKf;*eb-0d11G9U=;nxRN+vf>JLC$G?K z7?pAq{AVW2HH1SJt3U{{qjSliSA+Q*V#+fMnLkOtQj<}MH3)HOQ4W(T#f4D-snF2F40FmaqFu7%nZ@BqA1ZVxz#nxLJ zXF=iI5wI~9o=-GS;Eo6=79i9(i^dMXu|}>)`gT~2pTU%hYnlcGyeOV8d@RAC!cA>v zl(a)^bFyKM{$>>*j%S`g;d;xjb*N*?RN(X0e744{#%y)$DnHz=7cR(((a!x`-#2}6 zr(Z8TNO|1nOrQA%i2_o)tJ`n|l4}@ChTH6c{mSGYybzMRH8FM!$|-)ok|jIK26cla zE_1B?H{TQ|a5&&nU-7;_*3fgNr)vD3e#kIWB?sU4$L@LzpQ+d;;R)nNHK%~7S ziI9E4a)9vx+sc-i+^){?g~9%z)99YUzY)~X%1PF_yk_Ub;u5&HkJ@$|R1RvDASd^M zG}E7Q+d4M1oHTX(P{PRy>{nhqGR-O9Q^Ya4qcATFdek8K}AIQ&zAE)tiX*oaN z#h%6a`A=yS&(FmYDEK+aXPCy%*^=u-etr|fZ+=Y?Mf{xPY^Yfbt2*Q7VsK;*_8;HM z#{oapkwYRDalxY(ckyDIxtK*jko`|HcHx)&T3%Seap#ZlOUjJ2h6{3gulwX0E@9wzEd83}-2c0*CvAFuf+RRa+{helKzyszQ_G7|J?+viPU&^N(g3-o99-MziS*dQ$4AYt(mJ*X0E8< z_623WEpoi%LsGb*5Av7#B0tD3*ttMj!q+oFzdr})jN++n9Usb_wvN!ldN^3s 
zp}46aqf&cugs#l2SV;Te`EoULx?5oCfPjJ4Y0>6dkeX2@_(U&I?^NA+2*M>WT;UxjNrw#W>@N%ROBuG$|jB^bv~ox{)I z2tNbCkKjrv^}wXT<7cSF&qt&OKLgY7bM<=`KLQE(Y1hS9$TcQ2S5%4PXQ~i4)k+n7 zS)>$6_^~g*Pkn?RJ&NIH8qRwhKUsAyz|X5G zuV(^&t_F_<4l$rL3Jv%w~P4s2@;UWzcBeL#Ls#-zj6F*FQxk>@N=Ki zPjR7+K=oL8R>(moh*g=x-sKY~FO?y5qEyf!0$tZ3)RYRLkvP7E7C-;^2Kr(L$P)%b zzZI4|hIU2;2N>GHD}|?5Ce9gmto>L`eD^ON#!N**7hH^lQv1+XT+Qd3C(+0v9^x&T>s%U zXVT1IpjN3_FxB@Tc2YjKD=9Txhotb=DqX*ek2B$BH}ILs-#^iI0mWZfH$i+u2A7K( z)_hn}&2o-S=!Y!TYsurfkrQH9{cMW{^c=QmepMQg!@+XH*%hg~UGHR`<{1J#AjWm) zW+)JZ#GYCpO&8q^zUOd*lZBJ6yBRM=TyIxH)Sb|pNX9>Ls0%IZ4|Wo&C^yTk#_=qd z{EML75(`UUx$XG>Q0gb$rDy@K=5q{B&|`ex86k6^VtN&t1jqde2ZNQ({Ypv&dyv$* zrNO@`{0GDE|I9DNe|j8V!X5w7UPb(uHY?@7o_TjSQ1fEF3oQ>21jcvp$y!0+M1iIP zy+S_ZvNhLHPXJ6kN+56=PH>^coQP^F0l<;aRrFo+R#H(302m@2)?XSa9RTNpCi59q z!z~7$@9c{yI6GqYQ)I3}vw*?X&KKQ9ncb_TRI{3-0RT$kZF>UcO#Y!JIM3GeIR#-$ z;%%eQRGyFa#E~sN;J1(H{DAvT;%&>tyNdC)hJXRZ@wQdCq>8r{q$6VSwq-O%jcO^$ zJb~&~>}E?yQ{rtsRzRgoC@26M4lKHl~OaC81MmAZjM0j1s5#m9%6 zB|s*VAMs1#ge7iTt|`bVFmU{W2=V#Y?9xtxE{ibDkg@yS|5zEKKbZ$3HYvG0<)_pn*Ce}?y z&%CKZ;K|+&(7*xyNWXZ6ya2MR^ixYpPr$_f%I7wbGG$%q`;1E%JSNt-J5!L zj4DAP4+0Q?kHdB`KI6c&T z+#9x%DbZpl4>&y$AL3hMOvL1I`iwpQ)0~8i$K`L7S1Rk4l#oDcD}RHeBU9w>GW4C< zkkb6_vpR3-)76gGhp7visyrh38>T$wD-pceibQehyZtd^;|m9288i7OPfe%)N%(Pb z`tK;MT~7a6RWF_XufNt=?F0J1m`wjqkou7Rh4+i;zl?&_)RvMG(&#@M`lQjn2l8Sr zp|lwNCrRZ<|EUYs1^Pb$4$KfGqEa`Is8jks33|Z)De3etpp)bDzZ3n9imcW_z=7S_ z0h*)|e~1}}W=pSnK>Y60C`g9C*zQcikJZuM=5(}O9Qc9!Z&;Fh0^#=-0r!Y~yv500@S=TXNeO9We*(Isk$r1q z#$1P^%1pN(xPH~I<+eih*I-6h$o?YSH%BTFWWR?*#c(ej&wL4^!TCQYrPE*bCQkp^ z=*s}-_bls_=*_YLsUjK>eR49qdiJyw(Z=C?i*KR!XG&H3sj~DF5 zGb7H}@yu=bjV9_YXDvRixw|B$tK*u7n#W(}^9mz?bQ9VrMMq`L3fNaRuK5g}IUJl~ z9;E_sFdWi+z-xsui6tfQabHQm6%aZs*CG(D+e%!VJz04DDsV7I*&^*^7u>Q&bq#nm z6P8wL5e&V~IaAjuqi-uI3_Y2oafZ@A{k6y&8=t-oJhJ|M+*E`aXhFuFQZ#>ZgQEO33Rt?Z;};iIzYjKQIj3D9~c5=FGu9rR}Sr`gGn=8M+j-%K=zW3e*%zmK(J z02lL?H`;SjT1G73XQO356ndy!k?BZ3zt1h-5yj#vVm{ zSRtYHd{|Cv)#{5&vQOhfJEThELoI@2E~BV`4=wWh;lnaHI$hwyV!&VyRw8P2D~TK* 
zI8Vf%ckwS!lgS@*tUd2ysHkj@5Ye6sKy`!!Y&08$@i@yd7%g)L$B$9`-BMeNdaB>a z=`6pFC|DosM?Zke9QGgXL<_>%+JK1DyqNA+QbGa|ZU1c>E0r7{+?~AJE`HN{jU$ z2JusLyi)xp=K3bL06hEnNmhBRr$o^4C=wOpxpcnEA^>YHf_Rzy`o~x_w+q5Fbw!_mK2Hhq*7?l!~asDC0YWF7*IoPRt9Ly!`GT!wA&e6jJzX;Q9! zw<5<9SgivHn#oUGH@^}cl9Us2*VJGR{5!8x1>EV2T(;Zw6kBd2euiD&0xLWLdP()0JyBf5e~+^CpNQwHIZ#%109baGot|PwqDMUo`U(O)2-zKqF}U{?Bog$2zcU|9gWt^ z$8|4UZTpY2kz1rAbcXMQ03yU;iDe})$d1ol01d;Er6vTj*3cP#4h)$?D739(OD7%; z>O4T3>GB*aGznJu@wrQt&jw0L4UZy$8^V+A{O2G3?HYnsMoWV+Y=Is^Q==@v@M3baCMv60=*mO&qiK3{7LmFBU~DP zUY8ID$SCK}uE?SJ1D3C?qqpPFE$R%wpZ`GY@?y$F$@AwOKx0l&Quy<*_ziz>o1`^! zTEKwY#7_;G%3b2N-Q8*qj?4^C$8kG@TM*QpG=14_zOPu!ad*h1B)rqDNZZQCsbv)o2b5N)9YEL6eDQ>nCVzI&bS2Ri;Hq z7>Ha`1^G!(sceF0V>Ti*ul1rUXH(W z`62%HQm3vAMrr*0hVp4uQuzCd@DlM?yoC#kpebbUDvLEiC0G-a%IXD3I_F>hw3(cW|x|Lm#C^LLmWbf~xL$)u9f_`99dN;-ef z1exYNIvH&pTRZ-~#t-p#u)=#?=I>6*=XNEfhU<`2{DrAY9aAO7F%br@J$bprJx!2i zKBx^K5eas=Ko^04L{7k;L4RcOhaA}juESx;b|}b4ETGltTs~`IWuk=CW^&~X5X9-2 zX4)F-Ih>2}EL?5Q!Z(cb_!m%46zq{$P6ChZc=tY1FX=qK0%)3zDX`^|V$+*8MV-Y%hR4gj=wc_L z=`he>rao@TA;dycdr6xAXkXweHAF1dqZo_jKNnia5(YP!9R7KrsWXsGmH%F1Q3+hO z{lihxFzNk>yMT<@finH92D1hC;_olvJpApnK~X>AIAyaO)1nFvBxyzd9t#UH`BxA7 z!uasKYA#;p~9XFXb(93h$695a=_vtNqy_xJRL0{-4er9{3T zB^H&&-$|1Bbp9>`Eam{p4EQ^Ud-3;5_0zh{-wTvYHzlQlBS{+Yw`Ij}oFxUZMw&LL z&iS{8qCn|;f!`Y6G@PBG=8vcwdw!vf6fSv$;+2puv%F^BLe%qUF#AiP5{aY})|fDn zSWl6ny8qB zeJsAROwK;^-5Fs>Zvm3y<10HVk6V=p%6<-o1IoHG+J0cQ8bBx8KAKrz)XO`8;h`bIvQtJAuZwyM4c$q?Gv{8zBwmLdprc+h=nx0za!> zRA=)%d_eYbpk($}B5HI!iQ)uiJVOiYu_<-B2#{0idb%eX(dNleq@=fdGLQ>FD^1Dg zMPqiBodHeadiO*wi56-AG#yhlhPC|D>;8j&=XoQ~%_)E{lYi%6tMt_I7^$!4)K$6k z6EJgHQb3KCUNqe2?#iN!lrumzsUros2XXL=tb1wNve6o6LV! 
zZ<8+Pqh-H#-?Ea0+cqUbXlZC2x1y%@O6cu7O1-Fv-2k^0%lePM(ID5`iZ(jl7VRy< z!UAz4>qwY&@qP9Z67g8f-AgEBz+^9gFnnieGQMH;}>$2+y>`M_$`$774X>bV2)cf{t(;&he~MvAfadC z*iU>pp>#wm-t5lD*Gf~BuaGkD%>~%wRqngt_AD>y*+^gZy^3O$HGm4n_Bj7v#y|QE z{15vo@vnYPD*ge|7@-_70Jd^&xE20;0p>+Q-Tz6VfXia%SwBCAHhobMIQ!Z}5!e<~ z#YJGT;%_k#uzSD%RkBxxAg%6fEEj=SaH3-(;QI@T$4d~l6(}P?m<5{xL6|8g7eQ$B zrKJf%Tb|k^2!g@FnPXLYM<@tO^z6sJ?As*XWzCQv`15qVj+PC8&Q||p3eeA?4G2}e zwE<4QEBr7{Xo?I#WGiF`WCH-yL{{c{18_3xfb+kG76|~KX_o01R{1m4AxMRM|8}9B zJ;f(VQwNZDTZ`Axg0ZS-*R_6cl2{RPTA#d8@)(MCgr|bGPZ2Y8zXf_^QTWiB5~=@X zf1z=u*x$%)8DeL;YFo>zueKQPC{go!1DL`Rpyk#4yRwZjjN6@U{vqKi1BWm^r5xj* z5d|%r4|3Xr^^FCd2;ch&YZ1Pez>@&qkK%#~-xGXEY52akoth-zyBfJLEtJ#ob=wms zU-Q9Vd3LHVdx2!PtO*w1QM_7#N%J;Rn8}~ak(MA*Ru~T<7*+s=J)wLRj%7Hs@EP{R zcckpl!b5m;u)HV9y7RqtFi+?_ih1^IDpw&;KnJFo%E+gGKt!h&@LZ2#ZhZ#s5&SqC z+l{js4j`N;!b6d7C6*J?I$A(#yTuLCH9@Jgrepx&s)rzN4F&q(hpCd`hRvfD7^~2l zs9SunvUv=LTov4gq+Z|5p=B^&1s9r94<#j5%lT=Bp{p|aCmJk=KG1iJmg3*|keJm} z8kfGyu0kdg`Ho@{-wi}yP$c!Fk`LM7uS0b;$ACy!*Spp9e-?Qc^QW}CMSwi+)8nc~ zcq)*z{+-|b_%VYYEi6WcItrlhRHr}jVy3>rZc@hB?%k@IDSjOH<9V!dC+KP(!L7-8 zkh##hzVGS3DUOMc_r4TVdO|(F@Z6-P-hN$IbgtQTJVT{!rR=N}aj}r8u;qAuVbu zhO1SE@N%HKu@NMYy`vmnSY5QN(k2iq?@Mt%Wlm4^4LUqG6ObDAbk|D5in*>aA zle&!&Dz|q2cBCgy7t+dcIf};#xU56dgt%Prr^>txQIgA!OvU9>1aBf-w&RlrxSU1U zQhIW`5w(D_khTz_^+Vz-M>bDD7+x57I$ug9z$K0D|IlmrPb`-r; zv>x}}Kaysx7Gu8j2TPuIGJ^~Dl4KF>3oK$=#A!W>S;W`Woz!~VW$3+J@p{}xy_`=- zEG*4KyUA%RS9H0DwhG{wQz$i9k9&->!sbEP2ghx?v?fI!+Fi@PHSvF^OfR;o%CWDcVKEgt?Bd0xokDg?4P@Mx+mQ zR?yr5bGwd*>B{CiN(vrMC21KR%I3d=Zsx!5Zk55s)DC4(A2GdI^-bKL?M8H(XSiL@ za5Og2Gu+L8=Qo~8cQPnm&mg9Db@F?oS^az4{x;R43;VQnyOCNrCqqeMBEF7#(-)f6 zC@S3Eu>bXz7oIB;%Sq$rT!=m`b zMrQ&(^By6naR0>D_#rk922EX%PtV4kl+W!-N)6W`Df54#@#K1-m&v#9oIIX<49?CA z?*u`p*KVfV^zq~mXY#t@c=F7E#>L~w)l@JXPyQ4MhQ4)yd=#j)YJX)(2?><7fu|=V zw<+VvTO(uUIt*rPJUPox5%yKMoZbQ?!;dE~16Jk;C8APylPHi3ca4uHF9#Z#{Fooo zmFi?Xc{Q-+eK?+c_Dz=3Y2(S++iX26j3+OTC_N?yw1cD?soyKZ{}R|nE$X0#}0ls(GcrDJR$Ldn!ijeFOA>7lcWcw+eQ}IpN{~KIgNtD 
z{kO*QL$sR%^C~qf;`iOk=X**@4Ku<|BERnkvzh#E|MP|Tz4>ac8l}oeGQDQ70XwmbSu#&Bzi7;aVpC{#s&uRT|h;}`Mu0b zPfKE9Y5e}Pv_U$**Mo*8L#d&BH}OOKUI_ClwJGBFUjd=nT}k2h<-(8KZ_Rh>q4r9c zU>1N`=6~;KV~k4$ch{qrv@6tu#4p!V{9}o`I(uR7-bM++-n|rqxfqk|5nw|-Gf`%q z|2EV!PM)9}4NBm@Aq$Cp8UGa$TdThP%HLXAwUZ&VwwE#&J9vS}w@3Wfw=t3b7|%E4 zKgugt9Pe4`1*$o*yaWhX$LNnz5b4`*txIjpxfI;7zSHoh%l!rckHbz7V4IWHDiGio zyv+fYW;Z1T0hf_9ZoTdKV_M%c-pmEYnS8_dtnWDioqDlIFqx6jIo2hI8xzgmGeXAp zbE^D$yg<)dyBaR1-vnkb&ed__{A~p@C@0&*;w$A!c14S?4mJDUVB6J9xsb(PlDK7G zz^zXrZs}2sTR);Kw(n^3>pxI7$ZG?nv-){QMIoTY;_H)&OJJ5AxB5Dz1+_}(<{bfH z%rn+O@A$T3SDs&D*A@!vRUSa~<5n9hql=Z4DlUg@ZajOusYCl^^0(~JDSv(o3BBNX z{(L6m>w;>QKX3CJ(Nl+CY>M=2wK-i8iq2W|+v%0t5>hFb?E}I^a|$eB(XEsv77;(4 zhBAorC##48jo&%3tTg@{2i?MwZL5mx=Y0^y^tXj}^mP2$mS5t}Bq6iQ{5e?}8N5dmsIFytDDK{<0ZQPM8UZ!05IK})Oxrd8kkZc~L5s6MtEGdnP!f#Na z1SwkyB+N;+$Y6hnpYcmv{IkGYIZWoI>^5ceT_vT8XOlEiioQQ?W&y)YegKn{U_nE+ ziMSNhU`ogQLYisTu_XojhRf(YSrX&jb<2K%5Mxfg=`6b^iq3=tO#$(>ocaSu%<}5;E%C|cFH*z4;>HEoV zI;)~AMkCw)FiPsxnUB_r26#SCV@geV>H@Y4qJ5u`oB%3O0V)+3BMz`6c=m z)EVjmeP4xA<|rkCzW0$RpznC>`55%X6e**(0R{7)wt>uP>)B<~V;15GX;QDSVaO`T|?d?x$Qq^aed?VV3HTpZA(f>Bl{?6oRp>P_aGYP z`?j=TzpY;Cn?Uv^iEL+!**&avtc!V54lZQhn?#Z7(4Fs3;V`#LQ@S}t%phv!7gSx& z#ryzsvJU}+VbBu*6it{C7Nl=Wr1ng&PhsjuB+Pb@oYDxY?} zuiaBUM&~EoOOs>vX>hK0{of$-CKqoGtd8^LpH~)1NKKPOlCKuDRy*L)a28dWK89NiaiU% zo!MbFlV=s*3}5?;!12cDFoogK%%Bmx(W(tiA{s51-rP=l`fYsx%J2Xvw4~@jjt@nB z*A~EHOLuj3Q!(7YPSaQzXb!x>Qm&n>;efp)IV1K3&dBfxEP50J%OMy<2NpAgTY=;T zfyFN^{Gk+`2g&vV4_tfd@}#m8Kw|r?-6R3&J+?zYy;+FW==hpM$L`d+r=C3^C!Er1 zs0|pgElF!B7zqy}lR{ZLynK#5$mGx6;=hWQ4!J8|0x!RkMxNqU5ovvD1Qi|JXxFKV zSC5Dn6(M8w%Xiu;6Eb8}F58?6nXPcCOZ3Y|^17+cv4W6?&d0b)0dxvp;IF(asSXLy zvHE46j1=$yK1HYZs2owRm%3z?^tQ3jXYLIuYOKmG> zP(`R^FG;$#4g5onW%qA!^&sw|R)MEX1exTiND$j3e`GB!jY-7yb zM66n?aO}~)wxp1Whm`h`#9aFV=9-APdK6=>hK7UlPK^9L_#4LGhe>{+-l}X;X=(i3 zP8y{X`%h!ddl-sZ5QK%(<+DBiZr0fKaNF-$|&L zIDb!49|G!al2}w4fA<$!)A@TfEj8by%us%-_#qmMP7Ld(?cZ{+@_7tbS~c8;r0_RE zA0J7(Gx_H?K_7=xYOJs321c`Yxd}E-Gee-hW*)8W|7`1gmNoUYD@DWUI*%~TzOUII 
z{S;T!Uzfchp;gXWYO1K0_}`uE6k69=#pXOT&DtsG>o2wpp5bKJUXoBL?eH zOong4q}utD)6g?<{hWnrs@u+qC8hEAI6*aCKi>!WOn-`O3$_KQb0*zPb}@Tf;WUdA z>E~0FPX`u7HQYydar)US=4jYdd$yPfKsA$J`Ax)F8rh9g($+EXKr7J}iX?cX;o(JA zO*>T%2HN!+N8Sv?GwR+1s64-aQ*;Xk9gJnOYw#cU`)iUPdywv4Czno{PUj~ZG# z>7H?+Wqwx`_^%`8mniVt2#o~eDbvtHarw?l@WlVb!czG!ZICYC_W>r;pHjo|v^suB zz9)l&uIukplurj1$MRoz>XiRL8UFY9()`Dd>V*HOwj%z^>k3C#rPu@3_Y^F=>`M)p z^At2#D+Pc3l*;4^`M`A9R0!Ncr^3lUnyZNa5=`+wv7}V~OIvipf52q=Q)DFn{IL92 zl&(UkA}0UJr$bJ#4syEu!-`+`1tBWn6(`!%= z(M(xp&Zp3f(1<#a!$6ct?5P`Bo?Rku(q($z=ye3@*{z?_OmYY|uDykzFcTf_}{^ z#|dG|lv&H!zeuvvWp%?ASaJa+b!_Tf?O$;(&OQU1Dm5eGY#y|ky_A%iT}@IKsCDcA zehqDN2oQ`?6U}@e%KqOQavG8gQ+a^PDGLORYb`&+#dLt)yfX<(Xr783DjP)W?LVpW z*FL(M`TDMjDNQ)w)Q7D4`S%0-b)fz32IOB6-@rlJ3bmGl_qpNpzekXkH4Yo$g*F~?jN6VKw<0`KbPtIXKP*6z}-J?+9EromJ(*I zZ0WPk8Fz~%d%^keT&CW^(o}PiExne~YbgCkh2xKJ`{L!_@vHRm|B5OO%dZK_A4mC_ zx#F=~8oQwNIS6PMi2M)MrseZ>dQABPZ-tD7=U7n9;3t^0m(Cp8rbssQZA|$sCg(z{ z4vP}QT5GWn=keqUO_reoW@mnNoFPOn5T{z73wZS)ijI%W75Hr(BkTvQ&lTPd*4s~# zXAh|ttN&&f;EpxFkk;^_%u(vM(~xx&8$DhJ@_i1{# zf?BG$26Z1Rt63k9A}noHi3@Avl1%Ul&J2fxnf$`_Y*$zQFE1zZdAq1P$4*orkloBL zvTMOwe6gC4cNg=BiLjcq?k zVqW$|M+dvznC~F~DBLzzhAqlfEWSvg=@*i_bCL5(R0+w>eqO0)x&VPLTtfV+a3cp> zoFx|@3_L9?*qG!9b8M@9X06JR4jX2TYFfsW1Ps`4c7OO8Rz^mNykc|S&j(QEGn~ne zH6a8}(X$So2?G02oAT1iy2eG}Z05TU2L2-%qwHrIfo>+hs0ueplbAQ0KUocuBCWq`i<1)5o!CnGZI*@>6|YHlx9RMCbV186y{%lM=CY8C;j^L zp4Gy8P(AUy6DVotU%x5Yvn7?2anNK)b{`fqn16kT{B@Z5HTBa{D~=)a^RHKfDRYJr zQKKh8LVU#0?yu0YVzjBk(q-~f*0tyPO~fYIJw}j;W(V&YE`qr=VUU&Eo_b_&xwi7C(1L2O0y+DqzSnD{pT~9^uAi zXBC&8fDe1V-vX(HlriZoz^_sKDR#c!`g(T0T1p}Rfc4-crHSCA=zPD@`JIcz<2NBM z@&9!I$5eORy7PNzSZ|cFZ`;w9OZ_KU!b$zhcQQ9RF7$$^fS}4;uHP}I$?8KI z8yMzoq#r9!I8}D@D4@nPpKBpvCUS*iHS9XbcsO$O3uFw+w^MZ#uDv8V8TJLrsy=|L zH-iyk6@Tg&2UYPj=DYcgqtv4StA7$)*pJ6eO*IX8(E(fsN_kL?BCipTR@Xtw7NiGO2G zrVb-m1mOX_NXl z$NK)3mQZweXwor`#?a{nnPSxn38G7}K^0MldT0#lRj zi8V>X^j>FD@AU0wE<-@fhN{xvLrhQ7vybGXgXx2*P^ESkW4dVk&K!kd$>d-D6zI-! 
zoeigm6GOme5p9!00gQXQ>DD{J2oNzLj0@l&J&2@d*K1i(1+0P@h-=!Ge$9^SOlVzz zL*2-RY+y3Sv@(8a*Uo0>$*ty#TnEJJA+D5L+k59T7c9_?+ zdn*+?P5QTNmo9BIcCYXtttM%7`$?DSKh zk^GFVivT0%|Ez|s&URw#VoT!Np=37P2SJJ{>-M)YN6F`K9JW(_59W8yZ^{#dexT&@ z>a8WS*ncc8Pq5qWT|@RTdIAsBkD9?x-+|)SO!rGH?;5+U-gSDFI@_wd7G~y3PuqDs zp55o4l0-_edwDs#e+OtIc8@Sup_&4Aw?bx#c7LB?8oN)E=qIv!0YEYPQbfS+U7a;F zm%{eW*nO!IZs0#YnvZ>a7t-A;nw9roe+@P>`R<>ruNhnqh`e96|9ZA`)EDi)Zj1#+ zK@|00Zy!&FUF*O8RiBo1Ad9Sw|!BsA+!6p0*_c_23GzuvDt1p?GVg5>+J zH{;{-%nJB#B#lVh-igJB{Ezk$vkr>YFX=IiM}Uvnlagco*Sq!Xd#VkkW#DtA#uh7c zyIv;hzqUyq^ted{zJ}RdjECUNxV+&24dcA|Fu4#baDUnC@doV*CVAiV*S?x zL}yy~?uo^DCs5M%Uw1f_?^61&lW;oBN4Ua)EWO3On7OaQ`Kc9$aK8V#hcfuH5>ca# zNmSH-Em}Vz^?=f=tWH#gR>4a@f8`YA z(}Aa|8ty|<;yJ;5^JW@u&H;Iu{AcgOyp;Lo(@rC`i#OO6Nu#`zqsp8r1D-a?yutJy z$LrLdFaVw)!p{r{U)nOYZaYBM{cUZHmbPi|jkax~-ehX!A+g`jU?b&mp)-f(Ww;a<7oDg1Rh{?r2*Ata z_k9o6Ih8KiwNTQC! zEe_6m8U}{o?|^{6gS_T>^|++M61Z&T>D$trLDA*%bUa8juVBH#^PZMCb7uOgpID(y zkvwgse6Cee^ zVzcKjE&nRddwK0!$s#kZK_tM3-U$BR3=JD?Ob+XDwi+C7_#N@MY!Q{ z9-l?=qTtqv@qH6Xb zX`r>k`9bO~mCO%o1iFlW6zDsQ#vp|e0d*t5!eB4`QE)*0FF)0n_x0u;=ppdZWAOa8^dr1m@+81c~aS=H5D5e5V!;xm0ScXgiN0T`kXFkyK zheL9a{d`4oG?Dm_2#_ig!~@Sb_|Cp0R}?;c`ij^=f$>l*moivmUz``xqg_w9PxKcb3| z+tBg833{k;2bScXK>58zzw@O>QXKE+z>GP>mK3a~-B0}tl1&2z>6M4zeqra8%HkVJ zM4gT&kt29X|K@r?!}+&wq|3 z6|(nI7Qay<$o?+mk5r@XO7XrWCN`75{3{$v&-fa4naoFGKW%{s^5)$Zd`rQd(^PykX8NnwtSZdJDk4 z?{7|19-Ar=3^;{E#SA#|h{G}~q*yUZ%emlcH2QclRMnYVyfAw27BOdM1RoN}o$2f#)vPe&wGApvRD_HQGdlRh|hI)F8A6aDDe zSfXy1w;rWuFYrvI*?4`027|kz0pC*j-V^D_46maUIT>8*%TCu zC!NGk;Vv%)mRk8pzGo|sU6lwHT}q;2`7W_<4}qLae$RiV(|;NIC{F)ra@X9!c#(bU z&J%@?5Rz*1Z~2tzs`kJUoi!vdbxZG{*m6_%Gpri{@FQ@r<4hPrI8^O@=dj|u6PREF z;M>a@r!e7cgvfkI8-f6MhkRx@u#X~vopHbiz;{;$vy_M$bth4r110p|6H5sGUrML{ zDBPtu{kMQ0;J;e<_Y5#+sb=XEUwUX~6%Qz0eJ2H#Q@nsZgW|6tc5#X?p-8oFa!KB4 z6rUy4ltS@pM8~vHPC)UqxEIBrhTC1C_yQnh_EjQkbRCHzikHy)Nq~~cAM|26y$kqB zaeCh=Em`i*RUu)Wko>qqI;(j=@`1@DpNPvI`V%Li|Kfxn7^9H zUxv7t4Jj?=Pkid^iaAVD+!@-vKQT;s%vU04ulQy}d);aNJv=ZY&_wM2^N;m4*GH;? 
zer+&iQ7!|Jt`VfU*6NCx-yu*t)eb~MWzJxxnV_6Zc*TlAkTIZYXB z?E$;YCAO%JAG-e3WPT~x5{0T#YY&NhH+CUFG6yIT)cq}qip4La-xAoJ$&YwGM!!F( zN0>&xq2?x}F1-c;{gx)v@6UM9{oDILTe!j8K{0UxUgveiz9so55YP_IoDGrE2>2A> zH9zuY1zpnJ`7IJ&4(8Gu010n&PHn3)sZt_HIGRLp5_T;Z)fwjeHz_D= z1<6M7r~d6hFy$jT-H`BLfR$eP2!%tG#luPj2{$8AK*A7zBM=+(|B{Gy<_|;2JPm(C z%y&zgRx&Ha%Wq>c;dY@eA{^ttDC0Q%w(y!L|D)ojrBUuv$$1Lpehh@o<2b}n&dcu{ zeu;9m;HfK=>#i&=QX(k#PnZx;&Rs+L)gm?Mzr}I-$&pT@-%#_fp`Fzr0N$^Y>GvVJ z)ywZ&cy4h5{=-Yk`6c-$5YWo+gS0S>fZsqkOsg*|kl$Z9D`lR6hnEqH`*+L?dZQ}E&ax1n+U2;W|Nrl96SFjG_-XukUwSdZvk&TO6~_EnKsAK z;?uu|M=~EJ&@VUfgF&N^9r}R`3O6z{{SFcgfZsUYO>{b@Cj8D@-rKOITimWNh8^YY>Xyb zj#T|ui7F+t?~;WHX8 zjHhW>jo8p9Tue6fcjo*h|~>DCXI0fjMT;b}fE#&TZ>Qqin<1@mlEihNPlGTtu_-Z2w}J z)PS#1!XB=DLAALJ>lJLoF^zl6f8oDEgD^0D-S+FC(~MD4IPw5V1CVrvkLBnB&i{Do zzl)E@(PwddT!LW7@o}~cMhQNS*E1K<6ifn$K=$5(jNoBm5`)OGg4tR@Q#9lh zqw=W``GgQsxZhF%_p1abOC`eap*gX@G@$%Z3ML&W>jH3dE+u!Y=R9G>Qw5aA;d?z7<2WpE7!Mc)E-=Rpfz%FDyn?o_sxK}fjSV|Wk)*@}&Z7}# zHO3(p4=Bi1z=r)4GD~d%lIr6DgOta8&cK*&kf@jeCG_7D>qPwP@pSr+!f3?lzlHeW z2G-2hdr+rzqA%aOvnmHfx2KaxRTdL{72cKNy#+)!PVZ$jN%U?h$vlnTvjvP4diOwp z%q0{Qi}z0Ar|7*{0kba9`w5U`hA0u0x`9N+^mh9n#H=Q5(V~#vV0a~#!OR9foPYcn zO45(hxbwJWZ@m%ab4?1g=eAI=HS#CevQ9Kep{^VGm3)cUkmUwIp z`9M&hYAhd|RE-3n+R)Coq@vRG^ReLFybQfMJ`Y1XKi0Fyd8X37ONA;m8Z}Wg9Q6M& z_bzaDO?Ur)LPT0aQld>s2?l91NC<*pRFFzqgF#A=wxJ-*7|-v-^;q} zwcYrj^WmOs@NFgqz$j)ZI|aPrdIi z`@#Ksy&NV?@JyKKkCSvXo8i_yYv*xZz}f{Qa80aj=O?TcTRD5!U8wc(z!VnIDOl#} zXnu}wcEfc`+~(YtyY539q?|)m{qeKa{pw+U2Z^AoUI5hBk!w_S8>aW;Y_QIl_=4K1 zAV2%?v(f$RWmZ?bpneGQgUW+SzAOJ@F*N4$gow{WNOs|70iQpB4KbfDmYtnt?F9^3 zNs+?Dan|!Udm7N(7%*P{|@bXGs#DS2!BM=UL=24TO~upC=KkiedeN)kx#> zk9V_tc9pUoqhP4=0+hq7L*WTNpQf}=75`J(0kREc8ePg~_r4gsGitee`O?#Qx!N@r z&&d84Im=kpjf;c1UCj0`w2IkifygTj@Z^=rWo6(Z?n4ol>2~*K7L2atOm=vbKaSSX zWVVYL-O70Zqc0)BGrEnRc{@B-%%=H}dkz%PqdkV=aY^siCAv}SG zD!?+*-QQ2wxTnC_-YSntWuy14Z{rnh@G94v6w7-7L#N-;3f9zxwA zdjUp{$TLz&2}V}o-aAfG#A(OK8_Z|KM-lPf)o?ncUA$wcv;gsJkLZzIxI2AbeFIo* 
zF2vfn_mmc!W_2jgfY%@%gzK_R`5w_TJMoyGDJvDco~+LAm*d~#xQ|S3>;wM?{OdXF z|A2oZMXDL@L-Mhy^gVx>%2vVcqJRmqNoI(AbAp9K^nl7)I1q~wv+%U-%Oe&up(Wdp z%EB`VY71C+B2i<+!eaneAb%%VS{e)YgdcI)rRHxIls0`;tljNpu~}VlH7q;~N>$Fn zpXxCWD@3P)gUIT~%U_@aY$l*SncVXG*X;4a`08FHjecRE-Xw6-H~%KSN*%u3+iW(R z%Vo#S=S3m~(S%m-K#eqg!}o@U5MyIapb8|Heg3!UFG{kt6YM~(gbO_d&TmNHSNe}F+@_+H#!T^stb{`0={{?ZV9m|O;J zw*A#k1%ajg)%j|Ede`Z?ZLM@tHX(_+P9`t?=~a$|UVRG>EbXuENxww>)m^Y>NqG;EUemN^h88|N zP2OwC%B1vHkAyVlMI2`FA>Ig>|<0%jY0UF1cC zkBWM1>rhbvHrO{NzL1ti1($k}pTSu32)@c~X0#Z4tMLx$-+CJEE7K%c;oq28Ll0_H zR;suRc0{aj?+bhIk^cl-i3TGC7e9ydQ(WMu`hCKapCkF%?tUtYewd$x;*A`LtrZ4n zP(V*vN|L!7rO)JcT0lrNzIu{XEk`|WSmyGGP0RTSDf;zsNg_%W;kj0-5V+6KuZ5O@ zW|Gw0e*?%}foe0k!EI8Q9M<1;D=Q^26*1dGD2}xk0REgj@JdQ}yivHh&a;vKF4&s_ z{RamL^_iMbNdesLq@D1HDij#iKN!B6r-74rSDKAmTqePLS?@SQL*}P?BNy+-P^) z2y1%*^zv7k+?e@a3wl9*x*TO?kRS5H5ESu5ubokgh2N6>AQkc>7}EgqLttAH@_hh7 z5`A}YsGI=g_I=4sWP8&05I7#5nz!*F9&)#i&b;J&irG%{4doTxzc0DD9@eI;RC5&s z$lLeX4s8L6ifSJlsoYAXx~)}K>8Cf}@v8(gU9NTx`17&kc-Z}{U}_eelFEx+z1u-RbdD&Aq7J) zl!J~necVxlb@B|RMsaqz%~-gxq0S83(n_?AoWNoaAzxrG;0p%NLz-@sO-Tt~FoKYu z%~_e4C z;mYhVllKQ-LSXX;Wu=BsL6`h~Jch=I8|b{c_kgj|rD)fq|C}O7P{VJciP#sS%B9$; zOz!*hz6q*`BHxm)t&Ae;Y+hcPF-$Gl@u?J9pI(aHug9zfL6iLW%q!_N)uE!&`0<=9 zPdY!gfZFCN993(_x-MRr#fkXw7GPJ-k5?hB`MI*fk6Xnzw;w+UCir1jd;F*TfDno~ z6xv}ZLe>N=_au}hq3Gp~hy(d4TVW~1A`tP3Bx^Ig>GAZkw!@hbZ$iv)Y+@!i|7JRt z-Vkxr@(mdwv1Enf_R3gdHZ3pH7_63TYbs0L#n0ew3|+@0MV5I5Z48zZpoonyZj-@D zr^xD1+nh>)QG{_kzr~P8fx7~R_y}Vmv^6^_D-1c0tdXN}N6#1JI!|(S-%T)g!TC&X z$GHgKeV?a=x89KdK<-V9x{r-`v_BEUjdzz29 z42(YFGMLpzeJ|Aadj^_=6a;E3N4;65jon$8q&-T1ph%$`UvoIXd^V`Ocw(`)WEZ7^ zy&3~8ESb!hPhc8wPox#LWgTAM&JUIpfZO&QbFv@laQEK~oX*2Hv2=1RXA&};NpdfM zUIl>rp5tSX(Cn?O0Dc8o3jufgF`$O+4(~P&K9~U9<(!$jp)>uz8?AkR*j=BggV$a6 z)l)LHk|<{6^<3{JiU^J%*YYdtzt4KD3}SLux4|@x03rE`VoN-ISMqwI84eM;{yyuv z3KuH-KI`)vS&54BKw=M}$iiMAvRIYph>{YK#j7}rqVFd@Mv_q54EkWFL>zM$ea(nH zH+fw=Fj$8Gd+dJoXUMdq@6_;jsA%@5B;`hwNNnQ_s!ZU59Fz2jRBOwvEztlMR1w?EA7mJlR2ku@Qt9_u 
zzXcJPwHw?*YzxEehkUJvN$hF=hP1Kl$2KR*(cC!DO0HcstYANenp+XLb5+WcAj_@9 zHU9go*Yi^(`BLck?L7V@gXvAonO=Xl4wfAtiCwWUK?W&(Z-!f8oT;JY#P?Y*SK0v8 z298XnHgKfNmco&O6${<_KI;*XpZ#C2`>*olC&Kb;@Z|;N_Z=xkzbW5my-g}umNHXB zncr_vUXU?RE!hdTBL5&|PGuCL$i7S_Nz};+G*6vFFs>1G#(0H)|NEgj1gPV_|14{i zPMwqBp?MEKn6xjyP~yuZiaW!pP6gb-zGys)K%gA?}*uq|m-Nfiu1 z&dP(QED3#ofbj&P#d|Pk@&XY+Hwri;c0jP406;ccoFPk)J~;6lj5Bo<7)6Vt_$`{u z);L!AVF@2Co~y@vOIfMlFUac0ql)ml7>;Ff-@CedcwL2gO5*j4O{kg+JO!Ju5@d@0 zK+qg7H&7X5YkseMBV*z5{y-{Z>)@z?Y)xYCBxD~;=a9&i-wl-$fXt33JtZrU4%wzC zsJWcDw{ScuONxM70SXlW=f{(tgG{DQS*hk6vPMx~@cV4fLqYbxYa_mQfjcjs)+yx1 z2;~EN>z(88b1HH$uVN-N7>Z(!R7qEC%(qv54_jW{#@9`duCNK7>l4YlpEQF(kf(sM3VR%-a3_!7i-OE{Xz-7x#> z;JX?4B=KFL^8~)n2tPl#8%05amjF6~c%DLvZhd+0x8Er*jabYr*@>wTpNKPX`a2mD zlf?Hhh0REB^rxN@# z3I|dvk4pLV6pz6Jv$rx)sVm47c|vz=DtzDN3E0E_7ni2XzY|N4l>be#{9W$%a;Yp$ zItqub(^W$v6dn$v@Eg1ryN?`AG?)b8U~fXcT)Xhu0TAqdFFUA;DIlEP4*{-0InnMT zpD4}-f&Jlemk9X%UVfqnEK(*A7)Yih0!?p?W&eWOtUmcthrwh2&x;X0grYtDD8|Mh z)0vU_u>?=CXID|IvT#q93r&m0=Do_Ys^i$0-wadW1}oNQ>>s7Z4$nJLRHuK5vNSOY-?8&Wr8HK9XrVgD+Xj z*1F7MxS_XGkoe-L6XHIUOp&;?|9HXsaxL(L^=}uZ%ljtmT~gkYWur^iA3m#gr;B{^ zZ+BI5CkO@-!Xh7m%Z%3_4#9pU1-~I|QZsYkDl8!&cx%5u6E{)>zaQFS9;J_vSbuo0 zx(vvEJ=oi&?7jV7OV2n{nW)sOV33!+el@xMPqcW}^uxAfa;+Dzp4Rpo-8$NxoxK>E zWnpc0deQe1_ckx}vGu5>4YfggVYfkP%}cU2VK3gn`B{7M27x(Jj91Y<=XLXxqNfL> zXzj%vs$|M4+HXMxGsBh?*^84nlMw#~k_a)S@_Bo)7`m7}lnEhTN~TDN$S-eymjZnz zSNEr|{e2KesxtqqZfv^!&8}&y6kEtb6Gj`Y{auuoXVU&&fgiK`d0dDUO4{Euyz*^T z^z<}&9wqg$W!YGWxTrb)_`}^W+YGlQMe^K&Gqvqhcvgx1JxPyvPZ2Zn{0W&7@|=!8 z(=2=|ftWc3W5M?yTCF{pMmM>Z9M+}j69DdICvyq5%bSE6)nXjZVzqW`=se;qPSobNK&D)y^8Ke*p@;dovVzO4WGxx5 zYNPe$U$BYqKb~j%PkvZqCKnL-AdN9HD`C5ocq=>B2xNYpqWjQ_`6outBW5Qvug4#& zGc3$F4&oShgukt>MP=hw`}MMo6>|`lJ%oC|_5uiwm`^QS4_HYFyL2$#l;sNzTl6qv z2<^Zx9&X&lTY(j;hssLBZ++RZ^s%d>p`LjSPtxiax|(M>5q>r5MwTCM^y?ir(sM3V zR`7dId`Q}-p%^aq|85D}rvosd1TMXhL=Tm`n&Rp-J2lFzSdGhxNG@!r8X^(vL%jLU z&qu_fcu>i=6JcSOF!+7*GDry)ircfdJo_Fz(eg8Go7p|}mh`(D^?ztTMpxr%V+ 
zpZF82#mM>3&cBA^@lodFo?X>5@&|Lnay}P}M{sx+1~MsQ&8Mh28y7u0AY&UG9wJ+n zBIBFT1G5F?M8V+({1$(XhlX7W*mrM^)dSv86itnGB~wzs*dM=sZ=PuB=E$)Qau7>N zG2<~(ncT}~TT`@v6fSDTR=aNhkG}W4uS{)n`b!^sLNO}(-uElY?3d}p-$u~-&>D;T z&m_oli)?tun>nwhuRfRJx6uv^hdqRBnZ1B5dp#mR$*0rK?k`&wz2D7-Cw%{#Bti6~ zpt5sbeE(Y}+0*k7Z$ZBLGE|2EOYC&%Fj>U(G2GK3oB3iri1KOtg7yyU)WcRPBBq+7$r>rW z(|3<0dMuS@8QdULUlW~?X0s;*_hKuhoY10KG`CuA5J$b9Q!mvV_xa~vwP^dUCE4Ek>?O_C zw82X*a2d)`Q>@ycscJRSIl58BdcT>LK7LC6+Bo;iF1Ny;T5{KL9NlGZ`{`84gJxJ3 z`DrCL`;=KEn`sAi{I@iB_zc1Y_x>6yZpkszK49egZszBBWI7R-%QEo7zxfF^4VZL? z*;ST<-MrrT)IBifQhqlt`{~rpWxa}a^Agmjz(}(Scf_)+om`~)rlNNU^kaxrTF3mS zQ-7?i_xYRG=Vnoc_(tC^)5<8&(Z(YRKE(^FTLqSF{3LZ8se+ZC6V$)pbfLRRZ)Vh( z_0gD2?!pe?0v^gcdEp2{=gi`>;z$mkRMRZI6Z8lAWIl>GOny(f8*bBV0iSGNLNy^qF+qf#G*v3eWvlJd3mz%;mO1~!`(2{? zsX#9yfmpOHS(A!tPrNiABm0zCAqnW4(j3I~r(ih&Ky9~Yk<>XI(DhdVL@T`v|1mN% zLp=`gd>L@cwFW%>`jx*yGV>E}72xwc6p|7J|AgK}?p+O^e1S=Oljr0hjC9 z2Xid{mqx)#B?KOLC-oGxxbB2~JRA3=F)<&noJ_wmDgMdc`m9$}MnLepOMqjLtgT02 zX5z|^5Z4B_ypFA$U96JxB-=y{sHHXmiTjy}@p{Dbiae3{He`wdBX>mpQ*3{80_rpa zDVoFZh?(3=r&#kgsBvL+BdK=wlRFe5paBzU@X)9eI*saph$5R_W-IKxA9&HGl7qMq z1Eg43Rljtl1!qE*2LX=Th4jsyY;MeJMSGhUgq6pT1WKHjNEEgonkF2R1OBiZb?D~y& zgiMYIOHCmJvtL%bUFU9nf$eRbAd6mKUDGlaUteYSku^t>*1~(`3P*UVUJR(>8GV#b zmhMXRU@H80n!v3yUkLj$OIR^O?Gz>zJSRfgcfj*eN-u_KI-7WZ_%qBxi+`WlV{N1( z$h9?CSO9Tr0UwmYMn#u`z8+THoQw19__ou;)A=pPUj)T+P0Cxq$KjvZM_H-hm1K>8 z_xkAYANF(+10I6DncQA~WH4xt^T8ju#Up?5| zhT%yP;%c2I2=PmK1j1Ai0PwJYz0A*)MrKQrBpK+nKpJzd2G6=hh_~M979kd;65@T_ zU}qEiAF4A|$cJTK&l-berSai5S&DQ%to|NAo=Tz7>mbMTTYPvFa4YA-LO5!6R95(K z9$8EHpuU&ytwIWEzqt#7^Zw5?YY)_isC0ec{a7`Rxm_RlKRS1q+x3CJRS@WOO*-FO zMMpyu^ncX6m!wqPS|s?bw=KkNJPdi-L#fY=#(9_r$wRDr6P~vpu9mHUza^`b7(xHU zYtvP4g~~~T_ZhV#Dz#MItKS8eD;dFvUJ$*2-)es+SOKqXMp|p~>i!CpGvkyMc;}Hd zxZd+$5S00kle&%PJJ_rwp0DaWf#(&90bQ^g<7ka!KaVn2q{{T6t=)rkht80wi-znY z;W9CD7XnZuRWg{kD{dM{cfhVBA>D#Oa!KrOZ-&YVK)RQp>7jN+rItdvKRh&#GmzjR zZME|S_mZH@>uG+VJgyhJZ!oN(XEiD-wOj_ik=hpEYu3RYW^(5?0IVNaneSezDOk4a 
zM~iqn&uaG{5H4u-$}9Civj*NL3G_ocPXK*~`W(CfScr0!*;VP?($>Q|LLyIvwjmYT zU*ed7_ApFJ6527ySwef?gvtp(YsU{ZmkmjW_9R4R-Xr#G?O4Zo=hrzAtRo><1+e=0 zuC4W)zbGp;>`7L?-V@>q8qBb)vDp!NW^#8OZ}oK!DL^#=g6sWD8-Ce%%$kV#(aeFy z9!?~MD2Rs&G084(GjZn3eIu016#EFkB`TmB&}r>=SkN~9CnEA?bgE2 z6aQtUZzF^X*7nZLqpU0mn>-zN3(6;AXh6Avn%mYLh3q85wS(mZP*$LBc9&U6U-j7v zHO*QS=oQ%N?`L=e*obag2<$Il9wX3-gC#FpJRI|3cR$!Mj`+acN#oge4Z9^IbrXNrXg}e4T zUEKOSG)hV+;3jy^&OezAAP3OIBN^ZJR_nIcLgfUYvAx^ZrLO7Yd)p%kGmqXt$41V7 zUaOt~uzZe;fyLG~tu^^@_Z{eBj!{-*`2bl1^QoUgtxWEZldbXz6`W^9^Qlh}40^Qe zeCm7H6pxjiPrX&wquvTt|HVJ4dgYU-zg%uxDsmtwdkEoXFKB2YkDKLFerVztd|~IA z(fr9_()q+(>KJdG-v39axBz@E{sEt;R)K-Sli-|rkKn&G8l8AuJpqI;Qa!qg*ZXX( z=ln%k!LKJ-!7tJOG`qpQOz!?;y5$f5f?$jN;r8fJ(jR`P^F%!RhV(4t4@Ki{`NQn1 zRsd}dR(lA+YA*omqCBihN~q(XaD-UOEEWMgCn8)M>70y|B(2SY<)qpSKfE51 znN27#_J{pB5uQ`jqbuJ{@$u{tdd>@qG^yb*vWETPui<1S_pe`n9e=o;q8?{UZ9ID* z+UF6q@$7o?4MqO&EA9#ULwTKU#k1GFY{fDU$PlYNgiy8@K>71L%1TP0JQqg@%GY5_ zKskEP=$d<9VG2fS2U1{vy-coNrc3#cnG=($B z9|!12nd(xl}SBCpoS+=MB3R-@G-1_HHG9T%i zuwRJaO+N~Y_OI-v4h5ne4_%;1mo3Tr=bz~@k0^4bW}A=|d!V`0B7PhK+cUZ6f7NY% z+)rZz{MZL;=WT=K$BsHr@Z)1y!y=y^3DT)3v$(jQGTy26eo;>`l zGOPWa2X>1kb>dYS*~XZ)WT&RGj7_`szp{CSR2dFpcoda3{xMoOmp&7*-VeQLRqQVM^MID6!-0uVUHkLYa{#@zub2x%tnwt zle^*wX5~9Hq^u9>}v>#xglv&*Vn* zu{*2}m+WVQ>KSCwMh8rKcuHx5HBTwaIVk0yYvw$|GrRTavyYx5N97;fnVV+Cw zYkq;B!v1`O!-Q1oG9nDB*~_f(TAmFS74TSg7p(hUpknDC)maCPFwK^5Ia$JPBvDFX!R|RJ0p=7SRIQX*omzC3ci?>J}xQd#b$j z5|kKpPg;QTGVTUSJVy z^<499H-W@UNW?TK*3D0od(*OjL}QSU_dRuPnCK*#$)=YzRN|PS*;5QcrMu zMv}#Dg@pZZ3oEc$64v#4eueYs4aeZy%(YKi@wTfie$qM=(c0@owBz!GQBuOA+(hi& z+0=QhKH;YKX)aSNPnF&m7Y&s}Ro1vp2qj?QCnB>p?N_?C|@u!;VRA7!O z;m;4zfK2W$2Y-Y7`8!rO$)As&rS@NgKWEB{`%pJBJr)lwcO7Bl$IWz0r4%|=CTNq# z%8N9{tR=f3l|qAw(WCu>I6)J-XB>vs2TX8Xs2cOe)1)Ov-{e2NYqFzn7s}qUVk}`W+zYXO zRnPXi-mMY0?H;kMpQnmdFydzNXe&#?VC+nMt6F{jA^Ib%_;EyF=X`LT~7gHqgjh`AEXAV6W0+&Qew? 
z_z#ee0#kQP!5b^H|uk#~jJ&mfBr-6Upa1A}IQCX?xGUyP|z|EHtAzCX}`hM}}7>P`7`p;~zpxy<2 z#jp2%@tZPr$yh*>q1gF~-Y-4{hXxgDU{>_Xo_R#UMcJ2%e=YWGowGt~qa8@z9zy28 zUceko%X2|V33ISL0kZwRL+4fa`z;kNhplkUH4Ovn_G3BnE&o6pWco6?+0x2TB#9IvWF0s_5xVW z&10#g1eV7zER>l4#=r=`2)i-Dl-j@F9x5(?otyub(Mf-o_#}8}-oumR=f63TP)0%} zK(3|4q7?e`aZ+al+9Oz4 z0CMZkx0P*4H|I^z)qDxXBY*yHehcw^HP2Jtkb8eVQjfVyS*c)OvPOoyD}TNt90vc< zUkiUcNCEyjPvXyc3I0m!+MmyrN$eH{j=itEykZ<$vi%pPg>=W!2f?2oiy0~+z#+(0 z^8IRIsHgw|tUn(ngOpBy(_xeO0rXL=>-=c|d9IWt^eWtoqHQVIfAfXgdKnRqnpmqJsLfo=;Y# z!d^Y)5b=+>*UE7zwFL}&2w4bw0ShrYPY)#}EW`nHSfIxk*|5UNi4n+Ka$kNAMY%G% znkxUk%t(5_U3@lg;B}&@@)!6m!!Qhf!iX+gf-CTbVKY7E3T35cAHfXA2)F+)=qh%L zy761WZ}$HkWr;9{3$|xm-L%x46+K2^aGt8Z5fE^-Sp>xLo+k<~b z=CEk}%wI_Z?UME2zrUS(yVaXN>TWBWd8%Rsz4^vG%F2>Jc?#VUP(BJvmF&eIr11#^ z`Ne`zIROh`=byHbX-e;!Qn78NrvbLjvN4Gfnm=eH04>EJTm_l9W6LUx>k8y{^%XH!L0(kg1G#HJbcQMP|c?ptYUMYM`bh;0r@=| zNI*Kj43?7y-z+sEJs^JrE|>`v7=1slp5KCRu6nEG<8cq)6?)9~m6Zw}M^@tT;{Mc) z&@Yqw(eB;G?^di!62Gf-p1|)H_fysI{!G`4>Xk#d=KS)Fj)mEht+_7_v5FvE2j>Zd zYhnYE5PmG(Ktfl_?w1HR0HKYqo>C*yA>8y%WPdrMu7&$EWl0f`D?p{Rtit!iu&>eg zFCdqxQ&y@uhpdJ6waA}7FGbBAvg_CJr_W$tlJ@liV$Y;~ZBi^+-1^ghNX64_ z?oeoIZeo-q=}#}?TzJ199Dgst+xycuV4|6ztl+(Xta-eP^;hFpncT6vbX$KPAY4r9 z@2xsd=Yo;FWHoMhF|}ORY~gZE~vDG zwBnXfaRDUlI<56(QPOqyXy|8NV>Ey*&A+9A|HbAR&V=I-ak<1sl;G&sY5hPCx9Td5Bbke)bFVj$2xQN|CDcVEXtPbvQ!+; z$8lnO&z6x$_|HEgP)TRgU^xK*ZTM3!1Cb8kb&$^tqQJ<1_UE_Yo1(^-htK=ZBlMUT z@D{4zFtP^x=dlow_-Ds%<98emB8lHH{7Di&8xMy4=MEaEsvN>4bIUh67G_Jf4?6>? 
zPlZ=0QpLX!R0H7>EJG5)yO4||bY!rc0EE_mo-YHD4&nd82XiT8UU>T zNbf%{)WiBIE7hDt*2tb_2{2#jk&8}S?Kvx-Um0N2+<9%E_=foUIG1M6Yx@PRyEJ#* zJxqn<^Sd;6eNZ63OLNx;2d=v`cYSE!x=VA{M+C0BG!SnLU7EXI7r5@y z-1TvR>n_b*pAfk2(%kh)f$J{KU7s4b?$X@#X@To5&0cS8#N*go^7ve0)7*KZx;4eu zZJNI>H7UMs)BN>I0@rPtzup$OZqxkrIf3go&0n7vxNg(@bq%%@*UzT49@zBO0CNN8 zB$L~6JJy40i%lk`*1)_rEgEoGuIa6T*rWm32GZF!%uKhpe!lIRJgh+yW$0<4ov=okr8jW7>`qvtBWqXq9gh{bDSZ#RW3MGp^HIqRryx`|4pd{5%5zhIUANI{A30ZryqZOf_mlv;Uu2L5{z z_v#rtC=-=BmrRiKxhe1Rw{mH8JdTtv}t+Gi(qW{v^M}>p*4F`&y4_!x&GNak}qIL?^zFCbCCrb9s zdK}l+*~BB^#6)vHVR}T2+mOB5cWAJv06px)+&RD(7i@W5uvOYCu*{63%y>QU7)9lX z?HV92*CJTrC+05HbNVSOHJn7&h$R*A<19Fz$*sA~H^PsdQQ{;&4!xOkr|^~^8*n|r z4~V?dQNvOsriELRF$lWaw+I$#e5G3yx%!&&0*#Ss$+o9b=Xo{V zF<4Z9BG!T&BTEx;vy>wDz&Eo!WqOL3AF3Y!Lo&c!0YiMKdx9SGjzUlvvNu_i_5)pC zh0w7BjT|Cd;@>5S7m3^D{BGexWMJGHNwYV)k;yHtVZhg|7f;v`zog5X6ImX~O7_G} z{>YDoElUr=%_MUMVQQ1iSj7K&yQ`T_DKK5D?&x3@uNznD4idN!)vn)FBkLU zG3WBkW)QVj{2T>VK|A zJl&$mrk9r&XpB@#c6usBen^Czr^tpfik>2UHR1pOcg+qJ6`+XqKOHhS=@i)+YMVRo za*_Y}i}QGnW0v^Bp~Rw}p{ru;Yk&teRECb#z%|6Tvnh;2ayXM+=W9f7ZYTnn zEx$+UH^{&FO+S}W=kX3G4#>lfy zTeM%gUw`agZkps4tWwsLwR^1I2)<2{4Rz+DMhrkcrd9)XG&$g2u>=gHR zTXiz1=6W;0BaJ=vtaS8bSp^;xo`7CQu6SL zs73=|o+5Ryi1V25D`m?Pd|%FaW|6ST^Ihx4kivzt5_}(hA^B!m;{tc|LStale18Gs zJiI;b9O&Kz-}k$*knh`2iskzbT%gwGTt_qbzDNz)Sf<4{W<7PYVc@;w@wG@Vf z-iUb5jV{67rCi9rUc5|a?p+}GzFcRRYjY}+wq#^0Z}lE!Utc7y^7hrf`0=RD9lvc$ z-&8PEv{3kZ#bRV`aobzuH*LpC3f>*?iM<89E2MZAazu{V9hpy)zW{iPs9#y>*N66lvV8w`pbh&)47_I} z2f<2?Rr*=s`q1aF+nxk=edt3vcOhyDN$ z12OD7Y;LI{R$00TOVtAB9`oPQs53*(x8fANLBk$G#IP42#@swHtVZOW;kFbFF@_M1 zLyTDxjwi+dSqH@5-bzvB*N1-MEQBkuRQ30T$#y(7BF5bcgCWLuq!bme4_#f4IaOJy z+20{Z6z`v-n#Ti8ZJe1kVK)804eWZsIlAtg3v2rZF=tTz`=-4wjp4T6H(ks*3|hdW z{2D>~nNx^0fM>>^I^DR1+k91=u7T!EGG!L2mU$QtFcZ(Sjc+9x5!*uuMtcDmU(ds+ zq=bG?rW3(xhneP=*dV;%#0bR$6pJIPPtOgN6bM-CLWdgJo%Bi8Q(&K2g_~fZ!@6#v z!<)`&m>-MEx( z|L>yqi3X}IKmk5Sb_i%h>O}HE6*bhAlEtXkV+!+SA`4ijWA1?TXdcv%#*T& zMVn$}xkrS-ZYc1KNJ9T9$YiO*BZ&e`KSdhdqRcwk^3siAV^@+?%B)YAT|k*$ 
zvWT8C(tiQcc6O+=0A+0a@U-kuI%PJ4?dA%cQM7>SPy7~R-UR*%7~_Y9UWUfz5M`x; zw~)0Yen{ufQoIY_fA9Z|@#i#*Ymz^|(|LkF=P42^;?Ecv(v-+=x~Q zO2yVDq7f8&LXN9j6nboCd2z=uwq$3eQs`;?Q2~V>xX1!3Q*7?=I(>Msv;c)PpKi{R zjY_A`a``XZR6k{fMw7@I(a7z0JrzXcfXwBnLMHcv zb#0_^2`P3Sif`3nVA?n(+Nf27Ngm9j06YHb{nbQQ9(;|Mm|tR2aV7L&!BtyY=|f(! zYjR$3h*2eqguY7Ye8$>m*;Y1^3qjdKD6Fs-2rHK6(Nj{wXY|MRu$b7t4BT6W&TIdt zUb(J5GgMRnI;;K9%2xOq1-3TZycv17;g&i!cK%|Pb5Z6kFaS1N;j{+jdk?QdC-ZY< z1-4ts>et_a#sb=hPzQlrn|}J4dKay6VX*aAncV*U;;0eOhDVX38|P5)=;0h8IDL@k zK1XbpV&OcZHfn5DjOV|PNEd`c>^ItYz(61#E{K!R4pX>YE_!q@Z*hi|iYSOs?I8rS zy#UO89*jc+B_&|qiatx{*$(E3W@FhS5A%A&E0W^9GeSiLV7B4o+tTOs@NqZjY3{}* z`Mv=A8Q$a^i&(k z&pO1DZqQcaCS05+;rLxMH6noGb`cUN_G5Zl#4^|6y}V_&g9aDs+*x)zXmE!tSJ9xs zaop45ztH2hQPtoZO5dL+=HE4T1FggLG;lI6NyEEk(LXueN_II91sZz@k;PtsEVX&E zC@Ddf9*prs7F`1g=3SXE&zBWm>$1Vp()eWIT{SIc^OGF{{~d$(X|0W#U|QB9SpN+;6;Fw=~r zSWhPRe!vjt+DsGVsDMxYdsHXtSt|+m#i!9^jri15xL)xTv?7!H;#*jn@Os5a-8l|` z9X(7I3+nN<>ler18SxhVxuGiRUy2{nfq(pwI)~OXHtZu)(c?5Or!EO(_{x6Sb4~Z2&OJtRZ{m!TH3i(N(7wK0W};ZmQ)AvIw>?TW0Z~9nW*2oM#zQ3SB zQRNzO@am;0UK_vz0b*iSt4if>lBnE)6;4CtCx4*k>8KogN>>ewP0Db#!aTwk{s)#hJ2=R~I*xQ^4ZCgIljh zJV{8aI&DcN2S@RG-otSzncOSAyNkzB7_KB9o6C>5@4I{D07Y#W-JfG0bLa25oDRwJ zCwJAT2$Fs|A4L@^%9}XBJR~opt`Q_ly?vNg^!NZIyGpk6q^~K8(hFrXr%{ww6mx>} zz2-4>2)Y8~I;ST&cwu%^CIGp}Vk=18`}KIdHrS66Jj}r?flKWFyav;SE=(p=)p*KM zE)ozch9YGI$};r@W1}P+3tvk*qi^zZSgWzCyJOuN?)G{!uPFv?yxME+wHq24!Sghv zD&-rPWq^+JKC+2;RhdqiJ1!04Qa#6Nj^j8EHlK^`uJ#mdYv3xFSu~fdZphUI&F@Js zU66;DP^=f6RZZ zZwmIF4tN=gZH->fZgAexJPxAe8kHYUyB{W+os^Xto=;YfKQL=pq=*LX@HdmIT^;32 z;16KjzMR{|0Z#`}0fM})hm2I`wVu9Ff)8L-=efksUgkQikJn#Ig`?ynx}ZWA6yUMT zR4imM??}@sW5WuWgv!}4GnEZ*651EA;RV?yZ5;uOM5lP5I99t$Iw2vO_B)aKrkmy3lenB}+9I?3c!W%$Sw0Zom^aIN5gJCA>_oR*O@2+KCmtU_!^SV) zSCo}vmVYx185%4xp%f{4&bUSNvoYzufM8rdiANXQ$eXCF2t4iH~1a*8@&fCII=n#WrBC z*2auwaxI_tPU~-(A=uWW)tT^82W2UQz9Flx9%QQxr%a1D*O<3{`Vad-U zXzje(`~{22#H~iSt#7>48-qQHo*fXowNHPL9ZM1W6EMS!rksu)egA3~^<>bW3p7%f z0JX5@dHa;r6M8EXazBDhk!5nnBKy<=Wiq+7KTDVYO?aK8{3px0&ZxGt__5rpJR-=W 
zkNJeKu?q+`nA%lCBJ$4KtChY1M2>lOe0-xWSP0D)!xM^eBP2=+yL%tQDe z4}rPc`6BZj$lN6Yp84O>17;`_2)qmuNd)5feKs)=Qd974*2G@1{^1jAmD&{?xt)uM z%LdcTdLQQt21RyInJ~`B&Njw(+bf|D^<-8sKh7V=>u9!=L1@zF7_=JVh4F57(=WYi zUNxh=?}hDs?`#)(Ed+I9HOC3uSVvhrE_JP)`FskGrd-yD_Tyfv*U!u$c%qlgw6eQZ6it!>yL=2N6|D0{^e@S1f#JkSf`AiVfy1nG>(L z%e|J&43-z*jvX?;M{1rP?5zd8&6%!XyLN7|ndaQ4Sqdl0wF^b|L!b}BWwWcYB9aTq zDjE_0YhWDW*cjTaL^4ZIbNYWDqs49+67%GZ+y;;}+-meQxsb~D3m1`|U1+-og50#? z4-~l5Ik1bU8s#4}bg9l8oU7-aDWq|(&(e*982xxee(0MUxXwpnv86oemuUNyt;FBACJfpq&p@vZh zxyjrQyrtD_eyH=7=EZvC{mQtLzxjM_2LG4Ud=IJ{3N}k1GWdUJ@fR_`zmf0ceF z*V`;tLcHE)rMnaS3FSQgcDY2O3+;seq!@qFKyN5%qlFs3C(&fLf-=J23aShIM~gk+ zJ`VgfaxkGR{$!}l_SS7#x6qcHnp>|GYc%E%vqp6g-D1{QeR1ncVa>DL%UV4NDYHnT zv?C6lHK!dDf zv^5W>ODlpvFGHZ_KuU?s!#?V50Na&d9mLq1hbxtF1Ap`J+^q4zfPc6FF7p2WdtcK( zOvSY&{lf$o$rbpAv7|-*q1Z08P5Nel?}lHv{HTyb1^fr4z_%0X=7FCj;BO{yjDXjC zih%!#e`Wyq-K62ovH(Hg{{dypp_Jl*Z#k)l8N;ctzXn)?*w1zPeU0*N~CX^j8x;NVBUQ9VX&H2!}XSUHwdk(7=!`oM@?mtdCUL~I%>I}i!6u- z#5(2PO)j=uFV4;V6*8blE3m)D)5$E5Vw}l5Ff$={%q`Mmvjr-W$zA^@9o%?-h}XF;Uij$3)5BUO%#vuL z;GzrbWR@!sas3s`q8>VLATZHpMo6HY^FkJ_Qq^3v>VtHB4O!d@aS4Sqk1JT|7Lj&7 zvb@}5JbB?Gl}LLKYUhcxoeZhBIKoFndh&=+Q2`>k@4v`Cr4#7@SZOr=X^*65t2Sq7~fofMzgr7pAGYjQK}grQkehADTJ!Tv0?YhLJq_M?F^ zuP{I?Gw)+B0k#$+knA3sU5;ktfUagQYs1a#ai4#6_fP@4h<1F5oCk9yZ;Za1>_~`VMO!_H^f&JeC^s zeNI_751snYg06qCxBzscB>cT)F?_88t1&Nza%Np$ZpT_0?pSPAQ%FwUL)D9$=7-QTyRyp8JX?_guK5Az3DvldK!uN)Sb*rG2L}*U+1dH7QLZ35bt*QqNcHo; z9x7mOJj}LV>%9=6J%oU@7XWt9k~jiTQUcgp32g;z$8-tUV>??m$75^z&xmbAu*5WM zkCnaf6$@Z{FGMjrL`C;>*lxkOusv1XtXZy6X>H46J5`VBltZGDdy>_TC&|8ha}7oc z0?qFsX(spa>sBAtkwPEqY>Yw|F1E<+%IJDnC}kaI^3e7EI8H}%vuNI=kw-3#_Ls)` zV}st(b)%3|@#7;=ca1-emfCZiii|0A8ab!i94>6sPjd&PXsKt2lG_CK)E zJF4f<_va)8N2$h%g@vP3+Y&iaIyb$vv(D86AN z%F-&xv7aCbr9i1?JG+!i|HLEFv(1tb)t&;=YRuD^-H1+$)z2bP4?fmEI6$f0 zMX4JP<=#?CG3?m`dYh9e%D*JwzJKnVp~=CS()vQIn;fxnFxl+pZ@!3|9kGh$ub#kD zWpY1V4wJ&`HBBcmk`D?8%4FUobL-|0JsvVUX1=PEK5ux`L0nFqeM3}sY38!XcN8vd zm_`iQd9{zHCKH9>@#NO6<{qXFGh$r5sDxPDMg7KIWHVBxuFg9InSGnmI)+&-)_B5a 
z&Ow<2HBpn=0-S{3Z*q*Du~?a?)J9|~ROr(Et@lNXGPy@zwaPpbnxo9Mj;0FU8c9K! zFX2FNr_nP)ur>~6e|&==fPwb=5Z}ooWxo&6Q_-!T(}{)NpKakrt{!N9>o*w zgXacc-mNn?9LViu-c`T_x9U-Z0R111LjWZuLV*8a1grB*a|7WR7{=dhyI9>>#I<{{ zoB)zG9zPCA$E8{>LPRu9(*_XrK?q@XrNF}dk%y3?%T4N-lxY*F`gr_AJ#3|O7-lqC zBUC+}amTKW2Aki*N&NrIc0N-5IFPf*Dh#m8#RiMJplikMU#Hn0-{LiGq-wZ_e9a8E z`jBZye5+;&dP&`M`QK8+a<9)|wi^yG|JuKNTVqMKWb0D#dxvn-zI^=#g@fQkDq^+p zIYc4^PYIS3K+sMF-XfiicoVlRMzBXcLOcZGQ|c{)c`8@G}s?&bTpfd zp+-@pHoVI1Iy8dYM(VQjq?+~7>@%sgN;Q8IhV|9eXFT2To#y`4Gg%C3Zp1+Ky;I9V zRWE9hrRXe0H!Yf;i{^W;!@y|1_aL>XPn{J;BTqz{_7IBD?FH2HoIJcrN~mXFhDwID z)A6#Cy`#)mvM?Uu_oX)!cG-TR!qO0aLK>a^8r(()0L-NrqhakGYrDWHD>n+lD}W1x zZKcy1g?@U3pM!U%&Us)nhpZ98MgHy;c$&!_{zAIH`wXj+^mq5-M=dp}(~+_`=}xZk zXIxHo5s{O7JQ9DQle+-Fru~$&Xg$1({oBdj=xkQ>@PNeaTFS!^X_|k#1)`WAQBYz( zj z|F#L|Xp*|J8kC9$xqqS(vB?W-A#_>U86{0zQ}{Y>^%7IMu1+Oi<1N(sC`ztInVGp!JoND*lG7NkAEI znvHnvc*QyVFted!CO7c!cD$mU6m?-0oY;ZKnre+z$aeV;*v)rA!F(6LyTt)S8}pP; z{jL^FyvNuMLH-nv;I+>VLGCIK!B=z=_|i$KGDj(>o^4lAWqld`;jT8BMXI(N#&AAx z#lBW3Z6qUAdk7gxdjTU^pNCmV2_yL?HXE2TJ|2AuwgGnR2_KU_A;}X|o+~J2mm!Pt zT&Z>XM}xnBGG>}BF&dxNc!yAa5XeB;RyeIed6ehtLE9)Rbvui!4rM<+4O|bam995# z?t-wH+>Xy$m0iFEz}4HKHQcvc_az6GmntEi_FAShA$vP^$Rj&mWnQCmd?~UTt?FMO zJEa+Jy>wjV7RT&|x4=cFeoy{9X?FP$1I7n<2gGhG_4a5hq;_hBlm@q9L|y$yxP$mxb@e^5jo0Vo9-Y;R6lk&byR=jjP^mXZVy2uZz?W*RH$mfMcK8c_>9uGYX zE1MMj7;hck-@UMefZ(m+8X_~7BKXtLAM+{xE-@b3sfZof?+JOkl)WDh-9^v1Q<;$c zI%LYrUcW-?aZ#g{7#ZuY`2P7Lb{MS(-8=$;;mBI^l&o(UO3y>#<|}Snx|X?aBlS6A}cM#D=}h6-t|7yP$Sgyg_JQw1tt@WZC^ErtoV0bY9GMkxUi&^@f1k`HcD1 zW7Wl}bWe)kP_h}Y8#&5s1%e@4uD0U=O~fJ}ylYy_?GL>%u1{SV#;zB)%-@3hdziII z6TZJvPmD=gzlGihGBZBjHC~ficMa7eK(u}Jzz1asqFPDt5<+u4*tZ@UVa}yfzzYX& z$9v{}0gKHvbsa!xDJ&}2Zj}B{Nrq+KdP`#PyP?ju>-iwea42M!VhPy)Yzh40&0d}? 
zj&ae?u8$&@rre=ZEmSi+hl_#~skcg^B@1t$+Uxr1cAJJiqs^cJcGY`?m=AxV)W7pZoohu_x|q35aBJrL`%m)}2$~s$^@l!eI%HDih(qL`ly*rS@t%E_VODQ90 z>Ffn8-GV%ql$5Y^58^Q$m&{$xd78fJ5nB$;;$G86=oN8j7HA69k-_5AIP_Zx5!Eta z<){RLaf{_RF5<+Z7S>K86V)%2@un~gF1D*8*}MM zrM*a+xU?-5>K>O|{J&6Y;|F!!mOExgl!5)P@3Vpb%UoA{hFerzMNmJ_rPN}A6f5A6 z>M%og0NV`$Vg<%xA?moOiQ0O99H*oCBj~W4(tcCJvrW;LCgz!?<@N?Yc^g_d$CIa; zIkMbtDHd;dyvZ9Lyl3oR#ng_MBfJ&TH)pCsARiYomg9bgrBda%F%+ibUU{`OQaDqb*1W*Bk75 zJT8M?rde1lKVEh}8en!(Rw{TtS@ZqPm1?EycEKz~6sDb_6ZZeP*Y*dr|C1I@Q?IeZ z+F6{dfKC31)$h-i!UBcr!pq5>62UPtC5dcDj3HlQm_2}b9 zcG)Ol*d$u8#4X?yJ++>|VL_szH~sPR9QR8FOgBCBAg#G`ZwZT{qGmOf)LR8qQf$@2 z!GU5ZC2(~RSG`A6MEyf-@~ zPt@f!HbK<=ai^B3ofjwKw_Ti*H5WJ*z~F1iW~vGFBL)xf7OZ{iP)!01w(;A)x8?5i z{_LKpgSnqrxMM|ZFSk+JjijmFt5KnH?Jo4avX}0AN3&@H;M71qYd&2ckNtFT4w>Ak z3vk4!6G$%LMtTR;nw{llMQp_Rq}T;WP_RbhgdB(JXf^^1_cFi;3F*AS4A=Di9YDX| z_p~0k(@D*XfqlY~y@zwT$CAB=>d<`f04_~w-n$>yCN>{Dkh7`0*1dQC=7R_EyP^5u z>Xw81G#@;;<=~;s2iG+8TG`a2XVdiR`qlcLH-iUo&-CoxHQ9BW`$azw=+#`;%RcVo z2fo^v6~nz(HVy9Ca2!d~THLA-Dm_)jbDZLjll*alJ?bBQuzNapOw{k4mby=IkE2;` zg1C9*Va=cFTU@P6_U@%y(1l*m!W;xR9d@01O|==vz;NdoV#^ZsVN4&zXzI-T9a)+e zd0O+owI=xl!FqCyg>@begB|uz5|&37u)Fi}bk@BII-gFoJM5!jUv=ycGL-!H&hw!I z)Haj2qDrx6^n4r%h}Q741_D=+^sGy&Hk$!LCU?yp z#nhIamJW#FVmham<(Mr?a}XL0BnejEz4NBZOMCJ0VKHeFdf5vy6hTEy#9 zWRU(SLLrw%Od~Hy#k?M>croVnYFmJ5p4Zp>uspO8=J5K1R9@dm6mEIFUoo#+xgsVS z$0p0`={wpq>xHH&#)V}Af>lZ5^}bt`<@LoVidmP^JMN8n-K4ZHwfJB>7z=0V+R5Yq)q~*1bIK- z)RT9DH_6kiNuC-q$o`_+-fZNHXfFiQZ`LoD8H#8TR z=Re|c6pUb@TRHf6gASYnf`NZlYQ^zVOUDQ#<9+EufrdJ$g9>`v(&_r#Q* zr?ere5hXT3?Q)GTG}SWMh3kbpU48EjbE{68SZ+=PicIdqTdcL#aL-CoeSa-Gy?TVd z*N}<|)c8&v8b8%*gXqtdp%Rqq)MckIX>+(jG{$*&D`Y5_>*+bv5gm``TpmMNv;`7w ze5~n7FoU{j|5Qb?>Je)HNX_0=S?xj!YhyGk=waCvMQ(iPvg-13OOR%IDrv4}(9(|& zUCd9hLfJqjI~lAeADg;rvR*f1E=!IYE@?Nu{R;wv`^)gz57o5}>G_sCCx zc;+1<-{km@(q1G@T-lZim1}ice?|eE*#nAaau3|Z^oa9_D7Lov$y^4m@be-u^zv;Z3T;~k$B;`Hj z&l28q7N=?6vjOjU9oNY$)7pz>7VW$=(p9s={w^LWpMjq;zZjx!-=YF{U!SM z{FI~BU((Ck1oEp~(q%>T7dg};5rx~#Cw8bs@!{bz2EUMzM358cQ*``RCvk|lwnDEs 
zMEnQOOZd~qjV%M@03eb*g#4+!pb#<7dnF~hK4;+}!y)2tobNObInqIcdh>GvyomZg zWgtRi{BWaCRRYwvU7sz93!)mQ_|pb-$-GN|oCpyoDD5TERN{8(6qcDc2>8={|Htfs zxZ(dC%YSv;eWzW@A>Y-TBSB&xps~8}8f{JR{C@|0b@)w0}hl%+8da z2m)_X+7_x2C7!AtUq$_z(*8PTMjdUD)SQXLGr8W^+W~v=HP2I-QIXkFc57ftPF<<|PkP=3A!wt$q77-aE zJ3W=Jn-U&dzLtzH_2G*6I>E67BRkxDu%%72C>q73u%{JGDrHX^UmyGdcXvI$)E5OX zXA|Nl#+N?B$C#5yQ;C05cdpxfHUC1ueE;g2-aXv(C2^^G9xZN{Px>O`Nm;2B7Rqos z0dqUln%kz@XnMyj3xd~!xL0$9{2ahfO^)+ZUH4Ty!4lth-TPs>5^UdNJq?I3`SVW%O#Ud``P%P3$S|S#?R<{I)Slw&Agd^8;3mG*mkq<|rvnU*~1r&xO zb{xySg4*K5PE^sxQ}oAA)lPr>R7>>7PqjsVOYFbsze*xq1?VpdN0zQDJ$amzI2>(77*bAu2!n~>| zDPh__Cdv$(_E!{-qAJ@vf}$!D%_Bs`k!inAsswS53RWdeRW6o}r%o-{{{8E4%N$AR z3Ddq*X=79)O1wtHUS%4e8jlDQ2%S(8P#gr)6S^Wj;U2q=N%Nm23Sy>$S0=aeN;{vH z)D^6VhHa%U(zv9~yxhM)XV&ib|4e6Yq;~&_&YZ1$-~Z}wH;$tjHMxFWszLSc{% zi6)x!38y2yIfK5rSiDuRDrtIiYTgsu3kl*4SFbdgI?8 zp9NX`hu`@a60c|`0$1`9JckItWPtnY{ zf+*_=?66@KXV!5ruw_1@@+>r;npFXm*~DAoi5KAB`i^Z*BhLaddkEP;djT6b^2s>7 zRZ_wRzE2R{c~PtBq?4y^SA^6{4nfltEGU4K`~Ht=o&H{UZ>)gHP-1I`w#U@`KPMv0 zv!bP0MR4rP$b~ITVat8dZ*a0t!YXT6t&7|o*v@>l~?4a z(46Cb_Ay)0@NT?Fvkr=jG(NH4LmX7Z$__eF>j|# zIAaeX7TXJo)AB4pi+lc$BLRAT{ z+>RT)EA>kqGPQBqNYuvMg=g!yJN7q&m3EE19jyIKoxL)H#rZg`^`K$xha7yM`wE8X zi=Z>P^DeU8?xn~=3}>m*a(*gQQ&6^ypR!>~KvzmYi@9PaOUys8!V&55G@2EO34c3n zOAvlW@Bbsh|Nmg#aRhBGB78C5w}(gZyyFOrXTiMVK=X#|U(ENrcwmC>#T=veKK*3L6aVveE;Fx_pEXI;Qhf6H)x9HFDxRr=qg{T6v?p^=o9dS0@rlFz8m z@3vTnqKu?II%;aszI4HyqrT`do=mF31b?ixo9We8k0NcfPaCVFxoXV<4cm0}|7Tqt zb=1LRt`4!2iO#wh9=JsrzOk;(t*e0rJ;i9h$n#}w!8kh4a9%WgaCB>nb9Pa8NRc;c za3;e99V0-s6`$Bht(*H-n~bdTuHH>O50nGV)~<+wybhPY?-S~wG4~IfzcjC`0gtrl za#gi?ir-D|XcZ%4&a#h)a5>LLL+l~s!|Vlo*x0;ID=DGVS-g_f=}lC#hgkM?9^vna z&R3Zy=(kZE`aIG6f><%q=Yyp0RO1WkR9cOIKJO#3Y$HR|^*F=yKIrbpj7M~wFaNc; z`T365q96a_M`>Xts(c|mKMLDcPr8{W$qKANJM4M_>&#Xn0$i2edQ!;Ecz`-odJT_M zy&*-)G<4q%ySZ13j_x~{^?yhA{h{l4u|s5|dq}NrcUx+|@D6u5KG+-EbcdUwG0(z? 
zOs@VsXF*7npQL8|TJCO>N#SgsJ1lUAb{%F%liM2mz?o{znI9Y(Z0^NICVK-naSkcv zouSwOe(H(*?9b0u_fs$E$;1ro#W}}*9CS;(rP1|>E3;@NsfQhLq<{12pl0ma+Py5^ z8iWK$dkE3aUVwJ3dDK=n=VAF)S<4+OSaIT~t_rTy|%h zr!j9)ol55?%w{NzxdNBO)?b$Di%qL@P{z0+?xJ!^c;&bLdJ)Q-gOrsjieJ%Gh2@v) z4@?m$FjO4y0@`A<5m~~hMt5PJ4vsWgw(4_<3tXZwB5%7pyTqM6?#|lW*|Y9!jyrqV zoy~J+ue&qLAKrR$XYKCV2kz`)clNnETk6jA;U}{-PBWAHpL2SP6*XMYai~8QcrI!$ z2BS8udJF>u94!6YNw)MK6s4#Hn{T|C)o22id^=xCtX}z@i1!rJPmtyZw~c)Se`Q8dOvlc0 zA@)O9cPMSBFM6hi$x0f~{7~l&rQgSX9I2Qa@IHM1__Ej2^UAII7 zVch<>tV+2r9wXO=I`iI_pUGT_zStpq2-!w^0o&N)sW{A5Qo=T_BAD+y+wb?)sYtSD z|0m=rNE(9$#sJ!}3+8Q>G}t#_Zc})ZIv-|M?+Ie3QgXC0;_>{J1Re#&a?PT{814TA zjm(b9N(Ij&YtlN#-@{`C-$IGW?=O899Ay8enK(!PK4=s=^N#O)*F$%&wt)U3GNxH2 z^R`&rWaH`X?=QVnSVteaT0@xpKiXgVd+NV+Z-43epM6%izx1y@hm9plr~Rczh?a?edg>v zXX5q#-|>-s_S$>hp6B;m*0U~KFYm?64p`3KdN8ekXW7|Rtz68H5t4N7=r3v z;@nPF2Ch?drI1NxTQX03bwCuZ_3_GQnjW^)oD(ifDuN*w5)-h zQ>sR1=*2!CwtO+OaWE`-123s$nw2_$-oWViNeoa{-k73wO)yzP^L zE!y8@4*1Awti@E0X0Vqyjb$I8v0CyPOGy!pHHHXN&qcB~J(n?h*qd9m;rHUL-NiS9 z_f4f#u@8WidlfP7zcj(sop5a7Au8#EPr}zSG0xo>Flp|?s&#bhAR;#a7_P+G&eWMf zGzBfzXIIf;mn7n8YeXQOyQIy=zShtk;mJb_pGM}%`w;=G02>cR0fZEd$1sxC6GAF3Wxb@FmN7j|4-fT|r%?#ScnVj-m%SLc!(gR9Z&&_Cd6l5q9P~41n$Z;7j8Jdo?W0}0d@&j{kb)S9 zy$n#z@l@XZX&6|2Txs7^gQ)GVAgvz=i>zIi26N|~Vh^VS5fp(mnYmsI6>__$tv$S) z6b&MPlc0k=?9i)mN1Ot+!mqoXhAxhJ!xzFO?+jIoh==nW1XWV7w9uUtY|j-crzAT# zL`xO8Ls`jTKhHsJIa1#1@((N)I@RW+V?%=n=$JS2SWuRTj`L5IA;#aypQQ*QTP+l36(Fh*Xn_kU3-I*4kPOL>k9;Aj#ucrrpDnd{o- z{9ha<>kZGdUX(cwE9wDJFVOx9etJNRROj~NIe@6eL)MN>fnt$rV(V@JB!UgpWI0E2 zQ~Jx%ZcBrO#G>~sB<4^soVS-avtu6siK-`)9OcI`C;=o|%zbF401~%*p*$|Osu)O& zR_}i=g^}zcuK-_W9CZdKWMc2kq0Xq8neezDZnFPu)1@k^Oj%*VM6w33DEQuZfCr7& z(z)$^_4(uBFl=NL4?lyMQ9OK2&Yn9wMUW5P2~(wR`4*Eta&`hBKRFoaAGNU=6ul2`6uUE`yi!VXdkTL%yqMG;X%oZdF?ZN zAG8J+F=|2TVQtic4CQ&&#XJE+1)!)zQIu1e93Dk)4Y?YxIzYz+(;?;O(J|G_VohvSG3Yo@Og>)fC2@kv z+ziyrSE(~l7_4{Lfm>xg17?E{+vcP`VMeZSzU)aVX@zVa_|SY|pZNF*P*3MxpZxjb z1C6u-l45Eq`DxtGDzh=8lPF4dLy<;N@*4LMlz`(^WDS54f&;(^Ko&5NXn@8%>jjk4 
z-dZo9EJ_5*nFLGnK>3NZq6f-2FQgmBRuuyjt46*hrIQSl--0?bkUBepuK~XAjG3v| zIIuKKyc*eG<=myLz+_9Z`u&seY7GP@3faoVs}T}4Vj=MYJ)7#LJ9>W)R3dD)MLL-O zJPQL9eh+j7nS=wsUDCtBY9i;1l>8p(VsvX1HLKn)M9mzY7osK{IEThY!)akjmMFlE zStzBwUcmWQ&w2sp<3!+$!lTXuXHRKM51d^j_^{=nH{z;F0?x-P`vlHmpwKMB%7yml z9%u5*)(ZFa8#o)OoK|I}g-ZcYpkLbFoW#<#CaByBmCfd$Kb_0Apji^m2i^mf=tbY2 z58R`BulY}qUyyuA?VN5NCmyd${Kj z;tz7`5vZZa9(`%T?W%llfl zZJQ`oT6coKPSn->4K=HCc@S-R4*b+~h1?2$ruf`;6vAK$o@y7KXjrXX&1CCTae zV!*g)+-w4JsQAW_V%y-?DY?W!ooIXMC7pFODL|}ORrv)qU%tHzjyQa z9WM78cr1H4uC^rnZU(Z#x{L8U9$uMe(O3a~AC~Kh{{1pwEY+txez#FE9m+}r4Fi%w z{7UhnL3Qjc`nTEZ-&&Wdqj5=*(yewW^)4lqnO!x3pXPPcJNv(WW-;FA9@bG_v-{b= z&&mAs%6Y6)&dq2Gskjz@ovf=Fiuv^5UctRr&c^+ea+Z=IRm$0#0Id?^P^s)}3ojhB ziNu6>VJtryy+WoephLtf|!|rJ$zyIo|z@&a9I<(xnRVwmKpHxff`lK0pWA3+FjD5Q4*abaSAu zd(e08>)-|YdM+>u@_1KL#K}0GczLgQ^r0jPe={AJvUBYO zr5S-IPO_EaYK@`6+2R#(&4>{M8;{=LRo)I7RV)ZcB30%wR9_?>eSp&LCQY?%MT1Ip z8i6PgkCq!?w!=*E{ohm8Hy&+vlxKkRm-VYLzMy-zb-~7;ALAY%%%>6=bm1yG801lx zk}MsEnpIL;{R}=MQ%uPp|DvTqCxt*tdx;Ai*atAaa!D9zR8oZTCt;U++O%I`9QqZV zFd<;>pc zPnGjGWd#re$yzkuVEMZ@0OkDCiR;bZ{c(h%{M}Rc5&o`}qvY3tYreB!*e0$cABr(| zxt~ML=Az4bvH0BQOA{odG`?6Oi+_XZ;&V&qO07lsd#dCc>TMlcPZEC*lEz8q?`7c9 zY)OUTew0DZaGLP|t^fQzT;)7SY(XvDnXK#&D;TdG2RhTa=TA-^ubqpU@hr3BwSDDn zI`2HsE_vYAQsAGME7Zi~QQRBLd7L;93Py2v27D+S#eIVJ&DUL4qZeR;g7Q{_zDAYm z;)T7+jv>GOy2}E|eZnAadj!>7NA&u>3?!IFWuis1$rShw zy1Mg-dc|v>1w-lFF(+C1lEnS*#cQXaBcl>|6#^BN$niYK-nKe{>sP$??5Cm2dPP%S zUq+4+u?M81i$=)at_iT<6+wP{v=Vy?cx!!MDhatr^Hqjg9{t4y-7kGRBYj0 zFgRKVyv#Y&8Hv}P%B|3mgW;w61V((k_I8z2tE{x~0d2Xo{V&)bFVcq0Qnr_wUzjK1v48pFp9hpw2K}yBRkE&W}MssUD$U zJaCRuIg8~@1DtBI2KuG9{RjQn|Kt0x|ERI$*;``snCDVT`nkGvBd!C~mefnQt z(J&~P1*bgOS4RT>YiT?SeuasJ1!qZ;BmURPIK}Gk7kZT)!vgDnje*QbEcoLR2)GUP z1pe11&a9ggH5g39PTv1IRs}3k%#Ie-kSWT6wl~K!U(t{uFkzY$3d^k+hZY2A*uV-1 z-N^6B^sQ2kC24YcQEFq&>HnsB?-@C;W83ucph4z*`NS%f>>1u|n?K%fiaEHQB~EJ= z+lDvMqF5V%2_`WJ zExGR@5NJ~f3llJNhz-56))p=HD37? 
z?i={6l`YL~VJ0b!gwP0m&7dl~yV4!T(8kw03XUY|R9pCKcQYDh)lyoa1-*9VUKu}| zDYl$z3)c#+leuakjV3&Xa&;R#U3QWNndgN8KYR8Ap^zR23xD5{p6mZd5H5a{h!`J1 zI87Wp=rQv4fBRr#fL*^cChwuwV=?VV5d8P6dOEcJ*O^iCdxw{-{{})Osx1I_xrCmE z%%fGs4T*ucjft(4qma0P&>VoIn7i>^yI%>{Xzro3>*ViJ+ow{UCAtmfBQdeIH>aRa znn6tmoHd)n59U9QvHCpeyp?LSpI7NxX={wMp*unaq?Qe$Aeq(;1r_mDV-Q{&sLmyy zta9-qw(YItf&}&w7t68_P&XZUmMJNsajw9=_FNWxZ!Ck_L)OCcRct%}QhhnLx){b- zfB7isDBr3W^>ZC?GdojnNB8zl|CDRQL8m7H6*yvBozyEh;@zI(Rn9WGp|o%~S$m&{ z^D|(T&b@V1pZvK1MH%H!ojg1z$pL@7%t}*e8Ds+B?dhS#tc`1Vx@N3LE-ag8~-aBZZR8qWT9*_a`J2 zM*G=t#Mw)Vj>J;ye4<0q!cqS0>s8Imi{dIu;?Fi}buxcWei*vGgr^>!PqajCJN&6Z z82Znj9aYX1$_jtp5x@QSyu*b-odeKvyQ3!4x$BRx273-E7;J_A2;=LsRhVZj4%Sli zsibC{IW)u3U!HBF)UGCnhc2KY1g{Bg)t-p@tF-_K=yDfmOf5DQ5 z_H70?;?JA#ssH-(1wdntRaRPfi}>BkzP$yy(z%}<)+c|4U?rpcStM7hzxM51Au-v$ zO}f8ye?mfG-YivTvs%^-=ze^GavK*ou<)^DP(^5{MizaVc+B=EL#bE=kVis;tLE-Wdry*+3bfcOy~Z2DEyjd z&p5=yr8tG7l^3juW5_wz4*W81!xiyVSwb@!_bbZ7K}b&44e)|(<#}I4`f2bm?pSgkd9XWa&g-(=?$kT|nBN|o=eDv$ z1XA(Tp5L|m!#IA4!f?8LkU7ap%6U-kJpA5S_;LIuaG>Zzz-ft#)@WgZDXldAIZ zs1u>D=)6g6RWV4g>uL9uo=8T*bYNu$Q)lw^#z4r(PYNC+juOZiDsm6Ett>xzJw8~y=* zrw}31(B1AQul28t@Yj*Ln(u0W<~EE^s!CBp%0C|MuXVZ-L?T?&$}hJpi-?k(q+xt> zlUSEj4JmSd^{1EJ2Jx}ka1K`og4sReTKRnw+ z+613AQCG8C!x0@PN93xJFw}J#_tJwP^mLY-C!%D*{mJe=JSyvG?xUN87qa~vo}K_> z2tjQz^$IExr|!Lna!TyV20V*-T_Mz^dDJQ?qQ4%%O7&z}#OWGY*3Q8Zja3!qQRyLm zNNnG>DQoZaDNnm2iXZ5CnTXXi@~efdAf&mHSFRCS`9i`?FUgwSRK1Y+kraV+f^Q_LN)$3~H-z$JCn!Vi% zmXAyK(3JqD|I&Tw)kRG7Sh&uC1&WxStFpJg%kra>IssRw0aesYIx=P>wrK44rU{~a(k2$cS?>Q``}0wgZgK$|AD3d%()GDZ;4gvZ0-K3qO)==C1(g8)a6|Fpr#7+fBXO98mouY!Hy;%s)(z%1b zTZl)eb_G}q%{W{F5CBlgE5*><>Q>qp2Tcn}vRM@NhGqdI1wIc;KEXVtYM5#%fTH{C zvQ%yqw7iuJ&`rfH575IksEO#!J>^tI0J@{@`wvj*j}V}z3q&RO6Y@WsX-ov@*|@R= z0Ie_!ZnN#wNyq}|F}xvw-t3@(Hkn`!MAHTU-P`N17jKViQW8M71Z`mhi#6ttK)ZP! 
zqZ$F|;Yxc@t~dbQnbwpdKyQr+KIZ`@@VST!_?*qf><-I_e;oim-M0Xl8Pe2E4ezbc zdv=$-^d%1ZVkLyzY?taz=HknEN$Lcy!g#fAYu%%AA(j=#r{qO(z)M!U_J=w%`!A)r zZIwW(oT9WGb|}Tj?w5hO5ngMX39>@{T$F{>uom_RsqX$Sndr#lqf70`Px%~+QDiOb z+}+eZb0|7BTX;UzNUt$g2sK4D#<1Tyid#vWC9jT2n5G7B^wC?P-^*_sbXEI@`5-;%vr zFMte}0xdzvkU`nZBZ&YRNiYV0++p#t2P&->J}RU>Ab-~_aTO-v<6S9}WPBV5yv@B> z$?lCEK5lgu(`>4MNr`Tu7Cb&a2D9du%1Q$t2PeJo(e`FN^MNUp_E<9joTqcG`=TaT zD}i8_c?Aet>&&~TGo*Ns1q(PmLc@Ax<=qS~w9T*22eikS+vf1Nku?TPK*ug3Xpjdy zvWJk5c&{uTkE* zsj&1UD(@=Qm_X&T5exG^p2D(~-QRRnevLczZGV8-A1d#qB5qM8sJsc80xA=)ELb1D z9~zMO*GM3ow4WEX?A4N8AHIXs^i;PR+)*5@GsG;B_mQ+HX^eUL@3!I1^tm|}-F9nl zYo6-St-S^lBdXi^(B-H}C?75jf7r|F8NF&qqWq0gIti=8)`1Okj;*cx>yGk@r;76P z-9R#-19^PUQxSVB6O{iwnF7kYYvg=!h`0vbfGV#wcL1MsZm+$GN4WEDrR^D#t>OiO ze|F6bA&5Zb<2GdG+Rd&gm|7_quGYQ9)brCiwdTHAPzui!{`f#e%D+0=U+qZBT!HZ5 z;Mi?rw3_@4{&}Onj@8xtLhm)=%@7fQB)nJP8gp-h2&P?@M~2I9wSWzfT0uBWl{Q%C?J|WKI3N?YA==hm3@;ax;XZfW(N3sOtyl17u z=tgkA(A|gOi+|{T3MQ1#7jSM~UC&7!nK9MbqiYwAsZ>1+#|(29TkFCxW%f@0=)CfU zW2|Que6Hf0>u|ysjAXOg4w*6KR5@3lRz343_2ohoUC%Xk-!WAMTh>NWy*o&JoZVu~ zhl=QMExa?IpuO8ywXGpWawD|~M1Q=2#+F#Kf?Zsp#$h4m1u|5$ROc@!M%?#EZClJ% zZ7kV&rhTF1Y&qxUW3O>x%v0A})}-3;B^L6#`&bEtzNz$*{Z~c|E3$re^&* zpOI1-UNhHtCA8#kalMEU((kFjjip9|K3T<%j-e%f6Z0{&+dK)WPVVTwEgY?WN2&8f zK&VzOYg>Y9NbSWgKKO`;=j73R6`rMYH+(acMH*5;lmj`GID)}yaSH+TVi|l7094i> zvvwUDhjas4E1IyCpH@1#I?>$iS}RH^D@s=6QWPyIE)>mAoqWjDdy$J5&1du;0+1K= zc+p%b8ALRPKZ#AP(pi}u>QYUW{8rV`SXE_yDh;V#dO_7_W>?HM2yuey zVxDP0TuuRH6jULp4LwsYrBBF2M5J`Mt0$Dwo!xyXrCaJgFQwn>`3dvopIuI${A;qD zz6M%~<+Oti_Lfs<(2kTGC1>5qsXj^|#J!6iD22EX(|nCMQBMC$*@bfY6Et~mIsGw1 z0jS&{`P}^)%l?!!$PWDOJ}Qva=R;YYsTLzzYw=tnvN|1I6v%3;#F?H{t0zwS63*nb zBiIwAn39F`RvK--My=g11NZiX-xecpErn*RC)O_r?{xjs&lYJR*E4UMLpAv;P8hwelql_{R(34>W!viis_? 
zOG+uQKnh(Q&PJZ3cd0w;))c@*sRFZ7P7IAjoTdOXiL>yC z>MVhT(9T}YrZAhZj-zUY zH=0l=XEyx$4ihcy23y#2y572YCJim2(L*?9Gd0V@0PG6d36qDw03DHj1?Gv5Ts)0v zAP<|NirY=k<(M%q2zZS4Q9v6H5Xkg+DZhM3m{{6xg&aoZrIKHwBHNT9U$EyQ-K@Wrg8uV0^%E7oTUy z?V`?bQ@)Vd6`ZGY*M8N;yPhLOEFZzsCH!Rg&7Dp>p2wY^Kw|FAO|g4y%-|7k%;!>! z4NGRXpu&74;}*ff-FS*ISh$md0xaNJl!syjE&d@LRyu4RIZ^N+loZtKq)>=&FL7|N z4}ilpc{nI30*89kYkvRO{xl?j!#AW&p!UD7j;ksL4tAm0P$`z=S(@X3u~|y+AlyIp zFgL=(m*riR=n>Y`FEram<#Z@3Ei~Yw1Rk~l+r&Ss|3Bd24ZM#i9&V8G^n;mz?{;vZ z&C9(thERg?7-FtLqpTk!zVWBh1PnQo%{-R~iG8r`c}VObRp$poJHaCneBo7bRmDKU zYKZ?zpCm)#dqCMdfQ|ByFn2k7Www;%FO3Kv_}oy%oTscb@EMTd5Ycb8}t{nPAODn1N6D^~&Zc&$C#={FbF-LlcpY{B_ zoS)QRMlFS(XmLR<_lXv4ak%E;bR2E_kp>w=MuT&7@$*2ADsBZtuKihO(re5KaHQ?+ zfee~zTK19Se(xXEgX|@t+P2?P&_^1|pTm8T&!XTBsrW4~VQcaqX_&y%8uO@>C+1d; z;(ahpMXTVGe8C~6@*io~R}8*ZN-6du4Qs)sIg6_NM;gp2&cvDf!A5agaXoH!pM1Gr z8Ta!qU&_k@5b?%Q5Uqijd97KDo=N8#s(?+4mE0jbsz;G)W7Z&k*R?Z%c|%R(^$hQ1 zc-A(5R5VVWxq`=ygwYvc#zqoTih?wVe6G+`L|b2tFU?n{I?bhuZnR)Uykt&8W<{5; z^(sB2U;eQ81IS}elCn?>Vodg3S3-ypwx%F{yBl}PcAg8uCp11EMLbJIydz^mn+_pU zpq}IXvtNR%bne4ltbaC3m@e?ogv??aJk`NgxR+QK_-7|$9-=yUH^$Ch+YaSgLk7Ez z2@(f~x|sja$9_6v3njQEd>1sHq*Ki72$<;uig{Q5F+x6hXRJ%Eyl3cKh$iBVkF6?( zt#@#rPw4(B(TuxzVN^$vP+T2erzjSG7 ziiE_@X67ey*$`;RbNSzRNCB50lUzZ;vtp}?;j%TTvn1pJyo$N}Um#;zs55Z6C-Pe? z?o!`Nv)CKdTUE^0l$8cfBdgVi{p9bp3O(g^-RXbC-%gZNl)p{VtbV0KICbMz6=3uv z*OF)RMdv!T3V?nXSG)_yjjBGqq%=iBN^{UlB8xY|>CdzHL#ZDx$UR<2E{UxwhQ-#6 zen4WM%;IfvJIqDc&T#$lR2fou{2UDH2amlQ{TR42hd3K-{zBGx{roI2<@}#7h2!U7 z>^z=hPKhibzZyf!VA=Ro@A0#F2E7#J5C2HAzwSz(mzS?mN>bs=S zVtZL7Qgd>#SP8d)L-V#`*>0%1Q$Vkk#$inFLiYfU0Ic(8&Dv7p+U$Os}fM7U)KcTJYvAs3%oTMzX4S zU!GF6zt-q#rbu4w0ER~+?CAh4RU`R5N{`(DrnY^})GmT1Tinql5zjJ+pa$Ma=^e9= z%+ctoDy9y#ZW&IOnQbn&LEbVP;Ypbm=FHU`+R-+DO86#j>PFM32NnEfL z)gC871w(k;pvMjroGzjBvPo=IBGE(IKrR4YI2F(-B?~HpY4Z-36$4y5!PP2-mb~KZ3FObzt9j3c6I}mP+DJ7B`gX|dmY@NB zD45`K@mPRrEtcQ3^am^w-~ZXh+8YfFz~JRv1Y7)vCIGFdt>8e?Na_a63czN3jP)|? 
znzuLgBGu7Sz7F8nn~`K1#zZ#sFUr=Z0 zA5P##l$!@{;c1^e%KH;HshqDUD{x4Y6@7G-dKO08#ZB`M;K})aTU$j`4eu=T@><>aX72nb|u#&!i8vS&IwX4b-uelXAv z;@$#PW}>pvz%H^De4h&ul0`7GSLxg@x3uzUt%mzR8#BR8#Qq?Ea9)DNP3)K1a@Y*d zQTt`Km*JD=r~NY9OrFQ_^By6&pWoVY{5<5hmI8BVIRvzqIMr+)pqj7A^HWI?p=@AO z0zVHynM5UWAB7bl*Gtpl>Ph0~*3v4;AKID#2+fPQ$pJr~k_(MM?%~4ref9={^!)si zin&}_Y2YgO>it180uZKibGLAgAbRrx^|jCmG&e&z&!bv99c%W(6Y^ZLt0O`+pK9rt1IdM-A1BlxgmbRb?gC<;Jt zkBF@&i5*iV^vO=!(@?@RP+|9u&Se|ztgX395wy}Sn`g&`DrOU9rGY1sH5hM=ubP5z z&7y7G#46gL<~*yO%`cD)&#fAjf6;XA-9%%B zkn;mQkMQap8AVm>3-zVBN{de**`Uq#v_thLjVyx)@U8mA(6%W{gm0VR>-YRN*iW$* zIfs4Wc}zVT`qTPLeiK(+4EtRCLo%K`UfvebG#6n>JGwV@{F};+i1HlFfPa1VDsSH& z15@S@Wu=9GA#0(1GcSO>bnf_}@UFB`!@suH@DKi*E;k~~P{7p>bLZn(p8_&-wz5LwCxj1Y z?~p~Y2De4tiy4gKB>u6f6-f!HHFPdmu~_ff{ZXC#LYBIe5PpT!T1Sl~pTe?6?cIAQ zoT$CKP5vBPkh82XWJXIIMYb@TeKcz3;Q;6cL03}^iMVFPF7xjaYR z$<{tukSOrYexod{NVuc^#?ctxHVxc|!8a|xoqP2%_ z={}-k|6bZ2-XLa{upkhr6+UQk$mCh0qzG$n!4P5(Z$LG`n$RBpktV3u-;J#&hBemR zY>`?BYE_?6FOe0=4?qc%qQbx)j^MZ0GF?WcL~ru$$$2W~zw+d1;19?eV-J4^w9>gv z2V3Ejm(pK*cw*jrvGy>H5%9dS_V7D;9^utMV6h(d@ZEE5Py4hSP|!YyZ&fRj?BRd0 zfO&h^Bbkrb!{ty#)M<;YE{1*99^Ni_4=OKa-^PfkNmFlV4_mkq{~m#l@UPEa@eAq ztCLcow!QS_56#FkU++fL@h+H1n7;#B2IhyBax<^(Zu@0iJu%F;^8Bi#KiMDJ3&fgR zKzK*@HqO7jM$SLn{}@)necO+u?&RfpC5SXXRaUrvA6eu1{UMUh_ z-*?D4bW!SF{MH_@`4Cq8&Wm$^04oLh=&UHq zD-_y*pj~Ii)f2;VEAQ`0J0!FGKyYgA1=oSR->M-49N!e&^@HPH-aiJ0=9kJ!10RR; zMe;sfurAc+AEFoF|GJGz>+_*lny5a1L=Iv^pASQA+dad*^?9)1RdOtso38Q75ObsC zrhj_8p~X_7Pg@3s?Q<;j$9cXgDI&_pV3V2Kwf^U!sHUhg-&dn*X!r6Nah1g|*UIxY z(lSBS#rFAR5NTcl!5!UYPM()&xBz!+H1O-Uecn;!T%oMA@Ey1uFVDLJP|kn&WTRjO z^ZLfycf44hR}59;$Ef4koo{o>>y^DLNQ{nMI$*vB`~5mEa%8}nvQ@088+PHv^f zJ9dy`S8ETH%F<}ubLnq1uAdfh|DW+{3F`rD1B+`}uO zrzXW!6~ja;FB?g*1{D{}%h5lF2~RLc2hXJEfJ<7|=7gSEWvck-lpj*5> z+zyM${;$+U>4 zWBaKQPdwFds;4g_SoWVJBRCQ37oI0Q4t)^ShlhG?^WG_OwI%U)R|p?8DrWxq zEO2SwVNejBC;f^>2=I55MuMfyppWP7sdE0NtnharS>yS;HyFkKz5ki{yBBsU%HP$p zTW)gH+rNOnl>ys>Q{b%|8*tsSxSRI&lf~yrA(x;=Nak#&E|JC4uyRg*&qd)x`Fko< 
z6ZN)^ttyGX2T8FcyE>NvO|vC+hVnaz8}WC1?6g3Vy*pgxJV+o!E!>%`*t_6-q9)S> zH2MC~yQp3437v|bPn6L&kIqBQh;PdNqJjb>0*_@?J(Bv_=(%ZiI^&3(D@k21e`0pJnKQ_oxq#wKb)Q_uC z2T}RC10!p#p42J_Rckq-RO>KECP|XUER)9MIz+wuK-#B&j^g)D=GFeo%$)&8{Q>29 zrv3m+$JpR|xOP#2`LGsH_3I$_9oYYOQzE%`o7Z&RzAkW$j|U>b_R>ocK8}Selz6 zfZI$VuPD_;_3EgCSnf5XWZZ%eQ-%lUxjbHNU(7vBtfNQ`3zn*Mmfvx!(P7AA%k%tl zEVGF;k!vX>+X`Gm((~1q{AMY>e?7rcXO0@tk7=&=^SoA;hyb62)pPoB9PVY-1&rx~ z72!IsfL8rDuC^qO?;?egJlS;yFf}U)3j`zn|L|K>`?f-Vr6;;P$7@u~^~wsz*TMNh zagMi#y8~gy|8IR}dEO0YFDlQkalc;jybajwr#zqYBTJ7?$76fx%gw2MImyjQWBHKh zbI?6ec|HYti+Eeb)|15D(b6)>@_Z@4G-Xs6$nzk6i@W0iTL0zwa24~Qh72@tXR?-* z=X$Wp`R_fShdigySy6c&fNA;+^86K0*-v@Cu*p)Vzw$gO&+A_Dd?^mNljjUdsFyrX zhL9p?Wo&Ip96v(3CRv_m0w1#@)dupsmBs{c{1iCfe|a9SVxGn1sDWQ6Ymq!l2NIvP zg5Y<{9Rq|p|GL{U{SiGc(%egwny9&tMps2Sy)7zuSzFy3pRAK{Mt944FkGqSA`77? zZjJrz)Vp@)NUs;{%K3{6?sW2ADLsDK@ug`L(mtD+mB{x$Q71Bf8TzOf)7vC~&~%U3 zs$!UK*Y+PLrI5_@n?R8H3U&IebevDQ19xKk85#nXR^xtc|4Axn1*S($97)!I?QVTP zrq>$etL%yn2%Ul_iy^GlC+XaCuUpp3JoB)Q*WctPD3G&8N<(&DRjYnZt$Lq}2f|n? z!-UTz1qO-5<}lQn2f`%vVsm+|Q>*mFOISRXqN=#+C@gspAFD`b&1K{*3V2k*BWvsh zh%g2?#B^xT@47&Jwp!OhzmYt$XmHwiOT8{Ntv}1_^MFv6h+&^jod9AQstiEP&_i$4 zPnUc`?d@Z$iUEWTT+~ZvB(ItO6X-G()ae1yVK&nc1WNu`(olM_ydT&erE(Tysno)1 zvgYwHU43v9BCt~oHphT%waj7(Vd@%D|aDu%sQ-hU?TkQ|q~7p$0ls539` zduV6_f7=A^5}`I6buaH{svyNv)xxSV;G|4X{bv-~#69mE_LTU89pt$b&t4U$>@0{Ahu~JL{Dp1?>D(t!`Kb>$&LqFK+mh!*dSegbQb+efziELese~EkEqVK<=gtC#v zQp>%Nw8hpH!&hsTZ#nqkgc}o8@}}o7qQMVe3U? 
z_2cX4`C`^0KL9$@xr1J?7P+!@p54C|93>X$a`!D7nPcsF49*v$XxP+ymy|+@bI?~& zE;k=X+Y98^GMhDy!^CLd5-g!H(+=m=^;##s;gOawouCH7+Dn{?whu7TLtYI{w2~qw zdQ*&YPg^+Ox)voA<+tLUqUI$>#MPI??*}CH$;0n$kAhDZ;k$MOr{z!OM*Myb=D}~< z?4)kx=Z_u(gys-srG0HxuHvX)Wy4)E*I`4;betsj@Ir*HQnLprh zW2tj~Mg?SLd=;VT9oRmd^TT-KMnoNYmIr$AN)6g+?SZ|@kD=rNBFMRrig_4I7C=O=YAYD?$JmWTAz z`AIsKwBY=tZ!7MaOxHuD&Jr8udYa9&#MAY7%C_ex$^VX0^&l_vZ+*Y8^dze8D(#g( z)w4mmc^^{-RfF^IonbTk3p0t0&r@|T6>*C)LDfyj6i~GQpD9F({ol(%dKTbwm%iz^ zVXvMRPSFH(oD)ySZEf!?J~u;2MJc(~OVdngq)1njD7jpEA%T)#2jk`jyw*HEui{Ry zeqX(d6kpiYTWRnT>T;8;WW% zkCwZMkOnMw!RN@dXN11Gqua>W6y{pua-}c>O?~FF2fV2t8Q(O&k;Szs5^#Tj1Knuh zTlJ7N_CPlyBxv#kHK;~$pj#QHodeyjBqK(y#S9o{$*ealf7nK2uJQ$j?9U(M_8qb0 zIf--ZL2lmwpyuzmu-$uGlkUIAawRDOz8oaSF%z{`%|ZEuc8 z7uBP9n#};z72iL3#>!6vAa)<08Kl{&#tiFgu(V?#ZyEGcbgbN8D|9vArG2v2>cun2 z14WhvClAOzp~LDkElew_FdYUse|&iK#e(m-qXrIE=CcpwWpTHj|BG#~QZ+_?%^Tp+ z?8(hfVok@~_ROFdn$P)c$6Df84xH}Q$M(I74>^%P5^jGH^av>gTbYgNXhBMNaM*Q+ zLCl@0s-t^X=U%GMamfx|m*fs9EI)><*8Z-g<4I)wKEh!h!W8B8+dO&t(6wA+x4!7= z?e9<(alSG^*B4<*Fa5@RI>c*&wkcDF`b+0Jp0ao!4PDE5Gy!_n@w-Aa<~oe4G+rfa z(Fd7CdrR+IHJVPIU%#!+Hn_z1e>(E?j?S+yz=-(y^?UIAgyn$VH%f%*+mXG>kD<4n zU(ZP4B+ReB2qw%AsVg$Sekivh_)J(1!4ujKkp2Anl`3OfWuj51k*Sd2`W^#&&@c#! zw$YJFa|MV==f3#lM*294OaYFq9qvhahzWjXTA%r>x1sk+UXsB;=Tzw@*J@qOO>#TP zg1SwyZA#y89cs5csVMt?t$%-nzmD|Rl)sMl*E(IzY4n3mS7^f{#>VU@X{HVIji&Iu zE`UXS;fX5yOIwGZG-%o3iK%yJ=5yjC(ozx7D(RLpemghd$N@r@MbodXa;BNvr5S+-bjUW$9?R=m+g! 
zbEDY+-jF7dF{>qz^y6w(R@9Fe=-TsI7E7fh4{~=#0?bteIJ)PB=e2CEvEjy-cSD5}2sebcF3?Y`quWr~syocS^-?(6 z$qban4UV(jKu_hnEalfS6}=E=r520L7bv)|;cg^Z?FvY}OkG2`sK~t2gZq+j_<;9U z_5(S^UVsjuEqlGlUunWR;;-C59}u9?rUBQ}L1T~^?Ko-PJkS*w@~D@;QX@4K>#zI< z%$iTI7Tsf_{z?xig35tFrnseff8{`Bzt_Jkd^x@1)}unSN{BTSCUin0STzz>O)>kU zf6}>oA4Q|X7sn;(9UReCE}6M@=_ih;qV@jx0rATmrEVngYNX{=%JM1&uSUVEx{|yS zV?zg3jxTO)rFbqB4dg!LFb%NK7)zMSa=7G?1)kgA|85)&#qGLcZl6v}2yS2PxZP-e zgdz&KeYE5k`TX=duD}>>+hd0JmmZ4c_6&e$wxEjcZ#$K+A-~1vaeyqA&;FRS=l7owK^O*~<%fN6e}c>$-Kep?;+ zqGTIK*{qDg-s zlVeL{J>poi7}Rk7^F!;;k2|1ez>n=UFplu!4T)k2ek|afkROK}&{vlNessn2V?0(W z&yT}#P6K{?SGtGJzp;N|@k#vHMVc~!A7_C-^A5lZ`SA)j;>Rcj_7cIA=f?<@aI-Se zq`_ng`JwZ;T|6m;-6Q_7$nrz+x!8Ep_WM$!Oi%oI+-vu<4N5rDnhp{PKO}RS^S~37#KIrjWkgek;P`-F)meLs^A96;uk&zBXr|bJDr@9F+`uX zyx8YAKl~P?0uvbS+Q5*@c;h?By(W2t?76IRf#3C+t@2&phDfn7qh@inFlO`|-gML| zKZ0h1Xx1WhN!+o@6McapGxI*!ej@NJX{cDM{3&oSXH$i@%EZr4Q6(VE5_XQGqRxexBQs$9plv;tKx1a{b=frdNB zwwfn?aBP>TnQj^<=M!XyqYMvttp_664mL^yep05_Nwo?mdDnBv-t;^c4zqD?;~Qz?93 zib6@}w(0sT_`VCo{IBEt{Sov$7T=9xRiP5!4r?66_h4MX0N;vt3g0t*fgy)_;rnGs z7?1CLkPI`ISV(~HS^O4v1_0gSHsAipxes)t0H$)&6e#s{20L31KJ> z^e;qrw45S%{PCL>Qwcr3M;OIzWHNJPz*~{v_+l{P+Iu!A`Zm(bLJn!5y-l zw8dz2;b#D*XyoTdSj`f3T2Rre1tClFLUV}9X^=`vaFVXUt1vtH)^u;8L6-ge;B=)A zc1GNsKtm9pgsvh!SNQSNC^HrHlgfd_SF%-3i+5sd|Ax>RRK=TRBn z%J5c(xAXz}cF~zWw*16=;~Tc}ga9u6vKzeS=jj|BrXGYnEu{02=wDCgY95L{tw77{ zfF~K%Oq+QvaNF?0>XV4v1HoKl?0^tE*bWJt*7Kdx|LEFM8i!+gCdRdVd^G z@26pFA-!M5IC*;CDV{^`GJ0xm#Mz6|`%1|-WH_)_^-1(TS9&O6nEE)7G2in|iO~BW zO8y8-IHh&mT++;Kn?Jkf(Fsy9-n1juy?#cBhv*rwv|dnr=`KbhKXs z)d!biXC};S=_a?5S(6&iw{^YCri5T&xunB>sXax_3Lq9vU+jbpL0>4EXx?*SB%KUr zBb+nk0Ys)QtiMGof)LjkQg$UyOGU-mli^<{IHWHX*puNOLP8izbUOjYc zF+fxB18$I=_(p=_-Gbs9$oj9hwl4mgY1Y)^=#nV)1CwsmHe+&A=P~0qy?SC z+^BrqooM>N54(TaZ|EmYE|ySjRy=^_uYkK z?DX2gF+*(Hm_hDR=`O?ErNUjx+@;)IY%i@~(D*yuV5idC?P(9TRNzyf1uJ-Cm}!?| zByW}k7NLyO%xg#c{lixeUpM@%?(Lz6*%qK!SE^-_sPVdRQ}Q5~M*=7o9t86dp#^K& zFX4#sXDF`yZF3d^)&4GXF$>Ic9t(uu6d0}3I>IbuR2P8o9yzeI;m5t&5CcE!v|d?5 
zTN93pc^e>`Q+#{62T0Xgf9hGKAEAa&(G4z~-fRmg(&WYdl;-v?fD6zz3{()A(9~&W z;;>Ar(#&Ewl+IP(iiK3z0u*L828)9V=0s`9Bz98S`A}xc6r!l*swLf@!@TIOyF-=O z;apiy(QvMgAw_gJH&Uw5tG*hs38*jG%@r83q1SM(Txu*fTye@*Aoi>`DE;rm zCkp64Tap9)8-0OE^q&E-;_3eeP%tM^ML_>!`7H^&1-AD^|67!?#=kt3m-|6~eAhBF z3(SE3oBt2#zpM0l5w=AC8O8Lk!v;p_KN5u*(7#4nLiqo(D=>-v)`dj%I{(XT-^q(R96ifd%fPy)RDgyc+%WsQ+*xncYZ&Ah?|MFB`cJv>Q;@4!C ziz&?mAL-nv+0dHc5$hiaBVqK*G7~i!*&KA^TGXTUjZm9U#|4bSm5ZazPj{wWiBS~H zlnoe_zZphQqXDBSn9`oB&6zl5_;=wdI>|q6F`M@)J0xd56ttgcbCA?cLMZ48M8T9( zPY^vF;>@>clAD~^a4hMy&isy50oLE9MSGA*gmwQBub2`stbVj%mn9v621@6iyn$tO z8$;{K3>K^z#>8S=tp}xnAFW^4aSf)nec`lq*|zP^|76`32QAFjSAAWsd={_W`^@Gd zc@7%~RZOu%O_i{&V;0#nnfdq&u(CFD(17d@P;3=Etj#vsFx3!SRZZ2p#c;Vc+x-5% zx9RmWzpP3^EVCy>8vKu3tZ4+a+&O`WTw`{Hu+pH@c_y(^}hVJ-tLv z=d?edxZ=6eNZ;K_&5!%`cB7zC7_mKxC`k9u5Qb$o zHUp7~bS^#1smTySE}jsFp~eV?SZmeJBILs~RFcpS0g?`T7CeHL2CQMJ46!Vn4SDSOEGt{c(gwg zvAEw`7K=}ZXoL2mmoq&qA! zntzr)SD3RD%+q$Z2dscX*a@-$ANBl+F~qYyZ-Bv>iiTz64`qlPvL=|x67Xzmgqc(hX_bCNxY+t5kXmX4i z+gVSf#xC+|tcm=B(4FF_hfuvnzJ9Eea?mp7i>^^I5VC5lT57HsLc>>^^8lauFZFgm z5Q69wrM*g;s@{*PV?pHASci*mOv?BLoNlP;b(i@*7*FROpNYn@DlF74f$oXbF48Ji zth~;;!VV=qZ=H3ZE!C^N_0d`7?F-;vZN|bcJMn$iUvFbEXn$6>&7ai1YMf#gLe)ja zV(!HdRAA_A*wOQ-o}S^JJ`(CGo{rPgyB*}t6y!Foh+}~u_i-`gzJ`?pa=#ShLT$Ck zMQJ12>JGVgAF&fE&{iMdTZrF>33484FF~OIYEMhO3IE&m4YhynkOZ}@ksNa&)wsBt zwV3Mwt~tk7{iNU@r>_buChJ=;iM0^C&aA@jGyne=;1yZV@^46h3P@T%ch9yw#uni% zreb$5+4XZH-U_|d!2i+?vd5sCJv8_Md~N*IaQ)ovq8s|;xL&1)BANGB4-lO%l^#i$ z@9&1;W`eCL@K}!)zsPoz`j*&u)P`T*b)AaXS(#|l*<=b-KK^Q+esd7+bgs6u7yanx+ycIYC?u~qr2P;wF|3uY;7r(5NV_id%pJEJfoW<;r*EyDnDZyhE;bGHC?+GF$uhbPpCBY*@8b z>1**yRMC+%1ezpt7SXcP)Or2`SNMNbJpVU_$Wi`pP96W>_>W-q#eZq&e)50IzDN@A zf56I6RTl7{07nu3MQhK09$Wr9f5!12%@gw9;ld2<)rLg=OKl|a9}JnTeS4z(SNd9K z<;;;Zq#ykE-#4F8wSv(Sp5)k!+hoo5>)E83@!*#8&;AH58d+vj#=PJhy(!J$K4gxr zXl5q*PEzrcmU3CvwvvtRAi>x{npR7Epn60mCIJ*LN$>^1GNeM zI?-R7{k6qkC;RJIe;x0y4gT8bueG{Tbs1jJP*j+kE@F~SGf8%&f3U{EqpRwr;v}4V zi`Ybqg1PBu<)YBA;C`X*`|%-jihBSu)tTMMAT;c$FenKM8XPTuV{a6~>_MK(+U=t#~{6 
z^tGJ;ulXbL*uB!a*E%0L!>zKuEHz!CXVN-ORXtrsNe#-1s+vXCz@R%k4u6n|X&s8^ z(MZ87AQ>mBjpe}*9`JKKKUb0JfJs6F32T(#vD=AyPUk8w-$-va^00!8oE1~aWq^k# zsb$=5ap`>kRL3D#W?dyl1qImc2k3?G^#!v)3b_}nssO930;(ql7Gm{Eg_$B)&4@)2 ztnMKXErQh@biY1WJqPfMfxmH~$vjq197;(gI2wW_o9Rfz>O@NJjnzgR#sI5zQdwYi z)n;)O#$eTMG~NXWh1Em8L5Ol>E!PhW3>#=J7!*BKT+p;Gmx&=^!^M6&$^~wrR z*P%B8pt>vP6U`HtW;n&SGQjI}i&qas06G%QSzk2@Y}J`f5Y2S1{ZdqFh^vOo5#qZv zrTp_iQ|47{lUK2XswM-o-a0SPiA^*nPL~F{bBcXFwW?KD{wS)>U+euf<*%c4MPq6Y zVX-O&a+3q~I7>c_wnTbg=K>7|8r|{@B4lQ%lwJg)lim9bLH>!l?+0WV0J3=kYSx;g z#6)hW#(hcKrrwNfh{b#BDNm$!e=cNQwh0L(cphSjIg&&u|B2CTcZ1aK1MWPGo;F#)k`NG6CqQB}3H`r(qM83R(B>AUv$FLB-&{3`sFtQfo(dPMX-d ze`zItEE@n+8)-_Z9)t2r6z@pC`5deUy3)AWc{ZNqwIy9?s$KLklJ(Azx zhq8e3tEmbb&=QJd1dp!#A#$PNMXLsavJ)+6aKYn@>+ zM`~PHqF)GrKX~pSmD8oH$W8@W12xb7Joq#*TVGYfIJv>pW6L=I`}ZhsoqHn1s3U4> z=&h7#t*)6PM!@89@`%~8@U5is)UF!hTFtH9F<#GNQ^3YsL%3?Vf#qiu8^4)vw0Ufh ztIML`Nosf^D9ZVL$ZL$#dzBr-kpqOF8z7ad zshAy~!dr7T_4qS`wEv4cWy@)t*4N~u5B1!mBKA@yIB_AF0#3MVgdb=Q$vd)F!_Q8B zX3DtHMn5SEv5VU(ZY9(arjV*(Nx&F08Dyq&YqE}~;wVoYW2`sUKi+&}!@fEg@MdZ} zZw_L>zyU1y)BECMN7;v*(13pzXg=J>4TDrwdfy!_G*V zFFT{104*fNpTDw$%D7CKXw*vhRLCE<-W0thO^XDK#0Q$rPqQ`FDxJGzy7hX*5IUJc ze!@c<$Kv6LpID4LxgaJfF7T~VyrQUD%N2Ka3s0?o!nICsaUJWgc` z{!CO{Gk{0^UxGS6fRF<{R0$+}9j<)$$zm-Fr+fa;z5w~_@$UVB8j{-eeiO{IIJJTJ zKkP;AjMzrRpv?Lw*GgFgt741wz-qv1enHJ0-Oq;p$xBKcujbI_xiWwy`Yiv%``7+k zP*bCBHSR(+MJ$Dl=5`cCI`^&LSr^2Ju>b=U%A?^u>4a5Ebs-BKX?EeHxiCn6ikyGIos9ZQjocC&59z#7e+~SY6O{?l-9@Ir zXej=S6t;pA;I3onCIk5inJHB@_TJfUepc|-1@5h3{Ooi;D>Zk)?d9B>Tfj~_H~hCY z+#|C;$Gud|&sqFr_M(wXFsYGY`LF88w4j}jS`8O2S%vCcW}rjGvK7ydt!cY|NP8LG z8`fS|Mq;Jg9Me8=(DP$Qwl_#dtJ)_IX|JnjZywg(P|khz!j#580~V&Nby<)~4QOwu z1Y&pEL}#*6b=>fZ9dNfdRJGr~Fx8ME$?at{1~`culzdcgM{exM?lV;>Fes=!#Wz~BtQK4 zCm2ZX0D$q6Rky27AYD5c}3sBPZ&FjR3QdDwScEl zKB?0^E$69FaPr`+}C`XNad}yoAZ%$<~48840FF znSt*In1WRLc1N|tl-2RVRfTY5p*5^^3L9cUmV;I07MG?{f|dnUl|$IVl!cGO(;}L( zus4vv*(0R4Hw0#}yJEY>bGr{K_owb6l!|Djg{8^qQXgE-|hBc#ZDlVU!2ISt^N0lw1Un^3Eq 
zVKH^!s8p|j-sR3U`?iYGcwEp^ygq=Qu7SQcqmh_&ZhqSC=k#jDs(iEQy8+vmnY8X`Kvl!d9GW&B#0Dsz|eA@;7qU52?!rMpyZVxRTWG6Ow03L)Xu zN}}!Y7m!N zhAnn$a#%y?mw|0-mw^?V1{U3X%Ca$*vc8awED(GRdiFemBjwlkUFJN)iuRA(LY(i* z$4W~-6)jLnScr2CaZ48BoGJ1@&~pz#&x=VB3*Hbb>=K(1<~$rRC=BTanbV=Ns@dLG z6Y{>-qSPNs55%tKnge>w9#qr)sSWOkcULKGJ74wBsk*oav@cv@BYB0tbuhHnbbw0E zKR?g;XEt`D6JypLF(gq$#+Y&M^+!ZTz7y-8od(^le|8ewj|#|9UcY=IWf8$|NTgot zYrgd%?%togk8cts|#0AX8(s{%>g)z zW|1GG)Sk2?RzZpp<}?jhHOI2B`&_4JXY+=TBz*B+)=zC*iUSz*xLwi+ZicH>eNJuI z{i>%(A0!Xf?m;5VUewzCS{OOFUTM3iB~){c0$e3JkK*^cpo``=aNhjVCA2eY2llYqxbh>wg&yE{B8M&Fb#ijQnC^WkU0NP0qeoJ69 z#W987Rgk_$-ElFDX08fUv~bn7NsV>nn^;kks07O|G*RMLN=tgY)JzI6bi2l%uK7Xq z?Fgy7Uh^L=sg>>oZ`F_#ng6ij<0ukThY%gyh7P(Qfj4oE%w;xxh@2WkinV;VJPRWS>hZ=CHMSIrHvCn<&Qhn*c_&TE6@$#^OlrRsZ%KY$v-E8QK9E( zQExPmf_f5F+H5azBPIKwa3PP9BIr4kQiD?{pjxB(Haa*Ei#;7$%rkGq)fVF{*l^eo z*c>z}1{uc!e6s}G+P#6(@r(E^Y*tD;l;{(P@ZqqnR7_S`A;+tL%8rkU@PQ^=OpnnK zI%;vL=%ra2jn2DVdM(bG2K9^dEDs7^(lnYNF^?zaN%Vg@cSMWT$8$&_R@=%&P$RA@ zpb0hs*xVwDJ4x+@y8u^|UGCvVl7*PnR(8rq3vfJSw%5or0-3wzvh@=(djhu*GFx`X zF|p7k3)xn%o~9RMDk!x8GJ{cq0c6&C@t$eoYKwu49e_RUbdD;DQzpBCcJmivvK`$6 z9cE^5Bh0)B0Q!L$Kh63AI5x*BE10>3tcw4IV=pX@242SCLUI1->2{~i|7g&)ryYA_ zRB-PR7tz3GdTYl0ht^DDB&QIY*-y+>4F1#m*6qY*h<$Ign{q1JSG9ix(_;rXhWuQ}b#0jCa<=pHAwEIhe`J)RHr+{WFZZfGq;Gy?XJ^vJ zNh{dKScOxthVACpso<-=DUtKB{-E@&)Cwv(Mf!=UqxgQJuM5s23-&j;?-e7t7f1-s z!U&~vt4~AX2BQpzkmkZ0qk{0pf@!OR&Abb;Z004r;bK9;A~2dScn|jFSv?B&HDhHr z^ZL-cS@{BOUXW>1pgoh3Xe|s5U9okxbq#6XIQt!a7PA&{stpflDN~CUjwyE+XXnQZ zb9a^QGTL3NB40Sh1=mGQ$u`V7Qsa0P8^%;BxlOfySRm+Mb=#fVKMFQHb>3!$dr3AD zUAX|3_1v!wPrq?bABu|pDSCRRjk9bq^$Hp#@@xZaGlL_Wg&{1vMYrSJb^$ODYElvE zkwiqSP;0|5#I9{;F;AdJ9>%rIa#GIFD$~h(>RPpwcc5ApP+j{6R<(4>3q`fKRXGcK zUPXSMbEd+K5PDDx1ZK@5ct<%B12a#OOM@q%7S-Sowzk33_>5wU{kH2hQXAV@;#ws) zgoaZFN7}45YhD4KXHZf1O`+@giqcL{0SztL9W z{Ivh==T4r6Wr+5fTHEe9ry@na8k9M6z&;l~=5?+;UEy^} zV?s1-Ux07!z)<=nOy&j|O*pjy;7x8gde)z=@X<>YaHg}g=21j2Fm)EME*>!*wZIsv ziwlaS)~rSv)48*L3UcP!(YjP8p6nc6Kp)p~QLsTDewRl}(g-DS9jR+Op365;t>}t| 
zWzLb7ezqFakBT_*T*Iw$Bv#sWi1%(OA*B+yI{oQAVeT7zdZWLN)zwVWxJO@9BSyvk z_QQHkaqD0+oV9f{`nuA-9jIonsGauKlK~l>#AVg^K8uk68h*|);cg_tvO{J}IRmq~ ztJf|THTYy6?{hewh^)Dxex`tFUjiqB@RKre1h5C#V@Ra-T_3Z9l7UdhigDW zlwh2S`B>tXyin&bB*rWvlHg9~dWNGYa3$1f;i>ya)~jrP1|&6zS65>xx(}1_E8(M82#GlYF{mq6qx$S^0*jf@-2@RReknojx^; z%&CczH!F8hnS->|8k$A$TOr>mFH?u~oXmHQjkE-wKaA}3?e)I%@wL*4>gph_FPs@uN8e>*$M6N@s zAz9X5qG@J4HMrfu)+-^U% zGNkY{%~I4`{u*+m2rd0gk;O*78yRM?GbKPgkLr1(s5MCi`d>=mLGTa7n(*@V@l9T7 zqI$hL!CxopYW|}4+!t0cN{XOoT_`&1Xw>dQKA4=W_7E~lgtSz&yHyOxml`!pcABCq zRU}4BtKrIMp!@6|QrtdNEojzbt}XsL*!vBl3{Ke)3|-)dx7tw`tx$#9N5L>@}VO0 zRS&!SaK7pu?$h%HIBO?ia3{apQEM;BIP4~iVJp{kzz8!%5`>!qJDF9Id@1#gr%6?c zwSBX=v-+W3B_Rl}>+uY?0dTsz#MxLa?$j85VqQNe3Dg8jo>t-w+iwO2kh23R*Rm&{ z$hWNf5H)*&xtI54w6;Z2(2%U3?YbV_rlxCvbfp*817q75qZ+=e0?!l~d{<&ZvI`Iu z^BpSh=T zZuL?e=8~t-27EKkI-WJjTH{pSv(|OAT@xkoN2N8RLDo>Rv#uJs*IQjN355{M$Z7$jMB1AIixk z-1nN-6%ghjplzR^0&HXeCasmkhK^1beT)fK1=FS3%&a`t^=<^~8_);*NqVG-Cygow z;0e&g$R@eBZTd)6Dc|<#?w<ke^;q4AJ^ZKAulrlf63s!^$i6Ejhl zrb;9w6{W0WT%ztF@_v77t$jIj_Q{Nf_w#@MC!fsTd+l|7p5ODVXFcn(XNiol@zqtx zb1Jvp_>CA}{bL#UWqc*8G3$)G%8eaN+3{**_%-%?XCv&sFF!1tCXb^=!MI|(Z;hlN zW`~x^=M$UhSmtzhI7C{^b8i#R50U=!H><+k5NQacf^VH-f;Wef)VF=FAyNcs1;rr! 
zC48vS5b0SY$_|mL36?BPaX7KoTe&=o9;WXi3Bpe&mGTwX&=6@Q8Yie{f^Oauu$y1v zzA73bRnmhL4UsOOulR^Vq^0Cz4yOcF`Z~s;(Gbb6zv+$VE@U*78+Bq5p3&j5E6@Tl zcR@WNvX6%O>u_DsjOK$!9IVkBLN>%NCt8D#)<2)XoWGI>s^5x+0HJSLYRuCd`Weg; zsmB=BX3MOIp#L%W5x)H32JkQ4STycgg18Fzcz+uFp}*DR3shz~ zlAg+q9c#xk*6edzkNtQ?BNaebPZB%Ik!9)35esBq$(*Sm7T7Mpv_#-N!56D28qZiG zhN#V6494kd{;qhgr_TEI4{qk4fy_c8pgOgyXacYw${@qO6b=zO6ibKKPp`Y)-r6z5oalCTbUQz~l?kXH$zlVYP8()JZ;5Uf_*?U# zyhm;+Q8ak@HE#)2lgX_AMc7P9D8ev!8A{@yOgDI$@&KjDg7bryDGZKzo)Ea`8lVokJ_CLDBhS>2* zg*3~n*lfZ0WVBxQv2+HhSwq1ZpNxxyT|u!9?kG9gH1^1PB-!zatJ#+wHEhj-@ySlE zmow+fD8oi;scSJ?VDbk-{-GDUTbCwPFhR)eT+RxqgrK^`cG?#nLZEMvKjlU@Hd*e+ zCZcaJKCx}}c@%Z_Nch!me6k7|GULcg%3;2Sk}vzQiIpHHU2GW@vZ>NCE@q`XOSuz| zWtzeDX{$v<`WaYYbi2Ts8W5wwk4v=h)b@eydnWXVD->^$#T(=wmS!&5ElO?RMUhr8 zFLWX5(@+Uo17Zg&O+H?sO1PJJ$o2@7)C7>}ENzWRoXig;y>nJ*OcFXPCiuJxepdKTuoTH%g)Wd_ofkBK8HK?ubCGpctrsgxKbPAUU&R5r^hFC#z1*9*~4X2e_z6(Qx0O#9*9M$<=r7o&-0?ut`wnk#>o0eec9g5JH zMRb|@KRAF>fxQ(rSKTP3m5D<|r4=sViFal)H^U_8T2RJp>KFZq;mp z;db~ED&yyR{IGfxW3|&R`Aca#?FRH)v7Oc^+bU)| z8c9Y;fAtQ;j>hZZe9xxJ1cC&e#~UcU-o~9D%v9&dA(-zunFpBO31PvGy%g!U;00o_ zG!hHNjfusn)H)i>OoXm>Fmoiqg8I!4WI%nQ*`EPH0QJ2voCUsr+)q7R1vWI88Ppv{ zOIpZv3IbzR&|?%0W`w7r!OU;z9Y5k==6&)p=c`Ym(vM=YMuV9md5~H%Ymur{?#E-S zZMT>ZTKTvxAIE&p=k7z1Z{&Q>-G45c?~zgJCwhXeMlvT^Q;!kIp4=T(oNp!w#qc{s zHb*2=8#&)|;G)v`P-!iM?2wkEgT%pg25UbRYBvT!O?4UI(BgMg6liX!48*J$R0x>1VYv& zAq1~yQ}g1Amg-MB#n;ku|oO#|<+=GxI6;91gM` z;yHP~=XG@sz2|#oNrH;zd%Er+G@S2Q$Wy_5&pUeE|M{MVNI&$N@A(iLxYT^lzyBE3 zEErEcN_=&`yQ3%|Vb;(0{1M$2sD|6UupXCK#{|`2=X+*|45jCLmLn?W7z*Fm`JSvY zs!%J?Cll~g_Gi9lCX7qvDvm%~d~)+WH{WfCP<_t#TtK{p`5yYxs@PD<;85hrTiA8L_=T>}FF)=Ba@A)T&R)U=5WPin+5qG-h zP)=^V#Z5=($87R~oG zN=eAJEtv1|{h!&>p(nupJ*bHQ`@fSN3b0O427S->bg2B&^F7c039OE%*Nyduk0?y` zy^68^`aVe0yFZMtN1QMJPprYY3chZxSDg% zW<1t1WaWUBJFGmkeC0{yD-W$$c~Zs7LkF!qY0%0;D_5RWnK`s7eOOiI(CYMI)ou6H zwB28Gz$54V6M_c!x1NdI) z;2Amqh2`&}5mt{r%{PoS#nMC;o-f<%|loYa&Bo8EU|x69;_QGK_WNABr9@ zp(eXvcTqL79h9-7mdEH?qzB(RuSZR$=`~6|V?AFtwbq8ViB_81iN<@KepAWfdABI! 
z{IY#s;k2M1;zt1=LzVG9iZwL<&N?l$Xj_Zp={7YBInZ+?kiS%-AmJ7LQ#U}%Ty9a3mwo>H?(Ej%%=ihz>69}!LWDE z>oK<@$)^1dG06N5?~iKuDM=1nZ5cC}>onl-*4AZS4EIlLL6NL?wVXssbEb4cc$A4- zNfAto^ZHMJpaT0+vTe$*A)$Ha_o`0%NwX8dkTG;(e36Q$*XDmiq`oMa?&|reqsWOW z;Bg^>rVYg*8z_TdeBSo*cry_tk;*+k0)3$4F(BR*zf~nN4dr|3W2&{VNnLK8uJ^sW z>6y!{X!^4)XFN7|`Zfq;YHi)ut|%XT@x9zkw=p;j7! z@6UWOzkrz5f1f#FC`*Pj+i(z*yE~RMa?0Dfhij^P-QejNA~HmMHS?+Tz|81!gfBCi z5C2wVMh{Al8k8AbnI2V{8C{hgRh2oRn(Yf?t1~Cmq{r4|PN+?ft<9WJSNL7v^w_%W z*oyR6EoM%S9oka9rmbv1+ePJ@48BwzO|OdVARppA1M$MD%+OBXK+-4I)gtM2X5H-~ zn*SCtfuF3Tqx%*C#q@sxxjx=eooT77Mb;(XfP3TC7d%E&O06$=0YZd+hT6bo&>>UD ztB&Bq$3VvVD6KlE&aaKs!CwUqi24%B5oI!k?r=(iBCw}UWit&Z5)h+(TT@~k@;bav zK@nVQ$g|At&3+Q_L!a>yrM0L7|NJmxcqHh@(injJdn%kN+KB|7KTUKqM}zir1%DEm zYbhcNRuz3Kn_GT_Qg@kF-lzpDoC$@x7-lA;9s_kT!K;%B6+$}_{diq1$4f*;N~PGg z+NwkgXLvV9mHJFlOD~n<+J1_tMR8tJ(P03T|6n-P8$4{tbAloZKo_QS0d_^hZ7m z=-^Mt7y&l#rE>7@Ot*$x-0fqDj0XA=l2 z^Obm-TMjsS7ljuah2&{+xB!Up6t&c-Y&%J zS*{w{&yfZ8k{=Rsnvk|AsZkTTGq>EzIOk}HscnLLJg7~CgD=y!H6g|!L6(Ogq;|4N zkv8#>-i$|r@jEEeGXyHqs--TcDxoNr(^~*Czure$=p&sK&xw{UQ>5cvB=9#>)G!$$ z1Not<-`x$cVyQ0no2iiia)UMb0FY`-znW&wn)K$&7+T7R^J_dPYh`+A zrq680odq(r*n<1E*K{yH0=Eu$eT30)qX%`yt?i2`{&E~H)?gZ->UMGb?F=;XtEvIT zjFQo24Y*0>pO{~&mE2{IdbvM8u^L6nrK{x(QkuDzG_rNiJsHj1=7$9%*@`dH+|o;U%^& zqr>es?bd+t=!_WGx1x8oR6ZAZFlBdvGOR8*ZMnM!Qixx3dKFQ`*X|a32*NVx=nAw3 zUexD;pt{?j@r^La7FRAU*<~)1`OV80p*_qi(FVE=f2!p(z|5SaMAYgI66yTdG5aj0 zxJF1Aap?tcnaZ8M&t_#Tf}T*uMq7w1cUTuUZtvbF%%|sn(Th*Vq0maVG2qj}WImma zdn4l0INS9FO4>*4e|3%Q!#-LQ(J^ix?L?il{}tmfZXJC}vQJfi zcZ+Rbn0EpJ-TqhF2LThkO_dMZ8U%h6*z;TK5p_e+4M!JSCz=kHGkd zc+vL;Q4dp18SCE3n?zmQFnrNGbPvtm)CuQcs` zF(euCUgD7CES#r3v&n{54RhlUub-N{CV2NI%Z}vnKzEB}*)ZuaHy&^^%z?)M&ddZ- zW>bpE@9Ff?JL=+iaWn*gC?)ERUh*s(tqksW{i4}{L=nph&hz(Y`O`Vg7tI@h9rz!L zkHGbYg(vuL^CvN z7#srFs3mTrly zfqz)f{W#~}uc}(r>0on#U{kBS`#)mE~Y2ij2LRpDWJeXZK8QyBUTT!|9E-KD%l2Ck$hd*bV-o}H7o86KZ`t=E3o4E zUc4+*Co%&RI*m}Z{c$|dODE$N=98(s8`E>9^IMG`l0BimdvvWW zeYAB%bdR>KkM7ZxHc)@cGNXrfkFL^Pupj=cYgG=;E%w8Yj_-%3unS?M_p?wOu2@@z 
zA*z{oy#~ws-joN^FyeGBGS&3*8!Ss&Rcb%{tep`Yl_X|$&7u2Y?UL7&TL7uFu^P@| zv)U{aN#sQ7_9u^+)8&-{SNA$}pa({pW90<&@EqlJ&#ONtrK?+_bELb)F67%o3N!Se z7U{#O)X2O60b)l;PE^=4A_|n)pK1V3H7m%bXbui=veCA{AJPQ%Q@Le(6!rHtCHwnP zW=A!tq)Wl|!khw=><{@O6}JA6-7rm|S~$P7t>m4S{Mq3>?35FF{6j?ew9(X`e(rppM88MjUw`P=1xU=HN<^KmAyFdzmcjH?u4%U-`jtL!Wt6!NbyB(t z0sX3R#HxE#*B6uY-{SSL^!p_u9jD)w5@_mrP_Nue(eFIzo)Y+b5Dc4LBR#K-^F5?D=px@IpkM}(|I&x ztfMZq&M=SZ+*l2RFS{7OR@=IPgWJc-!4}L9-fu5tK4;} zK#dXQ{bDbXl*hnz_jTrCcnn;3+i7a%e6_}{cBd1agnz?%0Gj2H`w{8AL#~;gt|@ZJ zo#S$Dpfc7F<7y2v@72@nZ2G0N!6Da=h3W1bne>Xirr&=3s74a3W}b z@3%~NKT7Y<)O!&;KZm=>-zWye{VL6AcsA?`QcNM9yo7&T;Jkzbl`iHb{2OAD=MX^~ z=?Wj|q2d-Ar*|K&nq#3k6z3rT zRmIjOs!!gJh9nxEooH{WDMUr_$nE6JjdRrvJk^NB92I_5G|P)xb~ z9n;==3BIK&@=T&q-tZQyDMz__T2*4YLRn2F*`ob5qIP z5-XXY9rZCMbHX(=DX4O>xjh$Dn_Yw2{{Dl&;O}D~BAgci(%tDDa|vS*S# z59)tDPv@VP0>Bdcs$WG@hV!1`Qt3J1o*E3xv@nh+I*;ndylo5DVb%werKh<>btI}~ zt~MM;{Ll5?WdrrzFJA9(zIHYJM%$8U(fL>62qHQc3a8iDvBd4LLufk!bpzTucm%li zQe;5~FA&0xz`SrHx0n!)0RcUipdEDZRIQZ2pMIv&dO8@%=v1Eu)!r$De;n#~KCZ-iM9{ihe0pI)BSgai?_LyiR! 
zXy4L=HCmg^Mig5qIe!nD(~XB;r_yG!Y6HQRXiU@x!8GJxEUYtdGxCqEMx;6aZ$|{t z4OW2o2Ha%g0!7hCyPyZ*sU@UHKF#I`C$^Xl%c!}srmv$wB%m0GgarGMP z|FZL&(^>F>hiVIka;e-U+gq;cBqm_hpvxk67?TV)>?ll99hoG^qGYJue}j^5Z~uo{mm3li|{pgwi_I$c&xPe>r`FMDe1NPA{rk*T)7%LHm-6@ z`ElE6!Pm&FJKUSl6~tDjK*)S<30ci0JFl*l3r~iNkg2Bes5n&Kb{sq|)LWLvaJyKy za@w`vv2{{SU3IgW#plv zk_{MYZhia+Bx@De(M2CJpI$f4g><-(MoO6ELgu)TChFVjLbPmxRO3L|6c;krg-k$Z zr@D~&T-XHD3S)_??}{vSFBZC}`t;~xZp{b4HI1Wc4!#&alj!#XaJ_n2UVt#L%sN+%9oT1wAjxZp0ShF=`oIVw@4DM2P*;bJsT3Knw?~5!BCVqv?DHF-)c5TQcxkWOxG z629?5geoGmx(L-S!W1IZy4$H%5nb;N$qN)wbJRs1DCgrM+awgsVzNQO@TIVzo4l3| zA<{2;AE=5}8BOjBw4{^@B;_vt{M0c0Nd@T@D@fm<^p1x?VSBrv4#i?rD8BDNP-=+~ z9w{7;r>Z(LVrDO$a{&R6x{3He++LD&j(wrwRHSnh6w^6B#fR0??sU!wKsemY|0!AP z>JmGdS(l>5dHtww*MFWs9e1+i(D@s_5VXeVAuaw>Q~tZ^xG)Bs%F7k*WzhL6Lag(b z_VJy+(4N%!V-530iKf|WWDFp;(nMxDF0TlZF2(v%d@_l-W&yIJrF@R-6)OM|LtlsBatZ+8>8b1kESOr?54k`tkHqJyXpUE zu%fQpi_Ce`qi%Y0*B_g2JMf|Nr8mug(F1|VydbFpmzR>PxaJAn^eD;TVyTAGo7lES z{>&@}1-_fM_H({55TE0m_^8Q2CPj(`@wv*G(MtfDIfTqq&8r!RI`Q%6C;We>zaE2L zO64B@^vB;{{{U?X?>ga~b#3&gzdl%LK4O3UlC<(HYgv5M{`xPn+x}ncug?NmRC+OV z4TS$4DGO)V1VPWcAW=UE`nwB?_pSF5RJw0n-P%i0?3|*U{`aj*BSq1xZ{3~ludr_& zsjL(F)J;>?tK~IAu7~4ibsN@5coCVSN9(S8vGQqO5jS1JDmY#PR9%gF8E1Uy$;ak+}`! 
zk;?74g`MMRg`bRvhggZ?&>ufATx5-A|6wSsc6C8?EqEJA%wP< zA}t!c(2($ha7d`QvA*UxJRz}|T>say@SFT{}@!b^P+ezl@IZA(Z!7-=BXC=@mlq%Xtz`9Lf)*%;AW;ZmMJKm!+D zNBV1{uI9FBYu7X6Pz~1bOji0x;t;O-o?<-)ODn;EM(6YB)o=lb;F$`R^V{Lb+;@@1 zP(y&BL#4?2aln&bmvJoh2w`+34^VqPMSvE2NrI7mAuB(Ek%D3{x*kI$Sc(MEnoS0k z7{KTvFQ0oPmX-jc1|i`@r)yl<)*vldd3zg}G`p!-t#4@yQ7(^Yk5 zFFJGv)#fV_=k)_xek|T~O{IiBlxM70h<${l0}{r>`4MvOnaQ&9Cws2VY7(sr7Y!Rt z_XT+hH}h0HC6-Ha><)@%GY3Tk7{ntp%M0T$)yQ>Fi3UR#E4RSU_oNxZX%s2CNY;6M zFU6VzC^G-Lm31Y#O<}wFYWzgQxIP-rezx!v3BB=26HOW>rgEEaVh7-hpoj1YP4s01 z0WRZ>aEH(ZA{d+0ix6tzmx#;|mKU9?8D=j9tmGeJi;@Vqqb;iF0UDryPRk9|{*Q$o z?F7Ml+YE)R|02TnM$cz%@q^O%-U)6N<9n&pOG!N6#@hru|4hLFo?}yaC#$~z&!_!} z_Z+g;A|AheUf*mDVN>uykY0FRNA5i{Et380;CFvWh(4_IZ`V#8elJjfAdBK$mYgTP2ZJu+Ss~JHyMIAG` zI0UrwC(ToIXT$*B3Fa=Olk=a}mrLauUS=0rTaivhn%AmUU}v>1n45%aTGj?xtl&Zn zt8}?qJY)9EB3PnHjAe>Z;BZ!D^&|cSokT105pk&t3LHt3e3Xgy8hLC;<(xQ?Q)h~g z+Tx?ie_?O&q?V1?nVyidTcEXDb)|N)}_^5?@Uuk}dOq zcb!-0)w9W5vd3^pvUj1(_6Yd}=fvD0gT{Or<0_J<$^9DKn`uK>tzzK6NiS;7kz@C@5csPVcCxBL zF?V9~^wsP*@SWAsar{1u?TYd7=6`J4Iz(5d+`Z zk}D+?^7Fpla|V8j|J{DP?JV&<`8y2v!i2DY4NbpEvYlv-L|2kHRRbb08Vvjzc^|It zcc!8JE?uR1FXg!}`E9+yMYt)9`ZF zd78XqJ<|yz%}>yNaMU*A`;v4ZvAt^KN=#HJ-ZVVc=^a-wI|eQ=4R5AcvlB(eOvB3* zwuP_8sZ^t{hR01q2e@AQPlUHKVG`@#-YM6VjKJezSDsm56-uB!Xk@81a73vmpRTf?hdPpak)&EPz;BQc@A)> z7SPWySXgdmsuux=4RMoh*U5deBZkA+2N8sT7!LPDJi(h=A!czu=X-?6q#MjDA?yr6 z!0Nn_=6$JX-=)2Vds6!4>hC7iB|-C9lkUNDh#g2svGUsiIGZbJa?GR~sIZjCKxr#A z9PC2^gPyG;^|D4FXsOj|_4W39WadS>mQ?PHx60+Bn$HWgc|C9&nk};3a7ryy1gt>J zwgtNN-QHwt?gbW{dcgpdTrDIerIfssLL*_A~ZhL1z)(7e}FU8|1kA#cd$h|0;akNb>5sPwueL6UQS6b*> zF?(*b>^WAx*F`-GflWU4hI_>%!?D}_e zhC?1G0sqt`#J}W2_($}8k%=1En{>-0vO&#?f&Y1=$Xt&%DF0|^(tTQCGYL~Mt2IvS z!;%ev@3gP>zXyB&wCvHm0%=pZ@BJ6#2m62-9(y1N!&FO4Agy7??qDD+gttODAX&>L z*$VLX+oc#awqYoZZ8UO=X}5pNFxtTK##3da1I6BE_dE?j7 zssY0e_A2Cs(+jgtP*sOZ@$DqVR>Da80XQ_TfOWsT(c#xTMTnAqMO{tF7Qm((-Nfu( z%H~!jg2Nxe>40`2W^edo6VQVYdZNKZ#fgr{5ql>U2!B`Us{r z@F=D_r=L?fMofHa zQ)>M;v7>Q8Xat_7;J{xJ_y4IsL+N})-FN8!n@9`;0uIAicOxesbT 
zLhdmmFY^BZKx^d9bKR*amthz9|4^8qLEVR$CD{i~!&IYw`2UJLjJ#R$^RER{)G8njz8dTrNK@4 zK+!>F&u`Mixc?7#Q4HG~@BbtJQuy8pZWiNvskBT!*lF__+EHkNAA7+f2R>*{eP}wGzSVwf&ULv5A<3j-;dN1Zq7=qOM(V= z{y+7|fut4pnXgc~nMBDkV{xUznpGQ0o7WFde!R~t;s2Af4^ zYXqWMJFN?z8!?1v5KeQ^}KA^+cz82>2SPssmw5}C(L z#S7&GiI89D|Fh#MduV6|^E~Jbt|Pd&|4$07!2fqD*bn`GlJ`EWCJ?sR>+;y)=L(A1 zivy@^f&b4nCiMT|w2`Jh&>_zni;rUQh4@F$Sp2I#{qXSLF7L1K*h&asMAw#s6o(JRUf269)c2KisK>LjIevS`_S^e@RY1 zSri`oGu>@qS6UQ%`|(kHJAwUps)h|4^8e9qMS3yv|0%e)|4$}I=>Ho}z9kJaXYIw# z1urx$*L@!hME*Zj*Q>YZWM)G5Ot0({6x$(!Nsc;|GA%z#zh*x3G4DL8; zMb^-|!HMVh9@H$N{a-x4_X7a-$(-N24NWO!_w5cLLc1?iRm)@vX*&f6>I5o_zP?&Y zwemYjfpa`xtDyrc8-s?|rNG=#l#ED;%3nSYWZ%ebErkU7fc7M$O^*!6~{+F6BJI({<``wk&fCty0g zls2jmM%{_N6n}p2bSVlqGA~-?utJqOj>3vs8lB&}Kykk0D|E32m?gUe#yiR9_jV%j zsa)-|Mf!6?=hb`{I+a#IPN$VTujWFGeycyvV>lSspWhKl(N&-Bm3t}ud4z125-Xan z0br)emK5mEfjminepAL_KlJBVW$}0Qj@0Q3BntIs!FeH=3=09EIe~sEm3#6T>yPau z#4ikNdv{>4D6rsM-}Y~6-E43^hz{P3&IsXbk@O(;w;)@@@W=UPqvL!Vh0F8ZTX#mt zWcFFME%WlCq0>0MWlLt34Vi${o)6NaTFJ%YXB2>zJ0FAwHqt;fboDXrI?f5nNPkSn zS}Kc}H~PnKZekyU^Fem!kwO}Rb97k%Yr8aRxdtZ1&Iw78&l-1dZ?AJge&|YQpi<4$ zQ{J2=U44l+@cJX46Y|f!X16W*JjWIPyW>_EQ~R&-vUgXNmo4M}T>iK6e;og_ZCi7` zh8jSj6YMFb*b65niBE$RJ_ficzeX+eL9xkCW^NTFD-&nkD zV)6cYEb;V(NguB<7H`{FygL*x8^!x(EZ$bJc$X;NcY=7C$($F`!BZdSg_Ivfs!7Dt zc_9~1ap#5bWd(uKolgvjZbVG-&l0Fer}{|8D)U&RA1l&9KBt3-Bx36{;lIao1GdK~ z;v%10mCfz+j=`68#G_HD`j7kt1nK||!8~16r!39iohmjD%5$L4@!Ahaknuur%?L`4 z6hNYeIm$Tu+8m>Am{nwdoIHXs&4?N*O_A}mUdhD9L0Tpf?V^|u_q6l zh{Z75tOEqdl8)tjkiO<@GL&BGVo*pfXvA^mBuqoXY>#r3R1htAyNP73>WQ67cZ)qI z!Cbj#Oz@#!-toYHL!_*CNj;N5YVMSQAe2pi{91(PwN$oy{< z7Ey{%M*5-zpGF{w=8yO`Y;m4Xzv5YZS`X<j$3ElUN@Z6EAIpX+V$qY zlDaBkoxKmAt^E?LHl^$PpLEvzFRT;Pnr)WgBWQl1IRw{`SI-SvIiZ|Sg}I!oa|_+lldcOgUvdzYESVhLE<~~2(jTF^O8-`` z(i8AtHv(NP*;Wk`){6B|tT~+m^INOGw1+D{sjy>J2@1IlR)dQYO#~M|j{_hsKZ~Ji zD%bFMk^F4vJj4u&sWc27s!NVN;h>+p6tytF*-2fIRf4CirIOReefU^Rx7O zx`jW6bG%MEtaLlhmOj75o^OT^JkTrEs#$F6P1657?dWxeR|dO$!xYS zP%`D~N^LEiiLZkxHm_&aqGQoBvt>C04ZC9NR%(>j_pxlCHa&oyzZ0=%RXpSnD%tZ8 
z3Ti)j!NB{a4#G%Uv)M(iIIp0#^Sa~SBWS_~W}W@QmUkqTr6=hc8MMo9ww!7*ZeJ#FE=k{2|KT@lQ_sgC6~lL z>%c>;h}FYIx;d?X4TY0r1#=lZ*ZXV=M9#e3Kpvo48K`Q3mq6NH4O}jI%TEPTZj*N) zMEu70i`O1f8o$3quZ8#xMQEIqV<3Y8n zAi{cw{dp(dTChISqrWm;#d)ew)b;7-i`bi^m_So2oC1{gk|cNb1#&kck~;;(`u9q@ z?4C>EYbCOWzUnp?qK>WXb(uF*C?a&E!!RN=(maD01|swiv7FjmfO_%OC^7V$pllXM zS(IKBxfz0JS}8LBzC;LB5Qu&ctre_phB*0`oP6nAzj~>Fqi_fIJlhvBI7j?Pq3v)T zO*Gr7aa8_nyrezyLk9*;&G_#Lxt-uhNt5^R zWf#?1qEDu4b5bjZAfloji?!TJWUUN3GjoW782wF)&tezMeK%>x42j13#N+%maEwjy z&%I~VC;0G*{@S9e`LnQOKb#fphS3KR37JX}8_r7U6>}OfZKlB{nEY4CQ=ig6*{Tan zqdbTgE=Rvx+O+R|$kBSqrBbbv90zrNiTR^cLa<(PEe{}IzG~AC?@dc24oVUehXHs; z3f4^s{MM&XIvLz_IL0B1;flk=#4-o{vsO~Se>CNZO|QK*M4-0_`b>smhbB* zZZnx8W9ubXDQujoK{4~?4DX}mu3L#P-uCi%z}l*okFOhJ{tRhTxq}|GCU6rC(i*!# zaBGS%3J{w!)7ZK)*}`1gSc-;6aNmxN(um_Z)Xust;L*=ef5fj0R>|fxbfU;i3mz5a zOP)*$7qGH$*m3}Flwf%z`(M!;0e^P(V&6?! z@$3_PzEk@N`z~`o_X6h*ai*Rb`JUTo*(oj=1uG$ z(iWXVo>l8)L3m>y_=M+>uNJOR^-~XU*EJ48_8fBNSD&KpAj+OY&f~uB(atmQIDki; z*RA;lSt7u3G4c_cUq~|(kvzX}k(;t;pfZ6PZK9rK*(q-x@ZsPYDPDi%`30RrUUwk# z3#&U0hJQmIC@-tvzgbdVwv_*K`9G8Yllb4r{~`RZ;J?XfJ<9IPJA4m+Jr$~edv4AF zZ2jv%@bHRw?Adzh7w8eQZRaJ#y~D;8^W7=*gD>6O#{G$pTOP}|-Nv=&oly3RfS zX+peL>BaNckPYXOU!3s%QN90>dtY>JI`#jp-k<2+7oD5V`%Cry0QbJ=+;rZbt@qoz z_p=hpKT7ZUl!NMbdBXcI>itUh{wn`|V2lo1>HXdA{m=dTaL$MFU&TzxU*q1-_3t-} zrGHfKf8^d5o!hSV>-~v(kNRuM&*ZL;d6w&kkzxYzG=;MB32vU{9Hk4UP{?+F`kVYo zKGNE;F4BKF=R1sqRBfY3s|Zw1PuNKCdGKWmd&PSRsmtF?AbQ#Mw%5okeTDgwOVv`8 z)y&~&xIX%wuwUEIe92|>oz_qKOJA*neciY1Rj0A4Q<>`2WlnM(uDJszDYZfQjWeU* zTcG3*#5=26{(JwVS{AGN!P7g`F34Cpf)u6u<8LD*=4pswtNdR?8)e0k0L|MR!8>%* zP`)4PPNshi1%!?pYEH(LLU*9gTKZG!zeY&bpHhDi_xY;!pQ*FLOs;2QZUR|qYmtuHEXy9IuB+bW;m4M1_pzj{=W7&Sy7r22V>VrF$%8MQ6MV~D^` zn1G368RO5R3-MUZN488AL(f!h_PurpCN_qEbmz>jcYy4g@pb@6oq!Sma(Dp9Q#2Mi z!VbVJeE4Qp9Ffz29{YxPWf4g+F+}Q|B{9TI`jTyZJ1&->c=~qv5YPnT6ydO&gq1J)p7qAzD%B**PdI z#IG$3{B|QmWbn=XsLk;jFiTnk0Q2GxklWO&@IMK^xDxYw3Bi7%y~-@1Cx+Hl=+SM2 z;UhSwE)+#u3YNM*`e~9fdrowqPG6l!)oq6F~s_HPi6?fMxa+D_PU)kG0>5o9DEQSs)t#O&?*kziRfRqbv-s 
z3vr5`Iv+mvqy^Fu?(Lm%kdD*aUx;^smfg>lUZ7?F>s%S3mOWMqaFs;0zgo6T*4ci7 ziPf@peU0gnOAJIEF3fsW`y^ynb%fQ2P=zY#TwhWM)kxLalZ)>B3N=YkwOfL5(@r7z zMNZQWQ3XOxTjN`0XTOSDLQQXSnl?u_X%6<4y$Y~8kh=XIw)=Cg;*2xgOpY!jl{@@y z>s6jB3?C8cRM-1?om!%k4|VG1Qd>u8Gt_*tK^7}ieX{7ST9ec&17%q4RBILM`}VZ3 zimBEg&}bAZXqf;YGvxy_4WVvUA&w6-spRP3fxo(GsURwuxNWlAYU_FohXMNI(9 z=52Mp1)6qfYKe$s=Te2k1xH$x{xrlPqQ49l=D!Df6yaw!LdQkjVGKa>TVkN$ocq$2 zI*XF1fmNkVa*tS5I#!zG(7i3)dh4B1s`ONBsXX>E+N|du))X(;o%)uUBMBBhZx-wQ z9`^m=NT5si@_OqZ5_4&}5TLzu={B;nl62{rP}V$8-SaB~U24A*>vd_hnW|1z_D#1# z>4tP^jWWAhgn$CC0q#hb>h6Elr*9#hsoZ&YZKOUuSbAk6_3795u+)g@)86)~Rivk3 zr-}yOm0tLt6zSFs0D9Z2d&5L$ufD-S$}<-CvRAj#>jHcAWp%Oe4vdEO>Re|+hnm#d zt3Q)u_E(eMrLktrUcHrPf%HQ$-Yd~7pfv=rO0-?Q3QF{gzK{^3krK7`>e*kU5(!GQ z3vio5DWrtGy4+jkecG!(QAY2IHPlw}TG+q$-(HBqRliX5um6&^ z*+~8R(&sHDV)}JM_UZ>t%b0R$j{ix`-gdZEvjz5Q7kqT~>MZxRz+RoJw*~fUySn;b zTJ|uFz13@auYk2z_uh@b{%YAK25Uw3>Q@OAR(~e`dsVv!eN3QgL}@r4ce>ahgy^t*sHa$Ftk^nYjE~JfxY_S z5Rh76uRe_;3hdQRY7nzmFW<$|t-to_9k_lH?A7ZC7R%od>%A_$7>)$G^aAN`^wzFH zX{B`Osk>91Bwczplr_UtdI@`Vsk8q3Wv@et;qG`t%U-qi+oHcqB}1kc?z zPXz=YdQa6rfcBU1r$#0H(w0<4{OnZ~Hr2bl*RQ+%&G2qbcxQq;8z<9VZqq1F{eqG{ zYfD-{XjIa1wxp$mWv^Ps-|qIsK2>-7qVR5EcsD<|vpHwlALhBcU3uykl=Pb&)uw5L zMkNifC3O&%y=sn4HPff+Zl50BwT5?7gFBmZrhPWg-8Rp7Nr&5#M)EEy>37VDW!lFP zmc43%P1VF-{kq%7gm;bMT|;nZ<7C>$^W5F8JoO9Ov~mZvX%L}NNvGJ7Y6;6;HN>W> z_NltttHQg=@U9}bvpIt=p1WMp0T~BCK(%oPu(>|ZD>{a?qS$F$fpQ^k4mhi4Kyqgo; z*_<=&3wZ8sSDyL>C0)0@s5g<&sH8J&NmB{SUe#(-P4TI^+b4&2lft_R!JW++eDU0E z^Ng3Yk1c5^@1l~nwIwwWmc6RcrW(#){kq$Sg?IJg-H_nU#>upgD};dO>k$g;LD~_p85qPonlK`L`YQ9_iahb z2+Llzg1_DEOZlr`cl#1|!`u#UvZn$d=?(8+wF4rnD?n8r;(kf}xn5dYgG(9tFLU!z=?os0u zxqDQzyEM5=qrK=4@g{bU8l$@)y8~@@RZ(_no86G^QMHQJJ*viCs@qQ^i6bdz=kePsflwNs(7AG_)Q8wm|01;FR=YxnxXtJQFuobzSxKVK;i$6!e{#M zlN3ITxmKS~lMg>k;q6g)wGZD@;VYu>)zCxbZlmx$nUwbV$j7MgwRqF>)57pkQxy0g z1=@c*lxM=j)TsiO3yqUVV^9Cc-r4SS;Fx21^Z;l z!PAcR$r6F5PunNkbe`64t2}Lkd3xPG*=F+eoPDy5QLa65fmO!!&q~{J^y0veX zjy_%9eowrc^E2GJF!sDF*(Gf+*RcGh8iQmA3%sh#)6g!J8-H_o8LPW1Rh?_Man<^= 
z3@oA$wYqIdP0m?0q|f;8&fxl&+rK<|iS5((d;9%izJ<~Xzs)JO_*FNR+wZ1ym)nDW zsGz3UCNl3h$?W(Us?$Z8(^kI9H%_WFiNlk=b)s*c45G@`MYJnon*MKk&3$WkPCR9= zn<*-&+;WD=OQ1{cc4s}8{dDjxekl`WO^Gc6`~U}#=J#5A7iM-%+;`+1t7A}%IqiaYznC)yfh%3 z$3(M7sA#n~8qM2-$(02qU1pY;A5wZmhFBTxc_9{C!E)(H4E|kZYtRHun-aHv z*^RA3*ZW-=S~sUf2kul4#OA;EQT+^}t$5fG^+Y;!1NfP&w(bZrIS$WOhb3X8OOwW=g|Mh04AqhB{f%rslFG)r@NIXy*xfuqAB@vG3UwGCzWfMRp|l@6)a2&sL9Poii=wB?-P4?tgiiYfmgK0W9_d zT??fOO0TQi9F&{sD)t>2d|hTL_rl6D7!Ou9*e0;zS83c2fXzWl3dye|sf0el%VH=K z;pJ)Zaxt2N{h!xbybKR^Y;aTrf?f+yG#DbY2qczNZjT31>H-wPa&8}1Y?c6Ws}T~n z_1t>g4dd2AOiKpFX`sn#k;L0El_JVzmSsj6Mo{4ywC^v3`6Kd6MkP6AAcgP zR6OCg&wBF@AZU)Fh+v=iVcZKf*8+j!IdJ9 zfqN>q^Bk6Bg9@~SxXBTL#u9QwojZ)R2Hvg~WyPy$GE-eops&D~$<7+AK@Clz2XJ3$ zvdbd|nbjy!9Ke`g!%1$ouAnZ1&%qvjWAH^&I zJxb?oYK)v7TSHg}cNm$Ys?uX?d7N!u)Mdw3q{j})j;&9R9okZ^Rq$;b{Ikj6%jWT( zXc}oVoXu>L7Mg;mW6RS`I)T&X{?2{tca8?hDw;QQxMXM7b#h_HlFHVPTfNthTQx&J zZd-Nb*KPljUL_H(0`v>U!=3p7B`8oW{+aVBO z>ck_QKSQbub*1%e)#e+{-m7Lw1`&3K!p@<6uE4c;OOWDZ2lXToO0bjs3v^~(*@rr{ zP6SchSohjp3)0`-hhe<4)R*^Mj$XPNRrVX+2nL%8Z&sl;n`;n%)lhKB(iNyG`zBT; zl!|ERV9#+m19LE_aq-5XeW;6B@M%!~B}&gPaEmsmM5n@bRE;R{Ty;u)G@eEjIJ>Pr z^_-UsteRf;fiC0g#&P?mh(dn}eI9n9GMrv^w~gGcb`hj3r3*6m6x5K!#WJU84 zVSx)BO6U@ITZ{H?)$R1UMO-LJz@M*>@o@SLkdVrqJKIjUTfjQ2ig<&FH`gW6p`4xW zR(uqErPsETM(&?Ymf76Wt%H=Xz+RF%TKfXfOo~9GpcrVbpkM8|#AHwqaeM3n_#*!7 znpjQ(BCI`jJgg0>Q;Z06EATR(r@#)o0s>%GxnA8&1`fbrgUyNTv2QA$l^D0G;XWkw z=le`$4AWqy0A{{_c?B$BMJI%>qAP1b>orif%TY&W(qenOt}e_MZEXnyE)%=mF7{n6z`r+ zxBJ6WFOSFf27IUL0`s_-H^2?}zns~pS#@*tVo_Zy7f9SJ(Ce1sv$7#%0*tX+`m`BP z-Y=|!DZo?L7Jr?ps~EFAtx}7#^Xgg^q|+T@syx!(=|TF`N1*P_v$Q1Z#BEjE@Xb)3 z)9Z$)Vr?r>li39pbgEC*=~&VVhWcTu+fI99lk;?of8th3KtFEhalru6?R+jf>6YD%IzWxbB@GIT`& zf@(w%3l5t_9dE=1A_Xd&+o4&=#I+jMTF%H)_1r-vb1M!JVGy@!4!0HNEtx@JNMo)s z^n05D@QeiC_k+I#=t#$SpwOu_gWT5P8`OO}h6baa3#92BQi7mKE~u4+L6FslU^;j{ zf+(pa5*v3~jyqezdTiHjVuw+Kr(dCh(FDKVloDpwO(8O(WG_h?-o8M?n?U7Ew{c?{ zem<^^o-}@3*MhX&5j4hp!S(;5h!05SYguCZ{b9;!Uuiern1ueJeKVBK07~u1FUMR6?HK&aI;E~JYS2Mk>$KNF z%4nf%ULKG~Mb)7J*(#S4pD< 
zs>KW@9mPsqLX9?DwX+%xrL{zF=z8E-C$~?Hshf)IwQ@`7IKaJ{TSBKf%~r2UPp^}n z;372;YPL=y>PPVj3XJ5uP(AeHR&uHzw+r2!Ze=HVCAXMctK_CzEP16if+F=JZbNh{ zu_mFEniL=}rMC6sHl_!qk-bu@bn&DEeY`*q(l9A3Qc5p)r6zUg)fX?yqEvxaG{e;C z00oqqj7i`mMw`zm*fk8ouI5cirM6hf)YFB^Oxz(CDYK>SR)h7Wlul?(mPrS$(rcD4 zPOn+sIK3wR5J7rPE#kJqd`=!Zwxb7nZExv0DM&A-kI1lW2CUBg5$Lrp`A-}J0}XYv zv_YY&>u^EBWf1EpE~qsYbQVFWC6cg6wK>dzUQ_eod+xByK)XFe0D{*_d=!GcBxyJM z0_`?*Kse}8P)xf$M$>!Rt#%U-(_XtBiL*i)=Q(LnoxW*DURSPrk3&W^I_fglkWeu+ zzXo%Vm67CZemckoL)LP0zSkf-sj{w?CA8Z}<#d8mg09XMSMUZ}Yd0IO=a!+AcBRz( z<5F&Z-{f*PpY~0s-)!1a^gzGOar*5PWwipQ1Qj>+G{EV%X`&{If(vR)h?4Gt%=6VU z`Fm2i6&G9g@-jkbV++w2yLjVhp)?;UpLZb>P)>*7rU-nlj;)c#Sff z0HW9E_Im5>vP#blBM^8=EGbr#;;C8IAe8zePbJom&{URkl z9=(jIOz8%%|7u;lx+vZde@k7;xs9}41y5cLN`aDlrH{&Opg_$qby2`1yQuDI>gbdh z`cJ_Os!0J!C&`v`ujFgAiqcthq9rJk3HP|WmDXF~Zl(2BkOL8FAh^0kZl(Av4V~h% zbaaYOv?PKQpKS`l#jUtcXt~*5hNggM-CE`53-Z|kN<poHfx@@xh zHIQ=b4z$~i(iw$nah40}q_7~?k6chkEa-HCQcEO-kq&esk!Ao>{a!B}*i8V!*+G0{ z&t8&rpnZW3oD%6k1;upWqcpvz9Z^_JWzZ1TY#n1pYq}1_^NiG2r~@y5H`0Mul29>T zp(B8S$W=2rn+q}1L^hH22VTqUkXYG-;i;Wf9)qF{+LU1E{rvs+)|+Yy&p(1HWPF-G zL`qra@B+J^D1WTeha02~`tWB?AAUpmEycK|5vG>LIL|4rnt4#fL(ZH40yzKtr&a;3 z7OC9%CBRolicLYe$v9B62^uaSIYooZ)%Mi>dy0#!jxdb_D38GpPTBD)w*vL=eXjOg zop*Up(PdB+AsJ4VbzJMoUdUJ9O_2L;s+*JJO)YOcqUFzh*;Pj$eKGE~$_#i5()RtG z$IxfdWWM!|#o1zVfG75ngfsgBoGplOrl1(kPN#3|X|w)|lVyXtdBXrHjSfU+4XcZQ zwaqRnE&(alk+BP`3u={slyiZdc@-JWR|p|||C@VCGzUuDi%Kb)!Sk~+LzT_1loUp- z2PlQ^3_m~PCuX3w*lqRz`l;NnT3v@JQn*PM*j8IOpXcUdEnYf_2VT@w_^BLv(ZxKO z>hQfJ%(wo+-K124f{QJt^8v1To#uAr zw{+Zpo(FM%1W4?wMUlVoi^}KMN=gj}kTjX!`%7`;{&?ZX#P1>KK(8MxzpM2eqHuGJSS)AV|%S?uv zn`B>Ag6n#)7BsU-1D#_&Rz_sgFzS@&A z)6qe3Hn%`L5wRw*qy#owH_uVhH>KHpBbYHe*dhbD9>kO6dZGrFeY6NZdpFNmW%PG? 
zB31DVBn`B>^-r5Kz-TJ>;*ZPA!tIk%#Hj8D4Ucroxi4N>;|>{4=rSohh9o?WAXjzP z=-~!J<(eGnuZ_BzJ^mXq*v^N2YAwxn=B;!${l3yv`!rSM+>qa(EXDxgm3q%}xFTn& zvXftcGG%+n`dUC(COS;`8$D;a^D8o=G>iIC)&>W2nvcDN70Q>Q-f7t}GfK;i7p!R; zGT^-5iuy00BE$1Q!}EkYFc_)R5O+q6vuyn#xyREBxFbP-EEc@|Dlz-uUWkiqW^qY8 zt|MPaG(@E9Y12k_2x87y1A*4`bYKNMt%9lXFyY2dXttQ8l0R>?F7m=ZF|kGoxNTBy zSBcuCar+|Wn^R*|%l`-K)mfkq>D?&R3Od}N;ZYxS0L8NcxH6xz&tq)iaWFN%gAJ+N zm!{&HbORG1Hkj`(M(*G%@89P^!Ivl+q-l^Y7!SVkK8s?|Or?NRNhC9V*BEJtIsUam zL|c75lJQ$JYcfM4RhH@V6hJba(|@3C z+80aXdMnUSjO&HcQZz0B*FVA65X1FOFB7v5eulW=i0MsBb*8(~i-PMqs1?KYWv_&| zF4vd>B#w7Tk}5W9Fp>gXKVR+;HmfJ8Mx}6luc%!b*Mkufa|uljsusib8`RC5EpmYC zE;+dRpo7D;^G6AdAVMP`H0?OE9BEDE#-3+8T}-2OXd_|PULs!}$M7h_p9}^m z1A>|eTI_-d(z=wje>CDI;u1ABCw9? zPFxu!>mS+6<(K{L#+xTPtfBNjO$+ zzoI%u>!^-Z!bg1M-H0ruioJI|#9;U?;sLR_dU8j3nI>c?m0R`$)(5zQ6i}~CYzHqm z_bby-<@#V@Pdo*1<>q+IAaBykIxySPVXS;r9d<8cZv8SWIr$bT+g2g*w6|!9?IN@n zlGUjcZtgL04Um1$2(2~JFbax|&~kvdr>(=>gK|TAObisq?OueBODrhlSG2;%j?ZRD z&G`Bx49=H>A9D;PcH}?pxHi)D!RC6oocd@_WUpVUd^T57X!>oEZis)YT%TytV2*hpr+_Mg5RA5s;6OG#$FjX~-CEj^gJ zaeh7pl|-$!#Ij29^A8gE()=8V_?i@jcI3Bk{A}StQur{;=|4a31!U#`C8dU!3m>jO zkY_mSJiX?x05X;P`Z+9WWQB-8?0o(3$SB$Zr&wilEU;cY+|r?!-cBw^@#@EmR$X-< zd8rkD(`^JBw==qxj?j-}Qm6WHD|ywA+u80;x3~CPbIiXXMPv0FgU``OVoIi|^7O$M zX+<r@~(a$Y5bHCDsR}-^*0Fmn?t%%%Y zK-<)E7DS$lrq47D&1^w8H95USreSD?HIU3VyQPq-7`FQsvfT$y*Ov#Oo0J?i?Nt8! 
zxb35XHW~@#B#pH;*hBcMf4#RpSQYyIs`uQg&<49Iw3#&%+LF`MU9=vQd| zk7A1U&ALCpxCQUsv?;YjpbVFHO3P=(Oc3UWduB1?_WVrer`Ee}zxI=;PP^6^`Lpxm z5E-A=To35;n$z?CYzczwi~R8#F?7)f51#%P6^v{xe%8v?Y95hIdr5N7+85}46R8UY z0UWK~*C~hHI9~1g&poX0rk@OI^IICq*Jjh~C9}t2-k|4~^5-iW-;%`wk|k7wkzT{x zfw{;=&vVMJJ`DJz8u_ZZgmELEIq5RrrW95CZia)CR=g=z8(XZu4TOul1v%>yHm}<0#2C8m4KFF$w zkbGrU_JPw!wu5#PJ?22Tlgj<+EUW?;n{mOZf=^!Fx7>id2Qv7>e}3XJC@uF9 zWbjHw-zXV8oHn@~N1+UAegRZ9h!5~Ml*8egxLV`okoSQcx^b!5UazeTD!r3oQ^nXR zZx?0*VjbI9WA2j$#Elh&^gHHqYLI>jiER0y5cOrTyAR6+bakrbNMlWTIM!28Od1cM z$F|b=IVX)zs7*Z?te&4CMLY6Vm_1k6wGIrv&%-d2FtyvfFDausHzSM1)hUu9|7Fc5 zq)LLy+3`grMZ@asitJqwOY?K=*8B`fkzMS$mBP}5sl;cU2j4c(XH_gky;fu=$kR)( zBb7VrjAD6`ljRfdKl>w3dp!C7KY4m%dB5e!z~~Lg(}Q~5Tb`Ck{T9m8_n(KKF?l)^ zs>bB$--uT+fQu#~EZmT%rjqiMr;jNuPY~?YC03`D zJk5|Y*swf3_B67i{x2p^_bTiF*C(3Go$LIg$y3|Q0BDiGTeh+OPZNl{6Wv z4P#&fpK6>Qut}@uBMamRJV}N7b&tbZ_WN~*@KP*lm8*0|w6K^I@MlY}eePUQSbPF! z>;i)xNNlT^Ff2XfDGK-#u7+MaqxVqwO>(vYgtf4zbUV?~Y+H~11^lQ&C^-N7)MS1v ze~i4v4?is`D>&F}Fu8yqFDquh_;C~BRJxvy1h~?Z`SAyOltO;wQ1zZ4b6j_0*2w*r z!TyT#;{uo@GVR){_)`2h8QPZO$M1lY+0$1gwtxC+g>R!;P|#V_qI5fp`Qi4fPX{)s zT=}=-{AfTv+^s!(4gNWYo{pv^HFDaz=anDe& z+pj)^mlpqWsqTo@gx=wP^)0=&xbum^;!DxHM`BuP{|7Q;{)D~W8~+NwiExX5=w7;= zqW5v>`%N9nE0w$DTjgbK_bkJXDQnrhZRz?ogYVm<=Tn)oY~%X$7AJFL2mTt_(u~ww zxn#W0YqlL#qsOM1r-2jae~=H3wt3FKInNGHal2`7j^ZlxTi$}-wbX|l^o>ppdd#-6 zcLvhEX!s7>E2K{Fe7Yl_HIJvM6C8QiUfOLijA$xqxWA6j)f|nMR|Jg4dYg%6Ovt@k zn8uNu?wau!BgU#aGeg!Gv7&SKc1f>cx6m}2+)7p%*O-iJVX%{)W18n_0(74RpAd{6F8L4CCXU+bDfFh@;b=+!$ zOQGX-KG@&q?K~fjz}(~W#1F83qh|p^akc9o&1-^$rBSnKgQBXEY|tM{G%0OoUs{O8 z$g6v}8uBgah=kp-Z<{}WW>Zfw`6t35+bN~AT!QwYfbQ;CoSU=EHNtw5}w%48@B zN%2?XfPlo012Nki0a^nYa%e)dZ7&jo*_TN;ELeHCUL%%%!R`RgLjrd911I;f5V&z9p#PIq3Nf zDBFiHslBvV?oswRX+@8+tsVNz-OF4-rBL=BCQLHQjs~CRuQVh7LWr`jD(o)8RPWXN3)W156B zM*j}OsGZdhLH=kzcIvNwl+xrqZ<8!ICM0>@COgj?ZZ>(|*mmSMU^iotl<>SY<9WME z4Q@t@`K{OfHt@Vn?B#j8Skc+0kY)Wox72?M7hNa&>m+}jV6Xb41kooNmpKRAD4Iri z>4%xGzYRD%7U3G%Tigiqz#PS^b=6Y6?Xr5X@Q7*v-=Ylk?REE z{)>q9EPfm!vMWt16pH*z 
zxSV`mx;m1RZO~2$=mD~eg8iODwKXeeo3?62-RHx_VWWD4F z=>1CSK_Xd7PF|4ACY-%%<>UaQ%q*bt`G*qZ_13CFsw%-n|I z6xl2>|MEQND|EA08Mk!6JPdPExiQU%%_2Ab;*CwTKSCjP^MmhX>JjA-Z=6`}GPS_4{vF zq-sJV{OH+(FrVWItrbyC@l|FI@l@-Pc0kpV9)F9wAJysA30AEt2n0U%T3 zU{r2yK{fGQAAL`1ZyuWiaL%A-$xKtQ4yJ|PZIMrIf%_cB1(=au}a zg69KjJf|OZnYkJ^P?s{n(^9C#%!Z~x+h;OzQBn22s8H+n8tzSziIFhed&P7CXmcpV zVM*~W?d;@u^4Mb_SdVp||r2FKq zT{>G~-h<>ZGy1^4eBw5OwTt^E$aCX(HZYb}J&0(5f)Iw1Jk36vLCC+c5XsP!UE5-;Z-)_PL=l;k^qxuI=S& z^C{cli;qMySvZxZ_x}z(P}`E~lXFU+X$Vg9J>8b44maK8!rc4Xg8bck7pC_s8|n() zvvjg*&jt7`Koav+m;Lp(_4DKg8-*;Km1xm@8dO23-B=bt`HEmJ+0H~ zs#v6M167|)Lt%RyJzZE)eY7N<`V5KcGqk8aXW06@*cjF40Xn08@xL#8^!>h&ep+=* zb<3mbR>Z68@#Mf2@Y}WDg*kpzh4i-1mftSSy)TqE$InRKR`rr6_db%RRSiQE@^@h_ z|6vL5U6^}6BH_IYbMHqcymw*HlTk!#;_F7qkOtRa{}Ls`Y?jG9_=4+egg(BnuWuKXBFHQA-F#mgVHF`n8uwE^y+!>0J*! zMu-|EK$VKwwrIo7mLLUDs^&;hlqwaX7Kj?$p+!NhfKcAFdQc~%V1)`jfb@QU&&=9u z?PTpFsQ3Qw{eRBxr&+UR&C4^-`#dvi7U2M<^PLKo>2xqVLE$&K-5eQH-2Ie2Fx_ndB{xyh7W%FRtak1#@i{|$_h zuXV5Mbr+K*8r=uk-A9Iu+;tJT&jjs4|IVoZlq2_SC|3k0WkzEB>~^GMzm8=?J|7-; zPvKUpzqtT2(SSv6eSc#P@;MKUvXGV1de;JjW($uV0}ZQ;8e(%X@Dk#IM!|~h^Xsvz z^*M5LpXw$ ztR}Lm1WXS>2-)6Xf1AMPLf-Y!DO)YXmUd-^nk`&*v@rUZEUDyy^FsCR2x`%<=ry4` zwKuzNoD)UwfTc{sQgYx%A|}rMu3~f8bX2Zh^Grp$E0$JkcA~#(}B?YA>Nugj#=Hy4mmx1Z~1D zu4$*c0p`dS>W(rDI%LI33yv8e7Ea7{XHW z9E1W6wPaf#w_hW8_d9sDh!(;jA#1n}2WB%ZqG>Nyd&aD(ZifA5hKjznw zm-70Lk=gfyT4FRO&Exm6ilt~S*c8G(qje1rh2|~L!sT$zlDg^Qpb7RL)qOcMpDrC% zMz`Mtr?`UxC6oOa0wFnHFE8Ud2{tR_Ja;rj3iy$kIJq%SZitij$H~W|BS(IWOWO+8&sA*0b&uQbbf?p z#gv+c1d^j;Bs5zjIfrDwPd1QT>ywQn2YhlK>@K2Ewwm0H;D%&nF1&GJ@e+UPB_WgW zp6+)kL|IYxWsR5eR&leh7!5vpPj~y*Q606_a>eMgoVDh$%{nPTZ`|lh2GS#e!jQ;eQy%pxVP`}nBIDQZ0vLsx(MbOsR#%s_+e>0i`%C z|A7 z#Gy*2xiGNH;S~;B>V6=%2aaV^uAaZOxsD1hnOujysZ^Ki&~B(1xsEC>^|=m(1!m?utBbwtDM8$)+zQH3qnlc|CUM4S#x zQeopE0tWxQS76M)S|V|fzweouXiqc#PAyNw7Vb0<&iTi0w!qH6NPvyR?K}H+eBpeZQ?iPpWLb1R?uA@ zdJr5U!<%6N_d!*HHb0?KWH^fWw|Kk7*S5emeE+lVWyRNS|B3<9z!=7fua}9h{gU7& z@wN5DF2G?iy8c7rje__6quNfvp~WY^2%fC}6t*3K%3D* zQGT*V*%z&VRU6;4k0$;e~s?6@6yA 
zTjQi0*#C2L=aq5C8k2rNCa*cb&>80n1^Q8-Jx(rWOOlNC+HNk+46 zlEukxVn+K9Ph~YPmZQ0W(UJ7iS#v)tT%`=#vW##LPsMKGj?7*zAZ7BnG(reVRAidD zfXWea{lxDe@W^w`2IJn&@Vzbcy)|>ukGf59a$cNlh>})})>_D*w`OXnVf}n>16Nva zV)?kYUAUuCEf*@(gbF09<78Ewtc;S@E)_FmP+E=Z7dcDKzOVbqiu<~SJE}M?RN2Hu zKa!97l#(0ceVz^IF-w?rQ8G4*c*OO;Y>q zy9=oQh22ayA633hYC}GOLvi|$@@-NZ@^#u5?X0$TB3}*_zsItx0K+|gHF%w1CMIVI z6v<5(O89eRB|L=@Y@0=?53^2~gRvYdi0);CjAoIbSf?}xuW1CgY2;7)Pe`wGs}d^J(hi$v8lPUzf6Y___lxK|2j-hu<8AHauGi_B`Z+&VX2XuSxPgUciGvfgoy$ zGEiaUW}Bb?l};u?c=!6~a~lj{Pr;aH(MlNjBhhZzwQJGetS-?vp6`p_su!Sd*0$fe+igL^gDa|}`&6T7yr^q!=Bh_mu zm6JizL3;k>{#O9|FCM`0- zhjXEp6on*fTP#`1N=VifH+ab!`C_!q=4+ZFJu$KZKe>~KV)T1p5cfg+L*%CTRk72w zlnK{tm)QBP%FOm{NB-aoI1q9(4f2C|!>_^jbRh z=Yv#gj10Jy_?LSD1(EdSM}mZ2c|~dWW3^MKt;*gF)?joN$HuCq*JE1Um%kAd>a-%J zcTxUl$QPy`h7#q+364jdj}zAaU-kyT_lS?frbcFxpU07|5T2?KL-cN8Sv?b5~&Vc(00_IpnBZh3L$ai;7PcgeOzO zlQW7>+QSoiw>rlapDYVcxY(077oV&QPk3NY_V7=l-HZZ^P+T?ckIavLjP_A3jJA%4 z+->rAGyL7-{;uBNIe$0H-);7Hv;Ex`f2Wbs%8U{9UiVTkh{x z_`4o|ca6X6^>-pC4KAX=KA&tOxymQoN#5X-9VAzigvDBSr7Cqg$y(!|E`H|v?im?u z!F29n@)}*dYpy9152{qq2W6-EI%ZwB|C-~_x%cmDMn`bZcSrOffGh0thBUWD&83AN z?Ewb$nn>diMDQI|A_IE+vH>$mbo@*1aQHOepXt}*5FpP4X}^wTLlt(n#V0Mrow=dSvDx>nQ_dvyd_fr%GVioe;$J zgbGrdlyyQSDHGEZrjarcJ)w$}iRY)QiNKp^9znvS@)H%LOd>x~Ny?=06VphUBz~fb zlu6+`s--H3VVD%|O2P#2#0pX-d?!|tGQm4>8YvUH6RSv>!1dUsh1`6(rGo}!Uw45PCd=&9hioRj$P!z)z5GL( zAQ)BTbU+pA$r&}JqS^nlXsbsBNmEf`@%rqLIAOE^ZCh?U-OM9+!7gb6WDgLa4Y2?z zE8&rDCthg+^ehuH1gKh%g8=;(!X@s^sWab88K2W6XGlUFfJu>@y%0m!Yuyd|LzCTS zxRc~Oi~|OO#vM&8Irm&fLno*zuziiHk>vPmke_y;CdAx z6wP1`y-pmxHPY!0LqIXS5)AmDf|}fkkMcd9lwm>38z4~Hk!Kf>O7~b~PrUEh4|bsi zi3w)2iiY$fDTWA1)eAv1XGtrdzV)N*CQDfI#W=YwPD;A^ z$d$PN0dtRG3qlzt=!En&%lI4J?vj{=f!{YXl{^&>gtQ&XO~9whd0 zBlBT)LxQ;gGBj~|l7Fe3M-|(DZL>+W5QlFBJ(RtR%gcg<@>WrVATXuqbOc)1|By8{ z6wY`vjC~{radP-2zLMxqg~{WdYSvpN73r;m$`LVqZ{P2NgSR9;;|%-sQK!3_{SR>U za1G`q?0*n@bQ8Mxp7KB9ClL=s>_+(`fIQx@&_!wYz%m<*bP_^omoyDw52PXD7j~4D z&=8N|#bfOJk#@F=qEFD^gBBQpKiFJ57S_32J=RcgK>F} 
zk4&&PmE1m`Td|!_5BMhOK={JmFXs{+X&AdrJ1({H^r{RWvDGDwruG1uCN>o)D?!s& z5QswSFSqlI+zZUR@*JEJ(P>2UWYBlzXVSVW=QxQT5mYJafA32n%oR-i;id2alcfp- zz%P$LF32?QuwwQ93%JM4Q&m9hqw3PlOIQn(Gb@+CJqDL$3uo|!9(bk`^3+ohW2>zr z9bni{(T=L|2S>UmF(*9IsXRJ2$9+c=XRYLuhbnS^mswN$s!oeJ>Mk8x60gfPyK}wb z=n_HvPK;E#*mLMoxoh#|C}kz+@-d9?$QRtX_@IE(7!l-sIJX+@j;Z~m5Tro^nFYop za8d|zDa_=i(BbfY-jCf%7(?}%U=B$QpheZka2ezXUwb`T&D^T00Qf4Z5)X>rhibMp z+9uRNws6m`*jw$?vG<^uXH?R@#%|jsY}vcq`%t@~XL1V&?2Wr#Til;e`aaZtfM)MQ z?InBO8L!yx6qLkwK3&>+3ObJwTK*{EEAvLg5lD*bCoB=a5A_Z1RP7}Ny2}C&9>c2l zv(VOkOqIan&!`l6HNxl2pXJ!NWY~NOUdR?Mc}2w(-sxW$P|D}$#6^Iouxk94nKl!Y z!|L}h84oZ&(vr;t4GBiQi-@SOD*NPaM?#XAy)g*qdq(w84zoGwnKGFD9kh4vv)+c^ z>iM*ZJ7IQ}!funm>`FE98dUw zL{eYMpO;`KQN?}k0`yK2i)X_Ff_V2)t(U{%@lp~CN?K}p5F@8>3xO*c^H7@SLevH?tfvZ#r+&u~6 zBi!j)I$zNYCEsK7XZLF4C|kI4C!47B>Oq*F$w~HW0NAy~OiSdihbabkkNdkYWwB`| zQyU!6jZ(S%NXm)UkK`&#nBTv~l9ro31sIA8493aC6d>}bX*V$&kgu*E;C29~d}0dx z>!v~wgZ8%hPSo_)IJr4aZi$mq06{CTP{u`eh>(%cUmzLno zX_BaYTb*nZAiS=#gt1|67xA!n@#C4w!!%0fzkcrU2>_*GVL0whLm*Sr8!#|=N>Sb_ zZs5X9rIX4L>HJJ(H+tisY-bDGnpayiqF2mZiv$e6Ruh#A<>tF8-CtxAlyG#wg|2Ba zz)s+W%h8}=t)4B9^kfK*V%I>lf_?8{3CY%_?PE&eoPwhbIL-uI=ML2p6F}r}pWK1C zqtH!`G+^d83y8WUwfKoKWC)L`M(jXCdW2YfUf0E8_#R3{+k7^=vXJ8zD6HSkr=a4I z_>W23f3Wlbc(~7tTk&oG6r9`o?QT*~xXx+ox9`7@4jv{FVXP3Z-=3iSpOUYRUI!HA z#|f%a*KcQ`rm}_pzZ<^mC2dK3Kl@?YHhfP*lT8lae}FRO;qXJu=W2FKT`&LJ=SJgu zDmE?Pdm|5p?|N9w-HonF;`?^Zm_fr?qq;AL?`y$bIeh;GmT~6>P!j964=R7YprE5~ zL;dpO9F1?kek-$xl5$+~K6Zk<%FgBeCo!>ll3oR2^FA_=t0HfO<(Wn8jgvhgnVX-% z5nIDuUK?G|j$E73s&a#Jq9AUK0yCBQYgW`VU56$`)Dn3nyQ2(+pDrm(Afj|*@qy2)l^s~ng7=Zs) zr8zozZC0u#o&xXNsT|RFCJ7GJDoP6)2?9f$Sx{?rV1Ko{8$0i+083}1lG0+21mI!HalmFg9SN-@SX%MA| zs>NWpNTiFOiqw1iC7bS!#k{)#Y#*Tgyl9LLR_-^*m8`y`DAWWPKw=XySl(~9c!y5Y zX>hLu!`Z^;w@sbGYGxNm@ga{!AM&V1k1R$Y=79{b<^2e8v0gJ0pK8~ixN?4m34B9N zZ-G&I=}=P~znkm6jjk=PPiVv5doPAlOYPK;us$|{@5fODQdr{MY+ zk!_iLr7~t{BYfoEV?9L^_~W@#wXZ0eUe?Hh+7Mv6N=;O$5-_zB>G{DWg~U_0iut8;#nr~xs$X7uz%;=(gByl=O3jw%0x(xfW=)u 
z5u!x=lRM$_C55FXfzM}Q8uu|(0;Zo)X*519zBC1y$`-n|PJKD&>D>LH@j!_1S0=vn zTReW^OAq2;C$YCa2+~JKwVi@Ji!XgsN}vq(UVy&tLi&l~OXq5q0q%YZ?Z(Ajh%eoy z1`bmtaF?f2ggaeBd@1SwgzxR#0taLZhdu*ivKI%5aA-7~h0v6M0bNuAm|PNcQ@al9A&?@}KpB@F&<={IB+eD(t&u+6HC7k8*un>jz*1 z>1Z7#xem)x3tBYlV0`p~v7f{ebI{sGVO@`Wt+qG@7b}P1~_QbOOfKrex;^j|g;Ty77n(F<|YT)BPzUF}fI6$nDJp0s$`i zLpujm^WC%4AW9MA538w768G}{(9AY^T8-f{(8AXu9+0z^_gnDchg!r&@sC>JsOvw# zk;Q`i9G8!O=h3qxznch2#DCiT$bss%4@&;&*5n^5!VM=S!3WzRXHj}BS16%q9nHI7N&gdr1t#IEadOwWM`4f{&2vC=188o{wR^v|_};tFeUFW;j$nr^Cx? zb341w!8+N(E}L;Y{C6jji8{t5^CP^yl3$RSP3767)rihn ztmQh7id9scrixrExk&tUBHH%eh2(Nf>qK9 zs@zRPJBQyc&9u&4M_Nqs0%RHsBT?>~?7Q&p1^C2W7z+QA!b$a|^XUBh4izZ2Y+>0G zCLc}0T2emNy^qdJKJH;2H97gX8I@L^ZzB0vC`5vyTj~D zjK*DydP>SiE@+2CM|EFBdHi_|Q$y!TU6qlKpI|`T+XE;G`S_;t-zX@c*(Vino3woR zc$qd1<4bJPWx$SHa!V2RY+>rK8IR41mSa-vyffQ3Nq8t9-pztB=PqQ>#r^Gll;&=p zD)~LH+*!cguN!bTTSa=Jt+5^WqBr=`vwYo+qBsXk<^4JUaW^eRQKLb{R@=_!{0iG_ zk9WD6O#S3&#>vH&w585vac)Gc>fIz11Pv8z+~?$(}g5GER2J$&NTFaKl#5 zrZ`ua(GPeV<78``Y>AT#<79K3To5N0#mPBwQV7-$0w}^4yhl2$vbSV5qaQ@B zrEDKb%1(<5)Wyk6TvqS$DXK}?+PFYXoSYt))ysaG22gfJl-qBMJ>*nGIsQ$+)xO-6 zxS1TpEo(Zg>5M@Q@mo<9ITg4ZB#RwHqcyxzTQ6&D=rMNF~CYkkI z=qrUQ$W3E~sIOU63_D~a-ta-+D^IcB#Mp6fJNw?+d~cH%kf^tI-9uKL>tcqT$l9OCXhjnmJDy5K75C+c0p#k~niWeeZ@J+`$zw=15_KYiF)M;)!q z%XQ4)(&W!0ZKl(pRCM&2X`O1}PN$y*%_J3L8mtke7Iqflt&)_crmWsn>GMgBKAW+0 za~B*ON7IjTEm9KRpY~t)MG;+3 z_in12s)onx6HcM4y^eAiB!aHJfSMKp{wy5rcCc>!8360tGekn^)13sLjqRs2>Iyx> zi(E&kp|z?CD^yW6@>dtYj@iQfze``=-Mg6@=C8hpt2Q}*bw{bFvVL4H%;#37`>T5a zVyVBn|Cw~`--q5tTth=|Rupywe>~~0J|ekea7&TUWcMkUH-GhZDT^}x>K@3ZyNupO zO`D&Cw(jGCf{uO;S&|)%L4C=3dUo?l(bzL+zKI#uN{chP`{0{w;XS`)&KB5FTe$Ul z+6pQ#18Tr+l=LwTYmg_Q|5bUCk0fZLsw}s9?L9{)ub%0^TM?;TwBAn zC0rMVYje0R2-ij7S`)6-wIpb-O7|hUV%wwdwJ&Gt)0lqgvmN8#M!{AUz^VP+|KRH8J!qQp(iCZpGOL@vBRp!Wn-8EYlBi!7 zWc4GXT2DdxF{0l$q&&(fh38==cY*a3EysP3J5~FMLU3hO9ykumaX(ZOZ&M}6a~YN5 z7y|f52o}=0-VSU)w^d}Lhi&y+80Hnkv*c@xuz^5&lqPJ`K*)2gEd0* zhII9OAx2N@bbMdrJVJd*RG+T60>hd*s{L}PKJq=~QGGd*;&!#ZhG+UP&5MK=gz5zf 
z!%YI!O=@DjDgo2$sT8AHsqp2BiYR=a0hfn~KbIWfCjP zYh|h5yBPkfNO%wS-7YlZzk0bRR-utDHg)e7&trJSV0XTqW2~5c)dC_{ub$gWgWm@V zdw!9}>*x;d{*eWqu;_BNAPB+qfJ%=P6!j|LKvByfs1DidmFyV5XGg!R18nZY^Zfz>4DU5ELCFe``gm=kP6!~>n5w~9i zbNr5APWd|v92oda1=l7(4r(TBmK1|0*Hb#cceb$QH^$bzV2kw>V`RUc(k1-)^_1mG z`t_6-~M$&izzEH>m}djVCgy2^U!A zM3gRFU>PAUS?VeNHyR~)iofKc$atWFws+v^CCT``pcURbs{0fInx}XLlt3@C+B0^6 z<&Us}YYm_zBAdTZ{yaee_4=S<`Ef!w`<{u99}mG}*+S#}=8qeZIurX_Kif!)=8vyL z&Bj`_`1$DqbblK}aJS0OTV@9W5UuQ>D0hk> zayQ%GZIV3sx7vB8_*g!Kf)z1a9UZ!&C?d~o?%I@T%J=)2LysM(Lyammy4ntG$*-{Z zG=ZTdWa>v!F_Dm*XGsf_Mv)^5G{*&+;-tb;q1vK2nTS>`jB{J!WP6-!jFSq}gvJDJ zhiak4H>*2c+9oSYRWr^QJjSU*sH7nhvoAJGd%hLXqavB9t@ zBq47t1r=(0G=91*PHv40W+<3^N1i$^F1R@^SVuvLtdr;zD(+GiekMXj&g{I$C`mQ(l6>uvPhs;CxS)<^`X~qA7j0V3FF>&^1aRWy-l8(qu%ED-kRtw z;%?vD9N(M7F7E9nsxjaW-`nKHH|nj^SBzvi3iNI`8VyRIw};0*(0ifcynR&buFkmM zqhN=RisH86vz|m?f2RB^qWooEOQ9FPr~E%h`CebCoiABCk;fIdi1Ke>sB|Z-p<;XL zw#To+(aThBN5y&j{vApH9~Cr1)+H^@YY#?E>(xi>Dt;52#r%0kV4_DGZ;12eLCN@W z-t(v{<5zeuHt0-zJhb*QVJyUXPdeS3rDu*!12)A9+N}Qo{H_mHP;}GJ}T#&qTbXk+Psox&9;-%hi;Cqyo7seNrl9g-0pDq5GKqPp8c6c5zx-ezSv^HVSUU1W>d%=_I6s@qr&e~KT9-ey zzWdY}ofTV_PT658`+auY<8s^Y$E$X^xifNmcFV9>a(+d3vvw;i=zi{w?XO42`3dPq z$!g1kZ>F%V>YHmzxGoIWX1nT-One+T+~8H`4vfhkOVYjN^zP@+eE%7IFMK^8S?9;} zMROJRcQ81T)%&2#N2fq)-cJZq#p!O%2}YCYeyaExnllB|dA@MJKbk@ErOqc2Q(P7K z)nPp%Rz3$YU77hTUm0!=Dv=lV)^q0SI#;EM3_!MtN9JVU%8YvvS62{f3293Z^=pnw z10{t)Z65k_DWb9g#hJk3UIVYYFW}%04-}QlROMYFr=P(;p!nDXgJ%#n|4e-UfC(7i zKUj|fVKz-X%b)%*{`W<1pF>v*COS`}%zo%!68+Kl518R4=OjP$2zBot)piQX{Pzze z`(^e+kH81+9Quj&L%)x-sCKggTV(+UJJI(K)W89%1Y$o$rKCg`_Y?KQsM*4-bv7TY z5eFydgBSiA)%|?%a8kut_jvQc4;*RmO)wwa6!9&}AYm~tz|fV>2bZGv!hCQc4~2m> z3Ui*sv@?nOBZKbxFT@G-6uVEsz0JD!kpomF6#G#a-#x($c+`CGH09r`nK39m2q=;r zjnS+n^TEmPx2t=sVMeYk6E@uKjU;9ZR}aD2|5t6eyW|MQKG_X-|B2;GZn(Pw^#=k? 
z)m$pUM@7CRG+3n8s10`?mMTi!a94oc+|hIqZMb_IX>rMAii?ebG2U=@nJVrS3ZFsY zRQXr|TV)H?1(OfitP=S+^-%gU`Dl3?sY%Jlzt0|zZ#+xpbJed*-EVgaQH)agI19BK z+$Vs*>VtmwoyB8$JW#r>zWX%1X5Y#C7I9&NRlP~$$ z6)2J&{cGf-;jxCsT-%%=AJ4)**}{c){NE-YXQDU9@?Mz@saZ^KQa(N=jThwOY$+S$ zWB1Ttky@kVfA7hZzj=Q4q1-?8<` zTZZs4FQ{hf)C7dhIenuXfR^gZU9Q5N{ITuZFCF~!Cc3k3Y200m#vl1*_k$c15?-Ns zv+Tn2`ZC+eV)@shTgURJWT;EDm!qp02jgQ?_fbTFN4ik%gWc==d430L1%`f>is&=6 z)jQ)}BV8FJYd5JWy150#t!|2RjwS0@k?9#p-dM?UC&8v}9Uu?yY%?yEJzRM=YZ!FA zD;#DWkQ(?8}R+U9*C=hL#DIielP4(ws6$|3XW6vAhH(ICL(To z=B&a;F$wOeK_MC8&JZ=zX+L2unFw<{Tdhj=#wYpK)gJC zqUrx+^vQPK`WmfI4o3D$^~pnoG!pvc=ZB;rMa17UmiSL%9ZL1dBjkvvhib(#=#%x# z^CF_&F0qHGXN7Lc5%oIhn-rqH05iA?=_H{~{;a$YgpO~6xaEmDNqurBtd}huzxJQi zC%env_}`&V`fVZ*)X`W)HNHL(|GaE{ax?4rNqzDM2;}w2HU3FapWLMr=amzvDz|FVlbqvPHcn|+8-Ilhf020Q}%tYE3x<4!mDqw_4>_P-(gj?-1}aq zzLC-Y&--48y%tBcju?^+mb5kUrZ{(AoNSDft#LA1Nu;sFUV-MgKw=NVqBvKpo}pu{ zoa)Dtz1C{+xwSGxdwwndf8O^JazIC7=fYNH@q^L-=Y22y`&ZSXS(e#nf8wQbaVDAd zT)kj3oyzlib<)^&w zb%nJP7QN8Ml60I7_gco9Ur~JDYggUI?|Z!<=t|%Bx(02IC%S?9ENs~&Z6AO=DBTC3 ztb~318b*;7^_2I$E~VG;_W_(mUD>VMag5!mcyRgB_q~o&U=0I1@`f~EQ_QUG1K1Zw zEE-darTqxLgBvFD0o3vPUJI3bAGzx9U^(g&AP1`(jx+gHD zOG$(mY_61xI*IqamNR87zVCH0{v(dlB4{l?)}04_h38kRVIQ}FU<%*JJxighz3+8A zB#Mfq(DiCK+P%uUjo$Z~%ANS@bogv+Kc&t#^bGHNy;BX{C07~{YpFW+{UiSlk#tG) zD$+byHCwprTaddd@iNlbSCMk_XEALU^WG8BsRrBk_b+n;1?escG__Q4q^gOd$F~WQp!aa$X3^!&XZhS0A{iXY+L6pLc z?^07glujzUCBRj}+HNV`2GD*{+!XK>y4ih50D+b}6b73B3vlM=y(91c65ooxzv0iH zy9eB63r*iN-XnIbsTw?HQzht+vgUZi@xBz1n+_CZ=NEp;lFx09NTAqYycgrEGc`!Q zZ*Uc}$>j9tQb`|Ue9NfTQ!sjrFnYGML75!`x56!Mmh}|9Ky)B?s&1BZjdEo`3 z3)RG5F*1Vb9aM^N%KI$xn=$UT_ebv+j+O5=>4Ac~eiHIs|Np#vzp&TKmhWvaa*2E& zj`U5$*+u!u=ZYV986)3EzqKetrSg5i-q6y^_veD-^VrT$s@-VAQDh4$;lm2d6sOAmm|bvM=k zMOhJvCVwfo@-#;d5Ve zOcU+z+ZUcK?(eIG$jSYE8#JlX8kZf0{B@d3?C(1ciJXYri!znZmEYfYA|^V0e_uUM z#1+dup=HP2bR0wS%(bkNCV&CfqQb_4lERv{zwZgDhO)wL`}+=u%iYbGxWxXx{gwAs z^3>zA3YbqU>ZA7eRe|4Zq4#SnT1#5+e12FdyK zsJ2s3X8ZeoD5)w|M-VJxEO|9{-tF8+^qLcBNAUhHK~P 
zWvYsbG8vD*3cFJpkG~F$7vk}Yc_<8QfLq*o_>M{3pAPG2q*F(ApMrba-*+VV^}sON z3i!UVn~44V+3MhO7~kz0KuNq+caid+ev|L$M4*5UWyT2|O5*Wlc z$c&C3>!ehj7bXuMI=BN_72&o^;R7Fl;clM4YvaOQmA|X^cM5Oj|L450|C8s1iB?3n z6jcf@p;cSbaz!2eplhunVI(O#EiO*%?u9 z<<1M!?3;2^;!ZcH6;Ame_fbn)fsJwU{y4cMPHv8qE>3QWlPU3Irt8fr!1xqL?pJcc zbHmp9Qqjv`)En?k%pp1GlMN(?d{XCz-Q|<>NZ#XG=lI@AgU7G% zy|ww?TDj;)7Za?mSTSI4rv{OIR8ue`6z!`@w~7PDgVEs{P?`EcUwDSMt9dkB7o`xdbZpq>ZR ze(AMFxmS{_{zfJdT*t^3JJa&ix{TKc)|J_Ftf*1Kka@X zw+C)cb52o)Nxe+gQA5euxsK^v=HxnRxisWDGF%#S;RrFliB8y&K7)=ao*ibJy8iLA z!|Fz#9X8+9?rNmyWi;T}E@|ry_JDPV;-NL+Z2CCt^d=@E1em|Vdq{2{d{O+VVtjcw z_(62IMJmJhQsz6<7FgcAd$CyB8}vuP>TWI3BM5%5?T2*V=1z>aqk@2A0@$TqBZLlt z<*l%x`>3h{+j@A#d$ktNTfj+f+TNi_Koc)^X)^yd_cSw`Y~kZqPMvZT2s}ahYGY?r0;n^ol$<{3``@y=u)C~lo>(78%`W%JdgA$VK-^+jC|lTeg+&MY z?X_x@i`bHozRW$FZd*B5KsG%$e+IY@Qr=MvO6pWbP_F}QCtjI3??*u zAnf9zA7G^SWlt$@Bf`JeedG?@xr0Rw^{sQEFz2y`>Jqv*%|@u=L`?n`@;~6OzMJR< z5rg52Md|h0%K9ngy||M})gVQ{sa?`^ggrnx#9N1Zc7d%M-QCQ?$di=0p9FdRU@A~49B$FWFL}-5K^$vCp4bHjJ)52_dN^bF z&Y~UJpeH8@CJ>?dBkILkDTfHuOUdeh-~Y;aWGgaVQ$QdLg)(V&-vrUw!kaE9(7>K_ zo-X4l_C=J`#CZWwF4Qq(9Qfmp&EjY>|DAeG-@*Z9NK38pdK2!ZUux*!%KC!N&C(YB z1v%QW&GFH@#dF=k6{d7rk@?ot4Op%65fbso)&voc)?$;^7Y-xX1bK=_3LOvx{1;P8 zTTh|MF{1EJ(oTXPW%2O6JA?N7F(dK!ZPsz8YWu-NXv{LegZ{9-u(z7HLY1K0FcgfG zPPATQd)jKzM%hAN{$<5`Ms_q1%ILjFX_txjyhN~oc+YcHr8@i13`>w+KC0~$>{+~L zJ;N)fv-d$Z+|~3G#d|IzE!_P`e7V!}iYcT@D-&qTOWr~$^; zG+?Xbfgi~&KBeTKC2eYXPn>&KoYb66Kgw>hge4!1lN;mYhB$eDoQ!5y*bB{`X(*Zk zlT6H`wpkSerGj$$Q9$Df$rpV}xixWa+C<*(JY)PFlmi>RT)5N32Ba+@XNAw{rAeBr zMd!&&xvz|{!n06oNSJhO>uHwoNS1bjd5~8 zoLm_vHKx!-nn2nE{X{o7gu>$n~xrqPOfU1)w zHVA5)B`5;X#*+&7m^6hyV=10GucaWFyLXB{cO~hHgx8HBG-lp+9?oD@8s^vl#HC*%~Ssf>< z;$&r%v|?1ukU?oRqhI7tUEuqYiirCXWvC)8MwLxm^dmWesx4P8PFMn>r=Oz-ow)%Ne^Oe9H!*^TNq0Xz%sW>4tjRr?N?ETAZlL=Ko*Z6B zCmF`wH?ckasDk`8{`+tBSf&)b^pe- zU>&=k1J>5=rv&W`pdEg57@FVf#K)!bL5D!Byw@ywFS0xs6bPc0D8p$aBR4ZqdSLjk zbTSdb`3}`FzAJ|CHo^EEEvJBioltBE`z%EPgKgAhh3&azuwpl6057V4rnfxbsw4+w 
zF(Ih%R9onFgCVko+%lY^#!L%^Orol~gA3bSK}zmwuDO!ba!XAk)nh4@lRnKgSCd*{ zsTxw(SZX?{m6oa{)n};;DQU)Bb6u`sMo`Y=nTqrgXm{FHSYjQpbg3XX1OE$ISeg!q zwPXt(#!-#iTmx#QhNYDS97$m>ltvbSB&CrBAxUZEKA&tL8ML=F zvVrC`@@ik838vODV^YJLe3srqY!;*PBBDFh<~ZG3Or%8|6Y1g(tn=Mf(l0*52&Fbq zB30b%0pd0gi<`0%;gr@g_a+!Mjwya;^sl(y zxCni~`#)J*Z)_6|Iuq-S2R;qM*m`4&X%oXdlcMp?6WLQbsO&l%9B1o|TO!PldjE$J zm994)I>pf52ihw&T+rUCxa12c5YS#cUveZ!@RiS&c0X2ob=rF4-5}5dGj=NbdhDP!-`k^8xc= z*pliGHV=V@Tth9gQR|vT3q;eM;1hN3XeHWtKfE1T;cagu)O{J%tf3LqnIm{Fld6_L z>-Ft+?22DzttPKd^2J%LBo#HC#_r0AcR>~daP!^k|3=djY!iUT`y`G?UFKC`tvM)$ z*vK77kc;46mh-pvVFsKOtKA^FhH(ozEVNZdtbF{_OP}v@EQOB(VQwvEkoaengle7p zHh1E#9Tmu%U@%ZOihsgx?xU&-Z0lhSFJZ*v?$Kzy8pC#VaR)W;Qs!ziC`9F?!RX3y zagV^i*~00Un7ydu4SslKP|y2q&IV@;>2AP#tQ;;aZkQ&rNkvbQ*^#4qKk6h;;LV1N zcQB-T9plwWbhSF;`lW6-w->Hx1Y|T`h3miqiVHw@z2}4q*Fs+2+bUUvJRUrZlc8yS zFV!vC#ye1GIj?{%UQg7pGVq=+Gr8&xL;$2~Kcd7L^{7TO<7jY++`4#k^XgD#;+yMu> zIjRcFte~n`CcD{8V80iVem`6BM`*i2G!5UsTw?mcZ2W*{4e19NQ8>yNt3igm&ecC- z`eBB9lTXyU{d{7U+sh|ryJ<>zjnV2g#vIp%`xi7ulVDpYcTmQjlb;hBV>)?(Pijcc zqE9K2*<7VT=7=|pA>;tSzI~o=4e*llzY`kdu`R|BYg9Cxjh*Hu+jBI?vX~*1m9X27 z6SfgUxJO=sffGLN-9bjrS5Ll+@$|=PuV#FQWA(@1VJWxR`WoKTGsC&ui5Y$hWnqSK zhY~YGzZ!b0s)F#(QMIT)!VFAPFPf{>rN?3>D2^93XzzyA^8g8*1(9aBtKoR||9uip zY2sv7A77>5oZOD66DbMkF=d=p;gQPoK1Swc~pnOSSKS2v=vF z4cB^o@;c2opxVc^6cA>O<;D_IZIK%KoqT4fb~sgw`ozq$KV2GKVCt#g@5KpgTPw^h zar5EIY~kJuf+HduvlgV7_=a__0Y5o2C#1&9w44z6F~-;@Yo6)JD)Dy5NSVs%V6xZk zU*2#BD-XkxN94OvH&OwACQoH3o5`co0{N%WXr&5(4ijbR131sdkhT<;Ff7ZSFisd$ zjdIIP(E~Uku{N(N2~No2KAU0rjC4}zo{W!zXN~hUshyP*$3W+sk?# zuJkOii(GnG;{C7&EHUn&VwQNf8oCE_C=fGL6-$KqNO1H0T-==oPiG4co)78$x-dxY z6BJ4Ts$mG~zd_9R zhxyiU15~f)thjp8^SI8YntgXzg{f4@McTzUA^Ps{VyPCn&jCMUG#H9+*+=#yZyiT`@Aw(p;M&bsE8$z4qNGy7! zllTFc!@ZkchP7>iN#^l#v7y>mAoX}8{)#HrguvNYP-B`h5)a8PpR`wm$dJ$FesVm25 z+@}l!MVVbP-}*CS&R9>;`!aiTr)nS3thFrgsJgITeU6&=Jr1Wd(GgUNe3p{+>JH2! 
z-@o~Y$6w<7VI7E9p97~kaJqD8PV5slx+5PS4+KA^e6GxT^}7Ix_39IF-ICZlTmlcI zzx%J!wo|ZY>(#H3t|^1P^I%fqQM9NP{6V ztqD6`Ao5chVf^(79s4gzenW9IV0p>25VXp;FGv9fkNvZfNFX?56a*;*vmoO+(i~;B zXWRxuxx)f9!>^Ejumf+tuKaxk2pwIZ$pqM)YHH)Y@Gbzt~o_}7K^4#a?`5r%Lm=Ox}>YlHSRkMHCGM8>v zSgMXioe1<%s7xYjQgaQ~-7JX?^LsX`H4x!ZV=JS*~u5a)+kd`j(p2`$|!JYv6P2f0_1n zOl@y7nkdz{dxEsioS)>od9=)Z1%juUxNDPk z3kNh2bdf@rr^qAec%Fo5LZy2pDi#cOfWgij9V{JM65G~h_dC=N^O-IhU_xY0X4gxV1dviz=brs7 z+Tde!+B>|jckmW*BisyV8Z^O>@LR6+I{&+B=gq2$R>@Koel4NTkI*#P!cpgpOP?EX z!jkkc{T9|(igc+86dO&V^W>S8C()coCLT`_QG~?GlIS>uX5{K#cO?BK>7#gnMt|SK zX&sfL&rZLibb0!mir3+O{TISJ+J7I1lQ%GdH z`o2^5UIZ6v7~kg}6QyF zziALI{#%k=e*NY|IN06EBx(57UQ}--t$MGL8lPY!i0Vo&s#Dd>C8`Q!55N*$>1Q`T zDolhR*h+irghscJ>88Ni*+SO`(EmL~{m1;aL-;|vE)?N^1R9d7;50LG!w||hA9h(l z*a3?i6I7L&g{{PP7?&BU2&ZIn(#cJPRPRG$LFFZ;iJ@#k8jJ@}T_N~3aUSo^7^hq$ zSFMp)oBa0>*Eq|&Z?(v#u&Nk|JDI$PmNvfeC9MGT(lai-KIc@ej$kVb(@3GviCva%8^ z^*hXsVs#^5$EoeN{a)^FdHH~~zZ^#5IsLX1#r5L@YM5Z#-4F!4S#Wp&%7k?7ovLnW zssLw-ZFm^2b*C64!&{4c;fIuaj9`I%zNjht3O~|v1BUZ>rwVoV6{20!>kM<>B;%_4)i?e}Wdn=Pg?dgZ!0^a86XDp*4?zXF|Lw;R0%Gb!`~f!!V#=zav* zAt5lZ+c^x^edvC}zC;u<+9gec*#p>ZNz87_O0Zi8rYvGNcNRB{#N93$8=v)4+BGp_ zjQFe}Xb*QE0QjEAD?WQGu)r49 zLb;K8K8rlqus>a!jp4bqx)5*HrN?%wyBWwzwy^pPc#b^|Jb+BC6y2aCyAdhwNWwGP zFVLo8a!c8bzHGZ#*_YKK(i8%`lx?A`R*ovUg>F?`Xjd=u)R1J4XD_(akEAx8=_k>J zU0T%`RJ{-V47)VxR_m12*bim(_;pYDLDSfeudHRZOI7An6~9atthktxCL6(WE>*VP z`d{H!U8}vWm?LAu=iO+;%y+Q82Pj(`>zG3Gcp)3`mIEV|rTGMEHx2aIK03EeO@o~L z>dc^?qM0QpzxO>Z$`AN&CJa=~EW{Y4W5X{nL28 zBiD|sBD(r#Il#!?G2(lZJ#B_}8&D{b>A1acd?TCZczxKaXOX_yS+M^1Uc6T? zdJBDHODNE)WY|8sEYs%imeB~$y8NASN&IbA9xUepP_E?ih~EUpoqednOKz{|S&`w{ zDi77#02KfZ#L2-pITVuaPY;6K;s#Zz0d1tGF?SD7xNXy|WS*>BebxeH1)2Ybudfj> zSk-R`^?Ur2uK3A}@Z{g6jvH~Xe1)2jeX!4N13lMJi4rX&SNi1QP}p_{1IkNk9h`@3 zDdujn|Ln6nkbK!O_(uiY2A|!IWD zFU|w&DPWlQ>cQ}awI$bZ)`%ilmN8s;6=8^JrX=GoVH4|)$gCE>coyZs1|D!HTgu%=?odI~ zD!AoZ$wk#{E1E3}QdD@Mr%k>mUD#AdzF3MTWSv4!YkW_G{=pU=P)s~Vv2EOq+Y`G- zsGiVn^Shzs?)O086WxvV|q5n7>d@4p7KnFjdD}mXJa|gH#!~k-yMP5=9~XMOmplsVA6tAqs8^NmO2? 
z9xgz4tO6_s-IiC4xQn5ei}2)(bGPAX4O{3 z$w-~LWvb)1lhKc2U({I>7Zk(lN3IxBKi5p1$Jj^-sC~eP7GJ_`bFMS9L}|G#k1cIx zwgugDOLz$TQ;#Mj{#sxmc@LNU`0xIJxygCIrAMO;e#_m;_I}G^We2~dU)kPoS)}aX zw|vDn(#}wtLnHn@StbQN^dfw?#lFcla5N{;@ABx1$UF4gfkEN{pMMZYj+h-AhYlSR2o0!Y`osbvB7w! z#z6M7A>od;Yp5d7knaWqq{&vN^csWkO^Mi-VUkO>c|)dt=P(8l58&lp87{q|T&}8K zQ5rh%irgCMKAa90 zDa#CicVK7Y!9Vnr_+t1Iav~qF_nxADuUrr6KC4%o#Bu5y^KuE9!|(TDtf#$2a3^Yo zm9GQ$XZmU1J;LM?7EqV&S8{*>`Jy`k*~41K>AhH7v*u^TVrlP{?{F9PVF|qkpof@r zB6Vv7TR7(Cx6+C+xF-`U99}cF*x}@lJUB9}(T)6SB4qN`)|o{odkGkD&rx6FI+-O%t3`nHJ1fzZt*aDhEtpq}qH(VgfT={uQurR0m33^wp1Kim?&ps|QU< z8nWf^n2pk%epE}dH=BN9>T{41v5nH5e$HuYj285-?IhPrb)naT) zh8%{tkvocZ`l4+zU!cN@uk;Zm|0XABSOYyYibLccg{g5*W~Fk2*{aqPzo8f5TdpU; zay>6E$A|i9snfTl@wiQTqDUk}3QhU5g)m-nz-30Ub0OxKGM{88y89 zGIVjMMj?Z$N-(DV$r_gpqI7H(Jj;o|9|Fw2>{y1=)lZ2x$0mtsZA>ky-2^%Htmr8YGzwX8! zl-RFxCE>)1;XcI5E@}459w_`7+b?A$?AOO&4l@M{llCj>_dXO>(SDiQLjHeDW@NwK zMs-=8%ZON_&XAEIhT!)3g=I^=+#bxWrRRV6V?g$>j%UBN-~mKKN(tH=dk3at0CWVP zgZ&y5qJsU}Pg>SHsoj8kIX-J-zmml+BEQ6bjf>0@`!zrh?r_;?#&!8`%Gj@IU;^=# z9Ol{g<$i#{%NG7^9xF-AmB^L;!>-h;TzHqmi@CX;3z+-&n?r9dE(7G+$VEz+3MzK>ANe3blV<#SySt_yW_w+bok1R7BAK+W^k@O)9Yw(07!_~dRiq*9axa&90K z0!tOS!{jspR(`uz&2;~gIu~?(kq;{S%wTndvaZdi&O3cj-q;d>0 z-Pjqwd@XKn0plvhVAV2U&)}kKMsWK^yOaXMRBxy8Sv;*J>sYSSRf=m37b#JNNCX2( z1vt8VvH}dYGhn?ttthCXlR&tPr@J!(k}`<;k?i#;wc4?l#)knSz1SY(Tj=UODb)gw zqkSR=$$5h|(f#ZEeM5A=EBF3YmRS6;7{KAS{}9=KYKP7xVz!T?U*Mivu?&-QUDD*< z9w7GvvD_;wA@`racjrqK1Qc-J+&)8a2QT+_6J0;~HlBQjfb6Lhk!*h#L7{J$;c*S^ z7>4FKqQV_2@dvC&s52Igo#UUEGi@q%BGcIlH7!7i5}d z4zlBFj+_+pGNy5W)}S>510m6eeA&*7XS@tTWD+rfbc$R>oNRNngoh>L_{F{X|G0s=30;`@U-K8k!S?TM!8^?=ON;XfERlAJ@AB(QDblHyX?y zo44%kajj52;EzJ`EL9^+vZDtf7g7`q88j2l^vO3v9LG8;`m^O z`$&3=DT#nN>tOa(B=sXH!>FH8xJF9aH@6_u0Q}SSh--}%!C0y(Y@l$-wIyF%MUbQz z0K}@nMTgqs+G28RxayQ-f>C{7eTlW}ZE@(LHQ3cfJT?W$)fAq)neYXNpd>8m29!JZ zMN2y9@9QHuT*G~KNVMyNzlZh#=}uQwxwU0By`8VS^F_I#T6Z)NCSZ<4|Y@Q;IGQS$aKFK^?b0(tB8^7b7ySf}v- zY!{QeG4keaLzQF;Z$GLiYd|RDi2EyXW4Lv2NPR4J6}hlJ6^k7hmYkXBbsHO*^pWL7 
zZyf@FI%_biX0C8U;HOg2`!3*4M)WSkKy&2el1yd_3_U0gLJ)QCq{ZFQpVDe$1@V>2 z_L5V4(=eExe?$;3S99J7%f}_2g`G5L6gB&Q>fw;J+{+|&vcHS5KE*xE zp_5p2okk1tnTvY~zbeBtx%WKRLzd2F*YjPYejg2iM7yN>b`+fxeK7kUO|3nLz*}MhD=R_Zn=zN{KfV#g0)0Dy z#zu6l1RCrer+$0~Wu(;zyF}V1z>z}OU%(}99zFSeLhIQ7?ya;Fe?i!B#}O0uQnfQh zRRL`wRikB((e&++3>T*T+rNmu@5JCG>HA@nQ?3#sZU?ERtXEdGP$*}5>H(*-Apiuhg&S9J<0C9vAN( zt21c>D)s4Zh0WUm>jqz8&{tT)oqGw(k}X_vD25|^3TB1ZD?NUrJS|a-L2Tke6N9>2 zX*&+-^DgeFw}A%pex6QAr%Y52-M9JDy*%f^7CrF$Em0*K{4*IVM!r!n4P-W|2%d^> zj66+Uw(81{*M6C zkaXWgTQRO4;FA=0S7+R{UV#ZiK(6&nFoc?{a9KU^2*qt&q$gpq?t^*~2(!YLLutvh z=R19C_xRT4Bw9Pvw7x3-z_ zMXlL*JcrWSMm-6w*?8mwds6aL3l0KXC01uP(HOk)I2U&qz8YX|QTt)R43vNww~xHI z6&Bf{KynH54SpPJRHt+aa~D4ji5jgXmoT67t(gH{?c;>FRhpCE=e8 zF1PB57x;WQ)oGT;{@45yZOjD%Q5g_LQW$MH4bkvC9%%iVmKu3z6vE>NEO8OYXeDVu z3qLP6Rt?@Rk@o@U!pNpcT3C+MuKz$7y};BHQK6Os zYXpOioAb+}06Cok%ROW@6cFxEQ`$DBW1FjhC*G@r&fWJ$qfR}7PAqWoBYrH}NClXv z6g?nQB8A(q!m@d~QG@W?54ZUek~AJ{;sNC)Y}`5S%KCs2u z*8bpBN+xKmxuPUpWhY82{8yA%oD-g?A&`g4(iBsR(o^@sPE_Xppx(dd>$wJHEJm7tQuKRvp*ZX?^zy93UjUC@lrfSwjzbhdUa~FOkR2`=%SrbdiSBF`) zr?U)Z3Z!^*JH^hFw4nE~fEw8(o7)2CS8%-c`Kr5>&Qa<(328KRiae0=+> zJZ&u`+wPxZ2yfoUXxw$}-x+(_F=QXado?19Hg}L~IV%V%8!`SOt^H(>zuNp}JblKc zN&CB*sql6)Ea>Q+d%BtPDHlUDziOQw>NQg(%-w(@J0p0Osl=|9%!JWmDi%|!OJJFx z_ZxJxo9RFC$j+~pyZaRmR+{JjgB%y6rTU>!5!W@k2G$CCd*LgUOzT>=AJOQMaFRvG zD~uy%Dv%-=_ozlj>7~sYlq;~mN)3vULY~O7^yC;q&Q(q4%JJl0 z)BMC{%23n^(O!D zF2b>N^gjF!V|%rhrE397U&SrVN>+r?HQAp^#VhrIJ-4e218vux1*~HZTiP_bl4)%h zOMA7Fq&Rwn-36z9i|3qoY&A1TO@v*ko>RG22aO7PyQF&7K6PASFbQflF?8Q+%n7Zu z=Bbn$^TfOkab?yQwwVw{$$)C*kIR?rm4+nyZHcVb+2;XU@*F^aJnq@6Jnx^r1}S!h zbSq_a5+088{vrxl38quGtJvk%PpLQ{x10fm&dr*{z}wcy8)(e3Csz&kTKTro=3g*p1atx``6~aoJ<9^1KSg4kRd@mg&Br2`rc$tc`3U z9Pm3d<@NS=Xv(ep?${;0N(W!<$-s2$Zi~#;KK*&>AtmKOY`}mK@glfE)O{gsx_UNZ zy3Y~-Fz`vKlVpF_p}7d>6tiSb^on5&hO|FTiidxN!r?rmltxc7DZj4&mZFUYe=CL&TYMW&+p26<@XgUhKvaXQl%pZO)YBvq-$7qeFb^Ks8*CO}sb ze7n|Za|ixu3h%Cz$xi_nJn31Nx)watNI69rP|LcmNXxqQ5Tc>mIEuv((G={N8mC=f 
z@>-P0Rq2n*jrK}3lWXu0N6m8GZf*zp#+mXgoH9AiEbDnoKMmhbY_x6s^~CEn!({YZ zpTgJ)=AOCJ4F23H0-aYB@W4J@&DCisO1V0(m_*qlT=7=b+=-o66!P;ibEByHiu}$i z^!7&Q6_Z=X9a@)n)H=H}_2?5n^%nWFO(w15&5f?tb?VJFg$w;!IF8ewf(HJ=H!ZNM zDa5~NIM?I#632<_U)G~G&UEuG8^7Rz$j~2`qEdfcsz>_ca;LfA(m?RlBIW9d{(@IW ze!mgC-KU`DGbJ! zA>Iucc;x}!O5vd;_=;(E0nY>kC;2^d$B8iMf{dgA&(`-_Jmp4uy;V09F9P?;6FjJ6 zFN(d1wpx2e`EFvUdD_I#V($s{kEfJ({>?o8RqL&adC7vl+;%HneC4Rt>v+KXz$PA! zzjj}rb~O_+BTyBY3@SNA9ReEF>N`rFfNwP({5av~0-jJeZ)0>y)7|#i6R%jV)rFr) zvU#Z5bxSs?vN)#8RN;##K8{HQWgENAqrct{334OITc%Om?!Fxs>;sI(3Vf7y0@3K7 zKTECk3Z1ZY7jx3Zt;~03b-CXClas-kk-c z(F}UgaFZJ_^UQOKtmz!u1RtA2^Jp25KBD_}M%fI?>~c0Y7jV-g5X`W2GyLPBe4u7r z#&KH?(xmd5UkYbhG}EHe3+kaH#&wA?iY!uY{FGjV& zIl14~EFQ{zyap#DXiT5FeY_BFbPqp+`UI)Cpo@3U!@Or~=1q`m@{ZNSRqsu2AoP)P zBRoZB9#H5bEzbv-5X38rT&josET^+@zk<4yKU4~O`v|gb*tJ;H?%+3KuTb>rzPD&I z&wyS)(E@M^`Kcx)#Ywv6UV0>knLY^rA{p|(-QRgI__0Y#QkQfq!y3*mMcOX1(1uGmj>| z87GIRC;H<7^+kVSW~`PdR|EPBGUFfLh~RcfoNW2*9Qx3b54!Jr2Z^d38F>$(2WIL` zx!PVdsIOLu9uEHrSU$~blY`Sd>TW$S4>orWaR?6QogqW7lbd#R87F9>XSTtuxxwJJ zq#zvJswgt&@h76h)EB4vee=zD^WYcBF`uGLjht;Yw%eU=zKaYoFsAJ`-}IJdr6V-0 z;MI%Wt)*akpXr8bNA7FQL^CU3jQn+^#Ec@5;r+S`Q~3Y{^ldi@W#BrIz)BBrAA_AK%Ottg-seIrR^P^*1grtdf)@jII-^!6yps?P zmI~KqiYA`MN$;d0WZ+7g%vz$#xTW~z0?t~C&`PPo5V-AJ?&lKXGo8DFPOx|x44dD1 zgL4W8z;I4sfJ-)P+8NSpPT?gzV4T7Wx-w274>}@FVVoG6!znxm5>DZ30D5eegTi5R z@GEc%rh!bSaFg)tUJRUqoGrGk=Jm&=sM8;p>WTiiJY=uZPC+eEt_JiMIEC#UathM8 z&?#&j<_{iV=EP{ZM7JH4?OPS`2Q$2s(j$Vm^#|{T!x4Y5AE)Ew?1Y*4;T+}z{) zvfQf_3UINY_aYQT!uZu6a~7+%oW*Y8(pc{hNK7Q4;d$R5Tw zaSw~#2S9gs2!s|J{0c1AG>~brsv5a$2Vk)~0T`Z|*B_UnPJdjgC;H=Zv$^2XK=9QP z_4YMts0iOn4X{FiDhY=IK*-frltnN8Z9Pl;*mb3irN+Pohj} z>U~w#1sRuQC1ZpO*wbpZs$VZdaMX^R51$Gh-oAAhjOL&EZgwu9?>E84@3|PQ&rCPD zt?Dn7^$LXgViW4Ibf^mbLT;b%YJ9;Tq(whHeb z<_m_d$@B%k#DzzF!NdF>?h97uW%mO=A&WNS$EY9p5xUqnBjN|xziZpzljjjXa3PWj z{J?1+P5Xg=<4yTkF{Jt>wOyo^BJZbKTz7{twuy0y)Pb%Kd^6&Mf`yolkX_fUOVFJ6fnaK!J4H>61@9qwp*J|lrkUT z6|pX!(*!n&)>^6(ktK4IX1T2UUK8WwAi5V&7vEbqj>7Ilup@#>T*ef9%2G_C*b>!b 
z5`oBJ0o;~n(EvgjMAGR6f(c)>ewC{-bvY> zc{WX&XL!VMG!Z%OK zYj@4dSRLb6kTmR4J+J&AI`;p%5dLASVn1s$jv-7MzJ-I2I*KpzJA4Jr&J8TzxM$ut zh|)_-+P&i4M_S!?fqVf*v=!y-Tc95#+K(e6)|+jeIyylaf%k5NfBw_&~uMgPAE=0!tk%6O?Qo zpb?S+`_8pl883H_R5)l7bgx^)Y!R2t_~4m!m5dEAaUGIe7>*R0oZT)aBXEg=MhctN z7!$PoTZ|&igpl$1Dl;Pm`wC`#&KIBEV4(&qWADIv(dNB#eTLm%)a>kjGdKTl=lq=A zn=wN;SO|UvcCV2^XXaqxn-pR9djZ7LM*!sfI|2IRa)-Urr3MuG<5GT2f8k(3Em5up z^cM^kp8N>nV&**<9Q+3!oZq1zxQpMz<^-2-bmAH|PWT2XwHXp9KiEtgvgNzb$-eVt zac+8WhIfT+g6V+BbEhB};}sYuyar!nBE)#UZ_ygb5AKzdia&R_A?*t^Fpl_w`#<4) z!6~9F$Pd0qseHlTm?wr7e8GBp;oXGW3Vgw}au;~Fw`HXyONw9)ZmF=ILqud*p*Akz z%U5GNlOUk)lP*QjeyOLT>B49|$1AvlTPjr9Pnn{SSJ3*J^$L;oY5ak9%b5J&3PjY= z`}iwIYw}!PqEO7T;|1jhJZJXeFD@#>tc>Yfqs+AaPthn|V$}UJD@5E4@K0U=#a5UP zhYt2j2MX|Z&x|GxoxiA0k07MnyzR6WU_1yXyx=m*MOCnq=9tlU(m0vQxxznwC(VD;L&R=!h0tdNzXG3Oa)nHvali2XVY$LrR~a9%H1HWW;`h5EKBSG` z!*T_?ZjDeSx?z4}H7U3`iCKQbh!(%`5wy1N+zh|*R@;pA8^LpZ&=UBKA2%7lVNPTE z0oUQUqE${Veh#rO?Kg(y5ZAGmVa65a!&iadNNI%wzwzJ(LkoUm6TS2+#9@+p!tOo>n|z<@E7ccUd-i@+(sphb_ZLa z&Cp^g%o9qDgC?MRQ+Un|EjDvQBtFv)dxD|GBNXB-$_N}wX}Un3!|rgoS$FK7yLR=1 zEzD~vg4!}@qfTu-z;SG((D(%FeuU;+k2%TSow>w=J>1bBm$%z1k-Nr>JJP2had(5e z>)nCsMm)-{I_FVDw97&~E;}{)IKV}=(Z}<8z>Geg)0G*09O8;8%V6~Jg1M1>^zj(? 
zn6ms40F8J_ARK)JzXFe98ps@d+$97%e}t7PlF>gkuRktDo&LB~PxQy-0dv8nf#9np z%GH4Wf-Gba48enh9_4IeMAW04%5UCglDNW^N%ofx8-KhTm9@DBmNo@r?7K(~#Em~@ zc+W82pw=0|_~SlW?bZ|(Iqs+HjT)9KXStE!uE3%D0j@aMy_~Q@6MlVH8BJe>HrHt`M$MZ5G#4d~B%ZqDB zCAO%0&myUg-s%^R*3#o7wvw!+l$)CY*qmk*w{po$tzJTBgZ6rUb*(9yWF99qaoc3O z>iNp<vn6NaBsI*20US*z+@D|Q2J z$1aiXzMNDm4LNfqkK2iD*t<~}*%ZOtKoKg^ilCy1BAl}pMc8?8{ql_mH#0l^4t`1l zu9qh8%>%Y=c_)!D2I4=?YKHgC4`*vXOXx5u=Wh^9K;}p-@XkW3z557Id_QTx>Hs86 zQt539TS)Uyn%5eoze+jN5@$bA4tf8TpwiLX^1|Om-oIn0kW1d5CrY3^8VDItn?*j& z7Fl)p-U@EV61n+U)(mOLC@ct>+}(A1PLa=M6+|M}x7oaNv?KzzKPkaGfxit07YO_f zQUnC>U$xm<&n56f);n1BQ=nArayt}g>_W8H+riWX0v`$me!5D(Myc9+1;LNj+f0F5 zc{{`249~seQPz&$PyEFQy1*H(vwCBj*49w+LSxD^itKwzfj!$PLaeor+7wlb1Qo{# z=LlAggwkS_0!vN2ZZ+HNH3zuBm6r>8-K2epJ{Uk0@W6QXammA&`Kndq4`%LakHm4J zy02Y_uiW@^c70D|J_}FUd1y3!(JME?zihdU8Y{)iqoru^HRrjUn)6R<5ZQ};ZNv=x z#Vx2$zYPuEWl|#|hWy2|xWGVto^bYiJ@kwuA(#4mOWwt~wUPdUwX8gB!Ml#O`}#ul zc^(b*R)`+D{C&-Pjnd^z^>O|psy`3obUJ!3;uUe!3dG8{n zNUP93gq5j6AI1&GSE2DWR)y*eKlw2lKSPOicNM=mOgx6go81Hxr$Q_z?)JW8gNyes zP#Y-FS2X?NmP2oK{@~xT5|B%Qn&hpHq(I+CAHC_cZKgP_&D^81w~HPW=wm3-XdTW} zpmopJqdZITFKdmxqxl`Z|NOsZU{Xk_q7h-BhU~w%tCjOz))dux$E9eF9UBQgfEiD>5jWqo$fJYFf9eKK(uCQt!lSZZm<|#fN6HWCkiF zEvOH~#7?S6PVhEd(ZR2HZ&_wsB}(2wk!V^P838V!-`;-` zXWSl8=9j#CY}oORVRbGy?J)f-mQ#mO32(KKkk?O;TRQmb%Ebe_wHA(=RJI>=>gc`t zd8FbJJo}LySX)R)&@|M&k`v}6IZ-qDpvm{_H~+jBaZOHOGt|l7$yM1z@<`&kjQ+A= zcP({yxe9H=(bSIk%eiS4RXjS1>k4(7bJGGDy8SKb(+{q~9UT0GSApX|j5UE6OSSk3 z$DUuBt(`2wG#{{ft2&U=qP#}*&HDrStvh;2@_b6uYOv^Rjf_X>5K{e|mFj7#X|qtF zaRBKCQtfzthqz}apV=5LIeG6uCp&t-{)eM=c$yL|CjB(0Kbhwa@Kduf+f4cn+aFPZ z`(yz}5b2lP=BWe|X(;kvtVU1LZ)=n0+}lT$c<~NG_X2j2j<@gcf1fhDq|nVCCRD|3 zKH_QuE;zl@mt6f(D|^uk5nBZu5tihRUFulcdKY@{TVgo2Q;*tXBD2`AyMY-$pH=~( zL@c?IP^|Bw+V>wxsJpH51-ek8q=POW_)xY6v*^Ms3G{VxE?BbqM(vsi;(yU8f1V8pNmRrtJBg*U09&k7Y1|A|GQmWfElCuf7QWEc7B;eh4zL4%1+jyf=3b_CM#n~Fmr67NnYsvi` z@L8z0_aM;)1iM;gQjUO_ZPqnk>M}^yrt>D028I{8I8i&A}2u zZvs8+^rrqCX-`(o`1lw<5W!{|MFa+n 
z5J7N5IYe5o^lU`0o+9w{twz3fzWv~*aRhrPlblahif6_PHQxOsYk|If-C6r(S&7J^ zZ^lD+$bC6lM~)Y~pQF5<^5;G;94-8p%FY!vbotY$&}dyo>o!Hz%lRMYc$^#ZZo^A; z^uF;q;*tGAPj+u&)dwv)B$nYayjSQj7n8le*^4RO^Y&tz_beCPc7e%c_r|l7c=e=V zM7w(9fSF@ltlw+cfr9R;Qwx-)apVhXZBjU$9g#0DLn}dbI$I_<5Gt zSgy+*_T({lCeyuk7os1;3K@q0~c7DBWLY)fe z(Fqog4o2Iq{cgq&P_FN5O`x*0wj!0461rocf?Yup&D4kc8dxMvQ5r64zAgz%T z#Nl*mokyfZBL^`jH1aWt#s$pZX^f5BKbf_uEDmgr6l~}FAuQ+Gn)h0I>fOvpqi?)X z%#9|WyG}z9kRR2Eag<)+y>7Ef)k9ZO*G!>8We=!#t_MC09cATLK!|@f7JR+M(SL8(f!q3$jUvIWvhlagpPS_G#2=)IvYv7A81fIdKTsU)%~jG456YCH-Tz3 zVx67OK)h?|f%hPa-O+pgGpGR54cPY-*|+i!%|I}p;XQR#D9K6E%P-(pA+ryA7e zo~~HBklUk9SF8;^mk@h8dVl;>Zho&s z!=2?AexEM>%Y{RePv4Kg??dHr_`NSXzmK5{;`d=$V1(ZXHEyPBw+yR17r*}-e&pi! z$rygalFF04bB-+E3v99zJ#SuK4BAc^%JdMHg*X zT*Y{13UIWuz&0CU*#*Ebppp_iP%YWpQeE;jC2!{YXa#5Fz1Lopd1u&*1>R})qTHKg zFIZ2r7nR=e_F}Pjl)YF={5ycHnJkYu{0ZDXaZtltBiD(XpQWOk!!b%H0eqTb2RjUL z$#?2WBC$gKTT~30-S(YKcwpS-u`1@hO9ZzDW|{!~7X?zeQtR!h-!z#%*mW9r%fdv_ zM7k*yEuf*vKn+QXffc|apf*7l0`(T9a>}JZ9gk*4R?J^|M_mTj%?i3Xfri&{iA`qm z>&ij&_6uIPwDH~8k~*NC%i;3|D~EX3l$m%7{>)#YxlqGkUlc!A%M69 zZf3(U4tf&i*U@|X6Guy5-C4k8JbH+mh<*W&59pHD+diyxqMw#@M%#} z30*EITFi3=dhXuf8}LhcuFyVLgSMF8_p}f+E%H372VpPetpk z{kTkXi;6ZCmFg)CThQ>&QiOum^P~}}OEy2IVyW}Z=E&eqHxIW$#n>g4snP%4kd6gA%cbCY)QD#tp*`QXOs@7HGh>-ga9_kn1X^z43tQo|0?l zI+R7bZYYa(t|CVUea*$PL#fnoJN)TIMrR-47D`}1wn+eUgRxtA+74BOLIg+fK2Gv~ z@Czni{crg$@*zFoY;ikQDm`DCwYe;|Xae33nTkinKYV0ok)HsTBX@UETMoBZj=b<sahw$D+*?vBKe8 z66T-GCKX^lO~^GUv#_UYOM58`5v8#pmeUQDkA}M;imM0K%lpgWm-Wz0nBLy9aTgYkI`{61gmq+?(fW3{4?@%E}Ds<;St$ET^n^ zzn+iset;JG%vZs7D_^}>`8Z~9helH)sN|THZ`ea0?hx)<-NUPS*ebk@KZ{`!+PwwP zO|I;HjY!qe`}Us^b=WU!#j!y^8!j0na!H32U)t@>jVi%2^1w`tqKBWvq0K7A!4iMq zdlT#s|63hw7xca`3w5=l5yM8aPjanp@y9+sEz{|bODU56ycOU`qruzEZ{}KOqPsFM zO3V0JrmCJ?w}6*JIwuz4(~amOryOQIl$9n_o&LB~+ND1(rF;70@?O9(_HrxWt^7l? 
zwgJ3PBURTF9$ID=#K^04#|@}mj9ibmgXZTz3}WO*PFhh0FenZGushM{%5!ve} z$&sA8x0&@3FGbPUh}OrHdhwNHdIg*+tU!P9XW!lkr`QuTvNjs9h5 zTq;J1v0Td87_<`&pr6AaLQdI^5d+=;phHg;Zn0n^+$&e8uQdLEvm zzZs<9{QCEZY_|qr*9u6b8>K_K7|?Ma;RrgU-OAoFL&Yv8!Tz-3gZhpKdFL|f(2Q?TFu^G1~v+md>e-u5} zxT%vIvWk(sPHDSVE^-_$0N>QagCq?GWJ_ht4N{p$(y5G!jMKJX6F7m{7;{?t%!BI~ zujmV`nhW(>W<%me7B~Jtv$C#PjIn6Bdz*Px#e0;|4#gP3IB$=}?8+xDgeXf*jtT2V z)wE?!_*k|Hpg~&pT4~jiWi~@$$PyO>lHES;Jx=eqeLWACZhWH0R4N2z4ZjscF;m`2C zud%svwX(G)9tyg3Jb=;-~0p4W)m)8G-nzFBI3=2lfx;jVe#e*%mihNH^&ezO}x352NWfK z&fHWa-c*yn2l3`g{H)@QKOLe*k2fV`NFi+|;UjayYwKh=8tSBFZ(K-Lws^A~r~VS*W~eLf*S5p9HlSMYPK{p-W-DGiZ`{?o;BWFri?jZyzw3;z_I^#m)R`h z1~oI&!<>!Q2~{W$NDp=N;+YxhFz9D6x{HVKD;nmQAra}U&_)^RY<-XM!LkG8$+1Gu z8gOV^vpKkLH^kA&l(K3@IwewI4S+f)7HROqn`auLfU;A>*`W#J7Co*%F6D&u$7PSb z(kC~za*5;J3{YI;rY`lz<(>9Qm%&)b`wGgJJ^XCY9T$FD=_d;ESw$oleiRAFCT_4U zTO+r%RNuwqM|bw&PQH*whMzynx&PmXpI@q38-7XNsjT(NAUPz{Fx{}5P!T=o-_WunMqj_ ze@ZgPJL}GI@n^^e8gC&(LL76q8}B@;T!(?*w(1zEm^#92S3?Xao!QJQB_<@n@R% zQ=5rSJHmKp-ScaBf1uv0(BfF{V-GShj0TVcYBGyh+=?~GK$t3O_kIdL6`#Ac4*!s- z`XF&Ox*GfzLS|&i&g+(kq5dvas@kY#-L5s!?J>W^PSKpTG=dNe$Tm_oH^^KzrZq=J zk&*Im#D>p!@_*ft1>%RrI7n^I+Dzr`62%-wE#narHyL{~N02okE4c4p4*l2lRrTjb+|{qRbt=({~^l`(9>uy`9R^=+Mr| zATC8p_P&_JnTbt)c|U;nPFi{))cclx6YpS7fk^#`(wBNNEj#q_ej_u8^JX9clQ#z5 zhF_znQVtkpKsLFV8^~>MT5d+4(u+0kBD8#f)yNGZ=b8@_*w^m=38@m79!@ul9egub7l$n0w5B5rsni^fh);Bk`zn)PcSwSIA>1Nd) z$laub1>8}hFcGM4J4`Swuuao@S!l=Vy?wMz^UCnZo-9w6X)1Du%kxvg8$@%yJU_K` zM1Hpxpem`rym<)Br^xOt2v&B?x$^rcg)xr^IL26x*wgup&v56;f9(vB)qP7S7qdX2 zC6Bzq5lbFFV%jJ;SKgg|`yIJ0$C0?sW}nf8gcvMxyI)!h0df@Qb;`W(Ntq0+R3Zm9(tmXxIrrgH5B)_K@2 z3;z_zU32oH>NYv8ZU%EVDz#M|oTEjz$GD;i6;Qriz<%_IUpx7whDIo!8dJ6Upn=`3 z$%zU&>D}xIX+5u#fIVljp(LVVV3ZehU7pw7H6yBG6tk+foDsm(7Y}|5)b4He3(||^ zZDERdKF@iGKb`Js4Ry@a%JCI@T4zXM&XAM4+}Z}IJEmgCz`T6A4)ye2A-46ES&#J> zu7z_E`U5wHlS+`e&rX}p7v?TKjTTSh>F9iC`27np54R=dG3$RFy?Y)oA!iFEc$cM6 zBSn&V!af|?X0A}eohopHh#mGAa`kz3I#-awc*iJNvC+qj>i;Sa&DKoBGr>3(y;u$n;h3r^&B8y 
z7ilG+1~Sen-xcU%NAK_Ne>L*0#vLl)IEUb!N=S{$_auJ9+Wk@z#7w}k;zitnpV=wi zBogsAibrQk$ZabLWQ(SPy_j{dwv1=`wVXrYT3~{@!Ln|ag4tLEKHt>d`e+_ znRmt8z}$^b*E;w3*K-@FrxeePi>lG^=`Wp!_!=EK;EXP?yTB%2FI%^3L2u_CIma1A ztt-5Dy#vBIYK-}W@+>b1sub(;0@iC|NIKU$pTIkH=hUC`vYk6K;9~5&^AF}2kU~Xe zSksOkdnaiG)yjIXBk&mK!0vf_X7iyd9WMuMT&X!Y>RH*RAx2s%SDKzteD6DH;M=6X zBE{}*Hw#QBtNimyi{bKL;CAkQVtSvG3`>|Ko+!DgH0D-5JlN_9^B| zB!>Ubse7^bfA?ueqRV!zs2r zeOU9k_+Je#bMgPXsE~J-BPGKBFRA=Zq5}RmLq&Z4Z-Rg5|2MPq|B92TlT>%NcRL|C z9-n6w9jX0*&z0Hvd^ve@CZDU{9rC$^o6_@*XqtCAel*JGH#;NmJvFoWTzoz(bIhUV zmtia3!vwHldj4LOf17gg`FN-pozH>4vF|qqG1D#;b#1fT$n2|s-ap}yv;Jd zLs_@>fskTGB8zs4_&+D4Xk-Fa8wFgGmWYmBN^qjn4_DzD@xz<-Xp9#RBOyg75!bei zKlg^C8`gBr+poq7Lau~&*3cJ%JL$JkS?dfJNfT0_71R)u%$BrtXM<67voE;SQHsg4R?YcGv5P-KJ-9Oqr|ue2Xa!VkRlNIkbq?e5x_UBY__WtkFw z9H;3de2?;0Bzz3U;QhP&N<_k+gyCZRIm4RICE*4+lN=KMAzI>{??{PA_->V-CMsy? zqe%M55)KU2!Z82Xgf0^Qwxs2qk^dD?y@`JdWVs>BkCf-ldW#`tlsqqATNpKp%FHt^ zC#q$#yc);mSYD!`gW{iNKTjth!}2iCd$V&&y$}i~!sc_a{IHBKSNtRFdk-^XHaz~R z{M(vRgXPEDfoRxJIHzP1${^1R*lx#DZlH{A??t!0Hxf!adT;xhS+adZ+HODhXok&` zi&%m!7LU?nW;56h$ILrb$4TT>F6^1{@}v44887!L@3P}5)?{=-k9{pwo9x{L?oNf6 zMB}Ls;bKP{goLA;?yjYQutyA;Q$#&Gp1KO>i9B1a0~zOCta$*~{8xO<$fmML)eM_Y zLqcK0rhgd;{AzU5`}3KazdgauhCIg)>C_B#Yh*JcWa@^^XQ-aLgbIa6fok^GvnJw| zqK3r3JDF~?E)ZE368pw=*@=%xP_x0#oY8LLpI7?^Sg?lSmu8x%xP(*=mS_yD48_|o zFJTvksiwdU$0igqq^(P6)$1L89v+Z>F! 
z!6c4MpBYR z-^^(70eKroaF%JBW3dyjgO)e;9S#q3_%lS={?qYYDACB~M(Euw+FvOBdJYbE^wxZ3 zEc%s?a(YpGA_fFL^zR+!!f4=!p|rleLGjY+TbBlm z=-ac>JEw0~lgI`7w$gczld@8hOW)?m8|K!xTWQ~WEiFg&twV*T|L9u<@bXLnc8so~u6Fuyi~@R-)KZ@gZu7{7L^ z1|8_xb<#JdXD8xt13f#&>YsPdN!cjLqG#renrGg~)4BER43yTpi>{i_wA$r@H>vEi z>ON}rN6n&*;?GgD@Qj);evX6F9lh(nI2L^?J~30@2JpU7ed|%yV!!%ls%k$Cu63U8{+fwC-Jeu-5GRAM<+!pZMq8`ls>5Nc}6LKg0EJqC#3a zu0DVXjwXjt&fQ(DIrVP`^``aj43x>~-yJrFdWWs#;dh1iDYCFY|JKRzq7kQOr6iaB zeGml?=^B~GeS`MBchYi1|30s>*NYljTLm?v^*clV?0%LeWG%zUf8RhxLp}CyF2`7-tL1(GAWNj&n zTugrbki3W^C`-Qc24c1MX`-UFC;R4QMEP3rR{ zS=-4Xl~Os1>!nb+t>_UH%Db2b``%`RV%|qTkKe+*MPX-j!SvXme50z$6DmYXzk>OQ zVe;ulyEy;%)+5Si3&~7WKDBbv&Sb0+VNcd_>;ztVLU6p-r4e3kL5)9Q^lpVL>+b5w zDX5c3{tw=|(K~_mqLMmH=f#9Oj?dOm7D*X@_Y2u}Zb`izWAwgEwA&cGy}!wZKJR&W z9z#sd-~9^p@!l^~DEtwAij;<9N#r_Tv%X!vs0@tI0nX7MLv+(-Svw`KPJ#4Pu1nY~|-<-RSm_tONM-==QX75kVa^IGj_GV7WxNrMs%k2GWS?=31d%q;heOoq~ zeAYcbgDT3HVO&bwn&M5yDR%VU`x)|Db2J<`boSvid~!)AGpsCiQ=}#DasI-53}p(L z!L%;!uG=*uof(vR&mM?52% zeG0pg_N^OMe-_J_DL?c*8LVi?k_McJjd`D-E%WVbK32zmiR<_&yhrVncn?wwnqJiCy!|iHcE%lrA@(n_X}{gqqnuo%m-~o z98x7o9UYSM?6NCnp&FUxlnnLpUW?$mj_aB^QR^s1iOlAaN!}L1R5wf4Mu||ecAQc- zdk!%Zt4I|_x%{6+v}nP>I(x3inDyu@hjZ)EcM!%%Jz5G~!}Vyga`Cht{TTZgjjf^B zy1RDd)T76!H?2o=6&TA)4q2h zEl20GzokOcfAncRdNo?d(WhJ3-#|Xrj;NP*>761wg3^`;<9Uwtx?p`r?+Kk|1Y~xr zpi>MraZOSKy>Y;z9w$}Y=y$Js)5l--Pn@#{WjPatn!*$LGA4PZM>W1UHmF}4lVK1w z_#0Vc*Ot~vT}PV*y@~8QEQF>h-Q_Qwcgrr!rEMa4--!Jx>%R90_wu@%UTR&l?nnNi z6Myh8H~#4QU!b^Wnl|xtCt_g-duhG%35p85?`v|2i_W3WhuDAF`XLc^x6B~xyAOw? 
z4RR&w0v%;z`j3e=eGM|?jnhmtAN=K&5w8b- zhia%A?`GLMtxj{Th6+IaX1nD>tryB4DF4dxc7(SNHS*?(3UdnjZauqQWoNsFyU>b} z-RXSj;9Vx(+wX6X>?sSxS15yH^<4>>w;5IK=q>y>qPE|(K!#0S$B`gz)x+*6^qG^y z(8%U>b*qht2$dF6Rf32#A3}}{efUZcdpNLxZ}R=#_x<|nBILUdizNEGedFkOP%Jpul2Oy z?Z5{GvfV0IfNU#Wjag)SzU0*-S1|I;7w-@{;jN^FzUk7i-DW&;6~CoW_aontWFpu5 z?0H}J3;7K9@J&2yGmcU1`yLdsqxbY1U#)%r6%B}O-#TCP?_l3&=4IOVuQdla+`b{4vhP3#jQyhn6|U7RI$-R3lk7i>4m^+Ucq>Fj z+P?RzY>{jD9z>i~53GGFKk%MF5j%RXT&MGJjV0K9F29A{z4J}25WqCD%*lcB& zeIbRT)ZX!mkD--Os5d&~CZ1eZZVPAPw-@u>9Q&)}nRQF;)s!c_XYk|HSF8GfO%Ap? z*zVxu*Vy~TdQ*kxiX1FZW=XT@ukVn4oGTMEo9zQ8Kk+x1rC9)eHo@T#r!&GJ*vMuwTIN!$6J+8UYt z_@Zw2c^}&G7L!+^Z$h*4?G&o%7ha@kTXl}K)O_UU-Eu_%1taFUc@1g}dg}EPp!$4m z#Mtc}RIoz9XQOYU^^F9=edOmos%EQ=b6!5E(ecg*C1espCtDC4mrBQI<^cSlw185& zDm5I(B{wx;F~MHyAdYv5Lp_iqCGAS!s>@`9SeQeZ~FyxsIS*b>qhEpHN`pfbvvn+^tJG%Vfy<07icTKzCMyu zUw310+4c2a25y1AZk9WC`dXT`>Cii-Up6snyLw<0wS5Tn_hv$c{nYGkZ2Csl);JLKC*nOy=m4{q-+|yf?{xeav}irW zIE^>cuzJ_~m&_Kju-?pH9+qRtvE}wM{I2&f(XMZ*O;DJ>{GNgz{XR+gea;@hayNf@ zg6g=*MojNlNNxE>HgM&8r{Xj^dhb|sbl$pM`B(L-FZye2UU6n)Q>vkQ^_kU;&8ahI zub$IhooZ^WYi!8-aMkLnGwZ7wR-BnGyS!m#L*vyAg{k1F!n%gSi!PmCSlw8YD!i_? zZbj{iwz}!luc)g@HO{VTYHe#yEooRXbw#RmQCm|}V{>b&ro1_|x-NC~qPlBSEyeHr zXx>=sY)Q2Wea!gHsn+JY)auldB^O=vYUqg8Gv843YG|rYRkfsEWxuLwUQK_b5Aeiv zWvsHPYq^YCQm=}R70p%4UQIvHxmU%<3>{u#B|BDrWY#6JR;Q|48=I$3Z%(zOnpdat z<}6%z*}}YU?z5$ij=0ay zP569FTMKDll(7C;NuDonYfVHy&nKdv=M&M-^GWGXB(Eh?uT3>Kwk%mvTAHXIA>RD? z3Gs|E$I_)GQ(M}Wt*WxtA5*sjoF!A6o0G^LJmQ$ah)Y|lwW2X*%(1#l8v%|9RB`P! zjaSNFoESH}J>ezk8{TRm2}2@&fMXdRlYcnO+Nvf+ZKpuV@hoC&ymUy8=O>p;ZE9<& zT{3l9RrShby42W^N}E(n`Yet!rKax^I!AP7ZR4ucnN`i?#EoapCSP`$_q)$J zW7-+-IdfHGOl7;{FVc9)^pQ+!b*kCgY9ciJH;PO&hcbOS+1esA>T-b({J3}!;?zA>%naL82P{VXw&ph^$0==WTUMV6 zn~L49S&fbL0ah$tG*vZMt*W?&JTK^0ytWKpS*pG%A)Y3v#v(KD#w0U~7ZMqZC*UNO z88~B-8F-0h22Mq)y;T!?F?^BL-BooBmsGVcXsi}SOx;~gh$OWPm=}w8t<6;pElrIr zN%hI#EpKdI6|@$UM)0bWw$;+MYEFG>Rf}sXR=*hOswtT9p4dE3<4hX^&e>zYIcp3! 
zQ^$Z4XPn`D=0}w6-2iF{TBFH!#@c!~R$B+pYjOQy!p@$B>Q^E>-|{M^nyZ&(#ef9&&yMX{b| zhPt(`NvTl$TsVyrpHFF=_`FKv#OGHUCqB>8IEnaX>|4Vl5|KnUKzmW#M0PL?H<2xX zyGXvNxlQAxMEr^1CE!tA`|R4PhK5vqFg%NwGcdjlyk9&W*7KGV@t#jLufX3W<6Ww% zVNq*MX=-&qTFkyg@LYp2X{-vEkl^_QvZ-Q>Wi>Be{dztLkE>QR)U~o46JW)Rx%G`J z0*qL6Dc5o4m*DyMx^L@`&)1c!5O`o+G5Z|gB+(C6?^rAeL>NyO1Ds^-u+^s3nQS~q;y-M(WeEHPXXx=Bj{O$Ac>#rTdJe%eK>#A4IZBBipjctX* z;`r@3+}Vw7;mmX_zJYfcTNcUvO6OK+_A!uBOxc2eY1^u0spiX;&n~|_nSTct9CbNL zc03&hcUD_nea-v^W!6>NbQw?{v)`-8@8ZjUMM}@dm&fw@x?mbT7Cp=B8fq?WZ1?~h zgA(;)d0l;dWC|;0TLDgj{?N{Bt<1JZT8pV8qA`kVT8)ca)zW$)vm0jxT#1RtT)L)q zhdZoT{7JP3qKl`Csd|Lnk~+(BCO%Ico=dqCwdIB;332SmE*=kTpMs1w8BT(HBIJQp zig@kRtzegJvb?RPZZ+GgmrQ-HX2xUcP=IsJ7;wxeDq%Zj6qOLi4c24vE$ELMq$I+z zB8{hSK|4w8JRR<1wxRSOfzH&VT2svoD5}En7_*%)O-S&30=lZ(mZcUpHU^!EC6j6e z`OIJu6VD`zi_En7vM@Ov8*Y+5Hm+)_hyRPNNfvXfGw@7wB`y5{Q01b@8#IGiLlSnY6ZcjD=;+4bX6^+auH$4^FzAG19seJdJ?npMro zWEI6rBCo3Y>e-E}tD4z$&|H^n+>6a*;wET!SzYVZb((c6M7EWImp})=OO(f&=WW6h%NES@3G!F-yp>e^`V+(pQ-2cvENiK$ zTfRJ*ECQSa?JT;YG^JRU#4ge}rlDB;T6D!McIYo^ThKl^#=HV@<+k1TeYI8xv_fs^sCv1-rQ0v2NOqcB6!VB<>}?o(4od} zcNl)WTqSq=PMMOlf8lZ_Y~sg)jJ5;Zn6V%$ZcP0TaoZYdh+piFDsQZ7Xbn^U(d-~M zenOsw?Z@*=Bil`=W39D%)v2*QscyEUn&#Hkqt|DZCf3Ug+=M!rft!%G^wiATCe^0^ zFR3myr&d+Pj4z=a9gb-#uAMbq(|kGYm>`}I$FL$HPEy(~(TZMe-SSqeFuW+7P`Bw{ zwmOp1QOcPDiFK8>EnY%B4e^}P#O85xYIb9DL#nxyd4TkkN^JkrG#o!^`*M4lpIy<| zP?3tAvxNF=!ksp* zxMmLH=2&@k*mfqaRk*l%lZk7E9v^oVn^^-%#pl&3UNtm$33ZIw#W}os$WGRV6|*y^ zU*hw*uHi!7?^~E4KTh*G#p^c>v+CR8=f`PW4K*uL%+u-J!esdzt8dk{i}YS`n6k$1 zZ(R!uL#>T%&31h#RLrfqdt`m+=I zmI&UM+B<8E?SVI@_5$w}U&fX%fpWyht82ZIMV||`rCNirOTx4zzJ9WKWn>-JF(jem z@*w|~W!Ejg#*kvGiA{^nbeY{2%&^7xXCYqF_8J>&=CWCyV~AQR8nLeGm4WEu)10l2 zq;#0q%Q$+aO{cNwEg#%-LY-y10I#TxsKZbaSB})Nu(8!_HLw%N8Y3j{i!^|Flk#u^ zI!XHx+@!QehTOwFWddDFwa%+*<1O8)hKp0=bG(?C$WOw<;rL1WF|TS_V{=C1qp#*XZaBa%>;B6x{tUS8K4S+$F=`{2z9c10xB8*T4#nNFxQab=Z>Yg>+w zn>m@094}eE&Q>2cd6JAywmoTgMxwF4!%5_S98MyAVK<(6Gop+cw7}uVq?k^*3{X-jg>-t 
zyASYO*Avhi8B8Vcht;bYiPXnlyA5Mo8ZVi=?ZGf*Y|tmYx)Mi~e$^~?UwJW@$(ojrQj?^fCdM@@Q?nYIYf{WXB()bE7!<`%-oCb{@)|Lj4S6!0NXk#) zmvOw~SZGh2e43opLznJrxw*EO5~S2Hh~Zwfm+CfvmBnypsRa2nswunor| zCbY8Ts-!?JLrCEFy!Cy&p8R z-I4gobSI6^B3=_jdG>5ph!*Owg30k|7 zm@F=5;#wh%t=(ia&t~)*99k5&J%^J>cU!JmHK!*1Dr;;#Xx73(Dg!k(ZUgVKMKR;{ z&z>QiT<_`{^r~-iT@Ub_dByHu9cyf*?5k*Ns0vy#sI zyD2()S#z+nD!%Uw@y5_z=@|N`_6%Oa_Lwo_Z8-&3FMmAq~s$w<~oHYJ#&ahSOl{h`fcLkcO-48Zx?& zfCoBw_liq~b1{DVEmaB5D2(*m;w6_^ePb1ydz~vv&`*n(ygj@&bK{HZR;*&SEV+)B zrdq0->zdetogtw3{H1-jVq2bSu z`eWANvdiYB?I0Y4$EST5e!M&+!t-JHPRZi8&;CdqjhYy5q<*{C_7nK6k#)pe$BguU zc2!GtRZVJkz0S*DUKd%UiO+*9btLA;FgZA8eExI#`_TnI-lxAq7uer_CLhV0dD*3N zuDtO5d2{D1yfp7JzJ=F-$>lAYKer-p(d7&0=3TaMR^F_I^XJUf|BEipo4at%h5XNp zR1LG(cwLh>s~KCJ+t^&4id?>X!PMxlvZ_^V;a!oXh?k|JrHdAvl~;OAL)EIf>b&x6 zIBT8HfXyTB=gqxv5d>9SxG?XMMROMB&6`_Mkym!%LP*M6RHt{~XVo_@%PUPWz@k&5 z@O-)3dUsaM^y#h5Rk6=Di{i;SwY9KAsG14nm^zmeKc>Fr#E%)PbK*NSOTss^H96>c z0)9KZSaTmydL3S@Ts{k)?O_5sS2l10d{nq&!gYcj6K*^o9PMkG=6HT1id%;+xA4}f z`x<8Gij(Ms8Gd6<7pJaiaxcRr=6g`b{8d#e!d>qP_?Hn#Vt(7md{UGR5^VV+i#)TX zx%$lNrlvE~>{+s^s)3zK<}*h+r%@ihrqE#2RTtKT3233w(o}O{E1!rctZOkJGi(WX zdPWjnx^D`_%7@d0PM^+O5_)?wv7I=)#PZ_Ejr3c?3FK65BV{nNn8;_Rag*zL~ozn||427V?jN@OGU z4Mr0+$AD*;JO(_&+%!*|JH@YyoWGMDUfo<5Zr@0vhioBYGR9D7Lqc9wFse_`8EuWz zr)$TJ<`p@{-+iK`vH6nA=EYmr9IcKx{2#3j)6-F8=;~-|s$u2ll180SshvxW&A|vV z9)3$|h2D3u6@*3}4_9ZT-5ETvx_M$J{D5+b%P&VZ@=c~` z#iXB|S&wAD?YUbo7HOGiXvym<1Av}eO? 
z%s!963;G+MP7a|qujSYlljGSIhd8lyC4v)MUuLf^6=~)A0B1Ooi9w&6->qtE<@E0ayvV?_RL0_A z$oDF}?BEb%(8+j>#dbSYv}EdPGw@*LJaXuKJR6z4nziO28;jAddp?2eO#KPu#s@oF zS`*7{_UcQ`cavsSU6X2wE~~_hX;z_!_P7ItW*a9J+3^{K8Eo^ zT|*`(W9j7V)pHg)yN)e4Z7o_dwISG;6dP~$>WeQ8kmCEqG>#EXEWJ(R7~*2!AP0^( zoWH7x)7tETqfUr1aGn1NO?tGP)au{DiVShZY6ttuK#Hd)8Jk36w3D$pB*ya@n?hng zpM<|Ij*Ox2vsW7=Efgk_Pw1<8qhnbR9b)ut7+wO~$TY(^I@HwMruqB?Z5V$WM1Yw7 z!QmwGiw?($I8I+oOeHwQwejG2(?krpnCIstte@xO)?e3RJaBj{XPiFINzh+C8?bZQ zDE(#fqi)&REMTwZLi1ppqTEzPbO}~eUIl&}euV9_Esy<)=B1?I1k6PEEd<6?qqaG6 zc+cg0L7RTD4i_G-9Je1wsAuf-#M7ma7a26f)2m^)vxDvj?r1a}%Z=|eDK`JiCz~6} zIc>hBiq9y=^abhmhvB>CPX5vO%q`~XLGoLzNQu~$j0S`<3wP;ar|@0gJHOdY>k<4wl|t%djj#r))&3o5Iv?R zz0-Ttm>1HXt&XI;H%TQfIP|{@FPk59CNBS}!+k1jOlJ>;{W(HCiR7Z>zpQapBj=kn z)e=x*j=4fQhT|u-iQ)K3Wg`5!tZ{LQ6I0>!tV55j55wyiOFxF!F_yl#*UgmRx}C~_ zmyECL;z_o8#zaqTs-uPL!wRO#HZd{e6wz&LpTwYk=lw=He$@u89 z$WWdf{nHjA7in40wt7zVLna6Cr1} zpB=b^*!fHbZo+iCRuxn=A|i( z?@Mf(7T2*gA%85c6D5&p^Gvqe^Kb2G-G6*wbN}wG|E9MCCw}8kqpgc_K6op8cj_9N znEwr?k4NEw!&%NpJ}<4;VfZa)7=L z{2x2N9DVV8_vmyvYa2T~q3B)(k3y3ltqwve3BfWJUlaO@0MFWy`Ls|hADjixh;S5s zg>>3oVafYvCJ*DuCTuG@i5{1}aPl~w-GpsL->!_?7Duvhj;%?ju>0706Sg@VKc+2( z_-1#FF`wwE+?y}N8S34mJo_Hf;00%!zh^Hv>dnWTxAfG!yp`kf^7hZ`_t$XUQ`hew z0PY8_`B7e8*~)%@Grv9HKHvawauL7l`~Ahh{8jyaD{wA3+%bN-`@}1-`?+!dpmg7_WM(SJ=ak`F#n_d z{s!O%;CA4^+I~Op9eH`R>-+sO;Qo*I`x}8}UDN~I{>gs7u$X#2)9=>;dp_ImZw2Om zuHPR3w%^|G&wMBKe2IF2?RP-0et)gsFL)R5>-~Nuu;;FR|8`*Q-TnU4!2S32`$ebI z&bRvgdf?t|{r)!K0Przj@dN$-3&2X?#L4{rF6{sdw!i5T=1%IBVKfnRtQeg2j$Q@V-+zc%E z74_==0r+}0zkl2B*8tm}qg`O-|8O7J^Ly@3qr8v%!2Cb-`#r$@e?gx5-QVxecn@?Q zrhZ_p-|z1N_8c?d=br-|#|`)kfCDEC_!|XJ9Pl3lmQ3LOduiwO1AYx~OTmD@1?T~v z29}*X;Fpwu|F!{t4KV-I0e>s7^XbDKL9L#?|@%49Xf!O zz`g%A;BN!A&l>OtfZKtS&!hhN1O5i!{sja6KA;CY0Gx3N_0IqwSPUEhE&vu>I^Zt_ z76V&>CBO~9GT`mNTHsdT2H-AWe))i3a6Z4^H{e$Qdw{jTEx2mC%nj*-$y)_W<)hFyN0P%i3_&fZqhnuNm;S z14~v7_!IFw1&srKDR7_(x`F$f2mCvMds~qMaQkZTXF=CB1O7B%&l=<*c>RFC3Ao_~ z`U5QK9`Fli^W0}>2bkXjJ;1Ur4)_yFX=gM126|r^@HYc%?;P+S0q*}ge3%2?J@5~> 
z|C`7e*na&tb{V(w5QQ!BWA6W7O+68XeO}oJSeY9If zJwKyfV8IjcADI6nas*cX3jWRK`DdvYSkMPO!1n*89~VK#pO7nX%ZtblST;D|d%%Ii z@Zn<0U#4DQnLps)4BVeL=|Oa>}4T{xY6FbGwOR54ab&9q0ic0hSc=d^z~<;(1{0y9fRC zz{)eB4_G#J(0>kCayI>VAN_a_bOQ6=JLtCpH=IlR`aOf^fF&0|-$L4%MLobRrPKrL znah1(dl~gCqTY)K{RO~+O9%b6z{>JLe;07?`v(0(zzvHA{lW^$FCX+PfyGx2`WyB8 z{e%8aV9!-N2lPJ3bC-j+WY8}ctfD@_>Op@iup|Xv1Xu9f74)-~c7Y`y9`xIRTUHMG zcLIx7!4F_h1M&dYHVyidujIMrL4PrDOUt0Y0a)Bde}L_)2mJ%Uvi3oLLM8281OI@v z*A4o$!2FL6`kR2>$DmjD*ADv61GnEW=oh@7cGeI2Wx)2EpaZ!74Xl!n+N^8 z4*)+k=uZI_e1_+MTW;YwmETHzz`Z^2`zrYI1@uAii^v05`z87fEdDa`T?{>UKp$}L zo#+Fw_-liH4{*cR;XAPMu0j6=VA(x`e$fZ1|Gq)L0$BD<H_ zA^P(GdJQc2F7*O?wo|Xl|C4%w-VWrX``;h*CoSRkPWTVp`$OmjZrKIBzzw^hS8y-% zE(Jaey}-(S)C(;63Hl4%@-x~8ZhsWMSJAIu@Eow<3FHpk|0Hw*w>*vg0o#9t+?Te}J`r zp+7bB=dZ{SSo=5Z0l2rHdVytw^hdvk2L1dL^uL6hfZJal^p^q$eB`L|qlWxPfbB;Q z`7i1IF++awa_)~C@+*KP#}4^x_4~LXe=D%y_#yuQux$L0KVb#+|MQSP16Xq6kY5Yj z|5}~{Zkagb4*-kbNPV^Z{?{SD5;*XtA%8uveG<kF9i;i z4*53&OXd#w+kqSA4f)Rj3+BUzRn&J8d;pd#fDgc)ONRVS!0nd}`8$Cd-Z$hQ((gsk z(ZKJ^hx`S=$}5Nbc3|=Qhx|K%TRwn1fF+BOhv0{X{K<{X^o+kMG5uR&?Uf`D2A-^8De>Hpq zmRvLB?*o=yNBh9akJ5fK&tFgbz`Y%`4=h+u`@n%4XhE7{%ZIFoCMqpoC4erECqVNO5gyn9+-az`T#5dZUPnq zw*X6kJAh@tM}U>U=YX}q0bo0D{MED%ECludrvbMB%YfT~i-CKAO~C!Y^*|5U0~`R} z3(UV0JpmQ~9|INxJzxp&C14qFLObmPi-5Jj5@0)U0dNCwDX<6F3fuzR0Nf6|9k>^` z6}TU`3+Ms&0|$W51M|N++nbS@1mY-xqlD*2JXENx`5s{(Q{z_x9KOaY}=4O?mB*dmwo`f|A7C3--Axzz=OyW zxcB?;cMat~fDT~sF60HQ-A%uM1$)tNVE)6%{iD#m5B>l*`~>|17Cb_|z%4(6&$|C8 z{TBSikYDgI=y(D;f&2F(Ct&R_;Tv$^DfAK8{tR^J_phMidhmV&9fHqd55V^S;W=RC z@9EE4VBe5G6WD%``hZ*hIOJ~z4*Ur{0&f2^&viiGU*Rv%`y0;zO9s#f;Pye}qx*;H z&kfM`GI|9p_TfLUC-1Po5m>U$_pKf7mYv<{y99UkluS!eM`_ z%K!PW|1@y#zZ~}S*HhnX5Bnv+o|6vywZM`$9QJ#F?FEPZy}-S1I_wXq{4Iz5;v1p= z}y$8C1l_iJ$ z1HcXE9rmYep#Jj@`-_1E7aaC)1`b^K|FQS}aeY?x|Nrafh65QU@1!JUxssA}CMD%c zbZnwSMTZXERCK85u#uaIiiyECbW1T;qc$5RCAOvLrczspZYn0~%9WIqq$^1&DOY=q z*Xzak-k;|>uU{9}_43E(_J05M-R-tLUOU(0Jg(*SWA094QN{qTA3Z5344y zaDG_zg4Jh*mFITSQ^|F(<6P_qN2*9CSiO+@3SNNSYw*(|>;@Yz3ad7-;}Ueh-b=%3 
z1Pm<+tGu=FF2_!=uns%Gk*mXMkKi?7mH%z*SQb_lVE5Ot2P|ApI>GYm!fFt#yB@#Z z!Sx&PD_D3_Sk=n^n@Bg9_sy`{0fw5%FR zyfvf;>|GmHGuyd-2l)&3wqu9<|L(Bb1(tj-toDI}9i;m%?EV4i1~cv<-C*4h!)hDY zzX|_?<@bhF)_34{g;f>U)*V(G!I8~jwO7J_7FIcT^Z$p!suT=8999ir$+ocC0=7L$ zy1}}~!z!>2y`PhxVBvQ16AbO(zF_~axG&h-hb~yR6Mugf{?qsy%y^b`gM)SNsWPyh*--gCp1>_!i~u`&=KTyb1mjzk(fOVbv=C z|2O3hY}-e<1snfMIrstT2w^u^G9FenU}1!E05(P`2VnmM627qT0c{!y>8=tjmk2Q84teh|2#V*AGV*4CO~uBe?j8i0YDXun+782fpsHgVlUWRjz7WjGq^7}G9UZElC$7-!LNh|=A9c+bzpf_ zM0J4u=SNgO*iju(`@qHvBWmW4(OX1%z{ZQ=f&G_~4sc`%>G%mcSCHR=b=U{CT}3*; z!g|sHR)c~2i4W$39bhro3s!)GU@cg94e`K6a0u)N^Sbf>GW5Z`Ya?ncSbbeY?ExEa zK<@$k*F=1<C!9ccZ%*z3-t5cC3%6 zUa)%u$oo%6|-K?5N871=qg{AFL~lsz$JF4)%c^bE9e>m~mQE6>KN{^P{Q`?5K>Y995GI zuM;1vTM<>e!O(TYe*(L%Cw<^(V^l2$^KQUiu<<7R4i4TNRfAxt8GHJ;Un|$a{#B$K zth+6$GM*&f+Ni1m%fSY)4r~M4z)r9m>;-$l0k9t&1_!}?;0T!U6!!u1!HjQX7gz{Z zf+b)rSPnLV)!72!_DfV8$KjgLz;rSO_+P zCE!}H9P9+E!EIn2*bg>>Ltq;?26ljfUEB}M1AD6A4sa0c z21mhO(DMwsU@kZa7J?(-Y%l~?f*E(=A21JW1Pj5nUmV?{CYOo*d07t-H(DN+% zU@kZa7J?(-Y%l~?f*IdIAIt+A!9s8?SORu})nFgk2<`zpz)`Rp^z?HdFc<6x3&BBf zHaG%Sf+4UL%(xqUFb`Y{7J{8%3Aha`2m8Tla0sje$G}D~@Em>w^S}-5u2PUlaa)cwqGg?hp2EjH)gPzlZt=Hf|yv&tuoUlzXu4N7P4f@Bw&W z|7Pk7*zq9g9DvtD{)5#IM^!yI(u@6I_x7lY$n{_1hZoTK73B^LJw?3(OP(SAi`?(m z=!1m=lozn~1?nf5@lsUn1?zTGUtU7*H~0}8`7PxZEPsXa1lIild%(dzMpf2s!iQ+L z!O&}zYq0JwQMCo^eVy_KX6(g(zk&Bx(kJ+L`~Z%;g}=b+QSuec_$R#IV&6OPz`_5* z1A9ZH4=fz#zF>cZbi9nt1bzT}6@CCad=qM;ga;$x_13U8212aB8q58qW!_fn)kC;$}zsH_}2~`bt ze{w>#fq640)Hbm0Qxj?!>@R}%3fGUFP_N_!G5qG90CWyF>nM7{24!id0@t}2~`B(>6HiEri z2e=!|XqZs@z-n;TYuEwSgXPyws19&s`Gnd9hOUDLRyR(lnZww3BliPKzDYR&L*Jr2 zfo->8*I$U=j=#W>yC>8RaBw~LzfSmu2~`31|B!qJJ37$^8-Ij8SlxyG8|eNFJHXx^ z`~o&^BY(lV$I#!4-P=hY*!u+K9Sl8*{$IKNG=nKA15|`2z?4LAt?_ceuZV{~P#_l>ZkfRSWihQmJiV$4sU6f{mY6D)%3x<7lPIz{bxg zwE`UZtWup|f00tV!M0qXDihTR-eOt!SV&<2Uv0* z>DWg)7UCDM@j|7#z+8{@$9vRAH2bsZ-?2LKYS!Ybo zToGvWoc!^VKKuEZpDZM}l)s(q&G|0qnUg)Mq9t(ND*tMqZ{f6OpV^rmn44L(^O2cS?3vi~p?bf@`v~PT85Aamq8<0WOIx 
z^2d8)WrenM>V^Y5yq?2j_7-t%CVzKf3)RLGSif8BDJ^RWp1mqizS>{D(&ziuw5PRE zr)27MN8q($&tV|kf(6`3bi~fJgkO&z0?V{cO}W-NYqfu+uQ8!wrC}rdq0{%P(}aJ$ zb>GUCzy;!m3oJKqM~mAJdgdE$i^Yxk>{1(-?D5R3Fx)%i+>4xa3%GarnfuiVV#5ZD zTW<2=JezI>oW*DDSEmT~zPPT=H@m%!aGK9H+=tS0yWn+le0m!SDO_U<*aIatyrQ|4l!1#^1tjaf9?v={dsT3Y&tu#VpWW!xEipPvKD9#7V7`_-q7Us`-LlQlGKj0fcU z3a;m;xxS9;MK0H+-QLFa*<62-ywiO=9ehi0p)~Nm^G&NQ4e=~4m%tsq-HZtiB~3CW zSmi&rC9v9emDMlK%k( zPUT73de3y)mo(Q4xZdeUZdB1usO+E9y z?%)>_e;4rwezsqI#NDqobq>*Ga3}Yd$@!KC_p6KC?_cA)*FnJCzl`_;J-i2kmP=VH zy~-iF^wB!~#INVQm9Mzpztn$(Q&~MtkHdCJ{H^ps?&&Y}{msDuM*p)U{fE%s@?Okp zWB+QIr>&BCT1!ACcsDaQ+sWMQ8HO+{Wq20DYv-PI#CehaJ@D)^&gq_)-RbpT>36vE zoJ@aiz#$xuT5Bp}=nwvKzxoXG09XAv+3jB6sH03NI>n@G7w_e0jqNTrzK2F58a=$b zqctA-a2h+%*!Y~$xbMShcs{}!*$YNv{fE;iLu18njK-P|r_qK+4euE3lljywt{RoT z4G!7DxL?ZKibLs7crQuHzSI|6dArtD+&tC7BTgZ4@|ZsbPISF*jen6-JvlYgU+*-% zE6*%NdNJCgf7-9+OPY>Kqdn6};}qS8m9vm2?@E1;H0|NOfxqlm=S~4%dD^)S&B?Cu zzwDHzQ!@P_Cwu1VG|j@E4BpLZmNdPZM*9LM4J%Fg|HE3&TOTY<-Q2g5IG>a>ZAf$9 z?Jj8=cCyDxQviErj`3cbq-j$c?Q@(otTc_GQSWT9mNz)~1?)!#InhKqo zKr2myX!jhj#wmd%X|ykOPt&fCdOVTH2TM~WaWbO&)rC_@(@|;edxGwWeVenkv=JRFN;mRu;0LKsGl|nmoZBUz-rVJ0pWO1Pz&%aPX!jn>JAIO- zx6^35tsjhKxx9sc3gLbx z&b^Wq1?yQwKi5jQ)*8RYBLv^_Sagrve-us^9GYV!Ey_(zU&hDrtt~8{u$hF#e1rQuvj{67ES3kW zd{^oRG;*ziYsFlf$#2QaRldvgHSs|m*UGteX*wUAEbYe}TXM@X&#>0V=OnF<>vZfu zvzGT`WlAW23i!L5uo}W{mjDKDWvhJUEq>oM)8t_k1I{{v_rV!}bFsTErM^Eo^xx%~ z*E)ornt8TED880^)f~?H%SS`%8|m)#4~O{Wncr{-otN%j1KexRVgEPxD#@q(@h=`}Zm%zv{SG&fy_-1BE`&(&yZUo|8T7^N&emX)mmLy}TNo9(2liC;O8J zp|jORXI^&u4Bv|m*<6w<@sGcV*HN zo#xMmKkzxjzx%!U_3)dHGyDzj&EE*W==hL&TkN@S65oHcH(`<3KLCI9^C9()@HZR2 zUc)_4)^Pnt+V~OpTZ%*Kb>@fKmy+HDe^Ppjj-apM{dUdYIZ1yKe=+=`FZ2Gp=)XLP z?|%lSAquI*-7+@SWGKn!Wx(rvVh}r#l@BLj~G- zM^awT2&v11Xusm3Jukc2&l-G$zs-LCZ)MaW1vj}tWqHgadUfc{IyRj`Ys#&lY9GYcjEkD`CMKBTVoV%K)Gtmj&6`8b=+ zH|5ro7ix`gXT#mt7*eMS_gRYzagppJG23)>dUgGW+joO$PhN)W)K-0f_M{iR;cuGu zM7ip{@Af2PCeH_2Lh2f6PmX!Y&AxWOZ(jCVKZ{#_|Eo>~R~i&aZ#8;3w}sR_qPNgh 
zuhZ{aki8MTPQO3mq$dSf+JkQNYVHWBx5U>i=+Vb}D_epWtqSPQjix-l2TATf2*046 zypj8_alQYT@9Ot%A4N~jGAj6PNSz^ikEhYQCas=s-$bub^nPHwuICVec&2by-x2n>FTr9HVmUx{a{EvC|Y;AYV~+;&fY>h z)8qC1-Jut^+J>T|8E^E2)Q`j`cWa;2)U^Z_tn!zy_T6atge2&;p&D-KBO&!2;cn90 zO7^bdzjNYT=`4y!Z!6qwoJ(`3a5rl%eVy*+*kd<;rSFzR*_OQ720!;v@>=*?Ep4Z~ z5$-TtIqT*!;XYy69OpJB@=>^1{Im9P)Al{DxefDM0_U&tFIernIh9)iceppCz9qV^ z#JLx)@?W?bdt=)H^?r<6xMjaEea{64PTDsU!athuJ#(fgOdFZwL=h37(Qsrc<^>s}6f@5slk zPJYv4Bf0MiboOwL(vQXF6Ri7k(@R(TD`cMKJ10@xwVhq?t6mJL&BC8EnZMA+-y`}j zh15-wt_AR=uee~PZ(&RD#?^iXk-mX=46!G04C^t!4XFjfU!wJ!y=S)sZ?QHvGLwvV zE!v)9_*HvC>LB4SkK1#ZOtMIUZ<~!@3%~gHoI543Vm>K;xkTT$fs%JY zOK^eSb96`iu2y;70)HdtYia&6i_d)Zycqv38-ED?;^B}Q75!T@U(X?y#3Cf!KNZ)5Lq?{l%*w-jJl{EzXmZ`uY;_-15oSp1qdNj|?kx%P`R^I+iwLG^D;Q zde6J)&67T~J=U2<96HnZ9Ipwz*<;3kFY9zQBAMRRN6kEsCc}e33`O zYE(%5TKGq5evg*~ox9pE{FT1*@Wouo>(-Qba+YMsbKnET6Z_hT*Y4wNGVzCXzdkQD zyGa|z{e0h#-$(ep!Vhp(nDCF&_U-mAZV7&ErLV?H&ks}hBk((?98hzFf3g$*-W0x^ zdD)k7K)ojEKij1LG}0fpC-KsQ*i#O_VcG%pFX1oL{66ikmA(rsdlH3H_;QA3DQESC zgnx+>|F*b2lGokvbFvRucT#_>kd1hc8U#~;mY_Rr7kRq`(E;D<_Yw9A3LC)5&l*u{uOb)*j)vG z&&Q1|+bvtz7)zx*Z?&&3HI2=1^YRa<8%6tBi@TsDaJe2wC(W}&cMIJ5PaLq;8(!Aj z(%880!i20IqYT1r=L|=AA0+UawprI-hC%0By87e8n_8zW8e~!mWWD`lR8iN!;p~uyEJHt)FS?T;?-U#%ce~ZwdIA zVy*U7C3Ab>7JS-p4~uaxg*$%|cQ4#MoKHC-_R2mCt$Te-;HDI+_FK*uSbO};0W~Px zNpL_+{`5zsNsdz~91m+n+-{@>yel$1$HyKRO*J9c<(mV0i58!6$OOu6T3xK8buUW>iJZP=pcEql=^JIUzCJwK#Q(TSANuVA~_ zvn-v?rLOjf?^mF+=Va4YEJ0^pcG|w83x3xr2b4FFSADYfbveDTQ{S=&e(RhAR$sqd z>!)qs0!$!s=bADs{*nE4r?&(zT0cdh-^Bi6_`BvA{T3(v#8^V|ycT{{xheCq zKP-7I$yVmu;kKP_%KUoGbuIHd;O;o%fO<*l$9=}`ls@{p6uU>^FRnOXo{wq1ZWAxn ziIg!$Qwl%-ON^h-GW@Mh{KUeH_@x5=(Ag%RWM81kR|bKm?MRH3gx>^z2j|mviv2s` zOS@#fSUI10D)yY4V$T-%MGFl7c_;pPDf}V$8>>ve^|H3dRt{48fUGYw9zXwpx=`$S zRqOMPpY&Ua^(|S@mOL$i+py4Z-%QV~h1*qa`tf%)H!)64RC%r24mWfGXRJzDSK8(t z#&v-BBRe(nvVYh9mUs{!Vk>GraHr4;>2H*!Y_tj zcC{H>EKRZ}K9@+fzZU*p&PD#C*t6W|r?fALdMfSyI{522Ke<8XnK!|AZm(IN+w0oS zO8FQFR>OeQ#oLfzw^QKORtu0IK 
za|Ypee#?x(-_hEMHYaI5AT|e1rhK-T`W4n(*Ky}8xE(7EcWS@YX8WASor~dCa1OT~ zcODw!I*&Ws;O1~v_eUjfkAy4Z$do#pI-l)@?^$y|eMb04YrdW@Gfp-0%?DHRWkmSj zHuJ9Iv@HpL*ydfirPS{`OnZHjjcaSKOW}6hY36>XxpV1yJ@s%$+v(fI<^^!6*U4+u zdMNivs_hsrSol zT$^8t;6^wnT|XPX$;M6c3%B=F!>#N%V6E@0hU?UZ>3N9z`p!l)Dt>T49WS=E==*W* zf{g!@CPUn-kiY$KYdEvLSh#D_bN9hru@T#Zd$+~4<`t>Wtwp!sRK{cX7#lXE*R6oN z@rTBS``o!wpBv%!Y&xK}iVd4Jx4|0vROqF)v*M-KTKnyWUwSWR-TUBghcEB%O8sD8 zmNiEGUOa~A55k{y-vKp4_&c5WiS?vH_!)EH|Jda1^OimFDQ!~T3bz<;>rYJE@`{a{ z)V4@pRs*-6Gx5J8cDw=CsT}Bck8LJAZOtV|1~H6U*r5j^ef?)KYYMC1LJM@R-1mY zQ<-goTmOjh`F_n!v`IFfcf;-5YJBd0PIMF8B%ezf_rMK2$~pOB^K{K^_SUro8&(A> zSNkb3JaLOXauJ&Yb7}t{Gkx9R@MS&7$Dq@9j+k8&Yg~jo3vPF>;T|31UIKTXaF-@? z7sIXpg<0b}A;!Hr#$A%kZG&67LC{Iqj` zM);Mx4gV!4eqszQws(s?zcqE`75K^J-`@Yht$Nw?e{a~iw*Idi?L7yqGj`t5+{Aj+ z;+RLQKBfrn!0$|78Mf)#`pRm!IjbtXmlUG7H;IV1L{y|0~Trd+|zR?y$x`aCg7W8lU*%A6?c6@N#s4ttZ`wzqfuF{9?XO zqvtvAzPJ7g`1{69y$r*jwC;6AialNMt0Jau`k%MzW=dI4%>9Z;?;iMjqsIPB&F@T^ z^V<9qI1~S?*gQ@1)B2|vewlaN>Z=de{M~wP9A8K5PqDuiezAXCeOBt;QBM39Q~2wI zALM%`!aqjyhrMiGVZFE78teWxg})1a?Nq)iA^Z~(`gQSVp{e=1PwYR)@K18$r>-Z> zJd0;wnS5_W^yg@P+Gn#>!atbrWe9(P(NFViwiSNy^l|Ii>=Gyby(#J42EXc%acli- zx#p*?X(Sy;C*@%;{J^2(*0b50jQujMiI@LXDf+o*Q-}Zb%$B-JC5XAHsYHnG(T-!sexZTgKwk=|JelJ z={cm>(+)rMN#mcFG~ec*)aP$~@Y`nctrpRL#fhKV501ev{Is$EwS+y1vXEL=3eLfw zNAryt(SOrPKeg?tgx~lXWB)r&{8am!;b$CU?BDOiPuQP>f2z-AyvDb(rb^!NN5bj} z>mn>uhgf0Fgmn^@krcL$u#JReC53ep)?wrAAZ(qDx0|qb8&AH!x7NlB5!PnoWi6n6 zweboFYqs%<32UC9K}Y>maPo#@j+zt&P`5SdERhhp@#q z-UwmUHeQ6VDjP4SimGPg%_OYC#w#IAzJ->YZxw`<*?2XCmD+eK2%Bx=wGmcg<836Y z*v9K2Y?h6;i?AXaZ;-G;!jj9{C}A@Ro1RpzJm;}qVdLczmT%(~5|(G<%_c0@#*=R` z=Gb_(gk{-yjf7>`cxwp@*m#|Ud2GCGghi-}$?51PEJRpxdgc3%`)s^1!p3a8!1>HM zlX&^uGLNtk!V1J!tOe=!vM;n=rf^#c*@>U}{Bkz@*~jx; zF5#bM__wqKm&W&)tWN1y>qY+rW6#-6`lXuHJP`O?1i@GA2KX@70Np+}ByB+2`Nrsv~{hT68MDYWn(E zdS#pvA6Nb$#h*Li=kt9--FI8*b?v(g(I15$Ib~cuCuK*{n>xq6I5tZXTk@*uKjxS* z?sCl?HqU>|xZt4_Tgu_L&mC9Ciw|UN+^o+okB_nLN#Qq&{%Iz!WNmvAzcbD+#GY>W 
zJ*SUb>pW}V%bbA)+DrAm5w?t*Hx7im2X66v)5ovZ+U?%jmO!nwCUixNE9vxHK>e=Z z+jwHPtXYd4)REfwv&>C#JA^+=_-FAgK;dsTe8#pn#K+nj;(W2C27cZ-C&(RF(*+@^EK)oqeLPsF(G+IkKZsOQLKxZh6Z&W0PQHfeh&ZiBV|G_hSv zr>zcd4d24lX$!}>j%iy5ch@45wyDp>d}g?*Y3qYqc+t4^OyXdWzBMO|kE5$& z4@?uA0f;AYTZ!AXWL$k&?q#hL+v-|%_7D$ShrPb8_)SgRKH_#?KCb>OX*y2dw=Z?B zTamqkxP9K_H)gH-Mu=N+<+vJ`xKp$ z@ay1vmX2G`D`R$Ab1=hyHia+W682nU>Y3%wK5t6j@|`$;7Sw+D1xXEou(pswcc{kj_>&<+^TJN{bpAz#4(e_--`1*!%^_ckJ677R_ zX&cP?dCHo25!|es_>QG;mufqt4VJfstQR&CQzu>L7sKDtWX3Aj+w8WDRodWszBz8~ z$!#%qq^$9z&YODSciu9t{vfuj(fqV~)<)o0-fH^R*fWJRYXEtdP@Y=MJbHuHkCkt; zW|lgSE{8wXYUX|Sy|?|1@S9heeA?{9PtB)pv8Qd^dQL6RE|SZDZS8#z+}hhrd68!q z$y{4`@zgN?znyPGOSybr+o7MUF4O%s+e&=Dh!;mGU$fwktmT`f!hcos$7H?W#yJ0} z6n+i-!aK*U=bvwDezOn0j~6|x=Z&Ehemne*yT;WQMPHsV$n!}npUk-9=@fn+{O-HQ z)v3bI)N7A6e(GA>7<~Cg@iO7d^8>T?yG+bs5y1D)xV&B~F1VEWM8~+bPe-01m~jgJ zxms_#@kQeiDm;`i_#HnOxAy3q4BvSy$a@pjv6o4lUO3h77>Zsedh*R=eV#&Ey^GW7 z#m|ix6ZZ&Dq%&zh=g7ef`s7^uNdY`k(C-$H zq1WF%Zk;>euGho418Ls}E+;LsmoVShY_yiR+ScQ3Os92frlhkCt@fXds~LXMd8Lci zIWh!T%2;3yKm13@WWt`#*!%Z`=r#1v--_N+7dGc&$~ ziZAz|m&P?UdbJ}k+M04PW6Bp2Uuz8OdYr0nbKYrT#5aBb6h`LSrhkDkoMoa zN0Yi{Q3*eTZ>3q+I0Gq0hI|_4?9Q_{n33q%oW1$!xg!f8aauk|$5Vb?)oZ?nx4z zR&>_AYINkDA5v!kovc5PtAC3<_q*zJ`uq>4EhD*CG5;Jk?bjxB=J7s6%3LM&J%KX# zD_%Ezx$h)?;yr;v(z^ow%)MqFYMxcc_nIZWYbE8i6Rv!VUa#S8*V<{z$8Pv_Z<_M4 z(}|y2K0H@bAO3F2$Mc4trhLqTU-z~dTfPiGxqSFi=i7_n?ie-ce9g{H%(ulSZE$n` zIj*jf_ULWRm1q6zzvf-1W$|g7o_}qFTlLPk`n}}yez?+)UDOg}bGuB>IqB?}l=+wF zj}XW6?{Vv$HF+kS+JBuJyMu6Zmy$>C8t!3}xGC?qmBJm_XSnigH&r*~{kD3zi~noJ z{KvsH>u+YRWj-fxiav*^1Fk1DZtat>p3x@H%j%Qu-37PnfU)5;bW?3ev3Cq^Uf6Ku z*=#B|#oqjS)+8h2*84J-z?E`ct>@IT7L@WXMmgNd3DfQ^HQdC$?{CHImHNH{Zm}n< z{v!EzJzVGdPM`3NwDoicIzzs&`j+Te&w;Ias*eei_3gyiT;}bg@Mj0Z>Tcm%&vj#b zUUh=+pOR8;iilHi4Smnlu=VcMojP5y`e){WccrwqmGIjR3adlp9_tN1Wv@trFJ-M6 zeq(0XI%h|o2PeN9W!p=%1@4xtuv#c}WV6L}+e}5=`6RvZ9Fysu#n8NYhKGCT_r|BbMb)V?Svs+i4{Iojj(AoPxq*HWWO{cTR zwS0(ghtP@Sg{^1RqGRftSqJ`RtV~KiXDnkc;m5;PzimC|?UZ-ySifGxo>5NqNZqqj 
z0>3jqY^}Xp&v|Y9#GW0!{sX`Hh_JPuxL?~6D@XA$lv$@sER?2kLVUQ&^~`EY>nv; zpTvznI}mOa++CjzTW#0T2^-8_y7<^qxXp0u3d7d^fDBW*8WoWDn#Jtf6?Sqi?Kh=KC+@~>CaCz^=VDQ zez(07i{UqY$=HAQd)vPj{?M0={TtHTFL|{Ce*NsQ`i+#|`wTz6=F7*ClAZ~Tz%BS{ z*y_t3(%j^HU7gI$T~7ORa@g8)vwafx_GE4;+}=}+51*Z6LwxU#*jo>G@f_p7mnZ4Q zSMh|~0k>eTvG=t}+;|Hv++AYtys$N9e;aP{m@Hmmt+r$gZeh9c@%~Bn#y4P!ZvF~< ze7dO%{uiwNKT#LHp6rKmxDE3Sclso5+zQcM0e9w^Ve1|E!{Mgs^E%;HpA~CMCh5l4 z9Yl8kZscr}ZzpJOkGy-vb1s?Fel6MF5ZvA87~PZ8>(0Cmn->_}`RR2l;g+9g@_3=) zCVZBX$4zjv7Meb1iRShhd*xl%)?|CT;f__Cc72)V>ha(*>zQW!6Q|;bJ#f1&G&bC1 zxQXYVDeoP4zCruD*l<@*;-<9KMR1EQHa@#MVS{PcdT-=@_(^?S@>!aazHM-~U1s{jt*P8gVorl6++nzbON{Ol zsoZ2;u{rB{%1>?BI%D#AZF7(J8rH31d!!SO-}Tz?Z1_8_2wTrIU)I{pSxk2%-^Hy+ zO=CU${5mt%d)4A|CZIk8F4-2by8~|JRi;kAskySxUB8cdeNq}_+_ektx~ok;_byzg zahE>c$$f34Skm7}`s>5$2YL>s@2Ss0yWBkAnVa1-S5D+eygei1c-FFrSNpZF_1?(a z#FKFr`&D@_RiDjyI;Ts`l~-!+P4!0;@$wst|0G^=JAm_IhX{&4y5Tl|-EjA(=k9^K z_gYgv{4b`IC42es+`!mxg&9*%*W4QNWYQUaQa*~{&b}_Jej)xj9ImAEvZV5HLu&dK z!yovDvE!(?9h34~wAsT55;+wOVmc$6Fcjp-bIHWJ=Xc!q@YN5a+;w#ydQ zY2qjIwh^|&#_K1n*A_NJ*fzor(s!`#F-Dl!8q>?=TA+zBD`Cktihoy__{B0V-D&EJ z^{kV_!sKx4)HRwe_`7ZmTkEgV&Y1Nj#*NG!g#XL9k4w?t1AlKzSgjEKH{d(B2Mzv{ zoTg8w>hodpu)SbaST#v`wAvq@^>Q~kf16s(M?T~2x1^~Ye#2_Re@EBPwERZ+eQkzs z)#tSQZumL38@_sP{XzIOYr@tUnp3SZTxOkZFFnxmg!R;G?9aHF{%37ieO`QKwF8W` zSx9IJp2LxtB=~3XD8es+-+71e@zGje+n+cy@jog2I{0JlVf7`^KiTk8^dFA%rA_aE z@41_LBK(E$o%68S>pxgBQ62+1uJaG0Q~2Gm^{(#%ecw|05&aU6z0b+|Chgnzn4?Ob zTlF~3P5yql%&kh`R;~}LuyB`XUH$&|z}neq5f!)l|Xf4k=EXSmDtdaCgM5>LOh;l=RRZ82r)S#3+a zoFtbi$-f%7-9HOk`|DoP+y*v_tk7@cqMW?hMfmN)e<-Y$i!HBcezO-t;t_h|rs%$> z4}R$3u(kjHHOrQ=mH_7v^NQHL@gan+Ync$8c60`N!|GHiTXIj=vNi0gBYqr2 zXLx&99WM6lM@PmgoM~&70a=^qi91s2O~$vVJHHI8<6`y;-^T!iRZe;sx*FbXj#V`H za*x@>8F(UWy(g1NnyKHCk=e8+Ku_@M*t@Ka&6N$fe! 
z#hvAz&mnF%aZm7Kztyiv-F7VVv*GS|CaipZ z$s^aY+UfOw-?glkanDwCTAvH6V$nGvoz8PAI@gNMuD2$Q z(H!es58T1G!zv(Ue36R}bh~)EQy%JlV5QtMgihx>VYOe%g3QC5d`WxmuX2*Ho>9(j zqdk1r*nJBc)|$;EyQTfBgWIq#tX7KMGA?tv-#Lu!{D*pDn{qC7c-SQ0SCBH;i(XGC ztbQo%l#JO}JBW>)&dsjie`d*%O*g_HfnP8lRzDH`dW%1AwT~$Bd4tplh4Axk!#`ou z4$2tMC2gWz26tP;Xy3274YCfzSc~3}jxcG*xA0fMFP;di8mT{9;mdrEJ&|;>^tPO^ z>`Uy0*7YBLttX=H5&lm2PQKH5Uglc=B~7E~?C?dbxxf?dY0~Mxo$-L*a1&#^NqH;W zGPrvKhHH)S9QzFE*Bao4f)RB{khHv_)2`p$CE?a1*nl^-SuW-^eO|cuVGD7GrbVph zl<&Iwq224xaxS!Ji2wJYGc!A)o|3X)jq4=8&X4EUjfs(t+;`?0>gRNG->EKR5yneC zzt{D?qEnAf)}aw~b%6W2>+JUVUr(#E4V~h{BI-+0cinZGeg1CO@*ws^&?)+OM1AZ% z?YYC%9$o&|V#g5?YaA%{$kITZRWS1im3kg)JZ=Es6l7& z=!ms{Rcv>vyE3Qs-|K2mDe3A)r~R`LYrp$pF&*2UMwxOy;m~L@;}qYFqES>7vG&kS zk7?NU;^}rI*VPu$DZYd8(XkQh`x0$mIMpXa)FV=NrRlK8B*3|a0)Vezif27!y2RHw}kMdA>Cu8yxO?gPG zlfFDOpws^)Qy${^Xv>55Q675HX`XG$Lp%@Oe|eC46+t8atESx+|GV^YrEm+|*?)O* z#2PQe?`O07J;n?5XpEj}?st^?{p6WK2V75CL>(mk#4&Kq`&UcMBJ=@=UP9WI-NYL# zk67b^`T8Dd#|56dn7f}5vBm`pEPlFi!7TXw^G#c{$UP0E*s>VzPzB{%%IXrhPIaF( zpLFN&ccRg8mhsso?)Q^=IRJOd*%5V-^b1$I-jDv#{oGX1$^8!f-MJBUt>`Rw)oJ$n zBWdgRVss+sMXY(k_2`&9q+ht$s^4FpRKMGa(_I}=8^mWVI=wYk{jTCmtaK$wAN!H< zL?8UT3nOaY6!>?Vd$5Ory@1xIH}ajw6H-8qmmVjTA8 zcw-{>D!QBY>avJ6pW7MZ>tTzWn88EMt+5Z1B@S+uc>Jv+Zo%ad^#i%r%dxo0_ez|j zR}Q}ee*2XXYaR0)_)cX<#*Mx=xaVAc-0ax!%f0uaweKpEPSMiO50dYln$FyHwD0vM zote80|NW)2lDH#ZGwD1$7B~4`sp)KnU;TBH&J*E($aG5n44}1Rc|@(vz{lrVTFLwB zc}(R0$Q0YQ`nj2&eB$3#w7fv9W?LVU;T`{cb`kpZm!Sbx_zx^gzyquxcI!9 z@KVC{^SryU#pm1sZzFKmTIurmu z>2iP3)9aSs;RQy$0vvy7TKq5ycjN7hf5gY{!nK|^J3dp-ThIFAnuzKNz*R9jZT+2m zuh)N~e6QMd4X6g4^7e=tPOroJ8keQj=@xs|MO2RThwk?9SN33JZ!h?+K zFs@4a#$HPMDf|%pvBwO5etN#N9fcbxFTJL{U8MPr?QI_12;8=xM<(rCkoz`pt%Gal zO8H%ejoO2wqlFST|vE|76|{!uBNbWPG%nFtNWt;!>uP&n=SsjhXAoI)P1$$&z%0mq%E? 
z?S4gs?XvMo3G1`*st6PRCfi>}Sg(!OMA)_@p4i_`SPx+Z{I=UK`OssoC+CmY+i&!f z{XJy#ll?tr;-{p)llfSZ9XkDlb=&MMBCN~CDj%ko{)q3Y2orm=b%+&KN0{B-Cc={K6+7Aq6MOA;bQ3QAD-cba9TKnKTu&|+ z5qe;0H-lA>sY-lgrI)!tC{-lCVA-ua+=-eP|@i zULV#HmRuj?zMX_geMr9VHp1-XwVyCaPcmu2RC%)sGr;{ZL6af22&VAWYha&WM1IMR(>V(@(7dsO6C<2)=F4%T`wi9nXu%# zUPYL+FUh<*!WwP7Cc;+Oc_m=fA*doD*L+A3>*YAfo>1MducDWFEIbf7^Do|4i1^5}z~G`>OJ=vH8V_ z_0HLy@L3z?VJYuXa1dBxTa~r{7jD-}jA=z%&dOn4&GLetfnRRpHozU+9kIS?DChOV zWgFX7PP!Z6X8qQzwOVK1I6n6&!cYBh3to<>Z;B1uwXTf+d8VhoDV}%%8?Nv}@Ei6p z4i&zfdtXHwQ)rCerpQ@>u1SnrI=**MIh__C4SVwn8S zfpAOU?ta7Mjhu_)^6WtTQ44qP-iTTw{y5G`hqFH}arKAjY(b}JG-B=3l=JbO{kq%h zZH4S1%d@rZj(I8Ef(ICHzhk(UxN}#)t$4QmrELmD)aS+iBVFxh zt?B_+9r0NeI=KTJzsT-a4sZsSYsn0i| zgHPpzZjM^A&aJv5(O`*UebG+Mbr#zQ*7ygX-Ms8HSE9rg%u5&(;?sBJe$MdlPo&Lk3R=@ME zzNhRx;kYcyI@^VONh?*7_D=XEA2WI5f8MpU<%@jRQdoeZ*gPeqvY}{`BDqFtSD-oSJDF4tfBLa zvR1Y|`T6FZwY7S#e)nb_{C&qp)q|qFL2IX$kqcAn`Y!k@W*NV1wrp`5n~cHjJwB?u zl8@WrI_G1j-=FQ2k9z;0PJa*WOG(r^Z}SbyMw)m=3pDZVi9uhvq#thXm!s+q$qP9v z!R4Ob%VLjD|K_k{JV&34(t}p@Y*T07)%SHA6N-&PaO=Nf{1Vn&$2L&%DC1%Jmaj(D zed6=Xfz-Og2@v+WQv!chsnI?ZzRP$}=Re#Prx@*{-dlSe{C%ey?PJnw_rWbGi>d*! z`y|b+DbsuKd2Oj2yVoYlir6v+e{gP8T`zetU-R{QOVuqw>kQ9GD!=mTMK{V1*Y$M6~4=yOSF68_FZVS-+XWF5%>ic8Etv*-$lC|?c8nn@M2S6{V%xr zuJirW*BZ3iYD|5dik8bfL8l+?#!F3EkTV2a>XhVfFWlkFO#U9}Y9sl3nNvF^kB{|P ze4a-cPhJ&O2c%4$X6dBPvFML}=iDF7Chm@nvIez-YbH-5g- zvc+vIvk&gfYv~)M4_F4*xo-6N{a3sCPV!*(V~jU$h^pU7`Cjj;!+h+XwDYl6baI!n?&_Hfa;paRK|JJBFNcazFKKqlc=dYIjV=4Sn_@Qq_tuv(_*Zg+hvX&tG)XW($ zi8H9go(A}JE2HWGv41CgneWh`T2mfbh^bA{?}R_HDr%j_Zk~@?OHG`U zUyS~4`2B5B>rCsUcZJ!VWZ84Q*)Ei{DP7`a^fFhu-L!L7JiRX!yYg+vh)cOE zC0@o_GjEc3R{hp%2FdRN3b!6^{5$_?>nAM3#v39m zVB?Ju=CSbtzhJ!Do~oBeSctF;xdDIV9z}%hv++s^8?*7M2phHW>IfUL@tO$RYvZ*O zHf-Z{5jJGw^%6E{;|&nD$Hp5bY`2ZKkFWt7FJn7CwDIx@+hya;BCL!Q{fWUs=NeN(a%G5*bgAEvY=qwpi&jjD^JZIdy(Im=q+oz{6(yHfc1J1CnSQFXcS zWvnglG@~ykpIZFH+1k<{RKTzQK~y!!9N9nM>|^FWe*bAsb7Z|XAUf;NS$9uVZ4IC! 
z<94UKqTLVpA9da9BsxRrWc?_rZk>`=XUy;48E{)K6rGvBWKFA^xsK?_Sl`*6=79g* zv^w?Z3_KWBo#G!E*E{QU`uv|y>z{4t%zPwjy~pRS)93TIrqzjvJ&&6Fa@XO#7QN5c zanF+Yr|eg>U%gS)oIYLXqwt%ah^jwIJ(RhU%)?Xo*TwmguKXvk=SlQwtOIl4JLgx= z6#q$6)0LlEbap*w{3B<|IqQ(`pGccuThNINL{*>od6BCQ>%7&jIz^;wA37^`N3C-a zWlm?-BjSC%@0)QyRKTCvNBi~LsM;9dzDr&2Oa30_H6PmWZ*m#&iQWqIT7Mr^TSRYJ zI=z{3y&t*g$=GEVdVBxKxI)?~nR7~dXs37|b*b+C7qK;IE@2$06?=tU?1&I=bcps% z{3-FA^QGD6dmuZ)nd{9y=QG!3z%F_fPtx8DM^!K-U6b~<_Hmv<_6{Fs>-+q7NV4pk z<7CnxdK=Ljcq6L9VxPO;$GpBV_7?Iy!RvQCcU&erqBn|O)0Lwg_F)_ z5&f)P^1Nii`u4_FCw^k?BV(1>@QZvC*7+qn;p=Cpfs0r9%UJ|GGd@8TZXMiq|Aci; zhpeOMwdwe|n(@<4#Ex}vi-Qx^_f%ii+&W%(1dmV{@a?b`nHkI4d2CE#b%`>TEa z2A3AuT7RoXr{SX$>Q+BGa>k>R4&zn7e}QXXCH8cp)BCXr^_JK(-BpJ=d?2koBk0uR zPpH}{=qU2g*`6_lEb<##>@U)v2T+l?I5xGhsDGlIssov-0NL^)#>x?=Sy1*_IxZ#q!}M> zK`-)!32VG}iMCz8i;x(%q>lH7;BPt6@MTSG(l{qE-Vs<0~8b@DQ>psnKbHtW@ z_+wu+`6}yN)bYeKprnD1u{{L8xpcz%{)envS$y`(=<~aLi8rvVHl&b8m8GW|+n=?k zHL?FiY^j3VI%mQXjeRGc=U z8l=3*eO-0j-%A#Ia-V06disRAQgp(k)m3L^T6-3wvvK}}^{hqKq+E51U3H|~bfL57 z%n57#;BD>uYVNEt(9bt_&UK3#}Y`k{D8f?5S!s>0jUc%~ZyaB>$ZMToRpnf6JPHSJdwhY^z12l&rT6sv9qIU!g~L34qVeeeZwr|CQcNz+ND0> zpKscw1zOwj*@4vSQMf}3Cse7_>m_hq>tn#RK8jB9OVq~;Cam-I+~(oBy~=Xck+e3U zGqh+zy)E_6vb`bsJi1l!7T)S_w!q(b5o00Yi~aHp=`==^y8Awp%_FJj`VGNfe2LMQ z`6F$-o4%wgYd7QnODD`(i!T1rXY?E^>2h4h5uGY@YM0OkiJ#r)_kCXHJ*)9OLL1Q; zxO~F;?xmzRdEA=3A%lDJaodm23-VOuir^bwPh*g8kKOJKZufc~^9H%>Q5(MCoq58? 
zFZJJY>~^2$VF`TO`^90O=V>25|KRiU^WXB*=keYw5%@Fub}XKMp1*zuk8!^(@qX?7 zR1ZIe{D9BT&!=qhwtdKWO&>B|nT~h7_o$=%U-U1Y@s8JXGq&}5KgEHL2gKs<^5tot z=fPw%b7J>8#{X4+?To0`vr?G5yr251&$C*X_xb$%+>sm!pA5cbeDVR~b?SI=8{f5T z{Dt>Zojy-g?)-g5x<1b%$+xTj5OGRAq(dp$oC({{=~ z5$~t?PWW?T-(P)xe(Hjvi^{IQi%)div;D^f@AU?+o56QzzbDbQ%Rl?PpQ0{2DRKYg z^Yb$iw@u{&I@v7s0DNo~={l=ZlicH~(*v`T>o3 zR6H~43%)wVGbpzIIuLw%if31_j+~w%KYuS~zZ0|lf0aJ{ifcTc7kt4reou$k5-%4Y z(6{fO{=t%S2$kn$uji-UAPZc>;y5h)0D&Fe;B#J2mpAyT*Yo=i9{vCE?*DJmr(HUO zr+vC&kn9Z?dCz$)@Q8QwA)dcYSw-*tt&AYQ*Jo6FJU`D2{xHY;=)u8TbG#2^&+vGD zJ3aX1A>MU4)3)dMAI_O}$07dLb3UPRJh#b_XD!L05B@mA^K-x0q$$nyuOjNcCg*JpU&4g~*k zkT)C*?mo!-yNuxe49~3xoy~&HLBZRn`BzR0{xQR!kRd-l<2`Co@YyMzC%n&i@15fL zmw(2ZDW0bS&v^;kD2>Wf@{g5w7kMkM>LvCA{v-Z7>r03*Qa~f0gBZd`fU*mRC&; zwq<(%HI2b|XJ+uHnf@PVO2FHh!Ea}I)@Q-qmK7Y!^#43d*w1GLcV~H1Z25xs*agAJ z6zcDy-%s)U#Xn>F6wmrVkl*_QyS!W&5LdS9Cw9GvETQtTR=795}E|IaiD*q#~uQ>N$5OxUei!M;rYZCS$JkQMB7 zvTL8V>%}QJd&br&o;w3Ue!qiVT-YVXJU2y$q}uhho!Iqr{}GR6dY%@$?hghhvb=kz z1c$P`J2Ha(ncjypv1>Rp_*SO>ubC3CIV<==mgg1m*S@S^PnQ2cmax|x9Be<>!C$RY z@mI~Cr+EJEpRsd_XHy`^Z-rf4*dxaLVTum1*(EFbw*`)v$ntzQ2x)&X_)NBU&(vTS z{~jcFI*^53-3JFBIoSU*ed~7*4t_V=vnd<)j_hFQVE>cZ!hShBIGF8V*A{Kps;Qp4 z0yEy8;(0a@JTZ|N#$#o<*x^gzwQYJ|C{A~WlC^QmiM6ya>@IE z_-imT_NCas9+60gw44;28-Rk$=>N>%6tkdV_2(S>+2pmhB1ogM9~kRs}wF+rj=j zg2C~F{C}o49qhkr%9Qql{o~@8UuH<~Z3hK^e6atw2Nm+WciI^nr+Zp6gDulNlovuC zmK<3>oqt}Nez*5mIo43m^6AFcha zOFjaswbqttuRXLw(pNH!L;CU>lDpu$r~jNx?6;mc+s{90Nroqc^=r-ybaIw$KFllV@B zr;~V-pIg9}ZAAF>SUFrPgS$;Od@b;?qP`R!uhA3Y;lso~9JV4`w3yC~X}u<}`VynGQRB5g#XS|h(E{2cVf(Us{SsG zi7@ls^)=J97{kxc=gVeI_^-4AzLke>G(`qRTuH;IVO||EUNgV)%G9G*ial2XdxkHA z>k$5%SqE&a0BRfjtFl8&Ju5^oTbUiyJCyz;Nv|i3wv8$O;AFkd(qAR%kpX@NL(fQz zSONYW6I}e#KgiEd3i9)1L3?*djN_@rCw!Thh@;)gm%&NSvScwonQ(otLflvTaof6Qh?Yz071VDjy(mdfAl)aVhgteyl19Ic z(DBaj%xDZ-NqTO8e@6=N%Ygttj}+kfZ9#ixNSx#DVG+k&7IG#hi&4ph>*5M=kNt@A z{eZmf5&bREU-eg{S1M*pmA;_#8N;t7ySs(q17Qu|G3q_b8K4DvULjU#0&Cu(DaiZ% z@SfJaEqXy;B8?6X>?x&tkp9N-o5}8HVI~1Q@+QP?%NgcX&v!L}jfHpdL$B?(xNE#b 
z*`%&jVmD8`2dOZ8k8kB}r$iChubGWjru7E*ko?_$+M_iUOP!&U7sMc<|X5Ah5B7pq6y@!~AvRf+lp)r?u{HH3q-a(qCBmGfRVC zrtR;vIKW}{<$yK^oW4OZn%~CqAb~9x=Cyv9ZPQBvRLm<-eZaa2{9>(lX}wu%@Z$sa zp@7)H!JXpEyr6R`D6;tFP6`p&a$#=V%VmbV|4HxEbYAL>T7OEv1V7W#n=HM`;@^j9 zyGUb+i!e&~GAZDc1jLsd{BTSvE9AYa2^00?F|HgPxt^^4G4-ULAKqW|IkwOGoHWEb zU_VwGGOy!?UrP4ZglQg<)RXJU2FvNG-Ra2Nn!v`gy{p)($G_40iSE&RNRLy@qV{1t z`@ry%$o`bXCJyg7U*58un_F`uqVq^V{vhhTmS3T*DMpm(KfzJ3?cO8XQwCvyP`ZytT(v- zp1!oDANPMlU%Iu3%M1B#eEq@@oshIBsP`-VS(4sP`t1;p9=XZ-eM`fO>*RpOGn)hY z&18-K6Tg3DenAh8=Vu1_`R{`E{*ai;E$uVD%t&&IlEv0!!u3dnzOj6C(!#WU6`zoSbA=<#u^3Bpuc`UnMVx#J}t<E%g!I_V)HJ;u_hYF7gZE(ReM$N|&CkpX=!;2uRe*oTlpnvKzrynv_3`}2L3?RP^yX;S^CdgU zS(q$lBonS1D#Sfymsd~z8|E60ANPMl`nVGWD{pe#xEkq{Z^rBaVg)@!9+l&rs@tMMo1N(WLj#E|Dw z^oLd*tV=P|#V<#-?wzDDFTpc&Li&RmdP}Gdo;e=kf4>Rw?`x9mt!PO0d9URmdzAUz!GnVMhBj_{@ zW$U=`nw#*jPI^vHlC)8V(Y+t#(w?Fs5RjTRUoRrC<2a;fX@`8=Uvz&-^JI975<3_- zTt&W70+R%c8;ZaVDu>&?!+p1r-$h=b$eJ9maiC#aMV6dv7csOiORrL*g8PFw%rYHKa2ta$Jdx9Lpg^V81OF;Sz)U z>E7>&Hm!8I-zzxu$s8mKJIqin*JV!3&b(}W{PEGC0KqPZ!5G* zvAx`z!QQTivwO()CnXJ2;aMaW@k3`-ERWPGhXgne_)pM86GwSnJx7%P;4 z{KRI8%(2{c$~23>VlO9F`(_&)=EnEwNt!;EI#25b)EN8)(h!hBY6t$HW#?^jc@TPXAs-45*!ZFrZA_TA$p(MWgCsqVeO|mn-tyeVMPPP27pur=C9nVzVmuMp z_yAb{g4gX09@fJoy@ag^NXf(Jr1+5Wm-wl75MMPTCs$n)_J=?RKX%R1j-;j8SyB(VI0Dg)y`Bqz@j z7>hBBun24%zKxG>uD_6ko(l7hk(hOO^IFsTM^VI=v-Gf7 zc^xJ@y(x7k;q4%XeSFEW!f0-LILF5=0vi`2bO|_4yt76R-h+O8w!H3;rDv!xrdcED znyn<}a}Y&*8DRC6!?nogdK3$yo`-MvK!eqIXY#t^5(}2p^+qZP@e=2`2Za}tc$WkD zh+tO~2N#F7c<=nq21^umm*ImFgCDPs&6c_uzOyR9v;LN;Gk=e9_LwKfw5bvR;DtdOrTn2AFVkmsjnRdaz2x9`-cOy4jaL zr@}KyEa&_~Cl_p7{D(Gk`ptWsrKpd*4kf=>2Ko>LtBseFI|88V6ygFkU2lmZevij2 z0vi_!=D$BtIILfIY0N$H=35)QOZ4h>5C=?@nV&!|pu)>7eT=U8$`ZXe5#6~W6zJ0_KHPcGuQczxnq}VE$G2>-j?4V26H<>?sqZTE?T8AZIw5*fcqzH+ z0wqLX;}X!s$5%7{(;MCNH@g2%6D(RNH%jye-%#olOAP1C&*94fD?AcnS1#lgn!v^q zFwfvIW<372+DGW{Q?s=zfZAb&XK4Wu5xQ4RU{^$2E9So4ym%YlbwifYYh)O*u_ekz zFZ>B-Ig4xAT5>Lu7|bb{XA#&~wBi;o-bj=1lPETxCr#&0y{dVV@NjAO{`yTxTcmvs 
z`ald^svHQTQ5#Tu;G~HJszhvv6C0sV?aq__eppgZmD-QQSblhy3NQ3F=4TUuHJTJA zrO2kpsV1(C!9ni)(tiw6v_VGDv%aJ3ql%SBU|P(VrR1C=0jV37&=!G>#tPeZ58tS_ ztTRs0ZW;MX(I#bot;A`@nqhA6o-fka&Y5{!lp^J)!>^BCsEpE9bU8uFYR| zNy<|8G;({dlgjzf8v;Hc0(+ty&WYk4-NWz4#v7c;@tsl*PqAx}KzW!vzzP?*D-iFU%s9@O@=BQQ6 zu9{a97mDKQccdXQ48QvGIKGoIf;q)JH_Zh0FtgF(5!=`M<>RLFtynrg^!lg|6oEZa z9+wxzx$Sej*FFzdv5sl$;nMECZJMMHBv;rAoX{1@f%FA4cq})Gz^*L+{-e0&GbI&C z`=Y0BwpuxJNo4cG(}}>EWotBpr(rwA?K*N>9kbp-I?a0PvntZL#K%2c=kGK@h6|wg z#mT%RBL|gO&4sZ~5!iF(%O%Uh)w6XodWvO_JsAdlJk)@`{OpwPl|8ofaA|FBTf*o1 z*x7_NC?4OcoOcK&S6~+qf!$KBTx$F2Y;yA}aL*Bj8skci@M{^_uh8j%Em8#b^YZB| zUh9@i+KsnkT{uVDaQ8RuxeGbERDQAs04lE@C`$4()(6^enRvxZi{r`18bqNnp zVi=buWJ_46=W)c}FGslA$K6mTgI*60QDPk9uy$Oopo0l^u_CZ$QtW3A<5kIOE|)uG z<0A2E_fwuj-`U%Ny_Q7tN^*rrYXG<(Z#*@9=a)))S_`a}&i5l}>usx%5 z8ZYw`qS(0!W7&S(jd!ucAe~Pvn=s3>{+4szawQaBSOnHAqvJE{7%axy1jo(OD>+W! zI{pRonm3gFHU>+s9^WeGSX9PGU{MX?28Ja*&hMu&k23xAvdZUCSw8OZ+uV7D;Nu?puWgbpNP8-YiHtj@94JN1<%f6Bzw1XdiQ*oE-pIEGH{~*L=ixKMy!WRc z`Bc&%^WKe!sEYT#+QUT}-u^F|CG8?5c6!}AX2fQ0+JgwJQ3r{2EPeg>8s6!SCjk26 zNoBxRKJMPc#`UW`T)pcW{DuA%&Q)RpCv(4ytOLshv|JI`-OR?4DN5iwh07&T+;k5Y zX_DAHiT&A_dE_hlf~2>ljUCH;#l>nQ!{~m5m`ToI5?GnCx*>s$OK%eo->8SweH5*g z^*>d#P}!Jy7?mTl^*NWEqa?oJ0LGy|jt}7UQa66a_h)=VNP7#EUFu1gt6FHxGkEkL z58Sm-(kG_AV;8^5`rggQwP^wSkP~Lu9>%!Jhi$?Bl1#mA({i1p&kZj3GH@Xum;Oj1 z(*&^{{+`OU6NrpVJGr2oBP1Y27T8QDu#k1e)ug;A?(svCa+D36S&L8CX4`?imUe=Sf9w%>u_Qre_ zhP-7yg)!jIx9k~O6!VC?ga^C}+JU74lw=?{gi=Mk=yrk$59`(!VV{F}W9kY;LzInm z5j69>bFw^S-b642Mc zuCWSZAX&lDf6qX21pf0(KUB^4Hr?P8e{Y>VT%z)rTRdE%>vrMc5?yDo-$m{R>;%^t zIX*7Ibw(Qxm-cA4uOv-YHss%%`8DQ{bBSCj_<16*P`rw333Yv3r%Vh77zjQ=OJm$* za)w6r%?Rw2a=0Ul-26qFBqkr27sVwu?scnxyKMP~!C{X8%bB_C_$u5F8(GR8O72e3 zSgzk6fA7YMY3Cvu?+q`~NF#Sy!V`n4bcQymzaEr{n(mzTur%Z$pXg1e(xInOL{nbQi@EZYmtnMR@h4hJ4_K+ zlW;$^?5{TtbK1o$mXUGZnjlvZ*tzjJ?B-*_nr#^`%TDlaACo}ZYL~}cR@(N>cgwKB z^>xtp?hXu*dWCEZQO!v%03)PXu9`vslgE_}QeUv~W58I0$DHWJj^9e0pF-l)hx+_*Sz-DkIiJS;_(I zvse?wD5b=p*h%0#8|)Zatn^^A3oNmiU&%$xlD&PO1DGXS;+tqGyK@Bgzs}|5=?z}C 
zE$u)Lm+1Rx9mOT~ey%k*$S-EgD?XLjHny>OXC_QN$oYcAM$YXqBCtjUG`@IB=D2yZ zA&qtg@K}hN?Ze6!qSJjG?66${Ja9$;f4tHKET0o###Lx;ekh>;%z}IDRaYgrzs1Kv zF~DssyS>IT+H6Wd&j{OsW>doKPnbON2;k!mV1C9nx;Ah{9AD+Qjca(cbj!t`4_@xPWkM8}*u15K!C~H!zAHk?9W%#Xq+^?@lNQQr{ z!I?GqDPDuu!whq@vauF6Dj=rU8c!@%JjQKkpv7XvXwQR@bGnyLU*-n&M&r9-rE=zz zyLCL12<*CYZE%XgVQif&d&(_}j;hqpy=nPU3J>|OFC%c>(B9RsZ>zA+QUWa-|#oekzwqI z&gYkj^+Tomp?y7E+Jp7`NP0_#p_R0afdf?J3vV0dm?E%{6vXw1t$bX9a{a4)T!M1_ zl6>#oCn$fK9>v|aOwwo4hER7f7w7wx%k?|^xH}&zkaWPj1|#OF{I!c)d|aD{wo2IJ z*rTCT!10w-x_t*TQ7?Z9#@S7qQy=q-7*pvytXmW}QNE#Qg|bTsipId@lQYbU_Do=>#&;FXecUS( z@_Rp6|nxyR2FuLP2t?WM!OZ|y zu3?o5tQN&^k6e{P5qB}fbN5ogk*L~a=`S@5x81xNxCGOg62m2w&RyfYa!D|qV+{^{QX96-esvFPt}G@SZF3*jrEmGN z)^aXl;9~LCJWXItCS5txqaMD|-`Hqxl>AIWPI#E?$*{)Ya(s&~$1P{77H@L^do_WL z1#qpG?z9J<-!G}3(V;XNepTpD9vbVli3IzY**@;!W^YN_EA2~4WN|fEr2e2A;sqj<0sYisD{@M0%~V zq1AGfaq}vzc4F%ouUzi=?Qlt}jV$9cp1W5e%NXwCp1peu`}DBKV&9JAJ5c!?^-&*} z`9fbsi$N*xw?slgs6=o`qj~aoll5V0)E& z@2^L3H@UWWg`O?m5EVJ&nKPYG1h#iM+}%Fz*}uS%#Xz&CzL3kMiaqrdAD8(k*27o{ zuZ3b0*EqEmv&KHV&&4%!$q3h)Rjh?HK}n& zSXPDY-`PAje~IpCEjPGZWm?y^LzLd2YzUH#`tM?LU^n3FnSd>bRx`I}!*tt=(|%;E zMBl$ur9vgaY;p{yFtjL)y4&|CRJ4rZc0%M2Yp+z)ZPT-?r!YE3Q@6)j1QrfOVs*Q= zkGpfY{6^9!l{$%B?~^B0_+7&84IW-i1lIWT@?U=ONVjb4M~2JciWaC;*hj(ol_y-` zgPsf>1`lA*#aGe^9xm-)Pxn?dADTSm8kdeG2mU!q_}Ly7frS-#ESaK%A75|TelV8{ zZ`_(a!KsyQ+&1xXV`Rr^3JTozd3MNypjTGf=FjJPb^DfvgC#AMHoSdcuK<<=%E40l z3l3oz5m-|~ezAzvKJGzhn$gehiRc)&K{*BF8qdxp0&8{|qPV^WhdqhMcSzX%=1}Sg z%XQ>)0M;!Qd(eWJv_)XIMhnKPH1|ib%OvzwFRD~*fsNpj+zqc_-kNVVtXH6=SrJ>? 
zOK+lkFU2Fg@<~tzHX(}hIyh#GC3s?_@z#40|X!&$7IJBvjzk!CqVWU?( zkRRUp!}1fqO*Qo5OmrXb>~KGiiRs!e_i?@s=a85VC%V>A{AQLBf!!6~G(;~Sm*AS| z{wOZ7HPe+oF2S|PzF~g3BsN!{;^7jNnLiiBB_=b!$>1>0e64^(NY_>^{)d zb*mZP!+vr`Sz;D9i}x%78?AAWSq|lVVvnbnyc*h&Z_B4pGmj`1r5Qa{qXU1EoEer_ z#sTcN;D#VBfJGj@@i2KcWS1Lc4~ea;%i9SS_;mH~4Iin4`PejhKTg5GtPJx7Gb=Nh zSVPV?=vFxTxfX$qOaDzVJiH1|lYcmaL@2iCGNVynu6*y2)5l^jji{0qfsMnTeS`Cl zKIz2?ay`3dvZq>NBBPE&I*uJ>MnMT4>@j8y)N6E03>NxgkRQV`{jh>>#QO`-*~?0PLyi`HI?} z!)1i@wbFepdP4WG*arkygooJ~);HkaBbLHp6CW!-E~sM5$@RRl!K)eASjlCX@K9&} zsUgRhDeJ?F9Hbf8n=sN0vo|r3oR2(JfFTxJsmHyu<}v)&jBg0*qp1)&dfqjRzATmj zSP_`bhA|d_jl&B+KaJ~8x9CYd$7h42Z=`dGECc(pg3aX&6 z76L!cviDhHJ-@qBzD(CnsfJkw*wFz38_O`-?ef8#XN37LU4nwc#=WKAB<5ipjBRXK zzJQ-<*@wKr{1k^dPdlfy$YQ0@NdW>|F3b)lOzf@Q_{R~F)|fRGw%VcmT={-+6Cd}- zhZ52UK0nBNZ#PGBENTy*W9;4pEJo--#HyR%cax2+Dr5Knzd6`jupGEHGu{i}>(tl=>!HfwleS9{uW=D-~w7xMv4U}NF6@xx1J zH$9zR92wMbb22!np=Jyo^PEE#mpk}P+U^w)@S6sYcwD;M1D)3gMNxT6<&X&v`itA; zf5Bp?M6tIxSazEp66P&N6A6YkV7GBrO-k^tw-4lU!WeN$J+=`Caaw`Cno6zI{u;(N zcn+=}pw+9xID2y#M?Nh;V0|>y6%8Kmwe8)mp7%(C3K<^Uubj;=ao`lJBLZt=BT--* zgX8&P;&Z-h4HkCn9oW~*RC)72xQdd@e{2*5&8pHU3c>>erlHd~BbzJ&YvT9kRl5do z-VshG4e`k=dGm16&_w`Ws_a~Evg;3$(=6D~BAkyb0$VPefhHW3!DAUkl6n|<%nUAr zD&#S(eO#wD3GG>28^t}6(4NJi{+wT=^IvUXivTMGXr&k{gf}V`1B%%m&h9NANkpfn ztB*@izShjcB|6`=3=Vaw%YF&HGiaQhvRn@+i!5g%`a6Dq@L&jbAy@18m$ja_#xla>i)!7T2ElHGz%Q9-Q~$$A4opW3z?Hl5exsw-Qrcu`Ao_4-&Ki z9vj6aW&_;L$KCsTSC0IRCr4gWx!y^`DDK{Q5~iYTbJvsWc@;VYXM0yKf6Jq|#PZk6 z$NjnO2NJ%M*~TxQ(|N2?6-jZwk9)E0Nfu$*E6KGm?^XF8^pys8hrHn-o~pl!83^1E z);Y<(uXr9(%C*PLLrz#uzIIJ}KGXy@R(rPh;xfW~{c(*Y(rI}Gb|kP+f_<|Yx-orZ zjUMM+OYG%(_>DziV{nn<<2!1Y+I3e~EGHafVZ$qsartZp{qYvBw#*W1ApFXf!?!Yc z^nGpGeIjXqu~S^YWmbiq;?+K`V>_77j8!%Sd{|f^vE*Fv9PpfBe*&T3?T)j@zHmV6i4=z4+%cBD*TQZXzz6v}|G8WHMCT-K__}-S@eqF=oRZHf2?9xKA)Rp101EsBd4W z)Jx?02HZ?xG&=O)#t0MazX`|Nd2xESZ#^&f9IA#MF726Hmr5#?_5jRHIjK8-Sa!(c z%h@b9k7>`|>dwvI@aE=wl(XKm;9Wrk);QgY*4pJ!T(gAcBfWgw!*?&1^r`8m`*J#~ z=%??G;(niyP2`mZ2bt<0prfl%XCV4Fb*5A~&{8sjZTOrA3tPYP)lC%ti^~T6URPen 
z4N+IXTzeF;k`iWctUZs*T#F3`jFrGR%?1N;6*kY{QC}baV?t%r*2ksaK3&o-$sUou z;dStG-0hFQBMEYRNMns0|2^d_g_xY{%|c=)UR$GePTV>S z;yUb=@Q=*?l4f(8s$y+<{&g>Zw`U}xHY-;9xQGAC-P0cB?P-5fc|Y7YihFEZBK{3p zc{qQqce#FquJ+@*z2#&{?;Bn1GHypz=xUd|=9SCCe{lOmteK3wqKY-s^e8T|_TJgU zCF&>kmS}wGiKs6N;p3jY-<^Y^zL+^^74>D~-~Dn)EWR8cmtcHte4Jm;E>m}+u8-^V zR3gK@IP$8OzqYr#`pW}7{pCvb-?-h*^KsAKu~E`lX`?-i;r3TWd*~L$C8itF+{Zn9 z#{vmotGw3feHmkD$j3d~JfZfty_;JuiQ2s7`8d~(|FY*YZGD{UoBy(H>>Ll5_S{Ro zct;@>W@O8_T+YC0s^|ahdt9}P)33Psi`;X?KVf(fh6>C3+)WJnVHFnixbTDE;p!#m z3pgtppMIQr8NY>7Ilij=7Pd4v%tsz$Pp)st2=>3B{0#L2qx_6TUFGwUkQZ;-y{#8X zoW+t_tZ*<5_n}ae8 zd)5t4_H1Ov5`kS@Zk*5Z<4dqzwDjZqV++Vew#d{S5Et?L8>_;YA{zCu&lP(yHrt-j z_gA}`@fCM<%Ox$r_$CF z1p*cD@JI}Kkdel0+!bwQ7Ux7jKOaSzo+3=*q55&J(ZW5!T*aN&fN{BnDn4qMtVNuT9b-7hH3DV zIh;Kd#^$@(pIl%O*mCc?wh0F^z-A5hDC&9-c71oq>qnD52glgn;IWN^>0yQ6wy+BZ z7J43FW5I0g%w?09ZDw8!a|=Bl{z{5}A>`#UGDQhv)(5AwU<=|aSv!M)&-RG^RMJgd z7Am;3;t?4;cWg+8%K(bN&4BSRa2Dqfm1|<6-jBo|dV%u^+0Aiz-B#97vzJ-oZ7!Vd zRr>v#?qmC?>oAzkCL2Bvuo2F}9tWGHpK`(W)C6`cOU`1YE5vgKi~XT8sg5f>Ox1zj zNJVc!n=Ple#_<}!zM~0jEQE$$+|C90hJ2r((_p`ctt5_dn0BxoTg)gNqvdbX#-QSK zC$C+y28X<^lnUeJ9;vZG z4y7ebw_{36m2As4H+a0G<+2?FGt(eS#*Q_YbDk+6X5w&U9F(YRs7~ec!_X~)u7xpd*bT>) z2!dafNhkV`wjM4mtRXi#D#N`XKIG^&sR#^i(JQe(xPQ03bqo&UOEvv~A9>Dea+(}0p#uePS>Jlj42(84&KJ96!G}81y-*X_*c<6;@bEQ?-Kft= z4^rdh2}>`tY`nY!Tqr{r^`3V61R%KvyDUIpV*#MNnwT(99~R46XW;lt**iREDfma8RcLYgCis!DpZK+WSUXD;wuCDf=)9R4U_fFV7N46TH8hlNn}4-;vkRFELp3 zt2Z}*I`T_DM}GlDnEg^uRjEaudom0j=;n#1$;P?E7iO9qOF>sJ{`wJ}@OkpL`0?M+ z;E1BmKZmvx4yA)mX@5#Z`J0i!l>ag-{3X2Pb4ljmpuYI>Kl?Z2ztTBR#WNnzcHH`b zgE;4p0In}@1qf_hC`gnS4)!DV1$jSWcaTqnoQC5rt?oEeLYHZKOF)cc_-?+84nEs2 zB*yUBSMdQY>BxD*z6N~QV8#t!+LJ9D7E$LkX`Jjh&GWQ1pHua@<-mKMIU{txCb040 zbT;9bGD6o6=f`kZ^R*0@pdH|ZJ=RIHKlNmV$GJM@8Y=E>uV8KcH(sE{Nfz2Z5G-(1G7YgkEJGgoKRTn+ol+nZ>>qOzS z;%}%O)Q$?ZJtiPdGTNMx^I4$N-k>;MUg3z}zZVYgUqhjMh2)c9`l&kD^qWOCEPHoy zg3QU$pIFW)4R?CDe@_$G_ypZ=!ue_Hk?`}@347>2q8=^ztM~qYWIgKahw~%rkz>M% zw$IDzzt6VkJUlG#zV<$xUxQV^Q7zA^%)MGq(e~(o*ut+PC!l8pdDjkqPT>Yo8gNbq 
z#gw21rZh<4aRhm}#xd3pH?`Fn3x2F{$Q|_dGmmH-1{B6zWSkDa6L4$a;6O5(S{@Fc zvj_)pU;{Z2S&JjIImFXJoZJ^*5$-nOCYHzAep=e-5h0zadk6J!bkZUHF3#BqX~=qT z_WNXoa{nk#>esmFe0Pk}pQ zZnbmxa+DlsOydY)*dVgPXIug089c_v2laNP|9%y=&(QTRg|{zaq18f$8==*7%Q*nu zVGbi#6WCZGw>Du!^L$x#x^6WvC!XfKCKk^UgLzoCcuB7dVb5AG4WW?q{1Dup>ggdU zgmYC}sNNy`eI{sc4T;e_h7|E74+mZ*xd-y~$ElY|ad~cQ!iB!|!y2v({sHAq9KjQY zndd|m9|aJBHm+xoRlc&GzUHG)U4gEEZs6nb$bTdN|oJUP`A zBCsp?mBb1kb#>QMTrOtJ#b})?^`?Nv;Q=_Z3)8)emfmLB?`YS<+B!}~-@x-<2gHC1 zy#XA|c;Vax4c0Hv{NdU>#D`GhNaukNL6ijaf`C0PDE4xr!;zgh+&lQU4?^N_g+cf3 zSbDH3Lwe5PPgnnPjehVNyUW;!w-w>*AAczBoOYj37Oj1Ry|WsNHv4F+?ipLkBwxf*F{=B1^@Uv>YG#*0LkI~ne9 zar`SBYKCy&pI#h-)9E#Yzjp`iEg^A{dsTDlaZZx6ElHH{bf!l#fsNIxLyvN~BtEB^ z?c)-h({wes=zF-Veyq8{K+aLkbC*Q-CnMt>NSWugM&twnaN11;ma3;d-*dh}O`Ag}K@(se4%v5NC{IBODtJ5X>-wK8DY2 z`N0*|iT~pKp)Ean1tv1+7xyS}fcujPDgyD7u|ob<5m>#3ipLHpRr88_7hTiC8^Mfq&Kj%Z_pGni|AouRqzeZkBV9N$7U?Ro zP#bDMCJ6t?_+PB1Ydzgl3#eq4OIJ59tecxP_<-p30QPI<7(=QfGVQE_ zBthesxpWO&0B_<@Oi<&jj^8IgZWv3~k1ns0HeMcog7O;fhxO+MKf1hHdSU$(<#qAT z$YWJq$l;1UBZUJ-okzfXQ#jx+QaIp#H96pk zH96pSYI49kYI49wYjVKdYjMD%YH`5xYjMDZwK(9SS{(4fS_pV%ZCuKW>`-5^7u!U4 zqc%9^N{`1nRSHPrSm6MdaVi2m03*)M#^Ev*_F{Ksa9zND%x^Ks&@(KopC!YeNwJl& zW+gdOMf|P8VZ^(JaZOJQH~8x9<+d?c)}Ktw*JfQW&a{8sgZ>96CPI2<5@Ad0@3)!Ey4dLki*?)W zH^5Bp?;0A-m>+}1LK%J3GQ$tD?5&nq#N9DE4Hn2cMOr{Ef-QsqfsK`38xtPv^>#}( zl!b}x2G8MFf5MRx0^2-z1w>%`b5MR#>Uy}yJ&Ea;9BIvYH+tHI0gX+IVt!ezaHxYq7z^wneMH7R=e5hRq9Gp5zh2Q)41>FqD z)h$;z{w9R$?zH z(|PLvcx>`7y5!li=;knVQ-)bcoYcw4y>-hV}I*A%iZ7V z%O$#5JADH%vj)37Kwx75qHAgO|3R1+Te4o6a`%VSUez;ichET zCw^edrD^$(#J_Iv)f*R@Gq%#PoAZV>eyav>IfgkJTF!F$|14$i2dVHt2HSU(Gha?q z*cV=IsUW;WxflPzEu)6*TS6kRRyKxwXb`0?fwnAz*Yc%|!n3@cCwPef8&^ts29NWm zf8AiEMV+q4yT`)e9Syw;t_L$=!l%*Bn9sGYS!=obGMM7>Vb<4Umov-kb((F@xL`%| zSh$4d%X zU(9WvXz(TP9R*^MJEd_z z1b%@g6nJcG!<5J5z!>ptw47cVnMZ_6G=Yspi29TFfLnhW4U!5nN{ohZ(SlP(Vc%t} z!5}yiP_c3rdP|tkh|i9W1<}Ufan{6*l#t){x<2kUYDJl0_!=L{4Z2ES<45l2^di2y zC?Q|t^9(MrHT>%agL!Fv-I4Cm%djs9ainw)B*uIPehgG(lc&^Zj;)R>7?f=@58vQ+ 
z-B(fvoEyO!rpKhKxqFracuPef6f=IXa@nU%xd1NS$9b(@|Fha)pO=re(5F*qVo(*Q z=z$@0V7*)_#hUJK;Z#O3{( z&VX8C98cj_)grL5Y9Lyf_>A8MI7R34F<=;C&c`@fsnO`w1_znIS*akj*rt9nh(~8W zfy`*1nPu)a%NeZ21g^qsHGz#qTzs!vA0m&^3xweT_0|jzRrxueU~q}mhvy9DdHL81 zeZ|mPdU8-7sj1%x={YI-5{kB#KAxoe)zW*C^`;v1RWkoRqz3;!tp@+Tss{i5bq#xe zikQHK`WavHYc@PvOB}03xMu$hKg9BO@gA>jVlFU}pg@V81HI17dC=2A?SwnHcYj#l zYPrEzul{8H2<+Vki@Nalb$Vlf&SKL9nh889`84Qk4~c0!DcKYvum_@B6=*LhK5hVe zfT9hS{h1agIFqxqgMD7?$v{&qKwxJ_#|7Yawd8Vw%rhMmA!vB34+9#*$sw)tHUF|! zGY|7n@Y4czSwL)KcowH>UAE@cE7_;a`@HnXc$ zS8ps@RYjhMOSI0lttQSfxM-cbton2IZnu56fJZN6AN}CeAVi@kFTL8*-8JFpPw-zE z{w-~nY1fK8i;HSiz}XkXl8A6U#VtNoYGX~fkQ=2}Z+w2#;Hy`E>KZJ|jqY13sYIrt zZ!!95^Hq3&*SGW{0z0l;-*V(GE{p2LIon{(oFJNTBCtmN(XW-(kI3W3JKa8^(E+I-ke;t@hQ~j+ z`h$126&&4RzD%(kY`|mp0xL*OU}K5yYw++*{uI4TV`O-Nf-*Rs`&vdm0n3RmRsI4*}UZ$t$f5>bE`E z4>f!B+RF(xTJ!rW^PRH&4maPZ%cCT{%6c8EWEf`wmT)SJ<=sjZfl|c)4sRL}*jTPH z!87>k)vMM9i+XjZo}NO*0R{2#I!Kw5Shc;Vlf$^84;Wd8lbWd0qq zPyD{WhP^UH^x}H8kuUvgI)ybwF;}q*H3@87Vh=UvyrN#gvnw14?31Y{l|aei{6`fT zjT5N()j-S(*4WPbr9Io=uwJWA8;F|nvKA`KV9`CURjkMezCYQhkMp^dj9UB%%lW_( z=#+)%qX}#*A&;8y66?zxgQ;FQpSa!4cUs1M(7FCprotEQ1jz>eqS z``H$Y46b@{cB^Kb&3{Ckmcg<-zrj>Rulyg#^Lwk*8Ol|lFe}m%D(t(a#XAEVdn+*W z`yH1PWGgKjpz-&SE$|{sWEv)Eea)Vi(SA6{MKsb1L-b&D@lRU>Ha-~fx(TQH>1g&7 zrQ_^voS*8wza<8X^>SFZpl8F`N(|&0vqeT0Dv`qtWi}tb3%AJeH3pMogZsZXR@C;= zndn$i+>FZx>&I16L5n0u-GXUOnX*@T1E<-P>}xq2am)j^;fof5jlJePgHK$3(e_8i zTeq6=9{DZj_owT_@=SP%^uM?_%H#&?=o9xig5dJum zj~AtabySi1%?P43WWy7<*&2tcJ8!D}px@b$7jN22lr3qt46joH3S<}%Rpc~SPWe&x zD=&J)Cs%C0mGha{dmL*pt~_H>Ej=fw&)3vrLwaS3?w_Q0)#3>uHgYi`+*^bC)ZpLW z#I&%Me!B+${(cSqyu2IZEy6XoygZ|!i4SG7OF}Dxpw)mG z4?M&{|Kk#8#^`_Svz*ad%;r_kHcen-ZS}-0oX6_byLkqS`SyM7c1rqA+88sYa$z1+ z4xD_=;P|%?HbBIoa^>31$G!6EFmr|nDoQEiaQqkiaGv0Y2OF21)A-Gr#0q?xdpP^P zYuh`>iE8MCm!{1c6tv9-~g2h$4pZgxKD-QS>Nnoe23_i$^$zIq%kZaC6fzPMbIiY01p4&QpTWc%eNTC~uoz$I zK0tZp3A6v3pzp&8zxL|NjfwdN?BL@PJbR~!!J+N!9B9--+Y7Xq!M)9LzFg4G;(&OU1qVk11lA9sv%$BJzh10z!UN|@Pl6shCyEMpjie>BZ@Eol^mfvXop0>vZ 
z#90m_pWnx!z>D8wy;b}x&aX)iobMw&u=H9ge>pY)n}#tGBB6IJ`wi`CiI3s1O7zQn z1L8P`7@wXTKb)8K4C7M}0?7^Xw)s#Efw?DAc+j%n(xN{HVGgr8r#l}FzznEDI4w;$ z7-t@6k&y4W+CJ{_7OoH6L7orX4=VSKSA2uljvsHiNzyrK=MY?NAW=oiSw}c59PDx; zu)CQxS>}2&!QlQPe~*3M&9(Fl%BMcHby;8qef~2&y~aOq^L0vi&(C!CTH&=n(*u5H z_p2=y@OYlb+FSc;JL9epbFToq;|c;BUzT^9u(}5 zCT>bCdyW=^xI~xmrA)u@?W&Hs*e&FCd zr1xum5jzanXMi0|z+M`JMJ6I&&zF9oZhn6dpO|a?F#oFY*bw=qgfbJBw+CT$0tdPw z4bw#!dGrV{-!o`$4vLu^oUu9^73wx3NldGtjwCkz(yb5ndigilbHlIWSDs(TwG0r?29j_1Dy{A zMcKdAfhm5N4{7u)uMU!{f>=1}f*@OZfIn$zs7>~O-a`;}K=$l_fZ-ozwtP7s=zJn5 zzWcW@TX|vL-UeCIS$LNK!!aHLU1|b_CNFerq4ta4k@I<8&X-ma=*33Syd#~RM#&99 z=w9o0f_xeT_-H!8S4`(OftS7v3aFozdovhky)aRau?f~oPLs!i@V2Wz4eCPFITWkb zi|~3h^P>Xxu7Ft0N&J{ElY?DGg|JUnA<))-m``i$ybX|R<>x`Y2j1tgL27C6bF|)| zHFkNxPYu{50fF%X&MEmaJ=l3{NWiSILPAZL(9L)TN-fk~hQ}*91<#l`lbm}u7;9p< zSI$hFj?4Yp$E;`(>)nJ{-H^-9;5RV7-b6G8X-`%nkD+E~8s<;FJ=rJ5w9Ib|=pVB@ ze53I)0_iFA15dGpkPm%1zyTO15IDLD$|h!udXB|DdSk7-c?|FLRcyZr@!U#fpMe=A z*Pg zd8WjiS@#Z%AKipHC*YEp3;8e+*zd}XBW--#i%*`DG*Q_&Az>rq29pE(e(ch){t>-#d?pY&dH>H)TQurCa~y< zV_6(%a44I%#|^8A6ghH+d) zKUrMO{kULnGB=5xN?-!MSG72btq~d~H>Y6ax)wJ(boed&5eP$?@@X&($;#1CcwDkR znq<#S7CD@+clh#EvXh%42Br|M(<|g_m+jS)1byR<^>B&$#%<@}><5m@j}&&2QlYIg zl$+3d6ox&b=@2KF^F@qlqF+rK`ZyfN2p@i9=o&n{m~mgw_3v3?K35lL3xdUtZmfEp zO>^^?)~1E)>lAZivyE8A+<18u2fxbnxwL`H<-Vy3TrY!z-d9Bbm42_^ks2H7o20v; z4q*T+g;nHTCH8Vy=M;gB>zT0r|B-mh;HxL++8FW5>d8A3T1Vs<9Qxx3l?B?ae{PL5C{Ovm}3m z2<%Kwg5OmL;o%}T-ugPGKQ?yGjla_z<#hK>D!rfx?6C5%jn{kiKS8-x4&xH6L-l@3 z4^mgwT^+$Ws#15gp-CI+(C=Y$2w%?_58$)HK8n+bxs9CR%K~y>VBdq^;S`I&#=V0R zVa{9i+Q>YEMcfbGilq+B>Y?ilWBzRK736R!ZXpNe6eBtAcPs)M7k6hLfA3wGK<|}d zXc})<_F!+fJ&*B~xI+_7eEgjamf*d~t569>sMPm7-6X6Q*l8Ynh}_OW9U`3j79aoV zy6$_mGX~2(3PY-Ex0}JkZ`V$)gL*L8E4?ubt>p_#Y~<29 zW)aw)(RqzKgBQ(wEEX=XU7JJpVapZ6z|tj8!{HLJZ)gIG^_ExpJ*@EZ>z}tVO9_Zj za27%pXC-g^KjMEr|5F10Qv&}}0{>G2|34{#&PoX(m1qvC%RD|3^rv4*^WD+=?%{op z@xBYZ@15TFQSbX(@B6A7U3??&JKg(!!ux)~`~Ii*JrLiu5bqp({|w*j@STG1Yd?{q zCcayrNfN2}ZrLeC{BL~of7kHuxW5+P7UV;)8mK8K9h3oT 
z56T2(gK|OnpaM`Ks0dUFDgyP*YGkC7Wcy zdr&4Q82c?0Ug3>`5p!T3lP&OzRln*KZ z6@rRDrJyoU;3)8*G*DAeIw%9w9+U~n2IYeCK?R^fP!XsUR0ayb#Ox|i8mK8K9h3oT z56T2(gK|OnpaM`Ks0dUFDgy> zL200-pmb0Ms68kXlnu%S<%0@9g`gr(DX0t-I0-x`4b&8r4$1(v2W5h?LAjuOPywhA zR0Jvom4O1MfCr_4nu5|n8KCx{Oi(r`7nBbw02P9YK&7BEPykj5SAo(%O+o3P3{ZPe zCMX+}3(5x-fC@oHpi)p7C~z8hP#UNyC>@jmY7fc;WrK1-`Je(&A*cvc3MvByz5pJS z25JgQ2W5cTgEB$cpj=Qsr~p(5Dgu>)%0PiHfd{35nu5|n8KCx{Oi(r`7nBbw02P9Y zK&7BEP~Z&kpfpfZP&z0B)E<-x$_C|v@<9clLQoN?6jTNZoCO|~25JgQ2W5cTgEB$c zpj=Qsr~p(5Dgu>)%0Pj0z=P62O+o3P3{ZPeCMX+}3(5x-fC@oHpi)p7C{PAGC=JvU zln%-OwFhN_vO&3^d{6MftrHSK^dU-piEFUC>N9uDgYINia@2HGEm@a;6Z7irl53C2BN9uDgYINia@2HGEm?<;6Z7irl53C2BgA5lwQJO| zoiKMDo_wi;Xx#4APC&ZfUAwsdWIog7rH(IjDtGe){_FD0bNsx^bbhX@Xxy<=M_ju; z(+$`5xWQj9w0pAalcMo69iC|4<;mxtd7?u*ApJW8`SjCIJk#yzXF7L%0?B;t88?h4 zp6t@)$yePleEx4w1BmFJe7@t;z`cMdaA|Dfb>n#IrI$qGr(byf`DZ$H^`~l8e)HTm z2;#Zvm6*#iK|CA%@Vt{h*wV-Qyz~M;cMG0jY6#i9ncLzwevSz$n`Yk2&+%NCX#yUOO=TCwTwk z$NM6F@SkgrXP8!p{iob@0zdnD+;hB6VrmWT1^>fk$DbY`KRjNmF!8#=|M>Ct#Si=( z=eIMSVLCtAf69%Q?{Yx?a~(z|3#N8)8S%p#ANSnWrOAS+^9~Nd{rJyK$E_b;vw<}P z)9YLPr`+=6yQLsrhjF>`TC3&bDQ2yOTl4Y5E5seI)A%`FgSEzU{(t@!IB5Jl7ULKOFw%_gez;!(0ABidg@A-|o?b^L}+YPr}_uhRW+(zAd z&V?{-(Y@!EjSsgwe8}``^zbno33&h^l}uVcPVeFH}rY5C)inPemkZvElk zNnFie(GNkw;k^p;nJaw9~g%pivRr&xqbNQ`wI8m zKk$boD%ar;BXh#N$q(-R@@L7*Jq+|doSy%xaNn`Q{j0cd4jp%{nH=7z3ja^U{S^G~ zm&eDr?`b6)84*jCE>(CwDXy_`{6FV=7;O3YKUDbtkqY-6alanA+5YqS75-n0`y2{J z|A)?&7}F`)jQO0N2FS?pxcet?KPc`VRtzFL?tTqt-1~5Pw^X=??@Q6TcC0>J={8w| z;OFaBxc^0k`-=6WDgM93YdMONq;@#sXk#j(c#%M?Ue?*ZIzf5j5yg~wfKRzbjw zB>ew4+}FLv62;#0t8qWCgio+^@mE54UK@HOL%siR!+n$A=xBL&WZ}B^;qsY+`|bZL z2%B3M|0&~fc1Jr8AH;xEf=YR_ci#;6onK3WEtmUzChjMEN74E*hX4N~uQ%QIy$JXI zhvVCh`_=bZg8O;S&zBYce~>4c-iM!m2ls8!sJ460pTK>S+<>^lyH9n8bc5jM|H${p zv}on!rw8sQ+!qkP_nv>d!t+;KBg7<~j zYRcy-OhH=B35m(x^DS{-duu?n^TO|j`>fAH0-{#)d>-zbG!Kd!z5Ct#JkFa5d(YRz z6s9iPBkz51d@XUmy}1_Gc>llVPI02;F&Ou4(TKaFU*hN2RQP`7d=OMro+AJ6sr^>f?PT8j{4et%`R3kRx9ZfXb55N)b*di0!vgOG!p$wK53Urr 
zod_u|utKoSz|`JJ;Mfaws=jvsPwm7(B;x&l0M9}w(`!WEgJ2>yzOO3S1ieG}MBx4% z^}$y0eAzG|sr+++r`xg9K(y~B;F+1 z!teC_DTKkVlK;*F?nk8h-$eO80G_%AA(o=N-viGeG^kVHJ!p|mTvYxWfu~NYQ}QDV zJUOD}!zI8oN7XC&G;6*8Bw>WAyh@Z`PYx4tQTeNZi*KwCn(#Z(Yt*`b9q`;E4LH%1 z-+vsqajX7*9VS+@$5#b|qJ75_Mtx-a^EB}i7kzI6xbe_h1c34KrhzArh4>LszyAh~ zLBGlO4?P6!#pal6#r<`_!Lv()3-E*LTOcehD*v0nQ?G8oS%>`o9Njng|KK;^{*^cj zQG9-sh&5$4B{XYZGytzJjJKm-Gya6#gO=s3CdD#a%eGN9v ziRaG;o`bw1Gk~7I6S!1TRodft%Vj{zrtQX61Mvik3JgnXND z5TppVW4G86et!aZ>Xy3Ti{klPfjf^uoV>XITkHK}-iZEMqsP}E@MK?IaEEyQO5kE$ zgR;LK1eO*bUn6g^5>5O zo;bW#$oso;*MA%E?48KaE$DL@ za6euitK#MT+Is$oWAXe`4cNNQ?{6ZENvhP>iHM9()L2mY?9RrdHW@C-8AOZ(J)Z=%me+RI<_Qx+t!swqS{Gj_gf#+c_$mhQfJoEF~;CMmrKLJnvPhD`9 z`2PMacu`zb-n)SN53NDobza_?z>QnZf ze+50CtqzK!zkf{lHFW{fc`8lQ)CpgvHxq(;b|?E#+esYbpF zUjJ>t$?w!5-4w@P1a5kBE%LT={1Cn0T@xHAa1|Lw;-dO>Sok#H9WTE|SthoPG;Qn{wtTfbx`v1Ry=U3DO?V`M< zcVN8SS|98Xcr|eBnJOjU^1xI7MnWS|-Y0d(}{*)LeTGaV0lsbF91&(`hJfSQ9k5}q)!{Lyy*GMfcxQO zlk=&2fG66uKjiPgQ+WF#F}~l}iS}Y!J3?JpeNF;y#7=}S2znHNizC%RwZNAG%ZtAE zU?=GRyq+IcFPC?D|F5<16mX|8AGjZQ>dcw|DujK%Y6a*~u188Bj@y8nK2@#s@q2*f zMc@Af@Z^11mt4x9zsI`2cNgdl|MhF}EYW*8@N7}jV-Q$gRNj{@d?#?@UG>37MENfO zcm7vH@R=pNyf>}H_wTL3{vD1_29_66nOfm2IK?f_4yy`e(Ngq=lxXyL^`8K8dzRb{(TnS zW3{}?zyD5P(;qrO_xD|@=Es*<&rbo*e4!!8)bj8B0(cVkm*n49fafNb2Iry}8b8a{ zfF8#Da|n1A3!&2gbp`OucvWyx4KHtop3l}OdOZU?_Y-_i(C>9U=s(yWO(>uGe4dH;zgseWBfSa}~4SEDWeGquosNeU1@WZ|2EldwPc#69Ad-vLhz>iNwcN%=N!@A1HM z9Sy;wqWm*}8*z^9yXyG+p8}rPtjG6l^!$fwf}=%yo&}Z{)n~`apf7gtZ4%{u0a#vi z|6U9K131=IAKWdwt?NX;A)#U(@qxZNJu>5-#s`7A^rd{T23usLzeS zMfl6)czGB&R;TAruilLD-LBUw+JR>=S>A&FrSbF?;NbQ8{!f6XuhQe=Z**T=`{U<| zQ}MsihM+;nlbygxjMu&JXX=k@fcsCxfgyt4kJ0nuqW51Rd`C@ixTt^ZG|=CW55vIn zqUXO1JoQs8KWBj__lG}6l>cw*`F-An`%lsl1H3*hz)7Te+$Q>K3vdx|3?XAHa_-aldx{X2s=RM9_Y0nZ28 z|L_&y#_wu=coewljkRjMWv>j{3*-B>D2v*AJg~f|ej~vBhJ61zaOc}#4~y?TLio07 zHQ)aa@H~{$>D9cvmaHs|a1iVQPQF;B{5@9z&kQ2ruW0Yxz)g_vQeOSldVjB>aCvXB z@HXJ-AK@%CQQpPCv#qtksRDl=xGAgU)$f3(&_4|*mfC;NFyz5k^?GPGFwSca5Gu{? 
zpHJBEH~j#3Vtgs{$CJE1bSrR6O)w*#Ujy8T?fqviJL|xD#`sY#Ln4y3EZN2}Y(?O4;HGkd$95d{* zr+_E&)yn_0&o=pW-k(jtraxhgb-x5W{R1Re6YaSMcna}DC4nC#Z1_w6W4*s(1ntLS zKOSfF-v}H7{UyJS12>|+b>jZj!15yc{2sW;u=frh1-^4>a02d8f0ckKHkGdZ!F4-< zC*Q5-?_UM(|5uHYx4$C1r9OxY`o033oT>{-0=MKb9*pr`1fIG?%gZkS&m-tZu5aEA z-2X2v|NaVGe2>--Ed^FJQ9rET15aRmNw$B&x_=e$%&~gC<0;_e?RvfBpd$2>QNL4x z6XGe_7+VS?K4ZI(2 z?E*IaFP8$(4%e&s?mfoy)V^md{4#J8_j%KI8)|W+BQO6-;HF2hUR%q@Q{7JaHQwGX z;7LQCoC(}`Mne!2`e7P)+OYQ@2cGED`fA@@=+7@|`|1SXB=p0Z@foUr9(eK{n2(71 zdkNZZhQAIlx6Mo{x(DxDHreRNfQ7$$!=d*NOTsc|Yj=Y<=*MsLxTrNyz8D@GQ0W zJ;3s!_s4)I@2v?A6a0M*aB)*Z0F}d@f6RXV1E7C{w$HBvo;Ca-bHFp`FZsQu590p$ zI(}{?aPb#)%D;UEu)L@~9|N93@>S_yzrniyE8ux#X}n0Zx9V(^|8H&IH3Q3w$~z0V z_#yZ|G2lsF|H`_5@C4e^tmk`aV0qE|*H3_-4%X{S_X5j{?(g#<@Lz+Df9L`Zu-+%- zOPR2^==mAoVoR-B?|t67zwd|9U%h&Mdjhb$==~({q%j{m&$|C}dLR069iFB3R(%BR zL#9j_FVF@oFDmbR;3=GHCfDC?2A)H(#R<4a?*|{1PxJA(&cf#bC)?|A0yXECyMQMS zsSAF&l;f&%!tcMs!aISdS84nHbHJU(eDPj-9!XPvw1k&e^D)Su2ef=W8hCP(ju&|k z@ZA3O${&9&@XY3=!Q&`~`2RLwc~Scxr2B?Hp!QtU$Dr?t!1AK!ze4zDIv((4;Q0&n z`qHuIf!^Q8p-!T{n}Fp-?_UU9%;S7JasOuP{vR!T`1$A`gC4!W)9=*lCm#cr7nMH) z+}~Osd{C7CXW*t+w0-dE3(!BcI^L=USYGu0I^g-Hh5#-pMz6DhC!sI*#yz6nSAgfX z;=qj>j_(GZ$X5k_65o3scnb1(U-A6wFGPQSzgoqIEC-eseQzu9?7iCm{W;*qBjMi^ z{Btkx%pYo0{K`Lo`wy#8@td#zxGat8gY_fe+21c^Y)kchH*mjUuUt&`_pA>-D(ZW? 
z_5M@9$TIIjmZxa0Tg{t5#@%?4M$*f-gO99J^>T@A*XGxETZ&~-B2A)9B z|B-l>AFo-x+PzXm*K*cXojPt0liXOGKpANu&Uc$fI|Ex_`k z@=gby1;6cu-|7C9*8Q7-i{EMpuw=;YKS|GjSQ z{`os_Bm5(>{MUXO_pz9EvKWtDz)iwA)g2Z5W8s}G(N-@CxN{}6C&i`FmyvhKhBv#9^H){onOA;hQKlV!N`8$E9KB@WZDSH3>2DN^0=odhbb=v>78dzRbpIsKdot{4t z{v5%df3xnt`YQC#w`+rw1i!5Vo;Un)9|e{dm46R#e~+Fo?>Pnfj3a(s=#S;V^WW0@ z3&wzFFVXhOmB7>cRx5w^?ZB}Us@3|+)4=kg@4e}Z=-)3^t9bL(gm0@4P7?ex4%~EO zgW{(z0n3ZtzYjQeN3Gf)Q}ZRfZ|Lg;a1+Mo(Rh}||7pN8KgW8Ipw|V!#kW-jU;2bUi~8JKt@dNR9avse-*?mfzaoA~ygzB(zuCfn z1@2r;2e-td z`hVf_lfYBou2S-17qHwPyFa|PXIszz0(kBsZNF4qA1?23;3R@M{)j)*_}B{E^rc$m zFTR|fzghb$?+5PxwBC>MZ{Ya@v_I*%uYo@gs10(0o>}0cVPAd;IC(DizKikoFtEI+ z{eK6Zf&6|Iey92z{&nz_;Xl{_EH8TgW56@7X#dc4z_XX@c;2Po0RQ3;7s($hfaOK+ zUkKcfO@cDsd)m6c_czg=UhRKr1D^UTb}owc^#k{pwLkNdz|;Bq;2_byZvn@C))4dy z{2XxbI=%jN#I&pp=bu&!uL17gTif?%0Z-J|2MAeZ^tsM@{!jG$f%UGhP? z-vs=X`rrmZuTJ3U-_@%9W@piT7!pM42>01#`BkKD*;NWfAUT?S+{dI-j53vk* z+OTH^fMYLJsr9GxfE$hbHv`ZAQtO}RfoZdhjNdu(+rZ)?dhW9DHNc%G(xD2R9`{-I z_y3N3o73+^;7-GSI~%y^yE>lcdf@3vZ7t45`f3{TzU4lNx0v8`_Q1Patz%gv1miwu%1D=7rlkNL8u)L`J zeeVDbKcVGWJFvXy{$}7g$OkDeKW*LrJK+N`9+z_Zz4}M;ZNgZe0iHqdx6~(B0neg7 z``{kY`vKtToAmtsKfwK0RjYX8raQysod|6DU&_E!OLTn4=YZ$FulJMx1bFfjh(8ha zdk(lC>x486yZ}XffZv~!zO^w=LxE?sRU$xqgat82Jx-K}Vn$!0hx_@Uq zjvD9qXAHw07xbw8Dd@FJ^V3nl@}l}?fRlGI`$}MWQTe~4`#1#S+v53}d%-{NY6!Abygn-k z!})P(E$6Rk;NriQ28TEB`}YIKK31*b_5K1p^PKkgzUDspHF`b>P6eL&>eArA8eZPb zz>`;0sr9?x(f!kFg9jwN??->Wto!>I;Qn)Klzp-pSYA|q8F<>5-+u*oBE1wko`3HF zdfu21zD)Q$?e9JI=fH-4ZZmMv@K0O74gd5Zc#Zn+pM-B&s`zQcES^{U z4-X7_f#pT@eZPf&VBz2a`820rzlF~PPA2PBevGdGPu#BO=XV3o?ytwwe_QW&J{T_l zLg1OFv7RjG|9#-87qvb16!6?wjq(>9^o#KO{lLYu^nCX+;Q8nEeCkH)`PyHCKHsPg zCImkxfTu>_e-d~baQ}hY{+^=epQ}>)6Ym0^G3Fb80iHMfO9wv$ypNs_Yyh5nRQq!Z z!1CH3zuW~peHo5s677Ge46*i#TNdWh5u>c#IMjF2W$Qr0`7mL2FFQp{`oR+ z=d0_3H;D0fA8^x^^}&)le*Xn}AN;!~szLnrhW`S-Sj(3_;9&XE0BVQbzZ`f9^w>+_ zJAspiJbwYW6YGUCp7sroU_4)~I4Eo(xF5%Ztjtmheu! 
zzw0;F{r1PuJ_NT)`{P{T`77#z_X>J{4_IDQ-b29svznj(2e|lHy;`q)!{ey`!wt$` z@h;#_qrMjd&)%o=)BFH9`C-kE)xVKny?N79`a_ZoMzWCl!{=F=4FxwCe3w$-O-tT^3a075-u3D{EJ_tN%#E0zn zl>8cRe~W?l!>uxK(|=Vfd+rM0Nn?I=H}KS-8&v*@e*w?`sVcZf(Dx00MEh3j@v{U2Su)L`KM?VdInO+)TiJi%Z%YmmJ*Y%kJmKQz$jD>?|K%QUK1^ZQT zdMyK%7d?L#@bstZRs8<{v+n;CICh)f-(USF`8KE5fxxlL>Y2Zg`sYO8>Ce`w{cvTv zf0g!c{S3JC!+QSv65W4zDGpWQ^*i}lU<4OkBKmVDu)L_gR|3ziMSE)a^A7`09IgEk z{{U_}O~(r#^=J7uukT9WnGqg8K=QT(JiERjfQy3h@3nNlMeF+q2>+xyc%!J#i@?d( z=<)aZ|3-gQ>-GHOfoGwAkA4T12l0316@C-&8#puJ3l>*g-Wx zm-zmtf#)8t4;~Qs`@pl`)B8!Du-^YW@JzN&`6G^f9`fUTbwP)q_W-cGh`wI|o~hFD z01sRDUjj}(q51icdHFV{Zx7+q_4q#9y8lxP{}#COVr}2nybvz$eZZZu+F*%j??r^4 z(Cag|((|9!@jOph@BagM8tdKCzj44{!sTxPp7@4dulN{nXScS8Zz9~N*Ta8HxTy1g z?D1E;-&h}Dizn;PR^aKqYL$OB1#IT4`7k}dLC0f#1vvR+t;*;0D`0u;k6)_(hUbm- z?c)ibto2I~SYGt}Wfs22!v6uDx>S$Ht`{+0jQKzjcHHcMkH6#}c*yX-ts*QgdVdIb zVnq8lE&*=*K&`UhzGpoj{1f*-rtOCqu)L_emB6t#)CZps{a3W^e}?eWbpf_uu>QRl zxU;0~m;V4y!QN@YV>F(Rd}e7tSA2hxOlPdpQHW< z_%nJuy%$(sRR1f0V>Q~pevfs3-+w`08S}vtfM*PUVvg{d`ryJkUjI)5%ZtkYCh+Wg zb$-A{fF}(5?$!SWZmUx9z$XAV8sEzU&vfej&7T66*M9irSHM%J>wHm-FUz}BpCDKd zTzsTj`5*Gu{jXd2e&AWOuLqFi4%23THnf0c!^z|#l@lldAhrS~C>j>NOn-s^#om@3roKp0L5+KLu_w9;yDqp=+`k-HUc?VS2A+BU(%>DqPyPLZbw67de*O~R{y6j% z{!H|{6*zX5wnu+Y&tG2^{6*Z~yM9S9_ctByxD0rrLGMr51uQSB&;1rYqyhE&+R|XH zur~&Q|jm;&pof!@v4=JWKUm3EXeQ?-YUMMde)%JYndwAJP3} zwTkb526*leZ9nY4C-@Wd;lqV}eKv4%Sno&tI`H&+kbf1$(D$DNo`=8d0|M9V75;u2 zxY3v&Tns$DwkEh$^xy6D{Hl7jU-231{rbJ}{O7g2XaSC0s^#BiV0lq{FS78jfoDEa zui_^T+6T{nzed@MNy3lU1_z4%_%LB{QF%`T&s8-9pT?i5z191oKG=*T;}4Gl?te_j zFRTZa7rj3T9D5%A0MXt%fahlPe*5QvCq7-P{A;gk48Q+Q;C^F0_blUn6UzTI@Z1k; zRQ}1I8u#h@ZTp4G%KNf;DGglv+EAYpG(&Jr7xU z%Yothmsv>I4mRR8x`_=CXH*u32)zIUT_|Gz9;b1?dAz0PNQ6mT$KtMX&K8+h*h zHNkI0c^3gs=XJh~Zv)TP>HI{$ru*O5>zN0<9{qnm;&sIP?**PV()c%u!i#>Y%`)uH*XZ8H!%fPX#b^P_cgbn{t-5W4|41LoE zJbQO-&>`wG3_Op`S~4EzQeb(}_uqCXzW+jf@K*8sMqqi-{UY%6;aZ-2891~*o}Pzu zd~ZBU{PAz={bfzypQ&0!zYMUvsQixu_ro5S^63uXxs0BF{0(^WV8kzo`ZXPf_MU_L 
z0`~#O-lXIG&IE3HvJQ3R_4x+fzd`5c`V+9c==p{O!^{{yg0`)^BeH zj_rk$l2Ht`_ZPtOqVo1X0vPfX(-n4qEwGtybsTu^Yg(Rsk)B5|wDh0+gs`}%yt+5y zeIx$$?Z6W$z5hA`+<8K602eX)-XyTR=>2Db&3uvvyb0y~s#fKl@C^E_~Df4yJ*u%jVgkH`8o7@F$08MyN^$lomb=Oe)K zqV|3YIQgw=wV&mOz*DE`_|`uF&mUA3d~*+8UiDkhUMPbR(f$*F<<*2=HUm%pQO9?F zlE__)D%p1%JGa6i^-8t^;S_n>2Rd367H3-1IjKBwo0mjh2fTN{9c?ERkuH|F&E z_CKxn4|*&3_XBmojiUY=fs1=-|J4L=Q?K6Navkuj;h%g6xbYZ0ANx0O@?AASTzvnS zWAT1hor(|K20V4DJ|Eyb;7$lbxnK8tz;k=+c$g=EgX?Qle&s`sLx0{_6v7d=vntR)bBE#Ut=|J^7l)FlW~veKM34ute;&CTs&iG z@M9E1D&OK+biYOWTfYP>FRI^+g@141cmnj@t4^&)jsYhzf0pw1 zv%tYWwEnsSxYL+FKTpp;RulYMjITE}hrgczp8Jr_-*Y8l!+&xQaBNwF@@G(DZX|4gDxlYUXd+B{~5j~y&p1Vx@bN6e-_dcxi zUo`{Ei=OWV?z}+z+pY(mzqBT}1&Hg1V#fM?8aQeA&prv<`9yFNKBm91FdOnvbrb}bT4Hk<* zYW2E}E0(WIrOM+2sh(8$-j?36b?vECuE3758y(?qY}wF}O5wP?)ZosY>k@4#I<9VO zIzOCErSq9oI+ICda^+%rY;bFLGMB^obNTXEu~aA?x6J;UC~>efRvsH48ai$;NTp8d z+LT(?)7zIy1(|FqJDe+zWlO2C(bV8bA)hS=okNoB?grE)4gzB9nN zcg2zHST=LqvgYOvl#z#kY;S~d=hz6lYH|H|hLRY#t zHeSjy%Aln<84%Si3s}mgM$`H6v?ScRc=H+Kx$Ib1dSqlEJ-98GTsznsOW|KGxjk5) z7G3hcCUBxTNNw4&Au+hU8HKfEa_QmjO>0qnH+pesJUNxvwhid!`ZP^aWFNu zbv(cAxPhHPYUR5&EMMQ#h1-4WyF~f2m@R!h#LXSu>_AK$x!J8m$i`Kxdb|5lealy@ z>rMqHfpn=tX*g9VX7hvFQ;HK)Yc{UdJ(uF_nTp4WUMM-9SeqWf7+sYc!MI%+@80ed zH9C^&+P<={l+C74_s!{2&uFoLBSiCq*;E{@;^oAWUCWcD!s%ds47m-x_(ZBFUmPD> zP6u~(_a;*K$8RK38^_1g9V#Fm>)O7lFPX-9pye*A1243#jPLYhcKQnR6_s=9E(T|+ zhYwO}z)Eoc_O;nkK08t-I@pvOEI?{?(f=?i2Dfcuf}PW=cWl{qw69R=+1i`#+1lNm z?j`XOcgYW$b+4y?6+b}DBN94Q;M|z%MVDj?E7t2@Di%lVRz&@B+p~S8biO=PD2;lH zS1l}OL0-sxVo=^hVu?7ELS{oDza?8LESB`5$J%?x2QsICxL?u3oVo?448BfKxrqeM2L;qM$&$ zwQIZhdU%XzhNQ79Nu25K27BW3l0jQsGO+JJCa2I6GG1sjP;GUIy{Vo-6f?*Ri=!~F zbPzI9uqdPhjWLwDYj8w}hj<5lXRMIoV<*KJVUtwS-JA3$4K~T~&$i&R4vkoENO2)P zmmV2R#kb?@@mxqcIYW!sOtiaH*g;KRO9FQJ$Z(;Q8{0bC0n0Ewx+XhPq@m4dLibA; zSC9lF+3roKC0Q3HQ&Vl5xpZsmg)Tuode95#XTbp|6-stPdP7-oN>~PF@K}Dw>7AXc z*Y&LEI_ z%tjZC_%vC;5V>b&MF*r3$Hk9pcG{+2hKPd=Quza;>7Bkh>NeVnnQZk6--JZZjI%0l zNwkGWlo(hm@z=qv-jM}i&_kn(Kf=UAGlmw4iB{hb3yBA27sBG{>FIImCz*16fzH@2 
z^U*k(i1CUt3nNx0f!;MfkQ?-gNuMHjKC>9M8l~2!;%>h&d9k8UD38IA;Xge}S9grd zeA&?=X~<|zjR_6ekHkosu3QPSue`dHgSr|@gJZpK8v2eIIf7zZ8V;d%pelsyzS z)`TW;XrmP zNHZms$I0~F8BE=kto3O@v-tN4JerfhX;*SxqjUCwqD zN~Nr2kw=A^q7|o_Cegn?#jzZIt{N%qSe3@y zkGxO5_ZIWfOxCNaC3WQ}-jOa9lX^-<1D{U^+K1BRu~p!JZaBT{@qtP*O(NNxynp0n z!(U5`g;*)=G+(CK2%9ZCWhhQ9pTTFC{KF*IThZx#-f0~Kx)KEgG*WW;;Y|_Kd8gVH z=$+hETDLZZ5zQ;nTHIASg^X*6p@roud#tq=wppqNR<^XK;|bQa!ujYYf*_=xdhx+X zzS($UeR}80+|ZDZ1+@Omj$jT}#z1YAgo#77WP9NONDpW6Rhl8>#5}JzLz=p>32%2U zDTXzL(ZX;kUEB%-L(+&ks-rkw-kKslrO(7z-?RxwjK9IE*V6;#Or^%5^i+N|=KfAM zvQfW$!-iCR{kkNqCG{&~-oDMfV;uwOa&8bV@iL}|V{w{1!aI#A@fiNl-5qYfm|anG znn%Y+e4UR_|d`ob6-U#m;$Ob#ZdugC0>7uG_XFlYBDUDi}?4Mq%T(+DQldn)C# zfLS>kU}>dCR-|`fMCv&UlWkOC&iTu7qcEj2oNJtB+l>IP46cFmU&sw@7pj@Fw2YXA zXv=^oY3!RviEfIv592LRM}6e?=otm)mz70IJ?nJ~Fshh}km(vYCgVeekqlF3sV#A9 z9)rUCvllE8zzT#L-*pD)(K>`FD|Z`Jni*o52i-CVvbM-U-D>Gtj}O#P3);|CZg5Oa zr8AvLD&FJa9&2OPb2D>*QDewE6HPfi?ikfWN|?K!MzdpE3rw$a=4um4eF4!xi$d_p zX=T&~M{FM%!OE{eLd`N7GnUDi^BBFX$5};b<^z&2CCMw-0j|O7(Rc>#6Ibqo7W}Va zYttDa%rT!zl6yWyDs;ohda}b-X7hzy8OlnGv#k$f zhO>g~XrFMkbrx|@p`6{+=M$hwJGFA2#RZw^gg z;%#i{K(1ktOz_AQw=3`w$j6-EWAaNXe|j74(L5DCMTa7iA*5L^e|pT~3hW!91lb#Y z^;RrpGdYr^uK8`q%T?h8J+%Vmz7#=_d=L8sNF5qT5E&p4DWL9L#@CZ&rD`t=G6@xyMJ~~`-a*gw`lr+=4k{C_;ETY-ieOlsGlal~lXDD3i$<%wmA{ zmRnMrvPC#MdsEy)SjrYt)OgrPt|bg^T{g&kgId>yN$l$gm-A$^&P}N8`@dEFWN*z~TK5f5X365l(&vYack`~7d?{LT1RxGw| zg?a63tN3eyFBCVnM}lh5?#|SXDq&ciFlQ`*P$!Zn}3 z6N^9z5i}86|E2X{bc(UaAei05MUvNH8^-w3E0G3k;&4wgi#o&`A~Zt|jNY+!_$uH> zfnR#J{WFl~Jfww9d;1E_&_k6bmm|46n_Af{U%~8RTvLMOo!OC!vo%AvS4>SiKvt+B zs^MKrk7B;X=bRXZ?Fh5TLKS+wEj}qgs@`E1Hb~W?cnY-y=dcxF5SuL13eEzuRhg>C zno*IWKFD8SlSJ}NkFd{13nk3&uqwN&8xt3o2By>i^*N`&M?Mb~8WV4G*8C)6(cMstRGTn+`EX?R=YoxcYXWXuvSK2*hgyUQ1?J47 zG+S_=vdNV$4X(@PhsUts>7x~kTeBAihDOrE-Werh&$SzPunxR_>eodWUFa9gR=ab< z+5G18NO^favpGBH{fcaYloDKC!UgPUK}9)4p1|=k6X}rzW%^?~W)xU=@qNH#r?Fkd ztPr+|gntFsCwrk0wPV^huog3_sN>ntO~lRcnC2l@Kt`DYVX?@#IXyTy4&@1x93gp` z@yL)oU#+1#9DbHFvsLNAEN0pTICIW~HkDSSJ+>KfSr0HzY-A@1t_LIZ72s`IiOco4 
zNVbuJVsQId7vuz1ecVx^Kdj<)ICa@nkvtH*#dc_-j}8tI?22F^f!UT2bv)MYRKyRh zOAjm>867~MKDTIsb+CU}O^qoe6X(!dT7biWWWK%$mD zC7nZ}FhAaGgv~UU$47nKN`lI;X%=2{vKk-3DSGbfXhKium0YKaFJt@5+ zdIp#=b%0CH4VTcP7Be|os26d>zTQD=;)Ft@-~~rqFN%ji)zN`%;2koA=yrJkxBE7;2aNb^ zn@eS1_s9@rU)Y|Hqc8ztazF-~wM9y#5F3q-bCp`8=^&=ggA}Meq9fZppNd#1g`vaU zFIZ+3v?lEm)7~quKbtX}x%3mka_qTKSSw$kEpLSxw+${7U;k@94S8JYP3=68i!gRq zn07QA<-J>TLqhvfC$_;wxU#Urt0R00g8fZf01?`rbs>wq^blUTmwCLRtI*`&4Fw&C zr`^gsUVa6gXmPluk7y@15C$j>#VrgKYkS=A= zhftedqgKl<^zo@#>_@?dD<9+9V!&Mv@x7&sAItT2MvQcD##C8u~>kl>qzyC zM5ZHmWYY+e8H{vnEA|~koWwdd_jQ@v5eKYjHq*^yA*84^t`ujhrS4*>V3&3b_vJ(A zB)!qWr+CPe6>r38-Ma*$r}JJNC7xS`C5`bx?<%EF1n>aw1fogI0CyQG>0Qf)G7QI3 zlClwBPsGU&yP9p}+gPG#2e!}@(KNJx@-->xT&t?k*Ji1@>@Cq#yKT-xtYFc#++ zBgzI$7(rWKXEQD>FQbU`;e0oCF79T$zPU8x8^TelEv)FcE0nq%OIW$F^_bovT$5=w zcv%FulLv-;L@;euuIgj!k8bxf*`i%Mc!k)QMk9-fAHderJgf)EjLuH%Nm#xD;gyJr z$0tH7lW_{4=qq9W40cDX!<@+4(a{WlK+n*++_G*S-7EPWu}sVv>9rxm#=g{M3g8n_ zUgRJ_Txk}byag>GR0-a+IJ2j1D?rQC&=P^KOC%R5^+tL@`bj+Vr|4?KHHENDZ%6^F z50M`2qaQ5Lo_t%0gm4K+f#RPvxudhfoJqE*M_8jykYa=Y?}aD=uVqJ4QF9tiRyMy~ z#OAXiHbx40L;T!~PNQXfK0Wmf%#ctE&nkQamMv~*V@br*NAB4(`vKmKLPWfmZl?+l zq)4JfjQURRa;mTygKBQfQrLiSnA9>z*dB^xKB?)2n0>BvnXD(mu?R`@y$Yb9l^E>( z!id#W5PQ13ODB>!N5fJH>>X(yj2Ccy)4ze*mK0?rc)>b}HmRDX~Qgc;xM z>5Nci{chE6W=W8+qjVV$kMPljg0Ah77m9ORPs^fO5X9rONu<;VJz4VFwiuE=@?$W% zz8GWRxfWWbi9FnESUI5ob9y=R)>2GdeC_vDjLB`XF|^;M1EQ#xE$^TZ?8>xB z#yXHk#Oq;@JD`i?HRoh$Fc}i{j-yJK>^COc8*fW2~8kpe~4;5$`RfLIKfMVTD2x zz_Vz})hyROcp^boqA*FVFhw85z_MW(2R%k12(3!bxvVSYM3;~kQe|#}w>^#B!BXla z;+&bVEHX|DhTT|n!&IxA;+?cq1u$CWFr1GcXw$IhFGji!J%UuIaGM}Cv9Zb~fN!yN zL|W*k_|#2sJ<_H(+Oiq8?r3WqJ=MXUQq*mikrENK^o=qV`_Y{7uMhcNvijb$N8A_j z5oH!rz?RSgp z^~vzR^(2X@S(kKoZ|@_s#lryJHCDCn-5%T(RLnwlhp;FRvFJ4@9F0f&YkX9ZgftYX z@vBFdvO0%7v=J)68V(p>GF-r>gBEfsxY0GFqU03HO~rTQ^1ZoX|Kbg+F)1OTw$tOV z$koV?w>j28#DKvtOo|XEunK0dSQ}t5BrG6H4OZp~@o~IvB}QhSTP}iGcI`dSNDz$84LI7D(!H}- z^6soLJ8TfsL@RFwi4Lu%q)1ol(5`e;zha(IPE!J}6wJGl5pPb#GdA{hLxid4QI^Vj 
zWl@<@H-=3VxMqw1En7=DENa+}kv`~9?1mN9x0rB8og669nn5H2+lwI93lWo#e#6EG zA7y+D>P|rjYAQt@CcP!l*Anz`F5_#CQ+cC9 zJf?^qO0EFbLn%W`WbOk}=_t>b`b9}wZN0}6@cE;uT#lkmz2zwLhb4R&&5jNhcU4-l zl%EeNc)5UX4>9?0kMMbwvZJ8|&6coMh@d&h?hshYkuG6!`r-v$>BELR^LgZ6t1#&bv<|k!n*aTCYVvp^7B4m=1v|E9nnANkVDrIjlM;7@H zSP0NbG!9f`CzxE?VU%rx{flEifePio)4gMGPl-jw5grl8!u{c&5AQ{_xxy5@dd^s> zCvtgACH%|MtT5$yibLb)^5&*JJHT^8PikX+9X7_hETzN(C1yebGGTm>SJ?k>7roKD znU;Am2x-Hp%W@{M#|mR4*ZN$7Z2Yq7U@nniTMzBwV>eiafiq7z>dsset*gSj@s{!J;GEZaAL}-H#lN?9 z)?x+OFsqP202RBFkQRCs%0H;QlNjXPM1#&Ww}zsM%X%q6g4levaJmcl_IxY#jNt{p zD{ldJZ@R2&H%JG}G9E^y+6Tu6vYW`4=WefMpW?=eN|xoyTgH#nFJQ%V=JfIM7_#s9 z)suFbg0gr;=rb{(D1iytgJE6m8*1i~nK{rZu4Kv4??$Cc<0wvvOS||6ti_6a3{+RB zfp9W1(a$w9ID+pytz=ZoTGR9fA+$UTgxk@^7rTAMv99zRCFa_gX{>lm2b-OoB_m&J z?UzPLjrU=HTvS>pR#(GuaupyeBwb){)4$9VM(=!x;CrRJj>K;c*@- z+oM9d+yFfHMVs*4p(H}77t)cGxm#`%7J}HhAz7b#HNoK zpB3H7lA7UADk;iUvIOW9vdYz3kuzE=NJcgg2QWbyWf?6~akh1n$O*9k-5GBQo85v= z;ibJyjy82VpOuYPz7jC&=0d4}{3WztJVwz|`b&v80+K1i35!$aF}So`#d{$X9qaEH zTF{Co`VjWmMWJ;F(Q-HO%PL9A%B>D7M#kYeb^DF=E!dM3e|aohjAKbcQihiJ%tuJli6Gg}-e9$VF~nV>ESDq?$XIHng0W z8{)_bE7D5IKCE6_M%Uum(H-%+eq|L@6(MoD$c_m6FkB0fj~$h=l1Xx$(!xbIjXLe_ zXxXt9D?Bn@R?iog;iVoHA&$g;Qm)0<6vl_QQha+kx!Cq%7FSU-Vy8fMQPX&sc$7DG zId(4iSX{Qbk_VGL@id2@G;eu%#~~NrFQ9r!<9)Tq;sPq*-|oF zz-pGviRAkpt7mwSvpx)M2ntONNg=x~nB8{0gtmOyEz?}OKD`yG-SdS3e_k&AAS{Ft zZidNxbli4mWf6a1p^%S-xm`#^GB1WF`H@jSO$bNW~$b4$2me+cC$1AinL|l z*wW|gDl>#CmoLKG%?%e8IgZc_MCa?}o5)=1!j3>tlRvehA)6vCaGlY^6&;%K!|CYa zaU+tH6O{YD!W|RlET+gn)OL6{;RN$7e|X)Wy*=Uu5qay%K~_!Xaxs?-23W%KxLnV- z*^<4)o@eRyw1u)A+ZjEBS#u}tHRDHVkkSBWBCVDfbu1Qt5hF|_*FYQ%f@b&zFJx{& zTF}KUy%_x}bdC}Wk()qR{X>%^?$`6RRdRHAGOogZAT(Gwnm=ovZ-9k-{8&t9%QvUw zDHPrrPOM{exO^plzukQlXezpo3yk*O%`4d=x35w@qo1#6yDU~gJd;g}6LFT~^cXsD zis;EWV4)BxLxzE%Y8Rq#q>w9{$4+&?D+`CPTPN8|QtpJZeYx7*emslq<#c941@9P- zM;+r4ygU@hEw=(Qa_6{j8l)wQIb;#Xf?1J>+KAjXXE$UNS@hC*KH+k!XN=o$B#~#< zqns___!k=Fam${kxp9?JT z5;+S^yD@fY%D_i{Jf=M&i!Q(lnq5?IJ@Jgt!!R}J_&&Z6-?P$@Y!!rv5Ty&-czkPu 
zZ%cA2U1X`gm`c*ZMW%C#JekZ|S{^K9hhmkKf)qF4I%p6p2v|rz;?L-)iFn*tjpm)h z%?DlUsHHQq9u^TlF$4p{joFfu(>46myHswJBB1y=G@NT#bTL*8d>jL7(5Ern0mmec zVhMgM6zI{x>;~F0?<<|Q9cj7ozi>Q>pd9M`L`--aGKKL0Oii4%zYOFOcDrl&B?nDh z3ckGGZSNxh=;Cm?Jihk znnH$n@Z)!RZP&Zo=(A*&T9r!@iUsk2U+~?AS2k1PNibdM1jp_#SKJ z%Da-uiq{&L4IeN^g;QcQf2bqz7FBOvbmT!g=uMH17n{D<%Z(i9Rj7(V%e!sM0BzT7 zl;`)R=&Yb3jVQLQ9m}b1KbU%wCj}6elcP?;LOm26$eJa+zQvKy!tFBbM!^>Tu`Ewq z&>XcJ)W@VGH{|}zM9g(`Gr^MJ{&9O$f)=27TaY04%NuPeoYrRk%fPw~Z`A8u?g^H$+wV-8d@_Mt z;W&=jPHSlgUfILjWLC55qIS;M?`w%ABX?X@H+Ni@v%?hk#y4GyP6SCpsZ~0KOBFiYII!4WV5FF|&KdMVSqTRMFVF%|Zx4&F6_aCw0j7(;fsD!hk zZxUv7fv=vvo(TygUvQ*k^d4cbkeE;_W^(ipqqVZ_b@nX59`1H=MvRbL2W{tpTkJ6B z=>4kvAv*J!(kH_)$OE<29s)s>b{(52^M@^#G%Uy@G^V7*EY{$SRP54~S20{t$qL5Q zI;D^1Y8L)tEN+RahQnWYH@Xojp{VeM91o^1iVKe2^GKVs23ot}-e`4VX*-|y_)~27 z_>hhS058gbL6BDMpzhdI$dq!!@CYon=U|>WR?1?8jhqEOOddeWr!A@L5nt$oQ3IeAmumIlyVJ7G^vg%{`ggRrpM-Al(I|(4tn)BVyxR z>rPij#;m!|*pk6HZ`OUqb=es{7W-iIaS9dJPAwzYT{uA{^%LFeJ91^Fb+C)ql|anm z+0l02uYr(<{Kx+1f^J0Kf$@WrI#Rw$3PfZQwHNy>mIaCT?x8{<(mBn=UB*1`Ap7=z7 zU7OwlvPtU!+nn9G=*`!}D=@HdUNG6FzFI{X1s?ZC#5f#5I9bLUm14KU$ug!b7pO*= zx;a{sn2N)ZBaGXX%hZf)cfFu`m`hJ_oC40=fy*AG4yJQ5R|#Te%f1QC%JWf?>S{?*g^QC8v8EGw6*>je*5jN?p}b@vfS6n%fprJO6VQvO!Dh_O z*g;pG=^(2L`P4;{Cg0ewr`KHD?ke?yt^G6I(LjlLlo9P)wJ_O%4q{3p5r;1Uoark7 zVa;>5dRLJw;&f}5%X>cA%@#L|44P0dayFfVx;6jBo5emZZVos+x){Sz2U)R{4Ma^b zvV4{EX*)ziDtBa@`%xB=z}Huj(*Q>IE*i~HkG_#^3kAG@dAU4bvy$wmYsxndYMuz$=WruI;%aw_l|(9BW3gmvZu9LJb~DlY zYU=9*bSE`nz+Ib?7Vl;!b1Pz{IA=$%teXxkluysg*+wf%wc@LvC^fv=NW_6H$TboF zEZYdhPf_QYqkM(ot9VUs==L>U`8#uXbfFCvVe7>8{nJ(xI{Ha%GqDwd6mF*LlCe2& zRR_YG`0*pY=6I`L!RuoPa^u&ieIRVmDMJct?lk&RTQVa_E6cmxCWt+6O2-8^z%gQ| znGJKREOxm`_yw6RTRz~O14&XFkxCGCaegKc_bN#>7It`mYiKwSKS;3vE#r|?wuWUb zz&0PHVGWLl6Q3y&-;uA9fL(%+n@;9&g3UXdeb%fY<87@fW87RZD20(U6qz-U4`qW* zB;*p+rrdBYgBfWwD}q2W*;`!sE0Lec$F_KOv7Bo(&0UZmox2Eyo8>2Et$u@j*ohbV(JC5HFYM@>;7u_;hgy5Zt& zSk=NZGe573pO?qEKje)=1^7ST$Z)=>c@1nS!jRMl&2pI)VQR{SlOq~72dHQWJR(y# 
z@R#`@TFPhm6VoofC4vt{Qe|luILB^-{#ppIwD<0;&E!>6r(zG14Ba&nu6emwjtZ5v zDEibgDLk2=A01D47r&)6>QNyVxv^7WKP{(}`jp%Zi&=tS!nAieR33YYj!Cdx4$dZO zG04IS%wzMIOyW7TiIGvUbjYqjFF^1JyM_f*Ma;eh9WhL47pYBZ_5~eV!b;tih3O8D zSjAlt=2i+u_XW06R8%2Gs3&!ST}jz>vY%NkNBId%QP0Y5Hnx>%X@JA35z^_Cguc-N zi4e9qd@;XVp*5@1F8HD=R4lUf2B$KcsH@nOxcVpbjA zRO2M_5Xv`(>18)U7m8&sDV%Q5;x315w!V(CEhe_a?IswA?qS($JW@@1!7LG!pmSKf z8{X7r!+6{<)_m0V_Op>V-)X$$L0LmIb?p$f zTq-be%@iuzkk++b|L%F(rnFi?HJlv?pr8b(W7*zaqw*}ou-bA3lkN*=AF9Y4OqtQ$ z+RHH{>+ z;GswsX??>3w~ z-`j3fMT7;jmS+VuPv52~T*ykL55(Y+t9;CY(?{i9qjFn2)}7@x0~SJqR0z)Q6Oy8m z?1uB#ICKg9$ZH#420n-08A@XkhM3}92B#<3JY%zIXwLu*Ruo60N3ZvgYhoqa8v`<9 ztw-<;j*lN(KR&`zu=wa7(SfuOElQvaE?vn}`f`KYy6DtDZL~BShmDPnc>Exes#4lv zyim>p3ZeE19M1eI&&k3kNuy@r6U_u=K$teMHmNyP%Xaj!Wbgnm>#HBGLNgQ9`bPzBt(=wP z8IlyFnnUz0BJ2j$QfsSb8oj7WnVrD*{rUvN0=B#gqq4tZ8meO?$X1oGQ#_lOn{-uH zQA%0i$adk|(?M7Zl9ub#HL%~2n~P`f^EF2bd3$0cft7qS|>uDmWz^8nt4^Y zjWj?L&5VWIO{l(4S{iRt1{B9XS`jrZ7f%o>V+-lYSCc|7N0XTAqW=xz+_Am_GRCC6 zp;VH4q%cB|242GoJWu-JoAoN@6u~O4@oXlb@f~pjJBZFmb6>P;1CrZmMCH_3WSC}} zUD#kGl3U0N?Xlw)jNEZ6<9P-3p)D3-NW;2`$K(p@|9P!UuFdNW!-LuQ0$P_i^YL7k6S_ z(W|g7v1V6U9iXT?E;`I1Do}P|UBB?r3qz3p^i-vz7wj=0!%W`uI%#`q6HXw8d!^Jhg6)-AcZHIx414yBeAMb7cAsoy zmYi1{v!E)BxK@@_&-ZS`42r!-Q#1r-n-f$-hlK3N-P8dT77s@xcUVzA^sr3^14i2^ z2J0B1`cx%M@YONVTG8>D9o5-0=6%P4z_(z0V}T%U@x6%DIp1 zi=>+!T){*GRy}CzQe_RMxR1rKxN@5DDN!HGw$@sB&lbosJCc70GdF+dh(m}Svr>-) zG40ZEDxT^H_jH#p`lHe)(;hsZYSeWA?)fGHX#|78Rk$?sUrmo$|<|rBOQ+$=fCWB9mNbgPX438t-TbuD#M&Y;)@xw4(GB_ zy@-Wixw+le&Ec7mdBU5A@s+;d5i~BZv)cG0Wg;9mJ|593&0W!P|Dr;6Ssb?MJhR&> zS-eLq^y{sESmnm62f?Cbq!5#OQOiVj-PWaX{J|hXeJGisYng}BDj1fyYgaue6 zX@qDgj+e1RZ~$jXIp5dnfkm&$XrhjR!cLx?+$VT^GLgb0b%d=U`QWQXV(=78B1)9a z?i|eGc=AF%RU9jIc6Ob1+VT}WDWnrUPR)vAzRBNWD?;a5E;K~RdU9iN==*j`y~y2r zBGr-eDK;vd-61S5Z+W~_Dh$J?%i>fyuXND9N6O|Ph4X^Li|;!N`+}oeto(AcX0_6P zju@*l?IKY-Ez%=mgIc^pIB7O#2U~_*7Y%dv;$F|tx}03wV=;BKX2WKdG$yP`q1eZ4 zEVCHvVI7>cL6yaJxYWVXQ!{6}YZ$%YRGQv zS|tz^tO2_lp%<=nXfL$Q{kl#Oaa)TtyoCG3)z4b9de-L$u&%j{(z^1Tc24DlWV2`g 
zL*pYOE0ZMIKsB1u5I1$GO>k^kNXr}33FZsZwxzZR&5+`b%4Wx~hlH3rs@O=WU=}%X z|GdZBi5DoPsh_3w@DQ3RVn&KB3eYLdsU9)ka3^D|r`ETk!iF9# zjv&S7FfIA_F6L}1A*Xy>#eD06y75YcE!=dN#+>u_~b%pEeSl$_>TBgAy|5e4VdEF46_C^+ZOh@L@$h-tv_trD7Wm z5?T$$&s8HhE@_os`X$E~J9~j;;HND7a@$cmMLHm@jyIX1?{nN~?hb89sgMyUV|#_* zZq1cx6%=_$@MAkt+wu>D$mMhacL)2Dqpsh$Ix4O{YCc=aLXc*$+^zwNQ--%__df(>ij(H%=LQizDz<0DTk>_lWrw zjL0N{Tab8#k{|TR?J2(UH9_Dp61s%EV+k|Qw67O^oa2(kRb9e8>aB}q!dnp`a!|AA zlF$;TCj>zj#4Y2R>wd<**Ii1I4N>! zj6>(fyh)3=Eo+6KxZElUsN`bEaoJof?C?>+>tL4htKB^(+E<2;vp@yU*-24WmBC|> zOTA+q{3xJYq3k^bsD(s+cM|rc=gc51%7hPTTxjrNTieRQ4z|%{p+_@0c`l$7pH^;_ z#gYf&-j&yytz{^WyCF`aC_p$Z$V%lL$vDP_5AZ&;Wrc|lZrrG3KuXQTf7uy%{so|G=e6<*fiLNk2E zQkPGMu?L-BaX5h2Q~#v_Q;N8 zN5wwF#4;K-I;zS1tam95-=T~`lKA4|p@c#ygiuMjPOKcDr;}XXn=uQ-lq!sdxAX<*^LG!1D(_&uaC4-)7 zRr_L{MzLP@FUc5d_bM=S$QKw&A|Mf`c&n)WjG>uTi2gn{c6m_~5{Utg1cyG^=Frl2 zPUm7?#7FJmBUdZwnw8b+shcZL)+sFX{p+!o5GPlA+$+TSgfx_YC@JHMm%oO zH79(Ida+E)yoHOEK)Kw|qWt#Z`L|)>w7^l1gV>-72Dc(+Gvqoj$LWHye-o((;b4QK z0mB^52B{2MnX2QBV%}}D=va#6WeoC?R0qZrIV*8E9(+Y%8=DiaO(X+eEQyHyS>QeG zQx{$gDfin%wkuy*5h??9R4ST9Hn`4xK?SWu9VsCBvqw+O{pWA_#*OrG*nw-VqF}5Y z3RrTVkyVjHh4DW&J2m@0ZA>xs)^f%>*-}c)EsaA^ zJ+!FpYqCk+r2SIMaBm{^a;1!UqGL)~z6dr^qFi=7hmJOV+XuH}3(<%_NYN{UD+ysU zQblSe1SsToc5Owdb#_FqfrZ3A<6o_vWFVB?B%@0sy#_iOPRnvh28d zZ+KlObC@`d#kJt=FGqPJ7*pm);FBC-a|;q~ah|Vn{kmid?zvo^XJlX@w|22cx49#c znZtTr-4oQdz{;K-#meU1W*8-x)VrC3IJ_d8@3%{R{Ymx^Klq@ClQq1OWswVhKGCU*7kM@q16U^}cfEnR;;IE8?)W<7?J4Ynd>kQr;?XmZLv58ua)u|-8 zfz=LW*8Bh4(PrqDUe8H$q*(m>ta93pGOR{u1hv1#@I*LtF*wDQlUlF2EM|8s`Y@c9 zi28{WEROx4WtjX ztRk-r(y$W-9)(rt4al^u8Dv7g@!iMT=8}0PeF;+87&POZm{A*bo~A81H0^1xS?|{I zp`pbjrNv^oSg`RHC?pJDt57gb~Yp)>44E z!F0D*xE5aJ>mp8P2T5-z&~*-n`s0UvqbJ)oa=$p%1{lXrEz%fv%N^$lpf9^De6cz zjcr11vIT9IdqNA#+ei6!Q%d(QMX5@t;-yQta*-n2koT^+#iC^Ba|yY|CDxo^qVu_- zGa>||g@ZQDJZTl2M)ok&8RobePjryJa~8m4WxQr5h7_%Vh3i8}dB}xHGujIBSw$=> z4r)j2K?+QiZRtU*0nt6|@U%I^OI@APOB?d`i(14YLW4eUKVZ`e*$_&FowS(a#F8SJ z_txy-Hke(MUO$I}!Z5V7%|o6&WtlH#fz~!WDeQu~)6hMbI3-qA7S^GNNw;OX=`!A7 
znbUgx44J&_kjuz#G8<_%QqGV#mwXIjwB-C=&Su=}$wfwaM|DI<$3-kxvuf*I91)fs z8@XzuA#7D7j0_!0%9TnYMn;wqN0{7ITDR6GN-In|m<|c4724hqPlRjDfW(9E4|h4_wnK`ZaI~MhPrO z`TcmSwZ`b9tWR5eR*K!vDrp`H$HE3&$P|%6%IXA%1O|nbVZ%8?DTa8$jDltBx@uv? zDbJ{+s|l3;;9rQdKyQ9Vq}(CBSo%Qfl1Wc8sJuuWD^eNAuwbPCX|0p3U;Fxj|1IQU zXMYNBC;_({Mv+4lTg-~c2RfcHQs8uPvqvZY^F1jGR&jaUBcZi>2lwj1gUHl7kxK<_ ziD|0Z=Owyb?v?WO1eXEP2Bs%0 u?S>p~Y`D~B0pyjSAU)wAA_805`6_)4LK^Ci* z*+iNh_ffmLGJ=l{eobdmjcoOkjD_g54r)S2Ti$)jI?A?F3s?NR9sY*6t7)5Mai$lVA&-N;BDlPa7?oQb0XY<=%8 zKdOPMl}9AGKrKcwyVPjM8^zB19CGw`&pwJMRzsVx@lB~6k433%Dy4Z1X{0N6xJoKtBB^y4C%(P0x?KE(VVuz+`6K5XW5VOggKjB zEQ4m;U3#1cLNQn7njOZ5~wHeRuz%iX$X+~Je!PHpHsf-A_ z8-?2>Uh;@mj^w1!K0Gv*;uKtE0rGK*|TmS*4^EaqP+0*w;pVKk=|ahd4Zo9y)0r6Mi-f_ zn~}6r5qqv({q2>L+G7`?URcZJEVXO9{0%fRqMndF+07ZdA<-NW=z(b; zm|4w0q6l0_txiU(~Bs)+jlYrm^Mdz?A^YWM}RsjU*Cf%q*FUN9sRw)I^PiLlA za;>G!&dSttSu!G zAZZONa-078elz57NX}4R7cCGZPO{SSnEUufx1UAW?JGo;SM3B~AEOeAAXW z)AtToK6#PVi&Tpi%p&xelVOa zhptX7zr8GbL^lHs3-g+jy+!!%5lst}MWXUxMv%Nyr;M=1KKGCPzTXfLU>GtZOQ^Jo zHL^Dd&QO3R?{khfn73>`j-a7K6ejb;Iru{zvEaI^)vmdyIYSZYCCFUtG!gsxgOtLg( zL`o6T>5QjnUD~ilCZTq9{RYVBZ||vR2%rYnPgkIXYodliW$>L#5|8t86lx2>O@OgR znfxDu-oRHvx7Uq>0!dVY|CM(pN<62~=f_9j=3Zw3W008PpHkjPg75h&P9n#X4f%}) zWRFEpzzF8;H6cViG24|h)D?G6~Dc*C5I!XI^{FOZ#d# zq&PVvk~TSFiV|uBFk2R=G4J)I=197I5c4lo1>%V_&+sU$j*LW5>G(p8ttF0@XetAn zC3Cy0!tuUOnXfwVi1pL~lYyc!LMBlKY{13|VBc?PSejGKAcR<`zTI)^sy?*`c4^w9 zZ4uL_iVC_aWG@ktP@XW4THyaHbdinDqd%|JkDlmlKdrTr_S$py=boKNp_37%_iA`I zk7PmR1%;**V=-%nYfqapMXr*m4nTrG5$6oh8EAo3hd4Rd|H1Cq*K#RaOKPq2yxT%u*}W zL2jr4KEgW;pc~*<>pr7J6T)5Q<9g-y$Gi1SFVnu+Y#C?W$KEYs>E8Y0&TPz64Cq;- zjM0Wo#4QdccY%{-AC_B9dE)>S7bP_W^<>(}7I7N6CX({qnmTj`do1S*Dn~}iGXtA1 z-p{DSdePl&pKos<>I3~?+1J!sAspv)It_rAcPuxuCWy$0j64wepwlZY7Uo@s`BvBK z9@F|HLFTA5mBN6Wn;Qa?iL1bV2Zfi;TmrT!t)l_wGjER@t%U~Q1u3ZOt@c*eP`iH$#MV|_GD zT4on>RcMpNd86cB#ELI#*D9M!@t8Fvcp0g_ij5tx!F?PfvpLzXCHyjrNuQn0Eb~@f z69xK@`xV01u3t99CQnTb_1UV}woP0?F0(n!kR9GimAZdNrr1z7pIKGTCDFQzlnU@< 
zrzbd3{}by5;>fIN5i*Ny<}+*h9yz}|ZnmqkO0U~(cuGWlheBLI5vVY}c1^8F##}o$d%9Wh4=H*GaBNSYo777>Zkl9JP(IIp*QvlzZ!Y8ouj2(7$SI*u7;UhlA^nru}hJV#j`(Md$LU_wUY(GDSyZh!d#=aG+}wtvF87Pm04 z*dXUR3v)OMlS3U)n3uSMFz_O%NS=iyog`&)E3k4=QAcL$4s+-FCsJzf=9jbO#8GQf zVDK5(xPVH1_(Y=;++2lJ_zA$sd%$q$u3)+HC# z6S>YX=0m7`017?0E;^YU7Rn;xnG27#j* z^`vS)l;By$KcydOQ00&VeE5dSf7l>s;y`NB^JVKJPt}Tzo5YtP2itFfbLzM?y5>SX z1|)-f05}manD@b;e;R&3yPC|N8jDNK=(4f*wcG6&`m9!8_2!v}0;CuV$ohHll!Ce= zAP@a1$WzXFqZ}W73*2?VSaQjqOa+;dbVeF`87c@s(}tGnZBQyQj4JewS}9M8*-l+G zTl3lrRi6kSx1R%IJ(cBv+>feR9@H>F!D70Fx9%2g1zxa}p!>X|lp^X%S^8<#W;|X! zc7MY3r!x9nOO?@p(&=X`I*1lT;~DQ8GM;ldS$$Kkr)~f`WOTso+w8*`Y$JjNPV5y& zj2Hsj2OLTO?EqLbjcTw)W!3}emzu^`YIK=InVLNkkj%D}w?nx`6(4qtXKW7enc-%j zu_nsMl$CfDYQ}Nc`npVKMt{zoKrTd{!U$k(nP!@Nb4rKF6l9O14C`2qW{E*rUbuJ^ zD)vax)4h2ioh3#wC$!RA>J{y?^|woppiUokK0{)eWwRCl+NW|WA8#?OIG%#Y^kp-p zmQ6Jzn~-N$e?tE605Qlx6r|)~bb2=u)HHqv8R*OaHwcMHNlB;_3Cm$*SiE|T@b;Fq zN~ty;K$;F%yV5tD%}Y`J(_-MVw>{UKGE~S?G&5|1zVu6qGRDEulY5edLR<#rSELXE zd0)|;aJ0Be&O-vgOHeo?4HFIBFr4QA)23^WGNzQR&q^7Q0rRR9u^p9?CyCl=$^&t8 zP`(!_vJtmYdJcgi#ixYx2l~wIc*?3~#j&plK=_1HntPAlfP$8C^8z7Z{bW?UzV9D? 
zkV%CtG^v)Qg7h7^(u`z>TIdjfs2^%R$X}P@Cz*dSMJkM43xcMhG3q#>=f^SL;h_D7c8CN|?tDD0|OvBDYdda0r$>&VmoxVu|Fe*7YBM6njL z{x}~|%-xu0}_*W9rDpxjKCg{1xq7eJe6C2fa1r{ zpi`jzQMiGoK3j=be<)%^-7F%@P_%|*29)!YV`b|7tsvzpeBM#hgdDLLJNZp)N*DU$1s8MlZ>BzS0Pf+^ z`z^2Bb$nwbPG+-Fo|e>*ME=S0egadX;D0DX+M|r>K-2>B_~Z9b8T;n3-?2vt)dGkt zl@{Z3sGYI^`^u*DTg=Q9{EhrF^nC3;AO=j_Xefv7zt+N(_z0wg5gBTN5_Tg-5&}*c z7(o^zM(KsY2frgO{giDckIb_Sw3(?B#tcB_!NEtFr%~pPrwy=-w6E-XPb809Cwb_| zOtUU_O>HW|VQ^m}(i1%0=Fy9QGF z_9_k<0X>XO@k}Tc4Wc{{!~_mWHgl{Z;(yI4L!@9)`f0C)H=YWt8byWm-Njl|`)%@| zD28CWA4jfckca2dX~iLjNKVSbC_`MNa&GUiu2+aQ=H+0?K6yQKakwWFzC< z1FryB{1j)H2+Uh-!)3KGaq49xnj4i`7AF#=c0&5eMuc{EWQ>J@<=#g4%#wI1%Kw;< zLN(PIlHmXma&j8kt<8 diff --git a/lib/linux32/libopencv_legacy.so b/lib/linux32/libopencv_legacy.so deleted file mode 120000 index 1afd5e1..0000000 --- a/lib/linux32/libopencv_legacy.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_legacy.so.2.4 b/lib/linux32/libopencv_legacy.so.2.4 deleted file mode 120000 index 0213de4..0000000 --- a/lib/linux32/libopencv_legacy.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_ml.so b/lib/linux32/libopencv_ml.so deleted file mode 120000 index 4e71450..0000000 --- a/lib/linux32/libopencv_ml.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_ml.so.2.4 b/lib/linux32/libopencv_ml.so.2.4 deleted file mode 120000 index 338dffa..0000000 --- a/lib/linux32/libopencv_ml.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_nonfree.so b/lib/linux32/libopencv_nonfree.so deleted file mode 120000 index 73c1613..0000000 --- a/lib/linux32/libopencv_nonfree.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_nonfree.so.2.4 b/lib/linux32/libopencv_nonfree.so.2.4 
deleted file mode 120000 index 2d6c369..0000000 --- a/lib/linux32/libopencv_nonfree.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_objdetect.so b/lib/linux32/libopencv_objdetect.so deleted file mode 120000 index 3c4cef9..0000000 --- a/lib/linux32/libopencv_objdetect.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_objdetect.so.2.4 b/lib/linux32/libopencv_objdetect.so.2.4 deleted file mode 120000 index 2be60de..0000000 --- a/lib/linux32/libopencv_objdetect.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_photo.so b/lib/linux32/libopencv_photo.so deleted file mode 120000 index 387bc42..0000000 --- a/lib/linux32/libopencv_photo.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_photo.so.2.4 b/lib/linux32/libopencv_photo.so.2.4 deleted file mode 120000 index 45b8eb2..0000000 --- a/lib/linux32/libopencv_photo.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_superres.so b/lib/linux32/libopencv_superres.so deleted file mode 120000 index dbad36c..0000000 --- a/lib/linux32/libopencv_superres.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_superres.so.2.4 b/lib/linux32/libopencv_superres.so.2.4 deleted file mode 120000 index 42dc315..0000000 --- a/lib/linux32/libopencv_superres.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_ts.so b/lib/linux32/libopencv_ts.so deleted file mode 120000 index 88f5375..0000000 --- a/lib/linux32/libopencv_ts.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_ts.so.2.4 
b/lib/linux32/libopencv_ts.so.2.4 deleted file mode 120000 index 391bebc..0000000 --- a/lib/linux32/libopencv_ts.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_video.so b/lib/linux32/libopencv_video.so deleted file mode 120000 index d5ddd6c..0000000 --- a/lib/linux32/libopencv_video.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_video.so.2.4 b/lib/linux32/libopencv_video.so.2.4 deleted file mode 120000 index 0e319f2..0000000 --- a/lib/linux32/libopencv_video.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_videostab.so b/lib/linux32/libopencv_videostab.so deleted file mode 120000 index faeb668..0000000 --- a/lib/linux32/libopencv_videostab.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_videostab.so.2.4 b/lib/linux32/libopencv_videostab.so.2.4 deleted file mode 120000 index 85a3c08..0000000 --- a/lib/linux32/libopencv_videostab.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_calib3d.so b/lib/linux64/libopencv_calib3d.so deleted file mode 120000 index 37c62ef..0000000 --- a/lib/linux64/libopencv_calib3d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_calib3d.so.2.4 b/lib/linux64/libopencv_calib3d.so.2.4 deleted file mode 120000 index 9819e07..0000000 --- a/lib/linux64/libopencv_calib3d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_contrib.so b/lib/linux64/libopencv_contrib.so deleted file mode 120000 index d8a80d5..0000000 --- a/lib/linux64/libopencv_contrib.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4 \ No newline at end of file diff 
--git a/lib/linux64/libopencv_contrib.so.2.4 b/lib/linux64/libopencv_contrib.so.2.4 deleted file mode 120000 index 3332855..0000000 --- a/lib/linux64/libopencv_contrib.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_core.so b/lib/linux64/libopencv_core.so deleted file mode 120000 index 4a68931..0000000 --- a/lib/linux64/libopencv_core.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_core.so.2.4 b/lib/linux64/libopencv_core.so.2.4 deleted file mode 120000 index ae2ae7b..0000000 --- a/lib/linux64/libopencv_core.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_features2d.so b/lib/linux64/libopencv_features2d.so deleted file mode 120000 index 171141c..0000000 --- a/lib/linux64/libopencv_features2d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_features2d.so.2.4 b/lib/linux64/libopencv_features2d.so.2.4 deleted file mode 120000 index 5cd3acb..0000000 --- a/lib/linux64/libopencv_features2d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_flann.so b/lib/linux64/libopencv_flann.so deleted file mode 120000 index 818d581..0000000 --- a/lib/linux64/libopencv_flann.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_flann.so.2.4 b/lib/linux64/libopencv_flann.so.2.4 deleted file mode 120000 index fd7593e..0000000 --- a/lib/linux64/libopencv_flann.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_gpu.so b/lib/linux64/libopencv_gpu.so deleted file mode 120000 index 61edaa4..0000000 --- a/lib/linux64/libopencv_gpu.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4 \ No 
newline at end of file diff --git a/lib/linux64/libopencv_gpu.so.2.4 b/lib/linux64/libopencv_gpu.so.2.4 deleted file mode 120000 index a72f295..0000000 --- a/lib/linux64/libopencv_gpu.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_highgui.so b/lib/linux64/libopencv_highgui.so deleted file mode 120000 index d95a21f..0000000 --- a/lib/linux64/libopencv_highgui.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_highgui.so.2.4 b/lib/linux64/libopencv_highgui.so.2.4 deleted file mode 120000 index 773f303..0000000 --- a/lib/linux64/libopencv_highgui.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_imgproc.so b/lib/linux64/libopencv_imgproc.so deleted file mode 120000 index 70e4328..0000000 --- a/lib/linux64/libopencv_imgproc.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_imgproc.so.2.4 b/lib/linux64/libopencv_imgproc.so.2.4 deleted file mode 120000 index e8d4579..0000000 --- a/lib/linux64/libopencv_imgproc.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_java245.so b/lib/linux64/libopencv_java245.so deleted file mode 100755 index 7244b2bf33ada3d78f189616a767cb339d19ffa4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 905430 zcmb@v2V7Ih_da}SiXs|2Dr!*dEubj&SWr=cDE3B>qF5jT_7?1YWA7S!*Vq*cvA0!Y zS#@n2yI5oI`ku)-xP>Z>^#p0 z<+Gnk6Da5RF{{{pd+5GBbRW;nXtu<2cT^*}Le6lKsw&TYNsi}CmL<)DXa)Mg&S4z1QqYut(?6k6pyDhI$ z(v@(Q+*tFtJLU0KLv92>**+B^bM<2xmQ~&=R_*pk2ohQ)KJG_5MxSL z^@5LCjV>t@TyxmWPR>r?QD;Nu3E(ym6W~11!rS-g-$Q&qF$)hylb6k=X_3$^(Dqd_cr9t=Uu$<3`se7x2{C#Z}bnk&##nvfNa%1TlW(67|%UGxnG$>OkED!Dm1sqI5n zX-QqssR8&hdv35vs$;LJQYpbdCp9z@G<+ewn;~msMUxy-gTg*~shQUy7qwGi2r{We 
zproIw9hN0fDW#8U4t0=n>vPza9GTZOx1=^q=yO@AhMKwilvnHB5BdZaNYOXcJ=V^j zK6=e=mx_LRAMh14tF**Pt#p?3xh6__y^~rsVV<;7>04%s%P^nZ3MtZ84~FG-GUU+f zC9{jy{RDjhX5JvBw5_2+DM_WkUZ{j~L$zKF7Y>bbE@_77SL&Si=gqas zIMLbJNAKtSS|RDrc}bvCp>**MOp)@a)fKy`BrhkmTMn&Sf;wl{gABBaMsjwBUR(;g zmZW!YWLO3F)Jh2{KgLf79V9l$N#8*4ZTArrhhxH7(0d+W0kCv|a&~+XjxT38{{xP% z(D_x!uhH?JkY5Mfq;qyJOSj?tFTfqZJ-`FNL%<`zV?a89r6-iL|H z>oz!lMcf<6-vK@XJ_Ei0z5%`iegK$jpmRztRAo2FvjW`doatjJ8=PkccmQ(9uCeR6 zh*M;^mIsdW(s=>M3jzw!IlCugDndMSeNg}#OMHmqaxk1pB>|-YWdIcbELEhO9anR+~U##rB2b}i=^ak_+!~IL0$k)&@GLp0w z&Nl!y$#`~sGjLk~Cct*UP5?{0Am0nv2iOld07wI{WQP0@;0WLt;5gs};1u8t;4I(* z;1b{p;2Pjhzzx7nz-@pPa2LSReaIgI9swQ$(g9Ba^7Ndz7nHw*+y;0BcthMf$ln7# z0X_r109g78`8U81b`FanCxA1+1>g#B17rnc17ru}0OSPZ0^|nd0ptVlR1l5}(RpFW zy#dAOTnV`kpaj5|ICfttI4%t+lYuJ>$K?PO02Kk104!CeoE@v+*bm?js0IiCuvCNc znvmB5)B&ggjR1`SO#m$bVSrYEHh^{jmO4V-8PFBbjW{jjk#yW0@+d$Io#S1S)C10Y z0(!~#-f-R*kN{w7&_qBopg&*$U?5-+Ug6Ukq3ZSO!=LSOs8dHRNjmYXR#3>j4`9 zn*b)jR={?^PQY$JDqtVr03Z!;7;qGD9B=|~3UC^54sae|0bBxH0bB#z2C(!Oz00A7H?03|@4N&@E#C*ULj*0Z<8` z0#pM80BQgN0kr^i0ct?T)Bw&K6Bi`A)&$O*5f==3b3hoNHJ~k^9iTm+BcKzYGoUM= z8-S+>IF8J4-aW%HlZk@!*bH26IPOd536$#~PXZ(Z`U3_41_1^Gcp3`F!!w*Sek7dh zWn2oJk0Nd~??0+tcCoN^{F zPb=ZtYQS2+I>36sMgU8jAm2jACdjwaFhr z0LSQ@X*>?cCjch_XEN|-;rKkATOhvxxCFQi_yceSa20S3a074)@E3q3E97?o_W=(8 zj{uJWPXJE=JUxTs=YSW0SH!=D{0-n8;630Y;1l2r;2YpO0G{L|S3nj(R)9Mo8z5uK z4(A@k<)GXX@>~D~AP;f;p1c{(^U-mB$_qeV5KstE7*G_zQZdMj(=j_&W;phN;}UdU z5^~=R*Gj{289-S;1wbW06@VYWAHY&o$g2Zt00N1t33+WmT|j+6LqKByOF@)3fxIc8 z86X(Y0uTb=DGZJ^fYyNU418NSZU<0Z#x=0nY(104&)ke+Bt#I>syb z|6AhTL;eZy8Sn-074Qwf(hta;U^CAd;F@7mFAE%J1LOdB0&)Ua$_2Rskekkt#sBjX zmyh!NkQV?Hq;oIG3jqoPya7c4#Q;8l5&&NSPo?O%4CLhj6#$h0RRI2gs(@+$mI5fR z4tXFQGrkra*9FuAGypUP1Or%#%iH)$uNQUNr1X05{_Jln`kV)3?N>L+EA(J`&5rMz zl{z`_bGI2yOex5kOi3+hcg(y-nnz~m+2UHvaF4w($c(JS77-mdPkQ*$t+Z>zrP8|J<UySG?pYo5RI%LkCR zz2K4U(zN;SOScX34B9kwmHuwWZo^momeOuu)+r4KwoePc{dhdjf1Fr9T{^jbdeGjm z`whyE2(1uQcKXtqL!6cYvcC#i&hq7ebp;z=Il4vX-Z96Uc_}y3-YxxWeCHc(``?XT 
zJ#`I(@m+%+V<3N(_wfa%_TLhQWf@SV%;4tV)1QtjaOLF7>bEQQ4cOXzR@|k-s@TMy z3suD%9AClmPs;o=e)LQjbYgSb#k{v$ea{1^R(DwmzwpS9RRP~dr!M&p7%-UjU7Q{&<#lQ(ddnk}P?*EAtdHZg(kmA*^qi_b<|C zcsyG;YVXunIS+i)gpVjb^5Do%LngXM-Wr~DhtB=7_kuPaaJ+Lur2-Agg;*N_Ru!r{ zy+!QKF?o(w%yKLB;*0ZT{|K34xf^n!&dEu?L0)0+`OkT7Y4`7|Kk&8hetmSk@#-^Q zqCN#@|2%PC%ZZ7BYB>K2=V?<~*T_HE6taDY`SXU$wmQul#1Ams-MHx3mWeIW*J%zs z1r&C1-M;$Lm3iCyZtA%f^5{L~NAHB=Y57`)tvl81Op(#HvmN{Pdfy39G*|I9McXe( zy3}-W$grXt1D6b}ayHxN_MRtPXWk8UsTqC>@;@UKS-*YNgg+=+ce-2MB#9v_z*Dp7 zU@kzTEY}7eKiSx`_>Xr1sg-kmexQp=PntT!JsA*GXFvg>8)$rg1l+tXDz!c zT)))5)bq*f5$4a)a6J2LShw>FUmMp&2Q6RpspGg0HCj&!KRB~SYe0+c?57n{w;mN-(W9U2hd#BYaJMXyI;KY1(Ty3hfI|1?nrYGe}FB^XNU2=umXFJro ze;$ywOR9~gGXcckG~yW3@>!cSi-vHVr<9{bqwrWJnU-oWvdkTPF}Jj>qZ9UNbSD1kj%i~zF-d!NCyXNIQkJKp-8Cu>g;^qnYs6mqg zoce&y__#Cmb0^nb?(4Pn+3NdeulGI}Ju~a^$Fl%MQmVPR`V~Ju=t=0L*>HZipI6@5 zqrTQ0zxV0p34nyKM&tVdY88oT$Mds=%YT`(q(~i>p95U_sJT}7iPy*rLsj2SvBFghaU+fgmxDje^8wk4$g)$z9TOC~N}IIZ!r%FS0y-ghMMHOmh`9#GY% zLv=uosMPMcdgcCWmKWbCS#$Jkmu+3&_MQt;+kHAxJe##^@30qRJda%btBzm6JAgU` zI<%-c*RrHy!*?~S1!xw|?Oy)mm@`v{Pp);P@KnIHejU6^U9a6TRHIN1&oe7}&k@_7 zEH9!i)SRK#o24^~Le3B#Sz=Pu5oV@P)_`=B})!^*> z+QsVVo|HROcF(@_^l42W1A1(-#_!A}1r0CUp~X|h)vdFl{GJ@x>-(^wx3Bit`KiOt z$K<;AwSeAxZii;Z6=fbC?DWCyL5y#O78O^2?ArtK!5*hWy}`4HaK1|y7Tn3&w)Wb zkN&=X-@O^b&Kk09i@WnU?Ow@I)AHXwvnpv&md~YDH`Mq7Qs!OUI0{e(j>G==G3d#q zmpMP>@YNJ;b=_RmW}CwhFlzqDeW^RvZ`OxgNKGC6f*o6~v~n>oi2VL#VVM8;W@o;x z{WO0M%O?QBwOc}Lsac1=-aoQJrTka7j!k}2uKu9Zc0oTLu9?2~{WYJWMUOFb=zpT{ zy$TNxc7*)n_B+mZgUc=b<88@N4ql@r%(YobmOrNBoy$@_jRY<+ zC4U2!>jAx&)qmN=v~tgcw%IqO!1=?(?G;8tJ_?Wy#|%#Z<;5`&)$G+J_uU6b1?)aqyw^{=^ko_!qhbbC|nd4}L`OkV#a7gIT?q`oxjcrzVs$cWN(_I1vp8T++({N=79G9A!w#w4Fvw`Q@ zh4H_Y0Zu(tx$PqG?0jL)?Hw<{@r;YbSN;Jx!^X;~k-5_~k6E4v@;^P=c3+!)ajng8 zeglpVeCm^=X?oSmSmm9?^WB+oxd8nxTsb^o{+|VWd?vXJgxoXa_@%EKitQ+SKfFs0 zIKF*kSj)~XSI4rv6XZWyOXaO_+y#yU+zyA_Vb>oNYugDB?&+WZR*mcD8BX_&nOLy@ z(fIry2Q*=LarNcK;L8*5S9LZGjBxAI{(5@7)M6Ub_ug-pnQp?d5{^d|>bv#*#T%b$ 
zdc7OGe^;k~QWIxSueGM|{1wgmwJkR;&NH?3$w^0_gjBipbZ@(99j6nYTg-? z6w&RK=kz#Q|P>2+J#aZ%HZ%~|32M%Sd~ zlivE3%egbQKRdoj=j*;b3HYnd^0PDgo}Rd@(t%v9V1xOP6b%vOQt=t8$Q+1q_Wyf4+O( zyxUiN@&!eRtH8U4<0-2woC8Zi<|*9HP-J8Z=Wp*sRj9&o@py< zL*Cyo^46hhVo?07*^tiwrY8`sTiQX7jS;xy}~f>(i%Mw0Va^QH|fKHr89VkiN4(5%UVea3c!+y|}MbS`vu{t5@imh^z*iPx$wuRQAHm>~1i z9#yMXk6Aq}V*Iy?<#!~7mMc{>@51w;7xEoWedW?~NY$*>r{^>GIRMA|0YOJ>r<(w3 zPboMp@9V357+#!>mF_-&qyO;WdZ$e-v)ZAC_1FOQnFNdZYX8NvS|kI)E4B%h z-tVltr0k9r#yqN`1A%jf^JQ>+;6Y$`#DanaQ=(7DSJ&RkA6sH@?cs4}P6eELSZvLg zam6bS3hcr1b%1)`*4Hq6+dF;ykqUpzp!}oB^KFFd@9kWEJhwH!J;wa)M%b*i-PhLq z0ePOH`$N~kaX!e8gq^zoqjiIBNy!tZq#6d^>ohjBPN9Zwy#`k)Sk#=l*}a9wk_z8< z_E$aHXc)-w@^QyJaZCH`tN6D6``(aOfxIu|)+!N$7JF>^-obxq($}fkSM&qUXJSl+ zl7Q}Wt^|+mUAkiw)g`ry0^wdv>4*fM-yCmd}wyx6cUZMLxCaq5!@yFJd7Y_V6+yYo!@yNa=yklQeF^Tazro=;r|`#lFVXt%M6 zrQ(p5tqS@cda~rs^O?6gX}2Hq+g>dotpdx>6yAQc$w{x0EU#I-$T)^g7nhqi0#@Xo zHfRgvidy|^#zY!lRb8@T)mE22lT3iQEnUy`3_6$WlDB#9B|bwpxM%ZKZZdoRwiJ#J z#FvauBbbI*?$+M=Id(^-61_Ji)=OY$r%Z3(bZTxd=h2hyC*>~KYt+6!_x4&KtVUPLrK za#l6qYW;e{8}%8xWBZ#|<=PpOey=;biq)-y*DQvdA4+PnKptQibGTfqijzv*GF97f z=SshlkOwxu+iY~1hNqh48S448(qenAeiX}>tS07r)D!au8j1N5 z@O88j0{={@EtbDy74svGiTU(%Vm^4am>+Bq^DpX)`NpspDr(1j@HIUVKc|dXe!~JW ze|>gE)UTOe|k?mzY;N=#OO`=pPUw))O*K%*WWod=2<+r>I@ROTuTI zf_AywMJzw(rkI}|DdsyTi}{gV#eA8%Vt!1Zn0Nao=D+xe`BEuje#~Jpzu7_D*4-Kdx>p62(%vXT# zDvRvQ317bz@uw$>`RN_S{5=QpbLN&<{`Pn=Z-Z~RitIT6eMQ8taxngMaTd$hTQBB^ zW);UtnQ>xyy@P)0<0ID78tjLV{w3`wD3&)Y7MELfwwNyo-`5rS$FHGSetH`*uXC_| z81JBcCsq*a>Hde9w|I%|EH+jwKg_{=>f1ytf89ZRMmw0_!-t6Vd@d`tvjH@u$j@d6 zey(#{EPtk$SpPiuPQFNgU4OA2n*;kxn#J-nzl!-T@HKByxd{&JXW!8j$*(#s=J#9@ z^G{)&Dbf?QOw70KB<9D!_$kuUC|1nxcM!MByNTs*Tod!(Hj4RY@HZhMd)5yW%g0X; z^E<=D{K(Q`K97U=zXyYlXdT$yL4SNRL#%&}gLZ%KFP5LVLd?&1FwfRCgw{giur0Vv5Dg3b7wJs^-nS1Lo4P@zT!9> z0P6u!Jgg}v)^pKGtY=yQG5^%TxIvHb8^V*ay(c1&6&mUnY74(u)^w#RdcSkK0AF&{WZ%=a%T*1zVC zSbjn!F`w=()-&~Yv3#laV*a&*^~@*-@(FdsdYU_!M{+t?&#ZPZ?&&45ezSx1m2Q|= zzWzNi|2~V@Z#O52<-3j)^Cjnr`8(CceB>B0Z#p98A3J#d-RdTmA5cPEulY5^@~2(J 
z{G!QX{s#OdplG~peO=7gixBhrR$@NAyO_VRMa=hiFi-VSi2bm^L4Er>Sl_OwDK1yB zUCh^Vur76*Dwc2BLd+juB<6!0w99)3_INs&zi#Xi>tEJi%#5`5c{9L4Jj1eA*kAg#6Z5@(i1`M) z#e7;@F`vJ(n9l|MSF~<<>>v)!4(e6nq*%YJgYnA0zF2-hVR1iw=wKgUzJt#f+Bzur zl7sQ5yo3GoV*AB*MnXF(0rJ$v!FctgwpdRu2lKVn!T1l`7{d5DeqPM?bl`{4dBuJ{ zHCC)ga?pS0jS$O6IM_!zkV{7adQ?G@XT+d(_t%qOmITnVwB5)S6mi!;UYHV6G~(Oj|oqfTPJj01m${wbDM zs>J;9WHBG+VEi}U70Xw0u&+|q!G2wHKe3-b*A(kda}c-NM~mg3M~V6H>0*9GGckW~ ziI{&eTg(^uAm*dbiutHJVt!Ii2mZ+~mM{C4nEx6g=8wS3B1Gd}&H%Ce2BVmt)LhK_ zKzoV$^@=>=a;H0p^Pq2H{jGh)yzR1>Z@x*)!!kkWhbmRX{OB8EzLkUL+?s>M^0l*x z{cvKfSiZwGRAdb1*I)bkM%ztm1Ole;4yt zSBv?uvSPkUKQSNgpk3NJ7&rVK>{qmOuuki}TwHEm2XXRzoLIiXIx)YYn3(rZ6Z0Q8 zi23xRVqRND+`qyWisd(L6Z7pyiTNiE{5-RVSl-%F%s;*;=CgGZ^JCzFQxxaku%1x@ z+vv%`N^Nfd_f1#acvxYj!?qEd{@9I*1z$jn4j*TeLWn^chL^~S=zyR zFQ31--}yP1FUmRCk7a+qD~j{xO~rg;2mMaxpkCkxNh$9vaQ@W6b6=nXdG@!^B0Il0 z7+1A(#PU}h#Q!u{zlrqcbI_04JFv&gL7aCSFV?Sfuzp+P;5l@-gZZVYgU?x}IavQh z%n;k7aNq|BH=%!qY!vgUDPsNrIJt!6A=!??LIM)^`L&1a82CT3+Ie^T|B}?D2+CWF zxY!w(4(#b$E0nifaJ8SWWPt{03GpiSf(uAS;`iFu8y=E@{Si8(8pOM8COuP;$GhOY zF^9;W_sGLC!=8@cLtY{~Aq?#4$SdTnf1>?d&m`h4GQQwslsD5K>8sEk(kF@aYY$0L z8$AHqP4+a&-o74^u{nIO2&oS7U5Qr*Bj1Yn+V*vqhom*5C3lHG(Ua=c0xied!Qao5 z9$Qn?)0E`Dk$)7^P?Fd8oqfO2L$YQ;dG?+imaZQoKjc6^u=km<)MOj-re&yy^J%0< zazQ;eNspR%)erQ;5#kGw9>Y@9laKU2x{LbN=ds*H#3vJP{2h7rjxd(`Dp6h&go=54 zv3Juj|C{IqYijaCgnd83LsGk=IRmJ|HC^cb`rlE?XOSv zbcjdZ7J?o|Z@~LANM5rO#6>!`uLzd`+`>qu}v95{fy?H1b8gY>+z?|Va>Tt}Y0LyIL_AC%Wo z`{Gr2&m75Xt|G(V?{;G!${Q{t&)+dOo7zi0epbniJiZ!i12<{|0nrB7VmIt}&cKcN5F_pw+q5ih+%ekAc8$7<*ru?j&*|i!tH?um*wZ@d7iL+%n6sArFc2+)k1y@%vwdF${N{os)~9%A z>r4HsA=(*4^_`hO{t3-I4&1hX&jIwPQNB3Icd(LwQqj&Dq(3ny^7fY(f+f4r9I4<} z@*c^E(4sJ>ymz4D-*&m=#fHOgDq%ex@)ad_7sq^BR+ z&)&1iQb!seY&1R$Cwn%mMtPME%l(1k(zyG`t7)BsSK&Q9a6OOU@0ce0(K;apli1AJ^}Phl`Xv369^I*`Yw&rsww}&E(K{L@olJITd$yhc)jZ9!Tu<1Mn5aC8q$p1$XnJUUxEC#2D%Dc$EsIixqQ5R zT?osSJW+5z$q%lCdbBid@Nz3;M|l%1OxSxBS-J)KnSYe*#ifwAe@@*-UbO-3=i{mm zgcFlDTto#uu-cLv$y?TkgF)K9*;ba^UjNrH#bBJhm+s#`Jw&h{y6?LCcZNi 
zXD_!KD(3OIT8q3%iH5Wx`LBDB*PKKBe4S9LH1fvLXg_i05ADpWMVhq4CqO6yt}&O8Vmi)%PCypZl%eT;vT(v}6m}d1emsQgP%B z#P70^yzKd+z8l4JUo4l;Yu`?zytO=*dy({bk-VuF$~PwaOHupkyCT8+Z#?NymqR_X zNKe0f=r`+Wly@RM%d%7a$o$ZO)+f?n)Duqf+3kNv2J><=w1>yfL;JoB%!3V(ze^34 z_8jYFyoP#+mZE5#pq0)01$&~riPpmuZqiwQ@{bp`)Df!Jdg{MAS^wQY@ne+5k3NFz zDU6y&k)E~Jkk?H{J6jQ-kK~nwP<{mQMN?5;dl75I`)PXzgto8rL`itRpy^jA(sJ+>&clb8GII`W!X=;s_HKaqIlOyprXXHSbZkv+5z z!2N8bb&GZ@%5(kii;KsTJ|u73g7SP^`jghNHd>$HRT!5hV7aCcslB0VA+N84?f5&zc{lJITW4zNCG~urd4%>8w6gt#zb=se zw&{!}0bt$wtwNJ2mJ=TI-=js1}4 z&&v-#I_^#jswfq$daIS3E-fQ;Cl+Pxc(A`Vv1~f&E%a z#RBPZQ8Ij^a+MfY@Mm%M)P;DXH}Ydqoz!HZC z*~r%={_88!BU?9~fp}o!p`I2vi^)!JCCaO4J%GIxe&3@r@|G;vEh@;QkQ z(4f4Y{XqvLK0mq*Lf*t)ZVidA6E5yR-ns?-#(!7CNW6X%^87bn`VB;RHN7yq2bDW8 z4tbSq-F4+R;_2ne?MU8|4|&_~7(cM>V^5vlAg`x&Be_$WFq`zSmnTEwersbS`B3Ef zbCwgy>+>PMipqUV>mo%6@;u%~fMeM_X!S+Udr*A7RU)r0f&4=1Ur)i|_IYg?##M;j zDp(Ki!}hh&^A6|heL_1;Z!k_^yT+ah5U+ZLJp0`mmJ%wXyp8=J1SB5M)oC1)&!=}t z-t-aWTa*1kYf+D80M@r8m1|u?`je4wPrQcqMO9VN^ZfIL*aXre+t)p@5P21Qu{5M4 z(zBF!3%z`r+h2xwT@94yd}G=#($f##@Nx28DC#$`9|?fO zU)Oa)1szF$dHdffgR`Gv+#V!;$a9o8$mY{+ImrI?sAntbzXpC|<=Td$-+rJvX+u`( zM^lmbo%By8UNHsvL%hCK*?zLuFZ;^ZRCejyKJNVF}>`$@*v4;YN9-E zugb(LYam~a^fWAv`qd4vrWi$%G<7=iDmS!~$A1~(tuDwnAv=db1F?3O7G$p1>NMo7 zkCCrQ<-T4``uicz_s8BHL|*>@`3}?odx*Eu#?^SzBN4B)qWmc0>(KMNekis(e~upT z67?%&^JB{kWKS6SCxJTHL#mg0G3pN`{eILR)w2G$i{w=cQ9e7#zrRQ2%D(^LofG3( zV?x0^6eq9CqP%VcI)JYSb~zaTucV5{|IO6CdTL*o{_Lr9P1J7-!iw^J(flw_vvyS1 zzAlcIZdA?73gm_hbxXFQqDE zk6{Axbt$e^LC0bCSm#l>q(@8oO~a7q^Sgz3Z3^;Luo_Y@=~2%`p09I`mm~kMAE<)F zKVLY%33)T^`=M9iH#}Y=ucmbrANOh~$j+6hpFa~(0B>7WM@|No; z??mn%6^nXw>;;36VoAPpZ_+PYcNHOdMI)5w^Wf&eD6bfayp8nK>4UsVw!gG}6v@+% z=@cV-iW6`Du^q6-o%qOtD6e^e#42jYH_M3ch>od6<=$z4y!{8lKu;Ly*$QIJ|F-@} zaQ|Ld(#KYRfb_ZmciSkR-{LN z5DC6tR1TWYZs$x?u-}Qb4*b?7^%p|~@_fC0q6NxZx1*hWpLQLs>%3&&6EMTl#Qxkw zpF8g){fnr-Yv_mXcz^E?W!w8Hec`T)-5U5kE>F}WpRZ?(CHqq`9@dZ_I@#Y_0`r9h z>zhR7`VwzChkQTc>qMd+LrK&#ig=3_c|{52`FyvU<_q;%)DubaEG(J*CR(5Kb}wMB 
zR}RT_Aodrkmh?@9^5$IV&nu+o^&{l9vhleleU51^h2vEc_3M$RP~KVs`P<}&CN3C1 zw$^ATpHF8EBRi*Iy*g95D;|;_icgGo_-!kC-qFoMdA<%0B0c)4$iuK;PmSR@g0(yU z5mVmo>3dPXTK4&1k50%-&ZtL0dd5#hUjGt#zQ6Yo_Englm3y(G{Q0pP$(x>|yoL1K zoPzSUr4Z)&+Ml{kqAhpYL0i_e6i{W%E}Z2lK+B3aCeO3mXcyXYFZV zU*wIl{q63g-yqYU_yFY%vT@^#9(l=#^)itD2;z0@hsGdr{!>Ggw+_L0yn zI`PfngHcwlTIQb~v_EO3c;M^#*4&*O4C62jsawx6^ZqR`z^7mgLp!hXo<=abvhY>Q@v*{s`%hBzfyw{2lwV2mTWI{Tcp%UB$?vpBdCM#0caZ$2CdljNAkY1FfaFy+ zeOnaFehj3?ea9eHj~R9&>k+5!2tRBj~6D-Fo= zavM>9H_Q5a(=Vt;4Zo?*l8*Ged5XM}+L5n&zf+v(r)Ac!AU&3>qz5lcJ`j*>Jk%V< z{&I`-XQB0{HVVgWUfL zA0vYFC}L0#|K3q?+BZ|n)}^7O$Iun^w4`|_1_oaHID8oG{BE}veoxo_{#f`NFbC?_ zQ++EwL3uTOKMK7B{Vy1KJuSGNk^J_HB;N_krR{PlcYgGzPL7NY@>}rJ*o|E_L}nba2X5P-xTd!M&(XD zkG!!l@>j{ur}SKDoQ@vwBfbsETN|MKHsVd^NPm6g+Y&!v0P?y7w1>}+XGp*4H`ITJ z?8!~?#=a=e=f^0DbCaxHYVq|+7IYZz?^U~Extd(4CxGyz9V$l36+l&>o4F%`!89;I?Cf0OQ$6d?58N9sBWLNIcGC zKT*BjqMpxWe_L;ZXm+hz&!<{-3_ z%U@lLyt*;+d|od72g&!rIN^Wqprm=nB%60OQG6QiqCAhc6%$a8^*oLP)5uOk81niP zSTDYh)QI*ut+d~TR>5xsPegeY`>|O_C#YSak{;VpC$QkWAhp+gB_}@{$VW50m^!P{`y}l{54A zz9O$IoH?$V-$h=33jL;ZL4yvFys;q4>#4nRki7PIX8A&D)FZ`UJMz!1W|c?#b+UcG ze%zkCsDC8hCLODfdaO}M@bM?lJJK)P|M&JmUPpiTG=%&aK;^2bTs{srr|*wR-7r2A z$!}?8(N0qrN9COd1QpZWOaQU!U%S9B!*{@Y1vFAe=bIiFv= zXusVy82vDj?CJX!^;l%zH<*?xo`2uEQJf^Ao?_TMQl~wr$8;J;fUTr|TO#tRj%Yt$ zj}<0)+ewt)PV!?&Ud#SK8xmiSoq>jC@n&s-e&hRB*LRcs$52mUimQ+PkTpD z$X>+D_E#5>9_10#vzFp8`WDKYS7Aj{NKg7rgM;h9D1NX-_%_ z^X#)lq+gAG;OpuUw4TvYzd&o@`+((0{}!w-bT@m_G(%po1j`*s0| zUlINJne^Omio9VF%JccCHR&qkp7bn3p6ieH!SO^JjP>I4X6L-ftLX1^_UM%1I4 zfOf`^|KmmwKNt0PA^rt@E~b>NH$q8|ay;sBB6$f0Ox9ldg&4`azJ~nBn=T+ve|9Bx z+l1w+r=T9bPMG6{@&;XIJpnIJ-Ynbi3Ve;cMB^=LlcdwszYNsB3R9dYTv5NK3+m_P z)~0 zX)NmD?XqGd>enwsJ-mJ03y?iGPyz2Rzi&X^I0p6bc3BMvtXDEH{$MH4$$djC@_fJUh2XUhy2u0-Q2G+hVXl9{=tnuNi>ye7&7c zpYKSW&_Dcp5r>IacR+p@>5qi=W%|uD;lnb(o|3$=y)0$W!LU5Dr?rn!-YDx|7igc^ zEZb);S_b6}Gf@w3mu>OLD`fi#`LxKJilYDd`t5d3w8u>Sk;kE*gYk2H1nN)44yh#!UgThhO`Dd}H`^&LliF_M>3P@eDSOe1;Se3TzU^2Ld_%|)KKS1=5Z 
ztX(Ye*e-mW8SG%(%j00YYS|6-=<}gHynnULjpL7LE%u8%)cm_?y=qvUnYVUDJ+?fk zhxa>|737Bq9M9qP;P#Y6&ozolSg-TM2gRVgLXCQOoG)mJyk$4a^LR_CiM+lk^89&! z5XtLzWtJ~OymBYdX>J9 zyi)dg@$&@a^|JoHatrbr`raD<`<0YrdjA+&{(RkXOjo?Ys9-z_dGd>hoI*Ka_Lutn$l-IvS z|I^n;q{pw3*UIEynvu8IP@ccHX97GpGyiD%px^lSvbK=C^*PEvC3}vLAJi|<5|+#WkFh%z$op4c(yuOo@&!o$88wyL z8{?L~HYBYk-b`OKm`?ULBwiJZ@?(h4LF11~HvT*Ww=jE@3s8{HFULk8Zm9`M7bq2lA@s$n)pQfh2F~g?vM@Cz0eW z&5&0<_^8(%l@0+6MMB9Co=g)~w#jsp+LF~W$`xuEwP>;q5{ZHFoQjx33D`{W7 zu!QMC8!Gn>`iIA7ngV&nfz0)7agpTPqyC$`zFx?iK4i8th{kQ533>i^LuVa~pGpVg z1)ycl^w7nwAD`nqL*hl+&`ekSjAGhx}LOqrd=!a5NFBghK zvn&qhki2;~%EPkIo&so|HD$v%Pc;<09o}Q!4vghcg z9Z`?%3zo~rw^tpIH+@FFI{EYRF61o-v8PWUesC4!Ro~H0__)WO7LvSSKg#p(T?7-a zOGQ36RUq#w)T8f#{B+XOnbtps^T_uk`K2pSUW!GYk4sL;$g8`fpZPwCnVuJ{m9bv@ zbC4NHC~weamM?jQ>=}S@l||*1mj{Tr8BD0MY$3E$nFSU6 ziI=56s9q*ny_U~JJ;t1w_1vX?t&lxOw|;{18d?XUS0(8T3_JF5eg@i^hwL7kL}3k>yy-9Y$vc@yuW1Mit_5YXn!}-^Oojum24hwM(Y77 z4$I}wm8yrR$1Hp9m~BE{KP$8TLZnAU3s|maF2$!d2K65zdtUgWA9PKy|MGYqL%gkV zW_}v^SxbJ#Xpy8&;b@Qc8v3Uf#qEvO$Sbap--v(x0C|(FUR|Bh9!*PVI7ob4dJhG$ zb~KMi1?wpex0XbCWqq`h@B1|)-c%QP9_P!mV;suokq6aKzp)J3xt#RZ&5!aL3zj>U z{N~;r<*jAWpGBx%w<3|3${_z2$@eIZyoSbSY&Ce_2d{w0eiZTb${;VzM*9a+y=)(;+?VJF zJ;_%NLEe&zC7OxfO7h0XD4!EEX+Pi3*^BaPNWKmDla1%vujqe1o)_&)cFM-R@vt7V zkHd#hp097~Y)4+X8tZkC^j}_qyde^K{+zO@Ir8ewXwM9ik0yDg7UlW6XyAA%_c*o- z->>LNdJG#;&vMe^OY*iOndP^Uoz^v(?M$7G`Yi>K=bvBShf`KxJ$)gRKi_3vkGze2 zpbUxkcWVW*^ENi%KD;cIeT2M{<{fHY=`uY}nP^@>YvFTyzTR-cdNn|IOXb_39>rhC z^XHd2I`yOc|Z%jt{i8w4u(@0*o66I@<{zfFPj6-?)`lEDd8|qiCLPzrTd{0<_ z*~bmp_ic94dRQx)XD3oW)lxq#g4L3`Q@K)~%;mlo+HFuG3N&M^I z(N28;b~wJSdlp9gDHP=6SSyk@`=dPne6UD4l(*5q)`I+#pX8M)l&9?m>Fr#UH~6A| zT9W)EU*t9Iv0jVNTxr`lkdE z`S-LvGxO)4V!0AMrx&Jn-%a~k%6OFDOnT18p}a=+9QS@W^5)rS4AA^B-)9&>_KaGM@}_w7C-;w%cx_+gc|X0f9Od<0ksnU>uOnXS zl37nZ;tidU=i}91H18{A^S(I&_3LDOgEh#j6H(6(vj6=HGH!MXz(8sq@ z*;>dOn8rFUXrbF_43(zALD`)Ux*KI2Pqqd$7Ie<8A4|802+3 zGxHG7xdH_-Z-k9*T-U&vAj z<-d@gk!8_-Z71ZLke-}nCd zf&6eE<-^Dx6%3g6^+rkb1CNIRG%qMtVjMb=d{NrZvB~xUwvhd{wrGEKvcC-R=J3q* 
z+IAny)ySR~T2?_``3>#o&!L_)FPN=pCtuf<2LWckVK(v`$?+52gdOA zh85$M_tX5x(N1+Pw3E+kMK>WYO~!WN_EvIjQ&+#g}ubZAXl{1j( zLhZFa7WG(zF#frpv(UQQP!Z+tk^DmXe9G1q{l>q~aIg&O(a*#BuEI=e{tkHs*@Lx{ zr1*!(TV(N&O!`%AQ2#Ef*Y_u+pZXmiAMV^iUVRib^XIr>#48UY?@jh!rge*v)-C5q zf9V#eN4FI<^Zlsvv>vu@L7w}kOfQm`jgw=G((@+$ojz~(MwLKsD6ghJ z*or28a}3Ehz;^k`Q#>1{VZHeJJc|eFF)zc4^6wk4kEdCD8l#YRCH-O4uO->I`bQ0v z*Ve-N@^wN}S`Qm({=(LgBwv`Y?E1?g!{hTdjWepU$VXAR*HkEP^+f$QiI3*vk}t|{ zrTB49M|rbs9tn7kyk6GdPs~DIkr(5Z&&zpe9x=%3+lb_ixiic6--3En^f@?R=LDBW z-e5)p*I@HVU*eG0%Hm;3ery-rMf5*?&06xVhw`S*=m&0RT3zJToig*s`XO%##{&8D z>CWEBOS0$686+>YMtKYQ*_p-%wQPKFg^7%{qrwa8^*j0hH1#{3tlxd5cu;9j{~^*p ze-Y|2)=%zok4Ikgzx?-T)*VH8E&X9x3d!fahrIOy>fzsWyVVYP<5Mg*f#iqK_cnF1=cYX* zuY7{?{PU^ew4bKGggo~{Gp_$J@*_xpQK+E3U4EcFynmG>dDSD7k0JSDdC_m`J?J;u zUXk8>LOr&xsK<-OLmh0W+VyWkdHT3l`ra1h%@0t2Jn6qk>nLkYlrKsA6w+^bhx!X* zCb8GH*vAL@Lkj-9zku{>?xTJk>DfT~4K+}IBeHYYX0l)QUL7eSqDS9^_=u$B$i(D` z2q_{iJ}y~`h=ojw2yPP+5gn5l(<3e^IVQ19NTWUp@iA>8yZ4E~YvL;rQG+6xg2+B` zLqM?kfQZ(lp;4d6q@+ zSKIhLWLbD}U|d2{MEA&~m}FJz zL4zW6F^NeD%-zX@BL=A0bXW2|) z84(OY5f5(_!RYq$&v+Fg97d~7(S0<~hyP=cXS|<{dpHis)v__DO>pM^Wbd#LomE33 zld*f+?+uKNOiFIT1}?Ay)iDV_Rdh^bpFRl?@P7|!_FHi<@vAW)CMr205eDAmh~$I_ zxTSS?a&;tw33uI!f%l;gr{8eOBbV7d^?SHi^8tjUe+2vQ0Zit9P@-VLx{V$O#9!~G+e1{kopPk&ZU zlOSv;rlLAtbEw1Lj|{iV`NxV4;ab2|LQ7;?qW@V7G}3?2QZ-JjrH0+osuBOON>v?H z>8G0iR;Q{C>hx2M!%F#+gImN5{)cu7xBQ|uAg10G29l_Q+#7}yePKkXXv$yiK^zHp{l~Q;JB7?)q)(fcP1qc zn#ryt_CG4A{x3?ZJF+D7m(h(6qQ74Fx!&UAA&m#W-oY>Yat93uzuv(w{Bj44xsl;) zID|Ir8`HOM!T`4H3QzW{5fPExGcjQxEDEC|?BhBc?EPv`Oi=v65b^hr{p)ZJci<{e zFed-)1R7(1wVYr0#d5mv({dJlnGM77Txc8J47O!xAsVxPwSiyw#Rj^N!3MvYnQgF7 z4?+{@-pnRY%l?Bx``jT^NcUz|NDZG+VPIwp*~B8$N4I9yC!Rfib(sUXUpy$5LvyRK zA;nK-E}OZ8=EAI>d0MLmW8JUD^BE_@+$XkPBq%grAehm9SwPr_YDX22sru&{WSC$6 zxds^p|FH%c1T)mY-!F5((1iYPbs%N`T!##k+dtPKqu@W*A%oz*)*<@e>OjiGb@(|$ z|7$1-C55#RN&Z_ks>V60hFsgf)FVUliyPyozKoLpT#pQr|5}gOf33$q#?(*iGK9dt z)FY$hKi4CJiX01aiVU$k$t9ZNEtU)CKc4EV$TH8`~SQKObgV z`~9u&UnPI)_ 
zk_Xj0vM^&Ci|xx?2ez`uXs>GDH28bD#@a2TwqGS778=JSB_}2fX3h_8*LvMvhU8pJ{McgLbLeujR;!9I(l9SkFdwPysGT6#~vyf zpPIvy{Z)Ly+$Sc!M{>`In8d_{L`{n*wvLglnjyA?%><9(OuGuh$Dcp`lb!g8DtH